diff --git a/.gitignore b/.gitignore index 33d7760..ed4ec33 100644 --- a/.gitignore +++ b/.gitignore @@ -1,4 +1,3 @@ -/vendor/ config.php .env misirlou-api diff --git a/Gopkg.lock b/Gopkg.lock new file mode 100644 index 0000000..2e0f525 --- /dev/null +++ b/Gopkg.lock @@ -0,0 +1,63 @@ +# This file is autogenerated, do not edit; changes may be undone by the next 'dep ensure'. + + +[[projects]] + branch = "master" + name = "github.com/erikdubbelboer/fasthttp" + packages = [".","fasthttputil","stackless"] + revision = "34f277ffbd84ab45fde698c5b251a6c3acf222f3" + +[[projects]] + name = "github.com/go-sql-driver/mysql" + packages = ["."] + revision = "a0583e0143b1624142adab07e0e97fe106d99561" + version = "v1.3" + +[[projects]] + name = "github.com/jinzhu/gorm" + packages = [".","dialects/mysql"] + revision = "6ed508ec6a4ecb3531899a69cbc746ccf65a4166" + version = "v1.9.1" + +[[projects]] + branch = "master" + name = "github.com/jinzhu/inflection" + packages = ["."] + revision = "04140366298a54a039076d798123ffa108fff46c" + +[[projects]] + name = "github.com/klauspost/compress" + packages = ["flate","gzip","zlib"] + revision = "6c8db69c4b49dd4df1fff66996cf556176d0b9bf" + version = "v1.2.1" + +[[projects]] + name = "github.com/klauspost/cpuid" + packages = ["."] + revision = "ae7887de9fa5d2db4eaa8174a7eff2c1ac00f2da" + version = "v1.1" + +[[projects]] + name = "github.com/klauspost/crc32" + packages = ["."] + revision = "cb6bfca970f6908083f26f39a79009d608efd5cd" + version = "v1.1" + +[[projects]] + name = "github.com/thehowl/fasthttprouter" + packages = ["."] + revision = "0e77ecfde28ad422d670671c17415e1d15f0f5be" + version = "v0.2.1" + +[[projects]] + branch = "master" + name = "github.com/valyala/bytebufferpool" + packages = ["."] + revision = "e746df99fe4a3986f4d4f79e13c1e0117ce9c2f7" + +[solve-meta] + analyzer-name = "dep" + analyzer-version = 1 + inputs-digest = "38042664f8e88a691393fc94467eec4408a96b7e21d65b8dbbe63e48cc36b8ef" + solver-name = "gps-cdcl" + solver-version = 1 diff --git a/Gopkg.toml b/Gopkg.toml new file mode 100644 index 0000000..833c119 --- /dev/null +++ b/Gopkg.toml @@ -0,0 +1,11 @@ +[[constraint]] + name = "github.com/thehowl/fasthttprouter" + version = "0.2.1" + +[[constraint]] + name = "github.com/erikdubbelboer/fasthttp" + branch = "master" + +[[constraint]] + name = "github.com/jinzhu/gorm" + version = "v1.9.1" diff --git a/vendor/github.com/erikdubbelboer/fasthttp/.gitignore b/vendor/github.com/erikdubbelboer/fasthttp/.gitignore new file mode 100644 index 0000000..7b58ce4 --- /dev/null +++ b/vendor/github.com/erikdubbelboer/fasthttp/.gitignore @@ -0,0 +1,3 @@ +tags +*.pprof +*.fasthttp.gz diff --git a/vendor/github.com/erikdubbelboer/fasthttp/.travis.yml b/vendor/github.com/erikdubbelboer/fasthttp/.travis.yml new file mode 100644 index 0000000..3ed568b --- /dev/null +++ b/vendor/github.com/erikdubbelboer/fasthttp/.travis.yml @@ -0,0 +1,16 @@ +language: go + +go: + - 1.9.x + - 1.8.x + +script: + # build test for supported platforms + - GOOS=linux go build + - GOOS=darwin go build + - GOOS=freebsd go build + - GOOS=windows go build + - GOARCH=386 go build + + # run tests on a standard platform + - go test -v ./... 
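Taken together, the hunks above stop ignoring the vendor tree and pin its contents with dep: Gopkg.toml constrains thehowl/fasthttprouter, the erikdubbelboer fasthttp fork and gorm, while Gopkg.lock records the exact revisions that were vendored, and running `dep ensure` against the manifest should reproduce the vendor/ directory whose files follow. A minimal, hypothetical sketch of how a service might consume the pinned fasthttp fork (the handler body, route and port are illustrative only and are not taken from this repository):

```go
package main

import (
	"log"

	"github.com/erikdubbelboer/fasthttp"
)

func main() {
	// Illustrative handler only; the real misirlou-api handlers live elsewhere.
	handler := func(ctx *fasthttp.RequestCtx) {
		ctx.SetContentType("application/json; charset=utf-8")
		ctx.SetStatusCode(fasthttp.StatusOK)
		ctx.SetBodyString(`{"ok": true}`)
	}
	if err := fasthttp.ListenAndServe(":8080", handler); err != nil {
		log.Fatalf("listen: %v", err)
	}
}
```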
diff --git a/vendor/github.com/erikdubbelboer/fasthttp/LICENSE b/vendor/github.com/erikdubbelboer/fasthttp/LICENSE new file mode 100644 index 0000000..22bf00c --- /dev/null +++ b/vendor/github.com/erikdubbelboer/fasthttp/LICENSE @@ -0,0 +1,22 @@ +The MIT License (MIT) + +Copyright (c) 2015-2016 Aliaksandr Valialkin, VertaMedia + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. + diff --git a/vendor/github.com/erikdubbelboer/fasthttp/README.md b/vendor/github.com/erikdubbelboer/fasthttp/README.md new file mode 100644 index 0000000..0b15e40 --- /dev/null +++ b/vendor/github.com/erikdubbelboer/fasthttp/README.md @@ -0,0 +1,577 @@ +[![Build Status](https://travis-ci.org/erikdubbelboer/fasthttp.svg)](https://travis-ci.org/erikdubbelboer/fasthttp) +[![GoDoc](https://godoc.org/github.com/erikdubbelboer/fasthttp?status.svg)](http://godoc.org/github.com/erikdubbelboer/fasthttp) +[![Go Report](https://goreportcard.com/badge/github.com/erikdubbelboer/fasthttp)](https://goreportcard.com/report/github.com/erikdubbelboer/fasthttp) + +# fasthttp +Fast HTTP implementation for Go. + +Currently fasthttp is successfully used by [VertaMedia](https://vertamedia.com/) +in a production serving up to 200K rps from more than 1.5M concurrent keep-alive +connections per physical server. + +[TechEmpower Benchmark round 12 results](https://www.techempower.com/benchmarks/#section=data-r12&hw=peak&test=plaintext) + +[Server Benchmarks](#http-server-performance-comparison-with-nethttp) + +[Client Benchmarks](#http-client-comparison-with-nethttp) + +[Install](#install) + +[Documentation](https://godoc.org/github.com/erikdubbelboer/fasthttp) + +[Examples from docs](https://godoc.org/github.com/erikdubbelboer/fasthttp#pkg-examples) + +[Code examples](examples) + +[Switching from net/http to fasthttp](#switching-from-nethttp-to-fasthttp) + +[Fasthttp best practices](#fasthttp-best-practices) + +[Tricks with byte buffers](#tricks-with-byte-buffers) + +[Related projects](#related-projects) + +[FAQ](#faq) + +# HTTP server performance comparison with [net/http](https://golang.org/pkg/net/http/) + +In short, fasthttp server is up to 10 times faster than net/http. +Below are benchmark results. 
+ +*GOMAXPROCS=1* + +net/http server: +``` +$ GOMAXPROCS=1 go test -bench=NetHTTPServerGet -benchmem -benchtime=10s +BenchmarkNetHTTPServerGet1ReqPerConn 1000000 12052 ns/op 2297 B/op 29 allocs/op +BenchmarkNetHTTPServerGet2ReqPerConn 1000000 12278 ns/op 2327 B/op 24 allocs/op +BenchmarkNetHTTPServerGet10ReqPerConn 2000000 8903 ns/op 2112 B/op 19 allocs/op +BenchmarkNetHTTPServerGet10KReqPerConn 2000000 8451 ns/op 2058 B/op 18 allocs/op +BenchmarkNetHTTPServerGet1ReqPerConn10KClients 500000 26733 ns/op 3229 B/op 29 allocs/op +BenchmarkNetHTTPServerGet2ReqPerConn10KClients 1000000 23351 ns/op 3211 B/op 24 allocs/op +BenchmarkNetHTTPServerGet10ReqPerConn10KClients 1000000 13390 ns/op 2483 B/op 19 allocs/op +BenchmarkNetHTTPServerGet100ReqPerConn10KClients 1000000 13484 ns/op 2171 B/op 18 allocs/op +``` + +fasthttp server: +``` +$ GOMAXPROCS=1 go test -bench=kServerGet -benchmem -benchtime=10s +BenchmarkServerGet1ReqPerConn 10000000 1559 ns/op 0 B/op 0 allocs/op +BenchmarkServerGet2ReqPerConn 10000000 1248 ns/op 0 B/op 0 allocs/op +BenchmarkServerGet10ReqPerConn 20000000 797 ns/op 0 B/op 0 allocs/op +BenchmarkServerGet10KReqPerConn 20000000 716 ns/op 0 B/op 0 allocs/op +BenchmarkServerGet1ReqPerConn10KClients 10000000 1974 ns/op 0 B/op 0 allocs/op +BenchmarkServerGet2ReqPerConn10KClients 10000000 1352 ns/op 0 B/op 0 allocs/op +BenchmarkServerGet10ReqPerConn10KClients 20000000 789 ns/op 2 B/op 0 allocs/op +BenchmarkServerGet100ReqPerConn10KClients 20000000 604 ns/op 0 B/op 0 allocs/op +``` + +*GOMAXPROCS=4* + +net/http server: +``` +$ GOMAXPROCS=4 go test -bench=NetHTTPServerGet -benchmem -benchtime=10s +BenchmarkNetHTTPServerGet1ReqPerConn-4 3000000 4529 ns/op 2389 B/op 29 allocs/op +BenchmarkNetHTTPServerGet2ReqPerConn-4 5000000 3896 ns/op 2418 B/op 24 allocs/op +BenchmarkNetHTTPServerGet10ReqPerConn-4 5000000 3145 ns/op 2160 B/op 19 allocs/op +BenchmarkNetHTTPServerGet10KReqPerConn-4 5000000 3054 ns/op 2065 B/op 18 allocs/op +BenchmarkNetHTTPServerGet1ReqPerConn10KClients-4 1000000 10321 ns/op 3710 B/op 30 allocs/op +BenchmarkNetHTTPServerGet2ReqPerConn10KClients-4 2000000 7556 ns/op 3296 B/op 24 allocs/op +BenchmarkNetHTTPServerGet10ReqPerConn10KClients-4 5000000 3905 ns/op 2349 B/op 19 allocs/op +BenchmarkNetHTTPServerGet100ReqPerConn10KClients-4 5000000 3435 ns/op 2130 B/op 18 allocs/op +``` + +fasthttp server: +``` +$ GOMAXPROCS=4 go test -bench=kServerGet -benchmem -benchtime=10s +BenchmarkServerGet1ReqPerConn-4 10000000 1141 ns/op 0 B/op 0 allocs/op +BenchmarkServerGet2ReqPerConn-4 20000000 707 ns/op 0 B/op 0 allocs/op +BenchmarkServerGet10ReqPerConn-4 30000000 341 ns/op 0 B/op 0 allocs/op +BenchmarkServerGet10KReqPerConn-4 50000000 310 ns/op 0 B/op 0 allocs/op +BenchmarkServerGet1ReqPerConn10KClients-4 10000000 1119 ns/op 0 B/op 0 allocs/op +BenchmarkServerGet2ReqPerConn10KClients-4 20000000 644 ns/op 0 B/op 0 allocs/op +BenchmarkServerGet10ReqPerConn10KClients-4 30000000 346 ns/op 0 B/op 0 allocs/op +BenchmarkServerGet100ReqPerConn10KClients-4 50000000 282 ns/op 0 B/op 0 allocs/op +``` + +# HTTP client comparison with net/http + +In short, fasthttp client is up to 10 times faster than net/http. +Below are benchmark results. 
+ +*GOMAXPROCS=1* + +net/http client: +``` +$ GOMAXPROCS=1 go test -bench='HTTPClient(Do|GetEndToEnd)' -benchmem -benchtime=10s +BenchmarkNetHTTPClientDoFastServer 1000000 12567 ns/op 2616 B/op 35 allocs/op +BenchmarkNetHTTPClientGetEndToEnd1TCP 200000 67030 ns/op 5028 B/op 56 allocs/op +BenchmarkNetHTTPClientGetEndToEnd10TCP 300000 51098 ns/op 5031 B/op 56 allocs/op +BenchmarkNetHTTPClientGetEndToEnd100TCP 300000 45096 ns/op 5026 B/op 55 allocs/op +BenchmarkNetHTTPClientGetEndToEnd1Inmemory 500000 24779 ns/op 5035 B/op 57 allocs/op +BenchmarkNetHTTPClientGetEndToEnd10Inmemory 1000000 26425 ns/op 5035 B/op 57 allocs/op +BenchmarkNetHTTPClientGetEndToEnd100Inmemory 500000 28515 ns/op 5045 B/op 57 allocs/op +BenchmarkNetHTTPClientGetEndToEnd1000Inmemory 500000 39511 ns/op 5096 B/op 56 allocs/op +``` + +fasthttp client: +``` +$ GOMAXPROCS=1 go test -bench='kClient(Do|GetEndToEnd)' -benchmem -benchtime=10s +BenchmarkClientDoFastServer 20000000 865 ns/op 0 B/op 0 allocs/op +BenchmarkClientGetEndToEnd1TCP 1000000 18711 ns/op 0 B/op 0 allocs/op +BenchmarkClientGetEndToEnd10TCP 1000000 14664 ns/op 0 B/op 0 allocs/op +BenchmarkClientGetEndToEnd100TCP 1000000 14043 ns/op 1 B/op 0 allocs/op +BenchmarkClientGetEndToEnd1Inmemory 5000000 3965 ns/op 0 B/op 0 allocs/op +BenchmarkClientGetEndToEnd10Inmemory 3000000 4060 ns/op 0 B/op 0 allocs/op +BenchmarkClientGetEndToEnd100Inmemory 5000000 3396 ns/op 0 B/op 0 allocs/op +BenchmarkClientGetEndToEnd1000Inmemory 5000000 3306 ns/op 2 B/op 0 allocs/op +``` + +*GOMAXPROCS=4* + +net/http client: +``` +$ GOMAXPROCS=4 go test -bench='HTTPClient(Do|GetEndToEnd)' -benchmem -benchtime=10s +BenchmarkNetHTTPClientDoFastServer-4 2000000 8774 ns/op 2619 B/op 35 allocs/op +BenchmarkNetHTTPClientGetEndToEnd1TCP-4 500000 22951 ns/op 5047 B/op 56 allocs/op +BenchmarkNetHTTPClientGetEndToEnd10TCP-4 1000000 19182 ns/op 5037 B/op 55 allocs/op +BenchmarkNetHTTPClientGetEndToEnd100TCP-4 1000000 16535 ns/op 5031 B/op 55 allocs/op +BenchmarkNetHTTPClientGetEndToEnd1Inmemory-4 1000000 14495 ns/op 5038 B/op 56 allocs/op +BenchmarkNetHTTPClientGetEndToEnd10Inmemory-4 1000000 10237 ns/op 5034 B/op 56 allocs/op +BenchmarkNetHTTPClientGetEndToEnd100Inmemory-4 1000000 10125 ns/op 5045 B/op 56 allocs/op +BenchmarkNetHTTPClientGetEndToEnd1000Inmemory-4 1000000 11132 ns/op 5136 B/op 56 allocs/op +``` + +fasthttp client: +``` +$ GOMAXPROCS=4 go test -bench='kClient(Do|GetEndToEnd)' -benchmem -benchtime=10s +BenchmarkClientDoFastServer-4 50000000 397 ns/op 0 B/op 0 allocs/op +BenchmarkClientGetEndToEnd1TCP-4 2000000 7388 ns/op 0 B/op 0 allocs/op +BenchmarkClientGetEndToEnd10TCP-4 2000000 6689 ns/op 0 B/op 0 allocs/op +BenchmarkClientGetEndToEnd100TCP-4 3000000 4927 ns/op 1 B/op 0 allocs/op +BenchmarkClientGetEndToEnd1Inmemory-4 10000000 1604 ns/op 0 B/op 0 allocs/op +BenchmarkClientGetEndToEnd10Inmemory-4 10000000 1458 ns/op 0 B/op 0 allocs/op +BenchmarkClientGetEndToEnd100Inmemory-4 10000000 1329 ns/op 0 B/op 0 allocs/op +BenchmarkClientGetEndToEnd1000Inmemory-4 10000000 1316 ns/op 5 B/op 0 allocs/op +``` + + +# Install + +``` +go get -u github.com/erikdubbelboer/fasthttp +``` + + +# Switching from net/http to fasthttp + +Unfortunately, fasthttp doesn't provide API identical to net/http. +See the [FAQ](#faq) for details. +There is [net/http -> fasthttp handler converter](https://godoc.org/github.com/erikdubbelboer/fasthttp/fasthttpadaptor), +but it is better to write fasthttp request handlers by hand in order to use +all of the fasthttp advantages (especially high performance :) ). 
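As a concrete illustration of the converter mentioned above, here is a minimal sketch that reuses an existing net/http handler, assuming this fork keeps upstream's `fasthttpadaptor.NewFastHTTPHandler` signature; as the README notes, hand-written fasthttp handlers remain faster than going through the adaptor.

```go
package main

import (
	"net/http"

	"github.com/erikdubbelboer/fasthttp"
	"github.com/erikdubbelboer/fasthttp/fasthttpadaptor"
)

func main() {
	// An existing net/http handler, reused without modification.
	legacy := http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		w.Write([]byte("hello from net/http"))
	})

	// The adaptor rebuilds an http.Request from the RequestCtx on every call,
	// so it costs allocations that native fasthttp handlers avoid.
	fasthttp.ListenAndServe(":8080", fasthttpadaptor.NewFastHTTPHandler(legacy))
}
```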
+ +Important points: + +* Fasthttp works with [RequestHandler functions](https://godoc.org/github.com/erikdubbelboer/fasthttp#RequestHandler) +instead of objects implementing [Handler interface](https://golang.org/pkg/net/http/#Handler). +Fortunately, it is easy to pass bound struct methods to fasthttp: + + ```go + type MyHandler struct { + foobar string + } + + // request handler in net/http style, i.e. method bound to MyHandler struct. + func (h *MyHandler) HandleFastHTTP(ctx *fasthttp.RequestCtx) { + // notice that we may access MyHandler properties here - see h.foobar. + fmt.Fprintf(ctx, "Hello, world! Requested path is %q. Foobar is %q", + ctx.Path(), h.foobar) + } + + // request handler in fasthttp style, i.e. just plain function. + func fastHTTPHandler(ctx *fasthttp.RequestCtx) { + fmt.Fprintf(ctx, "Hi there! RequestURI is %q", ctx.RequestURI()) + } + + // pass bound struct method to fasthttp + myHandler := &MyHandler{ + foobar: "foobar", + } + fasthttp.ListenAndServe(":8080", myHandler.HandleFastHTTP) + + // pass plain function to fasthttp + fasthttp.ListenAndServe(":8081", fastHTTPHandler) + ``` + +* The [RequestHandler](https://godoc.org/github.com/erikdubbelboer/fasthttp#RequestHandler) +accepts only one argument - [RequestCtx](https://godoc.org/github.com/erikdubbelboer/fasthttp#RequestCtx). +It contains all the functionality required for http request processing +and response writing. Below is an example of a simple request handler conversion +from net/http to fasthttp. + + ```go + // net/http request handler + requestHandler := func(w http.ResponseWriter, r *http.Request) { + switch r.URL.Path { + case "/foo": + fooHandler(w, r) + case "/bar": + barHandler(w, r) + default: + http.Error(w, "Unsupported path", http.StatusNotFound) + } + } + ``` + + ```go + // the corresponding fasthttp request handler + requestHandler := func(ctx *fasthttp.RequestCtx) { + switch string(ctx.Path()) { + case "/foo": + fooHandler(ctx) + case "/bar": + barHandler(ctx) + default: + ctx.Error("Unsupported path", fasthttp.StatusNotFound) + } + } + ``` + +* Fasthttp allows setting response headers and writing response body +in an arbitrary order. There is no 'headers first, then body' restriction +like in net/http. The following code is valid for fasthttp: + + ```go + requestHandler := func(ctx *fasthttp.RequestCtx) { + // set some headers and status code first + ctx.SetContentType("foo/bar") + ctx.SetStatusCode(fasthttp.StatusOK) + + // then write the first part of body + fmt.Fprintf(ctx, "this is the first part of body\n") + + // then set more headers + ctx.Response.Header.Set("Foo-Bar", "baz") + + // then write more body + fmt.Fprintf(ctx, "this is the second part of body\n") + + // then override already written body + ctx.SetBody([]byte("this is completely new body contents")) + + // then update status code + ctx.SetStatusCode(fasthttp.StatusNotFound) + + // basically, anything may be updated many times before + // returning from RequestHandler. + // + // Unlike net/http fasthttp doesn't put response to the wire until + // returning from RequestHandler. 
+ } + ``` + +* Fasthttp doesn't provide [ServeMux](https://golang.org/pkg/net/http/#ServeMux), +but there are more powerful third-party routers and web frameworks +with fasthttp support: + + * [fasthttp-routing](https://github.com/qiangxue/fasthttp-routing) + * [fasthttprouter](https://github.com/buaazp/fasthttprouter) + * [lu](https://github.com/vincentLiuxiang/lu) + + Net/http code with simple ServeMux is trivially converted to fasthttp code: + + ```go + // net/http code + + m := &http.ServeMux{} + m.HandleFunc("/foo", fooHandlerFunc) + m.HandleFunc("/bar", barHandlerFunc) + m.Handle("/baz", bazHandler) + + http.ListenAndServe(":80", m) + ``` + + ```go + // the corresponding fasthttp code + m := func(ctx *fasthttp.RequestCtx) { + switch string(ctx.Path()) { + case "/foo": + fooHandlerFunc(ctx) + case "/bar": + barHandlerFunc(ctx) + case "/baz": + bazHandler.HandlerFunc(ctx) + default: + ctx.Error("not found", fasthttp.StatusNotFound) + } + } + + fasthttp.ListenAndServe(":80", m) + ``` + +* net/http -> fasthttp conversion table: + + * All the pseudocode below assumes w, r and ctx have these types: + ```go + var ( + w http.ResponseWriter + r *http.Request + ctx *fasthttp.RequestCtx + ) + ``` + * r.Body -> [ctx.PostBody()](https://godoc.org/github.com/erikdubbelboer/fasthttp#RequestCtx.PostBody) + * r.URL.Path -> [ctx.Path()](https://godoc.org/github.com/erikdubbelboer/fasthttp#RequestCtx.Path) + * r.URL -> [ctx.URI()](https://godoc.org/github.com/erikdubbelboer/fasthttp#RequestCtx.URI) + * r.Method -> [ctx.Method()](https://godoc.org/github.com/erikdubbelboer/fasthttp#RequestCtx.Method) + * r.Header -> [ctx.Request.Header](https://godoc.org/github.com/erikdubbelboer/fasthttp#RequestHeader) + * r.Header.Get() -> [ctx.Request.Header.Peek()](https://godoc.org/github.com/erikdubbelboer/fasthttp#RequestHeader.Peek) + * r.Host -> [ctx.Host()](https://godoc.org/github.com/erikdubbelboer/fasthttp#RequestCtx.Host) + * r.Form -> [ctx.QueryArgs()](https://godoc.org/github.com/erikdubbelboer/fasthttp#RequestCtx.QueryArgs) + + [ctx.PostArgs()](https://godoc.org/github.com/erikdubbelboer/fasthttp#RequestCtx.PostArgs) + * r.PostForm -> [ctx.PostArgs()](https://godoc.org/github.com/erikdubbelboer/fasthttp#RequestCtx.PostArgs) + * r.FormValue() -> [ctx.FormValue()](https://godoc.org/github.com/erikdubbelboer/fasthttp#RequestCtx.FormValue) + * r.FormFile() -> [ctx.FormFile()](https://godoc.org/github.com/erikdubbelboer/fasthttp#RequestCtx.FormFile) + * r.MultipartForm -> [ctx.MultipartForm()](https://godoc.org/github.com/erikdubbelboer/fasthttp#RequestCtx.MultipartForm) + * r.RemoteAddr -> [ctx.RemoteAddr()](https://godoc.org/github.com/erikdubbelboer/fasthttp#RequestCtx.RemoteAddr) + * r.RequestURI -> [ctx.RequestURI()](https://godoc.org/github.com/erikdubbelboer/fasthttp#RequestCtx.RequestURI) + * r.TLS -> [ctx.IsTLS()](https://godoc.org/github.com/erikdubbelboer/fasthttp#RequestCtx.IsTLS) + * r.Cookie() -> [ctx.Request.Header.Cookie()](https://godoc.org/github.com/erikdubbelboer/fasthttp#RequestHeader.Cookie) + * r.Referer() -> [ctx.Referer()](https://godoc.org/github.com/erikdubbelboer/fasthttp#RequestCtx.Referer) + * r.UserAgent() -> [ctx.UserAgent()](https://godoc.org/github.com/erikdubbelboer/fasthttp#RequestCtx.UserAgent) + * w.Header() -> [ctx.Response.Header](https://godoc.org/github.com/erikdubbelboer/fasthttp#ResponseHeader) + * w.Header().Set() -> [ctx.Response.Header.Set()](https://godoc.org/github.com/erikdubbelboer/fasthttp#ResponseHeader.Set) + * w.Header().Set("Content-Type") -> 
[ctx.SetContentType()](https://godoc.org/github.com/erikdubbelboer/fasthttp#RequestCtx.SetContentType) + * w.Header().Set("Set-Cookie") -> [ctx.Response.Header.SetCookie()](https://godoc.org/github.com/erikdubbelboer/fasthttp#ResponseHeader.SetCookie) + * w.Write() -> [ctx.Write()](https://godoc.org/github.com/erikdubbelboer/fasthttp#RequestCtx.Write), + [ctx.SetBody()](https://godoc.org/github.com/erikdubbelboer/fasthttp#RequestCtx.SetBody), + [ctx.SetBodyStream()](https://godoc.org/github.com/erikdubbelboer/fasthttp#RequestCtx.SetBodyStream), + [ctx.SetBodyStreamWriter()](https://godoc.org/github.com/erikdubbelboer/fasthttp#RequestCtx.SetBodyStreamWriter) + * w.WriteHeader() -> [ctx.SetStatusCode()](https://godoc.org/github.com/erikdubbelboer/fasthttp#RequestCtx.SetStatusCode) + * w.(http.Hijacker).Hijack() -> [ctx.Hijack()](https://godoc.org/github.com/erikdubbelboer/fasthttp#RequestCtx.Hijack) + * http.Error() -> [ctx.Error()](https://godoc.org/github.com/erikdubbelboer/fasthttp#RequestCtx.Error) + * http.FileServer() -> [fasthttp.FSHandler()](https://godoc.org/github.com/erikdubbelboer/fasthttp#FSHandler), + [fasthttp.FS](https://godoc.org/github.com/erikdubbelboer/fasthttp#FS) + * http.ServeFile() -> [fasthttp.ServeFile()](https://godoc.org/github.com/erikdubbelboer/fasthttp#ServeFile) + * http.Redirect() -> [ctx.Redirect()](https://godoc.org/github.com/erikdubbelboer/fasthttp#RequestCtx.Redirect) + * http.NotFound() -> [ctx.NotFound()](https://godoc.org/github.com/erikdubbelboer/fasthttp#RequestCtx.NotFound) + * http.StripPrefix() -> [fasthttp.PathRewriteFunc](https://godoc.org/github.com/erikdubbelboer/fasthttp#PathRewriteFunc) + +* *VERY IMPORTANT!* Fasthttp disallows holding references +to [RequestCtx](https://godoc.org/github.com/erikdubbelboer/fasthttp#RequestCtx) or to its' +members after returning from [RequestHandler](https://godoc.org/github.com/erikdubbelboer/fasthttp#RequestHandler). +Otherwise [data races](http://blog.golang.org/race-detector) are inevitable. +Carefully inspect all the net/http request handlers converted to fasthttp whether +they retain references to RequestCtx or to its' members after returning. +RequestCtx provides the following _band aids_ for this case: + + * Wrap RequestHandler into [TimeoutHandler](https://godoc.org/github.com/erikdubbelboer/fasthttp#TimeoutHandler). + * Call [TimeoutError](https://godoc.org/github.com/erikdubbelboer/fasthttp#RequestCtx.TimeoutError) + before returning from RequestHandler if there are references to RequestCtx or to its' members. + See [the example](https://godoc.org/github.com/erikdubbelboer/fasthttp#example-RequestCtx-TimeoutError) + for more details. + +Use this brilliant tool - [race detector](http://blog.golang.org/race-detector) - +for detecting and eliminating data races in your program. If you detected +data race related to fasthttp in your program, then there is high probability +you forgot calling [TimeoutError](https://godoc.org/github.com/erikdubbelboer/fasthttp#RequestCtx.TimeoutError) +before returning from [RequestHandler](https://godoc.org/github.com/erikdubbelboer/fasthttp#RequestHandler). + +* Blind switching from net/http to fasthttp won't give you performance boost. +While fasthttp is optimized for speed, its' performance may be easily saturated +by slow [RequestHandler](https://godoc.org/github.com/erikdubbelboer/fasthttp#RequestHandler). +So [profile](http://blog.golang.org/profiling-go-programs) and optimize your +code after switching to fasthttp. 
For instance, use [quicktemplate](https://github.com/erikdubbelboer/quicktemplate) +instead of [html/template](https://golang.org/pkg/html/template/). + +* See also [fasthttputil](https://godoc.org/github.com/erikdubbelboer/fasthttp/fasthttputil), +[fasthttpadaptor](https://godoc.org/github.com/erikdubbelboer/fasthttp/fasthttpadaptor) and +[expvarhandler](https://godoc.org/github.com/erikdubbelboer/fasthttp/expvarhandler). + + +# Performance optimization tips for multi-core systems + +* Use [reuseport](https://godoc.org/github.com/erikdubbelboer/fasthttp/reuseport) listener. +* Run a separate server instance per CPU core with GOMAXPROCS=1. +* Pin each server instance to a separate CPU core using [taskset](http://linux.die.net/man/1/taskset). +* Ensure the interrupts of multiqueue network card are evenly distributed between CPU cores. + See [this article](https://blog.cloudflare.com/how-to-achieve-low-latency/) for details. +* Use Go 1.6 as it provides some considerable performance improvements. + + +# Fasthttp best practices + +* Do not allocate objects and `[]byte` buffers - just reuse them as much + as possible. Fasthttp API design encourages this. +* [sync.Pool](https://golang.org/pkg/sync/#Pool) is your best friend. +* [Profile your program](http://blog.golang.org/profiling-go-programs) + in production. + `go tool pprof --alloc_objects your-program mem.pprof` usually gives better + insights for optimization opportunities than `go tool pprof your-program cpu.pprof`. +* Write [tests and benchmarks](https://golang.org/pkg/testing/) for hot paths. +* Avoid conversion between `[]byte` and `string`, since this may result in memory + allocation+copy. Fasthttp API provides functions for both `[]byte` and `string` - + use these functions instead of converting manually between `[]byte` and `string`. + There are some exceptions - see [this wiki page](https://github.com/golang/go/wiki/CompilerOptimizations#string-and-byte) + for more details. +* Verify your tests and production code under + [race detector](https://golang.org/doc/articles/race_detector.html) on a regular basis. +* Prefer [quicktemplate](https://github.com/erikdubbelboer/quicktemplate) instead of + [html/template](https://golang.org/pkg/html/template/) in your webserver. + + +# Tricks with `[]byte` buffers + +The following tricks are used by fasthttp. Use them in your code too. + +* Standard Go functions accept nil buffers +```go +var ( + // both buffers are uninitialized + dst []byte + src []byte +) +dst = append(dst, src...) // is legal if dst is nil and/or src is nil +copy(dst, src) // is legal if dst is nil and/or src is nil +(string(src) == "") // is true if src is nil +(len(src) == 0) // is true if src is nil +src = src[:0] // works like a charm with nil src + +// this for loop doesn't panic if src is nil +for i, ch := range src { + doSomething(i, ch) +} +``` + +So throw away nil checks for `[]byte` buffers from you code. For example, +```go +srcLen := 0 +if src != nil { + srcLen = len(src) +} +``` + +becomes + +```go +srcLen := len(src) +``` + +* String may be appended to `[]byte` buffer with `append` +```go +dst = append(dst, "foobar"...) +``` + +* `[]byte` buffer may be extended to its' capacity. +```go +buf := make([]byte, 100) +a := buf[:10] // len(a) == 10, cap(a) == 100. +b := a[:100] // is valid, since cap(a) == 100. 
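// Note (added): a and b share buf's backing array, so re-slicing up to
// cap(a) never allocates; fasthttp relies on this trick to grow reused
// buffers across requests instead of allocating fresh ones.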
+``` + +* All fasthttp functions accept nil `[]byte` buffer +```go +statusCode, body, err := fasthttp.Get(nil, "http://google.com/") +uintBuf := fasthttp.AppendUint(nil, 1234) +``` + +# Related projects + + * [fasthttp-contrib](https://github.com/fasthttp-contrib) - various useful + helpers for projects based on fasthttp. + * [fasthttp-routing](https://github.com/qiangxue/fasthttp-routing) - fast and + powerful routing package for fasthttp servers. + * [fasthttprouter](https://github.com/thehowl/fasthttprouter) - a high + performance fasthttp request router that scales well (uses this fork, + erikdubbelboer, not valyala's code). + * [lu](https://github.com/vincentLiuxiang/lu) - a high performance + go middleware web framework which is based on fasthttp. + * [websocket](https://github.com/leavengood/websocket) - Gorilla-based + websocket implementation for fasthttp. + + +# FAQ + +* *Why creating yet another http package instead of optimizing net/http?* + + Because net/http API limits many optimization opportunities. + For example: + * net/http Request object lifetime isn't limited by request handler execution + time. So the server must create a new request object per each request instead + of reusing existing objects like fasthttp does. + * net/http headers are stored in a `map[string][]string`. So the server + must parse all the headers, convert them from `[]byte` to `string` and put + them into the map before calling user-provided request handler. + This all requires unnecessary memory allocations avoided by fasthttp. + * net/http client API requires creating a new response object per each request. + +* *Why fasthttp API is incompatible with net/http?* + + Because net/http API limits many optimization opportunities. See the answer + above for more details. Also certain net/http API parts are suboptimal + for use: + * Compare [net/http connection hijacking](https://golang.org/pkg/net/http/#Hijacker) + to [fasthttp connection hijacking](https://godoc.org/github.com/erikdubbelboer/fasthttp#RequestCtx.Hijack). + * Compare [net/http Request.Body reading](https://golang.org/pkg/net/http/#Request) + to [fasthttp request body reading](https://godoc.org/github.com/erikdubbelboer/fasthttp#RequestCtx.PostBody). + +* *Why fasthttp doesn't support HTTP/2.0 and WebSockets?* + + There are [plans](TODO) for adding HTTP/2.0 and WebSockets support + in the future. + In the mean time, third parties may use [RequestCtx.Hijack](https://godoc.org/github.com/erikdubbelboer/fasthttp#RequestCtx.Hijack) + for implementing these goodies. See [the first third-party websocket implementation on the top of fasthttp](https://github.com/leavengood/websocket). + +* *Are there known net/http advantages comparing to fasthttp?* + + Yes: + * net/http supports [HTTP/2.0 starting from go1.6](https://http2.golang.org/). + * net/http API is stable, while fasthttp API constantly evolves. + * net/http handles more HTTP corner cases. + * net/http should contain less bugs, since it is used and tested by much + wider audience. + * net/http works on Go older than 1.5. + +* *Why fasthttp API prefers returning `[]byte` instead of `string`?* + + Because `[]byte` to `string` conversion isn't free - it requires memory + allocation and copy. Feel free wrapping returned `[]byte` result into + `string()` if you prefer working with strings instead of byte slices. + But be aware that this has non-zero overhead. + +* *Which GO versions are supported by fasthttp?* + + Go1.5+. 
Older versions won't be supported, since their standard package + [miss useful functions](https://github.com/valyala/fasthttp/issues/5). + +* *Please provide real benchmark data and sever information* + + See [this issue](https://github.com/valyala/fasthttp/issues/4). + +* *Are there plans to add request routing to fasthttp?* + + There are no plans to add request routing into fasthttp. + Use third-party routers and web frameworks with fasthttp support: + + * [fasthttp-routing](https://github.com/qiangxue/fasthttp-routing) + * [fasthttprouter](https://github.com/thehowl/fasthttprouter) + * [gramework](https://github.com/gramework/gramework) + * [lu](https://github.com/vincentLiuxiang/lu) + + See also [this issue](https://github.com/valyala/fasthttp/issues/9) for more info. + +* *I detected data race in fasthttp!* + + Cool! [File a bug](https://github.com/erikdubbelboer/fasthttp/issues/new). But before + doing this check the following in your code: + + * Make sure there are no references to [RequestCtx](https://godoc.org/github.com/erikdubbelboer/fasthttp#RequestCtx) + or to its' members after returning from [RequestHandler](https://godoc.org/github.com/erikdubbelboer/fasthttp#RequestHandler). + * Make sure you call [TimeoutError](https://godoc.org/github.com/erikdubbelboer/fasthttp#RequestCtx.TimeoutError) + before returning from [RequestHandler](https://godoc.org/github.com/erikdubbelboer/fasthttp#RequestHandler) + if there are references to [RequestCtx](https://godoc.org/github.com/erikdubbelboer/fasthttp#RequestCtx) + or to its' members, which may be accessed by other goroutines. + +* *I didn't find an answer for my question here* + + Try exploring [these questions](https://github.com/valyala/fasthttp/issues?q=label%3Aquestion). diff --git a/vendor/github.com/erikdubbelboer/fasthttp/TODO b/vendor/github.com/erikdubbelboer/fasthttp/TODO new file mode 100644 index 0000000..ce7505f --- /dev/null +++ b/vendor/github.com/erikdubbelboer/fasthttp/TODO @@ -0,0 +1,4 @@ +- SessionClient with referer and cookies support. +- ProxyHandler similar to FSHandler. +- WebSockets. See https://tools.ietf.org/html/rfc6455 . +- HTTP/2.0. See https://tools.ietf.org/html/rfc7540 . diff --git a/vendor/github.com/erikdubbelboer/fasthttp/args.go b/vendor/github.com/erikdubbelboer/fasthttp/args.go new file mode 100644 index 0000000..958f1c3 --- /dev/null +++ b/vendor/github.com/erikdubbelboer/fasthttp/args.go @@ -0,0 +1,514 @@ +package fasthttp + +import ( + "bytes" + "errors" + "io" + "sync" +) + +// AcquireArgs returns an empty Args object from the pool. +// +// The returned Args may be returned to the pool with ReleaseArgs +// when no longer needed. This allows reducing GC load. +func AcquireArgs() *Args { + return argsPool.Get().(*Args) +} + +// ReleaseArgs returns the object acquired via AquireArgs to the pool. +// +// Do not access the released Args object, otherwise data races may occur. +func ReleaseArgs(a *Args) { + a.Reset() + argsPool.Put(a) +} + +var argsPool = &sync.Pool{ + New: func() interface{} { + return &Args{} + }, +} + +// Args represents query arguments. +// +// It is forbidden copying Args instances. Create new instances instead +// and use CopyTo(). +// +// Args instance MUST NOT be used from concurrently running goroutines. +type Args struct { + noCopy noCopy + + args []argsKV + buf []byte +} + +type argsKV struct { + key []byte + value []byte +} + +// Reset clears query args. +func (a *Args) Reset() { + a.args = a.args[:0] +} + +// CopyTo copies all args to dst. 
+func (a *Args) CopyTo(dst *Args) { + dst.Reset() + dst.args = copyArgs(dst.args, a.args) +} + +// VisitAll calls f for each existing arg. +// +// f must not retain references to key and value after returning. +// Make key and/or value copies if you need storing them after returning. +func (a *Args) VisitAll(f func(key, value []byte)) { + visitArgs(a.args, f) +} + +// Len returns the number of query args. +func (a *Args) Len() int { + return len(a.args) +} + +// Parse parses the given string containing query args. +func (a *Args) Parse(s string) { + a.buf = append(a.buf[:0], s...) + a.ParseBytes(a.buf) +} + +// ParseBytes parses the given b containing query args. +func (a *Args) ParseBytes(b []byte) { + a.Reset() + + var s argsScanner + s.b = b + + var kv *argsKV + a.args, kv = allocArg(a.args) + for s.next(kv) { + if len(kv.key) > 0 || len(kv.value) > 0 { + a.args, kv = allocArg(a.args) + } + } + a.args = releaseArg(a.args) +} + +// String returns string representation of query args. +func (a *Args) String() string { + return string(a.QueryString()) +} + +// QueryString returns query string for the args. +// +// The returned value is valid until the next call to Args methods. +func (a *Args) QueryString() []byte { + a.buf = a.AppendBytes(a.buf[:0]) + return a.buf +} + +// AppendBytes appends query string to dst and returns the extended dst. +func (a *Args) AppendBytes(dst []byte) []byte { + for i, n := 0, len(a.args); i < n; i++ { + kv := &a.args[i] + dst = AppendQuotedArg(dst, kv.key) + if len(kv.value) > 0 { + dst = append(dst, '=') + dst = AppendQuotedArg(dst, kv.value) + } + if i+1 < n { + dst = append(dst, '&') + } + } + return dst +} + +// WriteTo writes query string to w. +// +// WriteTo implements io.WriterTo interface. +func (a *Args) WriteTo(w io.Writer) (int64, error) { + n, err := w.Write(a.QueryString()) + return int64(n), err +} + +// Del deletes argument with the given key from query args. +func (a *Args) Del(key string) { + a.args = delAllArgs(a.args, key) +} + +// DelBytes deletes argument with the given key from query args. +func (a *Args) DelBytes(key []byte) { + a.args = delAllArgs(a.args, b2s(key)) +} + +// Add adds 'key=value' argument. +// +// Multiple values for the same key may be added. +func (a *Args) Add(key, value string) { + a.args = appendArg(a.args, key, value) +} + +// AddBytesK adds 'key=value' argument. +// +// Multiple values for the same key may be added. +func (a *Args) AddBytesK(key []byte, value string) { + a.args = appendArg(a.args, b2s(key), value) +} + +// AddBytesV adds 'key=value' argument. +// +// Multiple values for the same key may be added. +func (a *Args) AddBytesV(key string, value []byte) { + a.args = appendArg(a.args, key, b2s(value)) +} + +// AddBytesKV adds 'key=value' argument. +// +// Multiple values for the same key may be added. +func (a *Args) AddBytesKV(key, value []byte) { + a.args = appendArg(a.args, b2s(key), b2s(value)) +} + +// Set sets 'key=value' argument. +func (a *Args) Set(key, value string) { + a.args = setArg(a.args, key, value) +} + +// SetBytesK sets 'key=value' argument. +func (a *Args) SetBytesK(key []byte, value string) { + a.args = setArg(a.args, b2s(key), value) +} + +// SetBytesV sets 'key=value' argument. +func (a *Args) SetBytesV(key string, value []byte) { + a.args = setArg(a.args, key, b2s(value)) +} + +// SetBytesKV sets 'key=value' argument. +func (a *Args) SetBytesKV(key, value []byte) { + a.args = setArgBytes(a.args, key, value) +} + +// Peek returns query arg value for the given key. 
+// +// Returned value is valid until the next Args call. +func (a *Args) Peek(key string) []byte { + return peekArgStr(a.args, key) +} + +// PeekBytes returns query arg value for the given key. +// +// Returned value is valid until the next Args call. +func (a *Args) PeekBytes(key []byte) []byte { + return peekArgBytes(a.args, key) +} + +// PeekMulti returns all the arg values for the given key. +func (a *Args) PeekMulti(key string) [][]byte { + var values [][]byte + a.VisitAll(func(k, v []byte) { + if string(k) == key { + values = append(values, v) + } + }) + return values +} + +// PeekMultiBytes returns all the arg values for the given key. +func (a *Args) PeekMultiBytes(key []byte) [][]byte { + return a.PeekMulti(b2s(key)) +} + +// Has returns true if the given key exists in Args. +func (a *Args) Has(key string) bool { + return hasArg(a.args, key) +} + +// HasBytes returns true if the given key exists in Args. +func (a *Args) HasBytes(key []byte) bool { + return hasArg(a.args, b2s(key)) +} + +// ErrNoArgValue is returned when Args value with the given key is missing. +var ErrNoArgValue = errors.New("no Args value for the given key") + +// GetUint returns uint value for the given key. +func (a *Args) GetUint(key string) (int, error) { + value := a.Peek(key) + if len(value) == 0 { + return -1, ErrNoArgValue + } + return ParseUint(value) +} + +// SetUint sets uint value for the given key. +func (a *Args) SetUint(key string, value int) { + bb := AcquireByteBuffer() + bb.B = AppendUint(bb.B[:0], value) + a.SetBytesV(key, bb.B) + ReleaseByteBuffer(bb) +} + +// SetUintBytes sets uint value for the given key. +func (a *Args) SetUintBytes(key []byte, value int) { + a.SetUint(b2s(key), value) +} + +// GetUintOrZero returns uint value for the given key. +// +// Zero (0) is returned on error. +func (a *Args) GetUintOrZero(key string) int { + n, err := a.GetUint(key) + if err != nil { + n = 0 + } + return n +} + +// GetUfloat returns ufloat value for the given key. +func (a *Args) GetUfloat(key string) (float64, error) { + value := a.Peek(key) + if len(value) == 0 { + return -1, ErrNoArgValue + } + return ParseUfloat(value) +} + +// GetUfloatOrZero returns ufloat value for the given key. +// +// Zero (0) is returned on error. +func (a *Args) GetUfloatOrZero(key string) float64 { + f, err := a.GetUfloat(key) + if err != nil { + f = 0 + } + return f +} + +// GetBool returns boolean value for the given key. +// +// true is returned for '1', 'y' and 'yes' values, +// otherwise false is returned. +func (a *Args) GetBool(key string) bool { + switch string(a.Peek(key)) { + case "1", "y", "yes": + return true + default: + return false + } +} + +func visitArgs(args []argsKV, f func(k, v []byte)) { + for i, n := 0, len(args); i < n; i++ { + kv := &args[i] + f(kv.key, kv.value) + } +} + +func copyArgs(dst, src []argsKV) []argsKV { + if cap(dst) < len(src) { + tmp := make([]argsKV, len(src)) + copy(tmp, dst) + dst = tmp + } + n := len(src) + dst = dst[:n] + for i := 0; i < n; i++ { + dstKV := &dst[i] + srcKV := &src[i] + dstKV.key = append(dstKV.key[:0], srcKV.key...) + dstKV.value = append(dstKV.value[:0], srcKV.value...) 
+ } + return dst +} + +func delAllArgsBytes(args []argsKV, key []byte) []argsKV { + return delAllArgs(args, b2s(key)) +} + +func delAllArgs(args []argsKV, key string) []argsKV { + for i, n := 0, len(args); i < n; i++ { + kv := &args[i] + if key == string(kv.key) { + n-- + if i != n { + // Swap positions of the current and last member + args[i], args[n] = args[n], args[i] + i-- + } + args = args[:n] // Shrink the length + } + } + return args +} + +func setArgBytes(h []argsKV, key, value []byte) []argsKV { + return setArg(h, b2s(key), b2s(value)) +} + +func setArg(h []argsKV, key, value string) []argsKV { + n := len(h) + for i := 0; i < n; i++ { + kv := &h[i] + if key == string(kv.key) { + kv.value = append(kv.value[:0], value...) + return h + } + } + return appendArg(h, key, value) +} + +func appendArgBytes(h []argsKV, key, value []byte) []argsKV { + return appendArg(h, b2s(key), b2s(value)) +} + +func appendArg(args []argsKV, key, value string) []argsKV { + var kv *argsKV + args, kv = allocArg(args) + kv.key = append(kv.key[:0], key...) + kv.value = append(kv.value[:0], value...) + return args +} + +func allocArg(h []argsKV) ([]argsKV, *argsKV) { + n := len(h) + if cap(h) > n { + h = h[:n+1] + } else { + h = append(h, argsKV{}) + } + return h, &h[n] +} + +func releaseArg(h []argsKV) []argsKV { + return h[:len(h)-1] +} + +func hasArg(h []argsKV, key string) bool { + for i, n := 0, len(h); i < n; i++ { + kv := &h[i] + if key == string(kv.key) { + return true + } + } + return false +} + +func peekArgBytes(h []argsKV, k []byte) []byte { + for i, n := 0, len(h); i < n; i++ { + kv := &h[i] + if bytes.Equal(kv.key, k) { + return kv.value + } + } + return nil +} + +func peekArgStr(h []argsKV, k string) []byte { + // better to use string() casting + return peekArgBytes(h, []byte(k)) +} + +type argsScanner struct { + b []byte +} + +func (s *argsScanner) next(kv *argsKV) bool { + if len(s.b) == 0 { + return false + } + + isKey := true + k := 0 + for i, c := range s.b { + switch c { + case '=': + if isKey { + isKey = false + kv.key = decodeArgAppend(kv.key[:0], s.b[:i]) + k = i + 1 + } + case '&': + if isKey { + kv.key = decodeArgAppend(kv.key[:0], s.b[:i]) + kv.value = kv.value[:0] + } else { + kv.value = decodeArgAppend(kv.value[:0], s.b[k:i]) + } + s.b = s.b[i+1:] + return true + } + } + + if isKey { + kv.key = decodeArgAppend(kv.key[:0], s.b) + kv.value = kv.value[:0] + } else { + kv.value = decodeArgAppend(kv.value[:0], s.b[k:]) + } + s.b = s.b[len(s.b):] + return true +} + +func decodeArgAppend(dst, src []byte) []byte { + if bytes.IndexByte(src, '%') < 0 && bytes.IndexByte(src, '+') < 0 { + // fast path: src doesn't contain encoded chars + return append(dst, src...) + } + + // slow path + for i := 0; i < len(src); i++ { + c := src[i] + if c == '%' { + if i+2 >= len(src) { + return append(dst, src[i:]...) + } + x1 := hex2intTable[src[i+1]] + x2 := hex2intTable[src[i+2]] + if x1 == 16 || x2 == 16 { + dst = append(dst, c) + } else { + dst = append(dst, x1<<4|x2) + i += 2 + } + } else if c == '+' { + dst = append(dst, ' ') + } else { + dst = append(dst, c) + } + } + return dst +} + +// decodeArgAppendNoPlus is almost identical to decodeArgAppend, but it doesn't +// substitute '+' with ' '. +// +// The function is copy-pasted from decodeArgAppend due to the preformance +// reasons only. +func decodeArgAppendNoPlus(dst, src []byte) []byte { + if bytes.IndexByte(src, '%') < 0 { + // fast path: src doesn't contain encoded chars + return append(dst, src...) 
+ } + + // slow path + for i, n := 0, len(src); i < n; i++ { + c := src[i] + if c == '%' { + if i+2 >= n { + return append(dst, src[i:]...) + } + x1 := hex2intTable[src[i+1]] + x2 := hex2intTable[src[i+2]] + if x1 == 16 || x2 == 16 { + dst = append(dst, c) + } else { + dst = append(dst, x1<<4|x2) + i += 2 + } + } else { + dst = append(dst, c) + } + } + return dst +} diff --git a/vendor/github.com/erikdubbelboer/fasthttp/args_test.go b/vendor/github.com/erikdubbelboer/fasthttp/args_test.go new file mode 100644 index 0000000..d5b768f --- /dev/null +++ b/vendor/github.com/erikdubbelboer/fasthttp/args_test.go @@ -0,0 +1,497 @@ +package fasthttp + +import ( + "fmt" + "reflect" + "strings" + "testing" + "time" +) + +func TestDecodeArgAppend(t *testing.T) { + testDecodeArgAppend(t, "", "") + testDecodeArgAppend(t, "foobar", "foobar") + testDecodeArgAppend(t, "тест", "тест") + testDecodeArgAppend(t, "a%", "a%") + testDecodeArgAppend(t, "%a%21", "%a!") + testDecodeArgAppend(t, "ab%test", "ab%test") + testDecodeArgAppend(t, "d%тестF", "d%тестF") + testDecodeArgAppend(t, "a%\xffb%20c", "a%\xffb c") + testDecodeArgAppend(t, "foo%20bar", "foo bar") + testDecodeArgAppend(t, "f.o%2C1%3A2%2F4=%7E%60%21%40%23%24%25%5E%26*%28%29_-%3D%2B%5C%7C%2F%5B%5D%7B%7D%3B%3A%27%22%3C%3E%2C.%2F%3F", + "f.o,1:2/4=~`!@#$%^&*()_-=+\\|/[]{};:'\"<>,./?") +} + +func testDecodeArgAppend(t *testing.T, s, expectedResult string) { + result := decodeArgAppend(nil, []byte(s)) + if string(result) != expectedResult { + t.Fatalf("unexpected decodeArgAppend(%q)=%q; expecting %q", s, result, expectedResult) + } +} + +func TestArgsAdd(t *testing.T) { + var a Args + a.Add("foo", "bar") + a.Add("foo", "baz") + a.Add("foo", "1") + a.Add("ba", "23") + if a.Len() != 4 { + t.Fatalf("unexpected number of elements: %d. Expecting 4", a.Len()) + } + s := a.String() + expectedS := "foo=bar&foo=baz&foo=1&ba=23" + if s != expectedS { + t.Fatalf("unexpected result: %q. Expecting %q", s, expectedS) + } + + var a1 Args + a1.Parse(s) + if a1.Len() != 4 { + t.Fatalf("unexpected number of elements: %d. Expecting 4", a.Len()) + } + + var barFound, bazFound, oneFound, baFound bool + a1.VisitAll(func(k, v []byte) { + switch string(k) { + case "foo": + switch string(v) { + case "bar": + barFound = true + case "baz": + bazFound = true + case "1": + oneFound = true + default: + t.Fatalf("unexpected value %q", v) + } + case "ba": + if string(v) != "23" { + t.Fatalf("unexpected value: %q. Expecting %q", v, "23") + } + baFound = true + default: + t.Fatalf("unexpected key found %q", k) + } + }) + if !barFound || !bazFound || !oneFound || !baFound { + t.Fatalf("something is missing: %v, %v, %v, %v", barFound, bazFound, oneFound, baFound) + } +} + +func TestArgsAcquireReleaseSequential(t *testing.T) { + testArgsAcquireRelease(t) +} + +func TestArgsAcquireReleaseConcurrent(t *testing.T) { + ch := make(chan struct{}, 10) + for i := 0; i < 10; i++ { + go func() { + testArgsAcquireRelease(t) + ch <- struct{}{} + }() + } + for i := 0; i < 10; i++ { + select { + case <-ch: + case <-time.After(time.Second): + t.Fatalf("timeout") + } + } +} + +func testArgsAcquireRelease(t *testing.T) { + a := AcquireArgs() + + for i := 0; i < 10; i++ { + k := fmt.Sprintf("key_%d", i) + v := fmt.Sprintf("value_%d", i*3+123) + a.Set(k, v) + } + + s := a.String() + a.Reset() + a.Parse(s) + + for i := 0; i < 10; i++ { + k := fmt.Sprintf("key_%d", i) + expectedV := fmt.Sprintf("value_%d", i*3+123) + v := a.Peek(k) + if string(v) != expectedV { + t.Fatalf("unexpected value %q for key %q. 
Expecting %q", v, k, expectedV) + } + } + + ReleaseArgs(a) +} + +func TestArgsPeekMulti(t *testing.T) { + var a Args + a.Parse("foo=123&bar=121&foo=321&foo=&barz=sdf") + + vv := a.PeekMulti("foo") + expectedVV := [][]byte{ + []byte("123"), + []byte("321"), + []byte(nil), + } + if !reflect.DeepEqual(vv, expectedVV) { + t.Fatalf("unexpected vv\n%#v\nExpecting\n%#v\n", vv, expectedVV) + } + + vv = a.PeekMulti("aaaa") + if len(vv) > 0 { + t.Fatalf("expecting empty result for non-existing key. Got %#v", vv) + } + + vv = a.PeekMulti("bar") + expectedVV = [][]byte{[]byte("121")} + if !reflect.DeepEqual(vv, expectedVV) { + t.Fatalf("unexpected vv\n%#v\nExpecting\n%#v\n", vv, expectedVV) + } +} + +func TestArgsEscape(t *testing.T) { + testArgsEscape(t, "foo", "bar", "foo=bar") + testArgsEscape(t, "f.o,1:2/4", "~`!@#$%^&*()_-=+\\|/[]{};:'\"<>,./?", + "f.o%2C1%3A2%2F4=%7E%60%21%40%23%24%25%5E%26*%28%29_-%3D%2B%5C%7C%2F%5B%5D%7B%7D%3B%3A%27%22%3C%3E%2C.%2F%3F") +} + +func testArgsEscape(t *testing.T, k, v, expectedS string) { + var a Args + a.Set(k, v) + s := a.String() + if s != expectedS { + t.Fatalf("unexpected args %q. Expecting %q. k=%q, v=%q", s, expectedS, k, v) + } +} + +func TestArgsWriteTo(t *testing.T) { + s := "foo=bar&baz=123&aaa=bbb" + + var a Args + a.Parse(s) + + var w ByteBuffer + n, err := a.WriteTo(&w) + if err != nil { + t.Fatalf("unexpected error: %s", err) + } + if n != int64(len(s)) { + t.Fatalf("unexpected n: %d. Expecting %d", n, len(s)) + } + result := string(w.B) + if result != s { + t.Fatalf("unexpected result %q. Expecting %q", result, s) + } +} + +func TestArgsGetBool(t *testing.T) { + testArgsGetBool(t, "", false) + testArgsGetBool(t, "0", false) + testArgsGetBool(t, "n", false) + testArgsGetBool(t, "no", false) + testArgsGetBool(t, "1", true) + testArgsGetBool(t, "y", true) + testArgsGetBool(t, "yes", true) + + testArgsGetBool(t, "123", false) + testArgsGetBool(t, "foobar", false) +} + +func testArgsGetBool(t *testing.T, value string, expectedResult bool) { + var a Args + a.Parse("v=" + value) + + result := a.GetBool("v") + if result != expectedResult { + t.Fatalf("unexpected result %v. Expecting %v for value %q", result, expectedResult, value) + } +} + +func TestArgsUint(t *testing.T) { + var a Args + a.SetUint("foo", 123) + a.SetUint("bar", 0) + a.SetUint("aaaa", 34566) + + expectedS := "foo=123&bar=0&aaaa=34566" + s := string(a.QueryString()) + if s != expectedS { + t.Fatalf("unexpected args %q. Expecting %q", s, expectedS) + } + + if a.GetUintOrZero("foo") != 123 { + t.Fatalf("unexpected arg value %d. Expecting %d", a.GetUintOrZero("foo"), 123) + } + if a.GetUintOrZero("bar") != 0 { + t.Fatalf("unexpected arg value %d. Expecting %d", a.GetUintOrZero("bar"), 0) + } + if a.GetUintOrZero("aaaa") != 34566 { + t.Fatalf("unexpected arg value %d. Expecting %d", a.GetUintOrZero("aaaa"), 34566) + } + + if string(a.Peek("foo")) != "123" { + t.Fatalf("unexpected arg value %q. Expecting %q", a.Peek("foo"), "123") + } + if string(a.Peek("bar")) != "0" { + t.Fatalf("unexpected arg value %q. Expecting %q", a.Peek("bar"), "0") + } + if string(a.Peek("aaaa")) != "34566" { + t.Fatalf("unexpected arg value %q. 
Expecting %q", a.Peek("aaaa"), "34566") + } +} + +func TestArgsCopyTo(t *testing.T) { + var a Args + + // empty args + testCopyTo(t, &a) + + a.Set("foo", "bar") + testCopyTo(t, &a) + + a.Set("xxx", "yyy") + testCopyTo(t, &a) + + a.Del("foo") + testCopyTo(t, &a) +} + +func testCopyTo(t *testing.T, a *Args) { + keys := make(map[string]struct{}) + a.VisitAll(func(k, v []byte) { + keys[string(k)] = struct{}{} + }) + + var b Args + a.CopyTo(&b) + + b.VisitAll(func(k, v []byte) { + if _, ok := keys[string(k)]; !ok { + t.Fatalf("unexpected key %q after copying from %q", k, a.String()) + } + delete(keys, string(k)) + }) + if len(keys) > 0 { + t.Fatalf("missing keys %#v after copying from %q", keys, a.String()) + } +} + +func TestArgsVisitAll(t *testing.T) { + var a Args + a.Set("foo", "bar") + + i := 0 + a.VisitAll(func(k, v []byte) { + if string(k) != "foo" { + t.Fatalf("unexpected key %q. Expected %q", k, "foo") + } + if string(v) != "bar" { + t.Fatalf("unexpected value %q. Expected %q", v, "bar") + } + i++ + }) + if i != 1 { + t.Fatalf("unexpected number of VisitAll calls: %d. Expected %d", i, 1) + } +} + +func TestArgsStringCompose(t *testing.T) { + var a Args + a.Set("foo", "bar") + a.Set("aa", "bbb") + a.Set("привет", "мир") + a.Set("", "xxxx") + a.Set("cvx", "") + + expectedS := "foo=bar&aa=bbb&%D0%BF%D1%80%D0%B8%D0%B2%D0%B5%D1%82=%D0%BC%D0%B8%D1%80&=xxxx&cvx" + s := a.String() + if s != expectedS { + t.Fatalf("Unexpected string %q. Exected %q", s, expectedS) + } +} + +func TestArgsString(t *testing.T) { + var a Args + + testArgsString(t, &a, "") + testArgsString(t, &a, "foobar") + testArgsString(t, &a, "foo=bar") + testArgsString(t, &a, "foo=bar&baz=sss") + testArgsString(t, &a, "") + testArgsString(t, &a, "f%20o=x.x*-_8x%D0%BF%D1%80%D0%B8%D0%B2%D0%B5aaa&sdf=ss") + testArgsString(t, &a, "=asdfsdf") +} + +func testArgsString(t *testing.T, a *Args, s string) { + a.Parse(s) + s1 := a.String() + if s != s1 { + t.Fatalf("Unexpected args %q. Expected %q", s1, s) + } +} + +func TestArgsSetGetDel(t *testing.T) { + var a Args + + if len(a.Peek("foo")) > 0 { + t.Fatalf("Unexpected value: %q", a.Peek("foo")) + } + if len(a.Peek("")) > 0 { + t.Fatalf("Unexpected value: %q", a.Peek("")) + } + a.Del("xxx") + + for j := 0; j < 3; j++ { + for i := 0; i < 10; i++ { + k := fmt.Sprintf("foo%d", i) + v := fmt.Sprintf("bar_%d", i) + a.Set(k, v) + if string(a.Peek(k)) != v { + t.Fatalf("Unexpected value: %q. Expected %q", a.Peek(k), v) + } + } + } + for i := 0; i < 10; i++ { + k := fmt.Sprintf("foo%d", i) + v := fmt.Sprintf("bar_%d", i) + if string(a.Peek(k)) != v { + t.Fatalf("Unexpected value: %q. Expected %q", a.Peek(k), v) + } + a.Del(k) + if string(a.Peek(k)) != "" { + t.Fatalf("Unexpected value: %q. Expected %q", a.Peek(k), "") + } + } + + a.Parse("aaa=xxx&bb=aa") + if string(a.Peek("foo0")) != "" { + t.Fatalf("Unepxected value %q", a.Peek("foo0")) + } + if string(a.Peek("aaa")) != "xxx" { + t.Fatalf("Unexpected value %q. Expected %q", a.Peek("aaa"), "xxx") + } + if string(a.Peek("bb")) != "aa" { + t.Fatalf("Unexpected value %q. Expected %q", a.Peek("bb"), "aa") + } + + for i := 0; i < 10; i++ { + k := fmt.Sprintf("xx%d", i) + v := fmt.Sprintf("yy%d", i) + a.Set(k, v) + if string(a.Peek(k)) != v { + t.Fatalf("Unexpected value: %q. Expected %q", a.Peek(k), v) + } + } + for i := 5; i < 10; i++ { + k := fmt.Sprintf("xx%d", i) + v := fmt.Sprintf("yy%d", i) + if string(a.Peek(k)) != v { + t.Fatalf("Unexpected value: %q. 
Expected %q", a.Peek(k), v) + } + a.Del(k) + if string(a.Peek(k)) != "" { + t.Fatalf("Unexpected value: %q. Expected %q", a.Peek(k), "") + } + } +} + +func TestArgsParse(t *testing.T) { + var a Args + + // empty args + testArgsParse(t, &a, "", 0, "foo=", "bar=", "=") + + // arg without value + testArgsParse(t, &a, "foo1", 1, "foo=", "bar=", "=") + + // arg without value, but with equal sign + testArgsParse(t, &a, "foo2=", 1, "foo=", "bar=", "=") + + // arg with value + testArgsParse(t, &a, "foo3=bar1", 1, "foo3=bar1", "bar=", "=") + + // empty key + testArgsParse(t, &a, "=bar2", 1, "foo=", "=bar2", "bar2=") + + // missing kv + testArgsParse(t, &a, "&&&&", 0, "foo=", "bar=", "=") + + // multiple values with the same key + testArgsParse(t, &a, "x=1&x=2&x=3", 3, "x=1") + + // multiple args + testArgsParse(t, &a, "&&&qw=er&tyx=124&&&zxc_ss=2234&&", 3, "qw=er", "tyx=124", "zxc_ss=2234") + + // multiple args without values + testArgsParse(t, &a, "&&a&&b&&bar&baz", 4, "a=", "b=", "bar=", "baz=") + + // values with '=' + testArgsParse(t, &a, "zz=1&k=v=v=a=a=s", 2, "k=v=v=a=a=s", "zz=1") + + // mixed '=' and '&' + testArgsParse(t, &a, "sss&z=dsf=&df", 3, "sss=", "z=dsf=", "df=") + + // encoded args + testArgsParse(t, &a, "f+o%20o=%D0%BF%D1%80%D0%B8%D0%B2%D0%B5%D1%82+test", 1, "f o o=привет test") + + // invalid percent encoding + testArgsParse(t, &a, "f%=x&qw%z=d%0k%20p&%%20=%%%20x", 3, "f%=x", "qw%z=d%0k p", "% =%% x") + + // special chars + testArgsParse(t, &a, "a.b,c:d/e=f.g,h:i/q", 1, "a.b,c:d/e=f.g,h:i/q") +} + +func TestArgsHas(t *testing.T) { + var a Args + + // single arg + testArgsHas(t, &a, "foo", "foo") + testArgsHasNot(t, &a, "foo", "bar", "baz", "") + + // multi args without values + testArgsHas(t, &a, "foo&bar", "foo", "bar") + testArgsHasNot(t, &a, "foo&bar", "", "aaaa") + + // multi args + testArgsHas(t, &a, "b=xx&=aaa&c=", "b", "", "c") + testArgsHasNot(t, &a, "b=xx&=aaa&c=", "xx", "aaa", "foo") + + // encoded args + testArgsHas(t, &a, "a+b=c+d%20%20e", "a b") + testArgsHasNot(t, &a, "a+b=c+d", "a+b", "c+d") +} + +func testArgsHas(t *testing.T, a *Args, s string, expectedKeys ...string) { + a.Parse(s) + for _, key := range expectedKeys { + if !a.Has(key) { + t.Fatalf("Missing key %q in %q", key, s) + } + } +} + +func testArgsHasNot(t *testing.T, a *Args, s string, unexpectedKeys ...string) { + a.Parse(s) + for _, key := range unexpectedKeys { + if a.Has(key) { + t.Fatalf("Unexpected key %q in %q", key, s) + } + } +} + +func testArgsParse(t *testing.T, a *Args, s string, expectedLen int, expectedArgs ...string) { + a.Parse(s) + if a.Len() != expectedLen { + t.Fatalf("Unexpected args len %d. Expected %d. s=%q", a.Len(), expectedLen, s) + } + for _, xx := range expectedArgs { + tmp := strings.SplitN(xx, "=", 2) + k := tmp[0] + v := tmp[1] + buf := a.Peek(k) + if string(buf) != v { + t.Fatalf("Unexpected value for key=%q: %q. Expected %q. 
s=%q", k, buf, v, s) + } + } +} diff --git a/vendor/github.com/erikdubbelboer/fasthttp/args_timing_test.go b/vendor/github.com/erikdubbelboer/fasthttp/args_timing_test.go new file mode 100644 index 0000000..d6e9e98 --- /dev/null +++ b/vendor/github.com/erikdubbelboer/fasthttp/args_timing_test.go @@ -0,0 +1,30 @@ +package fasthttp + +import ( + "bytes" + "testing" +) + +func BenchmarkArgsParse(b *testing.B) { + s := []byte("foo=bar&baz=qqq&aaaaa=bbbb") + b.RunParallel(func(pb *testing.PB) { + var a Args + for pb.Next() { + a.ParseBytes(s) + } + }) +} + +func BenchmarkArgsPeek(b *testing.B) { + value := []byte("foobarbaz1234") + key := "foobarbaz" + b.RunParallel(func(pb *testing.PB) { + var a Args + a.SetBytesV(key, value) + for pb.Next() { + if !bytes.Equal(a.Peek(key), value) { + b.Fatalf("unexpected arg value %q. Expecting %q", a.Peek(key), value) + } + } + }) +} diff --git a/vendor/github.com/erikdubbelboer/fasthttp/bytebuffer.go b/vendor/github.com/erikdubbelboer/fasthttp/bytebuffer.go new file mode 100644 index 0000000..f965172 --- /dev/null +++ b/vendor/github.com/erikdubbelboer/fasthttp/bytebuffer.go @@ -0,0 +1,64 @@ +package fasthttp + +import ( + "github.com/valyala/bytebufferpool" +) + +// ByteBuffer provides byte buffer, which can be used with fasthttp API +// in order to minimize memory allocations. +// +// ByteBuffer may be used with functions appending data to the given []byte +// slice. See example code for details. +// +// Use AcquireByteBuffer for obtaining an empty byte buffer. +// +// ByteBuffer is deprecated. Use github.com/valyala/bytebufferpool instead. +type ByteBuffer bytebufferpool.ByteBuffer + +// Write implements io.Writer - it appends p to ByteBuffer.B +func (b *ByteBuffer) Write(p []byte) (int, error) { + return bb(b).Write(p) +} + +// WriteString appends s to ByteBuffer.B +func (b *ByteBuffer) WriteString(s string) (int, error) { + return bb(b).WriteString(s) +} + +// Set sets ByteBuffer.B to p +func (b *ByteBuffer) Set(p []byte) { + bb(b).Set(p) +} + +// SetString sets ByteBuffer.B to s +func (b *ByteBuffer) SetString(s string) { + bb(b).SetString(s) +} + +// Reset makes ByteBuffer.B empty. +func (b *ByteBuffer) Reset() { + bb(b).Reset() +} + +// AcquireByteBuffer returns an empty byte buffer from the pool. +// +// Acquired byte buffer may be returned to the pool via ReleaseByteBuffer call. +// This reduces the number of memory allocations required for byte buffer +// management. +func AcquireByteBuffer() *ByteBuffer { + return (*ByteBuffer)(defaultByteBufferPool.Get()) +} + +// ReleaseByteBuffer returns byte buffer to the pool. +// +// ByteBuffer.B mustn't be touched after returning it to the pool. +// Otherwise data races occur. +func ReleaseByteBuffer(b *ByteBuffer) { + defaultByteBufferPool.Put(bb(b)) +} + +func bb(b *ByteBuffer) *bytebufferpool.ByteBuffer { + return (*bytebufferpool.ByteBuffer)(b) +} + +var defaultByteBufferPool bytebufferpool.Pool diff --git a/vendor/github.com/erikdubbelboer/fasthttp/bytebuffer_example_test.go b/vendor/github.com/erikdubbelboer/fasthttp/bytebuffer_example_test.go new file mode 100644 index 0000000..7df375c --- /dev/null +++ b/vendor/github.com/erikdubbelboer/fasthttp/bytebuffer_example_test.go @@ -0,0 +1,29 @@ +package fasthttp_test + +import ( + "fmt" + + "github.com/erikdubbelboer/fasthttp" +) + +func ExampleByteBuffer() { + // This request handler sets 'Your-IP' response header + // to 'Your IP is '. It uses ByteBuffer for constructing response + // header value with zero memory allocations. 
+ yourIPRequestHandler := func(ctx *fasthttp.RequestCtx) { + b := fasthttp.AcquireByteBuffer() + b.B = append(b.B, "Your IP is <"...) + b.B = fasthttp.AppendIPv4(b.B, ctx.RemoteIP()) + b.B = append(b.B, ">"...) + ctx.Response.Header.SetBytesV("Your-IP", b.B) + + fmt.Fprintf(ctx, "Check response headers - they must contain 'Your-IP: %s'", b.B) + + // It is safe to release byte buffer now, since it is + // no longer used. + fasthttp.ReleaseByteBuffer(b) + } + + // Start fasthttp server returning your ip in response headers. + fasthttp.ListenAndServe(":8080", yourIPRequestHandler) +} diff --git a/vendor/github.com/erikdubbelboer/fasthttp/bytebuffer_test.go b/vendor/github.com/erikdubbelboer/fasthttp/bytebuffer_test.go new file mode 100644 index 0000000..2ecbd77 --- /dev/null +++ b/vendor/github.com/erikdubbelboer/fasthttp/bytebuffer_test.go @@ -0,0 +1,43 @@ +package fasthttp + +import ( + "fmt" + "testing" + "time" +) + +func TestByteBufferAcquireReleaseSerial(t *testing.T) { + testByteBufferAcquireRelease(t) +} + +func TestByteBufferAcquireReleaseConcurrent(t *testing.T) { + concurrency := 10 + ch := make(chan struct{}, concurrency) + for i := 0; i < concurrency; i++ { + go func() { + testByteBufferAcquireRelease(t) + ch <- struct{}{} + }() + } + + for i := 0; i < concurrency; i++ { + select { + case <-ch: + case <-time.After(time.Second): + t.Fatalf("timeout!") + } + } +} + +func testByteBufferAcquireRelease(t *testing.T) { + for i := 0; i < 10; i++ { + b := AcquireByteBuffer() + b.B = append(b.B, "num "...) + b.B = AppendUint(b.B, i) + expectedS := fmt.Sprintf("num %d", i) + if string(b.B) != expectedS { + t.Fatalf("unexpected result: %q. Expecting %q", b.B, expectedS) + } + ReleaseByteBuffer(b) + } +} diff --git a/vendor/github.com/erikdubbelboer/fasthttp/bytebuffer_timing_test.go b/vendor/github.com/erikdubbelboer/fasthttp/bytebuffer_timing_test.go new file mode 100644 index 0000000..92bbafa --- /dev/null +++ b/vendor/github.com/erikdubbelboer/fasthttp/bytebuffer_timing_test.go @@ -0,0 +1,32 @@ +package fasthttp + +import ( + "bytes" + "testing" +) + +func BenchmarkByteBufferWrite(b *testing.B) { + s := []byte("foobarbaz") + b.RunParallel(func(pb *testing.PB) { + var buf ByteBuffer + for pb.Next() { + for i := 0; i < 100; i++ { + buf.Write(s) + } + buf.Reset() + } + }) +} + +func BenchmarkBytesBufferWrite(b *testing.B) { + s := []byte("foobarbaz") + b.RunParallel(func(pb *testing.PB) { + var buf bytes.Buffer + for pb.Next() { + for i := 0; i < 100; i++ { + buf.Write(s) + } + buf.Reset() + } + }) +} diff --git a/vendor/github.com/erikdubbelboer/fasthttp/bytesconv.go b/vendor/github.com/erikdubbelboer/fasthttp/bytesconv.go new file mode 100644 index 0000000..8fdf307 --- /dev/null +++ b/vendor/github.com/erikdubbelboer/fasthttp/bytesconv.go @@ -0,0 +1,447 @@ +package fasthttp + +import ( + "bufio" + "bytes" + "errors" + "fmt" + "io" + "math" + "net" + "reflect" + "strings" + "sync" + "time" + "unsafe" +) + +// AppendHTMLEscape appends html-escaped s to dst and returns the extended dst. +func AppendHTMLEscape(dst []byte, s string) []byte { + if strings.IndexByte(s, '<') < 0 && + strings.IndexByte(s, '>') < 0 && + strings.IndexByte(s, '"') < 0 && + strings.IndexByte(s, '\'') < 0 { + + // fast path - nothing to escape + return append(dst, s...) 
+ } + + // slow path + var prev int + var sub string + for i, n := 0, len(s); i < n; i++ { + sub = "" + switch s[i] { + case '<': + sub = "<" + case '>': + sub = ">" + case '"': + sub = """ + case '\'': + sub = "'" + } + if len(sub) > 0 { + dst = append(dst, s[prev:i]...) + dst = append(dst, sub...) + prev = i + 1 + } + } + return append(dst, s[prev:]...) +} + +// AppendHTMLEscapeBytes appends html-escaped s to dst and returns +// the extended dst. +func AppendHTMLEscapeBytes(dst, s []byte) []byte { + return AppendHTMLEscape(dst, b2s(s)) +} + +// AppendIPv4 appends string representation of the given ip v4 to dst +// and returns the extended dst. +func AppendIPv4(dst []byte, ip net.IP) []byte { + ip = ip.To4() + if ip == nil { + return append(dst, "non-v4 ip passed to AppendIPv4"...) + } + + dst = AppendUint(dst, int(ip[0])) + for i := 1; i < 4; i++ { + dst = append(dst, '.') + dst = AppendUint(dst, int(ip[i])) + } + return dst +} + +var errEmptyIPStr = errors.New("empty ip address string") + +// ParseIPv4 parses ip address from ipStr into dst and returns the extended dst. +func ParseIPv4(dst net.IP, ipStr []byte) (net.IP, error) { + if len(ipStr) == 0 { + return dst, errEmptyIPStr + } + if len(dst) < net.IPv4len { + dst = make([]byte, net.IPv4len) + } + copy(dst, net.IPv4zero) + dst = dst.To4() + if dst == nil { + panic("BUG: dst must not be nil") + } + + b := ipStr + for i := 0; i < 3; i++ { + n := bytes.IndexByte(b, '.') + if n < 0 { + return dst, fmt.Errorf("cannot find dot in ipStr %q", ipStr) + } + v, err := ParseUint(b[:n]) + if err != nil { + return dst, fmt.Errorf("cannot parse ipStr %q: %s", ipStr, err) + } + if v > 255 { + return dst, fmt.Errorf("cannot parse ipStr %q: ip part cannot exceed 255: parsed %d", ipStr, v) + } + dst[i] = byte(v) + b = b[n+1:] + } + v, err := ParseUint(b) + if err != nil { + return dst, fmt.Errorf("cannot parse ipStr %q: %s", ipStr, err) + } + if v > 255 { + return dst, fmt.Errorf("cannot parse ipStr %q: ip part cannot exceed 255: parsed %d", ipStr, v) + } + dst[3] = byte(v) + + return dst, nil +} + +// AppendHTTPDate appends HTTP-compliant (RFC1123) representation of date +// to dst and returns the extended dst. +func AppendHTTPDate(dst []byte, date time.Time) []byte { + dst = date.In(time.UTC).AppendFormat(dst, time.RFC1123) + copy(dst[len(dst)-3:], strGMT) + return dst +} + +// ParseHTTPDate parses HTTP-compliant (RFC1123) date. +func ParseHTTPDate(date []byte) (time.Time, error) { + return time.Parse(time.RFC1123, b2s(date)) +} + +// AppendUint appends n to dst and returns the extended dst. +func AppendUint(dst []byte, n int) []byte { + if n < 0 { + panic("BUG: int must be positive") + } + + var b [20]byte + buf := b[:] + i := len(buf) + var q int + for n >= 10 { + i-- + q = n / 10 + buf[i] = '0' + byte(n-q*10) + n = q + } + i-- + buf[i] = '0' + byte(n) + + dst = append(dst, buf[i:]...) + return dst +} + +// ParseUint parses uint from buf. +func ParseUint(buf []byte) (int, error) { + v, n, err := parseUintBuf(buf) + if n != len(buf) { + return -1, errUnexpectedTrailingChar + } + return v, err +} + +var ( + errEmptyInt = errors.New("empty integer") + errUnexpectedFirstChar = errors.New("unexpected first char found. Expecting 0-9") + errUnexpectedTrailingChar = errors.New("unexpected trailing char found. 
Expecting 0-9") + errTooLongInt = errors.New("too long int") +) + +func parseUintBuf(b []byte) (int, int, error) { + n := len(b) + if n == 0 { + return -1, 0, errEmptyInt + } + v := 0 + for i := 0; i < n; i++ { + c := b[i] + k := c - '0' + if k > 9 { + if i == 0 { + return -1, i, errUnexpectedFirstChar + } + return v, i, nil + } + if i >= maxIntChars { + return -1, i, errTooLongInt + } + v = 10*v + int(k) + } + return v, n, nil +} + +var ( + errEmptyFloat = errors.New("empty float number") + errDuplicateFloatPoint = errors.New("duplicate point found in float number") + errUnexpectedFloatEnd = errors.New("unexpected end of float number") + errInvalidFloatExponent = errors.New("invalid float number exponent") + errUnexpectedFloatChar = errors.New("unexpected char found in float number") +) + +// ParseUfloat parses unsigned float from buf. +func ParseUfloat(buf []byte) (float64, error) { + if len(buf) == 0 { + return -1, errEmptyFloat + } + b := buf + var v uint64 + var offset = 1.0 + var pointFound bool + for i, c := range b { + if c < '0' || c > '9' { + if c == '.' { + if pointFound { + return -1, errDuplicateFloatPoint + } + pointFound = true + continue + } + if c == 'e' || c == 'E' { + if i+1 >= len(b) { + return -1, errUnexpectedFloatEnd + } + b = b[i+1:] + minus := -1 + switch b[0] { + case '+': + b = b[1:] + minus = 1 + case '-': + b = b[1:] + default: + minus = 1 + } + vv, err := ParseUint(b) + if err != nil { + return -1, errInvalidFloatExponent + } + return float64(v) * offset * math.Pow10(minus*int(vv)), nil + } + return -1, errUnexpectedFloatChar + } + v = 10*v + uint64(c-'0') + if pointFound { + offset /= 10 + } + } + return float64(v) * offset, nil +} + +var ( + errEmptyHexNum = errors.New("empty hex number") + errTooLargeHexNum = errors.New("too large hex number") +) + +func readHexInt(r *bufio.Reader) (int, error) { + n := 0 + i := 0 + var k int + for { + c, err := r.ReadByte() + if err != nil { + if err == io.EOF && i > 0 { + return n, nil + } + return -1, err + } + k = int(hex2intTable[c]) + if k == 16 { + if i == 0 { + return -1, errEmptyHexNum + } + r.UnreadByte() + return n, nil + } + if i >= maxHexIntChars { + return -1, errTooLargeHexNum + } + n = (n << 4) | k + i++ + } +} + +var hexIntBufPool sync.Pool + +func writeHexInt(w *bufio.Writer, n int) error { + if n < 0 { + panic("BUG: int must be positive") + } + + v := hexIntBufPool.Get() + if v == nil { + v = make([]byte, maxHexIntChars+1) + } + buf := v.([]byte) + i := len(buf) - 1 + for { + buf[i] = int2hexbyte(n & 0xf) + n >>= 4 + if n == 0 { + break + } + i-- + } + _, err := w.Write(buf[i:]) + hexIntBufPool.Put(v) + return err +} + +func int2hexbyte(n int) byte { + if n < 10 { + return '0' + byte(n) + } + return 'a' + byte(n) - 10 +} + +func hexCharUpper(c byte) byte { + if c < 10 { + return '0' + c + } + return c - 10 + 'A' +} + +var hex2intTable = func() []byte { + b := make([]byte, 256) + for i := 0; i < 256; i++ { + c := byte(16) + if i >= '0' && i <= '9' { + c = byte(i) - '0' + } else if i >= 'a' && i <= 'f' { + c = byte(i) - 'a' + 10 + } else if i >= 'A' && i <= 'F' { + c = byte(i) - 'A' + 10 + } + b[i] = c + } + return b +}() + +const toLower = 'a' - 'A' + +var toLowerTable = func() [256]byte { + var a [256]byte + for i := 0; i < 256; i++ { + c := byte(i) + if c >= 'A' && c <= 'Z' { + c += toLower + } + a[i] = c + } + return a +}() + +var toUpperTable = func() [256]byte { + var a [256]byte + for i := 0; i < 256; i++ { + c := byte(i) + if c >= 'a' && c <= 'z' { + c -= toLower + } + a[i] = c + } + return a +}() + 
+func lowercaseBytes(b []byte) { + for i := 0; i < len(b); i++ { + p := &b[i] + *p = toLowerTable[*p] + } +} + +// b2s converts byte slice to a string without memory allocation. +// See https://groups.google.com/forum/#!msg/Golang-Nuts/ENgbUzYvCuU/90yGx7GUAgAJ . +// +// Note it may break if string and/or slice header will change +// in the future go versions. +func b2s(b []byte) string { + return *(*string)(unsafe.Pointer(&b)) +} + +// s2b converts string to a byte slice without memory allocation. +// +// Note it may break if string and/or slice header will change +// in the future go versions. +func s2b(s string) []byte { + sh := (*reflect.StringHeader)(unsafe.Pointer(&s)) + bh := reflect.SliceHeader{ + Data: sh.Data, + Len: sh.Len, + Cap: sh.Len, + } + return *(*[]byte)(unsafe.Pointer(&bh)) +} + +// AppendUnquotedArg appends url-decoded src to dst and returns appended dst. +// +// dst may point to src. In this case src will be overwritten. +func AppendUnquotedArg(dst, src []byte) []byte { + return decodeArgAppend(dst, src) +} + +// AppendQuotedArg appends url-encoded src to dst and returns appended dst. +func AppendQuotedArg(dst, src []byte) []byte { + for _, c := range src { + // See http://www.w3.org/TR/html5/forms.html#form-submission-algorithm + if c >= 'a' && c <= 'z' || c >= 'A' && c <= 'Z' || c >= '0' && c <= '9' || + c == '*' || c == '-' || c == '.' || c == '_' { + dst = append(dst, c) + } else { + dst = append(dst, '%', hexCharUpper(c>>4), hexCharUpper(c&15)) + } + } + return dst +} + +func appendQuotedPath(dst, src []byte) []byte { + for _, c := range src { + if c >= 'a' && c <= 'z' || c >= 'A' && c <= 'Z' || c >= '0' && c <= '9' || + c == '/' || c == '.' || c == ',' || c == '=' || c == ':' || c == '&' || c == '~' || c == '-' || c == '_' { + dst = append(dst, c) + } else { + dst = append(dst, '%', hexCharUpper(c>>4), hexCharUpper(c&15)) + } + } + return dst +} + +// EqualBytesStr returns true if string(b) == s. +// +// This function has no performance benefits comparing to string(b) == s. +// It is left here for backwards compatibility only. +// +// This function is deperecated and may be deleted soon. +func EqualBytesStr(b []byte, s string) bool { + return string(b) == s +} + +// AppendBytesStr appends src to dst and returns the extended dst. +// +// This function has no performance benefits comparing to append(dst, src...). +// It is left here for backwards compatibility only. +// +// This function is deprecated and may be deleted soon. +func AppendBytesStr(dst []byte, src string) []byte { + return append(dst, src...) 
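AppendQuotedArg and AppendUnquotedArg above are the query-argument encoding and decoding counterparts; as their comments note, dst may point at src for AppendUnquotedArg, so decoding can happen in place. The following sketch is an illustrative round trip, not part of the vendored file, using only those two exported functions.

```go
package main

import (
	"fmt"

	"github.com/erikdubbelboer/fasthttp"
)

func main() {
	src := []byte("foo bar&baz=1")

	// Percent-encode src so it is safe to embed as a query argument.
	quoted := fasthttp.AppendQuotedArg(nil, src)
	fmt.Printf("%s\n", quoted) // foo%20bar%26baz%3D1

	// Decode again. Passing quoted[:0] as dst decodes in place, which
	// is allowed because the decoded form is never longer than the input.
	unquoted := fasthttp.AppendUnquotedArg(quoted[:0], quoted)
	fmt.Printf("%s\n", unquoted) // foo bar&baz=1
}
```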
+} diff --git a/vendor/github.com/erikdubbelboer/fasthttp/bytesconv_32.go b/vendor/github.com/erikdubbelboer/fasthttp/bytesconv_32.go new file mode 100644 index 0000000..1437754 --- /dev/null +++ b/vendor/github.com/erikdubbelboer/fasthttp/bytesconv_32.go @@ -0,0 +1,8 @@ +// +build !amd64,!arm64,!ppc64 + +package fasthttp + +const ( + maxIntChars = 9 + maxHexIntChars = 7 +) diff --git a/vendor/github.com/erikdubbelboer/fasthttp/bytesconv_32_test.go b/vendor/github.com/erikdubbelboer/fasthttp/bytesconv_32_test.go new file mode 100644 index 0000000..eb8f660 --- /dev/null +++ b/vendor/github.com/erikdubbelboer/fasthttp/bytesconv_32_test.go @@ -0,0 +1,39 @@ +// +build !amd64,!arm64,!ppc64 + +package fasthttp + +import ( + "testing" +) + +func TestWriteHexInt(t *testing.T) { + testWriteHexInt(t, 0, "0") + testWriteHexInt(t, 1, "1") + testWriteHexInt(t, 0x123, "123") + testWriteHexInt(t, 0x7fffffff, "7fffffff") +} + +func TestAppendUint(t *testing.T) { + testAppendUint(t, 0) + testAppendUint(t, 123) + testAppendUint(t, 0x7fffffff) + + for i := 0; i < 2345; i++ { + testAppendUint(t, i) + } +} + +func TestReadHexIntSuccess(t *testing.T) { + testReadHexIntSuccess(t, "0", 0) + testReadHexIntSuccess(t, "fF", 0xff) + testReadHexIntSuccess(t, "00abc", 0xabc) + testReadHexIntSuccess(t, "7ffffff", 0x7ffffff) + testReadHexIntSuccess(t, "000", 0) + testReadHexIntSuccess(t, "1234ZZZ", 0x1234) +} + +func TestParseUintSuccess(t *testing.T) { + testParseUintSuccess(t, "0", 0) + testParseUintSuccess(t, "123", 123) + testParseUintSuccess(t, "123456789", 123456789) +} diff --git a/vendor/github.com/erikdubbelboer/fasthttp/bytesconv_64.go b/vendor/github.com/erikdubbelboer/fasthttp/bytesconv_64.go new file mode 100644 index 0000000..09d07ef --- /dev/null +++ b/vendor/github.com/erikdubbelboer/fasthttp/bytesconv_64.go @@ -0,0 +1,8 @@ +// +build amd64 arm64 ppc64 + +package fasthttp + +const ( + maxIntChars = 18 + maxHexIntChars = 15 +) diff --git a/vendor/github.com/erikdubbelboer/fasthttp/bytesconv_64_test.go b/vendor/github.com/erikdubbelboer/fasthttp/bytesconv_64_test.go new file mode 100644 index 0000000..52d9e3e --- /dev/null +++ b/vendor/github.com/erikdubbelboer/fasthttp/bytesconv_64_test.go @@ -0,0 +1,41 @@ +// +build amd64 arm64 ppc64 + +package fasthttp + +import ( + "testing" +) + +func TestWriteHexInt(t *testing.T) { + testWriteHexInt(t, 0, "0") + testWriteHexInt(t, 1, "1") + testWriteHexInt(t, 0x123, "123") + testWriteHexInt(t, 0x7fffffffffffffff, "7fffffffffffffff") +} + +func TestAppendUint(t *testing.T) { + testAppendUint(t, 0) + testAppendUint(t, 123) + testAppendUint(t, 0x7fffffffffffffff) + + for i := 0; i < 2345; i++ { + testAppendUint(t, i) + } +} + +func TestReadHexIntSuccess(t *testing.T) { + testReadHexIntSuccess(t, "0", 0) + testReadHexIntSuccess(t, "fF", 0xff) + testReadHexIntSuccess(t, "00abc", 0xabc) + testReadHexIntSuccess(t, "7fffffff", 0x7fffffff) + testReadHexIntSuccess(t, "000", 0) + testReadHexIntSuccess(t, "1234ZZZ", 0x1234) + testReadHexIntSuccess(t, "7ffffffffffffff", 0x7ffffffffffffff) +} + +func TestParseUintSuccess(t *testing.T) { + testParseUintSuccess(t, "0", 0) + testParseUintSuccess(t, "123", 123) + testParseUintSuccess(t, "1234567890", 1234567890) + testParseUintSuccess(t, "123456789012345678", 123456789012345678) +} diff --git a/vendor/github.com/erikdubbelboer/fasthttp/bytesconv_test.go b/vendor/github.com/erikdubbelboer/fasthttp/bytesconv_test.go new file mode 100644 index 0000000..b5da44d --- /dev/null +++ b/vendor/github.com/erikdubbelboer/fasthttp/bytesconv_test.go 
@@ -0,0 +1,300 @@ +package fasthttp + +import ( + "bufio" + "bytes" + "fmt" + "net" + "testing" + "time" +) + +func TestAppendHTMLEscape(t *testing.T) { + testAppendHTMLEscape(t, "", "") + testAppendHTMLEscape(t, "<", "<") + testAppendHTMLEscape(t, "a", "a") + testAppendHTMLEscape(t, `><"''`, "><"''") + testAppendHTMLEscape(t, "foaxxx", "fo<b x='ss'>a</b>xxx") +} + +func testAppendHTMLEscape(t *testing.T, s, expectedS string) { + buf := AppendHTMLEscapeBytes(nil, []byte(s)) + if string(buf) != expectedS { + t.Fatalf("unexpected html-escaped string %q. Expecting %q. Original string %q", buf, expectedS, s) + } +} + +func TestParseIPv4(t *testing.T) { + testParseIPv4(t, "0.0.0.0", true) + testParseIPv4(t, "255.255.255.255", true) + testParseIPv4(t, "123.45.67.89", true) + + // ipv6 shouldn't work + testParseIPv4(t, "2001:4860:0:2001::68", false) + + // invalid ip + testParseIPv4(t, "foobar", false) + testParseIPv4(t, "1.2.3", false) + testParseIPv4(t, "123.456.789.11", false) +} + +func testParseIPv4(t *testing.T, ipStr string, isValid bool) { + ip, err := ParseIPv4(nil, []byte(ipStr)) + if isValid { + if err != nil { + t.Fatalf("unexpected error when parsing ip %q: %s", ipStr, err) + } + s := string(AppendIPv4(nil, ip)) + if s != ipStr { + t.Fatalf("unexpected ip parsed %q. Expecting %q", s, ipStr) + } + } else { + if err == nil { + t.Fatalf("expecting error when parsing ip %q", ipStr) + } + } +} + +func TestAppendIPv4(t *testing.T) { + testAppendIPv4(t, "0.0.0.0", true) + testAppendIPv4(t, "127.0.0.1", true) + testAppendIPv4(t, "8.8.8.8", true) + testAppendIPv4(t, "123.45.67.89", true) + + // ipv6 shouldn't work + testAppendIPv4(t, "2001:4860:0:2001::68", false) +} + +func testAppendIPv4(t *testing.T, ipStr string, isValid bool) { + ip := net.ParseIP(ipStr) + if ip == nil { + t.Fatalf("cannot parse ip %q", ipStr) + } + s := string(AppendIPv4(nil, ip)) + if isValid { + if s != ipStr { + t.Fatalf("unepxected ip %q. Expecting %q", s, ipStr) + } + } else { + ipStr = "non-v4 ip passed to AppendIPv4" + if s != ipStr { + t.Fatalf("unexpected ip %q. Expecting %q", s, ipStr) + } + } +} + +func testAppendUint(t *testing.T, n int) { + expectedS := fmt.Sprintf("%d", n) + s := AppendUint(nil, n) + if string(s) != expectedS { + t.Fatalf("unexpected uint %q. Expecting %q. n=%d", s, expectedS, n) + } +} + +func testWriteHexInt(t *testing.T, n int, expectedS string) { + var w ByteBuffer + bw := bufio.NewWriter(&w) + if err := writeHexInt(bw, n); err != nil { + t.Fatalf("unexpected error when writing hex %x: %s", n, err) + } + if err := bw.Flush(); err != nil { + t.Fatalf("unexpected error when flushing hex %x: %s", n, err) + } + s := string(w.B) + if s != expectedS { + t.Fatalf("unexpected hex after writing %q. Expected %q", s, expectedS) + } +} + +func TestReadHexIntError(t *testing.T) { + testReadHexIntError(t, "") + testReadHexIntError(t, "ZZZ") + testReadHexIntError(t, "-123") + testReadHexIntError(t, "+434") +} + +func testReadHexIntError(t *testing.T, s string) { + r := bytes.NewBufferString(s) + br := bufio.NewReader(r) + n, err := readHexInt(br) + if err == nil { + t.Fatalf("expecting error when reading hex int %q", s) + } + if n >= 0 { + t.Fatalf("unexpected hex value read %d for hex int %q. must be negative", n, s) + } +} + +func testReadHexIntSuccess(t *testing.T, s string, expectedN int) { + r := bytes.NewBufferString(s) + br := bufio.NewReader(r) + n, err := readHexInt(br) + if err != nil { + t.Fatalf("unexpected error: %s. 
s=%q", err, s) + } + if n != expectedN { + t.Fatalf("unexpected hex int %d. Expected %d. s=%q", n, expectedN, s) + } +} + +func TestAppendHTTPDate(t *testing.T) { + d := time.Date(2009, time.November, 10, 23, 0, 0, 0, time.UTC) + s := string(AppendHTTPDate(nil, d)) + expectedS := "Tue, 10 Nov 2009 23:00:00 GMT" + if s != expectedS { + t.Fatalf("unexpected date %q. Expecting %q", s, expectedS) + } + + b := []byte("prefix") + s = string(AppendHTTPDate(b, d)) + if s[:len(b)] != string(b) { + t.Fatalf("unexpected prefix %q. Expecting %q", s[:len(b)], b) + } + s = s[len(b):] + if s != expectedS { + t.Fatalf("unexpected date %q. Expecting %q", s, expectedS) + } +} + +func TestParseUintError(t *testing.T) { + // empty string + testParseUintError(t, "") + + // negative value + testParseUintError(t, "-123") + + // non-num + testParseUintError(t, "foobar234") + + // non-num chars at the end + testParseUintError(t, "123w") + + // floating point num + testParseUintError(t, "1234.545") + + // too big num + testParseUintError(t, "12345678901234567890") +} + +func TestParseUfloatSuccess(t *testing.T) { + testParseUfloatSuccess(t, "0", 0) + testParseUfloatSuccess(t, "1.", 1.) + testParseUfloatSuccess(t, ".1", 0.1) + testParseUfloatSuccess(t, "123.456", 123.456) + testParseUfloatSuccess(t, "123", 123) + testParseUfloatSuccess(t, "1234e2", 1234e2) + testParseUfloatSuccess(t, "1234E-5", 1234E-5) + testParseUfloatSuccess(t, "1.234e+3", 1.234e+3) +} + +func TestParseUfloatError(t *testing.T) { + // empty num + testParseUfloatError(t, "") + + // negative num + testParseUfloatError(t, "-123.53") + + // non-num chars + testParseUfloatError(t, "123sdfsd") + testParseUfloatError(t, "sdsf234") + testParseUfloatError(t, "sdfdf") + + // non-num chars in exponent + testParseUfloatError(t, "123e3s") + testParseUfloatError(t, "12.3e-op") + testParseUfloatError(t, "123E+SS5") + + // duplicate point + testParseUfloatError(t, "1.3.4") + + // duplicate exponent + testParseUfloatError(t, "123e5e6") + + // missing exponent + testParseUfloatError(t, "123534e") +} + +func testParseUfloatError(t *testing.T, s string) { + n, err := ParseUfloat([]byte(s)) + if err == nil { + t.Fatalf("Expecting error when parsing %q. obtained %f", s, n) + } + if n >= 0 { + t.Fatalf("Expecting negative num instead of %f when parsing %q", n, s) + } +} + +func testParseUfloatSuccess(t *testing.T, s string, expectedF float64) { + f, err := ParseUfloat([]byte(s)) + if err != nil { + t.Fatalf("Unexpected error when parsing %q: %s", s, err) + } + delta := f - expectedF + if delta < 0 { + delta = -delta + } + if delta > expectedF*1e-10 { + t.Fatalf("Unexpected value when parsing %q: %f. Expected %f", s, f, expectedF) + } +} + +func testParseUintError(t *testing.T, s string) { + n, err := ParseUint([]byte(s)) + if err == nil { + t.Fatalf("Expecting error when parsing %q. obtained %d", s, n) + } + if n >= 0 { + t.Fatalf("Unexpected n=%d when parsing %q. Expected negative num", n, s) + } +} + +func testParseUintSuccess(t *testing.T, s string, expectedN int) { + n, err := ParseUint([]byte(s)) + if err != nil { + t.Fatalf("Unexpected error when parsing %q: %s", s, err) + } + if n != expectedN { + t.Fatalf("Unexpected value %d. Expected %d. 
num=%q", n, expectedN, s) + } +} + +func TestAppendUnquotedArg(t *testing.T) { + testAppendUnquotedArg(t, "", "") + testAppendUnquotedArg(t, "abc", "abc") + testAppendUnquotedArg(t, "тест.abc", "тест.abc") + testAppendUnquotedArg(t, "%D1%82%D0%B5%D1%81%D1%82%20%=&;:", "тест %=&;:") +} + +func testAppendUnquotedArg(t *testing.T, s, expectedS string) { + // test appending to nil + result := AppendUnquotedArg(nil, []byte(s)) + if string(result) != expectedS { + t.Fatalf("Unexpected AppendUnquotedArg(%q)=%q, want %q", s, result, expectedS) + } + + // test appending to prefix + prefix := "prefix" + dst := []byte(prefix) + dst = AppendUnquotedArg(dst, []byte(s)) + if !bytes.HasPrefix(dst, []byte(prefix)) { + t.Fatalf("Unexpected prefix for AppendUnquotedArg(%q)=%q, want %q", s, dst, prefix) + } + result = dst[len(prefix):] + if string(result) != expectedS { + t.Fatalf("Unexpected AppendUnquotedArg(%q)=%q, want %q", s, result, expectedS) + } + + // test in-place appending + result = []byte(s) + result = AppendUnquotedArg(result[:0], result) + if string(result) != expectedS { + t.Fatalf("Unexpected AppendUnquotedArg(%q)=%q, want %q", s, result, expectedS) + } + + // verify AppendQuotedArg <-> AppendUnquotedArg conversion + quotedS := AppendQuotedArg(nil, []byte(s)) + unquotedS := AppendUnquotedArg(nil, quotedS) + if s != string(unquotedS) { + t.Fatalf("Unexpected AppendUnquotedArg(AppendQuotedArg(%q))=%q, want %q", s, unquotedS, s) + } +} diff --git a/vendor/github.com/erikdubbelboer/fasthttp/bytesconv_timing_test.go b/vendor/github.com/erikdubbelboer/fasthttp/bytesconv_timing_test.go new file mode 100644 index 0000000..24b6a49 --- /dev/null +++ b/vendor/github.com/erikdubbelboer/fasthttp/bytesconv_timing_test.go @@ -0,0 +1,175 @@ +package fasthttp + +import ( + "bufio" + "html" + "net" + "testing" +) + +func BenchmarkAppendHTMLEscape(b *testing.B) { + sOrig := "foobarbazxxxyyyzzz" + sExpected := string(AppendHTMLEscape(nil, sOrig)) + b.RunParallel(func(pb *testing.PB) { + var buf []byte + for pb.Next() { + for i := 0; i < 10; i++ { + buf = AppendHTMLEscape(buf[:0], sOrig) + if string(buf) != sExpected { + b.Fatalf("unexpected escaped string: %s. Expecting %s", buf, sExpected) + } + } + } + }) +} + +func BenchmarkHTMLEscapeString(b *testing.B) { + sOrig := "foobarbazxxxyyyzzz" + sExpected := html.EscapeString(sOrig) + b.RunParallel(func(pb *testing.PB) { + var s string + for pb.Next() { + for i := 0; i < 10; i++ { + s = html.EscapeString(sOrig) + if s != sExpected { + b.Fatalf("unexpected escaped string: %s. 
Expecting %s", s, sExpected) + } + } + } + }) +} + +func BenchmarkParseIPv4(b *testing.B) { + ipStr := []byte("123.145.167.189") + b.RunParallel(func(pb *testing.PB) { + var ip net.IP + var err error + for pb.Next() { + ip, err = ParseIPv4(ip, ipStr) + if err != nil { + b.Fatalf("unexpected error: %s", err) + } + } + }) +} + +func BenchmarkAppendIPv4(b *testing.B) { + ip := net.ParseIP("123.145.167.189") + b.RunParallel(func(pb *testing.PB) { + var buf []byte + for pb.Next() { + buf = AppendIPv4(buf[:0], ip) + } + }) +} + +func BenchmarkInt2HexByte(b *testing.B) { + buf := []int{1, 0xf, 2, 0xd, 3, 0xe, 4, 0xa, 5, 0xb, 6, 0xc, 7, 0xf, 0, 0xf, 6, 0xd, 9, 8, 4, 0x5} + b.RunParallel(func(pb *testing.PB) { + var n int + for pb.Next() { + for _, n = range buf { + int2hexbyte(n) + } + } + }) +} + +func BenchmarkWriteHexInt(b *testing.B) { + b.RunParallel(func(pb *testing.PB) { + var w ByteBuffer + bw := bufio.NewWriter(&w) + i := 0 + for pb.Next() { + writeHexInt(bw, i) + i++ + if i > 0x7fffffff { + i = 0 + } + w.Reset() + bw.Reset(&w) + } + }) +} + +func BenchmarkParseUint(b *testing.B) { + b.RunParallel(func(pb *testing.PB) { + buf := []byte("1234567") + for pb.Next() { + n, err := ParseUint(buf) + if err != nil { + b.Fatalf("unexpected error: %s", err) + } + if n != 1234567 { + b.Fatalf("unexpected result: %d. Expecting %s", n, buf) + } + } + }) +} + +func BenchmarkAppendUint(b *testing.B) { + b.RunParallel(func(pb *testing.PB) { + var buf []byte + i := 0 + for pb.Next() { + buf = AppendUint(buf[:0], i) + i++ + if i > 0x7fffffff { + i = 0 + } + } + }) +} + +func BenchmarkLowercaseBytesNoop(b *testing.B) { + src := []byte("foobarbaz_lowercased_all") + b.RunParallel(func(pb *testing.PB) { + s := make([]byte, len(src)) + for pb.Next() { + copy(s, src) + lowercaseBytes(s) + } + }) +} + +func BenchmarkLowercaseBytesAll(b *testing.B) { + src := []byte("FOOBARBAZ_UPPERCASED_ALL") + b.RunParallel(func(pb *testing.PB) { + s := make([]byte, len(src)) + for pb.Next() { + copy(s, src) + lowercaseBytes(s) + } + }) +} + +func BenchmarkLowercaseBytesMixed(b *testing.B) { + src := []byte("Foobarbaz_Uppercased_Mix") + b.RunParallel(func(pb *testing.PB) { + s := make([]byte, len(src)) + for pb.Next() { + copy(s, src) + lowercaseBytes(s) + } + }) +} + +func BenchmarkAppendUnquotedArgFastPath(b *testing.B) { + src := []byte("foobarbaz no quoted chars fdskjsdf jklsdfdfskljd;aflskjdsaf fdsklj fsdkj fsdl kfjsdlk jfsdklj fsdfsdf sdfkflsd") + b.RunParallel(func(pb *testing.PB) { + var dst []byte + for pb.Next() { + dst = AppendUnquotedArg(dst[:0], src) + } + }) +} + +func BenchmarkAppendUnquotedArgSlowPath(b *testing.B) { + src := []byte("D0%B4%20%D0%B0%D0%B2%D0%BB%D0%B4%D1%84%D1%8B%D0%B0%D0%BE%20%D1%84%D0%B2%D0%B6%D0%BB%D0%B4%D1%8B%20%D0%B0%D0%BE") + b.RunParallel(func(pb *testing.PB) { + var dst []byte + for pb.Next() { + dst = AppendUnquotedArg(dst[:0], src) + } + }) +} diff --git a/vendor/github.com/erikdubbelboer/fasthttp/client.go b/vendor/github.com/erikdubbelboer/fasthttp/client.go new file mode 100644 index 0000000..fccc117 --- /dev/null +++ b/vendor/github.com/erikdubbelboer/fasthttp/client.go @@ -0,0 +1,2207 @@ +package fasthttp + +import ( + "bufio" + "bytes" + "crypto/tls" + "errors" + "fmt" + "io" + "net" + "strings" + "sync" + "sync/atomic" + "time" +) + +// Do performs the given http request and fills the given http response. +// +// Request must contain at least non-zero RequestURI with full url (including +// scheme and host) or non-zero Host header + RequestURI. 
+// +// Client determines the server to be requested in the following order: +// +// - from RequestURI if it contains full url with scheme and host; +// - from Host header otherwise. +// +// The function doesn't follow redirects. Use Get* for following redirects. +// +// Response is ignored if resp is nil. +// +// ErrNoFreeConns is returned if all DefaultMaxConnsPerHost connections +// to the requested host are busy. +// +// It is recommended obtaining req and resp via AcquireRequest +// and AcquireResponse in performance-critical code. +func Do(req *Request, resp *Response) error { + return defaultClient.Do(req, resp) +} + +// DoTimeout performs the given request and waits for response during +// the given timeout duration. +// +// Request must contain at least non-zero RequestURI with full url (including +// scheme and host) or non-zero Host header + RequestURI. +// +// Client determines the server to be requested in the following order: +// +// - from RequestURI if it contains full url with scheme and host; +// - from Host header otherwise. +// +// The function doesn't follow redirects. Use Get* for following redirects. +// +// Response is ignored if resp is nil. +// +// ErrTimeout is returned if the response wasn't returned during +// the given timeout. +// +// ErrNoFreeConns is returned if all DefaultMaxConnsPerHost connections +// to the requested host are busy. +// +// It is recommended obtaining req and resp via AcquireRequest +// and AcquireResponse in performance-critical code. +func DoTimeout(req *Request, resp *Response, timeout time.Duration) error { + return defaultClient.DoTimeout(req, resp, timeout) +} + +// DoDeadline performs the given request and waits for response until +// the given deadline. +// +// Request must contain at least non-zero RequestURI with full url (including +// scheme and host) or non-zero Host header + RequestURI. +// +// Client determines the server to be requested in the following order: +// +// - from RequestURI if it contains full url with scheme and host; +// - from Host header otherwise. +// +// The function doesn't follow redirects. Use Get* for following redirects. +// +// Response is ignored if resp is nil. +// +// ErrTimeout is returned if the response wasn't returned until +// the given deadline. +// +// ErrNoFreeConns is returned if all DefaultMaxConnsPerHost connections +// to the requested host are busy. +// +// It is recommended obtaining req and resp via AcquireRequest +// and AcquireResponse in performance-critical code. +func DoDeadline(req *Request, resp *Response, deadline time.Time) error { + return defaultClient.DoDeadline(req, resp, deadline) +} + +// Get appends url contents to dst and returns it as body. +// +// The function follows redirects. Use Do* for manually handling redirects. +// +// New body buffer is allocated if dst is nil. +func Get(dst []byte, url string) (statusCode int, body []byte, err error) { + return defaultClient.Get(dst, url) +} + +// GetTimeout appends url contents to dst and returns it as body. +// +// The function follows redirects. Use Do* for manually handling redirects. +// +// New body buffer is allocated if dst is nil. +// +// ErrTimeout error is returned if url contents couldn't be fetched +// during the given timeout. +func GetTimeout(dst []byte, url string, timeout time.Duration) (statusCode int, body []byte, err error) { + return defaultClient.GetTimeout(dst, url, timeout) +} + +// GetDeadline appends url contents to dst and returns it as body. +// +// The function follows redirects. 
Use Do* for manually handling redirects. +// +// New body buffer is allocated if dst is nil. +// +// ErrTimeout error is returned if url contents couldn't be fetched +// until the given deadline. +func GetDeadline(dst []byte, url string, deadline time.Time) (statusCode int, body []byte, err error) { + return defaultClient.GetDeadline(dst, url, deadline) +} + +// Post sends POST request to the given url with the given POST arguments. +// +// Response body is appended to dst, which is returned as body. +// +// The function follows redirects. Use Do* for manually handling redirects. +// +// New body buffer is allocated if dst is nil. +// +// Empty POST body is sent if postArgs is nil. +func Post(dst []byte, url string, postArgs *Args) (statusCode int, body []byte, err error) { + return defaultClient.Post(dst, url, postArgs) +} + +var defaultClient Client + +// Client implements http client. +// +// Copying Client by value is prohibited. Create new instance instead. +// +// It is safe calling Client methods from concurrently running goroutines. +type Client struct { + noCopy noCopy + + // Client name. Used in User-Agent request header. + // + // Default client name is used if not set. + Name string + + // Callback for establishing new connections to hosts. + // + // Default Dial is used if not set. + Dial DialFunc + + // Attempt to connect to both ipv4 and ipv6 addresses if set to true. + // + // This option is used only if default TCP dialer is used, + // i.e. if Dial is blank. + // + // By default client connects only to ipv4 addresses, + // since unfortunately ipv6 remains broken in many networks worldwide :) + DialDualStack bool + + // TLS config for https connections. + // + // Default TLS config is used if not set. + TLSConfig *tls.Config + + // Maximum number of connections per each host which may be established. + // + // DefaultMaxConnsPerHost is used if not set. + MaxConnsPerHost int + + // Idle keep-alive connections are closed after this duration. + // + // By default idle connections are closed + // after DefaultMaxIdleConnDuration. + MaxIdleConnDuration time.Duration + + // Per-connection buffer size for responses' reading. + // This also limits the maximum header size. + // + // Default buffer size is used if 0. + ReadBufferSize int + + // Per-connection buffer size for requests' writing. + // + // Default buffer size is used if 0. + WriteBufferSize int + + // Maximum duration for full response reading (including body). + // + // By default response read timeout is unlimited. + ReadTimeout time.Duration + + // Maximum duration for full request writing (including body). + // + // By default request write timeout is unlimited. + WriteTimeout time.Duration + + // Maximum response body size. + // + // The client returns ErrBodyTooLarge if this limit is greater than 0 + // and response body is greater than the limit. + // + // By default response body size is unlimited. + MaxResponseBodySize int + + // Header names are passed as-is without normalization + // if this option is set. + // + // Disabled header names' normalization may be useful only for proxying + // responses to other clients expecting case-sensitive + // header names. See https://github.com/valyala/fasthttp/issues/57 + // for details. + // + // By default request and response header names are normalized, i.e. + // The first letter and the first letters following dashes + // are uppercased, while all the other letters are lowercased. 
+ // Examples: + // + // * HOST -> Host + // * content-type -> Content-Type + // * cONTENT-lenGTH -> Content-Length + DisableHeaderNamesNormalizing bool + + mLock sync.Mutex + m map[string]*HostClient + ms map[string]*HostClient +} + +// Get appends url contents to dst and returns it as body. +// +// The function follows redirects. Use Do* for manually handling redirects. +// +// New body buffer is allocated if dst is nil. +func (c *Client) Get(dst []byte, url string) (statusCode int, body []byte, err error) { + return clientGetURL(dst, url, c) +} + +// GetTimeout appends url contents to dst and returns it as body. +// +// The function follows redirects. Use Do* for manually handling redirects. +// +// New body buffer is allocated if dst is nil. +// +// ErrTimeout error is returned if url contents couldn't be fetched +// during the given timeout. +func (c *Client) GetTimeout(dst []byte, url string, timeout time.Duration) (statusCode int, body []byte, err error) { + return clientGetURLTimeout(dst, url, timeout, c) +} + +// GetDeadline appends url contents to dst and returns it as body. +// +// The function follows redirects. Use Do* for manually handling redirects. +// +// New body buffer is allocated if dst is nil. +// +// ErrTimeout error is returned if url contents couldn't be fetched +// until the given deadline. +func (c *Client) GetDeadline(dst []byte, url string, deadline time.Time) (statusCode int, body []byte, err error) { + return clientGetURLDeadline(dst, url, deadline, c) +} + +// Post sends POST request to the given url with the given POST arguments. +// +// Response body is appended to dst, which is returned as body. +// +// The function follows redirects. Use Do* for manually handling redirects. +// +// New body buffer is allocated if dst is nil. +// +// Empty POST body is sent if postArgs is nil. +func (c *Client) Post(dst []byte, url string, postArgs *Args) (statusCode int, body []byte, err error) { + return clientPostURL(dst, url, postArgs, c) +} + +// DoTimeout performs the given request and waits for response during +// the given timeout duration. +// +// Request must contain at least non-zero RequestURI with full url (including +// scheme and host) or non-zero Host header + RequestURI. +// +// Client determines the server to be requested in the following order: +// +// - from RequestURI if it contains full url with scheme and host; +// - from Host header otherwise. +// +// The function doesn't follow redirects. Use Get* for following redirects. +// +// Response is ignored if resp is nil. +// +// ErrTimeout is returned if the response wasn't returned during +// the given timeout. +// +// ErrNoFreeConns is returned if all Client.MaxConnsPerHost connections +// to the requested host are busy. +// +// It is recommended obtaining req and resp via AcquireRequest +// and AcquireResponse in performance-critical code. +func (c *Client) DoTimeout(req *Request, resp *Response, timeout time.Duration) error { + return clientDoTimeout(req, resp, timeout, c) +} + +// DoDeadline performs the given request and waits for response until +// the given deadline. +// +// Request must contain at least non-zero RequestURI with full url (including +// scheme and host) or non-zero Host header + RequestURI. +// +// Client determines the server to be requested in the following order: +// +// - from RequestURI if it contains full url with scheme and host; +// - from Host header otherwise. +// +// The function doesn't follow redirects. Use Get* for following redirects. 
+// +// Response is ignored if resp is nil. +// +// ErrTimeout is returned if the response wasn't returned until +// the given deadline. +// +// ErrNoFreeConns is returned if all Client.MaxConnsPerHost connections +// to the requested host are busy. +// +// It is recommended obtaining req and resp via AcquireRequest +// and AcquireResponse in performance-critical code. +func (c *Client) DoDeadline(req *Request, resp *Response, deadline time.Time) error { + return clientDoDeadline(req, resp, deadline, c) +} + +// Do performs the given http request and fills the given http response. +// +// Request must contain at least non-zero RequestURI with full url (including +// scheme and host) or non-zero Host header + RequestURI. +// +// Client determines the server to be requested in the following order: +// +// - from RequestURI if it contains full url with scheme and host; +// - from Host header otherwise. +// +// Response is ignored if resp is nil. +// +// The function doesn't follow redirects. Use Get* for following redirects. +// +// ErrNoFreeConns is returned if all Client.MaxConnsPerHost connections +// to the requested host are busy. +// +// It is recommended obtaining req and resp via AcquireRequest +// and AcquireResponse in performance-critical code. +func (c *Client) Do(req *Request, resp *Response) error { + uri := req.URI() + host := uri.Host() + + isTLS := false + scheme := uri.Scheme() + if bytes.Equal(scheme, strHTTPS) { + isTLS = true + } else if !bytes.Equal(scheme, strHTTP) { + return fmt.Errorf("unsupported protocol %q. http and https are supported", scheme) + } + + startCleaner := false + + c.mLock.Lock() + m := c.m + if isTLS { + m = c.ms + } + if m == nil { + m = make(map[string]*HostClient) + if isTLS { + c.ms = m + } else { + c.m = m + } + } + hc := m[string(host)] + if hc == nil { + hc = &HostClient{ + Addr: addMissingPort(string(host), isTLS), + Name: c.Name, + Dial: c.Dial, + DialDualStack: c.DialDualStack, + IsTLS: isTLS, + TLSConfig: c.TLSConfig, + MaxConns: c.MaxConnsPerHost, + MaxIdleConnDuration: c.MaxIdleConnDuration, + ReadBufferSize: c.ReadBufferSize, + WriteBufferSize: c.WriteBufferSize, + ReadTimeout: c.ReadTimeout, + WriteTimeout: c.WriteTimeout, + MaxResponseBodySize: c.MaxResponseBodySize, + DisableHeaderNamesNormalizing: c.DisableHeaderNamesNormalizing, + } + m[string(host)] = hc + if len(m) == 1 { + startCleaner = true + } + } + c.mLock.Unlock() + + if startCleaner { + go c.mCleaner(m) + } + + return hc.Do(req, resp) +} + +func (c *Client) mCleaner(m map[string]*HostClient) { + mustStop := false + for { + t := time.Now() + c.mLock.Lock() + for k, v := range m { + if t.Sub(v.LastUseTime()) > time.Minute { + delete(m, k) + } + } + if len(m) == 0 { + mustStop = true + } + c.mLock.Unlock() + + if mustStop { + break + } + time.Sleep(10 * time.Second) + } +} + +// DefaultMaxConnsPerHost is the maximum number of concurrent connections +// http client may establish per host by default (i.e. if +// Client.MaxConnsPerHost isn't set). +const DefaultMaxConnsPerHost = 512 + +// DefaultMaxIdleConnDuration is the default duration before idle keep-alive +// connection is closed. +const DefaultMaxIdleConnDuration = 10 * time.Second + +// DefaultMaxIdemponentCallAttempts is the default idempotent calls attempts count. +const DefaultMaxIdemponentCallAttempts = 5 + +// DialFunc must establish connection to addr. +// +// There is no need in establishing TLS (SSL) connection for https. +// The client automatically converts connection to TLS +// if HostClient.IsTLS is set. 
+// +// TCP address passed to DialFunc always contains host and port. +// Example TCP addr values: +// +// - foobar.com:80 +// - foobar.com:443 +// - foobar.com:8080 +type DialFunc func(addr string) (net.Conn, error) + +// HostClient balances http requests among hosts listed in Addr. +// +// HostClient may be used for balancing load among multiple upstream hosts. +// While multiple addresses passed to HostClient.Addr may be used for balancing +// load among them, it would be better using LBClient instead, since HostClient +// may unevenly balance load among upstream hosts. +// +// It is forbidden copying HostClient instances. Create new instances instead. +// +// It is safe calling HostClient methods from concurrently running goroutines. +type HostClient struct { + noCopy noCopy + + // Comma-separated list of upstream HTTP server host addresses, + // which are passed to Dial in a round-robin manner. + // + // Each address may contain port if default dialer is used. + // For example, + // + // - foobar.com:80 + // - foobar.com:443 + // - foobar.com:8080 + Addr string + + // Client name. Used in User-Agent request header. + Name string + + // Callback for establishing new connection to the host. + // + // Default Dial is used if not set. + Dial DialFunc + + // Attempt to connect to both ipv4 and ipv6 host addresses + // if set to true. + // + // This option is used only if default TCP dialer is used, + // i.e. if Dial is blank. + // + // By default client connects only to ipv4 addresses, + // since unfortunately ipv6 remains broken in many networks worldwide :) + DialDualStack bool + + // Whether to use TLS (aka SSL or HTTPS) for host connections. + IsTLS bool + + // Optional TLS config. + TLSConfig *tls.Config + + // Maximum number of connections which may be established to all hosts + // listed in Addr. + // + // DefaultMaxConnsPerHost is used if not set. + MaxConns int + + // Keep-alive connections are closed after this duration. + // + // By default connection duration is unlimited. + MaxConnDuration time.Duration + + // Idle keep-alive connections are closed after this duration. + // + // By default idle connections are closed + // after DefaultMaxIdleConnDuration. + MaxIdleConnDuration time.Duration + + // Maximum number of attempts for idempotent calls + // + // DefaultMaxIdemponentCallAttempts is used if not set. + MaxIdemponentCallAttempts int + + // Per-connection buffer size for responses' reading. + // This also limits the maximum header size. + // + // Default buffer size is used if 0. + ReadBufferSize int + + // Per-connection buffer size for requests' writing. + // + // Default buffer size is used if 0. + WriteBufferSize int + + // Maximum duration for full response reading (including body). + // + // By default response read timeout is unlimited. + ReadTimeout time.Duration + + // Maximum duration for full request writing (including body). + // + // By default request write timeout is unlimited. + WriteTimeout time.Duration + + // Maximum response body size. + // + // The client returns ErrBodyTooLarge if this limit is greater than 0 + // and response body is greater than the limit. + // + // By default response body size is unlimited. + MaxResponseBodySize int + + // Header names are passed as-is without normalization + // if this option is set. + // + // Disabled header names' normalization may be useful only for proxying + // responses to other clients expecting case-sensitive + // header names. See https://github.com/valyala/fasthttp/issues/57 + // for details. 
+ // + // By default request and response header names are normalized, i.e. + // The first letter and the first letters following dashes + // are uppercased, while all the other letters are lowercased. + // Examples: + // + // * HOST -> Host + // * content-type -> Content-Type + // * cONTENT-lenGTH -> Content-Length + DisableHeaderNamesNormalizing bool + + clientName atomic.Value + lastUseTime uint32 + + connsLock sync.Mutex + connsCount int + conns []*clientConn + + addrsLock sync.Mutex + addrs []string + addrIdx uint32 + + tlsConfigMap map[string]*tls.Config + tlsConfigMapLock sync.Mutex + + readerPool sync.Pool + writerPool sync.Pool + + pendingRequests uint64 + + connsCleanerRun bool +} + +type clientConn struct { + c net.Conn + + createdTime time.Time + lastUseTime time.Time + + lastReadDeadlineTime time.Time + lastWriteDeadlineTime time.Time +} + +var startTimeUnix = time.Now().Unix() + +// LastUseTime returns time the client was last used +func (c *HostClient) LastUseTime() time.Time { + n := atomic.LoadUint32(&c.lastUseTime) + return time.Unix(startTimeUnix+int64(n), 0) +} + +// Get appends url contents to dst and returns it as body. +// +// The function follows redirects. Use Do* for manually handling redirects. +// +// New body buffer is allocated if dst is nil. +func (c *HostClient) Get(dst []byte, url string) (statusCode int, body []byte, err error) { + return clientGetURL(dst, url, c) +} + +// GetTimeout appends url contents to dst and returns it as body. +// +// The function follows redirects. Use Do* for manually handling redirects. +// +// New body buffer is allocated if dst is nil. +// +// ErrTimeout error is returned if url contents couldn't be fetched +// during the given timeout. +func (c *HostClient) GetTimeout(dst []byte, url string, timeout time.Duration) (statusCode int, body []byte, err error) { + return clientGetURLTimeout(dst, url, timeout, c) +} + +// GetDeadline appends url contents to dst and returns it as body. +// +// The function follows redirects. Use Do* for manually handling redirects. +// +// New body buffer is allocated if dst is nil. +// +// ErrTimeout error is returned if url contents couldn't be fetched +// until the given deadline. +func (c *HostClient) GetDeadline(dst []byte, url string, deadline time.Time) (statusCode int, body []byte, err error) { + return clientGetURLDeadline(dst, url, deadline, c) +} + +// Post sends POST request to the given url with the given POST arguments. +// +// Response body is appended to dst, which is returned as body. +// +// The function follows redirects. Use Do* for manually handling redirects. +// +// New body buffer is allocated if dst is nil. +// +// Empty POST body is sent if postArgs is nil. 
+func (c *HostClient) Post(dst []byte, url string, postArgs *Args) (statusCode int, body []byte, err error) { + return clientPostURL(dst, url, postArgs, c) +} + +type clientDoer interface { + Do(req *Request, resp *Response) error +} + +func clientGetURL(dst []byte, url string, c clientDoer) (statusCode int, body []byte, err error) { + req := AcquireRequest() + + statusCode, body, err = doRequestFollowRedirects(req, dst, url, c) + + ReleaseRequest(req) + return statusCode, body, err +} + +func clientGetURLTimeout(dst []byte, url string, timeout time.Duration, c clientDoer) (statusCode int, body []byte, err error) { + deadline := time.Now().Add(timeout) + return clientGetURLDeadline(dst, url, deadline, c) +} + +type clientURLResponse struct { + statusCode int + body []byte + err error +} + +func clientGetURLDeadline(dst []byte, url string, deadline time.Time, c clientDoer) (statusCode int, body []byte, err error) { + timeout := -time.Since(deadline) + if timeout <= 0 { + return 0, dst, ErrTimeout + } + + var ch chan clientURLResponse + chv := clientURLResponseChPool.Get() + if chv == nil { + chv = make(chan clientURLResponse, 1) + } + ch = chv.(chan clientURLResponse) + + req := AcquireRequest() + + // Note that the request continues execution on ErrTimeout until + // client-specific ReadTimeout exceeds. This helps limiting load + // on slow hosts by MaxConns* concurrent requests. + // + // Without this 'hack' the load on slow host could exceed MaxConns* + // concurrent requests, since timed out requests on client side + // usually continue execution on the host. + go func() { + statusCodeCopy, bodyCopy, errCopy := doRequestFollowRedirects(req, dst, url, c) + ch <- clientURLResponse{ + statusCode: statusCodeCopy, + body: bodyCopy, + err: errCopy, + } + }() + + tc := acquireTimer(timeout) + select { + case resp := <-ch: + ReleaseRequest(req) + clientURLResponseChPool.Put(chv) + statusCode = resp.statusCode + body = resp.body + err = resp.err + case <-tc.C: + body = dst + err = ErrTimeout + } + releaseTimer(tc) + + return statusCode, body, err +} + +var clientURLResponseChPool sync.Pool + +func clientPostURL(dst []byte, url string, postArgs *Args, c clientDoer) (statusCode int, body []byte, err error) { + req := AcquireRequest() + req.Header.SetMethodBytes(strPost) + req.Header.SetContentTypeBytes(strPostArgsContentType) + if postArgs != nil { + postArgs.WriteTo(req.BodyWriter()) + } + + statusCode, body, err = doRequestFollowRedirects(req, dst, url, c) + + ReleaseRequest(req) + return statusCode, body, err +} + +var ( + errMissingLocation = errors.New("missing Location header for http redirect") + errTooManyRedirects = errors.New("too many redirects detected when doing the request") +) + +const maxRedirectsCount = 16 + +func doRequestFollowRedirects(req *Request, dst []byte, url string, c clientDoer) (statusCode int, body []byte, err error) { + resp := AcquireResponse() + bodyBuf := resp.bodyBuffer() + resp.keepBodyBuffer = true + oldBody := bodyBuf.B + bodyBuf.B = dst + scheme := req.uri.Scheme() + req.schemaUpdate = false + + redirectsCount := 0 + for { + // In case redirect to different scheme + if redirectsCount > 0 && !bytes.Equal(scheme, req.uri.Scheme()) { + if strings.HasPrefix(url, string(strHTTPS)) { + req.isTLS = true + req.uri.SetSchemeBytes(strHTTPS) + } else { + req.isTLS = false + req.uri.SetSchemeBytes(strHTTP) + } + scheme = req.uri.Scheme() + req.schemaUpdate = true + } + + req.parsedURI = false + req.Header.host = req.Header.host[:0] + req.SetRequestURI(url) + + if err = 
c.Do(req, resp); err != nil { + break + } + statusCode = resp.Header.StatusCode() + if statusCode != StatusMovedPermanently && statusCode != StatusFound && statusCode != StatusSeeOther { + break + } + + redirectsCount++ + if redirectsCount > maxRedirectsCount { + err = errTooManyRedirects + break + } + location := resp.Header.peek(strLocation) + if len(location) == 0 { + err = errMissingLocation + break + } + url = getRedirectURL(url, location) + } + + body = bodyBuf.B + bodyBuf.B = oldBody + resp.keepBodyBuffer = false + ReleaseResponse(resp) + + return statusCode, body, err +} + +func getRedirectURL(baseURL string, location []byte) string { + u := AcquireURI() + u.Update(baseURL) + u.UpdateBytes(location) + redirectURL := u.String() + ReleaseURI(u) + return redirectURL +} + +var ( + requestPool sync.Pool + responsePool sync.Pool +) + +// AcquireRequest returns an empty Request instance from request pool. +// +// The returned Request instance may be passed to ReleaseRequest when it is +// no longer needed. This allows Request recycling, reduces GC pressure +// and usually improves performance. +func AcquireRequest() *Request { + v := requestPool.Get() + if v == nil { + return &Request{} + } + return v.(*Request) +} + +// ReleaseRequest returns req acquired via AcquireRequest to request pool. +// +// It is forbidden accessing req and/or its' members after returning +// it to request pool. +func ReleaseRequest(req *Request) { + req.Reset() + requestPool.Put(req) +} + +// AcquireResponse returns an empty Response instance from response pool. +// +// The returned Response instance may be passed to ReleaseResponse when it is +// no longer needed. This allows Response recycling, reduces GC pressure +// and usually improves performance. +func AcquireResponse() *Response { + v := responsePool.Get() + if v == nil { + return &Response{} + } + return v.(*Response) +} + +// ReleaseResponse return resp acquired via AcquireResponse to response pool. +// +// It is forbidden accessing resp and/or its' members after returning +// it to response pool. +func ReleaseResponse(resp *Response) { + resp.Reset() + responsePool.Put(resp) +} + +// DoTimeout performs the given request and waits for response during +// the given timeout duration. +// +// Request must contain at least non-zero RequestURI with full url (including +// scheme and host) or non-zero Host header + RequestURI. +// +// The function doesn't follow redirects. Use Get* for following redirects. +// +// Response is ignored if resp is nil. +// +// ErrTimeout is returned if the response wasn't returned during +// the given timeout. +// +// ErrNoFreeConns is returned if all HostClient.MaxConns connections +// to the host are busy. +// +// It is recommended obtaining req and resp via AcquireRequest +// and AcquireResponse in performance-critical code. +func (c *HostClient) DoTimeout(req *Request, resp *Response, timeout time.Duration) error { + return clientDoTimeout(req, resp, timeout, c) +} + +// DoDeadline performs the given request and waits for response until +// the given deadline. +// +// Request must contain at least non-zero RequestURI with full url (including +// scheme and host) or non-zero Host header + RequestURI. +// +// The function doesn't follow redirects. Use Get* for following redirects. +// +// Response is ignored if resp is nil. +// +// ErrTimeout is returned if the response wasn't returned until +// the given deadline. +// +// ErrNoFreeConns is returned if all HostClient.MaxConns connections +// to the host are busy. 
+// +// It is recommended obtaining req and resp via AcquireRequest +// and AcquireResponse in performance-critical code. +func (c *HostClient) DoDeadline(req *Request, resp *Response, deadline time.Time) error { + return clientDoDeadline(req, resp, deadline, c) +} + +func clientDoTimeout(req *Request, resp *Response, timeout time.Duration, c clientDoer) error { + deadline := time.Now().Add(timeout) + return clientDoDeadline(req, resp, deadline, c) +} + +func clientDoDeadline(req *Request, resp *Response, deadline time.Time, c clientDoer) error { + timeout := -time.Since(deadline) + if timeout <= 0 { + return ErrTimeout + } + + var ch chan error + chv := errorChPool.Get() + if chv == nil { + chv = make(chan error, 1) + } + ch = chv.(chan error) + + // Make req and resp copies, since on timeout they no longer + // may be accessed. + reqCopy := AcquireRequest() + req.copyToSkipBody(reqCopy) + swapRequestBody(req, reqCopy) + respCopy := AcquireResponse() + + // Note that the request continues execution on ErrTimeout until + // client-specific ReadTimeout exceeds. This helps limiting load + // on slow hosts by MaxConns* concurrent requests. + // + // Without this 'hack' the load on slow host could exceed MaxConns* + // concurrent requests, since timed out requests on client side + // usually continue execution on the host. + go func() { + ch <- c.Do(reqCopy, respCopy) + }() + + tc := acquireTimer(timeout) + var err error + select { + case err = <-ch: + if resp != nil { + respCopy.copyToSkipBody(resp) + swapResponseBody(resp, respCopy) + } + swapRequestBody(reqCopy, req) + ReleaseResponse(respCopy) + ReleaseRequest(reqCopy) + errorChPool.Put(chv) + case <-tc.C: + err = ErrTimeout + } + releaseTimer(tc) + + return err +} + +var errorChPool sync.Pool + +// Do performs the given http request and sets the corresponding response. +// +// Request must contain at least non-zero RequestURI with full url (including +// scheme and host) or non-zero Host header + RequestURI. +// +// The function doesn't follow redirects. Use Get* for following redirects. +// +// Response is ignored if resp is nil. +// +// ErrNoFreeConns is returned if all HostClient.MaxConns connections +// to the host are busy. +// +// It is recommended obtaining req and resp via AcquireRequest +// and AcquireResponse in performance-critical code. +func (c *HostClient) Do(req *Request, resp *Response) error { + var err error + var retry bool + maxAttempts := c.MaxIdemponentCallAttempts + if maxAttempts <= 0 { + maxAttempts = DefaultMaxIdemponentCallAttempts + } + attempts := 0 + + atomic.AddUint64(&c.pendingRequests, 1) + for { + retry, err = c.do(req, resp) + if err == nil || !retry { + break + } + + if !isIdempotent(req) { + // Retry non-idempotent requests if the server closes + // the connection before sending the response. + // + // This case is possible if the server closes the idle + // keep-alive connection on timeout. + // + // Apache and nginx usually do this. + if err != io.EOF { + break + } + } + attempts++ + if attempts >= maxAttempts { + break + } + } + atomic.AddUint64(&c.pendingRequests, ^uint64(0)) + + if err == io.EOF { + err = ErrConnectionClosed + } + return err +} + +// PendingRequests returns the current number of requests the client +// is executing. +// +// This function may be used for balancing load among multiple HostClient +// instances. 
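// Editorial sketch (not part of the vendored fasthttp source): a minimal
// helper showing the DoTimeout contract documented above, with req and resp
// obtained from the pools via AcquireRequest/AcquireResponse as the comments
// recommend. The url argument and the one-second timeout are illustrative
// placeholders.
func hostClientDoTimeoutSketch(c *HostClient, url string) (int, []byte, error) {
	req := AcquireRequest()
	resp := AcquireResponse()
	defer ReleaseRequest(req)
	defer ReleaseResponse(resp)

	// DoTimeout does not follow redirects, so the URI must be final and
	// must contain the scheme and host.
	req.SetRequestURI(url)

	if err := c.DoTimeout(req, resp, time.Second); err != nil {
		// Possible errors include ErrTimeout (deadline exceeded) and
		// ErrNoFreeConns (all MaxConns connections to the host are busy).
		return 0, nil, err
	}

	// Copy the body before resp is released back to the pool.
	body := append([]byte(nil), resp.Body()...)
	return resp.StatusCode(), body, nil
}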
+func (c *HostClient) PendingRequests() int { + return int(atomic.LoadUint64(&c.pendingRequests)) +} + +func isIdempotent(req *Request) bool { + return req.Header.IsGet() || req.Header.IsHead() || req.Header.IsPut() +} + +func (c *HostClient) do(req *Request, resp *Response) (bool, error) { + nilResp := false + if resp == nil { + nilResp = true + resp = AcquireResponse() + } + + ok, err := c.doNonNilReqResp(req, resp) + + if nilResp { + ReleaseResponse(resp) + } + + return ok, err +} + +func (c *HostClient) doNonNilReqResp(req *Request, resp *Response) (bool, error) { + if req == nil { + panic("BUG: req cannot be nil") + } + if resp == nil { + panic("BUG: resp cannot be nil") + } + + atomic.StoreUint32(&c.lastUseTime, uint32(time.Now().Unix()-startTimeUnix)) + + // Free up resources occupied by response before sending the request, + // so the GC may reclaim these resources (e.g. response body). + resp.Reset() + + // If we detected a redirect to another schema + if req.schemaUpdate { + if bytes.Equal(req.URI().Scheme(), strHTTPS) { + c.IsTLS = true + c.Addr = addMissingPort(string(req.Host()), true) + c.addrIdx = 0 + c.addrs = nil + } else { + c.IsTLS = false + c.Addr = addMissingPort(string(req.Host()), false) + c.addrIdx = 0 + c.addrs = nil + } + req.schemaUpdate = false + } + + cc, err := c.acquireConn() + if err != nil { + return false, err + } + conn := cc.c + + if c.WriteTimeout > 0 { + // Optimization: update write deadline only if more than 25% + // of the last write deadline exceeded. + // See https://github.com/golang/go/issues/15133 for details. + currentTime := time.Now() + if currentTime.Sub(cc.lastWriteDeadlineTime) > (c.WriteTimeout >> 2) { + if err = conn.SetWriteDeadline(currentTime.Add(c.WriteTimeout)); err != nil { + c.closeConn(cc) + return true, err + } + cc.lastWriteDeadlineTime = currentTime + } + } + + resetConnection := false + if c.MaxConnDuration > 0 && time.Since(cc.createdTime) > c.MaxConnDuration && !req.ConnectionClose() { + req.SetConnectionClose() + resetConnection = true + } + + userAgentOld := req.Header.UserAgent() + if len(userAgentOld) == 0 { + req.Header.userAgent = c.getClientName() + } + bw := c.acquireWriter(conn) + err = req.Write(bw) + + if resetConnection { + req.Header.ResetConnectionClose() + } + + if err == nil { + err = bw.Flush() + } + if err != nil { + c.releaseWriter(bw) + c.closeConn(cc) + return true, err + } + c.releaseWriter(bw) + + if c.ReadTimeout > 0 { + // Optimization: update read deadline only if more than 25% + // of the last read deadline exceeded. + // See https://github.com/golang/go/issues/15133 for details. + currentTime := time.Now() + if currentTime.Sub(cc.lastReadDeadlineTime) > (c.ReadTimeout >> 2) { + if err = conn.SetReadDeadline(currentTime.Add(c.ReadTimeout)); err != nil { + c.closeConn(cc) + return true, err + } + cc.lastReadDeadlineTime = currentTime + } + } + + if !req.Header.IsGet() && req.Header.IsHead() { + resp.SkipBody = true + } + if c.DisableHeaderNamesNormalizing { + resp.Header.DisableNormalizing() + } + + br := c.acquireReader(conn) + if err = resp.ReadLimitBody(br, c.MaxResponseBodySize); err != nil { + c.releaseReader(br) + c.closeConn(cc) + return true, err + } + c.releaseReader(br) + + if resetConnection || req.ConnectionClose() || resp.ConnectionClose() { + c.closeConn(cc) + } else { + c.releaseConn(cc) + } + + return false, err +} + +var ( + // ErrNoFreeConns is returned when no free connections available + // to the given host. 
+ // + // Increase the allowed number of connections per host if you + // see this error. + ErrNoFreeConns = errors.New("no free connections available to host") + + // ErrTimeout is returned from timed out calls. + ErrTimeout = errors.New("timeout") + + // ErrConnectionClosed may be returned from client methods if the server + // closes connection before returning the first response byte. + // + // If you see this error, then either fix the server by returning + // 'Connection: close' response header before closing the connection + // or add 'Connection: close' request header before sending requests + // to broken server. + ErrConnectionClosed = errors.New("the server closed connection before returning the first response byte. " + + "Make sure the server returns 'Connection: close' response header before closing the connection") +) + +func (c *HostClient) acquireConn() (*clientConn, error) { + var cc *clientConn + createConn := false + startCleaner := false + + var n int + c.connsLock.Lock() + n = len(c.conns) + if n == 0 { + maxConns := c.MaxConns + if maxConns <= 0 { + maxConns = DefaultMaxConnsPerHost + } + if c.connsCount < maxConns { + c.connsCount++ + createConn = true + if !c.connsCleanerRun { + startCleaner = true + c.connsCleanerRun = true + } + } + } else { + n-- + cc = c.conns[n] + c.conns[n] = nil + c.conns = c.conns[:n] + } + c.connsLock.Unlock() + + if cc != nil { + return cc, nil + } + if !createConn { + return nil, ErrNoFreeConns + } + + if startCleaner { + go c.connsCleaner() + } + + conn, err := c.dialHostHard() + if err != nil { + c.decConnsCount() + return nil, err + } + cc = acquireClientConn(conn) + + return cc, nil +} + +func (c *HostClient) connsCleaner() { + var ( + scratch []*clientConn + maxIdleConnDuration = c.MaxIdleConnDuration + ) + if maxIdleConnDuration <= 0 { + maxIdleConnDuration = DefaultMaxIdleConnDuration + } + for { + currentTime := time.Now() + + // Determine idle connections to be closed. + c.connsLock.Lock() + conns := c.conns + n := len(conns) + i := 0 + for i < n && currentTime.Sub(conns[i].lastUseTime) > maxIdleConnDuration { + i++ + } + sleepFor := maxIdleConnDuration + if i < n { + // + 1 so we actually sleep past the expiration time and not up to it. + // Otherwise the > check above would still fail. + sleepFor = maxIdleConnDuration - currentTime.Sub(conns[i].lastUseTime) + 1 + } + scratch = append(scratch[:0], conns[:i]...) + if i > 0 { + m := copy(conns, conns[i:]) + for i = m; i < n; i++ { + conns[i] = nil + } + c.conns = conns[:m] + } + c.connsLock.Unlock() + + // Close idle connections. + for i, cc := range scratch { + c.closeConn(cc) + scratch[i] = nil + } + + // Determine whether to stop the connsCleaner. 
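+	// The cleaner goroutine exits once the host has no open connections left;
+	// acquireConn starts a new one the next time a connection is created.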
+ c.connsLock.Lock() + mustStop := c.connsCount == 0 + if mustStop { + c.connsCleanerRun = false + } + c.connsLock.Unlock() + if mustStop { + break + } + + time.Sleep(sleepFor) + } +} + +func (c *HostClient) closeConn(cc *clientConn) { + c.decConnsCount() + cc.c.Close() + releaseClientConn(cc) +} + +func (c *HostClient) decConnsCount() { + c.connsLock.Lock() + c.connsCount-- + c.connsLock.Unlock() +} + +func acquireClientConn(conn net.Conn) *clientConn { + v := clientConnPool.Get() + if v == nil { + v = &clientConn{} + } + cc := v.(*clientConn) + cc.c = conn + cc.createdTime = time.Now() + return cc +} + +func releaseClientConn(cc *clientConn) { + cc.c = nil + clientConnPool.Put(cc) +} + +var clientConnPool sync.Pool + +func (c *HostClient) releaseConn(cc *clientConn) { + cc.lastUseTime = time.Now() + c.connsLock.Lock() + c.conns = append(c.conns, cc) + c.connsLock.Unlock() +} + +func (c *HostClient) acquireWriter(conn net.Conn) *bufio.Writer { + v := c.writerPool.Get() + if v == nil { + n := c.WriteBufferSize + if n <= 0 { + n = defaultWriteBufferSize + } + return bufio.NewWriterSize(conn, n) + } + bw := v.(*bufio.Writer) + bw.Reset(conn) + return bw +} + +func (c *HostClient) releaseWriter(bw *bufio.Writer) { + c.writerPool.Put(bw) +} + +func (c *HostClient) acquireReader(conn net.Conn) *bufio.Reader { + v := c.readerPool.Get() + if v == nil { + n := c.ReadBufferSize + if n <= 0 { + n = defaultReadBufferSize + } + return bufio.NewReaderSize(conn, n) + } + br := v.(*bufio.Reader) + br.Reset(conn) + return br +} + +func (c *HostClient) releaseReader(br *bufio.Reader) { + c.readerPool.Put(br) +} + +func newClientTLSConfig(c *tls.Config, addr string) *tls.Config { + if c == nil { + c = &tls.Config{} + } else { + // TODO: substitute this with c.Clone() after go1.8 becomes mainstream :) + c = &tls.Config{ + Rand: c.Rand, + Time: c.Time, + Certificates: c.Certificates, + NameToCertificate: c.NameToCertificate, + GetCertificate: c.GetCertificate, + RootCAs: c.RootCAs, + NextProtos: c.NextProtos, + ServerName: c.ServerName, + + // Do not copy ClientAuth, since it is server-related stuff + // Do not copy ClientCAs, since it is server-related stuff + + InsecureSkipVerify: c.InsecureSkipVerify, + CipherSuites: c.CipherSuites, + + // Do not copy PreferServerCipherSuites - this is server stuff + + SessionTicketsDisabled: c.SessionTicketsDisabled, + + // Do not copy SessionTicketKey - this is server stuff + + ClientSessionCache: c.ClientSessionCache, + MinVersion: c.MinVersion, + MaxVersion: c.MaxVersion, + CurvePreferences: c.CurvePreferences, + } + } + + if c.ClientSessionCache == nil { + c.ClientSessionCache = tls.NewLRUClientSessionCache(0) + } + + if len(c.ServerName) == 0 { + serverName := tlsServerName(addr) + if serverName == "*" { + c.InsecureSkipVerify = true + } else { + c.ServerName = serverName + } + } + return c +} + +func tlsServerName(addr string) string { + if !strings.Contains(addr, ":") { + return addr + } + host, _, err := net.SplitHostPort(addr) + if err != nil { + return "*" + } + return host +} + +func (c *HostClient) nextAddr() string { + c.addrsLock.Lock() + if c.addrs == nil { + c.addrs = strings.Split(c.Addr, ",") + } + addr := c.addrs[0] + if len(c.addrs) > 1 { + addr = c.addrs[c.addrIdx%uint32(len(c.addrs))] + c.addrIdx++ + } + c.addrsLock.Unlock() + return addr +} + +func (c *HostClient) dialHostHard() (conn net.Conn, err error) { + // attempt to dial all the available hosts before giving up. 
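+	// Addresses from the comma-separated Addr list are tried in round-robin
+	// order via nextAddr; dialing stops as soon as one attempt succeeds or the
+	// combined read+write timeout (DefaultDialTimeout if both are unset) elapses.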
+ + c.addrsLock.Lock() + n := len(c.addrs) + c.addrsLock.Unlock() + + if n == 0 { + // It looks like c.addrs isn't initialized yet. + n = 1 + } + + timeout := c.ReadTimeout + c.WriteTimeout + if timeout <= 0 { + timeout = DefaultDialTimeout + } + deadline := time.Now().Add(timeout) + for n > 0 { + addr := c.nextAddr() + tlsConfig := c.cachedTLSConfig(addr) + conn, err = dialAddr(addr, c.Dial, c.DialDualStack, c.IsTLS, tlsConfig) + if err == nil { + return conn, nil + } + if time.Since(deadline) >= 0 { + break + } + n-- + } + return nil, err +} + +func (c *HostClient) cachedTLSConfig(addr string) *tls.Config { + if !c.IsTLS { + return nil + } + + c.tlsConfigMapLock.Lock() + if c.tlsConfigMap == nil { + c.tlsConfigMap = make(map[string]*tls.Config) + } + cfg := c.tlsConfigMap[addr] + if cfg == nil { + cfg = newClientTLSConfig(c.TLSConfig, addr) + c.tlsConfigMap[addr] = cfg + } + c.tlsConfigMapLock.Unlock() + + return cfg +} + +func dialAddr(addr string, dial DialFunc, dialDualStack, isTLS bool, tlsConfig *tls.Config) (net.Conn, error) { + if dial == nil { + if dialDualStack { + dial = DialDualStack + } else { + dial = Dial + } + addr = addMissingPort(addr, isTLS) + } + conn, err := dial(addr) + if err != nil { + return nil, err + } + if conn == nil { + panic("BUG: DialFunc returned (nil, nil)") + } + if isTLS { + conn = tls.Client(conn, tlsConfig) + } + return conn, nil +} + +func (c *HostClient) getClientName() (clientName []byte) { + v := c.clientName.Load() + if v == nil { + clientName = []byte(c.Name) + if len(clientName) == 0 { + clientName = defaultUserAgent + } + c.clientName.Store(clientName) + } else { + clientName = v.([]byte) + } + return +} + +func addMissingPort(addr string, isTLS bool) string { + n := strings.Index(addr, ":") + if n >= 0 { + return addr + } + port := 80 + if isTLS { + port = 443 + } + return fmt.Sprintf("%s:%d", addr, port) +} + +// PipelineClient pipelines requests over a limited set of concurrent +// connections to the given Addr. +// +// This client may be used in highly loaded HTTP-based RPC systems for reducing +// context switches and network level overhead. +// See https://en.wikipedia.org/wiki/HTTP_pipelining for details. +// +// It is forbidden copying PipelineClient instances. Create new instances +// instead. +// +// It is safe calling PipelineClient methods from concurrently running +// goroutines. +type PipelineClient struct { + noCopy noCopy + + // Address of the host to connect to. + Addr string + + // The maximum number of concurrent connections to the Addr. + // + // A sinle connection is used by default. + MaxConns int + + // The maximum number of pending pipelined requests over + // a single connection to Addr. + // + // DefaultMaxPendingRequests is used by default. + MaxPendingRequests int + + // The maximum delay before sending pipelined requests as a batch + // to the server. + // + // By default requests are sent immediately to the server. + MaxBatchDelay time.Duration + + // Callback for connection establishing to the host. + // + // Default Dial is used if not set. + Dial DialFunc + + // Attempt to connect to both ipv4 and ipv6 host addresses + // if set to true. + // + // This option is used only if default TCP dialer is used, + // i.e. if Dial is blank. + // + // By default client connects only to ipv4 addresses, + // since unfortunately ipv6 remains broken in many networks worldwide :) + DialDualStack bool + + // Whether to use TLS (aka SSL or HTTPS) for host connections. + IsTLS bool + + // Optional TLS config. 
+ TLSConfig *tls.Config + + // Idle connection to the host is closed after this duration. + // + // By default idle connection is closed after + // DefaultMaxIdleConnDuration. + MaxIdleConnDuration time.Duration + + // Buffer size for responses' reading. + // This also limits the maximum header size. + // + // Default buffer size is used if 0. + ReadBufferSize int + + // Buffer size for requests' writing. + // + // Default buffer size is used if 0. + WriteBufferSize int + + // Maximum duration for full response reading (including body). + // + // By default response read timeout is unlimited. + ReadTimeout time.Duration + + // Maximum duration for full request writing (including body). + // + // By default request write timeout is unlimited. + WriteTimeout time.Duration + + // Logger for logging client errors. + // + // By default standard logger from log package is used. + Logger Logger + + connClients []*pipelineConnClient + connClientsLock sync.Mutex +} + +type pipelineConnClient struct { + noCopy noCopy + + Addr string + MaxPendingRequests int + MaxBatchDelay time.Duration + Dial DialFunc + DialDualStack bool + IsTLS bool + TLSConfig *tls.Config + MaxIdleConnDuration time.Duration + ReadBufferSize int + WriteBufferSize int + ReadTimeout time.Duration + WriteTimeout time.Duration + Logger Logger + + workPool sync.Pool + + chLock sync.Mutex + chW chan *pipelineWork + chR chan *pipelineWork + + tlsConfigLock sync.Mutex + tlsConfig *tls.Config +} + +type pipelineWork struct { + reqCopy Request + respCopy Response + req *Request + resp *Response + t *time.Timer + deadline time.Time + err error + done chan struct{} +} + +// DoTimeout performs the given request and waits for response during +// the given timeout duration. +// +// Request must contain at least non-zero RequestURI with full url (including +// scheme and host) or non-zero Host header + RequestURI. +// +// The function doesn't follow redirects. +// +// Response is ignored if resp is nil. +// +// ErrTimeout is returned if the response wasn't returned during +// the given timeout. +// +// It is recommended obtaining req and resp via AcquireRequest +// and AcquireResponse in performance-critical code. +func (c *PipelineClient) DoTimeout(req *Request, resp *Response, timeout time.Duration) error { + return c.DoDeadline(req, resp, time.Now().Add(timeout)) +} + +// DoDeadline performs the given request and waits for response until +// the given deadline. +// +// Request must contain at least non-zero RequestURI with full url (including +// scheme and host) or non-zero Host header + RequestURI. +// +// The function doesn't follow redirects. +// +// Response is ignored if resp is nil. +// +// ErrTimeout is returned if the response wasn't returned until +// the given deadline. +// +// It is recommended obtaining req and resp via AcquireRequest +// and AcquireResponse in performance-critical code. 
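// Editorial sketch (not part of the vendored fasthttp source): using
// PipelineClient.DoDeadline, documented just above, together with the
// ErrPipelineOverflow back-off pattern also used by the package tests.
// The retry count and the 10ms sleep are illustrative placeholders.
func pipelineClientDoSketch(c *PipelineClient, url string) (int, error) {
	req := AcquireRequest()
	resp := AcquireResponse()
	defer ReleaseRequest(req)
	defer ReleaseResponse(resp)

	req.SetRequestURI(url)
	for attempt := 0; attempt < 3; attempt++ {
		err := c.DoDeadline(req, resp, time.Now().Add(time.Second))
		if err == ErrPipelineOverflow {
			// The pipelined queue is full; let in-flight requests drain
			// before retrying.
			time.Sleep(10 * time.Millisecond)
			continue
		}
		if err != nil {
			return 0, err
		}
		return resp.StatusCode(), nil
	}
	return 0, ErrPipelineOverflow
}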
+func (c *PipelineClient) DoDeadline(req *Request, resp *Response, deadline time.Time) error { + return c.getConnClient().DoDeadline(req, resp, deadline) +} + +func (c *pipelineConnClient) DoDeadline(req *Request, resp *Response, deadline time.Time) error { + c.init() + + timeout := -time.Since(deadline) + if timeout < 0 { + return ErrTimeout + } + + w := acquirePipelineWork(&c.workPool, timeout) + w.req = &w.reqCopy + w.resp = &w.respCopy + + // Make a copy of the request in order to avoid data races on timeouts + req.copyToSkipBody(&w.reqCopy) + swapRequestBody(req, &w.reqCopy) + + // Put the request to outgoing queue + select { + case c.chW <- w: + // Fast path: len(c.ch) < cap(c.ch) + default: + // Slow path + select { + case c.chW <- w: + case <-w.t.C: + releasePipelineWork(&c.workPool, w) + return ErrTimeout + } + } + + // Wait for the response + var err error + select { + case <-w.done: + if resp != nil { + w.respCopy.copyToSkipBody(resp) + swapResponseBody(resp, &w.respCopy) + } + err = w.err + releasePipelineWork(&c.workPool, w) + case <-w.t.C: + err = ErrTimeout + } + + return err +} + +// Do performs the given http request and sets the corresponding response. +// +// Request must contain at least non-zero RequestURI with full url (including +// scheme and host) or non-zero Host header + RequestURI. +// +// The function doesn't follow redirects. Use Get* for following redirects. +// +// Response is ignored if resp is nil. +// +// It is recommended obtaining req and resp via AcquireRequest +// and AcquireResponse in performance-critical code. +func (c *PipelineClient) Do(req *Request, resp *Response) error { + return c.getConnClient().Do(req, resp) +} + +func (c *pipelineConnClient) Do(req *Request, resp *Response) error { + c.init() + + w := acquirePipelineWork(&c.workPool, 0) + w.req = req + if resp != nil { + w.resp = resp + } else { + w.resp = &w.respCopy + } + + // Put the request to outgoing queue + select { + case c.chW <- w: + default: + // Try substituting the oldest w with the current one. + select { + case wOld := <-c.chW: + wOld.err = ErrPipelineOverflow + wOld.done <- struct{}{} + default: + } + select { + case c.chW <- w: + default: + releasePipelineWork(&c.workPool, w) + return ErrPipelineOverflow + } + } + + // Wait for the response + <-w.done + err := w.err + + releasePipelineWork(&c.workPool, w) + + return err +} + +func (c *PipelineClient) getConnClient() *pipelineConnClient { + c.connClientsLock.Lock() + cc := c.getConnClientUnlocked() + c.connClientsLock.Unlock() + return cc +} + +func (c *PipelineClient) getConnClientUnlocked() *pipelineConnClient { + if len(c.connClients) == 0 { + return c.newConnClient() + } + + // Return the client with the minimum number of pending requests. 
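+	// A client with no pending requests is preferred; otherwise a new
+	// connection client is created while MaxConns (1 by default) permits,
+	// and only then is the least-loaded existing client reused.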
+ minCC := c.connClients[0] + minReqs := minCC.PendingRequests() + if minReqs == 0 { + return minCC + } + for i := 1; i < len(c.connClients); i++ { + cc := c.connClients[i] + reqs := cc.PendingRequests() + if reqs == 0 { + return cc + } + if reqs < minReqs { + minCC = cc + minReqs = reqs + } + } + + maxConns := c.MaxConns + if maxConns <= 0 { + maxConns = 1 + } + if len(c.connClients) < maxConns { + return c.newConnClient() + } + return minCC +} + +func (c *PipelineClient) newConnClient() *pipelineConnClient { + cc := &pipelineConnClient{ + Addr: c.Addr, + MaxPendingRequests: c.MaxPendingRequests, + MaxBatchDelay: c.MaxBatchDelay, + Dial: c.Dial, + DialDualStack: c.DialDualStack, + IsTLS: c.IsTLS, + TLSConfig: c.TLSConfig, + MaxIdleConnDuration: c.MaxIdleConnDuration, + ReadBufferSize: c.ReadBufferSize, + WriteBufferSize: c.WriteBufferSize, + ReadTimeout: c.ReadTimeout, + WriteTimeout: c.WriteTimeout, + Logger: c.Logger, + } + c.connClients = append(c.connClients, cc) + return cc +} + +// ErrPipelineOverflow may be returned from PipelineClient.Do* +// if the requests' queue is overflown. +var ErrPipelineOverflow = errors.New("pipelined requests' queue has been overflown. Increase MaxConns and/or MaxPendingRequests") + +// DefaultMaxPendingRequests is the default value +// for PipelineClient.MaxPendingRequests. +const DefaultMaxPendingRequests = 1024 + +func (c *pipelineConnClient) init() { + c.chLock.Lock() + if c.chR == nil { + maxPendingRequests := c.MaxPendingRequests + if maxPendingRequests <= 0 { + maxPendingRequests = DefaultMaxPendingRequests + } + c.chR = make(chan *pipelineWork, maxPendingRequests) + if c.chW == nil { + c.chW = make(chan *pipelineWork, maxPendingRequests) + } + go func() { + if err := c.worker(); err != nil { + c.logger().Printf("error in PipelineClient(%q): %s", c.Addr, err) + if netErr, ok := err.(net.Error); ok && netErr.Temporary() { + // Throttle client reconnections on temporary errors + time.Sleep(time.Second) + } + } + + c.chLock.Lock() + // Do not reset c.chW to nil, since it may contain + // pending requests, which could be served on the next + // connection to the host. 
+ c.chR = nil + c.chLock.Unlock() + }() + } + c.chLock.Unlock() +} + +func (c *pipelineConnClient) worker() error { + tlsConfig := c.cachedTLSConfig() + conn, err := dialAddr(c.Addr, c.Dial, c.DialDualStack, c.IsTLS, tlsConfig) + if err != nil { + return err + } + + // Start reader and writer + stopW := make(chan struct{}) + doneW := make(chan error) + go func() { + doneW <- c.writer(conn, stopW) + }() + stopR := make(chan struct{}) + doneR := make(chan error) + go func() { + doneR <- c.reader(conn, stopR) + }() + + // Wait until reader and writer are stopped + select { + case err = <-doneW: + conn.Close() + close(stopR) + <-doneR + case err = <-doneR: + conn.Close() + close(stopW) + <-doneW + } + + // Notify pending readers + for len(c.chR) > 0 { + w := <-c.chR + w.err = errPipelineConnStopped + w.done <- struct{}{} + } + + return err +} + +func (c *pipelineConnClient) cachedTLSConfig() *tls.Config { + if !c.IsTLS { + return nil + } + + c.tlsConfigLock.Lock() + cfg := c.tlsConfig + if cfg == nil { + cfg = newClientTLSConfig(c.TLSConfig, c.Addr) + c.tlsConfig = cfg + } + c.tlsConfigLock.Unlock() + + return cfg +} + +func (c *pipelineConnClient) writer(conn net.Conn, stopCh <-chan struct{}) error { + writeBufferSize := c.WriteBufferSize + if writeBufferSize <= 0 { + writeBufferSize = defaultWriteBufferSize + } + bw := bufio.NewWriterSize(conn, writeBufferSize) + defer bw.Flush() + chR := c.chR + chW := c.chW + writeTimeout := c.WriteTimeout + + maxIdleConnDuration := c.MaxIdleConnDuration + if maxIdleConnDuration <= 0 { + maxIdleConnDuration = DefaultMaxIdleConnDuration + } + maxBatchDelay := c.MaxBatchDelay + + var ( + stopTimer = time.NewTimer(time.Hour) + flushTimer = time.NewTimer(time.Hour) + flushTimerCh <-chan time.Time + instantTimerCh = make(chan time.Time) + + w *pipelineWork + err error + + lastWriteDeadlineTime time.Time + ) + close(instantTimerCh) + for { + againChW: + select { + case w = <-chW: + // Fast path: len(chW) > 0 + default: + // Slow path + stopTimer.Reset(maxIdleConnDuration) + select { + case w = <-chW: + case <-stopTimer.C: + return nil + case <-stopCh: + return nil + case <-flushTimerCh: + if err = bw.Flush(); err != nil { + return err + } + flushTimerCh = nil + goto againChW + } + } + + if !w.deadline.IsZero() && time.Since(w.deadline) >= 0 { + w.err = ErrTimeout + w.done <- struct{}{} + continue + } + + if writeTimeout > 0 { + // Optimization: update write deadline only if more than 25% + // of the last write deadline exceeded. + // See https://github.com/golang/go/issues/15133 for details. 
+ currentTime := time.Now() + if currentTime.Sub(lastWriteDeadlineTime) > (writeTimeout >> 2) { + if err = conn.SetWriteDeadline(currentTime.Add(writeTimeout)); err != nil { + w.err = err + w.done <- struct{}{} + return err + } + lastWriteDeadlineTime = currentTime + } + } + if err = w.req.Write(bw); err != nil { + w.err = err + w.done <- struct{}{} + return err + } + if flushTimerCh == nil && (len(chW) == 0 || len(chR) == cap(chR)) { + if maxBatchDelay > 0 { + flushTimer.Reset(maxBatchDelay) + flushTimerCh = flushTimer.C + } else { + flushTimerCh = instantTimerCh + } + } + + againChR: + select { + case chR <- w: + // Fast path: len(chR) < cap(chR) + default: + // Slow path + select { + case chR <- w: + case <-stopCh: + w.err = errPipelineConnStopped + w.done <- struct{}{} + return nil + case <-flushTimerCh: + if err = bw.Flush(); err != nil { + w.err = err + w.done <- struct{}{} + return err + } + flushTimerCh = nil + goto againChR + } + } + } +} + +func (c *pipelineConnClient) reader(conn net.Conn, stopCh <-chan struct{}) error { + readBufferSize := c.ReadBufferSize + if readBufferSize <= 0 { + readBufferSize = defaultReadBufferSize + } + br := bufio.NewReaderSize(conn, readBufferSize) + chR := c.chR + readTimeout := c.ReadTimeout + + var ( + w *pipelineWork + err error + + lastReadDeadlineTime time.Time + ) + for { + select { + case w = <-chR: + // Fast path: len(chR) > 0 + default: + // Slow path + select { + case w = <-chR: + case <-stopCh: + return nil + } + } + + if readTimeout > 0 { + // Optimization: update read deadline only if more than 25% + // of the last read deadline exceeded. + // See https://github.com/golang/go/issues/15133 for details. + currentTime := time.Now() + if currentTime.Sub(lastReadDeadlineTime) > (readTimeout >> 2) { + if err = conn.SetReadDeadline(currentTime.Add(readTimeout)); err != nil { + w.err = err + w.done <- struct{}{} + return err + } + lastReadDeadlineTime = currentTime + } + } + if err = w.resp.Read(br); err != nil { + w.err = err + w.done <- struct{}{} + return err + } + + w.done <- struct{}{} + } +} + +func (c *pipelineConnClient) logger() Logger { + if c.Logger != nil { + return c.Logger + } + return defaultLogger +} + +// PendingRequests returns the current number of pending requests pipelined +// to the server. +// +// This number may exceed MaxPendingRequests*MaxConns by up to two times, since +// each connection to the server may keep up to MaxPendingRequests requests +// in the queue before sending them to the server. +// +// This function may be used for balancing load among multiple PipelineClient +// instances. 
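// Editorial sketch (not part of the vendored fasthttp source): balancing load
// across several PipelineClient instances with PendingRequests, as suggested
// by the comment above. The clients slice is assumed non-empty and managed by
// the caller.
func leastLoadedPipelineClientSketch(clients []*PipelineClient) *PipelineClient {
	best := clients[0]
	bestPending := best.PendingRequests()
	for _, c := range clients[1:] {
		if n := c.PendingRequests(); n < bestPending {
			best = c
			bestPending = n
		}
	}
	return best
}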
+func (c *PipelineClient) PendingRequests() int { + c.connClientsLock.Lock() + n := 0 + for _, cc := range c.connClients { + n += cc.PendingRequests() + } + c.connClientsLock.Unlock() + return n +} + +func (c *pipelineConnClient) PendingRequests() int { + c.init() + + c.chLock.Lock() + n := len(c.chR) + len(c.chW) + c.chLock.Unlock() + return n +} + +var errPipelineConnStopped = errors.New("pipeline connection has been stopped") + +func acquirePipelineWork(pool *sync.Pool, timeout time.Duration) *pipelineWork { + v := pool.Get() + if v == nil { + v = &pipelineWork{ + done: make(chan struct{}, 1), + } + } + w := v.(*pipelineWork) + if timeout > 0 { + if w.t == nil { + w.t = time.NewTimer(timeout) + } else { + w.t.Reset(timeout) + } + w.deadline = time.Now().Add(timeout) + } else { + w.deadline = zeroTime + } + return w +} + +func releasePipelineWork(pool *sync.Pool, w *pipelineWork) { + if w.t != nil { + w.t.Stop() + } + w.reqCopy.Reset() + w.respCopy.Reset() + w.req = nil + w.resp = nil + w.err = nil + pool.Put(w) +} diff --git a/vendor/github.com/erikdubbelboer/fasthttp/client_example_test.go b/vendor/github.com/erikdubbelboer/fasthttp/client_example_test.go new file mode 100644 index 0000000..a806201 --- /dev/null +++ b/vendor/github.com/erikdubbelboer/fasthttp/client_example_test.go @@ -0,0 +1,39 @@ +package fasthttp_test + +import ( + "log" + + "github.com/erikdubbelboer/fasthttp" +) + +func ExampleHostClient() { + // Perpare a client, which fetches webpages via HTTP proxy listening + // on the localhost:8080. + c := &fasthttp.HostClient{ + Addr: "localhost:8080", + } + + // Fetch google page via local proxy. + statusCode, body, err := c.Get(nil, "http://google.com/foo/bar") + if err != nil { + log.Fatalf("Error when loading google page through local proxy: %s", err) + } + if statusCode != fasthttp.StatusOK { + log.Fatalf("Unexpected status code: %d. Expecting %d", statusCode, fasthttp.StatusOK) + } + useResponseBody(body) + + // Fetch foobar page via local proxy. Reuse body buffer. + statusCode, body, err = c.Get(body, "http://foobar.com/google/com") + if err != nil { + log.Fatalf("Error when loading foobar page through local proxy: %s", err) + } + if statusCode != fasthttp.StatusOK { + log.Fatalf("Unexpected status code: %d. 
Expecting %d", statusCode, fasthttp.StatusOK) + } + useResponseBody(body) +} + +func useResponseBody(body []byte) { + // Do something with body :) +} diff --git a/vendor/github.com/erikdubbelboer/fasthttp/client_test.go b/vendor/github.com/erikdubbelboer/fasthttp/client_test.go new file mode 100644 index 0000000..988a7a8 --- /dev/null +++ b/vendor/github.com/erikdubbelboer/fasthttp/client_test.go @@ -0,0 +1,1418 @@ +package fasthttp + +import ( + "bufio" + "crypto/tls" + "fmt" + "io" + "net" + "net/url" + "os" + "runtime" + "strings" + "sync" + "sync/atomic" + "testing" + "time" + + "github.com/erikdubbelboer/fasthttp/fasthttputil" +) + +func TestClientPostArgs(t *testing.T) { + ln := fasthttputil.NewInmemoryListener() + + s := &Server{ + Handler: func(ctx *RequestCtx) { + body := ctx.Request.Body() + if len(body) == 0 { + return + } + ctx.Write(body) + }, + } + + go s.Serve(ln) + + c := &Client{ + Dial: func(addr string) (net.Conn, error) { + return ln.Dial() + }, + } + + req, res := AcquireRequest(), AcquireResponse() + args := req.PostArgs() + + args.Add("addhttp2", "support") + args.Add("fast", "http") + + req.Header.SetMethod("POST") + req.SetRequestURI("http://make.fasthttp.great?again") + + err := c.Do(req, res) + if err != nil { + t.Fatal(err) + } + if len(res.Body()) == 0 { + t.Fatal("cannot set args as body") + } +} + +func TestClientUserAgent(t *testing.T) { + ln := fasthttputil.NewInmemoryListener() + + s := &Server{ + Handler: func(ctx *RequestCtx) { + ctx.Write([]byte("response")) + }, + } + go s.Serve(ln) + + userAgent := "I'm not fasthttp" + c := &Client{ + Name: userAgent, + Dial: func(addr string) (net.Conn, error) { + return ln.Dial() + }, + } + req := AcquireRequest() + res := AcquireResponse() + + req.SetRequestURI("http://do.not.worry?we.are.going.to.make.fasthttp.great.again") + + err := c.Do(req, res) + if err != nil { + t.Fatal(err) + } + if ua := string(req.Header.UserAgent()); ua != userAgent { + t.Fatalf("User-Agent defers %s <> %s", ua, userAgent) + } +} + +func TestClientDoWithCustomHeaders(t *testing.T) { + // make sure that the client sends all the request headers and body. + ln := fasthttputil.NewInmemoryListener() + c := &Client{ + Dial: func(addr string) (net.Conn, error) { + return ln.Dial() + }, + } + + uri := "/foo/bar/baz?a=b&cd=12" + headers := map[string]string{ + "Foo": "bar", + "Host": "xxx.com", + "Content-Type": "asdfsdf", + "a-b-c-d-f": "", + } + body := "request body" + + ch := make(chan error) + go func() { + conn, err := ln.Accept() + if err != nil { + ch <- fmt.Errorf("cannot accept client connection: %s", err) + return + } + br := bufio.NewReader(conn) + + var req Request + if err = req.Read(br); err != nil { + ch <- fmt.Errorf("cannot read client request: %s", err) + return + } + if string(req.Header.Method()) != "POST" { + ch <- fmt.Errorf("unexpected request method: %q. Expecting %q", req.Header.Method(), "POST") + return + } + reqURI := req.RequestURI() + if string(reqURI) != uri { + ch <- fmt.Errorf("unexpected request uri: %q. Expecting %q", reqURI, uri) + return + } + for k, v := range headers { + hv := req.Header.Peek(k) + if string(hv) != v { + ch <- fmt.Errorf("unexpected value for header %q: %q. Expecting %q", k, hv, v) + return + } + } + cl := req.Header.ContentLength() + if cl != len(body) { + ch <- fmt.Errorf("unexpected content-length %d. Expecting %d", cl, len(body)) + return + } + reqBody := req.Body() + if string(reqBody) != body { + ch <- fmt.Errorf("unexpected request body: %q. 
Expecting %q", reqBody, body) + return + } + + var resp Response + bw := bufio.NewWriter(conn) + if err = resp.Write(bw); err != nil { + ch <- fmt.Errorf("cannot send response: %s", err) + return + } + if err = bw.Flush(); err != nil { + ch <- fmt.Errorf("cannot flush response: %s", err) + return + } + + ch <- nil + }() + + var req Request + req.Header.SetMethod("POST") + req.SetRequestURI(uri) + for k, v := range headers { + req.Header.Set(k, v) + } + req.SetBodyString(body) + + var resp Response + + err := c.DoTimeout(&req, &resp, time.Second) + if err != nil { + t.Fatalf("error when doing request: %s", err) + } + + select { + case <-ch: + case <-time.After(5 * time.Second): + t.Fatalf("timeout") + } +} + +func TestPipelineClientDoSerial(t *testing.T) { + testPipelineClientDoConcurrent(t, 1, 0, 0) +} + +func TestPipelineClientDoConcurrent(t *testing.T) { + testPipelineClientDoConcurrent(t, 10, 0, 1) +} + +func TestPipelineClientDoBatchDelayConcurrent(t *testing.T) { + testPipelineClientDoConcurrent(t, 10, 5*time.Millisecond, 1) +} + +func TestPipelineClientDoBatchDelayConcurrentMultiConn(t *testing.T) { + testPipelineClientDoConcurrent(t, 10, 5*time.Millisecond, 3) +} + +func testPipelineClientDoConcurrent(t *testing.T, concurrency int, maxBatchDelay time.Duration, maxConns int) { + ln := fasthttputil.NewInmemoryListener() + + s := &Server{ + Handler: func(ctx *RequestCtx) { + ctx.WriteString("OK") + }, + } + + serverStopCh := make(chan struct{}) + go func() { + if err := s.Serve(ln); err != nil { + t.Fatalf("unexpected error: %s", err) + } + close(serverStopCh) + }() + + c := &PipelineClient{ + Dial: func(addr string) (net.Conn, error) { + return ln.Dial() + }, + MaxConns: maxConns, + MaxPendingRequests: concurrency, + MaxBatchDelay: maxBatchDelay, + Logger: &customLogger{}, + } + + clientStopCh := make(chan struct{}, concurrency) + for i := 0; i < concurrency; i++ { + go func() { + testPipelineClientDo(t, c) + clientStopCh <- struct{}{} + }() + } + + for i := 0; i < concurrency; i++ { + select { + case <-clientStopCh: + case <-time.After(3 * time.Second): + t.Fatalf("timeout") + } + } + + if c.PendingRequests() != 0 { + t.Fatalf("unexpected number of pending requests: %d. Expecting zero", c.PendingRequests()) + } + + if err := ln.Close(); err != nil { + t.Fatalf("unexpected error: %s", err) + } + select { + case <-serverStopCh: + case <-time.After(time.Second): + t.Fatalf("timeout") + } +} + +func testPipelineClientDo(t *testing.T, c *PipelineClient) { + var err error + req := AcquireRequest() + req.SetRequestURI("http://foobar/baz") + resp := AcquireResponse() + for i := 0; i < 10; i++ { + if i&1 == 0 { + err = c.DoTimeout(req, resp, time.Second) + } else { + err = c.Do(req, resp) + } + if err != nil { + if err == ErrPipelineOverflow { + time.Sleep(10 * time.Millisecond) + continue + } + t.Fatalf("unexpected error on iteration %d: %s", i, err) + } + if resp.StatusCode() != StatusOK { + t.Fatalf("unexpected status code: %d. Expecting %d", resp.StatusCode(), StatusOK) + } + body := string(resp.Body()) + if body != "OK" { + t.Fatalf("unexpected body: %q. Expecting %q", body, "OK") + } + + // sleep for a while, so the connection to the host may expire. 
+ if i%5 == 0 { + time.Sleep(30 * time.Millisecond) + } + } + ReleaseRequest(req) + ReleaseResponse(resp) +} + +func TestClientDoTimeoutDisableNormalizing(t *testing.T) { + ln := fasthttputil.NewInmemoryListener() + + s := &Server{ + Handler: func(ctx *RequestCtx) { + ctx.Response.Header.Set("foo-BAR", "baz") + }, + DisableHeaderNamesNormalizing: true, + } + + serverStopCh := make(chan struct{}) + go func() { + if err := s.Serve(ln); err != nil { + t.Fatalf("unexpected error: %s", err) + } + close(serverStopCh) + }() + + c := &Client{ + Dial: func(addr string) (net.Conn, error) { + return ln.Dial() + }, + DisableHeaderNamesNormalizing: true, + } + + var req Request + req.SetRequestURI("http://aaaai.com/bsdf?sddfsd") + var resp Response + for i := 0; i < 5; i++ { + if err := c.DoTimeout(&req, &resp, time.Second); err != nil { + t.Fatalf("unexpected error: %s", err) + } + hv := resp.Header.Peek("foo-BAR") + if string(hv) != "baz" { + t.Fatalf("unexpected header value: %q. Expecting %q", hv, "baz") + } + hv = resp.Header.Peek("Foo-Bar") + if len(hv) > 0 { + t.Fatalf("unexpected non-empty header value %q", hv) + } + } + + if err := ln.Close(); err != nil { + t.Fatalf("unexpected error: %s", err) + } + select { + case <-serverStopCh: + case <-time.After(time.Second): + t.Fatalf("timeout") + } +} + +func TestHostClientPendingRequests(t *testing.T) { + const concurrency = 10 + doneCh := make(chan struct{}) + readyCh := make(chan struct{}, concurrency) + s := &Server{ + Handler: func(ctx *RequestCtx) { + readyCh <- struct{}{} + <-doneCh + }, + } + ln := fasthttputil.NewInmemoryListener() + serverStopCh := make(chan struct{}) + go func() { + if err := s.Serve(ln); err != nil { + t.Fatalf("unexpected error: %s", err) + } + close(serverStopCh) + }() + + c := &HostClient{ + Addr: "foobar", + Dial: func(addr string) (net.Conn, error) { + return ln.Dial() + }, + } + + pendingRequests := c.PendingRequests() + if pendingRequests != 0 { + t.Fatalf("non-zero pendingRequests: %d", pendingRequests) + } + + resultCh := make(chan error, concurrency) + for i := 0; i < concurrency; i++ { + go func() { + req := AcquireRequest() + req.SetRequestURI("http://foobar/baz") + resp := AcquireResponse() + + if err := c.DoTimeout(req, resp, 10*time.Second); err != nil { + resultCh <- fmt.Errorf("unexpected error: %s", err) + return + } + + if resp.StatusCode() != StatusOK { + resultCh <- fmt.Errorf("unexpected status code %d. Expecting %d", resp.StatusCode(), StatusOK) + return + } + resultCh <- nil + }() + } + + // wait while all the requests reach server + for i := 0; i < concurrency; i++ { + select { + case <-readyCh: + case <-time.After(time.Second): + t.Fatalf("timeout") + } + } + + pendingRequests = c.PendingRequests() + if pendingRequests != concurrency { + t.Fatalf("unexpected pendingRequests: %d. Expecting %d", pendingRequests, concurrency) + } + + // unblock request handlers on the server and wait until all the requests are finished. 
+ close(doneCh) + for i := 0; i < concurrency; i++ { + select { + case err := <-resultCh: + if err != nil { + t.Fatalf("unexpected error: %s", err) + } + case <-time.After(time.Second): + t.Fatalf("timeout") + } + } + + pendingRequests = c.PendingRequests() + if pendingRequests != 0 { + t.Fatalf("non-zero pendingRequests: %d", pendingRequests) + } + + // stop the server + if err := ln.Close(); err != nil { + t.Fatalf("unexpected error: %s", err) + } + select { + case <-serverStopCh: + case <-time.After(time.Second): + t.Fatalf("timeout") + } +} + +func TestHostClientMaxConnsWithDeadline(t *testing.T) { + var ( + emptyBodyCount uint8 + ln = fasthttputil.NewInmemoryListener() + timeout = 50 * time.Millisecond + wg sync.WaitGroup + ) + + s := &Server{ + Handler: func(ctx *RequestCtx) { + if len(ctx.PostBody()) == 0 { + emptyBodyCount++ + } + + ctx.WriteString("foo") + }, + } + serverStopCh := make(chan struct{}) + go func() { + if err := s.Serve(ln); err != nil { + t.Fatalf("unexpected error: %s", err) + } + close(serverStopCh) + }() + + c := &HostClient{ + Addr: "foobar", + Dial: func(addr string) (net.Conn, error) { + return ln.Dial() + }, + MaxConns: 1, + } + + for i := 0; i < 5; i++ { + wg.Add(1) + go func() { + defer wg.Done() + + req := AcquireRequest() + req.SetRequestURI("http://foobar/baz") + req.Header.SetMethod("POST") + req.SetBodyString("bar") + resp := AcquireResponse() + + for { + if err := c.DoDeadline(req, resp, time.Now().Add(timeout)); err != nil { + if err == ErrNoFreeConns { + time.Sleep(time.Millisecond) + continue + } + t.Fatalf("unexpected error: %s", err) + } + break + } + + if resp.StatusCode() != StatusOK { + t.Fatalf("unexpected status code %d. Expecting %d", resp.StatusCode(), StatusOK) + } + + body := resp.Body() + if string(body) != "foo" { + t.Fatalf("unexpected body %q. Expecting %q", body, "abcd") + } + }() + } + wg.Wait() + + if err := ln.Close(); err != nil { + t.Fatalf("unexpected error: %s", err) + } + select { + case <-serverStopCh: + case <-time.After(time.Second): + t.Fatalf("timeout") + } + + if emptyBodyCount > 0 { + t.Fatalf("at least one request body was empty") + } +} + +func TestHostClientMaxConnDuration(t *testing.T) { + ln := fasthttputil.NewInmemoryListener() + + connectionCloseCount := uint32(0) + s := &Server{ + Handler: func(ctx *RequestCtx) { + ctx.WriteString("abcd") + if ctx.Request.ConnectionClose() { + atomic.AddUint32(&connectionCloseCount, 1) + } + }, + } + serverStopCh := make(chan struct{}) + go func() { + if err := s.Serve(ln); err != nil { + t.Fatalf("unexpected error: %s", err) + } + close(serverStopCh) + }() + + c := &HostClient{ + Addr: "foobar", + Dial: func(addr string) (net.Conn, error) { + return ln.Dial() + }, + MaxConnDuration: 10 * time.Millisecond, + } + + for i := 0; i < 5; i++ { + statusCode, body, err := c.Get(nil, "http://aaaa.com/bbb/cc") + if err != nil { + t.Fatalf("unexpected error: %s", err) + } + if statusCode != StatusOK { + t.Fatalf("unexpected status code %d. Expecting %d", statusCode, StatusOK) + } + if string(body) != "abcd" { + t.Fatalf("unexpected body %q. 
Expecting %q", body, "abcd") + } + time.Sleep(c.MaxConnDuration) + } + + if err := ln.Close(); err != nil { + t.Fatalf("unexpected error: %s", err) + } + select { + case <-serverStopCh: + case <-time.After(time.Second): + t.Fatalf("timeout") + } + + if connectionCloseCount == 0 { + t.Fatalf("expecting at least one 'Connection: close' request header") + } +} + +func TestHostClientMultipleAddrs(t *testing.T) { + ln := fasthttputil.NewInmemoryListener() + + s := &Server{ + Handler: func(ctx *RequestCtx) { + ctx.Write(ctx.Host()) + ctx.SetConnectionClose() + }, + } + serverStopCh := make(chan struct{}) + go func() { + if err := s.Serve(ln); err != nil { + t.Fatalf("unexpected error: %s", err) + } + close(serverStopCh) + }() + + dialsCount := make(map[string]int) + c := &HostClient{ + Addr: "foo,bar,baz", + Dial: func(addr string) (net.Conn, error) { + dialsCount[addr]++ + return ln.Dial() + }, + } + + for i := 0; i < 9; i++ { + statusCode, body, err := c.Get(nil, "http://foobar/baz/aaa?bbb=ddd") + if err != nil { + t.Fatalf("unexpected error: %s", err) + } + if statusCode != StatusOK { + t.Fatalf("unexpected status code %d. Expecting %d", statusCode, StatusOK) + } + if string(body) != "foobar" { + t.Fatalf("unexpected body %q. Expecting %q", body, "foobar") + } + } + + if err := ln.Close(); err != nil { + t.Fatalf("unexpected error: %s", err) + } + select { + case <-serverStopCh: + case <-time.After(time.Second): + t.Fatalf("timeout") + } + + if len(dialsCount) != 3 { + t.Fatalf("unexpected dialsCount size %d. Expecting 3", len(dialsCount)) + } + for _, k := range []string{"foo", "bar", "baz"} { + if dialsCount[k] != 3 { + t.Fatalf("unexpected dialsCount for %q. Expecting 3", k) + } + } +} + +func TestClientFollowRedirects(t *testing.T) { + s := &Server{ + Handler: func(ctx *RequestCtx) { + switch string(ctx.Path()) { + case "/foo": + u := ctx.URI() + u.Update("/xy?z=wer") + ctx.Redirect(u.String(), StatusFound) + case "/xy": + u := ctx.URI() + u.Update("/bar") + ctx.Redirect(u.String(), StatusFound) + default: + ctx.Success("text/plain", ctx.Path()) + } + }, + } + ln := fasthttputil.NewInmemoryListener() + + serverStopCh := make(chan struct{}) + go func() { + if err := s.Serve(ln); err != nil { + t.Fatalf("unexpected error: %s", err) + } + close(serverStopCh) + }() + + c := &HostClient{ + Addr: "xxx", + Dial: func(addr string) (net.Conn, error) { + return ln.Dial() + }, + } + + for i := 0; i < 10; i++ { + statusCode, body, err := c.GetTimeout(nil, "http://xxx/foo", time.Second) + if err != nil { + t.Fatalf("unexpected error: %s", err) + } + if statusCode != StatusOK { + t.Fatalf("unexpected status code: %d", statusCode) + } + if string(body) != "/bar" { + t.Fatalf("unexpected response %q. Expecting %q", body, "/bar") + } + } + + for i := 0; i < 10; i++ { + statusCode, body, err := c.Get(nil, "http://xxx/aaab/sss") + if err != nil { + t.Fatalf("unexpected error: %s", err) + } + if statusCode != StatusOK { + t.Fatalf("unexpected status code: %d", statusCode) + } + if string(body) != "/aaab/sss" { + t.Fatalf("unexpected response %q. 
Expecting %q", body, "/aaab/sss") + } + } +} + +func TestClientGetTimeoutSuccess(t *testing.T) { + s := startEchoServer(t, "tcp", "127.0.0.1:") + defer s.Stop() + + testClientGetTimeoutSuccess(t, &defaultClient, "http://"+s.Addr(), 100) +} + +func TestClientGetTimeoutSuccessConcurrent(t *testing.T) { + s := startEchoServer(t, "tcp", "127.0.0.1:") + defer s.Stop() + + var wg sync.WaitGroup + for i := 0; i < 10; i++ { + wg.Add(1) + go func() { + defer wg.Done() + testClientGetTimeoutSuccess(t, &defaultClient, "http://"+s.Addr(), 100) + }() + } + wg.Wait() +} + +func TestClientDoTimeoutSuccess(t *testing.T) { + s := startEchoServer(t, "tcp", "127.0.0.1:") + defer s.Stop() + + testClientDoTimeoutSuccess(t, &defaultClient, "http://"+s.Addr(), 100) +} + +func TestClientDoTimeoutSuccessConcurrent(t *testing.T) { + s := startEchoServer(t, "tcp", "127.0.0.1:") + defer s.Stop() + + var wg sync.WaitGroup + for i := 0; i < 10; i++ { + wg.Add(1) + go func() { + defer wg.Done() + testClientDoTimeoutSuccess(t, &defaultClient, "http://"+s.Addr(), 100) + }() + } + wg.Wait() +} + +func TestClientGetTimeoutError(t *testing.T) { + c := &Client{ + Dial: func(addr string) (net.Conn, error) { + return &readTimeoutConn{t: time.Second}, nil + }, + } + + testClientGetTimeoutError(t, c, 100) +} + +func TestClientGetTimeoutErrorConcurrent(t *testing.T) { + c := &Client{ + Dial: func(addr string) (net.Conn, error) { + return &readTimeoutConn{t: time.Second}, nil + }, + MaxConnsPerHost: 1000, + } + + var wg sync.WaitGroup + for i := 0; i < 10; i++ { + wg.Add(1) + go func() { + defer wg.Done() + testClientGetTimeoutError(t, c, 100) + }() + } + wg.Wait() +} + +func TestClientDoTimeoutError(t *testing.T) { + c := &Client{ + Dial: func(addr string) (net.Conn, error) { + return &readTimeoutConn{t: time.Second}, nil + }, + } + + testClientDoTimeoutError(t, c, 100) +} + +func TestClientDoTimeoutErrorConcurrent(t *testing.T) { + c := &Client{ + Dial: func(addr string) (net.Conn, error) { + return &readTimeoutConn{t: time.Second}, nil + }, + MaxConnsPerHost: 1000, + } + + var wg sync.WaitGroup + for i := 0; i < 10; i++ { + wg.Add(1) + go func() { + defer wg.Done() + testClientDoTimeoutError(t, c, 100) + }() + } + wg.Wait() +} + +func testClientDoTimeoutError(t *testing.T, c *Client, n int) { + var req Request + var resp Response + req.SetRequestURI("http://foobar.com/baz") + for i := 0; i < n; i++ { + err := c.DoTimeout(&req, &resp, time.Millisecond) + if err == nil { + t.Fatalf("expecting error") + } + if err != ErrTimeout { + t.Fatalf("unexpected error: %s. Expecting %s", err, ErrTimeout) + } + } +} + +func testClientGetTimeoutError(t *testing.T, c *Client, n int) { + buf := make([]byte, 10) + for i := 0; i < n; i++ { + statusCode, body, err := c.GetTimeout(buf, "http://foobar.com/baz", time.Millisecond) + if err == nil { + t.Fatalf("expecting error") + } + if err != ErrTimeout { + t.Fatalf("unexpected error: %s. Expecting %s", err, ErrTimeout) + } + if statusCode != 0 { + t.Fatalf("unexpected statusCode=%d. 
Expecting %d", statusCode, 0) + } + if body == nil { + t.Fatalf("body must be non-nil") + } + } +} + +type readTimeoutConn struct { + net.Conn + t time.Duration +} + +func (r *readTimeoutConn) Read(p []byte) (int, error) { + time.Sleep(r.t) + return 0, io.EOF +} + +func (r *readTimeoutConn) Write(p []byte) (int, error) { + return len(p), nil +} + +func (r *readTimeoutConn) Close() error { + return nil +} + +func TestClientNonIdempotentRetry(t *testing.T) { + dialsCount := 0 + c := &Client{ + Dial: func(addr string) (net.Conn, error) { + dialsCount++ + switch dialsCount { + case 1, 2: + return &readErrorConn{}, nil + case 3: + return &singleReadConn{ + s: "HTTP/1.1 345 OK\r\nContent-Type: foobar\r\nContent-Length: 7\r\n\r\n0123456", + }, nil + default: + t.Fatalf("unexpected number of dials: %d", dialsCount) + } + panic("unreachable") + }, + } + + // This POST must succeed, since the readErrorConn closes + // the connection before sending any response. + // So the client must retry non-idempotent request. + dialsCount = 0 + statusCode, body, err := c.Post(nil, "http://foobar/a/b", nil) + if err != nil { + t.Fatalf("unexpected error: %s", err) + } + if statusCode != 345 { + t.Fatalf("unexpected status code: %d. Expecting 345", statusCode) + } + if string(body) != "0123456" { + t.Fatalf("unexpected body: %q. Expecting %q", body, "0123456") + } + + // Verify that idempotent GET succeeds. + dialsCount = 0 + statusCode, body, err = c.Get(nil, "http://foobar/a/b") + if err != nil { + t.Fatalf("unexpected error: %s", err) + } + if statusCode != 345 { + t.Fatalf("unexpected status code: %d. Expecting 345", statusCode) + } + if string(body) != "0123456" { + t.Fatalf("unexpected body: %q. Expecting %q", body, "0123456") + } +} + +func TestClientIdempotentRequest(t *testing.T) { + dialsCount := 0 + c := &Client{ + Dial: func(addr string) (net.Conn, error) { + dialsCount++ + switch dialsCount { + case 1: + return &singleReadConn{ + s: "invalid response", + }, nil + case 2: + return &writeErrorConn{}, nil + case 3: + return &readErrorConn{}, nil + case 4: + return &singleReadConn{ + s: "HTTP/1.1 345 OK\r\nContent-Type: foobar\r\nContent-Length: 7\r\n\r\n0123456", + }, nil + default: + t.Fatalf("unexpected number of dials: %d", dialsCount) + } + panic("unreachable") + }, + } + + // idempotent GET must succeed. + statusCode, body, err := c.Get(nil, "http://foobar/a/b") + if err != nil { + t.Fatalf("unexpected error: %s", err) + } + if statusCode != 345 { + t.Fatalf("unexpected status code: %d. Expecting 345", statusCode) + } + if string(body) != "0123456" { + t.Fatalf("unexpected body: %q. 
Expecting %q", body, "0123456") + } + + var args Args + + // non-idempotent POST must fail on incorrect singleReadConn + dialsCount = 0 + _, _, err = c.Post(nil, "http://foobar/a/b", &args) + if err == nil { + t.Fatalf("expecting error") + } + + // non-idempotent POST must fail on incorrect singleReadConn + dialsCount = 0 + _, _, err = c.Post(nil, "http://foobar/a/b", nil) + if err == nil { + t.Fatalf("expecting error") + } +} + +type writeErrorConn struct { + net.Conn +} + +func (w *writeErrorConn) Write(p []byte) (int, error) { + return 1, fmt.Errorf("error") +} + +func (w *writeErrorConn) Close() error { + return nil +} + +type readErrorConn struct { + net.Conn +} + +func (r *readErrorConn) Read(p []byte) (int, error) { + return 0, fmt.Errorf("error") +} + +func (r *readErrorConn) Write(p []byte) (int, error) { + return len(p), nil +} + +func (r *readErrorConn) Close() error { + return nil +} + +type singleReadConn struct { + net.Conn + s string + n int +} + +func (r *singleReadConn) Read(p []byte) (int, error) { + if len(r.s) == r.n { + return 0, io.EOF + } + n := copy(p, []byte(r.s[r.n:])) + r.n += n + return n, nil +} + +func (r *singleReadConn) Write(p []byte) (int, error) { + return len(p), nil +} + +func (r *singleReadConn) Close() error { + return nil +} + +func TestClientHTTPSInvalidServerName(t *testing.T) { + sHTTPS := startEchoServerTLS(t, "tcp", "127.0.0.1:") + defer sHTTPS.Stop() + + var c Client + + for i := 0; i < 10; i++ { + _, _, err := c.GetTimeout(nil, "https://"+sHTTPS.Addr(), time.Second) + if err == nil { + t.Fatalf("expecting TLS error") + } + } +} + +func TestClientHTTPSConcurrent(t *testing.T) { + sHTTP := startEchoServer(t, "tcp", "127.0.0.1:") + defer sHTTP.Stop() + + sHTTPS := startEchoServerTLS(t, "tcp", "127.0.0.1:") + defer sHTTPS.Stop() + + c := &Client{ + TLSConfig: &tls.Config{ + InsecureSkipVerify: true, + }, + } + + var wg sync.WaitGroup + for i := 0; i < 4; i++ { + wg.Add(1) + addr := "http://" + sHTTP.Addr() + if i&1 != 0 { + addr = "https://" + sHTTPS.Addr() + } + go func() { + defer wg.Done() + testClientGet(t, c, addr, 20) + testClientPost(t, c, addr, 10) + }() + } + wg.Wait() +} + +func TestClientManyServers(t *testing.T) { + var addrs []string + for i := 0; i < 10; i++ { + s := startEchoServer(t, "tcp", "127.0.0.1:") + defer s.Stop() + addrs = append(addrs, s.Addr()) + } + + var wg sync.WaitGroup + for i := 0; i < 4; i++ { + wg.Add(1) + addr := "http://" + addrs[i] + go func() { + defer wg.Done() + testClientGet(t, &defaultClient, addr, 20) + testClientPost(t, &defaultClient, addr, 10) + }() + } + wg.Wait() +} + +func TestClientGet(t *testing.T) { + s := startEchoServer(t, "tcp", "127.0.0.1:") + defer s.Stop() + + testClientGet(t, &defaultClient, "http://"+s.Addr(), 100) +} + +func TestClientPost(t *testing.T) { + s := startEchoServer(t, "tcp", "127.0.0.1:") + defer s.Stop() + + testClientPost(t, &defaultClient, "http://"+s.Addr(), 100) +} + +func TestClientConcurrent(t *testing.T) { + s := startEchoServer(t, "tcp", "127.0.0.1:") + defer s.Stop() + + addr := "http://" + s.Addr() + var wg sync.WaitGroup + for i := 0; i < 10; i++ { + wg.Add(1) + go func() { + defer wg.Done() + testClientGet(t, &defaultClient, addr, 30) + testClientPost(t, &defaultClient, addr, 10) + }() + } + wg.Wait() +} + +func skipIfNotUnix(tb testing.TB) { + switch runtime.GOOS { + case "android", "nacl", "plan9", "windows": + tb.Skipf("%s does not support unix sockets", runtime.GOOS) + } + if runtime.GOOS == "darwin" && (runtime.GOARCH == "arm" || runtime.GOARCH == "arm64") 
{ + tb.Skip("iOS does not support unix, unixgram") + } +} + +func TestHostClientGet(t *testing.T) { + skipIfNotUnix(t) + addr := "TestHostClientGet.unix" + s := startEchoServer(t, "unix", addr) + defer s.Stop() + c := createEchoClient(t, "unix", addr) + + testHostClientGet(t, c, 100) +} + +func TestHostClientPost(t *testing.T) { + skipIfNotUnix(t) + addr := "./TestHostClientPost.unix" + s := startEchoServer(t, "unix", addr) + defer s.Stop() + c := createEchoClient(t, "unix", addr) + + testHostClientPost(t, c, 100) +} + +func TestHostClientConcurrent(t *testing.T) { + skipIfNotUnix(t) + addr := "./TestHostClientConcurrent.unix" + s := startEchoServer(t, "unix", addr) + defer s.Stop() + c := createEchoClient(t, "unix", addr) + + var wg sync.WaitGroup + for i := 0; i < 10; i++ { + wg.Add(1) + go func() { + defer wg.Done() + testHostClientGet(t, c, 30) + testHostClientPost(t, c, 10) + }() + } + wg.Wait() +} + +func testClientGet(t *testing.T, c clientGetter, addr string, n int) { + var buf []byte + for i := 0; i < n; i++ { + uri := fmt.Sprintf("%s/foo/%d?bar=baz", addr, i) + statusCode, body, err := c.Get(buf, uri) + buf = body + if err != nil { + t.Fatalf("unexpected error when doing http request: %s", err) + } + if statusCode != StatusOK { + t.Fatalf("unexpected status code: %d. Expecting %d", statusCode, StatusOK) + } + resultURI := string(body) + if resultURI != uri { + t.Fatalf("unexpected uri %q. Expecting %q", resultURI, uri) + } + } +} + +func testClientDoTimeoutSuccess(t *testing.T, c *Client, addr string, n int) { + var req Request + var resp Response + + for i := 0; i < n; i++ { + uri := fmt.Sprintf("%s/foo/%d?bar=baz", addr, i) + req.SetRequestURI(uri) + if err := c.DoTimeout(&req, &resp, time.Second); err != nil { + t.Fatalf("unexpected error: %s", err) + } + if resp.StatusCode() != StatusOK { + t.Fatalf("unexpected status code: %d. Expecting %d", resp.StatusCode(), StatusOK) + } + resultURI := string(resp.Body()) + if strings.HasPrefix(uri, "https") { + resultURI = uri[:5] + resultURI[4:] + } + if resultURI != uri { + t.Fatalf("unexpected uri %q. Expecting %q", resultURI, uri) + } + } +} + +func testClientGetTimeoutSuccess(t *testing.T, c *Client, addr string, n int) { + var buf []byte + for i := 0; i < n; i++ { + uri := fmt.Sprintf("%s/foo/%d?bar=baz", addr, i) + statusCode, body, err := c.GetTimeout(buf, uri, time.Second) + buf = body + if err != nil { + t.Fatalf("unexpected error when doing http request: %s", err) + } + if statusCode != StatusOK { + t.Fatalf("unexpected status code: %d. Expecting %d", statusCode, StatusOK) + } + resultURI := string(body) + if strings.HasPrefix(uri, "https") { + resultURI = uri[:5] + resultURI[4:] + } + if resultURI != uri { + t.Fatalf("unexpected uri %q. Expecting %q", resultURI, uri) + } + } +} + +func testClientPost(t *testing.T, c clientPoster, addr string, n int) { + var buf []byte + var args Args + for i := 0; i < n; i++ { + uri := fmt.Sprintf("%s/foo/%d?bar=baz", addr, i) + args.Set("xx", fmt.Sprintf("yy%d", i)) + args.Set("zzz", fmt.Sprintf("qwe_%d", i)) + argsS := args.String() + statusCode, body, err := c.Post(buf, uri, &args) + buf = body + if err != nil { + t.Fatalf("unexpected error when doing http request: %s", err) + } + if statusCode != StatusOK { + t.Fatalf("unexpected status code: %d. Expecting %d", statusCode, StatusOK) + } + s := string(body) + if s != argsS { + t.Fatalf("unexpected response %q. 
Expecting %q", s, argsS) + } + } +} + +func testHostClientGet(t *testing.T, c *HostClient, n int) { + testClientGet(t, c, "http://google.com", n) +} + +func testHostClientPost(t *testing.T, c *HostClient, n int) { + testClientPost(t, c, "http://post-host.com", n) +} + +type clientPoster interface { + Post(dst []byte, uri string, postArgs *Args) (int, []byte, error) +} + +type clientGetter interface { + Get(dst []byte, uri string) (int, []byte, error) +} + +func createEchoClient(t *testing.T, network, addr string) *HostClient { + return &HostClient{ + Addr: addr, + Dial: func(addr string) (net.Conn, error) { + return net.Dial(network, addr) + }, + } +} + +type testEchoServer struct { + s *Server + ln net.Listener + ch chan struct{} + t *testing.T +} + +func (s *testEchoServer) Stop() { + s.ln.Close() + select { + case <-s.ch: + case <-time.After(time.Second): + s.t.Fatalf("timeout when waiting for server close") + } +} + +func (s *testEchoServer) Addr() string { + return s.ln.Addr().String() +} + +func startEchoServerTLS(t *testing.T, network, addr string) *testEchoServer { + return startEchoServerExt(t, network, addr, true) +} + +func startEchoServer(t *testing.T, network, addr string) *testEchoServer { + return startEchoServerExt(t, network, addr, false) +} + +func startEchoServerExt(t *testing.T, network, addr string, isTLS bool) *testEchoServer { + if network == "unix" { + os.Remove(addr) + } + var ln net.Listener + var err error + if isTLS { + certFile := "./ssl-cert-snakeoil.pem" + keyFile := "./ssl-cert-snakeoil.key" + cert, err1 := tls.LoadX509KeyPair(certFile, keyFile) + if err1 != nil { + t.Fatalf("Cannot load TLS certificate: %s", err1) + } + tlsConfig := &tls.Config{ + Certificates: []tls.Certificate{cert}, + } + ln, err = tls.Listen(network, addr, tlsConfig) + } else { + ln, err = net.Listen(network, addr) + } + if err != nil { + t.Fatalf("cannot listen %q: %s", addr, err) + } + + s := &Server{ + Handler: func(ctx *RequestCtx) { + if ctx.IsGet() { + ctx.Success("text/plain", ctx.URI().FullURI()) + } else if ctx.IsPost() { + ctx.PostArgs().WriteTo(ctx) + } + }, + } + ch := make(chan struct{}) + go func() { + err := s.Serve(ln) + if err != nil { + t.Fatalf("unexpected error returned from Serve(): %s", err) + } + close(ch) + }() + return &testEchoServer{ + s: s, + ln: ln, + ch: ch, + t: t, + } +} + +func TestHostClientRedirectChangingSchema(t *testing.T) { + sHTTPS := testHostClientRedirectChangingSchemaServer(t, "tcp", "127.0.0.1:0", true, "") + defer sHTTPS.Stop() + sHTTP := testHostClientRedirectChangingSchemaServer(t, "tcp", "127.0.0.1:0", false, sHTTPS.Addr()) + defer sHTTP.Stop() + + destURL := "http://" + sHTTP.Addr() + "/baz" + + urlParsed, err := url.Parse(destURL) + if err != nil { + fmt.Println(err) + return + } + + var reqClient *HostClient + + reqClient = &HostClient{ + Addr: urlParsed.Host, + TLSConfig: &tls.Config{ + InsecureSkipVerify: true, + }, + } + + statusCode, _, err := reqClient.GetTimeout(nil, destURL, 4000*time.Millisecond) + if err != nil { + t.Fatalf("HostClient error: %s", err) + return + } + + if statusCode != 200 { + t.Fatalf("HostClient error code response %d", statusCode) + return + } + +} + +func testHostClientRedirectChangingSchemaServer(t *testing.T, network, addr string, isTLS bool, redirectTo string) *testEchoServer { + var ln net.Listener + var err error + if isTLS { + certFile := "./ssl-cert-snakeoil.pem" + keyFile := "./ssl-cert-snakeoil.key" + cert, err1 := tls.LoadX509KeyPair(certFile, keyFile) + if err1 != nil { + t.Fatalf("Cannot load 
TLS certificate: %s", err1) + } + tlsConfig := &tls.Config{ + Certificates: []tls.Certificate{cert}, + } + ln, err = tls.Listen(network, addr, tlsConfig) + } else { + ln, err = net.Listen(network, addr) + } + if err != nil { + t.Fatalf("cannot listen %q: %s", addr, err) + } + + s := &Server{ + Handler: func(ctx *RequestCtx) { + if redirectTo == "" { + ctx.SetStatusCode(200) + } else { + ctx.Redirect("https://"+redirectTo+"/baz", 301) + ctx.SetConnectionClose() + } + }, + } + + ch := make(chan struct{}) + go func() { + err := s.Serve(ln) + if err != nil { + t.Fatalf("unexpected error returned from Serve(): %s", err) + } + close(ch) + }() + return &testEchoServer{ + s: s, + ln: ln, + ch: ch, + t: t, + } +} diff --git a/vendor/github.com/erikdubbelboer/fasthttp/client_timing_test.go b/vendor/github.com/erikdubbelboer/fasthttp/client_timing_test.go new file mode 100644 index 0000000..f9acb89 --- /dev/null +++ b/vendor/github.com/erikdubbelboer/fasthttp/client_timing_test.go @@ -0,0 +1,628 @@ +package fasthttp + +import ( + "bytes" + "fmt" + "io/ioutil" + "net" + "net/http" + "runtime" + "strings" + "sync" + "sync/atomic" + "testing" + "time" + + "github.com/erikdubbelboer/fasthttp/fasthttputil" +) + +type fakeClientConn struct { + net.Conn + s []byte + n int + ch chan struct{} +} + +func (c *fakeClientConn) Write(b []byte) (int, error) { + c.ch <- struct{}{} + return len(b), nil +} + +func (c *fakeClientConn) Read(b []byte) (int, error) { + if c.n == 0 { + // wait for request :) + <-c.ch + } + n := 0 + for len(b) > 0 { + if c.n == len(c.s) { + c.n = 0 + return n, nil + } + n = copy(b, c.s[c.n:]) + c.n += n + b = b[n:] + } + return n, nil +} + +func (c *fakeClientConn) Close() error { + releaseFakeServerConn(c) + return nil +} + +func releaseFakeServerConn(c *fakeClientConn) { + c.n = 0 + fakeClientConnPool.Put(c) +} + +func acquireFakeServerConn(s []byte) *fakeClientConn { + v := fakeClientConnPool.Get() + if v == nil { + c := &fakeClientConn{ + s: s, + ch: make(chan struct{}, 1), + } + return c + } + return v.(*fakeClientConn) +} + +var fakeClientConnPool sync.Pool + +func BenchmarkClientGetTimeoutFastServer(b *testing.B) { + body := []byte("123456789099") + s := []byte(fmt.Sprintf("HTTP/1.1 200 OK\r\nContent-Type: text/plain\r\nContent-Length: %d\r\n\r\n%s", len(body), body)) + c := &Client{ + Dial: func(addr string) (net.Conn, error) { + return acquireFakeServerConn(s), nil + }, + } + + nn := uint32(0) + b.RunParallel(func(pb *testing.PB) { + url := fmt.Sprintf("http://foobar%d.com/aaa/bbb", atomic.AddUint32(&nn, 1)) + var statusCode int + var bodyBuf []byte + var err error + for pb.Next() { + statusCode, bodyBuf, err = c.GetTimeout(bodyBuf[:0], url, time.Second) + if err != nil { + b.Fatalf("unexpected error: %s", err) + } + if statusCode != StatusOK { + b.Fatalf("unexpected status code: %d", statusCode) + } + if !bytes.Equal(bodyBuf, body) { + b.Fatalf("unexpected response body: %q. 
Expected %q", bodyBuf, body) + } + } + }) +} + +func BenchmarkClientDoFastServer(b *testing.B) { + body := []byte("012345678912") + s := []byte(fmt.Sprintf("HTTP/1.1 200 OK\r\nContent-Type: text/plain\r\nContent-Length: %d\r\n\r\n%s", len(body), body)) + c := &Client{ + Dial: func(addr string) (net.Conn, error) { + return acquireFakeServerConn(s), nil + }, + MaxConnsPerHost: runtime.GOMAXPROCS(-1), + } + + nn := uint32(0) + b.RunParallel(func(pb *testing.PB) { + var req Request + var resp Response + req.Header.SetRequestURI(fmt.Sprintf("http://foobar%d.com/aaa/bbb", atomic.AddUint32(&nn, 1))) + for pb.Next() { + if err := c.Do(&req, &resp); err != nil { + b.Fatalf("unexpected error: %s", err) + } + if resp.Header.StatusCode() != StatusOK { + b.Fatalf("unexpected status code: %d", resp.Header.StatusCode()) + } + if !bytes.Equal(resp.Body(), body) { + b.Fatalf("unexpected response body: %q. Expected %q", resp.Body(), body) + } + } + }) +} + +func BenchmarkNetHTTPClientDoFastServer(b *testing.B) { + body := []byte("012345678912") + s := []byte(fmt.Sprintf("HTTP/1.1 200 OK\r\nContent-Type: text/plain\r\nContent-Length: %d\r\n\r\n%s", len(body), body)) + c := &http.Client{ + Transport: &http.Transport{ + Dial: func(network, addr string) (net.Conn, error) { + return acquireFakeServerConn(s), nil + }, + MaxIdleConnsPerHost: runtime.GOMAXPROCS(-1), + }, + } + + nn := uint32(0) + b.RunParallel(func(pb *testing.PB) { + req, err := http.NewRequest("GET", fmt.Sprintf("http://foobar%d.com/aaa/bbb", atomic.AddUint32(&nn, 1)), nil) + if err != nil { + b.Fatalf("unexpected error: %s", err) + } + for pb.Next() { + resp, err := c.Do(req) + if err != nil { + b.Fatalf("unexpected error: %s", err) + } + if resp.StatusCode != http.StatusOK { + b.Fatalf("unexpected status code: %d", resp.StatusCode) + } + respBody, err := ioutil.ReadAll(resp.Body) + resp.Body.Close() + if err != nil { + b.Fatalf("unexpected error when reading response body: %s", err) + } + if !bytes.Equal(respBody, body) { + b.Fatalf("unexpected response body: %q. Expected %q", respBody, body) + } + } + }) +} + +func fasthttpEchoHandler(ctx *RequestCtx) { + ctx.Success("text/plain", ctx.RequestURI()) +} + +func nethttpEchoHandler(w http.ResponseWriter, r *http.Request) { + w.Header().Set("Content-Type", "text/plain") + w.Write([]byte(r.RequestURI)) +} + +func BenchmarkClientGetEndToEnd1TCP(b *testing.B) { + benchmarkClientGetEndToEndTCP(b, 1) +} + +func BenchmarkClientGetEndToEnd10TCP(b *testing.B) { + benchmarkClientGetEndToEndTCP(b, 10) +} + +func BenchmarkClientGetEndToEnd100TCP(b *testing.B) { + benchmarkClientGetEndToEndTCP(b, 100) +} + +func benchmarkClientGetEndToEndTCP(b *testing.B, parallelism int) { + addr := "127.0.0.1:8543" + + ln, err := net.Listen("tcp4", addr) + if err != nil { + b.Fatalf("cannot listen %q: %s", addr, err) + } + + ch := make(chan struct{}) + go func() { + if err := Serve(ln, fasthttpEchoHandler); err != nil { + b.Fatalf("error when serving requests: %s", err) + } + close(ch) + }() + + c := &Client{ + MaxConnsPerHost: runtime.GOMAXPROCS(-1) * parallelism, + } + + requestURI := "/foo/bar?baz=123" + url := "http://" + addr + requestURI + b.SetParallelism(parallelism) + b.RunParallel(func(pb *testing.PB) { + var buf []byte + for pb.Next() { + statusCode, body, err := c.Get(buf, url) + if err != nil { + b.Fatalf("unexpected error: %s", err) + } + if statusCode != StatusOK { + b.Fatalf("unexpected status code: %d. 
Expecting %d", statusCode, StatusOK) + } + if string(body) != requestURI { + b.Fatalf("unexpected response %q. Expecting %q", body, requestURI) + } + buf = body + } + }) + + ln.Close() + select { + case <-ch: + case <-time.After(time.Second): + b.Fatalf("server wasn't stopped") + } +} + +func BenchmarkNetHTTPClientGetEndToEnd1TCP(b *testing.B) { + benchmarkNetHTTPClientGetEndToEndTCP(b, 1) +} + +func BenchmarkNetHTTPClientGetEndToEnd10TCP(b *testing.B) { + benchmarkNetHTTPClientGetEndToEndTCP(b, 10) +} + +func BenchmarkNetHTTPClientGetEndToEnd100TCP(b *testing.B) { + benchmarkNetHTTPClientGetEndToEndTCP(b, 100) +} + +func benchmarkNetHTTPClientGetEndToEndTCP(b *testing.B, parallelism int) { + addr := "127.0.0.1:8542" + + ln, err := net.Listen("tcp4", addr) + if err != nil { + b.Fatalf("cannot listen %q: %s", addr, err) + } + + ch := make(chan struct{}) + go func() { + if err := http.Serve(ln, http.HandlerFunc(nethttpEchoHandler)); err != nil && !strings.Contains( + err.Error(), "use of closed network connection") { + b.Fatalf("error when serving requests: %s", err) + } + close(ch) + }() + + c := &http.Client{ + Transport: &http.Transport{ + MaxIdleConnsPerHost: parallelism * runtime.GOMAXPROCS(-1), + }, + } + + requestURI := "/foo/bar?baz=123" + url := "http://" + addr + requestURI + b.SetParallelism(parallelism) + b.RunParallel(func(pb *testing.PB) { + for pb.Next() { + resp, err := c.Get(url) + if err != nil { + b.Fatalf("unexpected error: %s", err) + } + if resp.StatusCode != http.StatusOK { + b.Fatalf("unexpected status code: %d. Expecting %d", resp.StatusCode, http.StatusOK) + } + body, err := ioutil.ReadAll(resp.Body) + resp.Body.Close() + if err != nil { + b.Fatalf("unexpected error when reading response body: %s", err) + } + if string(body) != requestURI { + b.Fatalf("unexpected response %q. Expecting %q", body, requestURI) + } + } + }) + + ln.Close() + select { + case <-ch: + case <-time.After(time.Second): + b.Fatalf("server wasn't stopped") + } +} + +func BenchmarkClientGetEndToEnd1Inmemory(b *testing.B) { + benchmarkClientGetEndToEndInmemory(b, 1) +} + +func BenchmarkClientGetEndToEnd10Inmemory(b *testing.B) { + benchmarkClientGetEndToEndInmemory(b, 10) +} + +func BenchmarkClientGetEndToEnd100Inmemory(b *testing.B) { + benchmarkClientGetEndToEndInmemory(b, 100) +} + +func BenchmarkClientGetEndToEnd1000Inmemory(b *testing.B) { + benchmarkClientGetEndToEndInmemory(b, 1000) +} + +func BenchmarkClientGetEndToEnd10KInmemory(b *testing.B) { + benchmarkClientGetEndToEndInmemory(b, 10000) +} + +func benchmarkClientGetEndToEndInmemory(b *testing.B, parallelism int) { + ln := fasthttputil.NewInmemoryListener() + + ch := make(chan struct{}) + go func() { + if err := Serve(ln, fasthttpEchoHandler); err != nil { + b.Fatalf("error when serving requests: %s", err) + } + close(ch) + }() + + c := &Client{ + MaxConnsPerHost: runtime.GOMAXPROCS(-1) * parallelism, + Dial: func(addr string) (net.Conn, error) { return ln.Dial() }, + } + + requestURI := "/foo/bar?baz=123" + url := "http://unused.host" + requestURI + b.SetParallelism(parallelism) + b.RunParallel(func(pb *testing.PB) { + var buf []byte + for pb.Next() { + statusCode, body, err := c.Get(buf, url) + if err != nil { + b.Fatalf("unexpected error: %s", err) + } + if statusCode != StatusOK { + b.Fatalf("unexpected status code: %d. Expecting %d", statusCode, StatusOK) + } + if string(body) != requestURI { + b.Fatalf("unexpected response %q. 
Expecting %q", body, requestURI) + } + buf = body + } + }) + + ln.Close() + select { + case <-ch: + case <-time.After(time.Second): + b.Fatalf("server wasn't stopped") + } +} + +func BenchmarkNetHTTPClientGetEndToEnd1Inmemory(b *testing.B) { + benchmarkNetHTTPClientGetEndToEndInmemory(b, 1) +} + +func BenchmarkNetHTTPClientGetEndToEnd10Inmemory(b *testing.B) { + benchmarkNetHTTPClientGetEndToEndInmemory(b, 10) +} + +func BenchmarkNetHTTPClientGetEndToEnd100Inmemory(b *testing.B) { + benchmarkNetHTTPClientGetEndToEndInmemory(b, 100) +} + +func BenchmarkNetHTTPClientGetEndToEnd1000Inmemory(b *testing.B) { + benchmarkNetHTTPClientGetEndToEndInmemory(b, 1000) +} + +func benchmarkNetHTTPClientGetEndToEndInmemory(b *testing.B, parallelism int) { + ln := fasthttputil.NewInmemoryListener() + + ch := make(chan struct{}) + go func() { + if err := http.Serve(ln, http.HandlerFunc(nethttpEchoHandler)); err != nil && !strings.Contains( + err.Error(), "use of closed network connection") { + b.Fatalf("error when serving requests: %s", err) + } + close(ch) + }() + + c := &http.Client{ + Transport: &http.Transport{ + Dial: func(_, _ string) (net.Conn, error) { return ln.Dial() }, + MaxIdleConnsPerHost: parallelism * runtime.GOMAXPROCS(-1), + }, + } + + requestURI := "/foo/bar?baz=123" + url := "http://unused.host" + requestURI + b.SetParallelism(parallelism) + b.RunParallel(func(pb *testing.PB) { + for pb.Next() { + resp, err := c.Get(url) + if err != nil { + b.Fatalf("unexpected error: %s", err) + } + if resp.StatusCode != http.StatusOK { + b.Fatalf("unexpected status code: %d. Expecting %d", resp.StatusCode, http.StatusOK) + } + body, err := ioutil.ReadAll(resp.Body) + resp.Body.Close() + if err != nil { + b.Fatalf("unexpected error when reading response body: %s", err) + } + if string(body) != requestURI { + b.Fatalf("unexpected response %q. Expecting %q", body, requestURI) + } + } + }) + + ln.Close() + select { + case <-ch: + case <-time.After(time.Second): + b.Fatalf("server wasn't stopped") + } +} + +func BenchmarkClientEndToEndBigResponse1Inmemory(b *testing.B) { + benchmarkClientEndToEndBigResponseInmemory(b, 1) +} + +func BenchmarkClientEndToEndBigResponse10Inmemory(b *testing.B) { + benchmarkClientEndToEndBigResponseInmemory(b, 10) +} + +func benchmarkClientEndToEndBigResponseInmemory(b *testing.B, parallelism int) { + bigResponse := createFixedBody(1024 * 1024) + h := func(ctx *RequestCtx) { + ctx.SetContentType("text/plain") + ctx.Write(bigResponse) + } + + ln := fasthttputil.NewInmemoryListener() + + ch := make(chan struct{}) + go func() { + if err := Serve(ln, h); err != nil { + b.Fatalf("error when serving requests: %s", err) + } + close(ch) + }() + + c := &Client{ + MaxConnsPerHost: runtime.GOMAXPROCS(-1) * parallelism, + Dial: func(addr string) (net.Conn, error) { return ln.Dial() }, + } + + requestURI := "/foo/bar?baz=123" + url := "http://unused.host" + requestURI + b.SetParallelism(parallelism) + b.RunParallel(func(pb *testing.PB) { + var req Request + req.SetRequestURI(url) + var resp Response + for pb.Next() { + if err := c.DoTimeout(&req, &resp, 5*time.Second); err != nil { + b.Fatalf("unexpected error: %s", err) + } + if resp.StatusCode() != StatusOK { + b.Fatalf("unexpected status code: %d. Expecting %d", resp.StatusCode(), StatusOK) + } + body := resp.Body() + if !bytes.Equal(bigResponse, body) { + b.Fatalf("unexpected response %q. 
Expecting %q", body, bigResponse) + } + } + }) + + ln.Close() + select { + case <-ch: + case <-time.After(time.Second): + b.Fatalf("server wasn't stopped") + } +} + +func BenchmarkNetHTTPClientEndToEndBigResponse1Inmemory(b *testing.B) { + benchmarkNetHTTPClientEndToEndBigResponseInmemory(b, 1) +} + +func BenchmarkNetHTTPClientEndToEndBigResponse10Inmemory(b *testing.B) { + benchmarkNetHTTPClientEndToEndBigResponseInmemory(b, 10) +} + +func benchmarkNetHTTPClientEndToEndBigResponseInmemory(b *testing.B, parallelism int) { + bigResponse := createFixedBody(1024 * 1024) + h := func(w http.ResponseWriter, r *http.Request) { + w.Header().Set("Content-Type", "text/plain") + w.Write(bigResponse) + } + ln := fasthttputil.NewInmemoryListener() + + ch := make(chan struct{}) + go func() { + if err := http.Serve(ln, http.HandlerFunc(h)); err != nil && !strings.Contains( + err.Error(), "use of closed network connection") { + b.Fatalf("error when serving requests: %s", err) + } + close(ch) + }() + + c := &http.Client{ + Transport: &http.Transport{ + Dial: func(_, _ string) (net.Conn, error) { return ln.Dial() }, + MaxIdleConnsPerHost: parallelism * runtime.GOMAXPROCS(-1), + }, + Timeout: 5 * time.Second, + } + + requestURI := "/foo/bar?baz=123" + url := "http://unused.host" + requestURI + b.SetParallelism(parallelism) + b.RunParallel(func(pb *testing.PB) { + req, err := http.NewRequest("GET", url, nil) + if err != nil { + b.Fatalf("unexpected error: %s", err) + } + for pb.Next() { + resp, err := c.Do(req) + if err != nil { + b.Fatalf("unexpected error: %s", err) + } + if resp.StatusCode != http.StatusOK { + b.Fatalf("unexpected status code: %d. Expecting %d", resp.StatusCode, http.StatusOK) + } + body, err := ioutil.ReadAll(resp.Body) + resp.Body.Close() + if err != nil { + b.Fatalf("unexpected error when reading response body: %s", err) + } + if !bytes.Equal(bigResponse, body) { + b.Fatalf("unexpected response %q. Expecting %q", body, bigResponse) + } + } + }) + + ln.Close() + select { + case <-ch: + case <-time.After(time.Second): + b.Fatalf("server wasn't stopped") + } +} + +func BenchmarkPipelineClient1(b *testing.B) { + benchmarkPipelineClient(b, 1) +} + +func BenchmarkPipelineClient10(b *testing.B) { + benchmarkPipelineClient(b, 10) +} + +func BenchmarkPipelineClient100(b *testing.B) { + benchmarkPipelineClient(b, 100) +} + +func BenchmarkPipelineClient1000(b *testing.B) { + benchmarkPipelineClient(b, 1000) +} + +func benchmarkPipelineClient(b *testing.B, parallelism int) { + h := func(ctx *RequestCtx) { + ctx.WriteString("foobar") + } + ln := fasthttputil.NewInmemoryListener() + + ch := make(chan struct{}) + go func() { + if err := Serve(ln, h); err != nil { + b.Fatalf("error when serving requests: %s", err) + } + close(ch) + }() + + maxConns := runtime.GOMAXPROCS(-1) + c := &PipelineClient{ + Dial: func(addr string) (net.Conn, error) { return ln.Dial() }, + ReadBufferSize: 1024 * 1024, + WriteBufferSize: 1024 * 1024, + MaxConns: maxConns, + MaxPendingRequests: parallelism * maxConns, + } + + requestURI := "/foo/bar?baz=123" + url := "http://unused.host" + requestURI + b.SetParallelism(parallelism) + b.RunParallel(func(pb *testing.PB) { + var req Request + req.SetRequestURI(url) + var resp Response + for pb.Next() { + if err := c.Do(&req, &resp); err != nil { + b.Fatalf("unexpected error: %s", err) + } + if resp.StatusCode() != StatusOK { + b.Fatalf("unexpected status code: %d. 
Expecting %d", resp.StatusCode(), StatusOK) + } + body := resp.Body() + if string(body) != "foobar" { + b.Fatalf("unexpected response %q. Expecting %q", body, "foobar") + } + } + }) + + ln.Close() + select { + case <-ch: + case <-time.After(time.Second): + b.Fatalf("server wasn't stopped") + } +} diff --git a/vendor/github.com/erikdubbelboer/fasthttp/compress.go b/vendor/github.com/erikdubbelboer/fasthttp/compress.go new file mode 100644 index 0000000..6bcad61 --- /dev/null +++ b/vendor/github.com/erikdubbelboer/fasthttp/compress.go @@ -0,0 +1,440 @@ +package fasthttp + +import ( + "bytes" + "fmt" + "io" + "os" + "sync" + + "github.com/erikdubbelboer/fasthttp/stackless" + "github.com/klauspost/compress/flate" + "github.com/klauspost/compress/gzip" + "github.com/klauspost/compress/zlib" + "github.com/valyala/bytebufferpool" +) + +// Supported compression levels. +const ( + CompressNoCompression = flate.NoCompression + CompressBestSpeed = flate.BestSpeed + CompressBestCompression = flate.BestCompression + CompressDefaultCompression = 6 // flate.DefaultCompression + CompressHuffmanOnly = -2 // flate.HuffmanOnly +) + +func acquireGzipReader(r io.Reader) (*gzip.Reader, error) { + v := gzipReaderPool.Get() + if v == nil { + return gzip.NewReader(r) + } + zr := v.(*gzip.Reader) + if err := zr.Reset(r); err != nil { + return nil, err + } + return zr, nil +} + +func releaseGzipReader(zr *gzip.Reader) { + zr.Close() + gzipReaderPool.Put(zr) +} + +var gzipReaderPool sync.Pool + +func acquireFlateReader(r io.Reader) (io.ReadCloser, error) { + v := flateReaderPool.Get() + if v == nil { + zr, err := zlib.NewReader(r) + if err != nil { + return nil, err + } + return zr, nil + } + zr := v.(io.ReadCloser) + if err := resetFlateReader(zr, r); err != nil { + return nil, err + } + return zr, nil +} + +func releaseFlateReader(zr io.ReadCloser) { + zr.Close() + flateReaderPool.Put(zr) +} + +func resetFlateReader(zr io.ReadCloser, r io.Reader) error { + zrr, ok := zr.(zlib.Resetter) + if !ok { + panic("BUG: zlib.Reader doesn't implement zlib.Resetter???") + } + return zrr.Reset(r, nil) +} + +var flateReaderPool sync.Pool + +func acquireStacklessGzipWriter(w io.Writer, level int) stackless.Writer { + nLevel := normalizeCompressLevel(level) + p := stacklessGzipWriterPoolMap[nLevel] + v := p.Get() + if v == nil { + return stackless.NewWriter(w, func(w io.Writer) stackless.Writer { + return acquireRealGzipWriter(w, level) + }) + } + sw := v.(stackless.Writer) + sw.Reset(w) + return sw +} + +func releaseStacklessGzipWriter(sw stackless.Writer, level int) { + sw.Close() + nLevel := normalizeCompressLevel(level) + p := stacklessGzipWriterPoolMap[nLevel] + p.Put(sw) +} + +func acquireRealGzipWriter(w io.Writer, level int) *gzip.Writer { + nLevel := normalizeCompressLevel(level) + p := realGzipWriterPoolMap[nLevel] + v := p.Get() + if v == nil { + zw, err := gzip.NewWriterLevel(w, level) + if err != nil { + panic(fmt.Sprintf("BUG: unexpected error from gzip.NewWriterLevel(%d): %s", level, err)) + } + return zw + } + zw := v.(*gzip.Writer) + zw.Reset(w) + return zw +} + +func releaseRealGzipWriter(zw *gzip.Writer, level int) { + zw.Close() + nLevel := normalizeCompressLevel(level) + p := realGzipWriterPoolMap[nLevel] + p.Put(zw) +} + +var ( + stacklessGzipWriterPoolMap = newCompressWriterPoolMap() + realGzipWriterPoolMap = newCompressWriterPoolMap() +) + +// AppendGzipBytesLevel appends gzipped src to dst using the given +// compression level and returns the resulting dst. 
+// +// Supported compression levels are: +// +// * CompressNoCompression +// * CompressBestSpeed +// * CompressBestCompression +// * CompressDefaultCompression +// * CompressHuffmanOnly +func AppendGzipBytesLevel(dst, src []byte, level int) []byte { + w := &byteSliceWriter{dst} + WriteGzipLevel(w, src, level) + return w.b +} + +// WriteGzipLevel writes gzipped p to w using the given compression level +// and returns the number of compressed bytes written to w. +// +// Supported compression levels are: +// +// * CompressNoCompression +// * CompressBestSpeed +// * CompressBestCompression +// * CompressDefaultCompression +// * CompressHuffmanOnly +func WriteGzipLevel(w io.Writer, p []byte, level int) (int, error) { + switch w.(type) { + case *byteSliceWriter, + *bytes.Buffer, + *ByteBuffer, + *bytebufferpool.ByteBuffer: + // These writers don't block, so we can just use stacklessWriteGzip + ctx := &compressCtx{ + w: w, + p: p, + level: level, + } + stacklessWriteGzip(ctx) + return len(p), nil + default: + zw := acquireStacklessGzipWriter(w, level) + n, err := zw.Write(p) + releaseStacklessGzipWriter(zw, level) + return n, err + } +} + +var stacklessWriteGzip = stackless.NewFunc(nonblockingWriteGzip) + +func nonblockingWriteGzip(ctxv interface{}) { + ctx := ctxv.(*compressCtx) + zw := acquireRealGzipWriter(ctx.w, ctx.level) + + _, err := zw.Write(ctx.p) + if err != nil { + panic(fmt.Sprintf("BUG: gzip.Writer.Write for len(p)=%d returned unexpected error: %s", len(ctx.p), err)) + } + + releaseRealGzipWriter(zw, ctx.level) +} + +// WriteGzip writes gzipped p to w and returns the number of compressed +// bytes written to w. +func WriteGzip(w io.Writer, p []byte) (int, error) { + return WriteGzipLevel(w, p, CompressDefaultCompression) +} + +// AppendGzipBytes appends gzipped src to dst and returns the resulting dst. +func AppendGzipBytes(dst, src []byte) []byte { + return AppendGzipBytesLevel(dst, src, CompressDefaultCompression) +} + +// WriteGunzip writes ungzipped p to w and returns the number of uncompressed +// bytes written to w. +func WriteGunzip(w io.Writer, p []byte) (int, error) { + r := &byteSliceReader{p} + zr, err := acquireGzipReader(r) + if err != nil { + return 0, err + } + n, err := copyZeroAlloc(w, zr) + releaseGzipReader(zr) + nn := int(n) + if int64(nn) != n { + return 0, fmt.Errorf("too much data gunzipped: %d", n) + } + return nn, err +} + +// AppendGunzipBytes appends gunzipped src to dst and returns the resulting dst. +func AppendGunzipBytes(dst, src []byte) ([]byte, error) { + w := &byteSliceWriter{dst} + _, err := WriteGunzip(w, src) + return w.b, err +} + +// AppendDeflateBytesLevel appends deflated src to dst using the given +// compression level and returns the resulting dst. +// +// Supported compression levels are: +// +// * CompressNoCompression +// * CompressBestSpeed +// * CompressBestCompression +// * CompressDefaultCompression +// * CompressHuffmanOnly +func AppendDeflateBytesLevel(dst, src []byte, level int) []byte { + w := &byteSliceWriter{dst} + WriteDeflateLevel(w, src, level) + return w.b +} + +// WriteDeflateLevel writes deflated p to w using the given compression level +// and returns the number of compressed bytes written to w. 
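The append-style helpers above (AppendGzipBytesLevel, AppendGzipBytes, AppendGunzipBytes) compress into and out of plain byte slices, reusing pooled writers and readers internally. A minimal round-trip sketch, assuming the vendored import path; the payload is illustrative:

package main

import (
	"fmt"
	"log"

	"github.com/erikdubbelboer/fasthttp"
)

func main() {
	src := []byte("hello, fasthttp compression")

	// Compress into a fresh slice (dst == nil) at the default level.
	gz := fasthttp.AppendGzipBytesLevel(nil, src, fasthttp.CompressDefaultCompression)

	// Decompress back; the helper appends to dst and reports corrupt input.
	plain, err := fasthttp.AppendGunzipBytes(nil, gz)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Printf("%s\n", plain)
}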
+// +// Supported compression levels are: +// +// * CompressNoCompression +// * CompressBestSpeed +// * CompressBestCompression +// * CompressDefaultCompression +// * CompressHuffmanOnly +func WriteDeflateLevel(w io.Writer, p []byte, level int) (int, error) { + switch w.(type) { + case *byteSliceWriter, + *bytes.Buffer, + *ByteBuffer, + *bytebufferpool.ByteBuffer: + // These writers don't block, so we can just use stacklessWriteDeflate + ctx := &compressCtx{ + w: w, + p: p, + level: level, + } + stacklessWriteDeflate(ctx) + return len(p), nil + default: + zw := acquireStacklessDeflateWriter(w, level) + n, err := zw.Write(p) + releaseStacklessDeflateWriter(zw, level) + return n, err + } +} + +var stacklessWriteDeflate = stackless.NewFunc(nonblockingWriteDeflate) + +func nonblockingWriteDeflate(ctxv interface{}) { + ctx := ctxv.(*compressCtx) + zw := acquireRealDeflateWriter(ctx.w, ctx.level) + + _, err := zw.Write(ctx.p) + if err != nil { + panic(fmt.Sprintf("BUG: zlib.Writer.Write for len(p)=%d returned unexpected error: %s", len(ctx.p), err)) + } + + releaseRealDeflateWriter(zw, ctx.level) +} + +type compressCtx struct { + w io.Writer + p []byte + level int +} + +// WriteDeflate writes deflated p to w and returns the number of compressed +// bytes written to w. +func WriteDeflate(w io.Writer, p []byte) (int, error) { + return WriteDeflateLevel(w, p, CompressDefaultCompression) +} + +// AppendDeflateBytes appends deflated src to dst and returns the resulting dst. +func AppendDeflateBytes(dst, src []byte) []byte { + return AppendDeflateBytesLevel(dst, src, CompressDefaultCompression) +} + +// WriteInflate writes inflated p to w and returns the number of uncompressed +// bytes written to w. +func WriteInflate(w io.Writer, p []byte) (int, error) { + r := &byteSliceReader{p} + zr, err := acquireFlateReader(r) + if err != nil { + return 0, err + } + n, err := copyZeroAlloc(w, zr) + releaseFlateReader(zr) + nn := int(n) + if int64(nn) != n { + return 0, fmt.Errorf("too much data inflated: %d", n) + } + return nn, err +} + +// AppendInflateBytes appends inflated src to dst and returns the resulting dst. +func AppendInflateBytes(dst, src []byte) ([]byte, error) { + w := &byteSliceWriter{dst} + _, err := WriteInflate(w, src) + return w.b, err +} + +type byteSliceWriter struct { + b []byte +} + +func (w *byteSliceWriter) Write(p []byte) (int, error) { + w.b = append(w.b, p...) 
+ return len(p), nil +} + +type byteSliceReader struct { + b []byte +} + +func (r *byteSliceReader) Read(p []byte) (int, error) { + if len(r.b) == 0 { + return 0, io.EOF + } + n := copy(p, r.b) + r.b = r.b[n:] + return n, nil +} + +func acquireStacklessDeflateWriter(w io.Writer, level int) stackless.Writer { + nLevel := normalizeCompressLevel(level) + p := stacklessDeflateWriterPoolMap[nLevel] + v := p.Get() + if v == nil { + return stackless.NewWriter(w, func(w io.Writer) stackless.Writer { + return acquireRealDeflateWriter(w, level) + }) + } + sw := v.(stackless.Writer) + sw.Reset(w) + return sw +} + +func releaseStacklessDeflateWriter(sw stackless.Writer, level int) { + sw.Close() + nLevel := normalizeCompressLevel(level) + p := stacklessDeflateWriterPoolMap[nLevel] + p.Put(sw) +} + +func acquireRealDeflateWriter(w io.Writer, level int) *zlib.Writer { + nLevel := normalizeCompressLevel(level) + p := realDeflateWriterPoolMap[nLevel] + v := p.Get() + if v == nil { + zw, err := zlib.NewWriterLevel(w, level) + if err != nil { + panic(fmt.Sprintf("BUG: unexpected error from zlib.NewWriterLevel(%d): %s", level, err)) + } + return zw + } + zw := v.(*zlib.Writer) + zw.Reset(w) + return zw +} + +func releaseRealDeflateWriter(zw *zlib.Writer, level int) { + zw.Close() + nLevel := normalizeCompressLevel(level) + p := realDeflateWriterPoolMap[nLevel] + p.Put(zw) +} + +var ( + stacklessDeflateWriterPoolMap = newCompressWriterPoolMap() + realDeflateWriterPoolMap = newCompressWriterPoolMap() +) + +func newCompressWriterPoolMap() []*sync.Pool { + // Initialize pools for all the compression levels defined + // in https://golang.org/pkg/compress/flate/#pkg-constants . + // Compression levels are normalized with normalizeCompressLevel, + // so the fit [0..11]. + var m []*sync.Pool + for i := 0; i < 12; i++ { + m = append(m, &sync.Pool{}) + } + return m +} + +func isFileCompressible(f *os.File, minCompressRatio float64) bool { + // Try compressing the first 4kb of of the file + // and see if it can be compressed by more than + // the given minCompressRatio. + b := AcquireByteBuffer() + zw := acquireStacklessGzipWriter(b, CompressDefaultCompression) + lr := &io.LimitedReader{ + R: f, + N: 4096, + } + _, err := copyZeroAlloc(zw, lr) + releaseStacklessGzipWriter(zw, CompressDefaultCompression) + f.Seek(0, 0) + if err != nil { + return false + } + + n := 4096 - lr.N + zn := len(b.B) + ReleaseByteBuffer(b) + return float64(zn) < float64(n)*minCompressRatio +} + +// normalizes compression level into [0..11], so it could be used as an index +// in *PoolMap. 
+func normalizeCompressLevel(level int) int { + // -2 is the lowest compression level - CompressHuffmanOnly + // 9 is the highest compression level - CompressBestCompression + if level < -2 || level > 9 { + level = CompressDefaultCompression + } + return level + 2 +} diff --git a/vendor/github.com/erikdubbelboer/fasthttp/compress_test.go b/vendor/github.com/erikdubbelboer/fasthttp/compress_test.go new file mode 100644 index 0000000..f0cdaff --- /dev/null +++ b/vendor/github.com/erikdubbelboer/fasthttp/compress_test.go @@ -0,0 +1,216 @@ +package fasthttp + +import ( + "bytes" + "fmt" + "io/ioutil" + "testing" + "time" +) + +var compressTestcases = func() []string { + a := []string{ + "", + "foobar", + "выфаодлодл одлфываыв sd2 k34", + } + bigS := createFixedBody(1e4) + a = append(a, string(bigS)) + return a +}() + +func TestGzipBytesSerial(t *testing.T) { + if err := testGzipBytes(); err != nil { + t.Fatal(err) + } +} + +func TestGzipBytesConcurrent(t *testing.T) { + if err := testConcurrent(10, testGzipBytes); err != nil { + t.Fatal(err) + } +} + +func TestDeflateBytesSerial(t *testing.T) { + if err := testDeflateBytes(); err != nil { + t.Fatal(err) + } +} + +func TestDeflateBytesConcurrent(t *testing.T) { + if err := testConcurrent(10, testDeflateBytes); err != nil { + t.Fatal(err) + } +} + +func testGzipBytes() error { + for _, s := range compressTestcases { + if err := testGzipBytesSingleCase(s); err != nil { + return err + } + } + return nil +} + +func testDeflateBytes() error { + for _, s := range compressTestcases { + if err := testDeflateBytesSingleCase(s); err != nil { + return err + } + } + return nil +} + +func testGzipBytesSingleCase(s string) error { + prefix := []byte("foobar") + gzippedS := AppendGzipBytes(prefix, []byte(s)) + if !bytes.Equal(gzippedS[:len(prefix)], prefix) { + return fmt.Errorf("unexpected prefix when compressing %q: %q. Expecting %q", s, gzippedS[:len(prefix)], prefix) + } + + gunzippedS, err := AppendGunzipBytes(prefix, gzippedS[len(prefix):]) + if err != nil { + return fmt.Errorf("unexpected error when uncompressing %q: %s", s, err) + } + if !bytes.Equal(gunzippedS[:len(prefix)], prefix) { + return fmt.Errorf("unexpected prefix when uncompressing %q: %q. Expecting %q", s, gunzippedS[:len(prefix)], prefix) + } + gunzippedS = gunzippedS[len(prefix):] + if string(gunzippedS) != s { + return fmt.Errorf("unexpected uncompressed string %q. Expecting %q", gunzippedS, s) + } + return nil +} + +func testDeflateBytesSingleCase(s string) error { + prefix := []byte("foobar") + deflatedS := AppendDeflateBytes(prefix, []byte(s)) + if !bytes.Equal(deflatedS[:len(prefix)], prefix) { + return fmt.Errorf("unexpected prefix when compressing %q: %q. Expecting %q", s, deflatedS[:len(prefix)], prefix) + } + + inflatedS, err := AppendInflateBytes(prefix, deflatedS[len(prefix):]) + if err != nil { + return fmt.Errorf("unexpected error when uncompressing %q: %s", s, err) + } + if !bytes.Equal(inflatedS[:len(prefix)], prefix) { + return fmt.Errorf("unexpected prefix when uncompressing %q: %q. Expecting %q", s, inflatedS[:len(prefix)], prefix) + } + inflatedS = inflatedS[len(prefix):] + if string(inflatedS) != s { + return fmt.Errorf("unexpected uncompressed string %q. 
Expecting %q", inflatedS, s) + } + return nil +} + +func TestGzipCompressSerial(t *testing.T) { + if err := testGzipCompress(); err != nil { + t.Fatal(err) + } +} + +func TestGzipCompressConcurrent(t *testing.T) { + if err := testConcurrent(10, testGzipCompress); err != nil { + t.Fatal(err) + } +} + +func TestFlateCompressSerial(t *testing.T) { + if err := testFlateCompress(); err != nil { + t.Fatal(err) + } +} + +func TestFlateCompressConcurrent(t *testing.T) { + if err := testConcurrent(10, testFlateCompress); err != nil { + t.Fatal(err) + } +} + +func testGzipCompress() error { + for _, s := range compressTestcases { + if err := testGzipCompressSingleCase(s); err != nil { + return err + } + } + return nil +} + +func testFlateCompress() error { + for _, s := range compressTestcases { + if err := testFlateCompressSingleCase(s); err != nil { + return err + } + } + return nil +} + +func testGzipCompressSingleCase(s string) error { + var buf bytes.Buffer + zw := acquireStacklessGzipWriter(&buf, CompressDefaultCompression) + if _, err := zw.Write([]byte(s)); err != nil { + return fmt.Errorf("unexpected error: %s. s=%q", err, s) + } + releaseStacklessGzipWriter(zw, CompressDefaultCompression) + + zr, err := acquireGzipReader(&buf) + if err != nil { + return fmt.Errorf("unexpected error: %s. s=%q", err, s) + } + body, err := ioutil.ReadAll(zr) + if err != nil { + return fmt.Errorf("unexpected error: %s. s=%q", err, s) + } + if string(body) != s { + return fmt.Errorf("unexpected string after decompression: %q. Expecting %q", body, s) + } + releaseGzipReader(zr) + return nil +} + +func testFlateCompressSingleCase(s string) error { + var buf bytes.Buffer + zw := acquireStacklessDeflateWriter(&buf, CompressDefaultCompression) + if _, err := zw.Write([]byte(s)); err != nil { + return fmt.Errorf("unexpected error: %s. s=%q", err, s) + } + releaseStacklessDeflateWriter(zw, CompressDefaultCompression) + + zr, err := acquireFlateReader(&buf) + if err != nil { + return fmt.Errorf("unexpected error: %s. s=%q", err, s) + } + body, err := ioutil.ReadAll(zr) + if err != nil { + return fmt.Errorf("unexpected error: %s. s=%q", err, s) + } + if string(body) != s { + return fmt.Errorf("unexpected string after decompression: %q. Expecting %q", body, s) + } + releaseFlateReader(zr) + return nil +} + +func testConcurrent(concurrency int, f func() error) error { + ch := make(chan error, concurrency) + for i := 0; i < concurrency; i++ { + go func(idx int) { + err := f() + if err != nil { + ch <- fmt.Errorf("error in goroutine %d: %s", idx, err) + } + ch <- nil + }(i) + } + for i := 0; i < concurrency; i++ { + select { + case err := <-ch: + if err != nil { + return err + } + case <-time.After(time.Second): + return fmt.Errorf("timeout") + } + } + return nil +} diff --git a/vendor/github.com/erikdubbelboer/fasthttp/cookie.go b/vendor/github.com/erikdubbelboer/fasthttp/cookie.go new file mode 100644 index 0000000..43e39ad --- /dev/null +++ b/vendor/github.com/erikdubbelboer/fasthttp/cookie.go @@ -0,0 +1,441 @@ +package fasthttp + +import ( + "bytes" + "errors" + "io" + "sync" + "time" +) + +var zeroTime time.Time + +var ( + // CookieExpireDelete may be set on Cookie.Expire for expiring the given cookie. + CookieExpireDelete = time.Date(2009, time.November, 10, 23, 0, 0, 0, time.UTC) + + // CookieExpireUnlimited indicates that the cookie doesn't expire. + CookieExpireUnlimited = zeroTime +) + +// AcquireCookie returns an empty Cookie object from the pool. 
+// +// The returned object may be returned back to the pool with ReleaseCookie. +// This allows reducing GC load. +func AcquireCookie() *Cookie { + return cookiePool.Get().(*Cookie) +} + +// ReleaseCookie returns the Cookie object acquired with AcquireCookie back +// to the pool. +// +// Do not access released Cookie object, otherwise data races may occur. +func ReleaseCookie(c *Cookie) { + c.Reset() + cookiePool.Put(c) +} + +var cookiePool = &sync.Pool{ + New: func() interface{} { + return &Cookie{} + }, +} + +// Cookie represents HTTP response cookie. +// +// Do not copy Cookie objects. Create new object and use CopyTo instead. +// +// Cookie instance MUST NOT be used from concurrently running goroutines. +type Cookie struct { + noCopy noCopy + + key []byte + value []byte + expire time.Time + domain []byte + path []byte + + httpOnly bool + secure bool + + bufKV argsKV + buf []byte +} + +// CopyTo copies src cookie to c. +func (c *Cookie) CopyTo(src *Cookie) { + c.Reset() + c.key = append(c.key[:0], src.key...) + c.value = append(c.value[:0], src.value...) + c.expire = src.expire + c.domain = append(c.domain[:0], src.domain...) + c.path = append(c.path[:0], src.path...) + c.httpOnly = src.httpOnly + c.secure = src.secure +} + +// HTTPOnly returns true if the cookie is http only. +func (c *Cookie) HTTPOnly() bool { + return c.httpOnly +} + +// SetHTTPOnly sets cookie's httpOnly flag to the given value. +func (c *Cookie) SetHTTPOnly(httpOnly bool) { + c.httpOnly = httpOnly +} + +// Secure returns true if the cookie is secure. +func (c *Cookie) Secure() bool { + return c.secure +} + +// SetSecure sets cookie's secure flag to the given value. +func (c *Cookie) SetSecure(secure bool) { + c.secure = secure +} + +// Path returns cookie path. +func (c *Cookie) Path() []byte { + return c.path +} + +// SetPath sets cookie path. +func (c *Cookie) SetPath(path string) { + c.buf = append(c.buf[:0], path...) + c.path = normalizePath(c.path, c.buf) +} + +// SetPathBytes sets cookie path. +func (c *Cookie) SetPathBytes(path []byte) { + c.buf = append(c.buf[:0], path...) + c.path = normalizePath(c.path, c.buf) +} + +// Domain returns cookie domain. +// +// The returned domain is valid until the next Cookie modification method call. +func (c *Cookie) Domain() []byte { + return c.domain +} + +// SetDomain sets cookie domain. +func (c *Cookie) SetDomain(domain string) { + c.domain = append(c.domain[:0], domain...) +} + +// SetDomainBytes sets cookie domain. +func (c *Cookie) SetDomainBytes(domain []byte) { + c.domain = append(c.domain[:0], domain...) +} + +// Expire returns cookie expiration time. +// +// CookieExpireUnlimited is returned if cookie doesn't expire +func (c *Cookie) Expire() time.Time { + expire := c.expire + if expire.IsZero() { + expire = CookieExpireUnlimited + } + return expire +} + +// SetExpire sets cookie expiration time. +// +// Set expiration time to CookieExpireDelete for expiring (deleting) +// the cookie on the client. +// +// By default cookie lifetime is limited by browser session. +func (c *Cookie) SetExpire(expire time.Time) { + c.expire = expire +} + +// Value returns cookie value. +// +// The returned value is valid until the next Cookie modification method call. +func (c *Cookie) Value() []byte { + return c.value +} + +// SetValue sets cookie value. +func (c *Cookie) SetValue(value string) { + c.value = append(c.value[:0], value...) +} + +// SetValueBytes sets cookie value. +func (c *Cookie) SetValueBytes(value []byte) { + c.value = append(c.value[:0], value...) 
+} + +// Key returns cookie name. +// +// The returned value is valid until the next Cookie modification method call. +func (c *Cookie) Key() []byte { + return c.key +} + +// SetKey sets cookie name. +func (c *Cookie) SetKey(key string) { + c.key = append(c.key[:0], key...) +} + +// SetKeyBytes sets cookie name. +func (c *Cookie) SetKeyBytes(key []byte) { + c.key = append(c.key[:0], key...) +} + +// Reset clears the cookie. +func (c *Cookie) Reset() { + c.key = c.key[:0] + c.value = c.value[:0] + c.expire = zeroTime + c.domain = c.domain[:0] + c.path = c.path[:0] + c.httpOnly = false + c.secure = false +} + +// AppendBytes appends cookie representation to dst and returns +// the extended dst. +func (c *Cookie) AppendBytes(dst []byte) []byte { + if len(c.key) > 0 { + dst = append(dst, c.key...) + dst = append(dst, '=') + } + dst = append(dst, c.value...) + + if !c.expire.IsZero() { + c.bufKV.value = AppendHTTPDate(c.bufKV.value[:0], c.expire) + dst = append(dst, ';', ' ') + dst = append(dst, strCookieExpires...) + dst = append(dst, '=') + dst = append(dst, c.bufKV.value...) + } + if len(c.domain) > 0 { + dst = appendCookiePart(dst, strCookieDomain, c.domain) + } + if len(c.path) > 0 { + dst = appendCookiePart(dst, strCookiePath, c.path) + } + if c.httpOnly { + dst = append(dst, ';', ' ') + dst = append(dst, strCookieHTTPOnly...) + } + if c.secure { + dst = append(dst, ';', ' ') + dst = append(dst, strCookieSecure...) + } + return dst +} + +// Cookie returns cookie representation. +// +// The returned value is valid until the next call to Cookie methods. +func (c *Cookie) Cookie() []byte { + c.buf = c.AppendBytes(c.buf[:0]) + return c.buf +} + +// String returns cookie representation. +func (c *Cookie) String() string { + return string(c.Cookie()) +} + +// WriteTo writes cookie representation to w. +// +// WriteTo implements io.WriterTo interface. +func (c *Cookie) WriteTo(w io.Writer) (int64, error) { + n, err := w.Write(c.Cookie()) + return int64(n), err +} + +var errNoCookies = errors.New("no cookies found") + +// Parse parses Set-Cookie header. +func (c *Cookie) Parse(src string) error { + c.buf = append(c.buf[:0], src...) + return c.ParseBytes(c.buf) +} + +// Case insensitive equality comparison of two []byte. Assumes only +// letters need to be matched. This is used to compare cookie key +// value pairs. +func cookieKeyCompare(a, b []byte) bool { + if len(a) != len(b) { + return false + } + + for i := 0; i < len(a); i++ { + if a[i]|0x20 != b[i]|0x20 { + return false + } + } + + return true +} + +// ParseBytes parses Set-Cookie header. +func (c *Cookie) ParseBytes(src []byte) error { + c.Reset() + + var s cookieScanner + s.b = src + + kv := &c.bufKV + if !s.next(kv) { + return errNoCookies + } + + c.key = append(c.key[:0], kv.key...) + c.value = append(c.value[:0], kv.value...) + + for s.next(kv) { + if len(kv.key) != 0 { + // Case insensitive switch on first char + switch kv.key[0] | 0x20 { + case 'e': // "expires" + if cookieKeyCompare(strCookieExpires, kv.key) { + v := b2s(kv.value) + exptime, err := time.ParseInLocation(time.RFC1123, v, time.UTC) + if err != nil { + return err + } + c.expire = exptime + } + + case 'd': // "domain" + if cookieKeyCompare(strCookieDomain, kv.key) { + c.domain = append(c.domain[:0], kv.value...) + } + + case 'p': // "path" + if cookieKeyCompare(strCookiePath, kv.key) { + c.path = append(c.path[:0], kv.value...) 
+ } + } + + } else if len(kv.value) != 0 { + // Case insensitive switch on first char + switch kv.value[0] | 0x20 { + case 'h': // "httponly" + if cookieKeyCompare(strCookieHTTPOnly, kv.value) { + c.httpOnly = true + } + + case 's': // "secure" + if cookieKeyCompare(strCookieSecure, kv.value) { + c.secure = true + } + } + } // else empty or no match + } + return nil +} + +func appendCookiePart(dst, key, value []byte) []byte { + dst = append(dst, ';', ' ') + dst = append(dst, key...) + dst = append(dst, '=') + return append(dst, value...) +} + +func getCookieKey(dst, src []byte) []byte { + n := bytes.IndexByte(src, '=') + if n >= 0 { + src = src[:n] + } + return decodeCookieArg(dst, src, false) +} + +func appendRequestCookieBytes(dst []byte, cookies []argsKV) []byte { + for i, n := 0, len(cookies); i < n; i++ { + kv := &cookies[i] + if len(kv.key) > 0 { + dst = append(dst, kv.key...) + dst = append(dst, '=') + } + dst = append(dst, kv.value...) + if i+1 < n { + dst = append(dst, ';', ' ') + } + } + return dst +} + +// For Response we can not use the above function as response cookies +// already contain the key= in the value. +func appendResponseCookieBytes(dst []byte, cookies []argsKV) []byte { + for i, n := 0, len(cookies); i < n; i++ { + kv := &cookies[i] + dst = append(dst, kv.value...) + if i+1 < n { + dst = append(dst, ';', ' ') + } + } + return dst +} + +func parseRequestCookies(cookies []argsKV, src []byte) []argsKV { + var s cookieScanner + s.b = src + var kv *argsKV + cookies, kv = allocArg(cookies) + for s.next(kv) { + if len(kv.key) > 0 || len(kv.value) > 0 { + cookies, kv = allocArg(cookies) + } + } + return releaseArg(cookies) +} + +type cookieScanner struct { + b []byte +} + +func (s *cookieScanner) next(kv *argsKV) bool { + b := s.b + if len(b) == 0 { + return false + } + + isKey := true + k := 0 + for i, c := range b { + switch c { + case '=': + if isKey { + isKey = false + kv.key = decodeCookieArg(kv.key, b[:i], false) + k = i + 1 + } + case ';': + if isKey { + kv.key = kv.key[:0] + } + kv.value = decodeCookieArg(kv.value, b[k:i], true) + s.b = b[i+1:] + return true + } + } + + if isKey { + kv.key = kv.key[:0] + } + kv.value = decodeCookieArg(kv.value, b[k:], true) + s.b = b[len(b):] + return true +} + +func decodeCookieArg(dst, src []byte, skipQuotes bool) []byte { + for len(src) > 0 && src[0] == ' ' { + src = src[1:] + } + for len(src) > 0 && src[len(src)-1] == ' ' { + src = src[:len(src)-1] + } + if skipQuotes { + if len(src) > 1 && src[0] == '"' && src[len(src)-1] == '"' { + src = src[1 : len(src)-1] + } + } + return append(dst[:0], src...) 
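The Cookie type above builds Set-Cookie values through its setters plus AppendBytes/String, and reads them back through Parse/ParseBytes. A minimal round-trip sketch using the pooled object, assuming the vendored import path; key, value, domain, path, and expiry are illustrative:

package main

import (
	"fmt"
	"log"
	"time"

	"github.com/erikdubbelboer/fasthttp"
)

func main() {
	// Acquire a pooled Cookie, fill it, and render the Set-Cookie value.
	c := fasthttp.AcquireCookie()
	defer fasthttp.ReleaseCookie(c)

	c.SetKey("session")
	c.SetValue("abc123")
	c.SetDomain("example.com")
	c.SetPath("/app")
	c.SetExpire(time.Now().Add(24 * time.Hour))

	header := c.String()
	fmt.Println(header)

	// Parse the rendered header back, as Cookie.Parse/ParseBytes do for Set-Cookie.
	var parsed fasthttp.Cookie
	if err := parsed.Parse(header); err != nil {
		log.Fatal(err)
	}
	fmt.Printf("%s=%s; domain=%s; path=%s\n",
		parsed.Key(), parsed.Value(), parsed.Domain(), parsed.Path())
}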
+} diff --git a/vendor/github.com/erikdubbelboer/fasthttp/cookie_test.go b/vendor/github.com/erikdubbelboer/fasthttp/cookie_test.go new file mode 100644 index 0000000..bca51a2 --- /dev/null +++ b/vendor/github.com/erikdubbelboer/fasthttp/cookie_test.go @@ -0,0 +1,269 @@ +package fasthttp + +import ( + "strings" + "testing" + "time" +) + +func TestCookieValueWithEqualAndSpaceChars(t *testing.T) { + testCookieValueWithEqualAndSpaceChars(t, "sth1", "/", "MTQ2NjU5NTcwN3xfUVduVXk4aG9jSmZaNzNEb1dGa1VjekY1bG9vMmxSWlJBZUN2Q1ZtZVFNMTk2YU9YaWtCVmY1eDRWZXd3M3Q5RTJRZnZMbk5mWklSSFZJcVlXTDhiSFFHWWdpdFVLd1hwbXR2UUN4QlJ1N3BITFpkS3Y4PXzDvPNn6JVDBFB2wYVYPHdkdlZBm6n1_0QB3_GWwE40Tg ==") + testCookieValueWithEqualAndSpaceChars(t, "sth2", "/", "123") + testCookieValueWithEqualAndSpaceChars(t, "sth3", "/", "123 == 1") +} + +func testCookieValueWithEqualAndSpaceChars(t *testing.T, expectedName, expectedPath, expectedValue string) { + var c Cookie + c.SetKey(expectedName) + c.SetPath(expectedPath) + c.SetValue(expectedValue) + + s := c.String() + + var c1 Cookie + if err := c1.Parse(s); err != nil { + t.Fatalf("unexpected error: %s", err) + } + name := c1.Key() + if string(name) != expectedName { + t.Fatalf("unexpected name %q. Expecting %q", name, expectedName) + } + path := c1.Path() + if string(path) != expectedPath { + t.Fatalf("unexpected path %q. Expecting %q", path, expectedPath) + } + value := c1.Value() + if string(value) != expectedValue { + t.Fatalf("unexpected value %q. Expecting %q", value, expectedValue) + } +} + +func TestCookieSecureHttpOnly(t *testing.T) { + var c Cookie + + if err := c.Parse("foo=bar; HttpOnly; secure"); err != nil { + t.Fatalf("unexpected error: %s", err) + } + if !c.Secure() { + t.Fatalf("secure must be set") + } + if !c.HTTPOnly() { + t.Fatalf("HttpOnly must be set") + } + s := c.String() + if !strings.Contains(s, "; secure") { + t.Fatalf("missing secure flag in cookie %q", s) + } + if !strings.Contains(s, "; HttpOnly") { + t.Fatalf("missing HttpOnly flag in cookie %q", s) + } +} + +func TestCookieSecure(t *testing.T) { + var c Cookie + + if err := c.Parse("foo=bar; secure"); err != nil { + t.Fatalf("unexpected error: %s", err) + } + if !c.Secure() { + t.Fatalf("secure must be set") + } + s := c.String() + if !strings.Contains(s, "; secure") { + t.Fatalf("missing secure flag in cookie %q", s) + } + + if err := c.Parse("foo=bar"); err != nil { + t.Fatalf("unexpected error: %s", err) + } + if c.HTTPOnly() { + t.Fatalf("Unexpected secure flag set") + } + s = c.String() + if strings.Contains(s, "secure") { + t.Fatalf("unexpected secure flag in cookie %q", s) + } +} + +func TestCookieHttpOnly(t *testing.T) { + var c Cookie + + if err := c.Parse("foo=bar; HttpOnly"); err != nil { + t.Fatalf("unexpected error: %s", err) + } + if !c.HTTPOnly() { + t.Fatalf("HTTPOnly must be set") + } + s := c.String() + if !strings.Contains(s, "; HttpOnly") { + t.Fatalf("missing HttpOnly flag in cookie %q", s) + } + + if err := c.Parse("foo=bar"); err != nil { + t.Fatalf("unexpected error: %s", err) + } + if c.HTTPOnly() { + t.Fatalf("Unexpected HTTPOnly flag set") + } + s = c.String() + if strings.Contains(s, "HttpOnly") { + t.Fatalf("unexpected HttpOnly flag in cookie %q", s) + } +} + +func TestCookieAcquireReleaseSequential(t *testing.T) { + testCookieAcquireRelease(t) +} + +func TestCookieAcquireReleaseConcurrent(t *testing.T) { + ch := make(chan struct{}, 10) + for i := 0; i < 10; i++ { + go func() { + testCookieAcquireRelease(t) + ch <- struct{}{} + }() + } + for i := 0; i < 10; i++ { + select { 
+ case <-ch: + case <-time.After(time.Second): + t.Fatalf("timeout") + } + } +} + +func testCookieAcquireRelease(t *testing.T) { + c := AcquireCookie() + + key := "foo" + c.SetKey(key) + + value := "bar" + c.SetValue(value) + + domain := "foo.bar.com" + c.SetDomain(domain) + + path := "/foi/bar/aaa" + c.SetPath(path) + + s := c.String() + c.Reset() + if err := c.Parse(s); err != nil { + t.Fatalf("unexpected error: %s", err) + } + + if string(c.Key()) != key { + t.Fatalf("unexpected cookie name %q. Expecting %q", c.Key(), key) + } + if string(c.Value()) != value { + t.Fatalf("unexpected cookie value %q. Expecting %q", c.Value(), value) + } + if string(c.Domain()) != domain { + t.Fatalf("unexpected domain %q. Expecting %q", c.Domain(), domain) + } + if string(c.Path()) != path { + t.Fatalf("unexpected path %q. Expecting %q", c.Path(), path) + } + + ReleaseCookie(c) +} + +func TestCookieParse(t *testing.T) { + testCookieParse(t, "foo", "foo") + testCookieParse(t, "foo=bar", "foo=bar") + testCookieParse(t, "foo=", "foo=") + testCookieParse(t, `foo="bar"`, "foo=bar") + testCookieParse(t, `"foo"=bar`, `"foo"=bar`) + testCookieParse(t, "foo=bar; domain=aaa.com; path=/foo/bar", "foo=bar; domain=aaa.com; path=/foo/bar") + testCookieParse(t, "foo=bar; Domain=aaa.com; PATH=/foo/bar", "foo=bar; domain=aaa.com; path=/foo/bar") + testCookieParse(t, " xxx = yyy ; path=/a/b;;;domain=foobar.com ; expires= Tue, 10 Nov 2009 23:00:00 GMT ; ;;", + "xxx=yyy; expires=Tue, 10 Nov 2009 23:00:00 GMT; domain=foobar.com; path=/a/b") +} + +func testCookieParse(t *testing.T, s, expectedS string) { + var c Cookie + if err := c.Parse(s); err != nil { + t.Fatalf("unexpected error: %s", err) + } + result := string(c.Cookie()) + if result != expectedS { + t.Fatalf("unexpected cookies %q. Expecting %q. Original %q", result, expectedS, s) + } +} + +func TestCookieAppendBytes(t *testing.T) { + c := &Cookie{} + + testCookieAppendBytes(t, c, "", "bar", "bar") + testCookieAppendBytes(t, c, "foo", "", "foo=") + testCookieAppendBytes(t, c, "ффф", "12 лодлы", "ффф=12 лодлы") + + c.SetDomain("foobar.com") + testCookieAppendBytes(t, c, "a", "b", "a=b; domain=foobar.com") + + c.SetPath("/a/b") + testCookieAppendBytes(t, c, "aa", "bb", "aa=bb; domain=foobar.com; path=/a/b") + + c.SetExpire(CookieExpireDelete) + testCookieAppendBytes(t, c, "xxx", "yyy", "xxx=yyy; expires=Tue, 10 Nov 2009 23:00:00 GMT; domain=foobar.com; path=/a/b") +} + +func testCookieAppendBytes(t *testing.T, c *Cookie, key, value, expectedS string) { + c.SetKey(key) + c.SetValue(value) + result := string(c.AppendBytes(nil)) + if result != expectedS { + t.Fatalf("Unexpected cookie %q. Expecting %q", result, expectedS) + } +} + +func TestParseRequestCookies(t *testing.T) { + testParseRequestCookies(t, "", "") + testParseRequestCookies(t, "=", "") + testParseRequestCookies(t, "foo", "foo") + testParseRequestCookies(t, "=foo", "foo") + testParseRequestCookies(t, "bar=", "bar=") + testParseRequestCookies(t, "xxx=aa;bb=c; =d; ;;e=g", "xxx=aa; bb=c; d; e=g") + testParseRequestCookies(t, "a;b;c; d=1;d=2", "a; b; c; d=1; d=2") + testParseRequestCookies(t, " %D0%B8%D0%B2%D0%B5%D1%82=a%20b%3Bc ;s%20s=aaa ", "%D0%B8%D0%B2%D0%B5%D1%82=a%20b%3Bc; s%20s=aaa") +} + +func testParseRequestCookies(t *testing.T, s, expectedS string) { + cookies := parseRequestCookies(nil, []byte(s)) + ss := string(appendRequestCookieBytes(nil, cookies)) + if ss != expectedS { + t.Fatalf("Unexpected cookies after parsing: %q. Expecting %q. 
String to parse %q", ss, expectedS, s) + } +} + +func TestAppendRequestCookieBytes(t *testing.T) { + testAppendRequestCookieBytes(t, "=", "") + testAppendRequestCookieBytes(t, "foo=", "foo=") + testAppendRequestCookieBytes(t, "=bar", "bar") + testAppendRequestCookieBytes(t, "привет=a bc&s s=aaa", "привет=a bc; s s=aaa") +} + +func testAppendRequestCookieBytes(t *testing.T, s, expectedS string) { + var cookies []argsKV + for _, ss := range strings.Split(s, "&") { + tmp := strings.SplitN(ss, "=", 2) + if len(tmp) != 2 { + t.Fatalf("Cannot find '=' in %q, part of %q", ss, s) + } + cookies = append(cookies, argsKV{ + key: []byte(tmp[0]), + value: []byte(tmp[1]), + }) + } + + prefix := "foobar" + result := string(appendRequestCookieBytes([]byte(prefix), cookies)) + if result[:len(prefix)] != prefix { + t.Fatalf("unexpected prefix %q. Expecting %q for cookie %q", result[:len(prefix)], prefix, s) + } + result = result[len(prefix):] + if result != expectedS { + t.Fatalf("Unexpected result %q. Expecting %q for cookie %q", result, expectedS, s) + } +} diff --git a/vendor/github.com/erikdubbelboer/fasthttp/cookie_timing_test.go b/vendor/github.com/erikdubbelboer/fasthttp/cookie_timing_test.go new file mode 100644 index 0000000..bcf958c --- /dev/null +++ b/vendor/github.com/erikdubbelboer/fasthttp/cookie_timing_test.go @@ -0,0 +1,35 @@ +package fasthttp + +import ( + "testing" +) + +func BenchmarkCookieParseMin(b *testing.B) { + var c Cookie + s := []byte("xxx=yyy") + for i := 0; i < b.N; i++ { + if err := c.ParseBytes(s); err != nil { + b.Fatalf("unexpected error when parsing cookies: %s", err) + } + } +} + +func BenchmarkCookieParseNoExpires(b *testing.B) { + var c Cookie + s := []byte("xxx=yyy; domain=foobar.com; path=/a/b") + for i := 0; i < b.N; i++ { + if err := c.ParseBytes(s); err != nil { + b.Fatalf("unexpected error when parsing cookies: %s", err) + } + } +} + +func BenchmarkCookieParseFull(b *testing.B) { + var c Cookie + s := []byte("xxx=yyy; expires=Tue, 10 Nov 2009 23:00:00 GMT; domain=foobar.com; path=/a/b") + for i := 0; i < b.N; i++ { + if err := c.ParseBytes(s); err != nil { + b.Fatalf("unexpected error when parsing cookies: %s", err) + } + } +} diff --git a/vendor/github.com/erikdubbelboer/fasthttp/doc.go b/vendor/github.com/erikdubbelboer/fasthttp/doc.go new file mode 100644 index 0000000..501eff6 --- /dev/null +++ b/vendor/github.com/erikdubbelboer/fasthttp/doc.go @@ -0,0 +1,40 @@ +/* +Package fasthttp provides fast HTTP server and client API. + +Fasthttp provides the following features: + + * Optimized for speed. Easily handles more than 100K qps and more than 1M + concurrent keep-alive connections on modern hardware. + * Optimized for low memory usage. + * Easy 'Connection: Upgrade' support via RequestCtx.Hijack. + * Server supports requests' pipelining. Multiple requests may be read from + a single network packet and multiple responses may be sent in a single + network packet. This may be useful for highly loaded REST services. + * Server provides the following anti-DoS limits: + + * The number of concurrent connections. + * The number of concurrent connections per client IP. + * The number of requests per connection. + * Request read timeout. + * Response write timeout. + * Maximum request header size. + * Maximum request body size. + * Maximum request execution time. + * Maximum keep-alive connection lifetime. + * Early filtering out non-GET requests. + + * A lot of additional useful info is exposed to request handler: + + * Server and client address. + * Per-request logger. 
+ * Unique request id. + * Request start time. + * Connection start time. + * Request sequence number for the current connection. + + * Client supports automatic retry on idempotent requests' failure. + * Fasthttp API is designed with the ability to extend existing client + and server implementations or to write custom client and server + implementations from scratch. +*/ +package fasthttp diff --git a/vendor/github.com/erikdubbelboer/fasthttp/examples/README.md b/vendor/github.com/erikdubbelboer/fasthttp/examples/README.md new file mode 100644 index 0000000..f05e252 --- /dev/null +++ b/vendor/github.com/erikdubbelboer/fasthttp/examples/README.md @@ -0,0 +1,6 @@ +# Code examples + +* [HelloWorld server](helloworldserver) +* [Static file server](fileserver) +* [Gzip client and server](gzipped) +* [Multidomain using SSL certs](multidomain) diff --git a/vendor/github.com/erikdubbelboer/fasthttp/examples/fileserver/.gitignore b/vendor/github.com/erikdubbelboer/fasthttp/examples/fileserver/.gitignore new file mode 100644 index 0000000..6ea91ce --- /dev/null +++ b/vendor/github.com/erikdubbelboer/fasthttp/examples/fileserver/.gitignore @@ -0,0 +1 @@ +fileserver diff --git a/vendor/github.com/erikdubbelboer/fasthttp/examples/fileserver/Makefile b/vendor/github.com/erikdubbelboer/fasthttp/examples/fileserver/Makefile new file mode 100644 index 0000000..d9d99f0 --- /dev/null +++ b/vendor/github.com/erikdubbelboer/fasthttp/examples/fileserver/Makefile @@ -0,0 +1,7 @@ +fileserver: clean + go get -u github.com/erikdubbelboer/fasthttp + go get -u github.com/erikdubbelboer/fasthttp/expvarhandler + go build + +clean: + rm -f fileserver diff --git a/vendor/github.com/erikdubbelboer/fasthttp/examples/fileserver/README.md b/vendor/github.com/erikdubbelboer/fasthttp/examples/fileserver/README.md new file mode 100644 index 0000000..e23f831 --- /dev/null +++ b/vendor/github.com/erikdubbelboer/fasthttp/examples/fileserver/README.md @@ -0,0 +1,84 @@ +# Static file server example + +* Serves files from the given directory. +* Supports transparent response compression. +* Supports byte range responses. +* Generates directory index pages. +* Supports TLS (aka SSL or HTTPS). +* Supports virtual hosts. +* Exports various stats on /stats path. + +# How to build + +``` +make +``` + +# How to run + +``` +./fileserver -h +./fileserver -addr=tcp.addr.to.listen:to -dir=/path/to/directory/to/serve +``` + +# fileserver vs nginx performance comparison + +Serving default nginx path (`/usr/share/nginx/html` on ubuntu). 
+ +* nginx + +``` +$ ./wrk -t 4 -c 16 -d 10 http://localhost:80 +Running 10s test @ http://localhost:80 + 4 threads and 16 connections + Thread Stats Avg Stdev Max +/- Stdev + Latency 397.76us 1.08ms 20.23ms 95.19% + Req/Sec 21.20k 2.49k 31.34k 79.65% + 850220 requests in 10.10s, 695.65MB read +Requests/sec: 84182.71 +Transfer/sec: 68.88MB +``` + +* fileserver + +``` +$ ./wrk -t 4 -c 16 -d 10 http://localhost:8080 +Running 10s test @ http://localhost:8080 + 4 threads and 16 connections + Thread Stats Avg Stdev Max +/- Stdev + Latency 447.99us 1.59ms 27.20ms 94.79% + Req/Sec 37.13k 3.99k 47.86k 76.00% + 1478457 requests in 10.02s, 1.03GB read +Requests/sec: 147597.06 +Transfer/sec: 105.15MB +``` + +8 pipelined requests + +* nginx + +``` +$ ./wrk -s pipeline.lua -t 4 -c 16 -d 10 http://localhost:80 -- 8 +Running 10s test @ http://localhost:80 + 4 threads and 16 connections + Thread Stats Avg Stdev Max +/- Stdev + Latency 1.34ms 2.15ms 30.91ms 92.16% + Req/Sec 33.54k 7.36k 108.12k 76.81% + 1339908 requests in 10.10s, 1.07GB read +Requests/sec: 132705.81 +Transfer/sec: 108.58MB +``` + +* fileserver + +``` +$ ./wrk -s pipeline.lua -t 4 -c 16 -d 10 http://localhost:8080 -- 8 +Running 10s test @ http://localhost:8080 + 4 threads and 16 connections + Thread Stats Avg Stdev Max +/- Stdev + Latency 2.08ms 6.33ms 88.26ms 92.83% + Req/Sec 116.54k 14.66k 167.98k 69.00% + 4642226 requests in 10.03s, 3.23GB read +Requests/sec: 462769.41 +Transfer/sec: 329.67MB +``` diff --git a/vendor/github.com/erikdubbelboer/fasthttp/examples/fileserver/fileserver.go b/vendor/github.com/erikdubbelboer/fasthttp/examples/fileserver/fileserver.go new file mode 100644 index 0000000..10288e0 --- /dev/null +++ b/vendor/github.com/erikdubbelboer/fasthttp/examples/fileserver/fileserver.go @@ -0,0 +1,120 @@ +// Example static file server. +// +// Serves static files from the given directory. +// Exports various stats at /stats . +package main + +import ( + "expvar" + "flag" + "log" + + "github.com/erikdubbelboer/fasthttp" + "github.com/erikdubbelboer/fasthttp/expvarhandler" +) + +var ( + addr = flag.String("addr", "localhost:8080", "TCP address to listen to") + addrTLS = flag.String("addrTLS", "", "TCP address to listen to TLS (aka SSL or HTTPS) requests. Leave empty for disabling TLS") + byteRange = flag.Bool("byteRange", false, "Enables byte range requests if set to true") + certFile = flag.String("certFile", "./ssl-cert-snakeoil.pem", "Path to TLS certificate file") + compress = flag.Bool("compress", false, "Enables transparent response compression if set to true") + dir = flag.String("dir", "/usr/share/nginx/html", "Directory to serve static files from") + generateIndexPages = flag.Bool("generateIndexPages", true, "Whether to generate directory index pages") + keyFile = flag.String("keyFile", "./ssl-cert-snakeoil.key", "Path to TLS key file") + vhost = flag.Bool("vhost", false, "Enables virtual hosting by prepending the requested path with the requested hostname") +) + +func main() { + // Parse command-line flags. + flag.Parse() + + // Setup FS handler + fs := &fasthttp.FS{ + Root: *dir, + IndexNames: []string{"index.html"}, + GenerateIndexPages: *generateIndexPages, + Compress: *compress, + AcceptByteRange: *byteRange, + } + if *vhost { + fs.PathRewrite = fasthttp.NewVHostPathRewriter(0) + } + fsHandler := fs.NewRequestHandler() + + // Create RequestHandler serving server stats on /stats and files + // on other requested paths. + // /stats output may be filtered using regexps. 
For example: + // + // * /stats?r=fs will show only stats (expvars) containing 'fs' + // in their names. + requestHandler := func(ctx *fasthttp.RequestCtx) { + switch string(ctx.Path()) { + case "/stats": + expvarhandler.ExpvarHandler(ctx) + default: + fsHandler(ctx) + updateFSCounters(ctx) + } + } + + // Start HTTP server. + if len(*addr) > 0 { + log.Printf("Starting HTTP server on %q", *addr) + go func() { + if err := fasthttp.ListenAndServe(*addr, requestHandler); err != nil { + log.Fatalf("error in ListenAndServe: %s", err) + } + }() + } + + // Start HTTPS server. + if len(*addrTLS) > 0 { + log.Printf("Starting HTTPS server on %q", *addrTLS) + go func() { + if err := fasthttp.ListenAndServeTLS(*addrTLS, *certFile, *keyFile, requestHandler); err != nil { + log.Fatalf("error in ListenAndServeTLS: %s", err) + } + }() + } + + log.Printf("Serving files from directory %q", *dir) + log.Printf("See stats at http://%s/stats", *addr) + + // Wait forever. + select {} +} + +func updateFSCounters(ctx *fasthttp.RequestCtx) { + // Increment the number of fsHandler calls. + fsCalls.Add(1) + + // Update other stats counters + resp := &ctx.Response + switch resp.StatusCode() { + case fasthttp.StatusOK: + fsOKResponses.Add(1) + fsResponseBodyBytes.Add(int64(resp.Header.ContentLength())) + case fasthttp.StatusNotModified: + fsNotModifiedResponses.Add(1) + case fasthttp.StatusNotFound: + fsNotFoundResponses.Add(1) + default: + fsOtherResponses.Add(1) + } +} + +// Various counters - see https://golang.org/pkg/expvar/ for details. +var ( + // Counter for total number of fs calls + fsCalls = expvar.NewInt("fsCalls") + + // Counters for various response status codes + fsOKResponses = expvar.NewInt("fsOKResponses") + fsNotModifiedResponses = expvar.NewInt("fsNotModifiedResponses") + fsNotFoundResponses = expvar.NewInt("fsNotFoundResponses") + fsOtherResponses = expvar.NewInt("fsOtherResponses") + + // Total size in bytes for OK response bodies served. 
+ fsResponseBodyBytes = expvar.NewInt("fsResponseBodyBytes") +) diff --git a/vendor/github.com/erikdubbelboer/fasthttp/examples/fileserver/ssl-cert-snakeoil.key b/vendor/github.com/erikdubbelboer/fasthttp/examples/fileserver/ssl-cert-snakeoil.key new file mode 100644 index 0000000..00a79a3 --- /dev/null +++ b/vendor/github.com/erikdubbelboer/fasthttp/examples/fileserver/ssl-cert-snakeoil.key @@ -0,0 +1,28 @@ +-----BEGIN PRIVATE KEY----- +MIIEvgIBADANBgkqhkiG9w0BAQEFAASCBKgwggSkAgEAAoIBAQD4IQusAs8PJdnG +3mURt/AXtgC+ceqLOatJ49JJE1VPTkMAy+oE1f1XvkMrYsHqmDf6GWVzgVXryL4U +wq2/nJSm56ddhN55nI8oSN3dtywUB8/ShelEN73nlN77PeD9tl6NksPwWaKrqxq0 +FlabRPZSQCfmgZbhDV8Sa8mfCkFU0G0lit6kLGceCKMvmW+9Bz7ebsYmVdmVMxmf +IJStFD44lWFTdUc65WISKEdW2ELcUefb0zOLw+0PCbXFGJH5x5ktksW8+BBk2Hkg +GeQRL/qPCccthbScO0VgNj3zJ3ZZL0ObSDAbvNDG85joeNjDNq5DT/BAZ0bOSbEF +sh+f9BAzAgMBAAECggEBAJWv2cq7Jw6MVwSRxYca38xuD6TUNBopgBvjREixURW2 +sNUaLuMb9Omp7fuOaE2N5rcJ+xnjPGIxh/oeN5MQctz9gwn3zf6vY+15h97pUb4D +uGvYPRDaT8YVGS+X9NMZ4ZCmqW2lpWzKnCFoGHcy8yZLbcaxBsRdvKzwOYGoPiFb +K2QuhXZ/1UPmqK9i2DFKtj40X6vBszTNboFxOVpXrPu0FJwLVSDf2hSZ4fMM0DH3 +YqwKcYf5te+hxGKgrqRA3tn0NCWii0in6QIwXMC+kMw1ebg/tZKqyDLMNptAK8J+ +DVw9m5X1seUHS5ehU/g2jrQrtK5WYn7MrFK4lBzlRwECgYEA/d1TeANYECDWRRDk +B0aaRZs87Rwl/J9PsvbsKvtU/bX+OfSOUjOa9iQBqn0LmU8GqusEET/QVUfocVwV +Bggf/5qDLxz100Rj0ags/yE/kNr0Bb31kkkKHFMnCT06YasR7qKllwrAlPJvQv9x +IzBKq+T/Dx08Wep9bCRSFhzRCnsCgYEA+jdeZXTDr/Vz+D2B3nAw1frqYFfGnEVY +wqmoK3VXMDkGuxsloO2rN+SyiUo3JNiQNPDub/t7175GH5pmKtZOlftePANsUjBj +wZ1D0rI5Bxu/71ibIUYIRVmXsTEQkh/ozoh3jXCZ9+bLgYiYx7789IUZZSokFQ3D +FICUT9KJ36kCgYAGoq9Y1rWJjmIrYfqj2guUQC+CfxbbGIrrwZqAsRsSmpwvhZ3m +tiSZxG0quKQB+NfSxdvQW5ulbwC7Xc3K35F+i9pb8+TVBdeaFkw+yu6vaZmxQLrX +fQM/pEjD7A7HmMIaO7QaU5SfEAsqdCTP56Y8AftMuNXn/8IRfo2KuGwaWwKBgFpU +ILzJoVdlad9E/Rw7LjYhZfkv1uBVXIyxyKcfrkEXZSmozDXDdxsvcZCEfVHM6Ipk +K/+7LuMcqp4AFEAEq8wTOdq6daFaHLkpt/FZK6M4TlruhtpFOPkoNc3e45eM83OT +6mziKINJC1CQ6m65sQHpBtjxlKMRG8rL/D6wx9s5AoGBAMRlqNPMwglT3hvDmsAt +9Lf9pdmhERUlHhD8bj8mDaBj2Aqv7f6VRJaYZqP403pKKQexuqcn80mtjkSAPFkN +Cj7BVt/RXm5uoxDTnfi26RF9F6yNDEJ7UU9+peBr99aazF/fTgW/1GcMkQnum8uV +c257YgaWmjK9uB0Y2r2VxS0G +-----END PRIVATE KEY----- diff --git a/vendor/github.com/erikdubbelboer/fasthttp/examples/fileserver/ssl-cert-snakeoil.pem b/vendor/github.com/erikdubbelboer/fasthttp/examples/fileserver/ssl-cert-snakeoil.pem new file mode 100644 index 0000000..93e77cd --- /dev/null +++ b/vendor/github.com/erikdubbelboer/fasthttp/examples/fileserver/ssl-cert-snakeoil.pem @@ -0,0 +1,17 @@ +-----BEGIN CERTIFICATE----- +MIICujCCAaKgAwIBAgIJAMbXnKZ/cikUMA0GCSqGSIb3DQEBCwUAMBUxEzARBgNV +BAMTCnVidW50dS5uYW4wHhcNMTUwMjA0MDgwMTM5WhcNMjUwMjAxMDgwMTM5WjAV +MRMwEQYDVQQDEwp1YnVudHUubmFuMIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIB +CgKCAQEA+CELrALPDyXZxt5lEbfwF7YAvnHqizmrSePSSRNVT05DAMvqBNX9V75D +K2LB6pg3+hllc4FV68i+FMKtv5yUpuenXYTeeZyPKEjd3bcsFAfP0oXpRDe955Te ++z3g/bZejZLD8Fmiq6satBZWm0T2UkAn5oGW4Q1fEmvJnwpBVNBtJYrepCxnHgij +L5lvvQc+3m7GJlXZlTMZnyCUrRQ+OJVhU3VHOuViEihHVthC3FHn29Mzi8PtDwm1 +xRiR+ceZLZLFvPgQZNh5IBnkES/6jwnHLYW0nDtFYDY98yd2WS9Dm0gwG7zQxvOY +6HjYwzauQ0/wQGdGzkmxBbIfn/QQMwIDAQABow0wCzAJBgNVHRMEAjAAMA0GCSqG +SIb3DQEBCwUAA4IBAQBQjKm/4KN/iTgXbLTL3i7zaxYXFLXsnT1tF+ay4VA8aj98 +L3JwRTciZ3A5iy/W4VSCt3eASwOaPWHKqDBB5RTtL73LoAqsWmO3APOGQAbixcQ2 +45GXi05OKeyiYRi1Nvq7Unv9jUkRDHUYVPZVSAjCpsXzPhFkmZoTRxmx5l0ZF7Li +K91lI5h+eFq0dwZwrmlPambyh1vQUi70VHv8DNToVU29kel7YLbxGbuqETfhrcy6 +X+Mha6RYITkAn5FqsZcKMsc9eYGEF4l3XV+oS7q6xfTxktYJMFTI18J0lQ2Lv/CI +whdMnYGntDQBE/iFCrJEGNsKGc38796GBOb5j+zd +-----END CERTIFICATE----- diff --git 
a/vendor/github.com/erikdubbelboer/fasthttp/examples/gzipped/Makefile b/vendor/github.com/erikdubbelboer/fasthttp/examples/gzipped/Makefile new file mode 100644 index 0000000..5bbae7a --- /dev/null +++ b/vendor/github.com/erikdubbelboer/fasthttp/examples/gzipped/Makefile @@ -0,0 +1,6 @@ +writer: clean + go get -u github.com/erikdubbelboer/fasthttp + go build main.go + +clean: + rm -f main diff --git a/vendor/github.com/erikdubbelboer/fasthttp/examples/gzipped/README.md b/vendor/github.com/erikdubbelboer/fasthttp/examples/gzipped/README.md new file mode 100644 index 0000000..5315974 --- /dev/null +++ b/vendor/github.com/erikdubbelboer/fasthttp/examples/gzipped/README.md @@ -0,0 +1,15 @@ +# Gzip client and server example + +* Prints gzipped message from server. + +# How to build + +``` +make +``` + +# How to run + +``` +./main +``` diff --git a/vendor/github.com/erikdubbelboer/fasthttp/examples/gzipped/main.go b/vendor/github.com/erikdubbelboer/fasthttp/examples/gzipped/main.go new file mode 100644 index 0000000..547cb13 --- /dev/null +++ b/vendor/github.com/erikdubbelboer/fasthttp/examples/gzipped/main.go @@ -0,0 +1,59 @@ +package main + +import ( + "bytes" + "fmt" + "log" + + "github.com/erikdubbelboer/fasthttp" +) + +func server() { + server := fasthttp.Server{ + Name: "Fasthttp server", + Handler: handler, + ReduceMemoryUsage: true, + } + log.Fatal(server.ListenAndServe(":1313")) +} + +func handler(ctx *fasthttp.RequestCtx) { + ctx.SetContentType("text/html") + ctx.Response.Header.Add("Content-Encoding", "gzip") + if ctx.Request.Header.HasAcceptEncoding("gzip") { + log.Println("Sending gzipped content") + ctx.Write( + fasthttp.AppendGzipBytes( + nil, []byte(`CompressedHello`), + ), + ) + } else { + log.Println("Sending plain content") + ctx.Write( + []byte(`Not compressedHello`), + ) + } +} + +func main() { + go server() + req, res := fasthttp.AcquireRequest(), fasthttp.AcquireResponse() + + req.Header.Add("Accept-Encoding", "gzip") + req.SetRequestURI("http://localhost:1313") + + err := fasthttp.Do(req, res) + if err != nil { + panic(err) + } + body := res.Body() + if b := res.Header.Peek("Content-Encoding"); len(b) > 0 { + if bytes.Index(b, []byte("gzip")) >= 0 { + body, err = res.BodyGunzip() + if err != nil { + panic(err) + } + } + } + fmt.Printf("%s\n", body) +} diff --git a/vendor/github.com/erikdubbelboer/fasthttp/examples/helloworldserver/.gitignore b/vendor/github.com/erikdubbelboer/fasthttp/examples/helloworldserver/.gitignore new file mode 100644 index 0000000..32aeb84 --- /dev/null +++ b/vendor/github.com/erikdubbelboer/fasthttp/examples/helloworldserver/.gitignore @@ -0,0 +1 @@ +helloworldserver diff --git a/vendor/github.com/erikdubbelboer/fasthttp/examples/helloworldserver/Makefile b/vendor/github.com/erikdubbelboer/fasthttp/examples/helloworldserver/Makefile new file mode 100644 index 0000000..950c1d3 --- /dev/null +++ b/vendor/github.com/erikdubbelboer/fasthttp/examples/helloworldserver/Makefile @@ -0,0 +1,6 @@ +helloworldserver: clean + go get -u github.com/erikdubbelboer/fasthttp + go build + +clean: + rm -f helloworldserver diff --git a/vendor/github.com/erikdubbelboer/fasthttp/examples/helloworldserver/README.md b/vendor/github.com/erikdubbelboer/fasthttp/examples/helloworldserver/README.md new file mode 100644 index 0000000..80e801e --- /dev/null +++ b/vendor/github.com/erikdubbelboer/fasthttp/examples/helloworldserver/README.md @@ -0,0 +1,17 @@ +# HelloWorld server example + +* Displays various request info. +* Sets response headers and cookies. 
+* Supports transparent compression. + +# How to build + +``` +make +``` + +# How to run + +``` +./helloworldserver -addr=tcp.addr.to.listen:to +``` diff --git a/vendor/github.com/erikdubbelboer/fasthttp/examples/helloworldserver/helloworldserver.go b/vendor/github.com/erikdubbelboer/fasthttp/examples/helloworldserver/helloworldserver.go new file mode 100644 index 0000000..6f22b7e --- /dev/null +++ b/vendor/github.com/erikdubbelboer/fasthttp/examples/helloworldserver/helloworldserver.go @@ -0,0 +1,55 @@ +package main + +import ( + "flag" + "fmt" + "log" + + "github.com/erikdubbelboer/fasthttp" +) + +var ( + addr = flag.String("addr", ":8080", "TCP address to listen to") + compress = flag.Bool("compress", false, "Whether to enable transparent response compression") +) + +func main() { + flag.Parse() + + h := requestHandler + if *compress { + h = fasthttp.CompressHandler(h) + } + + if err := fasthttp.ListenAndServe(*addr, h); err != nil { + log.Fatalf("Error in ListenAndServe: %s", err) + } +} + +func requestHandler(ctx *fasthttp.RequestCtx) { + fmt.Fprintf(ctx, "Hello, world!\n\n") + + fmt.Fprintf(ctx, "Request method is %q\n", ctx.Method()) + fmt.Fprintf(ctx, "RequestURI is %q\n", ctx.RequestURI()) + fmt.Fprintf(ctx, "Requested path is %q\n", ctx.Path()) + fmt.Fprintf(ctx, "Host is %q\n", ctx.Host()) + fmt.Fprintf(ctx, "Query string is %q\n", ctx.QueryArgs()) + fmt.Fprintf(ctx, "User-Agent is %q\n", ctx.UserAgent()) + fmt.Fprintf(ctx, "Connection has been established at %s\n", ctx.ConnTime()) + fmt.Fprintf(ctx, "Request has been started at %s\n", ctx.Time()) + fmt.Fprintf(ctx, "Serial request number for the current connection is %d\n", ctx.ConnRequestNum()) + fmt.Fprintf(ctx, "Your ip is %q\n\n", ctx.RemoteIP()) + + fmt.Fprintf(ctx, "Raw request is:\n---CUT---\n%s\n---CUT---", &ctx.Request) + + ctx.SetContentType("text/plain; charset=utf8") + + // Set arbitrary headers + ctx.Response.Header.Set("X-My-Header", "my-header-value") + + // Set cookies + var c fasthttp.Cookie + c.SetKey("cookie-name") + c.SetValue("cookie-value") + ctx.Response.Header.SetCookie(&c) +} diff --git a/vendor/github.com/erikdubbelboer/fasthttp/examples/multidomain/Makefile b/vendor/github.com/erikdubbelboer/fasthttp/examples/multidomain/Makefile new file mode 100644 index 0000000..968b019 --- /dev/null +++ b/vendor/github.com/erikdubbelboer/fasthttp/examples/multidomain/Makefile @@ -0,0 +1,6 @@ +writer: clean + go get -u github.com/erikdubbelboer/fasthttp + go build + +clean: + rm -f multidomain diff --git a/vendor/github.com/erikdubbelboer/fasthttp/examples/multidomain/README.md b/vendor/github.com/erikdubbelboer/fasthttp/examples/multidomain/README.md new file mode 100644 index 0000000..12c09ec --- /dev/null +++ b/vendor/github.com/erikdubbelboer/fasthttp/examples/multidomain/README.md @@ -0,0 +1,15 @@ +# Multidomain using SSL certs example + +* Prints two messages depending on visited host. 
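+
+The server keeps a map from host name to `fasthttp.RequestHandler` and
+dispatches each request on `ctx.Host()`. A trimmed sketch of that dispatch
+(the `dispatch` name is illustrative; in `multidomain.go` below it is the
+server's anonymous `Handler`):
+
+```
+var domains = make(map[string]fasthttp.RequestHandler)
+
+func dispatch(ctx *fasthttp.RequestCtx) {
+	h, ok := domains[string(ctx.Host())]
+	if !ok {
+		ctx.NotFound()
+		return
+	}
+	h(ctx)
+}
+```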
+ +# How to build + +``` +make +``` + +# How to run + +``` +./multidomain +``` diff --git a/vendor/github.com/erikdubbelboer/fasthttp/examples/multidomain/multidomain.go b/vendor/github.com/erikdubbelboer/fasthttp/examples/multidomain/multidomain.go new file mode 100644 index 0000000..72b806e --- /dev/null +++ b/vendor/github.com/erikdubbelboer/fasthttp/examples/multidomain/multidomain.go @@ -0,0 +1,118 @@ +package main + +import ( + "crypto/rand" + "crypto/rsa" + "crypto/x509" + "crypto/x509/pkix" + "encoding/pem" + "fmt" + "math/big" + "time" + + "github.com/erikdubbelboer/fasthttp" +) + +var domains = make(map[string]fasthttp.RequestHandler) + +func main() { + server := &fasthttp.Server{ + // You can check the access using openssl command: + // $ openssl s_client -connect localhost:8080 << EOF + // > GET / + // > Host: localhost + // > EOF + // + // $ openssl s_client -connect localhost:8080 << EOF + // > GET / + // > Host: 127.0.0.1:8080 + // > EOF + // + Handler: func(ctx *fasthttp.RequestCtx) { + h, ok := domains[string(ctx.Host())] + if !ok { + ctx.NotFound() + return + } + h(ctx) + }, + } + + // preparing first host + cert, priv, err := GenerateCert("localhost:8080") + if err != nil { + panic(err) + } + domains["localhost:8080"] = func(ctx *fasthttp.RequestCtx) { + ctx.Write([]byte("You are accessing to localhost:8080\n")) + } + + err = server.AppendCertEmbed(cert, priv) + if err != nil { + panic(err) + } + + // preparing second host + cert, priv, err = GenerateCert("127.0.0.1") + if err != nil { + panic(err) + } + domains["127.0.0.1:8080"] = func(ctx *fasthttp.RequestCtx) { + ctx.Write([]byte("You are accessing to 127.0.0.1:8080\n")) + } + + err = server.AppendCertEmbed(cert, priv) + if err != nil { + panic(err) + } + + fmt.Println(server.ListenAndServeTLS(":8080", "", "")) +} + +func GenerateCert(host string) ([]byte, []byte, error) { + priv, err := rsa.GenerateKey(rand.Reader, 2048) + if err != nil { + return nil, nil, err + } + + serialNumberLimit := new(big.Int).Lsh(big.NewInt(1), 128) + serialNumber, err := rand.Int(rand.Reader, serialNumberLimit) + if err != nil { + return nil, nil, err + } + + cert := &x509.Certificate{ + SerialNumber: serialNumber, + Subject: pkix.Name{ + Organization: []string{"I have your data"}, + }, + NotBefore: time.Now(), + NotAfter: time.Now().Add(365 * 24 * time.Hour), + KeyUsage: x509.KeyUsageCertSign | x509.KeyUsageDigitalSignature, + ExtKeyUsage: []x509.ExtKeyUsage{x509.ExtKeyUsageServerAuth, x509.ExtKeyUsageClientAuth}, + SignatureAlgorithm: x509.SHA256WithRSA, + DNSNames: []string{host}, + BasicConstraintsValid: true, + IsCA: true, + } + + certBytes, err := x509.CreateCertificate( + rand.Reader, cert, cert, &priv.PublicKey, priv, + ) + + p := pem.EncodeToMemory( + &pem.Block{ + Type: "PRIVATE KEY", + Bytes: x509.MarshalPKCS1PrivateKey(priv), + }, + ) + + b := pem.EncodeToMemory( + &pem.Block{ + Type: "CERTIFICATE", + Bytes: certBytes, + }, + ) + + return b, p, err +} diff --git a/vendor/github.com/erikdubbelboer/fasthttp/expvarhandler/expvar.go b/vendor/github.com/erikdubbelboer/fasthttp/expvarhandler/expvar.go new file mode 100644 index 0000000..5df6900 --- /dev/null +++ b/vendor/github.com/erikdubbelboer/fasthttp/expvarhandler/expvar.go @@ -0,0 +1,62 @@ +// Package expvarhandler provides fasthttp-compatible request handler +// serving expvars. 
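+//
+// A minimal wiring sketch (the route and listen address are illustrative,
+// not part of this package):
+//
+//	fasthttp.ListenAndServe(":8080", func(ctx *fasthttp.RequestCtx) {
+//		if string(ctx.Path()) == "/stats" {
+//			expvarhandler.ExpvarHandler(ctx)
+//			return
+//		}
+//		ctx.Error("unsupported path", fasthttp.StatusNotFound)
+//	})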
+package expvarhandler + +import ( + "expvar" + "fmt" + "regexp" + + "github.com/erikdubbelboer/fasthttp" +) + +var ( + expvarHandlerCalls = expvar.NewInt("expvarHandlerCalls") + expvarRegexpErrors = expvar.NewInt("expvarRegexpErrors") +) + +// ExpvarHandler dumps json representation of expvars to http response. +// +// Expvars may be filtered by regexp provided via 'r' query argument. +// +// See https://golang.org/pkg/expvar/ for details. +func ExpvarHandler(ctx *fasthttp.RequestCtx) { + expvarHandlerCalls.Add(1) + + ctx.Response.Reset() + + r, err := getExpvarRegexp(ctx) + if err != nil { + expvarRegexpErrors.Add(1) + fmt.Fprintf(ctx, "Error when obtaining expvar regexp: %s", err) + ctx.SetStatusCode(fasthttp.StatusBadRequest) + return + } + + fmt.Fprintf(ctx, "{\n") + first := true + expvar.Do(func(kv expvar.KeyValue) { + if r.MatchString(kv.Key) { + if !first { + fmt.Fprintf(ctx, ",\n") + } + first = false + fmt.Fprintf(ctx, "\t%q: %s", kv.Key, kv.Value) + } + }) + fmt.Fprintf(ctx, "\n}\n") + + ctx.SetContentType("application/json; charset=utf-8") +} + +func getExpvarRegexp(ctx *fasthttp.RequestCtx) (*regexp.Regexp, error) { + r := string(ctx.QueryArgs().Peek("r")) + if len(r) == 0 { + r = "." + } + rr, err := regexp.Compile(r) + if err != nil { + return nil, fmt.Errorf("cannot parse r=%q: %s", r, err) + } + return rr, nil +} diff --git a/vendor/github.com/erikdubbelboer/fasthttp/expvarhandler/expvar_test.go b/vendor/github.com/erikdubbelboer/fasthttp/expvarhandler/expvar_test.go new file mode 100644 index 0000000..0da3c94 --- /dev/null +++ b/vendor/github.com/erikdubbelboer/fasthttp/expvarhandler/expvar_test.go @@ -0,0 +1,67 @@ +package expvarhandler + +import ( + "encoding/json" + "expvar" + "strings" + "testing" + + "github.com/erikdubbelboer/fasthttp" +) + +func TestExpvarHandlerBasic(t *testing.T) { + expvar.Publish("customVar", expvar.Func(func() interface{} { + return "foobar" + })) + + var ctx fasthttp.RequestCtx + + expvarHandlerCalls.Set(0) + + ExpvarHandler(&ctx) + + body := ctx.Response.Body() + + var m map[string]interface{} + if err := json.Unmarshal(body, &m); err != nil { + t.Fatalf("unexpected error: %s", err) + } + + if _, ok := m["cmdline"]; !ok { + t.Fatalf("cannot locate cmdline expvar") + } + if _, ok := m["memstats"]; !ok { + t.Fatalf("cannot locate memstats expvar") + } + + v := m["customVar"] + sv, ok := v.(string) + if !ok { + t.Fatalf("unexpected custom var type %T. Expecting string", v) + } + if sv != "foobar" { + t.Fatalf("unexpected custom var value: %q. Expecting %q", v, "foobar") + } + + v = m["expvarHandlerCalls"] + fv, ok := v.(float64) + if !ok { + t.Fatalf("unexpected expvarHandlerCalls type %T. Expecting float64", v) + } + if int(fv) != 1 { + t.Fatalf("unexpected value for expvarHandlerCalls: %v. 
Expecting %v", fv, 1) + } +} + +func TestExpvarHandlerRegexp(t *testing.T) { + var ctx fasthttp.RequestCtx + ctx.QueryArgs().Set("r", "cmd") + ExpvarHandler(&ctx) + body := string(ctx.Response.Body()) + if !strings.Contains(body, `"cmdline"`) { + t.Fatalf("missing 'cmdline' expvar") + } + if strings.Contains(body, `"memstats"`) { + t.Fatalf("unexpected memstats expvar found") + } +} diff --git a/vendor/github.com/erikdubbelboer/fasthttp/fasthttpadaptor/adaptor.go b/vendor/github.com/erikdubbelboer/fasthttp/fasthttpadaptor/adaptor.go new file mode 100644 index 0000000..ca9f298 --- /dev/null +++ b/vendor/github.com/erikdubbelboer/fasthttp/fasthttpadaptor/adaptor.go @@ -0,0 +1,142 @@ +// Package fasthttpadaptor provides helper functions for converting net/http +// request handlers to fasthttp request handlers. +package fasthttpadaptor + +import ( + "io" + "net/http" + "net/url" + + "github.com/erikdubbelboer/fasthttp" +) + +// NewFastHTTPHandlerFunc wraps net/http handler func to fasthttp +// request handler, so it can be passed to fasthttp server. +// +// While this function may be used for easy switching from net/http to fasthttp, +// it has the following drawbacks comparing to using manually written fasthttp +// request handler: +// +// * A lot of useful functionality provided by fasthttp is missing +// from net/http handler. +// * net/http -> fasthttp handler conversion has some overhead, +// so the returned handler will be always slower than manually written +// fasthttp handler. +// +// So it is advisable using this function only for quick net/http -> fasthttp +// switching. Then manually convert net/http handlers to fasthttp handlers +// according to https://github.com/erikdubbelboer/fasthttp#switching-from-nethttp-to-fasthttp . +func NewFastHTTPHandlerFunc(h http.HandlerFunc) fasthttp.RequestHandler { + return NewFastHTTPHandler(h) +} + +// NewFastHTTPHandler wraps net/http handler to fasthttp request handler, +// so it can be passed to fasthttp server. +// +// While this function may be used for easy switching from net/http to fasthttp, +// it has the following drawbacks comparing to using manually written fasthttp +// request handler: +// +// * A lot of useful functionality provided by fasthttp is missing +// from net/http handler. +// * net/http -> fasthttp handler conversion has some overhead, +// so the returned handler will be always slower than manually written +// fasthttp handler. +// +// So it is advisable using this function only for quick net/http -> fasthttp +// switching. Then manually convert net/http handlers to fasthttp handlers +// according to https://github.com/erikdubbelboer/fasthttp#switching-from-nethttp-to-fasthttp . 
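+//
+// A minimal usage sketch (the handler body and listen address are
+// illustrative):
+//
+//	h := http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
+//		fmt.Fprintf(w, "Hello, %q", r.URL.Path)
+//	})
+//	log.Fatal(fasthttp.ListenAndServe(":8080", fasthttpadaptor.NewFastHTTPHandler(h)))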
+func NewFastHTTPHandler(h http.Handler) fasthttp.RequestHandler { + return func(ctx *fasthttp.RequestCtx) { + var r http.Request + + body := ctx.PostBody() + r.Method = string(ctx.Method()) + r.Proto = "HTTP/1.1" + r.ProtoMajor = 1 + r.ProtoMinor = 1 + r.RequestURI = string(ctx.RequestURI()) + r.ContentLength = int64(len(body)) + r.Host = string(ctx.Host()) + r.RemoteAddr = ctx.RemoteAddr().String() + + hdr := make(http.Header) + ctx.Request.Header.VisitAll(func(k, v []byte) { + sk := string(k) + sv := string(v) + switch sk { + case "Transfer-Encoding": + r.TransferEncoding = append(r.TransferEncoding, sv) + default: + hdr.Set(sk, sv) + } + }) + r.Header = hdr + r.Body = &netHTTPBody{body} + rURL, err := url.ParseRequestURI(r.RequestURI) + if err != nil { + ctx.Logger().Printf("cannot parse requestURI %q: %s", r.RequestURI, err) + ctx.Error("Internal Server Error", fasthttp.StatusInternalServerError) + return + } + r.URL = rURL + + var w netHTTPResponseWriter + h.ServeHTTP(&w, &r) + + ctx.SetStatusCode(w.StatusCode()) + for k, vv := range w.Header() { + for _, v := range vv { + ctx.Response.Header.Set(k, v) + } + } + ctx.Write(w.body) + } +} + +type netHTTPBody struct { + b []byte +} + +func (r *netHTTPBody) Read(p []byte) (int, error) { + if len(r.b) == 0 { + return 0, io.EOF + } + n := copy(p, r.b) + r.b = r.b[n:] + return n, nil +} + +func (r *netHTTPBody) Close() error { + r.b = r.b[:0] + return nil +} + +type netHTTPResponseWriter struct { + statusCode int + h http.Header + body []byte +} + +func (w *netHTTPResponseWriter) StatusCode() int { + if w.statusCode == 0 { + return http.StatusOK + } + return w.statusCode +} + +func (w *netHTTPResponseWriter) Header() http.Header { + if w.h == nil { + w.h = make(http.Header) + } + return w.h +} + +func (w *netHTTPResponseWriter) WriteHeader(statusCode int) { + w.statusCode = statusCode +} + +func (w *netHTTPResponseWriter) Write(p []byte) (int, error) { + w.body = append(w.body, p...) + return len(p), nil +} diff --git a/vendor/github.com/erikdubbelboer/fasthttp/fasthttpadaptor/adaptor_test.go b/vendor/github.com/erikdubbelboer/fasthttp/fasthttpadaptor/adaptor_test.go new file mode 100644 index 0000000..d5eb1d6 --- /dev/null +++ b/vendor/github.com/erikdubbelboer/fasthttp/fasthttpadaptor/adaptor_test.go @@ -0,0 +1,130 @@ +package fasthttpadaptor + +import ( + "fmt" + "io/ioutil" + "net" + "net/http" + "net/url" + "reflect" + "testing" + + "github.com/erikdubbelboer/fasthttp" +) + +func TestNewFastHTTPHandler(t *testing.T) { + expectedMethod := "POST" + expectedProto := "HTTP/1.1" + expectedProtoMajor := 1 + expectedProtoMinor := 1 + expectedRequestURI := "/foo/bar?baz=123" + expectedBody := "body 123 foo bar baz" + expectedContentLength := len(expectedBody) + expectedTransferEncoding := "encoding" + expectedHost := "foobar.com" + expectedRemoteAddr := "1.2.3.4:6789" + expectedHeader := map[string]string{ + "Foo-Bar": "baz", + "Abc": "defg", + "XXX-Remote-Addr": "123.43.4543.345", + } + expectedURL, err := url.ParseRequestURI(expectedRequestURI) + if err != nil { + t.Fatalf("unexpected error: %s", err) + } + + callsCount := 0 + nethttpH := func(w http.ResponseWriter, r *http.Request) { + callsCount++ + if r.Method != expectedMethod { + t.Fatalf("unexpected method %q. Expecting %q", r.Method, expectedMethod) + } + if r.Proto != expectedProto { + t.Fatalf("unexpected proto %q. Expecting %q", r.Proto, expectedProto) + } + if r.ProtoMajor != expectedProtoMajor { + t.Fatalf("unexpected protoMajor %d. 
Expecting %d", r.ProtoMajor, expectedProtoMajor) + } + if r.ProtoMinor != expectedProtoMinor { + t.Fatalf("unexpected protoMinor %d. Expecting %d", r.ProtoMinor, expectedProtoMinor) + } + if r.RequestURI != expectedRequestURI { + t.Fatalf("unexpected requestURI %q. Expecting %q", r.RequestURI, expectedRequestURI) + } + if r.ContentLength != int64(expectedContentLength) { + t.Fatalf("unexpected contentLength %d. Expecting %d", r.ContentLength, expectedContentLength) + } + if len(r.TransferEncoding) != 1 || r.TransferEncoding[0] != expectedTransferEncoding { + t.Fatalf("unexpected transferEncoding %q. Expecting %q", r.TransferEncoding, expectedTransferEncoding) + } + if r.Host != expectedHost { + t.Fatalf("unexpected host %q. Expecting %q", r.Host, expectedHost) + } + if r.RemoteAddr != expectedRemoteAddr { + t.Fatalf("unexpected remoteAddr %q. Expecting %q", r.RemoteAddr, expectedRemoteAddr) + } + body, err := ioutil.ReadAll(r.Body) + r.Body.Close() + if err != nil { + t.Fatalf("unexpected error when reading request body: %s", err) + } + if string(body) != expectedBody { + t.Fatalf("unexpected body %q. Expecting %q", body, expectedBody) + } + if !reflect.DeepEqual(r.URL, expectedURL) { + t.Fatalf("unexpected URL: %#v. Expecting %#v", r.URL, expectedURL) + } + + for k, expectedV := range expectedHeader { + v := r.Header.Get(k) + if v != expectedV { + t.Fatalf("unexpected header value %q for key %q. Expecting %q", v, k, expectedV) + } + } + + w.Header().Set("Header1", "value1") + w.Header().Set("Header2", "value2") + w.WriteHeader(http.StatusBadRequest) + fmt.Fprintf(w, "request body is %q", body) + } + fasthttpH := NewFastHTTPHandler(http.HandlerFunc(nethttpH)) + + var ctx fasthttp.RequestCtx + var req fasthttp.Request + + req.Header.SetMethod(expectedMethod) + req.SetRequestURI(expectedRequestURI) + req.Header.SetHost(expectedHost) + req.Header.Add("Transfer-Encoding", expectedTransferEncoding) + req.BodyWriter().Write([]byte(expectedBody)) + for k, v := range expectedHeader { + req.Header.Set(k, v) + } + + remoteAddr, err := net.ResolveTCPAddr("tcp", expectedRemoteAddr) + if err != nil { + t.Fatalf("unexpected error: %s", err) + } + ctx.Init(&req, remoteAddr, nil) + + fasthttpH(&ctx) + + if callsCount != 1 { + t.Fatalf("unexpected callsCount: %d. Expecting 1", callsCount) + } + + resp := &ctx.Response + if resp.StatusCode() != fasthttp.StatusBadRequest { + t.Fatalf("unexpected statusCode: %d. Expecting %d", resp.StatusCode(), fasthttp.StatusBadRequest) + } + if string(resp.Header.Peek("Header1")) != "value1" { + t.Fatalf("unexpected header value: %q. Expecting %q", resp.Header.Peek("Header1"), "value1") + } + if string(resp.Header.Peek("Header2")) != "value2" { + t.Fatalf("unexpected header value: %q. Expecting %q", resp.Header.Peek("Header2"), "value2") + } + expectedResponseBody := fmt.Sprintf("request body is %q", expectedBody) + if string(resp.Body()) != expectedResponseBody { + t.Fatalf("unexpected response body %q. Expecting %q", resp.Body(), expectedResponseBody) + } +} diff --git a/vendor/github.com/erikdubbelboer/fasthttp/fasthttputil/doc.go b/vendor/github.com/erikdubbelboer/fasthttp/fasthttputil/doc.go new file mode 100644 index 0000000..9cf69e7 --- /dev/null +++ b/vendor/github.com/erikdubbelboer/fasthttp/fasthttputil/doc.go @@ -0,0 +1,2 @@ +// Package fasthttputil provides utility functions for fasthttp. 
+package fasthttputil diff --git a/vendor/github.com/erikdubbelboer/fasthttp/fasthttputil/ecdsa.key b/vendor/github.com/erikdubbelboer/fasthttp/fasthttputil/ecdsa.key new file mode 100644 index 0000000..7e201fc --- /dev/null +++ b/vendor/github.com/erikdubbelboer/fasthttp/fasthttputil/ecdsa.key @@ -0,0 +1,5 @@ +-----BEGIN EC PRIVATE KEY----- +MHcCAQEEIBpQbZ6a5jL1Yh4wdP6yZk4MKjYWArD/QOLENFw8vbELoAoGCCqGSM49 +AwEHoUQDQgAEKQCZWgE2IBhb47ot8MIs1D4KSisHYlZ41IWyeutpjb0fjwwIhimh +pl1Qld1/d2j3Z3vVyfa5yD+ncV7qCFZuSg== +-----END EC PRIVATE KEY----- diff --git a/vendor/github.com/erikdubbelboer/fasthttp/fasthttputil/ecdsa.pem b/vendor/github.com/erikdubbelboer/fasthttp/fasthttputil/ecdsa.pem new file mode 100644 index 0000000..ca1a7f2 --- /dev/null +++ b/vendor/github.com/erikdubbelboer/fasthttp/fasthttputil/ecdsa.pem @@ -0,0 +1,10 @@ +-----BEGIN CERTIFICATE----- +MIIBbTCCAROgAwIBAgIQPo718S+K+G7hc1SgTEU4QDAKBggqhkjOPQQDAjASMRAw +DgYDVQQKEwdBY21lIENvMB4XDTE3MDQyMDIxMDExNFoXDTE4MDQyMDIxMDExNFow +EjEQMA4GA1UEChMHQWNtZSBDbzBZMBMGByqGSM49AgEGCCqGSM49AwEHA0IABCkA +mVoBNiAYW+O6LfDCLNQ+CkorB2JWeNSFsnrraY29H48MCIYpoaZdUJXdf3do92d7 +1cn2ucg/p3Fe6ghWbkqjSzBJMA4GA1UdDwEB/wQEAwIFoDATBgNVHSUEDDAKBggr +BgEFBQcDATAMBgNVHRMBAf8EAjAAMBQGA1UdEQQNMAuCCWxvY2FsaG9zdDAKBggq +hkjOPQQDAgNIADBFAiEAoLAIQkvSuIcHUqyWroA6yWYw2fznlRH/uO9/hMCxUCEC +IClRYb/5O9eD/Eq/ozPnwNpsQHOeYefEhadJ/P82y0lG +-----END CERTIFICATE----- diff --git a/vendor/github.com/erikdubbelboer/fasthttp/fasthttputil/inmemory_listener.go b/vendor/github.com/erikdubbelboer/fasthttp/fasthttputil/inmemory_listener.go new file mode 100644 index 0000000..d6bcca4 --- /dev/null +++ b/vendor/github.com/erikdubbelboer/fasthttp/fasthttputil/inmemory_listener.go @@ -0,0 +1,84 @@ +package fasthttputil + +import ( + "fmt" + "net" + "sync" +) + +// InmemoryListener provides in-memory dialer<->net.Listener implementation. +// +// It may be used either for fast in-process client<->server communcations +// without network stack overhead or for client<->server tests. +type InmemoryListener struct { + lock sync.Mutex + closed bool + conns chan net.Conn +} + +// NewInmemoryListener returns new in-memory dialer<->net.Listener. +func NewInmemoryListener() *InmemoryListener { + return &InmemoryListener{ + conns: make(chan net.Conn, 1024), + } +} + +// Accept implements net.Listener's Accept. +// +// It is safe calling Accept from concurrently running goroutines. +// +// Accept returns new connection per each Dial call. +func (ln *InmemoryListener) Accept() (net.Conn, error) { + c, ok := <-ln.conns + if !ok { + return nil, fmt.Errorf("InmemoryListener is already closed: use of closed network connection") + } + return c, nil +} + +// Close implements net.Listener's Close. +func (ln *InmemoryListener) Close() error { + var err error + + ln.lock.Lock() + if !ln.closed { + close(ln.conns) + ln.closed = true + } else { + err = fmt.Errorf("InmemoryListener is already closed") + } + ln.lock.Unlock() + return err +} + +// Addr implements net.Listener's Addr. +func (ln *InmemoryListener) Addr() net.Addr { + return &net.UnixAddr{ + Name: "InmemoryListener", + Net: "memory", + } +} + +// Dial creates new client<->server connection, enqueues server side +// of the connection to Accept and returns client side of the connection. +// +// It is safe calling Dial from concurrently running goroutines. 
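+//
+// A typical in-process wiring sketch (the handler, host name and error
+// handling are illustrative):
+//
+//	ln := NewInmemoryListener()
+//	go fasthttp.Serve(ln, func(ctx *fasthttp.RequestCtx) {
+//		ctx.WriteString("hello")
+//	})
+//	c := &fasthttp.HostClient{
+//		Addr: "test", // never dialed: Dial below always returns an in-memory conn
+//		Dial: func(addr string) (net.Conn, error) { return ln.Dial() },
+//	}
+//	statusCode, body, err := c.Get(nil, "http://test/")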
+func (ln *InmemoryListener) Dial() (net.Conn, error) { + pc := NewPipeConns() + cConn := pc.Conn1() + sConn := pc.Conn2() + ln.lock.Lock() + if !ln.closed { + ln.conns <- sConn + } else { + sConn.Close() + cConn.Close() + cConn = nil + } + ln.lock.Unlock() + + if cConn == nil { + return nil, fmt.Errorf("InmemoryListener is already closed") + } + return cConn, nil +} diff --git a/vendor/github.com/erikdubbelboer/fasthttp/fasthttputil/inmemory_listener_test.go b/vendor/github.com/erikdubbelboer/fasthttp/fasthttputil/inmemory_listener_test.go new file mode 100644 index 0000000..86aab68 --- /dev/null +++ b/vendor/github.com/erikdubbelboer/fasthttp/fasthttputil/inmemory_listener_test.go @@ -0,0 +1,92 @@ +package fasthttputil + +import ( + "bytes" + "fmt" + "testing" + "time" +) + +func TestInmemoryListener(t *testing.T) { + ln := NewInmemoryListener() + + ch := make(chan struct{}) + for i := 0; i < 10; i++ { + go func(n int) { + conn, err := ln.Dial() + if err != nil { + t.Fatalf("unexpected error: %s", err) + } + defer conn.Close() + req := fmt.Sprintf("request_%d", n) + nn, err := conn.Write([]byte(req)) + if err != nil { + t.Fatalf("unexpected error: %s", err) + } + if nn != len(req) { + t.Fatalf("unexpected number of bytes written: %d. Expecting %d", nn, len(req)) + } + buf := make([]byte, 30) + nn, err = conn.Read(buf) + if err != nil { + t.Fatalf("unexpected error: %s", err) + } + buf = buf[:nn] + resp := fmt.Sprintf("response_%d", n) + if nn != len(resp) { + t.Fatalf("unexpected number of bytes read: %d. Expecting %d", nn, len(resp)) + } + if string(buf) != resp { + t.Fatalf("unexpected response %q. Expecting %q", buf, resp) + } + ch <- struct{}{} + }(i) + } + + serverCh := make(chan struct{}) + go func() { + for { + conn, err := ln.Accept() + if err != nil { + close(serverCh) + return + } + defer conn.Close() + buf := make([]byte, 30) + n, err := conn.Read(buf) + if err != nil { + t.Fatalf("unexpected error: %s", err) + } + buf = buf[:n] + if !bytes.HasPrefix(buf, []byte("request_")) { + t.Fatalf("unexpected request prefix %q. Expecting %q", buf, "request_") + } + resp := fmt.Sprintf("response_%s", buf[len("request_"):]) + n, err = conn.Write([]byte(resp)) + if err != nil { + t.Fatalf("unexpected error: %s", err) + } + if n != len(resp) { + t.Fatalf("unexpected number of bytes written: %d. Expecting %d", n, len(resp)) + } + } + }() + + for i := 0; i < 10; i++ { + select { + case <-ch: + case <-time.After(time.Second): + t.Fatalf("timeout") + } + } + + if err := ln.Close(); err != nil { + t.Fatalf("unexpected error: %s", err) + } + + select { + case <-serverCh: + case <-time.After(time.Second): + t.Fatalf("timeout") + } +} diff --git a/vendor/github.com/erikdubbelboer/fasthttp/fasthttputil/inmemory_listener_timing_test.go b/vendor/github.com/erikdubbelboer/fasthttp/fasthttputil/inmemory_listener_timing_test.go new file mode 100644 index 0000000..876d9bc --- /dev/null +++ b/vendor/github.com/erikdubbelboer/fasthttp/fasthttputil/inmemory_listener_timing_test.go @@ -0,0 +1,198 @@ +package fasthttputil_test + +import ( + "crypto/tls" + "net" + "testing" + + "github.com/erikdubbelboer/fasthttp" + "github.com/erikdubbelboer/fasthttp/fasthttputil" +) + +// BenchmarkPlainStreaming measures end-to-end plaintext streaming performance +// for fasthttp client and server. +// +// It issues http requests over a small number of keep-alive connections. 
+func BenchmarkPlainStreaming(b *testing.B) { + benchmark(b, streamingHandler, false) +} + +// BenchmarkPlainHandshake measures end-to-end plaintext handshake performance +// for fasthttp client and server. +// +// It re-establishes new connection per each http request. +func BenchmarkPlainHandshake(b *testing.B) { + benchmark(b, handshakeHandler, false) +} + +// BenchmarkTLSStreaming measures end-to-end TLS streaming performance +// for fasthttp client and server. +// +// It issues http requests over a small number of TLS keep-alive connections. +func BenchmarkTLSStreaming(b *testing.B) { + benchmark(b, streamingHandler, true) +} + +// BenchmarkTLSHandshake measures end-to-end TLS handshake performance +// for fasthttp client and server. +// +// It re-establishes new TLS connection per each http request. +func BenchmarkTLSHandshakeRSAWithClientSessionCache(b *testing.B) { + bc := &benchConfig{ + IsTLS: true, + DisableClientSessionCache: false, + } + benchmarkExt(b, handshakeHandler, bc) +} + +func BenchmarkTLSHandshakeRSAWithoutClientSessionCache(b *testing.B) { + bc := &benchConfig{ + IsTLS: true, + DisableClientSessionCache: true, + } + benchmarkExt(b, handshakeHandler, bc) +} + +func BenchmarkTLSHandshakeECDSAWithClientSessionCache(b *testing.B) { + bc := &benchConfig{ + IsTLS: true, + DisableClientSessionCache: false, + UseECDSA: true, + } + benchmarkExt(b, handshakeHandler, bc) +} + +func BenchmarkTLSHandshakeECDSAWithoutClientSessionCache(b *testing.B) { + bc := &benchConfig{ + IsTLS: true, + DisableClientSessionCache: true, + UseECDSA: true, + } + benchmarkExt(b, handshakeHandler, bc) +} + +func BenchmarkTLSHandshakeECDSAWithCurvesWithClientSessionCache(b *testing.B) { + bc := &benchConfig{ + IsTLS: true, + DisableClientSessionCache: false, + UseCurves: true, + UseECDSA: true, + } + benchmarkExt(b, handshakeHandler, bc) +} + +func BenchmarkTLSHandshakeECDSAWithCurvesWithoutClientSessionCache(b *testing.B) { + bc := &benchConfig{ + IsTLS: true, + DisableClientSessionCache: true, + UseCurves: true, + UseECDSA: true, + } + benchmarkExt(b, handshakeHandler, bc) +} + +func benchmark(b *testing.B, h fasthttp.RequestHandler, isTLS bool) { + bc := &benchConfig{ + IsTLS: isTLS, + } + benchmarkExt(b, h, bc) +} + +type benchConfig struct { + IsTLS bool + DisableClientSessionCache bool + UseCurves bool + UseECDSA bool +} + +func benchmarkExt(b *testing.B, h fasthttp.RequestHandler, bc *benchConfig) { + var serverTLSConfig, clientTLSConfig *tls.Config + if bc.IsTLS { + certFile := "rsa.pem" + keyFile := "rsa.key" + if bc.UseECDSA { + certFile = "ecdsa.pem" + keyFile = "ecdsa.key" + } + cert, err := tls.LoadX509KeyPair(certFile, keyFile) + if err != nil { + b.Fatalf("cannot load TLS certificate from certFile=%q, keyFile=%q: %s", certFile, keyFile, err) + } + serverTLSConfig = &tls.Config{ + Certificates: []tls.Certificate{cert}, + PreferServerCipherSuites: true, + } + serverTLSConfig.CurvePreferences = []tls.CurveID{} + if bc.UseCurves { + serverTLSConfig.CurvePreferences = []tls.CurveID{ + tls.CurveP256, + } + } + clientTLSConfig = &tls.Config{ + InsecureSkipVerify: true, + } + if bc.DisableClientSessionCache { + clientTLSConfig.ClientSessionCache = fakeSessionCache{} + } + } + ln := fasthttputil.NewInmemoryListener() + serverStopCh := make(chan struct{}) + go func() { + serverLn := net.Listener(ln) + if serverTLSConfig != nil { + serverLn = tls.NewListener(serverLn, serverTLSConfig) + } + if err := fasthttp.Serve(serverLn, h); err != nil { + b.Fatalf("unexpected error in server: %s", err) + } 
+ close(serverStopCh) + }() + c := &fasthttp.HostClient{ + Dial: func(addr string) (net.Conn, error) { + return ln.Dial() + }, + IsTLS: clientTLSConfig != nil, + TLSConfig: clientTLSConfig, + } + + b.RunParallel(func(pb *testing.PB) { + runRequests(b, pb, c) + }) + ln.Close() + <-serverStopCh +} + +func streamingHandler(ctx *fasthttp.RequestCtx) { + ctx.WriteString("foobar") +} + +func handshakeHandler(ctx *fasthttp.RequestCtx) { + streamingHandler(ctx) + + // Explicitly close connection after each response. + ctx.SetConnectionClose() +} + +func runRequests(b *testing.B, pb *testing.PB, c *fasthttp.HostClient) { + var req fasthttp.Request + req.SetRequestURI("http://foo.bar/baz") + var resp fasthttp.Response + for pb.Next() { + if err := c.Do(&req, &resp); err != nil { + b.Fatalf("unexpected error: %s", err) + } + if resp.StatusCode() != fasthttp.StatusOK { + b.Fatalf("unexpected status code: %d. Expecting %d", resp.StatusCode(), fasthttp.StatusOK) + } + } +} + +type fakeSessionCache struct{} + +func (fakeSessionCache) Get(sessionKey string) (*tls.ClientSessionState, bool) { + return nil, false +} + +func (fakeSessionCache) Put(sessionKey string, cs *tls.ClientSessionState) { + // no-op +} diff --git a/vendor/github.com/erikdubbelboer/fasthttp/fasthttputil/pipeconns.go b/vendor/github.com/erikdubbelboer/fasthttp/fasthttputil/pipeconns.go new file mode 100644 index 0000000..e5a0235 --- /dev/null +++ b/vendor/github.com/erikdubbelboer/fasthttp/fasthttputil/pipeconns.go @@ -0,0 +1,283 @@ +package fasthttputil + +import ( + "errors" + "io" + "net" + "sync" + "time" +) + +// NewPipeConns returns new bi-directonal connection pipe. +func NewPipeConns() *PipeConns { + ch1 := make(chan *byteBuffer, 4) + ch2 := make(chan *byteBuffer, 4) + + pc := &PipeConns{ + stopCh: make(chan struct{}), + } + pc.c1.rCh = ch1 + pc.c1.wCh = ch2 + pc.c2.rCh = ch2 + pc.c2.wCh = ch1 + pc.c1.pc = pc + pc.c2.pc = pc + return pc +} + +// PipeConns provides bi-directional connection pipe, +// which use in-process memory as a transport. +// +// PipeConns must be created by calling NewPipeConns. +// +// PipeConns has the following additional features comparing to connections +// returned from net.Pipe(): +// +// * It is faster. +// * It buffers Write calls, so there is no need to have concurrent goroutine +// calling Read in order to unblock each Write call. +// * It supports read and write deadlines. +// +type PipeConns struct { + c1 pipeConn + c2 pipeConn + stopCh chan struct{} + stopChLock sync.Mutex +} + +// Conn1 returns the first end of bi-directional pipe. +// +// Data written to Conn1 may be read from Conn2. +// Data written to Conn2 may be read from Conn1. +func (pc *PipeConns) Conn1() net.Conn { + return &pc.c1 +} + +// Conn2 returns the second end of bi-directional pipe. +// +// Data written to Conn2 may be read from Conn1. +// Data written to Conn1 may be read from Conn2. +func (pc *PipeConns) Conn2() net.Conn { + return &pc.c2 +} + +// Close closes pipe connections. +func (pc *PipeConns) Close() error { + pc.stopChLock.Lock() + select { + case <-pc.stopCh: + default: + close(pc.stopCh) + } + pc.stopChLock.Unlock() + + return nil +} + +type pipeConn struct { + b *byteBuffer + bb []byte + + rCh chan *byteBuffer + wCh chan *byteBuffer + pc *PipeConns + + readDeadlineTimer *time.Timer + writeDeadlineTimer *time.Timer + + readDeadlineCh <-chan time.Time + writeDeadlineCh <-chan time.Time +} + +func (c *pipeConn) Write(p []byte) (int, error) { + b := acquireByteBuffer() + b.b = append(b.b[:0], p...) 
+ + select { + case <-c.pc.stopCh: + releaseByteBuffer(b) + return 0, errConnectionClosed + default: + } + + select { + case c.wCh <- b: + default: + select { + case c.wCh <- b: + case <-c.writeDeadlineCh: + c.writeDeadlineCh = closedDeadlineCh + return 0, ErrTimeout + case <-c.pc.stopCh: + releaseByteBuffer(b) + return 0, errConnectionClosed + } + } + + return len(p), nil +} + +func (c *pipeConn) Read(p []byte) (int, error) { + mayBlock := true + nn := 0 + for len(p) > 0 { + n, err := c.read(p, mayBlock) + nn += n + if err != nil { + if !mayBlock && err == errWouldBlock { + err = nil + } + return nn, err + } + p = p[n:] + mayBlock = false + } + + return nn, nil +} + +func (c *pipeConn) read(p []byte, mayBlock bool) (int, error) { + if len(c.bb) == 0 { + if err := c.readNextByteBuffer(mayBlock); err != nil { + return 0, err + } + } + n := copy(p, c.bb) + c.bb = c.bb[n:] + + return n, nil +} + +func (c *pipeConn) readNextByteBuffer(mayBlock bool) error { + releaseByteBuffer(c.b) + c.b = nil + + select { + case c.b = <-c.rCh: + default: + if !mayBlock { + return errWouldBlock + } + select { + case c.b = <-c.rCh: + case <-c.readDeadlineCh: + c.readDeadlineCh = closedDeadlineCh + // rCh may contain data when deadline is reached. + // Read the data before returning ErrTimeout. + select { + case c.b = <-c.rCh: + default: + return ErrTimeout + } + case <-c.pc.stopCh: + // rCh may contain data when stopCh is closed. + // Read the data before returning EOF. + select { + case c.b = <-c.rCh: + default: + return io.EOF + } + } + } + + c.bb = c.b.b + return nil +} + +var ( + errWouldBlock = errors.New("would block") + errConnectionClosed = errors.New("connection closed") + + // ErrTimeout is returned from Read() or Write() on timeout. + ErrTimeout = errors.New("timeout") +) + +func (c *pipeConn) Close() error { + return c.pc.Close() +} + +func (c *pipeConn) LocalAddr() net.Addr { + return pipeAddr(0) +} + +func (c *pipeConn) RemoteAddr() net.Addr { + return pipeAddr(0) +} + +func (c *pipeConn) SetDeadline(deadline time.Time) error { + c.SetReadDeadline(deadline) + c.SetWriteDeadline(deadline) + return nil +} + +func (c *pipeConn) SetReadDeadline(deadline time.Time) error { + if c.readDeadlineTimer == nil { + c.readDeadlineTimer = time.NewTimer(time.Hour) + } + c.readDeadlineCh = updateTimer(c.readDeadlineTimer, deadline) + return nil +} + +func (c *pipeConn) SetWriteDeadline(deadline time.Time) error { + if c.writeDeadlineTimer == nil { + c.writeDeadlineTimer = time.NewTimer(time.Hour) + } + c.writeDeadlineCh = updateTimer(c.writeDeadlineTimer, deadline) + return nil +} + +func updateTimer(t *time.Timer, deadline time.Time) <-chan time.Time { + if !t.Stop() { + select { + case <-t.C: + default: + } + } + if deadline.IsZero() { + return nil + } + d := -time.Since(deadline) + if d <= 0 { + return closedDeadlineCh + } + t.Reset(d) + return t.C +} + +var closedDeadlineCh = func() <-chan time.Time { + ch := make(chan time.Time) + close(ch) + return ch +}() + +type pipeAddr int + +func (pipeAddr) Network() string { + return "pipe" +} + +func (pipeAddr) String() string { + return "pipe" +} + +type byteBuffer struct { + b []byte +} + +func acquireByteBuffer() *byteBuffer { + return byteBufferPool.Get().(*byteBuffer) +} + +func releaseByteBuffer(b *byteBuffer) { + if b != nil { + byteBufferPool.Put(b) + } +} + +var byteBufferPool = &sync.Pool{ + New: func() interface{} { + return &byteBuffer{ + b: make([]byte, 1024), + } + }, +} diff --git 
a/vendor/github.com/erikdubbelboer/fasthttp/fasthttputil/pipeconns_test.go b/vendor/github.com/erikdubbelboer/fasthttp/fasthttputil/pipeconns_test.go new file mode 100644 index 0000000..017dd57 --- /dev/null +++ b/vendor/github.com/erikdubbelboer/fasthttp/fasthttputil/pipeconns_test.go @@ -0,0 +1,342 @@ +package fasthttputil + +import ( + "bytes" + "fmt" + "io" + "io/ioutil" + "net" + "testing" + "time" +) + +func TestPipeConnsWriteTimeout(t *testing.T) { + pc := NewPipeConns() + c1 := pc.Conn1() + + deadline := time.Now().Add(time.Millisecond) + if err := c1.SetWriteDeadline(deadline); err != nil { + t.Fatalf("unexpected error: %s", err) + } + + data := []byte("foobar") + for { + _, err := c1.Write(data) + if err != nil { + if err == ErrTimeout { + break + } + t.Fatalf("unexpected error: %s", err) + } + } + + for i := 0; i < 10; i++ { + _, err := c1.Write(data) + if err == nil { + t.Fatalf("expecting error") + } + if err != ErrTimeout { + t.Fatalf("unexpected error: %s. Expecting %s", err, ErrTimeout) + } + } + + // read the written data + c2 := pc.Conn2() + if err := c2.SetReadDeadline(time.Now().Add(10 * time.Millisecond)); err != nil { + t.Fatalf("unexpected error: %s", err) + } + for { + _, err := c2.Read(data) + if err != nil { + if err == ErrTimeout { + break + } + t.Fatalf("unexpected error: %s", err) + } + } + + for i := 0; i < 10; i++ { + _, err := c2.Read(data) + if err == nil { + t.Fatalf("expecting error") + } + if err != ErrTimeout { + t.Fatalf("unexpected error: %s. Expecting %s", err, ErrTimeout) + } + } +} + +func TestPipeConnsPositiveReadTimeout(t *testing.T) { + testPipeConnsReadTimeout(t, time.Millisecond) +} + +func TestPipeConnsNegativeReadTimeout(t *testing.T) { + testPipeConnsReadTimeout(t, -time.Second) +} + +var zeroTime time.Time + +func testPipeConnsReadTimeout(t *testing.T, timeout time.Duration) { + pc := NewPipeConns() + c1 := pc.Conn1() + + deadline := time.Now().Add(timeout) + if err := c1.SetReadDeadline(deadline); err != nil { + t.Fatalf("unexpected error: %s", err) + } + + var buf [1]byte + for i := 0; i < 10; i++ { + _, err := c1.Read(buf[:]) + if err == nil { + t.Fatalf("expecting error on iteration %d", i) + } + if err != ErrTimeout { + t.Fatalf("unexpected error on iteration %d: %s. Expecting %s", i, err, ErrTimeout) + } + } + + // disable deadline and send data from c2 to c1 + if err := c1.SetReadDeadline(zeroTime); err != nil { + t.Fatalf("unexpected error: %s", err) + } + + data := []byte("foobar") + c2 := pc.Conn2() + if _, err := c2.Write(data); err != nil { + t.Fatalf("unexpected error: %s", err) + } + dataBuf := make([]byte, len(data)) + if _, err := io.ReadFull(c1, dataBuf); err != nil { + t.Fatalf("unexpected error: %s", err) + } + if !bytes.Equal(data, dataBuf) { + t.Fatalf("unexpected data received: %q. 
Expecting %q", dataBuf, data) + } +} + +func TestPipeConnsCloseWhileReadWriteConcurrent(t *testing.T) { + concurrency := 4 + ch := make(chan struct{}, concurrency) + for i := 0; i < concurrency; i++ { + go func() { + testPipeConnsCloseWhileReadWriteSerial(t) + ch <- struct{}{} + }() + } + + for i := 0; i < concurrency; i++ { + select { + case <-ch: + case <-time.After(3 * time.Second): + t.Fatalf("timeout") + } + } +} + +func TestPipeConnsCloseWhileReadWriteSerial(t *testing.T) { + testPipeConnsCloseWhileReadWriteSerial(t) +} + +func testPipeConnsCloseWhileReadWriteSerial(t *testing.T) { + for i := 0; i < 10; i++ { + testPipeConnsCloseWhileReadWrite(t) + } +} + +func testPipeConnsCloseWhileReadWrite(t *testing.T) { + pc := NewPipeConns() + c1 := pc.Conn1() + c2 := pc.Conn2() + + readCh := make(chan error) + go func() { + var err error + if _, err = io.Copy(ioutil.Discard, c1); err != nil { + if err != errConnectionClosed { + err = fmt.Errorf("unexpected error: %s", err) + } else { + err = nil + } + } + readCh <- err + }() + + writeCh := make(chan error) + go func() { + var err error + for { + if _, err = c2.Write([]byte("foobar")); err != nil { + if err != errConnectionClosed { + err = fmt.Errorf("unexpected error: %s", err) + } else { + err = nil + } + break + } + } + writeCh <- err + }() + + time.Sleep(10 * time.Millisecond) + if err := c1.Close(); err != nil { + t.Fatalf("unexpected error: %s", err) + } + if err := c2.Close(); err != nil { + t.Fatalf("unexpected error: %s", err) + } + + select { + case err := <-readCh: + if err != nil { + t.Fatalf("unexpected error in reader: %s", err) + } + case <-time.After(time.Second): + t.Fatalf("timeout") + } + select { + case err := <-writeCh: + if err != nil { + t.Fatalf("unexpected error in writer: %s", err) + } + case <-time.After(time.Second): + t.Fatalf("timeout") + } +} + +func TestPipeConnsReadWriteSerial(t *testing.T) { + testPipeConnsReadWriteSerial(t) +} + +func TestPipeConnsReadWriteConcurrent(t *testing.T) { + testConcurrency(t, 10, testPipeConnsReadWriteSerial) +} + +func testPipeConnsReadWriteSerial(t *testing.T) { + pc := NewPipeConns() + testPipeConnsReadWrite(t, pc.Conn1(), pc.Conn2()) + + pc = NewPipeConns() + testPipeConnsReadWrite(t, pc.Conn2(), pc.Conn1()) +} + +func testPipeConnsReadWrite(t *testing.T, c1, c2 net.Conn) { + defer c1.Close() + defer c2.Close() + + var buf [32]byte + for i := 0; i < 10; i++ { + // The first write + s1 := fmt.Sprintf("foo_%d", i) + n, err := c1.Write([]byte(s1)) + if err != nil { + t.Fatalf("unexpected error: %s", err) + } + if n != len(s1) { + t.Fatalf("unexpected number of bytes written: %d. Expecting %d", n, len(s1)) + } + + // The second write + s2 := fmt.Sprintf("bar_%d", i) + n, err = c1.Write([]byte(s2)) + if err != nil { + t.Fatalf("unexpected error: %s", err) + } + if n != len(s2) { + t.Fatalf("unexpected number of bytes written: %d. Expecting %d", n, len(s2)) + } + + // Read data written above in two writes + s := s1 + s2 + n, err = c2.Read(buf[:]) + if err != nil { + t.Fatalf("unexpected error: %s", err) + } + if n != len(s) { + t.Fatalf("unexpected number of bytes read: %d. Expecting %d", n, len(s)) + } + if string(buf[:n]) != s { + t.Fatalf("unexpected string read: %q. 
Expecting %q", buf[:n], s) + } + } +} + +func TestPipeConnsCloseSerial(t *testing.T) { + testPipeConnsCloseSerial(t) +} + +func TestPipeConnsCloseConcurrent(t *testing.T) { + testConcurrency(t, 10, testPipeConnsCloseSerial) +} + +func testPipeConnsCloseSerial(t *testing.T) { + pc := NewPipeConns() + testPipeConnsClose(t, pc.Conn1(), pc.Conn2()) + + pc = NewPipeConns() + testPipeConnsClose(t, pc.Conn2(), pc.Conn1()) +} + +func testPipeConnsClose(t *testing.T, c1, c2 net.Conn) { + if err := c1.Close(); err != nil { + t.Fatalf("unexpected error: %s", err) + } + var buf [10]byte + + // attempt writing to closed conn + for i := 0; i < 10; i++ { + n, err := c1.Write(buf[:]) + if err == nil { + t.Fatalf("expecting error") + } + if n != 0 { + t.Fatalf("unexpected number of bytes written: %d. Expecting 0", n) + } + } + + // attempt reading from closed conn + for i := 0; i < 10; i++ { + n, err := c2.Read(buf[:]) + if err == nil { + t.Fatalf("expecting error") + } + if err != io.EOF { + t.Fatalf("unexpected error: %s. Expecting %s", err, io.EOF) + } + if n != 0 { + t.Fatalf("unexpected number of bytes read: %d. Expecting 0", n) + } + } + + if err := c2.Close(); err != nil { + t.Fatalf("unexpected error: %s", err) + } + + // attempt closing already closed conns + for i := 0; i < 10; i++ { + if err := c1.Close(); err != nil { + t.Fatalf("unexpected error: %s", err) + } + if err := c2.Close(); err != nil { + t.Fatalf("unexpected error: %s", err) + } + } +} + +func testConcurrency(t *testing.T, concurrency int, f func(*testing.T)) { + ch := make(chan struct{}, concurrency) + for i := 0; i < concurrency; i++ { + go func() { + f(t) + ch <- struct{}{} + }() + } + + for i := 0; i < concurrency; i++ { + select { + case <-ch: + case <-time.After(time.Second): + t.Fatalf("timeout") + } + } +} diff --git a/vendor/github.com/erikdubbelboer/fasthttp/fasthttputil/rsa.key b/vendor/github.com/erikdubbelboer/fasthttp/fasthttputil/rsa.key new file mode 100644 index 0000000..00a79a3 --- /dev/null +++ b/vendor/github.com/erikdubbelboer/fasthttp/fasthttputil/rsa.key @@ -0,0 +1,28 @@ +-----BEGIN PRIVATE KEY----- +MIIEvgIBADANBgkqhkiG9w0BAQEFAASCBKgwggSkAgEAAoIBAQD4IQusAs8PJdnG +3mURt/AXtgC+ceqLOatJ49JJE1VPTkMAy+oE1f1XvkMrYsHqmDf6GWVzgVXryL4U +wq2/nJSm56ddhN55nI8oSN3dtywUB8/ShelEN73nlN77PeD9tl6NksPwWaKrqxq0 +FlabRPZSQCfmgZbhDV8Sa8mfCkFU0G0lit6kLGceCKMvmW+9Bz7ebsYmVdmVMxmf +IJStFD44lWFTdUc65WISKEdW2ELcUefb0zOLw+0PCbXFGJH5x5ktksW8+BBk2Hkg +GeQRL/qPCccthbScO0VgNj3zJ3ZZL0ObSDAbvNDG85joeNjDNq5DT/BAZ0bOSbEF +sh+f9BAzAgMBAAECggEBAJWv2cq7Jw6MVwSRxYca38xuD6TUNBopgBvjREixURW2 +sNUaLuMb9Omp7fuOaE2N5rcJ+xnjPGIxh/oeN5MQctz9gwn3zf6vY+15h97pUb4D +uGvYPRDaT8YVGS+X9NMZ4ZCmqW2lpWzKnCFoGHcy8yZLbcaxBsRdvKzwOYGoPiFb +K2QuhXZ/1UPmqK9i2DFKtj40X6vBszTNboFxOVpXrPu0FJwLVSDf2hSZ4fMM0DH3 +YqwKcYf5te+hxGKgrqRA3tn0NCWii0in6QIwXMC+kMw1ebg/tZKqyDLMNptAK8J+ +DVw9m5X1seUHS5ehU/g2jrQrtK5WYn7MrFK4lBzlRwECgYEA/d1TeANYECDWRRDk +B0aaRZs87Rwl/J9PsvbsKvtU/bX+OfSOUjOa9iQBqn0LmU8GqusEET/QVUfocVwV +Bggf/5qDLxz100Rj0ags/yE/kNr0Bb31kkkKHFMnCT06YasR7qKllwrAlPJvQv9x +IzBKq+T/Dx08Wep9bCRSFhzRCnsCgYEA+jdeZXTDr/Vz+D2B3nAw1frqYFfGnEVY +wqmoK3VXMDkGuxsloO2rN+SyiUo3JNiQNPDub/t7175GH5pmKtZOlftePANsUjBj +wZ1D0rI5Bxu/71ibIUYIRVmXsTEQkh/ozoh3jXCZ9+bLgYiYx7789IUZZSokFQ3D +FICUT9KJ36kCgYAGoq9Y1rWJjmIrYfqj2guUQC+CfxbbGIrrwZqAsRsSmpwvhZ3m +tiSZxG0quKQB+NfSxdvQW5ulbwC7Xc3K35F+i9pb8+TVBdeaFkw+yu6vaZmxQLrX +fQM/pEjD7A7HmMIaO7QaU5SfEAsqdCTP56Y8AftMuNXn/8IRfo2KuGwaWwKBgFpU +ILzJoVdlad9E/Rw7LjYhZfkv1uBVXIyxyKcfrkEXZSmozDXDdxsvcZCEfVHM6Ipk 
+K/+7LuMcqp4AFEAEq8wTOdq6daFaHLkpt/FZK6M4TlruhtpFOPkoNc3e45eM83OT +6mziKINJC1CQ6m65sQHpBtjxlKMRG8rL/D6wx9s5AoGBAMRlqNPMwglT3hvDmsAt +9Lf9pdmhERUlHhD8bj8mDaBj2Aqv7f6VRJaYZqP403pKKQexuqcn80mtjkSAPFkN +Cj7BVt/RXm5uoxDTnfi26RF9F6yNDEJ7UU9+peBr99aazF/fTgW/1GcMkQnum8uV +c257YgaWmjK9uB0Y2r2VxS0G +-----END PRIVATE KEY----- diff --git a/vendor/github.com/erikdubbelboer/fasthttp/fasthttputil/rsa.pem b/vendor/github.com/erikdubbelboer/fasthttp/fasthttputil/rsa.pem new file mode 100644 index 0000000..93e77cd --- /dev/null +++ b/vendor/github.com/erikdubbelboer/fasthttp/fasthttputil/rsa.pem @@ -0,0 +1,17 @@ +-----BEGIN CERTIFICATE----- +MIICujCCAaKgAwIBAgIJAMbXnKZ/cikUMA0GCSqGSIb3DQEBCwUAMBUxEzARBgNV +BAMTCnVidW50dS5uYW4wHhcNMTUwMjA0MDgwMTM5WhcNMjUwMjAxMDgwMTM5WjAV +MRMwEQYDVQQDEwp1YnVudHUubmFuMIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIB +CgKCAQEA+CELrALPDyXZxt5lEbfwF7YAvnHqizmrSePSSRNVT05DAMvqBNX9V75D +K2LB6pg3+hllc4FV68i+FMKtv5yUpuenXYTeeZyPKEjd3bcsFAfP0oXpRDe955Te ++z3g/bZejZLD8Fmiq6satBZWm0T2UkAn5oGW4Q1fEmvJnwpBVNBtJYrepCxnHgij +L5lvvQc+3m7GJlXZlTMZnyCUrRQ+OJVhU3VHOuViEihHVthC3FHn29Mzi8PtDwm1 +xRiR+ceZLZLFvPgQZNh5IBnkES/6jwnHLYW0nDtFYDY98yd2WS9Dm0gwG7zQxvOY +6HjYwzauQ0/wQGdGzkmxBbIfn/QQMwIDAQABow0wCzAJBgNVHRMEAjAAMA0GCSqG +SIb3DQEBCwUAA4IBAQBQjKm/4KN/iTgXbLTL3i7zaxYXFLXsnT1tF+ay4VA8aj98 +L3JwRTciZ3A5iy/W4VSCt3eASwOaPWHKqDBB5RTtL73LoAqsWmO3APOGQAbixcQ2 +45GXi05OKeyiYRi1Nvq7Unv9jUkRDHUYVPZVSAjCpsXzPhFkmZoTRxmx5l0ZF7Li +K91lI5h+eFq0dwZwrmlPambyh1vQUi70VHv8DNToVU29kel7YLbxGbuqETfhrcy6 +X+Mha6RYITkAn5FqsZcKMsc9eYGEF4l3XV+oS7q6xfTxktYJMFTI18J0lQ2Lv/CI +whdMnYGntDQBE/iFCrJEGNsKGc38796GBOb5j+zd +-----END CERTIFICATE----- diff --git a/vendor/github.com/erikdubbelboer/fasthttp/fs.go b/vendor/github.com/erikdubbelboer/fasthttp/fs.go new file mode 100644 index 0000000..2c75251 --- /dev/null +++ b/vendor/github.com/erikdubbelboer/fasthttp/fs.go @@ -0,0 +1,1267 @@ +package fasthttp + +import ( + "bytes" + "errors" + "fmt" + "html" + "io" + "io/ioutil" + "mime" + "net/http" + "os" + "path/filepath" + "sort" + "strings" + "sync" + "time" + + "github.com/klauspost/compress/gzip" +) + +// ServeFileBytesUncompressed returns HTTP response containing file contents +// from the given path. +// +// Directory contents is returned if path points to directory. +// +// ServeFileBytes may be used for saving network traffic when serving files +// with good compression ratio. +// +// See also RequestCtx.SendFileBytes. +func ServeFileBytesUncompressed(ctx *RequestCtx, path []byte) { + ServeFileUncompressed(ctx, b2s(path)) +} + +// ServeFileUncompressed returns HTTP response containing file contents +// from the given path. +// +// Directory contents is returned if path points to directory. +// +// ServeFile may be used for saving network traffic when serving files +// with good compression ratio. +// +// See also RequestCtx.SendFile. +func ServeFileUncompressed(ctx *RequestCtx, path string) { + ctx.Request.Header.DelBytes(strAcceptEncoding) + ServeFile(ctx, path) +} + +// ServeFileBytes returns HTTP response containing compressed file contents +// from the given path. +// +// HTTP response may contain uncompressed file contents in the following cases: +// +// * Missing 'Accept-Encoding: gzip' request header. +// * No write access to directory containing the file. +// +// Directory contents is returned if path points to directory. +// +// Use ServeFileBytesUncompressed is you don't need serving compressed +// file contents. +// +// See also RequestCtx.SendFileBytes. 
+func ServeFileBytes(ctx *RequestCtx, path []byte) { + ServeFile(ctx, b2s(path)) +} + +// ServeFile returns HTTP response containing compressed file contents +// from the given path. +// +// HTTP response may contain uncompressed file contents in the following cases: +// +// * Missing 'Accept-Encoding: gzip' request header. +// * No write access to directory containing the file. +// +// Directory contents is returned if path points to directory. +// +// Use ServeFileUncompressed is you don't need serving compressed file contents. +// +// See also RequestCtx.SendFile. +func ServeFile(ctx *RequestCtx, path string) { + rootFSOnce.Do(func() { + rootFSHandler = rootFS.NewRequestHandler() + }) + if len(path) == 0 || path[0] != '/' { + // extend relative path to absolute path + var err error + if path, err = filepath.Abs(path); err != nil { + ctx.Logger().Printf("cannot resolve path %q to absolute file path: %s", path, err) + ctx.Error("Internal Server Error", StatusInternalServerError) + return + } + } + ctx.Request.SetRequestURI(path) + rootFSHandler(ctx) +} + +var ( + rootFSOnce sync.Once + rootFS = &FS{ + Root: "/", + GenerateIndexPages: true, + Compress: true, + AcceptByteRange: true, + } + rootFSHandler RequestHandler +) + +// PathRewriteFunc must return new request path based on arbitrary ctx +// info such as ctx.Path(). +// +// Path rewriter is used in FS for translating the current request +// to the local filesystem path relative to FS.Root. +// +// The returned path must not contain '/../' substrings due to security reasons, +// since such paths may refer files outside FS.Root. +// +// The returned path may refer to ctx members. For example, ctx.Path(). +type PathRewriteFunc func(ctx *RequestCtx) []byte + +// NewVHostPathRewriter returns path rewriter, which strips slashesCount +// leading slashes from the path and prepends the path with request's host, +// thus simplifying virtual hosting for static files. +// +// Examples: +// +// * host=foobar.com, slashesCount=0, original path="/foo/bar". +// Resulting path: "/foobar.com/foo/bar" +// +// * host=img.aaa.com, slashesCount=1, original path="/images/123/456.jpg" +// Resulting path: "/img.aaa.com/123/456.jpg" +// +func NewVHostPathRewriter(slashesCount int) PathRewriteFunc { + return func(ctx *RequestCtx) []byte { + path := stripLeadingSlashes(ctx.Path(), slashesCount) + host := ctx.Host() + if n := bytes.IndexByte(host, '/'); n >= 0 { + host = nil + } + if len(host) == 0 { + host = strInvalidHost + } + b := AcquireByteBuffer() + b.B = append(b.B, '/') + b.B = append(b.B, host...) + b.B = append(b.B, path...) + ctx.URI().SetPathBytes(b.B) + ReleaseByteBuffer(b) + + return ctx.Path() + } +} + +var strInvalidHost = []byte("invalid-host") + +// NewPathSlashesStripper returns path rewriter, which strips slashesCount +// leading slashes from the path. +// +// Examples: +// +// * slashesCount = 0, original path: "/foo/bar", result: "/foo/bar" +// * slashesCount = 1, original path: "/foo/bar", result: "/bar" +// * slashesCount = 2, original path: "/foo/bar", result: "" +// +// The returned path rewriter may be used as FS.PathRewrite . +func NewPathSlashesStripper(slashesCount int) PathRewriteFunc { + return func(ctx *RequestCtx) []byte { + return stripLeadingSlashes(ctx.Path(), slashesCount) + } +} + +// NewPathPrefixStripper returns path rewriter, which removes prefixSize bytes +// from the path prefix. 
+// +// Examples: +// +// * prefixSize = 0, original path: "/foo/bar", result: "/foo/bar" +// * prefixSize = 3, original path: "/foo/bar", result: "o/bar" +// * prefixSize = 7, original path: "/foo/bar", result: "r" +// +// The returned path rewriter may be used as FS.PathRewrite . +func NewPathPrefixStripper(prefixSize int) PathRewriteFunc { + return func(ctx *RequestCtx) []byte { + path := ctx.Path() + if len(path) >= prefixSize { + path = path[prefixSize:] + } + return path + } +} + +// FS represents settings for request handler serving static files +// from the local filesystem. +// +// It is prohibited copying FS values. Create new values instead. +type FS struct { + noCopy noCopy + + // Path to the root directory to serve files from. + Root string + + // List of index file names to try opening during directory access. + // + // For example: + // + // * index.html + // * index.htm + // * my-super-index.xml + // + // By default the list is empty. + IndexNames []string + + // Index pages for directories without files matching IndexNames + // are automatically generated if set. + // + // Directory index generation may be quite slow for directories + // with many files (more than 1K), so it is discouraged enabling + // index pages' generation for such directories. + // + // By default index pages aren't generated. + GenerateIndexPages bool + + // Transparently compresses responses if set to true. + // + // The server tries minimizing CPU usage by caching compressed files. + // It adds CompressedFileSuffix suffix to the original file name and + // tries saving the resulting compressed file under the new file name. + // So it is advisable to give the server write access to Root + // and to all inner folders in order to minimze CPU usage when serving + // compressed responses. + // + // Transparent compression is disabled by default. + Compress bool + + // Enables byte range requests if set to true. + // + // Byte range requests are disabled by default. + AcceptByteRange bool + + // Path rewriting function. + // + // By default request path is not modified. + PathRewrite PathRewriteFunc + + // PathNotFound fires when file is not found in filesystem + // this functions tries to replace "Cannot open requested path" + // server response giving to the programmer the control of server flow. + // + // By default PathNotFound returns + // "Cannot open requested path" + PathNotFound RequestHandler + + // Expiration duration for inactive file handlers. + // + // FSHandlerCacheDuration is used by default. + CacheDuration time.Duration + + // Suffix to add to the name of cached compressed file. + // + // This value has sense only if Compress is set. + // + // FSCompressedFileSuffix is used by default. + CompressedFileSuffix string + + once sync.Once + h RequestHandler +} + +// FSCompressedFileSuffix is the suffix FS adds to the original file names +// when trying to store compressed file under the new file name. +// See FS.Compress for details. +const FSCompressedFileSuffix = ".fasthttp.gz" + +// FSHandlerCacheDuration is the default expiration duration for inactive +// file handlers opened by FS. +const FSHandlerCacheDuration = 10 * time.Second + +// FSHandler returns request handler serving static files from +// the given root folder. +// +// stripSlashes indicates how many leading slashes must be stripped +// from requested path before searching requested file in the root folder. 
+// Examples: +// +// * stripSlashes = 0, original path: "/foo/bar", result: "/foo/bar" +// * stripSlashes = 1, original path: "/foo/bar", result: "/bar" +// * stripSlashes = 2, original path: "/foo/bar", result: "" +// +// The returned request handler automatically generates index pages +// for directories without index.html. +// +// The returned handler caches requested file handles +// for FSHandlerCacheDuration. +// Make sure your program has enough 'max open files' limit aka +// 'ulimit -n' if root folder contains many files. +// +// Do not create multiple request handler instances for the same +// (root, stripSlashes) arguments - just reuse a single instance. +// Otherwise goroutine leak will occur. +func FSHandler(root string, stripSlashes int) RequestHandler { + fs := &FS{ + Root: root, + IndexNames: []string{"index.html"}, + GenerateIndexPages: true, + AcceptByteRange: true, + } + if stripSlashes > 0 { + fs.PathRewrite = NewPathSlashesStripper(stripSlashes) + } + return fs.NewRequestHandler() +} + +// NewRequestHandler returns new request handler with the given FS settings. +// +// The returned handler caches requested file handles +// for FS.CacheDuration. +// Make sure your program has enough 'max open files' limit aka +// 'ulimit -n' if FS.Root folder contains many files. +// +// Do not create multiple request handlers from a single FS instance - +// just reuse a single request handler. +func (fs *FS) NewRequestHandler() RequestHandler { + fs.once.Do(fs.initRequestHandler) + return fs.h +} + +func (fs *FS) initRequestHandler() { + root := fs.Root + + // serve files from the current working directory if root is empty + if len(root) == 0 { + root = "." + } + + // strip trailing slashes from the root path + for len(root) > 0 && root[len(root)-1] == '/' { + root = root[:len(root)-1] + } + + cacheDuration := fs.CacheDuration + if cacheDuration <= 0 { + cacheDuration = FSHandlerCacheDuration + } + compressedFileSuffix := fs.CompressedFileSuffix + if len(compressedFileSuffix) == 0 { + compressedFileSuffix = FSCompressedFileSuffix + } + + h := &fsHandler{ + root: root, + indexNames: fs.IndexNames, + pathRewrite: fs.PathRewrite, + generateIndexPages: fs.GenerateIndexPages, + compress: fs.Compress, + pathNotFound: fs.PathNotFound, + acceptByteRange: fs.AcceptByteRange, + cacheDuration: cacheDuration, + compressedFileSuffix: compressedFileSuffix, + cache: make(map[string]*fsFile), + compressedCache: make(map[string]*fsFile), + } + + go func() { + var pendingFiles []*fsFile + for { + time.Sleep(cacheDuration / 2) + pendingFiles = h.cleanCache(pendingFiles) + } + }() + + fs.h = h.handleRequest +} + +type fsHandler struct { + root string + indexNames []string + pathRewrite PathRewriteFunc + pathNotFound RequestHandler + generateIndexPages bool + compress bool + acceptByteRange bool + cacheDuration time.Duration + compressedFileSuffix string + + cache map[string]*fsFile + compressedCache map[string]*fsFile + cacheLock sync.Mutex + + smallFileReaderPool sync.Pool +} + +type fsFile struct { + h *fsHandler + f *os.File + dirIndex []byte + contentType string + contentLength int + compressed bool + + lastModified time.Time + lastModifiedStr []byte + + t time.Time + readersCount int + + bigFiles []*bigFileReader + bigFilesLock sync.Mutex +} + +func (ff *fsFile) NewReader() (io.Reader, error) { + if ff.isBig() { + r, err := ff.bigFileReader() + if err != nil { + ff.decReadersCount() + } + return r, err + } + return ff.smallFileReader(), nil +} + +func (ff *fsFile) smallFileReader() io.Reader { + v 
:= ff.h.smallFileReaderPool.Get() + if v == nil { + v = &fsSmallFileReader{} + } + r := v.(*fsSmallFileReader) + r.ff = ff + r.endPos = ff.contentLength + if r.startPos > 0 { + panic("BUG: fsSmallFileReader with non-nil startPos found in the pool") + } + return r +} + +// files bigger than this size are sent with sendfile +const maxSmallFileSize = 2 * 4096 + +func (ff *fsFile) isBig() bool { + return ff.contentLength > maxSmallFileSize && len(ff.dirIndex) == 0 +} + +func (ff *fsFile) bigFileReader() (io.Reader, error) { + if ff.f == nil { + panic("BUG: ff.f must be non-nil in bigFileReader") + } + + var r io.Reader + + ff.bigFilesLock.Lock() + n := len(ff.bigFiles) + if n > 0 { + r = ff.bigFiles[n-1] + ff.bigFiles = ff.bigFiles[:n-1] + } + ff.bigFilesLock.Unlock() + + if r != nil { + return r, nil + } + + f, err := os.Open(ff.f.Name()) + if err != nil { + return nil, fmt.Errorf("cannot open already opened file: %s", err) + } + return &bigFileReader{ + f: f, + ff: ff, + r: f, + }, nil +} + +func (ff *fsFile) Release() { + if ff.f != nil { + ff.f.Close() + + if ff.isBig() { + ff.bigFilesLock.Lock() + for _, r := range ff.bigFiles { + r.f.Close() + } + ff.bigFilesLock.Unlock() + } + } +} + +func (ff *fsFile) decReadersCount() { + ff.h.cacheLock.Lock() + ff.readersCount-- + if ff.readersCount < 0 { + panic("BUG: negative fsFile.readersCount!") + } + ff.h.cacheLock.Unlock() +} + +// bigFileReader attempts to trigger sendfile +// for sending big files over the wire. +type bigFileReader struct { + f *os.File + ff *fsFile + r io.Reader + lr io.LimitedReader +} + +func (r *bigFileReader) UpdateByteRange(startPos, endPos int) error { + if _, err := r.f.Seek(int64(startPos), 0); err != nil { + return err + } + r.r = &r.lr + r.lr.R = r.f + r.lr.N = int64(endPos - startPos + 1) + return nil +} + +func (r *bigFileReader) Read(p []byte) (int, error) { + return r.r.Read(p) +} + +func (r *bigFileReader) WriteTo(w io.Writer) (int64, error) { + if rf, ok := w.(io.ReaderFrom); ok { + // fast path. 
Senfile must be triggered + return rf.ReadFrom(r.r) + } + + // slow path + return copyZeroAlloc(w, r.r) +} + +func (r *bigFileReader) Close() error { + r.r = r.f + n, err := r.f.Seek(0, 0) + if err == nil { + if n != 0 { + panic("BUG: File.Seek(0,0) returned (non-zero, nil)") + } + + ff := r.ff + ff.bigFilesLock.Lock() + ff.bigFiles = append(ff.bigFiles, r) + ff.bigFilesLock.Unlock() + } else { + r.f.Close() + } + r.ff.decReadersCount() + return err +} + +type fsSmallFileReader struct { + ff *fsFile + startPos int + endPos int +} + +func (r *fsSmallFileReader) Close() error { + ff := r.ff + ff.decReadersCount() + r.ff = nil + r.startPos = 0 + r.endPos = 0 + ff.h.smallFileReaderPool.Put(r) + return nil +} + +func (r *fsSmallFileReader) UpdateByteRange(startPos, endPos int) error { + r.startPos = startPos + r.endPos = endPos + 1 + return nil +} + +func (r *fsSmallFileReader) Read(p []byte) (int, error) { + tailLen := r.endPos - r.startPos + if tailLen <= 0 { + return 0, io.EOF + } + if len(p) > tailLen { + p = p[:tailLen] + } + + ff := r.ff + if ff.f != nil { + n, err := ff.f.ReadAt(p, int64(r.startPos)) + r.startPos += n + return n, err + } + + n := copy(p, ff.dirIndex[r.startPos:]) + r.startPos += n + return n, nil +} + +func (r *fsSmallFileReader) WriteTo(w io.Writer) (int64, error) { + ff := r.ff + + var n int + var err error + if ff.f == nil { + n, err = w.Write(ff.dirIndex[r.startPos:r.endPos]) + return int64(n), err + } + + if rf, ok := w.(io.ReaderFrom); ok { + return rf.ReadFrom(r) + } + + curPos := r.startPos + bufv := copyBufPool.Get() + buf := bufv.([]byte) + for err == nil { + tailLen := r.endPos - curPos + if tailLen <= 0 { + break + } + if len(buf) > tailLen { + buf = buf[:tailLen] + } + n, err = ff.f.ReadAt(buf, int64(curPos)) + nw, errw := w.Write(buf[:n]) + curPos += nw + if errw == nil && nw != n { + panic("BUG: Write(p) returned (n, nil), where n != len(p)") + } + if err == nil { + err = errw + } + } + copyBufPool.Put(bufv) + + if err == io.EOF { + err = nil + } + return int64(curPos - r.startPos), err +} + +func (h *fsHandler) cleanCache(pendingFiles []*fsFile) []*fsFile { + var filesToRelease []*fsFile + + h.cacheLock.Lock() + + // Close files which couldn't be closed before due to non-zero + // readers count on the previous run. + var remainingFiles []*fsFile + for _, ff := range pendingFiles { + if ff.readersCount > 0 { + remainingFiles = append(remainingFiles, ff) + } else { + filesToRelease = append(filesToRelease, ff) + } + } + pendingFiles = remainingFiles + + pendingFiles, filesToRelease = cleanCacheNolock(h.cache, pendingFiles, filesToRelease, h.cacheDuration) + pendingFiles, filesToRelease = cleanCacheNolock(h.compressedCache, pendingFiles, filesToRelease, h.cacheDuration) + + h.cacheLock.Unlock() + + for _, ff := range filesToRelease { + ff.Release() + } + + return pendingFiles +} + +func cleanCacheNolock(cache map[string]*fsFile, pendingFiles, filesToRelease []*fsFile, cacheDuration time.Duration) ([]*fsFile, []*fsFile) { + t := time.Now() + for k, ff := range cache { + if t.Sub(ff.t) > cacheDuration { + if ff.readersCount > 0 { + // There are pending readers on stale file handle, + // so we cannot close it. Put it into pendingFiles + // so it will be closed later. 
+ pendingFiles = append(pendingFiles, ff) + } else { + filesToRelease = append(filesToRelease, ff) + } + delete(cache, k) + } + } + return pendingFiles, filesToRelease +} + +func (h *fsHandler) handleRequest(ctx *RequestCtx) { + var path []byte + if h.pathRewrite != nil { + path = h.pathRewrite(ctx) + } else { + path = ctx.Path() + } + path = stripTrailingSlashes(path) + + if n := bytes.IndexByte(path, 0); n >= 0 { + ctx.Logger().Printf("cannot serve path with nil byte at position %d: %q", n, path) + ctx.Error("Are you a hacker?", StatusBadRequest) + return + } + if h.pathRewrite != nil { + // There is no need to check for '/../' if path = ctx.Path(), + // since ctx.Path must normalize and sanitize the path. + + if n := bytes.Index(path, strSlashDotDotSlash); n >= 0 { + ctx.Logger().Printf("cannot serve path with '/../' at position %d due to security reasons: %q", n, path) + ctx.Error("Internal Server Error", StatusInternalServerError) + return + } + } + + mustCompress := false + fileCache := h.cache + byteRange := ctx.Request.Header.peek(strRange) + if len(byteRange) == 0 && h.compress && ctx.Request.Header.HasAcceptEncodingBytes(strGzip) { + mustCompress = true + fileCache = h.compressedCache + } + + h.cacheLock.Lock() + ff, ok := fileCache[string(path)] + if ok { + ff.readersCount++ + } + h.cacheLock.Unlock() + + if !ok { + pathStr := string(path) + filePath := h.root + pathStr + var err error + ff, err = h.openFSFile(filePath, mustCompress) + if mustCompress && err == errNoCreatePermission { + ctx.Logger().Printf("insufficient permissions for saving compressed file for %q. Serving uncompressed file. "+ + "Allow write access to the directory with this file in order to improve fasthttp performance", filePath) + mustCompress = false + ff, err = h.openFSFile(filePath, mustCompress) + } + if err == errDirIndexRequired { + ff, err = h.openIndexFile(ctx, filePath, mustCompress) + if err != nil { + ctx.Logger().Printf("cannot open dir index %q: %s", filePath, err) + ctx.Error("Directory index is forbidden", StatusForbidden) + return + } + } else if err != nil { + ctx.Logger().Printf("cannot open file %q: %s", filePath, err) + if h.pathNotFound == nil { + ctx.Error("Cannot open requested path", StatusNotFound) + } else { + h.pathNotFound(ctx) + ctx.SetStatusCode(StatusNotFound) + } + return + } + + h.cacheLock.Lock() + ff1, ok := fileCache[pathStr] + if !ok { + fileCache[pathStr] = ff + ff.readersCount++ + } else { + ff1.readersCount++ + } + h.cacheLock.Unlock() + + if ok { + // The file has been already opened by another + // goroutine, so close the current file and use + // the file opened by another goroutine instead. 
+ ff.Release() + ff = ff1 + } + } + + if !ctx.IfModifiedSince(ff.lastModified) { + ff.decReadersCount() + ctx.NotModified() + return + } + + r, err := ff.NewReader() + if err != nil { + ctx.Logger().Printf("cannot obtain file reader for path=%q: %s", path, err) + ctx.Error("Internal Server Error", StatusInternalServerError) + return + } + + hdr := &ctx.Response.Header + if ff.compressed { + hdr.SetCanonical(strContentEncoding, strGzip) + } + + statusCode := StatusOK + contentLength := ff.contentLength + if h.acceptByteRange { + hdr.SetCanonical(strAcceptRanges, strBytes) + if len(byteRange) > 0 { + startPos, endPos, err := ParseByteRange(byteRange, contentLength) + if err != nil { + r.(io.Closer).Close() + ctx.Logger().Printf("cannot parse byte range %q for path=%q: %s", byteRange, path, err) + ctx.Error("Range Not Satisfiable", StatusRequestedRangeNotSatisfiable) + return + } + + if err = r.(byteRangeUpdater).UpdateByteRange(startPos, endPos); err != nil { + r.(io.Closer).Close() + ctx.Logger().Printf("cannot seek byte range %q for path=%q: %s", byteRange, path, err) + ctx.Error("Internal Server Error", StatusInternalServerError) + return + } + + hdr.SetContentRange(startPos, endPos, contentLength) + contentLength = endPos - startPos + 1 + statusCode = StatusPartialContent + } + } + + hdr.SetCanonical(strLastModified, ff.lastModifiedStr) + if !ctx.IsHead() { + ctx.SetBodyStream(r, contentLength) + } else { + ctx.Response.ResetBody() + ctx.Response.SkipBody = true + ctx.Response.Header.SetContentLength(contentLength) + if rc, ok := r.(io.Closer); ok { + if err := rc.Close(); err != nil { + ctx.Logger().Printf("cannot close file reader: %s", err) + ctx.Error("Internal Server Error", StatusInternalServerError) + return + } + } + } + ctx.SetContentType(ff.contentType) + ctx.SetStatusCode(statusCode) +} + +type byteRangeUpdater interface { + UpdateByteRange(startPos, endPos int) error +} + +// ParseByteRange parses 'Range: bytes=...' header value. +// +// It follows https://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html#sec14.35 . +func ParseByteRange(byteRange []byte, contentLength int) (startPos, endPos int, err error) { + b := byteRange + if !bytes.HasPrefix(b, strBytes) { + return 0, 0, fmt.Errorf("unsupported range units: %q. Expecting %q", byteRange, strBytes) + } + + b = b[len(strBytes):] + if len(b) == 0 || b[0] != '=' { + return 0, 0, fmt.Errorf("missing byte range in %q", byteRange) + } + b = b[1:] + + n := bytes.IndexByte(b, '-') + if n < 0 { + return 0, 0, fmt.Errorf("missing the end position of byte range in %q", byteRange) + } + + if n == 0 { + v, err := ParseUint(b[n+1:]) + if err != nil { + return 0, 0, err + } + startPos := contentLength - v + if startPos < 0 { + startPos = 0 + } + return startPos, contentLength - 1, nil + } + + if startPos, err = ParseUint(b[:n]); err != nil { + return 0, 0, err + } + if startPos >= contentLength { + return 0, 0, fmt.Errorf("the start position of byte range cannot exceed %d. byte range %q", contentLength-1, byteRange) + } + + b = b[n+1:] + if len(b) == 0 { + return startPos, contentLength - 1, nil + } + + if endPos, err = ParseUint(b); err != nil { + return 0, 0, err + } + if endPos >= contentLength { + endPos = contentLength - 1 + } + if endPos < startPos { + return 0, 0, fmt.Errorf("the start position of byte range cannot exceed the end position. 
byte range %q", byteRange) + } + return startPos, endPos, nil +} + +func (h *fsHandler) openIndexFile(ctx *RequestCtx, dirPath string, mustCompress bool) (*fsFile, error) { + for _, indexName := range h.indexNames { + indexFilePath := dirPath + "/" + indexName + ff, err := h.openFSFile(indexFilePath, mustCompress) + if err == nil { + return ff, nil + } + if !os.IsNotExist(err) { + return nil, fmt.Errorf("cannot open file %q: %s", indexFilePath, err) + } + } + + if !h.generateIndexPages { + return nil, fmt.Errorf("cannot access directory without index page. Directory %q", dirPath) + } + + return h.createDirIndex(ctx.URI(), dirPath, mustCompress) +} + +var ( + errDirIndexRequired = errors.New("directory index required") + errNoCreatePermission = errors.New("no 'create file' permissions") +) + +func (h *fsHandler) createDirIndex(base *URI, dirPath string, mustCompress bool) (*fsFile, error) { + w := &ByteBuffer{} + + basePathEscaped := html.EscapeString(string(base.Path())) + fmt.Fprintf(w, "%s", basePathEscaped) + fmt.Fprintf(w, "
<h1>%s</h1>
", basePathEscaped) + fmt.Fprintf(w, "") + + if mustCompress { + var zbuf ByteBuffer + zbuf.B = AppendGzipBytesLevel(zbuf.B, w.B, CompressDefaultCompression) + w = &zbuf + } + + dirIndex := w.B + lastModified := time.Now() + ff := &fsFile{ + h: h, + dirIndex: dirIndex, + contentType: "text/html; charset=utf-8", + contentLength: len(dirIndex), + compressed: mustCompress, + lastModified: lastModified, + lastModifiedStr: AppendHTTPDate(nil, lastModified), + + t: lastModified, + } + return ff, nil +} + +const ( + fsMinCompressRatio = 0.8 + fsMaxCompressibleFileSize = 8 * 1024 * 1024 +) + +func (h *fsHandler) compressAndOpenFSFile(filePath string) (*fsFile, error) { + f, err := os.Open(filePath) + if err != nil { + return nil, err + } + + fileInfo, err := f.Stat() + if err != nil { + f.Close() + return nil, fmt.Errorf("cannot obtain info for file %q: %s", filePath, err) + } + + if fileInfo.IsDir() { + f.Close() + return nil, errDirIndexRequired + } + + if strings.HasSuffix(filePath, h.compressedFileSuffix) || + fileInfo.Size() > fsMaxCompressibleFileSize || + !isFileCompressible(f, fsMinCompressRatio) { + return h.newFSFile(f, fileInfo, false) + } + + compressedFilePath := filePath + h.compressedFileSuffix + absPath, err := filepath.Abs(compressedFilePath) + if err != nil { + f.Close() + return nil, fmt.Errorf("cannot determine absolute path for %q: %s", compressedFilePath, err) + } + + flock := getFileLock(absPath) + flock.Lock() + ff, err := h.compressFileNolock(f, fileInfo, filePath, compressedFilePath) + flock.Unlock() + + return ff, err +} + +func (h *fsHandler) compressFileNolock(f *os.File, fileInfo os.FileInfo, filePath, compressedFilePath string) (*fsFile, error) { + // Attempt to open compressed file created by another concurrent + // goroutine. + // It is safe opening such a file, since the file creation + // is guarded by file mutex - see getFileLock call. + if _, err := os.Stat(compressedFilePath); err == nil { + f.Close() + return h.newCompressedFSFile(compressedFilePath) + } + + // Create temporary file, so concurrent goroutines don't use + // it until it is created. 
+ tmpFilePath := compressedFilePath + ".tmp" + zf, err := os.Create(tmpFilePath) + if err != nil { + f.Close() + if !os.IsPermission(err) { + return nil, fmt.Errorf("cannot create temporary file %q: %s", tmpFilePath, err) + } + return nil, errNoCreatePermission + } + + zw := acquireStacklessGzipWriter(zf, CompressDefaultCompression) + _, err = copyZeroAlloc(zw, f) + if err1 := zw.Flush(); err == nil { + err = err1 + } + releaseStacklessGzipWriter(zw, CompressDefaultCompression) + zf.Close() + f.Close() + if err != nil { + return nil, fmt.Errorf("error when compressing file %q to %q: %s", filePath, tmpFilePath, err) + } + if err = os.Chtimes(tmpFilePath, time.Now(), fileInfo.ModTime()); err != nil { + return nil, fmt.Errorf("cannot change modification time to %s for tmp file %q: %s", + fileInfo.ModTime(), tmpFilePath, err) + } + if err = os.Rename(tmpFilePath, compressedFilePath); err != nil { + return nil, fmt.Errorf("cannot move compressed file from %q to %q: %s", tmpFilePath, compressedFilePath, err) + } + return h.newCompressedFSFile(compressedFilePath) +} + +func (h *fsHandler) newCompressedFSFile(filePath string) (*fsFile, error) { + f, err := os.Open(filePath) + if err != nil { + return nil, fmt.Errorf("cannot open compressed file %q: %s", filePath, err) + } + fileInfo, err := f.Stat() + if err != nil { + f.Close() + return nil, fmt.Errorf("cannot obtain info for compressed file %q: %s", filePath, err) + } + return h.newFSFile(f, fileInfo, true) +} + +func (h *fsHandler) openFSFile(filePath string, mustCompress bool) (*fsFile, error) { + filePathOriginal := filePath + if mustCompress { + filePath += h.compressedFileSuffix + } + + f, err := os.Open(filePath) + if err != nil { + if mustCompress && os.IsNotExist(err) { + return h.compressAndOpenFSFile(filePathOriginal) + } + return nil, err + } + + fileInfo, err := f.Stat() + if err != nil { + f.Close() + return nil, fmt.Errorf("cannot obtain info for file %q: %s", filePath, err) + } + + if fileInfo.IsDir() { + f.Close() + if mustCompress { + return nil, fmt.Errorf("directory with unexpected suffix found: %q. Suffix: %q", + filePath, h.compressedFileSuffix) + } + return nil, errDirIndexRequired + } + + if mustCompress { + fileInfoOriginal, err := os.Stat(filePathOriginal) + if err != nil { + f.Close() + return nil, fmt.Errorf("cannot obtain info for original file %q: %s", filePathOriginal, err) + } + + if fileInfoOriginal.ModTime() != fileInfo.ModTime() { + // The compressed file became stale. Re-create it. 
+ f.Close() + os.Remove(filePath) + return h.compressAndOpenFSFile(filePathOriginal) + } + } + + return h.newFSFile(f, fileInfo, mustCompress) +} + +func (h *fsHandler) newFSFile(f *os.File, fileInfo os.FileInfo, compressed bool) (*fsFile, error) { + n := fileInfo.Size() + contentLength := int(n) + if n != int64(contentLength) { + f.Close() + return nil, fmt.Errorf("too big file: %d bytes", n) + } + + // detect content-type + ext := fileExtension(fileInfo.Name(), compressed, h.compressedFileSuffix) + contentType := mime.TypeByExtension(ext) + if len(contentType) == 0 { + data, err := readFileHeader(f, compressed) + if err != nil { + return nil, fmt.Errorf("cannot read header of the file %q: %s", f.Name(), err) + } + contentType = http.DetectContentType(data) + } + + lastModified := fileInfo.ModTime() + ff := &fsFile{ + h: h, + f: f, + contentType: contentType, + contentLength: contentLength, + compressed: compressed, + lastModified: lastModified, + lastModifiedStr: AppendHTTPDate(nil, lastModified), + + t: time.Now(), + } + return ff, nil +} + +func readFileHeader(f *os.File, compressed bool) ([]byte, error) { + r := io.Reader(f) + var zr *gzip.Reader + if compressed { + var err error + if zr, err = acquireGzipReader(f); err != nil { + return nil, err + } + r = zr + } + + lr := &io.LimitedReader{ + R: r, + N: 512, + } + data, err := ioutil.ReadAll(lr) + f.Seek(0, 0) + + if zr != nil { + releaseGzipReader(zr) + } + + return data, err +} + +func stripLeadingSlashes(path []byte, stripSlashes int) []byte { + for stripSlashes > 0 && len(path) > 0 { + if path[0] != '/' { + panic("BUG: path must start with slash") + } + n := bytes.IndexByte(path[1:], '/') + if n < 0 { + path = path[:0] + break + } + path = path[n+1:] + stripSlashes-- + } + return path +} + +func stripTrailingSlashes(path []byte) []byte { + for len(path) > 0 && path[len(path)-1] == '/' { + path = path[:len(path)-1] + } + return path +} + +func fileExtension(path string, compressed bool, compressedFileSuffix string) string { + if compressed && strings.HasSuffix(path, compressedFileSuffix) { + path = path[:len(path)-len(compressedFileSuffix)] + } + n := strings.LastIndexByte(path, '.') + if n < 0 { + return "" + } + return path[n:] +} + +// FileLastModified returns last modified time for the file. +func FileLastModified(path string) (time.Time, error) { + f, err := os.Open(path) + if err != nil { + return zeroTime, err + } + fileInfo, err := f.Stat() + f.Close() + if err != nil { + return zeroTime, err + } + return fsModTime(fileInfo.ModTime()), nil +} + +func fsModTime(t time.Time) time.Time { + return t.In(time.UTC).Truncate(time.Second) +} + +var ( + filesLockMap = make(map[string]*sync.Mutex) + filesLockMapLock sync.Mutex +) + +func getFileLock(absPath string) *sync.Mutex { + filesLockMapLock.Lock() + flock := filesLockMap[absPath] + if flock == nil { + flock = &sync.Mutex{} + filesLockMap[absPath] = flock + } + filesLockMapLock.Unlock() + return flock +} diff --git a/vendor/github.com/erikdubbelboer/fasthttp/fs_example_test.go b/vendor/github.com/erikdubbelboer/fasthttp/fs_example_test.go new file mode 100644 index 0000000..9f923cd --- /dev/null +++ b/vendor/github.com/erikdubbelboer/fasthttp/fs_example_test.go @@ -0,0 +1,28 @@ +package fasthttp_test + +import ( + "log" + + "github.com/erikdubbelboer/fasthttp" +) + +func ExampleFS() { + fs := &fasthttp.FS{ + // Path to directory to serve. + Root: "/var/www/static-site", + + // Generate index pages if client requests directory contents. 
+ GenerateIndexPages: true, + + // Enable transparent compression to save network traffic. + Compress: true, + } + + // Create request handler for serving static files. + h := fs.NewRequestHandler() + + // Start the server. + if err := fasthttp.ListenAndServe(":8080", h); err != nil { + log.Fatalf("error in ListenAndServe: %s", err) + } +} diff --git a/vendor/github.com/erikdubbelboer/fasthttp/fs_handler_example_test.go b/vendor/github.com/erikdubbelboer/fasthttp/fs_handler_example_test.go new file mode 100644 index 0000000..32d16d8 --- /dev/null +++ b/vendor/github.com/erikdubbelboer/fasthttp/fs_handler_example_test.go @@ -0,0 +1,47 @@ +package fasthttp_test + +import ( + "bytes" + "log" + + "github.com/erikdubbelboer/fasthttp" +) + +// Setup file handlers (aka 'file server config') +var ( + // Handler for serving images from /img/ path, + // i.e. /img/foo/bar.jpg will be served from + // /var/www/images/foo/bar.jpb . + imgPrefix = []byte("/img/") + imgHandler = fasthttp.FSHandler("/var/www/images", 1) + + // Handler for serving css from /static/css/ path, + // i.e. /static/css/foo/bar.css will be served from + // /home/dev/css/foo/bar.css . + cssPrefix = []byte("/static/css/") + cssHandler = fasthttp.FSHandler("/home/dev/css", 2) + + // Handler for serving the rest of requests, + // i.e. /foo/bar/baz.html will be served from + // /var/www/files/foo/bar/baz.html . + filesHandler = fasthttp.FSHandler("/var/www/files", 0) +) + +// Main request handler +func requestHandler(ctx *fasthttp.RequestCtx) { + path := ctx.Path() + switch { + case bytes.HasPrefix(path, imgPrefix): + imgHandler(ctx) + case bytes.HasPrefix(path, cssPrefix): + cssHandler(ctx) + default: + filesHandler(ctx) + } +} + +func ExampleFSHandler() { + if err := fasthttp.ListenAndServe(":80", requestHandler); err != nil { + log.Fatalf("Error in server: %s", err) + } +} diff --git a/vendor/github.com/erikdubbelboer/fasthttp/fs_test.go b/vendor/github.com/erikdubbelboer/fasthttp/fs_test.go new file mode 100644 index 0000000..78b1e13 --- /dev/null +++ b/vendor/github.com/erikdubbelboer/fasthttp/fs_test.go @@ -0,0 +1,684 @@ +package fasthttp + +import ( + "bufio" + "bytes" + "fmt" + "io" + "io/ioutil" + "math/rand" + "os" + "path" + "sort" + "testing" + "time" +) + +type TestLogger struct { + t *testing.T +} + +func (t TestLogger) Printf(format string, args ...interface{}) { + t.t.Logf(format, args...) +} + +func TestNewVHostPathRewriter(t *testing.T) { + var ctx RequestCtx + var req Request + req.Header.SetHost("foobar.com") + req.SetRequestURI("/foo/bar/baz") + ctx.Init(&req, nil, nil) + + f := NewVHostPathRewriter(0) + path := f(&ctx) + expectedPath := "/foobar.com/foo/bar/baz" + if string(path) != expectedPath { + t.Fatalf("unexpected path %q. Expecting %q", path, expectedPath) + } + + ctx.Request.Reset() + ctx.Request.SetRequestURI("https://aaa.bbb.cc/one/two/three/four?asdf=dsf") + f = NewVHostPathRewriter(2) + path = f(&ctx) + expectedPath = "/aaa.bbb.cc/three/four" + if string(path) != expectedPath { + t.Fatalf("unexpected path %q. Expecting %q", path, expectedPath) + } +} + +func TestNewVHostPathRewriterMaliciousHost(t *testing.T) { + var ctx RequestCtx + var req Request + req.Header.SetHost("/../../../etc/passwd") + req.SetRequestURI("/foo/bar/baz") + ctx.Init(&req, nil, nil) + + f := NewVHostPathRewriter(0) + path := f(&ctx) + expectedPath := "/invalid-host/foo/bar/baz" + if string(path) != expectedPath { + t.Fatalf("unexpected path %q. 
Expecting %q", path, expectedPath) + } +} + +func testPathNotFound(t *testing.T, pathNotFoundFunc RequestHandler) { + var ctx RequestCtx + var req Request + req.SetRequestURI("http//some.url/file") + ctx.Init(&req, nil, TestLogger{t}) + + fs := &FS{ + Root: "./", + PathNotFound: pathNotFoundFunc, + } + fs.NewRequestHandler()(&ctx) + + if pathNotFoundFunc == nil { + // different to ... + if !bytes.Equal(ctx.Response.Body(), + []byte("Cannot open requested path")) { + t.Fatalf("response defers. Response: %q", ctx.Response.Body()) + } + } else { + // Equals to ... + if bytes.Equal(ctx.Response.Body(), + []byte("Cannot open requested path")) { + t.Fatalf("respones defers. Response: %q", ctx.Response.Body()) + } + } +} + +func TestPathNotFound(t *testing.T) { + testPathNotFound(t, nil) +} + +func TestPathNotFoundFunc(t *testing.T) { + testPathNotFound(t, func(ctx *RequestCtx) { + ctx.WriteString("Not found hehe") + }) +} + +func TestServeFileHead(t *testing.T) { + var ctx RequestCtx + var req Request + req.Header.SetMethod("HEAD") + req.SetRequestURI("http://foobar.com/baz") + ctx.Init(&req, nil, nil) + + ServeFile(&ctx, "fs.go") + + var resp Response + resp.SkipBody = true + s := ctx.Response.String() + br := bufio.NewReader(bytes.NewBufferString(s)) + if err := resp.Read(br); err != nil { + t.Fatalf("unexpected error: %s", err) + } + + ce := resp.Header.Peek("Content-Encoding") + if len(ce) > 0 { + t.Fatalf("Unexpected 'Content-Encoding' %q", ce) + } + + body := resp.Body() + if len(body) > 0 { + t.Fatalf("unexpected response body %q. Expecting empty body", body) + } + + expectedBody, err := getFileContents("/fs.go") + if err != nil { + t.Fatalf("unexpected error: %s", err) + } + contentLength := resp.Header.ContentLength() + if contentLength != len(expectedBody) { + t.Fatalf("unexpected Content-Length: %d. expecting %d", contentLength, len(expectedBody)) + } +} + +func TestServeFileSmallNoReadFrom(t *testing.T) { + teststr := "hello, world!" + + tempdir, err := ioutil.TempDir("", "httpexpect") + if err != nil { + t.Fatal(err) + } + defer os.RemoveAll(tempdir) + + if err := ioutil.WriteFile( + path.Join(tempdir, "hello"), []byte(teststr), 0666); err != nil { + t.Fatal(err) + } + + var ctx RequestCtx + var req Request + req.SetRequestURI("http://foobar.com/baz") + ctx.Init(&req, nil, nil) + + ServeFile(&ctx, path.Join(tempdir, "hello")) + + reader, ok := ctx.Response.bodyStream.(*fsSmallFileReader) + if !ok { + t.Fatal("expected fsSmallFileReader") + } + + buf := bytes.NewBuffer(nil) + + n, err := reader.WriteTo(pureWriter{buf}) + if err != nil { + t.Fatal(err) + } + + if n != int64(len(teststr)) { + t.Fatalf("expected %d bytes, got %d bytes", len(teststr), n) + } + + body := string(buf.Bytes()) + if body != teststr { + t.Fatalf("expected '%s'", teststr) + } +} + +type pureWriter struct { + w io.Writer +} + +func (pw pureWriter) Write(p []byte) (nn int, err error) { + return pw.w.Write(p) +} + +func TestServeFileCompressed(t *testing.T) { + var ctx RequestCtx + var req Request + req.SetRequestURI("http://foobar.com/baz") + req.Header.Set("Accept-Encoding", "gzip") + ctx.Init(&req, nil, nil) + + ServeFile(&ctx, "fs.go") + + var resp Response + s := ctx.Response.String() + br := bufio.NewReader(bytes.NewBufferString(s)) + if err := resp.Read(br); err != nil { + t.Fatalf("unexpected error: %s", err) + } + + ce := resp.Header.Peek("Content-Encoding") + if string(ce) != "gzip" { + t.Fatalf("Unexpected 'Content-Encoding' %q. 
Expecting %q", ce, "gzip") + } + + body, err := resp.BodyGunzip() + if err != nil { + t.Fatalf("unexpected error: %s", err) + } + expectedBody, err := getFileContents("/fs.go") + if err != nil { + t.Fatalf("unexpected error: %s", err) + } + if !bytes.Equal(body, expectedBody) { + t.Fatalf("unexpected body %q. expecting %q", body, expectedBody) + } +} + +func TestServeFileUncompressed(t *testing.T) { + var ctx RequestCtx + var req Request + req.SetRequestURI("http://foobar.com/baz") + req.Header.Set("Accept-Encoding", "gzip") + ctx.Init(&req, nil, nil) + + ServeFileUncompressed(&ctx, "fs.go") + + var resp Response + s := ctx.Response.String() + br := bufio.NewReader(bytes.NewBufferString(s)) + if err := resp.Read(br); err != nil { + t.Fatalf("unexpected error: %s", err) + } + + ce := resp.Header.Peek("Content-Encoding") + if len(ce) > 0 { + t.Fatalf("Unexpected 'Content-Encoding' %q", ce) + } + + body := resp.Body() + expectedBody, err := getFileContents("/fs.go") + if err != nil { + t.Fatalf("unexpected error: %s", err) + } + if !bytes.Equal(body, expectedBody) { + t.Fatalf("unexpected body %q. expecting %q", body, expectedBody) + } +} + +func TestFSByteRangeConcurrent(t *testing.T) { + fs := &FS{ + Root: ".", + AcceptByteRange: true, + } + h := fs.NewRequestHandler() + + concurrency := 10 + ch := make(chan struct{}, concurrency) + for i := 0; i < concurrency; i++ { + go func() { + for j := 0; j < 5; j++ { + testFSByteRange(t, h, "/fs.go") + testFSByteRange(t, h, "/README.md") + } + ch <- struct{}{} + }() + } + + for i := 0; i < concurrency; i++ { + select { + case <-time.After(time.Second): + t.Fatalf("timeout") + case <-ch: + } + } +} + +func TestFSByteRangeSingleThread(t *testing.T) { + fs := &FS{ + Root: ".", + AcceptByteRange: true, + } + h := fs.NewRequestHandler() + + testFSByteRange(t, h, "/fs.go") + testFSByteRange(t, h, "/README.md") +} + +func testFSByteRange(t *testing.T, h RequestHandler, filePath string) { + var ctx RequestCtx + ctx.Init(&Request{}, nil, nil) + + expectedBody, err := getFileContents(filePath) + if err != nil { + t.Fatalf("cannot read file %q: %s", filePath, err) + } + + fileSize := len(expectedBody) + startPos := rand.Intn(fileSize) + endPos := rand.Intn(fileSize) + if endPos < startPos { + startPos, endPos = endPos, startPos + } + + ctx.Request.SetRequestURI(filePath) + ctx.Request.Header.SetByteRange(startPos, endPos) + h(&ctx) + + var resp Response + s := ctx.Response.String() + br := bufio.NewReader(bytes.NewBufferString(s)) + if err := resp.Read(br); err != nil { + t.Fatalf("unexpected error: %s. filePath=%q", err, filePath) + } + if resp.StatusCode() != StatusPartialContent { + t.Fatalf("unexpected status code: %d. Expecting %d. filePath=%q", resp.StatusCode(), StatusPartialContent, filePath) + } + cr := resp.Header.Peek("Content-Range") + + expectedCR := fmt.Sprintf("bytes %d-%d/%d", startPos, endPos, fileSize) + if string(cr) != expectedCR { + t.Fatalf("unexpected content-range %q. Expecting %q. filePath=%q", cr, expectedCR, filePath) + } + body := resp.Body() + bodySize := endPos - startPos + 1 + if len(body) != bodySize { + t.Fatalf("unexpected body size %d. Expecting %d. filePath=%q, startPos=%d, endPos=%d", + len(body), bodySize, filePath, startPos, endPos) + } + + expectedBody = expectedBody[startPos : endPos+1] + if !bytes.Equal(body, expectedBody) { + t.Fatalf("unexpected body %q. Expecting %q. 
filePath=%q, startPos=%d, endPos=%d", + body, expectedBody, filePath, startPos, endPos) + } +} + +func getFileContents(path string) ([]byte, error) { + path = "." + path + f, err := os.Open(path) + if err != nil { + return nil, err + } + defer f.Close() + return ioutil.ReadAll(f) +} + +func TestParseByteRangeSuccess(t *testing.T) { + testParseByteRangeSuccess(t, "bytes=0-0", 1, 0, 0) + testParseByteRangeSuccess(t, "bytes=1234-6789", 6790, 1234, 6789) + + testParseByteRangeSuccess(t, "bytes=123-", 456, 123, 455) + testParseByteRangeSuccess(t, "bytes=-1", 1, 0, 0) + testParseByteRangeSuccess(t, "bytes=-123", 456, 333, 455) + + // End position exceeding content-length. It should be updated to content-length-1. + // See https://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html#sec14.35 + testParseByteRangeSuccess(t, "bytes=1-2345", 234, 1, 233) + testParseByteRangeSuccess(t, "bytes=0-2345", 2345, 0, 2344) + + // Start position overflow. Whole range must be returned. + // See https://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html#sec14.35 + testParseByteRangeSuccess(t, "bytes=-567", 56, 0, 55) +} + +func testParseByteRangeSuccess(t *testing.T, v string, contentLength, startPos, endPos int) { + startPos1, endPos1, err := ParseByteRange([]byte(v), contentLength) + if err != nil { + t.Fatalf("unexpected error: %s. v=%q, contentLength=%d", err, v, contentLength) + } + if startPos1 != startPos { + t.Fatalf("unexpected startPos=%d. Expecting %d. v=%q, contentLength=%d", startPos1, startPos, v, contentLength) + } + if endPos1 != endPos { + t.Fatalf("unexpected endPos=%d. Expectind %d. v=%q, contentLenght=%d", endPos1, endPos, v, contentLength) + } +} + +func TestParseByteRangeError(t *testing.T) { + // invalid value + testParseByteRangeError(t, "asdfasdfas", 1234) + + // invalid units + testParseByteRangeError(t, "foobar=1-34", 600) + + // missing '-' + testParseByteRangeError(t, "bytes=1234", 1235) + + // non-numeric range + testParseByteRangeError(t, "bytes=foobar", 123) + testParseByteRangeError(t, "bytes=1-foobar", 123) + testParseByteRangeError(t, "bytes=df-344", 545) + + // multiple byte ranges + testParseByteRangeError(t, "bytes=1-2,4-6", 123) + + // byte range exceeding contentLength + testParseByteRangeError(t, "bytes=123-", 12) + + // startPos exceeding endPos + testParseByteRangeError(t, "bytes=123-34", 1234) +} + +func testParseByteRangeError(t *testing.T, v string, contentLength int) { + _, _, err := ParseByteRange([]byte(v), contentLength) + if err == nil { + t.Fatalf("expecting error when parsing byte range %q", v) + } +} + +func TestFSCompressConcurrent(t *testing.T) { + fs := &FS{ + Root: ".", + GenerateIndexPages: true, + Compress: true, + } + h := fs.NewRequestHandler() + + concurrency := 4 + ch := make(chan struct{}, concurrency) + for i := 0; i < concurrency; i++ { + go func() { + for j := 0; j < 5; j++ { + testFSCompress(t, h, "/fs.go") + testFSCompress(t, h, "/") + testFSCompress(t, h, "/README.md") + } + ch <- struct{}{} + }() + } + + for i := 0; i < concurrency; i++ { + select { + case <-ch: + case <-time.After(time.Second): + t.Fatalf("timeout") + } + } +} + +func TestFSCompressSingleThread(t *testing.T) { + fs := &FS{ + Root: ".", + GenerateIndexPages: true, + Compress: true, + } + h := fs.NewRequestHandler() + + testFSCompress(t, h, "/fs.go") + testFSCompress(t, h, "/") + testFSCompress(t, h, "/README.md") +} + +func testFSCompress(t *testing.T, h RequestHandler, filePath string) { + var ctx RequestCtx + ctx.Init(&Request{}, nil, nil) + + // request uncompressed file + 
ctx.Request.Reset() + ctx.Request.SetRequestURI(filePath) + h(&ctx) + + var resp Response + s := ctx.Response.String() + br := bufio.NewReader(bytes.NewBufferString(s)) + if err := resp.Read(br); err != nil { + t.Fatalf("unexpected error: %s. filePath=%q", err, filePath) + } + if resp.StatusCode() != StatusOK { + t.Fatalf("unexpected status code: %d. Expecting %d. filePath=%q", resp.StatusCode(), StatusOK, filePath) + } + ce := resp.Header.Peek("Content-Encoding") + if string(ce) != "" { + t.Fatalf("unexpected content-encoding %q. Expecting empty string. filePath=%q", ce, filePath) + } + body := string(resp.Body()) + + // request compressed file + ctx.Request.Reset() + ctx.Request.SetRequestURI(filePath) + ctx.Request.Header.Set("Accept-Encoding", "gzip") + h(&ctx) + s = ctx.Response.String() + br = bufio.NewReader(bytes.NewBufferString(s)) + if err := resp.Read(br); err != nil { + t.Fatalf("unexpected error: %s. filePath=%q", err, filePath) + } + if resp.StatusCode() != StatusOK { + t.Fatalf("unexpected status code: %d. Expecting %d. filePath=%q", resp.StatusCode(), StatusOK, filePath) + } + ce = resp.Header.Peek("Content-Encoding") + if string(ce) != "gzip" { + t.Fatalf("unexpected content-encoding %q. Expecting %q. filePath=%q", ce, "gzip", filePath) + } + zbody, err := resp.BodyGunzip() + if err != nil { + t.Fatalf("unexpected error when gunzipping response body: %s. filePath=%q", err, filePath) + } + if string(zbody) != body { + t.Fatalf("unexpected body %q. Expected %q. FilePath=%q", zbody, body, filePath) + } +} + +func TestFileLock(t *testing.T) { + for i := 0; i < 10; i++ { + filePath := fmt.Sprintf("foo/bar/%d.jpg", i) + lock := getFileLock(filePath) + lock.Lock() + lock.Unlock() + } + + for i := 0; i < 10; i++ { + filePath := fmt.Sprintf("foo/bar/%d.jpg", i) + lock := getFileLock(filePath) + lock.Lock() + lock.Unlock() + } +} + +func TestFSHandlerSingleThread(t *testing.T) { + requestHandler := FSHandler(".", 0) + + f, err := os.Open(".") + if err != nil { + t.Fatalf("cannot open cwd: %s", err) + } + + filenames, err := f.Readdirnames(0) + f.Close() + if err != nil { + t.Fatalf("cannot read dirnames in cwd: %s", err) + } + sort.Sort(sort.StringSlice(filenames)) + + for i := 0; i < 3; i++ { + fsHandlerTest(t, requestHandler, filenames) + } +} + +func TestFSHandlerConcurrent(t *testing.T) { + requestHandler := FSHandler(".", 0) + + f, err := os.Open(".") + if err != nil { + t.Fatalf("cannot open cwd: %s", err) + } + + filenames, err := f.Readdirnames(0) + f.Close() + if err != nil { + t.Fatalf("cannot read dirnames in cwd: %s", err) + } + sort.Sort(sort.StringSlice(filenames)) + + concurrency := 10 + ch := make(chan struct{}, concurrency) + for j := 0; j < concurrency; j++ { + go func() { + for i := 0; i < 3; i++ { + fsHandlerTest(t, requestHandler, filenames) + } + ch <- struct{}{} + }() + } + + for j := 0; j < concurrency; j++ { + select { + case <-ch: + case <-time.After(time.Second): + t.Fatalf("timeout") + } + } +} + +func fsHandlerTest(t *testing.T, requestHandler RequestHandler, filenames []string) { + var ctx RequestCtx + var req Request + ctx.Init(&req, nil, defaultLogger) + ctx.Request.Header.SetHost("foobar.com") + + filesTested := 0 + for _, name := range filenames { + f, err := os.Open(name) + if err != nil { + t.Fatalf("cannot open file %q: %s", name, err) + } + stat, err := f.Stat() + if err != nil { + t.Fatalf("cannot get file stat %q: %s", name, err) + } + if stat.IsDir() { + f.Close() + continue + } + data, err := ioutil.ReadAll(f) + f.Close() + if err != nil { + 
t.Fatalf("cannot read file contents %q: %s", name, err) + } + + ctx.URI().Update(name) + requestHandler(&ctx) + if ctx.Response.bodyStream == nil { + t.Fatalf("response body stream must be non-empty") + } + body, err := ioutil.ReadAll(ctx.Response.bodyStream) + if err != nil { + t.Fatalf("error when reading response body stream: %s", err) + } + if !bytes.Equal(body, data) { + t.Fatalf("unexpected body returned: %q. Expecting %q", body, data) + } + filesTested++ + if filesTested >= 10 { + break + } + } + + // verify index page generation + ctx.URI().Update("/") + requestHandler(&ctx) + if ctx.Response.bodyStream == nil { + t.Fatalf("response body stream must be non-empty") + } + body, err := ioutil.ReadAll(ctx.Response.bodyStream) + if err != nil { + t.Fatalf("error when reading response body stream: %s", err) + } + if len(body) == 0 { + t.Fatalf("index page must be non-empty") + } +} + +func TestStripPathSlashes(t *testing.T) { + testStripPathSlashes(t, "", 0, "") + testStripPathSlashes(t, "", 10, "") + testStripPathSlashes(t, "/", 0, "") + testStripPathSlashes(t, "/", 1, "") + testStripPathSlashes(t, "/", 10, "") + testStripPathSlashes(t, "/foo/bar/baz", 0, "/foo/bar/baz") + testStripPathSlashes(t, "/foo/bar/baz", 1, "/bar/baz") + testStripPathSlashes(t, "/foo/bar/baz", 2, "/baz") + testStripPathSlashes(t, "/foo/bar/baz", 3, "") + testStripPathSlashes(t, "/foo/bar/baz", 10, "") + + // trailing slash + testStripPathSlashes(t, "/foo/bar/", 0, "/foo/bar") + testStripPathSlashes(t, "/foo/bar/", 1, "/bar") + testStripPathSlashes(t, "/foo/bar/", 2, "") + testStripPathSlashes(t, "/foo/bar/", 3, "") +} + +func testStripPathSlashes(t *testing.T, path string, stripSlashes int, expectedPath string) { + s := stripLeadingSlashes([]byte(path), stripSlashes) + s = stripTrailingSlashes(s) + if string(s) != expectedPath { + t.Fatalf("unexpected path after stripping %q with stripSlashes=%d: %q. Expecting %q", path, stripSlashes, s, expectedPath) + } +} + +func TestFileExtension(t *testing.T) { + testFileExtension(t, "foo.bar", false, "zzz", ".bar") + testFileExtension(t, "foobar", false, "zzz", "") + testFileExtension(t, "foo.bar.baz", false, "zzz", ".baz") + testFileExtension(t, "", false, "zzz", "") + testFileExtension(t, "/a/b/c.d/efg.jpg", false, ".zzz", ".jpg") + + testFileExtension(t, "foo.bar", true, ".zzz", ".bar") + testFileExtension(t, "foobar.zzz", true, ".zzz", "") + testFileExtension(t, "foo.bar.baz.fasthttp.gz", true, ".fasthttp.gz", ".baz") + testFileExtension(t, "", true, ".zzz", "") + testFileExtension(t, "/a/b/c.d/efg.jpg.xxx", true, ".xxx", ".jpg") +} + +func testFileExtension(t *testing.T, path string, compressed bool, compressedFileSuffix, expectedExt string) { + ext := fileExtension(path, compressed, compressedFileSuffix) + if ext != expectedExt { + t.Fatalf("unexpected file extension for file %q: %q. Expecting %q", path, ext, expectedExt) + } +} diff --git a/vendor/github.com/erikdubbelboer/fasthttp/header.go b/vendor/github.com/erikdubbelboer/fasthttp/header.go new file mode 100644 index 0000000..0635adb --- /dev/null +++ b/vendor/github.com/erikdubbelboer/fasthttp/header.go @@ -0,0 +1,2120 @@ +package fasthttp + +import ( + "bufio" + "bytes" + "errors" + "fmt" + "io" + "sync/atomic" + "time" +) + +// ResponseHeader represents HTTP response header. +// +// It is forbidden copying ResponseHeader instances. +// Create new instances instead and use CopyTo. +// +// ResponseHeader instance MUST NOT be used from concurrently running +// goroutines. 
+type ResponseHeader struct { + noCopy noCopy + + disableNormalizing bool + noHTTP11 bool + connectionClose bool + + statusCode int + contentLength int + contentLengthBytes []byte + + contentType []byte + server []byte + + h []argsKV + bufKV argsKV + + cookies []argsKV +} + +// RequestHeader represents HTTP request header. +// +// It is forbidden copying RequestHeader instances. +// Create new instances instead and use CopyTo. +// +// RequestHeader instance MUST NOT be used from concurrently running +// goroutines. +type RequestHeader struct { + noCopy noCopy + + disableNormalizing bool + noHTTP11 bool + connectionClose bool + + // These two fields have been moved close to other bool fields + // for reducing RequestHeader object size. + cookiesCollected bool + rawHeadersParsed bool + + contentLength int + contentLengthBytes []byte + + method []byte + requestURI []byte + host []byte + contentType []byte + userAgent []byte + + h []argsKV + bufKV argsKV + + cookies []argsKV + + rawHeaders []byte +} + +// SetContentRange sets 'Content-Range: bytes startPos-endPos/contentLength' +// header. +func (h *ResponseHeader) SetContentRange(startPos, endPos, contentLength int) { + b := h.bufKV.value[:0] + b = append(b, strBytes...) + b = append(b, ' ') + b = AppendUint(b, startPos) + b = append(b, '-') + b = AppendUint(b, endPos) + b = append(b, '/') + b = AppendUint(b, contentLength) + h.bufKV.value = b + + h.SetCanonical(strContentRange, h.bufKV.value) +} + +// SetByteRange sets 'Range: bytes=startPos-endPos' header. +// +// * If startPos is negative, then 'bytes=-startPos' value is set. +// * If endPos is negative, then 'bytes=startPos-' value is set. +func (h *RequestHeader) SetByteRange(startPos, endPos int) { + h.parseRawHeaders() + + b := h.bufKV.value[:0] + b = append(b, strBytes...) + b = append(b, '=') + if startPos >= 0 { + b = AppendUint(b, startPos) + } else { + endPos = -startPos + } + b = append(b, '-') + if endPos >= 0 { + b = AppendUint(b, endPos) + } + h.bufKV.value = b + + h.SetCanonical(strRange, h.bufKV.value) +} + +// StatusCode returns response status code. +func (h *ResponseHeader) StatusCode() int { + if h.statusCode == 0 { + return StatusOK + } + return h.statusCode +} + +// SetStatusCode sets response status code. +func (h *ResponseHeader) SetStatusCode(statusCode int) { + h.statusCode = statusCode +} + +// PeekCookie returns cookie for the given key from response. +func (h *ResponseHeader) PeekCookie(key string) []byte { + return peekArgStr(h.cookies, key) +} + +// SetLastModified sets 'Last-Modified' header to the given value. +func (h *ResponseHeader) SetLastModified(t time.Time) { + h.bufKV.value = AppendHTTPDate(h.bufKV.value[:0], t) + h.SetCanonical(strLastModified, h.bufKV.value) +} + +// ConnectionClose returns true if 'Connection: close' header is set. +func (h *ResponseHeader) ConnectionClose() bool { + return h.connectionClose +} + +// SetConnectionClose sets 'Connection: close' header. +func (h *ResponseHeader) SetConnectionClose() { + h.connectionClose = true +} + +// ResetConnectionClose clears 'Connection: close' header if it exists. +func (h *ResponseHeader) ResetConnectionClose() { + if h.connectionClose { + h.connectionClose = false + h.h = delAllArgsBytes(h.h, strConnection) + } +} + +// ConnectionClose returns true if 'Connection: close' header is set. 
+func (h *RequestHeader) ConnectionClose() bool { + h.parseRawHeaders() + return h.connectionClose +} + +func (h *RequestHeader) connectionCloseFast() bool { + // h.parseRawHeaders() isn't called for performance reasons. + // Use ConnectionClose for triggering raw headers parsing. + return h.connectionClose +} + +// SetConnectionClose sets 'Connection: close' header. +func (h *RequestHeader) SetConnectionClose() { + // h.parseRawHeaders() isn't called for performance reasons. + h.connectionClose = true +} + +// ResetConnectionClose clears 'Connection: close' header if it exists. +func (h *RequestHeader) ResetConnectionClose() { + h.parseRawHeaders() + if h.connectionClose { + h.connectionClose = false + h.h = delAllArgsBytes(h.h, strConnection) + } +} + +// ConnectionUpgrade returns true if 'Connection: Upgrade' header is set. +func (h *ResponseHeader) ConnectionUpgrade() bool { + return hasHeaderValue(h.Peek("Connection"), strUpgrade) +} + +// ConnectionUpgrade returns true if 'Connection: Upgrade' header is set. +func (h *RequestHeader) ConnectionUpgrade() bool { + h.parseRawHeaders() + return hasHeaderValue(h.Peek("Connection"), strUpgrade) +} + +// ContentLength returns Content-Length header value. +// +// It may be negative: +// -1 means Transfer-Encoding: chunked. +// -2 means Transfer-Encoding: identity. +func (h *ResponseHeader) ContentLength() int { + return h.contentLength +} + +// SetContentLength sets Content-Length header value. +// +// Content-Length may be negative: +// -1 means Transfer-Encoding: chunked. +// -2 means Transfer-Encoding: identity. +func (h *ResponseHeader) SetContentLength(contentLength int) { + if h.mustSkipContentLength() { + return + } + h.contentLength = contentLength + if contentLength >= 0 { + h.contentLengthBytes = AppendUint(h.contentLengthBytes[:0], contentLength) + h.h = delAllArgsBytes(h.h, strTransferEncoding) + } else { + h.contentLengthBytes = h.contentLengthBytes[:0] + value := strChunked + if contentLength == -2 { + h.SetConnectionClose() + value = strIdentity + } + h.h = setArgBytes(h.h, strTransferEncoding, value) + } +} + +func (h *ResponseHeader) mustSkipContentLength() bool { + // From http/1.1 specs: + // All 1xx (informational), 204 (no content), and 304 (not modified) responses MUST NOT include a message-body + statusCode := h.StatusCode() + + // Fast path. + if statusCode < 100 || statusCode == StatusOK { + return false + } + + // Slow path. + return statusCode == StatusNotModified || statusCode == StatusNoContent || statusCode < 200 +} + +// ContentLength returns Content-Length header value. +// +// It may be negative: +// -1 means Transfer-Encoding: chunked. +func (h *RequestHeader) ContentLength() int { + if h.ignoreBody() { + return 0 + } + return h.realContentLength() +} + +// realContentLength returns the actual Content-Length set in the request, +// including positive lengths for GET/HEAD requests. +func (h *RequestHeader) realContentLength() int { + h.parseRawHeaders() + return h.contentLength +} + +// SetContentLength sets Content-Length header value. +// +// Negative content-length sets 'Transfer-Encoding: chunked' header. 
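+//
+// A minimal illustrative sketch of the behaviour described above:
+//
+//    var h RequestHeader
+//    h.SetContentLength(123) // emits 'Content-Length: 123'
+//    h.SetContentLength(-1)  // emits 'Transfer-Encoding: chunked' instead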
+func (h *RequestHeader) SetContentLength(contentLength int) { + h.parseRawHeaders() + h.contentLength = contentLength + if contentLength >= 0 { + h.contentLengthBytes = AppendUint(h.contentLengthBytes[:0], contentLength) + h.h = delAllArgsBytes(h.h, strTransferEncoding) + } else { + h.contentLengthBytes = h.contentLengthBytes[:0] + h.h = setArgBytes(h.h, strTransferEncoding, strChunked) + } +} + +func (h *ResponseHeader) isCompressibleContentType() bool { + contentType := h.ContentType() + return bytes.HasPrefix(contentType, strTextSlash) || + bytes.HasPrefix(contentType, strApplicationSlash) +} + +// ContentType returns Content-Type header value. +func (h *ResponseHeader) ContentType() []byte { + contentType := h.contentType + if len(h.contentType) == 0 { + contentType = defaultContentType + } + return contentType +} + +// SetContentType sets Content-Type header value. +func (h *ResponseHeader) SetContentType(contentType string) { + h.contentType = append(h.contentType[:0], contentType...) +} + +// SetContentTypeBytes sets Content-Type header value. +func (h *ResponseHeader) SetContentTypeBytes(contentType []byte) { + h.contentType = append(h.contentType[:0], contentType...) +} + +// Server returns Server header value. +func (h *ResponseHeader) Server() []byte { + return h.server +} + +// SetServer sets Server header value. +func (h *ResponseHeader) SetServer(server string) { + h.server = append(h.server[:0], server...) +} + +// SetServerBytes sets Server header value. +func (h *ResponseHeader) SetServerBytes(server []byte) { + h.server = append(h.server[:0], server...) +} + +// ContentType returns Content-Type header value. +func (h *RequestHeader) ContentType() []byte { + h.parseRawHeaders() + return h.contentType +} + +// SetContentType sets Content-Type header value. +func (h *RequestHeader) SetContentType(contentType string) { + h.parseRawHeaders() + h.contentType = append(h.contentType[:0], contentType...) +} + +// SetContentTypeBytes sets Content-Type header value. +func (h *RequestHeader) SetContentTypeBytes(contentType []byte) { + h.parseRawHeaders() + h.contentType = append(h.contentType[:0], contentType...) +} + +// SetMultipartFormBoundary sets the following Content-Type: +// 'multipart/form-data; boundary=...' +// where ... is substituted by the given boundary. +func (h *RequestHeader) SetMultipartFormBoundary(boundary string) { + h.parseRawHeaders() + + b := h.bufKV.value[:0] + b = append(b, strMultipartFormData...) + b = append(b, ';', ' ') + b = append(b, strBoundary...) + b = append(b, '=') + b = append(b, boundary...) + h.bufKV.value = b + + h.SetContentTypeBytes(h.bufKV.value) +} + +// SetMultipartFormBoundaryBytes sets the following Content-Type: +// 'multipart/form-data; boundary=...' +// where ... is substituted by the given boundary. +func (h *RequestHeader) SetMultipartFormBoundaryBytes(boundary []byte) { + h.parseRawHeaders() + + b := h.bufKV.value[:0] + b = append(b, strMultipartFormData...) + b = append(b, ';', ' ') + b = append(b, strBoundary...) + b = append(b, '=') + b = append(b, boundary...) + h.bufKV.value = b + + h.SetContentTypeBytes(h.bufKV.value) +} + +// MultipartFormBoundary returns boundary part +// from 'multipart/form-data; boundary=...' Content-Type. 
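+//
+// A minimal illustrative sketch of the boundary round-trip:
+//
+//    var h RequestHeader
+//    h.SetMultipartFormBoundary("MyBoundary")
+//    // Content-Type is now 'multipart/form-data; boundary=MyBoundary'
+//    // and MultipartFormBoundary() returns []byte("MyBoundary")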
+func (h *RequestHeader) MultipartFormBoundary() []byte { + b := h.ContentType() + if !bytes.HasPrefix(b, strMultipartFormData) { + return nil + } + b = b[len(strMultipartFormData):] + if len(b) == 0 || b[0] != ';' { + return nil + } + + var n int + for len(b) > 0 { + n++ + for len(b) > n && b[n] == ' ' { + n++ + } + b = b[n:] + if !bytes.HasPrefix(b, strBoundary) { + if n = bytes.IndexByte(b, ';'); n < 0 { + return nil + } + continue + } + + b = b[len(strBoundary):] + if len(b) == 0 || b[0] != '=' { + return nil + } + b = b[1:] + if n = bytes.IndexByte(b, ';'); n >= 0 { + b = b[:n] + } + if len(b) > 1 && b[0] == '"' && b[len(b)-1] == '"' { + b = b[1 : len(b)-1] + } + return b + } + return nil +} + +// Host returns Host header value. +func (h *RequestHeader) Host() []byte { + if len(h.host) > 0 { + return h.host + } + if !h.rawHeadersParsed { + // fast path without employing full headers parsing. + host := peekRawHeader(h.rawHeaders, strHost) + if len(host) > 0 { + h.host = append(h.host[:0], host...) + return h.host + } + } + + // slow path. + h.parseRawHeaders() + return h.host +} + +// SetHost sets Host header value. +func (h *RequestHeader) SetHost(host string) { + h.parseRawHeaders() + h.host = append(h.host[:0], host...) +} + +// SetHostBytes sets Host header value. +func (h *RequestHeader) SetHostBytes(host []byte) { + h.parseRawHeaders() + h.host = append(h.host[:0], host...) +} + +// UserAgent returns User-Agent header value. +func (h *RequestHeader) UserAgent() []byte { + h.parseRawHeaders() + return h.userAgent +} + +// SetUserAgent sets User-Agent header value. +func (h *RequestHeader) SetUserAgent(userAgent string) { + h.parseRawHeaders() + h.userAgent = append(h.userAgent[:0], userAgent...) +} + +// SetUserAgentBytes sets User-Agent header value. +func (h *RequestHeader) SetUserAgentBytes(userAgent []byte) { + h.parseRawHeaders() + h.userAgent = append(h.userAgent[:0], userAgent...) +} + +// Referer returns Referer header value. +func (h *RequestHeader) Referer() []byte { + return h.PeekBytes(strReferer) +} + +// SetReferer sets Referer header value. +func (h *RequestHeader) SetReferer(referer string) { + h.SetBytesK(strReferer, referer) +} + +// SetRefererBytes sets Referer header value. +func (h *RequestHeader) SetRefererBytes(referer []byte) { + h.SetCanonical(strReferer, referer) +} + +// Method returns HTTP request method. +func (h *RequestHeader) Method() []byte { + if len(h.method) == 0 { + return strGet + } + return h.method +} + +// SetMethod sets HTTP request method. +func (h *RequestHeader) SetMethod(method string) { + h.method = append(h.method[:0], method...) +} + +// SetMethodBytes sets HTTP request method. +func (h *RequestHeader) SetMethodBytes(method []byte) { + h.method = append(h.method[:0], method...) +} + +// RequestURI returns RequestURI from the first HTTP request line. +func (h *RequestHeader) RequestURI() []byte { + requestURI := h.requestURI + if len(requestURI) == 0 { + requestURI = strSlash + } + return requestURI +} + +// SetRequestURI sets RequestURI for the first HTTP request line. +// RequestURI must be properly encoded. +// Use URI.RequestURI for constructing proper RequestURI if unsure. +func (h *RequestHeader) SetRequestURI(requestURI string) { + h.requestURI = append(h.requestURI[:0], requestURI...) +} + +// SetRequestURIBytes sets RequestURI for the first HTTP request line. +// RequestURI must be properly encoded. +// Use URI.RequestURI for constructing proper RequestURI if unsure. 
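+//
+// A minimal illustrative sketch of the URI.RequestURI route suggested above
+// (h is a RequestHeader), assuming the URI type declared elsewhere in this
+// package:
+//
+//    var u URI
+//    u.Update("http://example.com/foo/bar?baz=1")
+//    h.SetRequestURIBytes(u.RequestURI())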
+func (h *RequestHeader) SetRequestURIBytes(requestURI []byte) { + h.requestURI = append(h.requestURI[:0], requestURI...) +} + +// IsGet returns true if request method is GET. +func (h *RequestHeader) IsGet() bool { + return bytes.Equal(h.Method(), strGet) +} + +// IsPost returns true if request method is POST. +func (h *RequestHeader) IsPost() bool { + return bytes.Equal(h.Method(), strPost) +} + +// IsPut returns true if request method is PUT. +func (h *RequestHeader) IsPut() bool { + return bytes.Equal(h.Method(), strPut) +} + +// IsHead returns true if request method is HEAD. +func (h *RequestHeader) IsHead() bool { + return bytes.Equal(h.Method(), strHead) +} + +// IsDelete returns true if request method is DELETE. +func (h *RequestHeader) IsDelete() bool { + return bytes.Equal(h.Method(), strDelete) +} + +// IsConnect returns true if request method is CONNECT. +func (h *RequestHeader) IsConnect() bool { + return bytes.Equal(h.Method(), strConnect) +} + +// IsHTTP11 returns true if the request is HTTP/1.1. +func (h *RequestHeader) IsHTTP11() bool { + return !h.noHTTP11 +} + +// IsHTTP11 returns true if the response is HTTP/1.1. +func (h *ResponseHeader) IsHTTP11() bool { + return !h.noHTTP11 +} + +// HasAcceptEncoding returns true if the header contains +// the given Accept-Encoding value. +func (h *RequestHeader) HasAcceptEncoding(acceptEncoding string) bool { + h.bufKV.value = append(h.bufKV.value[:0], acceptEncoding...) + return h.HasAcceptEncodingBytes(h.bufKV.value) +} + +// HasAcceptEncodingBytes returns true if the header contains +// the given Accept-Encoding value. +func (h *RequestHeader) HasAcceptEncodingBytes(acceptEncoding []byte) bool { + ae := h.peek(strAcceptEncoding) + n := bytes.Index(ae, acceptEncoding) + if n < 0 { + return false + } + b := ae[n+len(acceptEncoding):] + if len(b) > 0 && b[0] != ',' { + return false + } + if n == 0 { + return true + } + return ae[n-1] == ' ' +} + +// Len returns the number of headers set, +// i.e. the number of times f is called in VisitAll. +func (h *ResponseHeader) Len() int { + n := 0 + h.VisitAll(func(k, v []byte) { n++ }) + return n +} + +// Len returns the number of headers set, +// i.e. the number of times f is called in VisitAll. +func (h *RequestHeader) Len() int { + n := 0 + h.VisitAll(func(k, v []byte) { n++ }) + return n +} + +// DisableNormalizing disables header names' normalization. +// +// By default all the header names are normalized by uppercasing +// the first letter and all the first letters following dashes, +// while lowercasing all the other letters. +// Examples: +// +// * CONNECTION -> Connection +// * conteNT-tYPE -> Content-Type +// * foo-bar-baz -> Foo-Bar-Baz +// +// Disable header names' normalization only if you know what you are doing. +func (h *RequestHeader) DisableNormalizing() { + h.disableNormalizing = true +} + +// DisableNormalizing disables header names' normalization. +// +// By default all the header names are normalized by uppercasing +// the first letter and all the first letters following dashes, +// while lowercasing all the other letters. +// Examples: +// +// * CONNECTION -> Connection +// * conteNT-tYPE -> Content-Type +// * foo-bar-baz -> Foo-Bar-Baz +// +// Disable header names' normalization only if you know what you are doing. +func (h *ResponseHeader) DisableNormalizing() { + h.disableNormalizing = true +} + +// Reset clears response header. 
+func (h *ResponseHeader) Reset() { + h.disableNormalizing = false + h.resetSkipNormalize() +} + +func (h *ResponseHeader) resetSkipNormalize() { + h.noHTTP11 = false + h.connectionClose = false + + h.statusCode = 0 + h.contentLength = 0 + h.contentLengthBytes = h.contentLengthBytes[:0] + + h.contentType = h.contentType[:0] + h.server = h.server[:0] + + h.h = h.h[:0] + h.cookies = h.cookies[:0] +} + +// Reset clears request header. +func (h *RequestHeader) Reset() { + h.disableNormalizing = false + h.resetSkipNormalize() +} + +func (h *RequestHeader) resetSkipNormalize() { + h.noHTTP11 = false + h.connectionClose = false + + h.contentLength = 0 + h.contentLengthBytes = h.contentLengthBytes[:0] + + h.method = h.method[:0] + h.requestURI = h.requestURI[:0] + h.host = h.host[:0] + h.contentType = h.contentType[:0] + h.userAgent = h.userAgent[:0] + + h.h = h.h[:0] + h.cookies = h.cookies[:0] + h.cookiesCollected = false + + h.rawHeaders = h.rawHeaders[:0] + h.rawHeadersParsed = false +} + +// CopyTo copies all the headers to dst. +func (h *ResponseHeader) CopyTo(dst *ResponseHeader) { + dst.Reset() + + dst.disableNormalizing = h.disableNormalizing + dst.noHTTP11 = h.noHTTP11 + dst.connectionClose = h.connectionClose + + dst.statusCode = h.statusCode + dst.contentLength = h.contentLength + dst.contentLengthBytes = append(dst.contentLengthBytes[:0], h.contentLengthBytes...) + dst.contentType = append(dst.contentType[:0], h.contentType...) + dst.server = append(dst.server[:0], h.server...) + dst.h = copyArgs(dst.h, h.h) + dst.cookies = copyArgs(dst.cookies, h.cookies) +} + +// CopyTo copies all the headers to dst. +func (h *RequestHeader) CopyTo(dst *RequestHeader) { + dst.Reset() + + dst.disableNormalizing = h.disableNormalizing + dst.noHTTP11 = h.noHTTP11 + dst.connectionClose = h.connectionClose + + dst.contentLength = h.contentLength + dst.contentLengthBytes = append(dst.contentLengthBytes[:0], h.contentLengthBytes...) + dst.method = append(dst.method[:0], h.method...) + dst.requestURI = append(dst.requestURI[:0], h.requestURI...) + dst.host = append(dst.host[:0], h.host...) + dst.contentType = append(dst.contentType[:0], h.contentType...) + dst.userAgent = append(dst.userAgent[:0], h.userAgent...) + dst.h = copyArgs(dst.h, h.h) + dst.cookies = copyArgs(dst.cookies, h.cookies) + dst.cookiesCollected = h.cookiesCollected + dst.rawHeaders = append(dst.rawHeaders[:0], h.rawHeaders...) + dst.rawHeadersParsed = h.rawHeadersParsed +} + +// VisitAll calls f for each header. +// +// f must not retain references to key and/or value after returning. +// Copy key and/or value contents before returning if you need retaining them. +func (h *ResponseHeader) VisitAll(f func(key, value []byte)) { + if len(h.contentLengthBytes) > 0 { + f(strContentLength, h.contentLengthBytes) + } + contentType := h.ContentType() + if len(contentType) > 0 { + f(strContentType, contentType) + } + server := h.Server() + if len(server) > 0 { + f(strServer, server) + } + if len(h.cookies) > 0 { + visitArgs(h.cookies, func(k, v []byte) { + f(strSetCookie, v) + }) + } + visitArgs(h.h, f) + if h.ConnectionClose() { + f(strConnection, strClose) + } +} + +// VisitAllCookie calls f for each response cookie. +// +// Cookie name is passed in key and the whole Set-Cookie header value +// is passed in value on each f invocation. Value may be parsed +// with Cookie.ParseBytes(). +// +// f must not retain references to key and/or value after returning. 
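+//
+// A minimal illustrative sketch of the Cookie.ParseBytes route mentioned
+// above (h is a filled ResponseHeader), assuming the Cookie helpers declared
+// elsewhere in this package:
+//
+//    h.VisitAllCookie(func(key, value []byte) {
+//        c := AcquireCookie()
+//        if err := c.ParseBytes(value); err == nil {
+//            // inspect c here; do not retain key or value
+//        }
+//        ReleaseCookie(c)
+//    })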
+func (h *ResponseHeader) VisitAllCookie(f func(key, value []byte)) { + visitArgs(h.cookies, f) +} + +// VisitAllCookie calls f for each request cookie. +// +// f must not retain references to key and/or value after returning. +func (h *RequestHeader) VisitAllCookie(f func(key, value []byte)) { + h.parseRawHeaders() + h.collectCookies() + visitArgs(h.cookies, f) +} + +// VisitAll calls f for each header. +// +// f must not retain references to key and/or value after returning. +// Copy key and/or value contents before returning if you need retaining them. +func (h *RequestHeader) VisitAll(f func(key, value []byte)) { + h.parseRawHeaders() + host := h.Host() + if len(host) > 0 { + f(strHost, host) + } + if len(h.contentLengthBytes) > 0 { + f(strContentLength, h.contentLengthBytes) + } + contentType := h.ContentType() + if len(contentType) > 0 { + f(strContentType, contentType) + } + userAgent := h.UserAgent() + if len(userAgent) > 0 { + f(strUserAgent, userAgent) + } + + h.collectCookies() + if len(h.cookies) > 0 { + h.bufKV.value = appendRequestCookieBytes(h.bufKV.value[:0], h.cookies) + f(strCookie, h.bufKV.value) + } + visitArgs(h.h, f) + if h.ConnectionClose() { + f(strConnection, strClose) + } +} + +// Del deletes header with the given key. +func (h *ResponseHeader) Del(key string) { + k := getHeaderKeyBytes(&h.bufKV, key, h.disableNormalizing) + h.del(k) +} + +// DelBytes deletes header with the given key. +func (h *ResponseHeader) DelBytes(key []byte) { + h.bufKV.key = append(h.bufKV.key[:0], key...) + normalizeHeaderKey(h.bufKV.key, h.disableNormalizing) + h.del(h.bufKV.key) +} + +func (h *ResponseHeader) del(key []byte) { + switch string(key) { + case "Content-Type": + h.contentType = h.contentType[:0] + case "Server": + h.server = h.server[:0] + case "Set-Cookie": + h.cookies = h.cookies[:0] + case "Content-Length": + h.contentLength = 0 + h.contentLengthBytes = h.contentLengthBytes[:0] + case "Connection": + h.connectionClose = false + } + h.h = delAllArgsBytes(h.h, key) +} + +// Del deletes header with the given key. +func (h *RequestHeader) Del(key string) { + h.parseRawHeaders() + k := getHeaderKeyBytes(&h.bufKV, key, h.disableNormalizing) + h.del(k) +} + +// DelBytes deletes header with the given key. +func (h *RequestHeader) DelBytes(key []byte) { + h.parseRawHeaders() + h.bufKV.key = append(h.bufKV.key[:0], key...) + normalizeHeaderKey(h.bufKV.key, h.disableNormalizing) + h.del(h.bufKV.key) +} + +func (h *RequestHeader) del(key []byte) { + switch string(key) { + case "Host": + h.host = h.host[:0] + case "Content-Type": + h.contentType = h.contentType[:0] + case "User-Agent": + h.userAgent = h.userAgent[:0] + case "Cookie": + h.cookies = h.cookies[:0] + case "Content-Length": + h.contentLength = 0 + h.contentLengthBytes = h.contentLengthBytes[:0] + case "Connection": + h.connectionClose = false + } + h.h = delAllArgsBytes(h.h, key) +} + +// Add adds the given 'key: value' header. +// +// Multiple headers with the same key may be added with this function. +// Use Set for setting a single header for the given key. +func (h *ResponseHeader) Add(key, value string) { + k := getHeaderKeyBytes(&h.bufKV, key, h.disableNormalizing) + h.h = appendArg(h.h, b2s(k), value) +} + +// AddBytesK adds the given 'key: value' header. +// +// Multiple headers with the same key may be added with this function. +// Use SetBytesK for setting a single header for the given key. 
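+//
+// A minimal illustrative sketch of the add-versus-set distinction described
+// above:
+//
+//    var h ResponseHeader
+//    h.Add("Vary", "Accept-Encoding")
+//    h.Add("Vary", "Origin")            // both Vary headers are emitted
+//    h.Set("Cache-Control", "no-cache") // Set keeps a single value per key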
+func (h *ResponseHeader) AddBytesK(key []byte, value string) { + h.Add(b2s(key), value) +} + +// AddBytesV adds the given 'key: value' header. +// +// Multiple headers with the same key may be added with this function. +// Use SetBytesV for setting a single header for the given key. +func (h *ResponseHeader) AddBytesV(key string, value []byte) { + h.Add(key, b2s(value)) +} + +// AddBytesKV adds the given 'key: value' header. +// +// Multiple headers with the same key may be added with this function. +// Use SetBytesKV for setting a single header for the given key. +func (h *ResponseHeader) AddBytesKV(key, value []byte) { + h.Add(b2s(key), b2s(value)) +} + +// Set sets the given 'key: value' header. +// +// Use Add for setting multiple header values under the same key. +func (h *ResponseHeader) Set(key, value string) { + initHeaderKV(&h.bufKV, key, value, h.disableNormalizing) + h.SetCanonical(h.bufKV.key, h.bufKV.value) +} + +// SetBytesK sets the given 'key: value' header. +// +// Use AddBytesK for setting multiple header values under the same key. +func (h *ResponseHeader) SetBytesK(key []byte, value string) { + h.bufKV.value = append(h.bufKV.value[:0], value...) + h.SetBytesKV(key, h.bufKV.value) +} + +// SetBytesV sets the given 'key: value' header. +// +// Use AddBytesV for setting multiple header values under the same key. +func (h *ResponseHeader) SetBytesV(key string, value []byte) { + k := getHeaderKeyBytes(&h.bufKV, key, h.disableNormalizing) + h.SetCanonical(k, value) +} + +// SetBytesKV sets the given 'key: value' header. +// +// Use AddBytesKV for setting multiple header values under the same key. +func (h *ResponseHeader) SetBytesKV(key, value []byte) { + h.bufKV.key = append(h.bufKV.key[:0], key...) + normalizeHeaderKey(h.bufKV.key, h.disableNormalizing) + h.SetCanonical(h.bufKV.key, value) +} + +// SetCanonical sets the given 'key: value' header assuming that +// key is in canonical form. +func (h *ResponseHeader) SetCanonical(key, value []byte) { + switch string(key) { + case "Content-Type": + h.SetContentTypeBytes(value) + case "Server": + h.SetServerBytes(value) + case "Set-Cookie": + var kv *argsKV + h.cookies, kv = allocArg(h.cookies) + kv.key = getCookieKey(kv.key, value) + kv.value = append(kv.value[:0], value...) + case "Content-Length": + if contentLength, err := parseContentLength(value); err == nil { + h.contentLength = contentLength + h.contentLengthBytes = append(h.contentLengthBytes[:0], value...) + } + case "Connection": + if bytes.Equal(strClose, value) { + h.SetConnectionClose() + } else { + h.ResetConnectionClose() + h.h = setArgBytes(h.h, key, value) + } + case "Transfer-Encoding": + // Transfer-Encoding is managed automatically. + case "Date": + // Date is managed automatically. + default: + h.h = setArgBytes(h.h, key, value) + } +} + +// SetCookie sets the given response cookie. +// +// It is safe re-using the cookie after the function returns. +func (h *ResponseHeader) SetCookie(cookie *Cookie) { + h.cookies = setArgBytes(h.cookies, cookie.Key(), cookie.Cookie()) +} + +// SetCookie sets 'key: value' cookies. +func (h *RequestHeader) SetCookie(key, value string) { + h.parseRawHeaders() + h.collectCookies() + h.cookies = setArg(h.cookies, key, value) +} + +// SetCookieBytesK sets 'key: value' cookies. +func (h *RequestHeader) SetCookieBytesK(key []byte, value string) { + h.SetCookie(b2s(key), value) +} + +// SetCookieBytesKV sets 'key: value' cookies. 
+func (h *RequestHeader) SetCookieBytesKV(key, value []byte) { + h.SetCookie(b2s(key), b2s(value)) +} + +// DelClientCookie instructs the client to remove the given cookie. +// +// Use DelCookie if you want just removing the cookie from response header. +func (h *ResponseHeader) DelClientCookie(key string) { + h.DelCookie(key) + + c := AcquireCookie() + c.SetKey(key) + c.SetExpire(CookieExpireDelete) + h.SetCookie(c) + ReleaseCookie(c) +} + +// DelClientCookieBytes instructs the client to remove the given cookie. +// +// Use DelCookieBytes if you want just removing the cookie from response header. +func (h *ResponseHeader) DelClientCookieBytes(key []byte) { + h.DelClientCookie(b2s(key)) +} + +// DelCookie removes cookie under the given key from response header. +// +// Note that DelCookie doesn't remove the cookie from the client. +// Use DelClientCookie instead. +func (h *ResponseHeader) DelCookie(key string) { + h.cookies = delAllArgs(h.cookies, key) +} + +// DelCookieBytes removes cookie under the given key from response header. +// +// Note that DelCookieBytes doesn't remove the cookie from the client. +// Use DelClientCookieBytes instead. +func (h *ResponseHeader) DelCookieBytes(key []byte) { + h.DelCookie(b2s(key)) +} + +// DelCookie removes cookie under the given key. +func (h *RequestHeader) DelCookie(key string) { + h.parseRawHeaders() + h.collectCookies() + h.cookies = delAllArgs(h.cookies, key) +} + +// DelCookieBytes removes cookie under the given key. +func (h *RequestHeader) DelCookieBytes(key []byte) { + h.DelCookie(b2s(key)) +} + +// DelAllCookies removes all the cookies from response headers. +func (h *ResponseHeader) DelAllCookies() { + h.cookies = h.cookies[:0] +} + +// DelAllCookies removes all the cookies from request headers. +func (h *RequestHeader) DelAllCookies() { + h.parseRawHeaders() + h.collectCookies() + h.cookies = h.cookies[:0] +} + +// Add adds the given 'key: value' header. +// +// Multiple headers with the same key may be added with this function. +// Use Set for setting a single header for the given key. +func (h *RequestHeader) Add(key, value string) { + k := getHeaderKeyBytes(&h.bufKV, key, h.disableNormalizing) + h.h = appendArg(h.h, b2s(k), value) +} + +// AddBytesK adds the given 'key: value' header. +// +// Multiple headers with the same key may be added with this function. +// Use SetBytesK for setting a single header for the given key. +func (h *RequestHeader) AddBytesK(key []byte, value string) { + h.Add(b2s(key), value) +} + +// AddBytesV adds the given 'key: value' header. +// +// Multiple headers with the same key may be added with this function. +// Use SetBytesV for setting a single header for the given key. +func (h *RequestHeader) AddBytesV(key string, value []byte) { + h.Add(key, b2s(value)) +} + +// AddBytesKV adds the given 'key: value' header. +// +// Multiple headers with the same key may be added with this function. +// Use SetBytesKV for setting a single header for the given key. +func (h *RequestHeader) AddBytesKV(key, value []byte) { + h.Add(b2s(key), b2s(value)) +} + +// Set sets the given 'key: value' header. +// +// Use Add for setting multiple header values under the same key. +func (h *RequestHeader) Set(key, value string) { + initHeaderKV(&h.bufKV, key, value, h.disableNormalizing) + h.SetCanonical(h.bufKV.key, h.bufKV.value) +} + +// SetBytesK sets the given 'key: value' header. +// +// Use AddBytesK for setting multiple header values under the same key. 
+func (h *RequestHeader) SetBytesK(key []byte, value string) { + h.bufKV.value = append(h.bufKV.value[:0], value...) + h.SetBytesKV(key, h.bufKV.value) +} + +// SetBytesV sets the given 'key: value' header. +// +// Use AddBytesV for setting multiple header values under the same key. +func (h *RequestHeader) SetBytesV(key string, value []byte) { + k := getHeaderKeyBytes(&h.bufKV, key, h.disableNormalizing) + h.SetCanonical(k, value) +} + +// SetBytesKV sets the given 'key: value' header. +// +// Use AddBytesKV for setting multiple header values under the same key. +func (h *RequestHeader) SetBytesKV(key, value []byte) { + h.bufKV.key = append(h.bufKV.key[:0], key...) + normalizeHeaderKey(h.bufKV.key, h.disableNormalizing) + h.SetCanonical(h.bufKV.key, value) +} + +// SetCanonical sets the given 'key: value' header assuming that +// key is in canonical form. +func (h *RequestHeader) SetCanonical(key, value []byte) { + h.parseRawHeaders() + switch string(key) { + case "Host": + h.SetHostBytes(value) + case "Content-Type": + h.SetContentTypeBytes(value) + case "User-Agent": + h.SetUserAgentBytes(value) + case "Cookie": + h.collectCookies() + h.cookies = parseRequestCookies(h.cookies, value) + case "Content-Length": + if contentLength, err := parseContentLength(value); err == nil { + h.contentLength = contentLength + h.contentLengthBytes = append(h.contentLengthBytes[:0], value...) + } + case "Connection": + if bytes.Equal(strClose, value) { + h.SetConnectionClose() + } else { + h.ResetConnectionClose() + h.h = setArgBytes(h.h, key, value) + } + case "Transfer-Encoding": + // Transfer-Encoding is managed automatically. + default: + h.h = setArgBytes(h.h, key, value) + } +} + +// Peek returns header value for the given key. +// +// Returned value is valid until the next call to ResponseHeader. +// Do not store references to returned value. Make copies instead. +func (h *ResponseHeader) Peek(key string) []byte { + k := getHeaderKeyBytes(&h.bufKV, key, h.disableNormalizing) + return h.peek(k) +} + +// PeekBytes returns header value for the given key. +// +// Returned value is valid until the next call to ResponseHeader. +// Do not store references to returned value. Make copies instead. +func (h *ResponseHeader) PeekBytes(key []byte) []byte { + h.bufKV.key = append(h.bufKV.key[:0], key...) + normalizeHeaderKey(h.bufKV.key, h.disableNormalizing) + return h.peek(h.bufKV.key) +} + +// Peek returns header value for the given key. +// +// Returned value is valid until the next call to RequestHeader. +// Do not store references to returned value. Make copies instead. +func (h *RequestHeader) Peek(key string) []byte { + k := getHeaderKeyBytes(&h.bufKV, key, h.disableNormalizing) + return h.peek(k) +} + +// PeekBytes returns header value for the given key. +// +// Returned value is valid until the next call to RequestHeader. +// Do not store references to returned value. Make copies instead. +func (h *RequestHeader) PeekBytes(key []byte) []byte { + h.bufKV.key = append(h.bufKV.key[:0], key...) 
+ normalizeHeaderKey(h.bufKV.key, h.disableNormalizing) + return h.peek(h.bufKV.key) +} + +func (h *ResponseHeader) peek(key []byte) []byte { + switch string(key) { + case "Content-Type": + return h.ContentType() + case "Server": + return h.Server() + case "Connection": + if h.ConnectionClose() { + return strClose + } + return peekArgBytes(h.h, key) + case "Content-Length": + return h.contentLengthBytes + case "Set-Cookie": + return appendResponseCookieBytes(nil, h.cookies) + default: + return peekArgBytes(h.h, key) + } +} + +func (h *RequestHeader) peek(key []byte) []byte { + h.parseRawHeaders() + switch string(key) { + case "Host": + return h.Host() + case "Content-Type": + return h.ContentType() + case "User-Agent": + return h.UserAgent() + case "Connection": + if h.ConnectionClose() { + return strClose + } + return peekArgBytes(h.h, key) + case "Content-Length": + return h.contentLengthBytes + case "Cookie": + if h.cookiesCollected { + return appendRequestCookieBytes(nil, h.cookies) + } else { + return peekArgBytes(h.h, key) + } + default: + return peekArgBytes(h.h, key) + } +} + +// Cookie returns cookie for the given key. +func (h *RequestHeader) Cookie(key string) []byte { + h.parseRawHeaders() + h.collectCookies() + return peekArgStr(h.cookies, key) +} + +// CookieBytes returns cookie for the given key. +func (h *RequestHeader) CookieBytes(key []byte) []byte { + h.parseRawHeaders() + h.collectCookies() + return peekArgBytes(h.cookies, key) +} + +// Cookie fills cookie for the given cookie.Key. +// +// Returns false if cookie with the given cookie.Key is missing. +func (h *ResponseHeader) Cookie(cookie *Cookie) bool { + v := peekArgBytes(h.cookies, cookie.Key()) + if v == nil { + return false + } + cookie.ParseBytes(v) + return true +} + +// Read reads response header from r. +// +// io.EOF is returned if r is closed before reading the first header byte. +func (h *ResponseHeader) Read(r *bufio.Reader) error { + n := 1 + for { + err := h.tryRead(r, n) + if err == nil { + return nil + } + if err != errNeedMore { + h.resetSkipNormalize() + return err + } + n = r.Buffered() + 1 + } +} + +func (h *ResponseHeader) tryRead(r *bufio.Reader, n int) error { + h.resetSkipNormalize() + b, err := r.Peek(n) + if len(b) == 0 { + // treat all errors on the first byte read as EOF + if n == 1 || err == io.EOF { + return io.EOF + } + + // This is for go 1.6 bug. See https://github.com/golang/go/issues/14121 . + if err == bufio.ErrBufferFull { + return &ErrSmallBuffer{ + error: fmt.Errorf("error when reading response headers: %s", errSmallBuffer), + } + } + + return fmt.Errorf("error when reading response headers: %s", err) + } + b = mustPeekBuffered(r) + headersLen, errParse := h.parse(b) + if errParse != nil { + return headerError("response", err, errParse, b) + } + mustDiscard(r, headersLen) + return nil +} + +func headerError(typ string, err, errParse error, b []byte) error { + if errParse != errNeedMore { + return headerErrorMsg(typ, errParse, b) + } + if err == nil { + return errNeedMore + } + + // Buggy servers may leave trailing CRLFs after http body. + // Treat this case as EOF. + if isOnlyCRLF(b) { + return io.EOF + } + + if err != bufio.ErrBufferFull { + return headerErrorMsg(typ, err, b) + } + return &ErrSmallBuffer{ + error: headerErrorMsg(typ, errSmallBuffer, b), + } +} + +func headerErrorMsg(typ string, err error, b []byte) error { + return fmt.Errorf("error when reading %s headers: %s. 
Buffer size=%d, contents: %s", typ, err, len(b), bufferSnippet(b)) +} + +// Read reads request header from r. +// +// io.EOF is returned if r is closed before reading the first header byte. +func (h *RequestHeader) Read(r *bufio.Reader) error { + n := 1 + for { + err := h.tryRead(r, n) + if err == nil { + return nil + } + if err != errNeedMore { + h.resetSkipNormalize() + return err + } + n = r.Buffered() + 1 + } +} + +func (h *RequestHeader) tryRead(r *bufio.Reader, n int) error { + h.resetSkipNormalize() + b, err := r.Peek(n) + if len(b) == 0 { + // treat all errors on the first byte read as EOF + if n == 1 || err == io.EOF { + return io.EOF + } + + // This is for go 1.6 bug. See https://github.com/golang/go/issues/14121 . + if err == bufio.ErrBufferFull { + return &ErrSmallBuffer{ + error: fmt.Errorf("error when reading request headers: %s", errSmallBuffer), + } + } + + return fmt.Errorf("error when reading request headers: %s", err) + } + b = mustPeekBuffered(r) + headersLen, errParse := h.parse(b) + if errParse != nil { + return headerError("request", err, errParse, b) + } + mustDiscard(r, headersLen) + return nil +} + +func bufferSnippet(b []byte) string { + n := len(b) + start := 200 + end := n - start + if start >= end { + start = n + end = n + } + bStart, bEnd := b[:start], b[end:] + if len(bEnd) == 0 { + return fmt.Sprintf("%q", b) + } + return fmt.Sprintf("%q...%q", bStart, bEnd) +} + +func isOnlyCRLF(b []byte) bool { + for _, ch := range b { + if ch != '\r' && ch != '\n' { + return false + } + } + return true +} + +func init() { + refreshServerDate() + go func() { + for { + time.Sleep(time.Second) + refreshServerDate() + } + }() +} + +var serverDate atomic.Value + +func refreshServerDate() { + b := AppendHTTPDate(nil, time.Now()) + serverDate.Store(b) +} + +// Write writes response header to w. +func (h *ResponseHeader) Write(w *bufio.Writer) error { + _, err := w.Write(h.Header()) + return err +} + +// WriteTo writes response header to w. +// +// WriteTo implements io.WriterTo interface. +func (h *ResponseHeader) WriteTo(w io.Writer) (int64, error) { + n, err := w.Write(h.Header()) + return int64(n), err +} + +// Header returns response header representation. +// +// The returned value is valid until the next call to ResponseHeader methods. +func (h *ResponseHeader) Header() []byte { + h.bufKV.value = h.AppendBytes(h.bufKV.value[:0]) + return h.bufKV.value +} + +// String returns response header representation. +func (h *ResponseHeader) String() string { + return string(h.Header()) +} + +// AppendBytes appends response header representation to dst and returns +// the extended dst. +func (h *ResponseHeader) AppendBytes(dst []byte) []byte { + statusCode := h.StatusCode() + if statusCode < 0 { + statusCode = StatusOK + } + dst = append(dst, statusLine(statusCode)...) + + server := h.Server() + if len(server) != 0 { + dst = appendHeaderLine(dst, strServer, server) + } + dst = appendHeaderLine(dst, strDate, serverDate.Load().([]byte)) + + // Append Content-Type only for non-zero responses + // or if it is explicitly set. + // See https://github.com/valyala/fasthttp/issues/28 . 
+ if h.ContentLength() != 0 || len(h.contentType) > 0 { + dst = appendHeaderLine(dst, strContentType, h.ContentType()) + } + + if len(h.contentLengthBytes) > 0 { + dst = appendHeaderLine(dst, strContentLength, h.contentLengthBytes) + } + + for i, n := 0, len(h.h); i < n; i++ { + kv := &h.h[i] + if !bytes.Equal(kv.key, strDate) { + dst = appendHeaderLine(dst, kv.key, kv.value) + } + } + + n := len(h.cookies) + if n > 0 { + for i := 0; i < n; i++ { + kv := &h.cookies[i] + dst = appendHeaderLine(dst, strSetCookie, kv.value) + } + } + + if h.ConnectionClose() { + dst = appendHeaderLine(dst, strConnection, strClose) + } + + return append(dst, strCRLF...) +} + +// Write writes request header to w. +func (h *RequestHeader) Write(w *bufio.Writer) error { + _, err := w.Write(h.Header()) + return err +} + +// WriteTo writes request header to w. +// +// WriteTo implements io.WriterTo interface. +func (h *RequestHeader) WriteTo(w io.Writer) (int64, error) { + n, err := w.Write(h.Header()) + return int64(n), err +} + +// Header returns request header representation. +// +// The returned representation is valid until the next call to RequestHeader methods. +func (h *RequestHeader) Header() []byte { + h.bufKV.value = h.AppendBytes(h.bufKV.value[:0]) + return h.bufKV.value +} + +// RawHeaders returns raw header key/value bytes. +// +// Depending on server configuration, header keys may be normalized to +// capital-case in place. +// +// This copy is set aside during parsing, so empty slice is returned for all +// cases where parsing did not happen. Similarly, request line is not stored +// during parsing and can not be returned. +// +// The slice is not safe to use after the handler returns. +func (h *RequestHeader) RawHeaders() []byte { + return h.rawHeaders +} + +// String returns request header representation. +func (h *RequestHeader) String() string { + return string(h.Header()) +} + +// AppendBytes appends request header representation to dst and returns +// the extended dst. +func (h *RequestHeader) AppendBytes(dst []byte) []byte { + // there is no need in h.parseRawHeaders() here - raw headers are specially handled below. + dst = append(dst, h.Method()...) + dst = append(dst, ' ') + dst = append(dst, h.RequestURI()...) + dst = append(dst, ' ') + dst = append(dst, strHTTP11...) + dst = append(dst, strCRLF...) + + if !h.rawHeadersParsed && len(h.rawHeaders) > 0 { + return append(dst, h.rawHeaders...) + } + + userAgent := h.UserAgent() + if len(userAgent) == 0 { + userAgent = defaultUserAgent + } + dst = appendHeaderLine(dst, strUserAgent, userAgent) + + host := h.Host() + if len(host) > 0 { + dst = appendHeaderLine(dst, strHost, host) + } + + contentType := h.ContentType() + if !h.ignoreBody() { + if len(contentType) == 0 { + contentType = strPostArgsContentType + } + dst = appendHeaderLine(dst, strContentType, contentType) + + if len(h.contentLengthBytes) > 0 { + dst = appendHeaderLine(dst, strContentLength, h.contentLengthBytes) + } + } else if len(contentType) > 0 { + dst = appendHeaderLine(dst, strContentType, contentType) + } + + for i, n := 0, len(h.h); i < n; i++ { + kv := &h.h[i] + dst = appendHeaderLine(dst, kv.key, kv.value) + } + + // there is no need in h.collectCookies() here, since if cookies aren't collected yet, + // they all are located in h.h. + n := len(h.cookies) + if n > 0 { + dst = append(dst, strCookie...) + dst = append(dst, strColonSpace...) + dst = appendRequestCookieBytes(dst, h.cookies) + dst = append(dst, strCRLF...) 
+ } + + if h.ConnectionClose() { + dst = appendHeaderLine(dst, strConnection, strClose) + } + + return append(dst, strCRLF...) +} + +func appendHeaderLine(dst, key, value []byte) []byte { + dst = append(dst, key...) + dst = append(dst, strColonSpace...) + dst = append(dst, value...) + return append(dst, strCRLF...) +} + +func (h *ResponseHeader) parse(buf []byte) (int, error) { + m, err := h.parseFirstLine(buf) + if err != nil { + return 0, err + } + n, err := h.parseHeaders(buf[m:]) + if err != nil { + return 0, err + } + return m + n, nil +} + +func (h *RequestHeader) ignoreBody() bool { + return h.IsGet() || h.IsHead() +} + +func (h *RequestHeader) parse(buf []byte) (int, error) { + m, err := h.parseFirstLine(buf) + if err != nil { + return 0, err + } + + var n int + if !h.ignoreBody() || h.noHTTP11 { + n, err = h.parseHeaders(buf[m:]) + if err != nil { + return 0, err + } + h.rawHeaders = append(h.rawHeaders[:0], buf[m:m+n]...) + h.rawHeadersParsed = true + } else { + var rawHeaders []byte + rawHeaders, n, err = readRawHeaders(h.rawHeaders[:0], buf[m:]) + if err != nil { + return 0, err + } + h.rawHeaders = rawHeaders + } + return m + n, nil +} + +func (h *ResponseHeader) parseFirstLine(buf []byte) (int, error) { + bNext := buf + var b []byte + var err error + for len(b) == 0 { + if b, bNext, err = nextLine(bNext); err != nil { + return 0, err + } + } + + // parse protocol + n := bytes.IndexByte(b, ' ') + if n < 0 { + return 0, fmt.Errorf("cannot find whitespace in the first line of response %q", buf) + } + h.noHTTP11 = !bytes.Equal(b[:n], strHTTP11) + b = b[n+1:] + + // parse status code + h.statusCode, n, err = parseUintBuf(b) + if err != nil { + return 0, fmt.Errorf("cannot parse response status code: %s. Response %q", err, buf) + } + if len(b) > n && b[n] != ' ' { + return 0, fmt.Errorf("unexpected char at the end of status code. Response %q", buf) + } + + return len(buf) - len(bNext), nil +} + +func (h *RequestHeader) parseFirstLine(buf []byte) (int, error) { + bNext := buf + var b []byte + var err error + for len(b) == 0 { + if b, bNext, err = nextLine(bNext); err != nil { + return 0, err + } + } + + // parse method + n := bytes.IndexByte(b, ' ') + if n <= 0 { + return 0, fmt.Errorf("cannot find http request method in %q", buf) + } + h.method = append(h.method[:0], b[:n]...) + b = b[n+1:] + + // parse requestURI + n = bytes.LastIndexByte(b, ' ') + if n < 0 { + h.noHTTP11 = true + n = len(b) + } else if n == 0 { + return 0, fmt.Errorf("requestURI cannot be empty in %q", buf) + } else if !bytes.Equal(b[n+1:], strHTTP11) { + h.noHTTP11 = true + } + h.requestURI = append(h.requestURI[:0], b[:n]...) 
+ + return len(buf) - len(bNext), nil +} + +func peekRawHeader(buf, key []byte) []byte { + n := bytes.Index(buf, key) + if n < 0 { + return nil + } + if n > 0 && buf[n-1] != '\n' { + return nil + } + n += len(key) + if n >= len(buf) { + return nil + } + if buf[n] != ':' { + return nil + } + n++ + if buf[n] != ' ' { + return nil + } + n++ + buf = buf[n:] + n = bytes.IndexByte(buf, '\n') + if n < 0 { + return nil + } + if n > 0 && buf[n-1] == '\r' { + n-- + } + return buf[:n] +} + +func readRawHeaders(dst, buf []byte) ([]byte, int, error) { + n := bytes.IndexByte(buf, '\n') + if n < 0 { + return nil, 0, errNeedMore + } + if (n == 1 && buf[0] == '\r') || n == 0 { + // empty headers + return dst, n + 1, nil + } + + n++ + b := buf + m := n + for { + b = b[m:] + m = bytes.IndexByte(b, '\n') + if m < 0 { + return nil, 0, errNeedMore + } + m++ + n += m + if (m == 2 && b[0] == '\r') || m == 1 { + dst = append(dst, buf[:n]...) + return dst, n, nil + } + } +} + +func (h *ResponseHeader) parseHeaders(buf []byte) (int, error) { + // 'identity' content-length by default + h.contentLength = -2 + + var s headerScanner + s.b = buf + s.disableNormalizing = h.disableNormalizing + var err error + var kv *argsKV + for s.next() { + switch string(s.key) { + case "Content-Type": + h.contentType = append(h.contentType[:0], s.value...) + case "Server": + h.server = append(h.server[:0], s.value...) + case "Content-Length": + if h.contentLength != -1 { + if h.contentLength, err = parseContentLength(s.value); err != nil { + h.contentLength = -2 + } else { + h.contentLengthBytes = append(h.contentLengthBytes[:0], s.value...) + } + } + case "Transfer-Encoding": + if !bytes.Equal(s.value, strIdentity) { + h.contentLength = -1 + h.h = setArgBytes(h.h, strTransferEncoding, strChunked) + } + case "Set-Cookie": + h.cookies, kv = allocArg(h.cookies) + kv.key = getCookieKey(kv.key, s.value) + kv.value = append(kv.value[:0], s.value...) + case "Connection": + if bytes.Equal(s.value, strClose) { + h.connectionClose = true + } else { + h.connectionClose = false + h.h = appendArgBytes(h.h, s.key, s.value) + } + default: + h.h = appendArgBytes(h.h, s.key, s.value) + } + } + if s.err != nil { + h.connectionClose = true + return 0, s.err + } + + if h.contentLength < 0 { + h.contentLengthBytes = h.contentLengthBytes[:0] + } + if h.contentLength == -2 && !h.ConnectionUpgrade() && !h.mustSkipContentLength() { + h.h = setArgBytes(h.h, strTransferEncoding, strIdentity) + h.connectionClose = true + } + if h.noHTTP11 && !h.connectionClose { + // close connection for non-http/1.1 response unless 'Connection: keep-alive' is set. + v := peekArgBytes(h.h, strConnection) + h.connectionClose = !hasHeaderValue(v, strKeepAlive) && !hasHeaderValue(v, strKeepAliveCamelCase) + } + + return len(buf) - len(s.b), nil +} + +func (h *RequestHeader) parseHeaders(buf []byte) (int, error) { + h.contentLength = -2 + + var s headerScanner + s.b = buf + s.disableNormalizing = h.disableNormalizing + var err error + for s.next() { + switch string(s.key) { + case "Host": + h.host = append(h.host[:0], s.value...) + case "User-Agent": + h.userAgent = append(h.userAgent[:0], s.value...) + case "Content-Type": + h.contentType = append(h.contentType[:0], s.value...) + case "Content-Length": + if h.contentLength != -1 { + if h.contentLength, err = parseContentLength(s.value); err != nil { + h.contentLength = -2 + } else { + h.contentLengthBytes = append(h.contentLengthBytes[:0], s.value...) 
+ } + } + case "Transfer-Encoding": + if !bytes.Equal(s.value, strIdentity) { + h.contentLength = -1 + h.h = setArgBytes(h.h, strTransferEncoding, strChunked) + } + case "Connection": + if bytes.Equal(s.value, strClose) { + h.connectionClose = true + } else { + h.connectionClose = false + h.h = appendArgBytes(h.h, s.key, s.value) + } + default: + h.h = appendArgBytes(h.h, s.key, s.value) + } + } + if s.err != nil { + h.connectionClose = true + return 0, s.err + } + + if h.contentLength < 0 { + h.contentLengthBytes = h.contentLengthBytes[:0] + } + if h.noHTTP11 && !h.connectionClose { + // close connection for non-http/1.1 request unless 'Connection: keep-alive' is set. + v := peekArgBytes(h.h, strConnection) + h.connectionClose = !hasHeaderValue(v, strKeepAlive) && !hasHeaderValue(v, strKeepAliveCamelCase) + } + return s.hLen, nil +} + +func (h *RequestHeader) parseRawHeaders() { + if h.rawHeadersParsed { + return + } + h.rawHeadersParsed = true + if len(h.rawHeaders) == 0 { + return + } + h.parseHeaders(h.rawHeaders) +} + +func (h *RequestHeader) collectCookies() { + if h.cookiesCollected { + return + } + + for i, n := 0, len(h.h); i < n; i++ { + kv := &h.h[i] + if bytes.Equal(kv.key, strCookie) { + h.cookies = parseRequestCookies(h.cookies, kv.value) + tmp := *kv + copy(h.h[i:], h.h[i+1:]) + n-- + i-- + h.h[n] = tmp + h.h = h.h[:n] + } + } + h.cookiesCollected = true +} + +func parseContentLength(b []byte) (int, error) { + v, n, err := parseUintBuf(b) + if err != nil { + return -1, err + } + if n != len(b) { + return -1, fmt.Errorf("non-numeric chars at the end of Content-Length") + } + return v, nil +} + +type headerScanner struct { + b []byte + key []byte + value []byte + err error + + // hLen stores header subslice len + hLen int + + disableNormalizing bool +} + +func (s *headerScanner) next() bool { + bLen := len(s.b) + if bLen >= 2 && s.b[0] == '\r' && s.b[1] == '\n' { + s.b = s.b[2:] + s.hLen += 2 + return false + } + if bLen >= 1 && s.b[0] == '\n' { + s.b = s.b[1:] + s.hLen++ + return false + } + n := bytes.IndexByte(s.b, ':') + if n < 0 { + s.err = errNeedMore + return false + } + s.key = s.b[:n] + normalizeHeaderKey(s.key, s.disableNormalizing) + n++ + for len(s.b) > n && s.b[n] == ' ' { + n++ + } + s.hLen += n + s.b = s.b[n:] + n = bytes.IndexByte(s.b, '\n') + if n < 0 { + s.err = errNeedMore + return false + } + s.value = s.b[:n] + s.hLen += n + 1 + s.b = s.b[n+1:] + + if n > 0 && s.value[n-1] == '\r' { + n-- + } + for n > 0 && s.value[n-1] == ' ' { + n-- + } + s.value = s.value[:n] + return true +} + +type headerValueScanner struct { + b []byte + value []byte +} + +func (s *headerValueScanner) next() bool { + b := s.b + if len(b) == 0 { + return false + } + n := bytes.IndexByte(b, ',') + if n < 0 { + s.value = stripSpace(b) + s.b = b[len(b):] + return true + } + s.value = stripSpace(b[:n]) + s.b = b[n+1:] + return true +} + +func stripSpace(b []byte) []byte { + for len(b) > 0 && b[0] == ' ' { + b = b[1:] + } + for len(b) > 0 && b[len(b)-1] == ' ' { + b = b[:len(b)-1] + } + return b +} + +func hasHeaderValue(s, value []byte) bool { + var vs headerValueScanner + vs.b = s + for vs.next() { + if bytes.Equal(vs.value, value) { + return true + } + } + return false +} + +func nextLine(b []byte) ([]byte, []byte, error) { + nNext := bytes.IndexByte(b, '\n') + if nNext < 0 { + return nil, nil, errNeedMore + } + n := nNext + if n > 0 && b[n-1] == '\r' { + n-- + } + return b[:n], b[nNext+1:], nil +} + +func initHeaderKV(kv *argsKV, key, value string, disableNormalizing bool) { + kv.key = 
getHeaderKeyBytes(kv, key, disableNormalizing) + kv.value = append(kv.value[:0], value...) +} + +func getHeaderKeyBytes(kv *argsKV, key string, disableNormalizing bool) []byte { + kv.key = append(kv.key[:0], key...) + normalizeHeaderKey(kv.key, disableNormalizing) + return kv.key +} + +func normalizeHeaderKey(b []byte, disableNormalizing bool) { + if disableNormalizing { + return + } + + n := len(b) + if n == 0 { + return + } + + b[0] = toUpperTable[b[0]] + for i := 1; i < n; i++ { + p := &b[i] + if *p == '-' { + i++ + if i < n { + b[i] = toUpperTable[b[i]] + } + continue + } + *p = toLowerTable[*p] + } +} + +// AppendNormalizedHeaderKey appends normalized header key (name) to dst +// and returns the resulting dst. +// +// Normalized header key starts with uppercase letter. The first letters +// after dashes are also uppercased. All the other letters are lowercased. +// Examples: +// +// * coNTENT-TYPe -> Content-Type +// * HOST -> Host +// * foo-bar-baz -> Foo-Bar-Baz +func AppendNormalizedHeaderKey(dst []byte, key string) []byte { + dst = append(dst, key...) + normalizeHeaderKey(dst[len(dst)-len(key):], false) + return dst +} + +// AppendNormalizedHeaderKeyBytes appends normalized header key (name) to dst +// and returns the resulting dst. +// +// Normalized header key starts with uppercase letter. The first letters +// after dashes are also uppercased. All the other letters are lowercased. +// Examples: +// +// * coNTENT-TYPe -> Content-Type +// * HOST -> Host +// * foo-bar-baz -> Foo-Bar-Baz +func AppendNormalizedHeaderKeyBytes(dst, key []byte) []byte { + return AppendNormalizedHeaderKey(dst, b2s(key)) +} + +var ( + errNeedMore = errors.New("need more data: cannot find trailing lf") + errSmallBuffer = errors.New("small read buffer. Increase ReadBufferSize") +) + +// ErrSmallBuffer is returned when the provided buffer size is too small +// for reading request and/or response headers. +// +// ReadBufferSize value from Server or clients should reduce the number +// of such errors. +type ErrSmallBuffer struct { + error +} + +func mustPeekBuffered(r *bufio.Reader) []byte { + buf, err := r.Peek(r.Buffered()) + if len(buf) == 0 || err != nil { + panic(fmt.Sprintf("bufio.Reader.Peek() returned unexpected data (%q, %v)", buf, err)) + } + return buf +} + +func mustDiscard(r *bufio.Reader, n int) { + if _, err := r.Discard(n); err != nil { + panic(fmt.Sprintf("bufio.Reader.Discard(%d) failed: %s", n, err)) + } +} diff --git a/vendor/github.com/erikdubbelboer/fasthttp/header_regression_test.go b/vendor/github.com/erikdubbelboer/fasthttp/header_regression_test.go new file mode 100644 index 0000000..69a5775 --- /dev/null +++ b/vendor/github.com/erikdubbelboer/fasthttp/header_regression_test.go @@ -0,0 +1,87 @@ +package fasthttp + +import ( + "bufio" + "bytes" + "fmt" + "strings" + "testing" +) + +func TestIssue28ResponseWithoutBodyNoContentType(t *testing.T) { + var r Response + + // Empty response without content-type + s := r.String() + if strings.Contains(s, "Content-Type") { + t.Fatalf("unexpected Content-Type found in response header with empty body: %q", s) + } + + // Explicitly set content-type + r.Header.SetContentType("foo/bar") + s = r.String() + if !strings.Contains(s, "Content-Type: foo/bar\r\n") { + t.Fatalf("missing explicitly set content-type for empty response: %q", s) + } + + // Non-empty response. 
+ r.Reset() + r.SetBodyString("foobar") + s = r.String() + if !strings.Contains(s, fmt.Sprintf("Content-Type: %s\r\n", defaultContentType)) { + t.Fatalf("missing default content-type for non-empty response: %q", s) + } + + // Non-empty response with custom content-type. + r.Header.SetContentType("aaa/bbb") + s = r.String() + if !strings.Contains(s, "Content-Type: aaa/bbb\r\n") { + t.Fatalf("missing custom content-type: %q", s) + } +} + +func TestIssue6RequestHeaderSetContentType(t *testing.T) { + testIssue6RequestHeaderSetContentType(t, "GET") + testIssue6RequestHeaderSetContentType(t, "POST") + testIssue6RequestHeaderSetContentType(t, "PUT") + testIssue6RequestHeaderSetContentType(t, "PATCH") +} + +func testIssue6RequestHeaderSetContentType(t *testing.T, method string) { + contentType := "application/json" + contentLength := 123 + + var h RequestHeader + h.SetMethod(method) + h.SetRequestURI("http://localhost/test") + h.SetContentType(contentType) + h.SetContentLength(contentLength) + + issue6VerifyRequestHeader(t, &h, contentType, contentLength, method) + + s := h.String() + + var h1 RequestHeader + + br := bufio.NewReader(bytes.NewBufferString(s)) + if err := h1.Read(br); err != nil { + t.Fatalf("unexpected error: %s", err) + } + issue6VerifyRequestHeader(t, &h1, contentType, contentLength, method) +} + +func issue6VerifyRequestHeader(t *testing.T, h *RequestHeader, contentType string, contentLength int, method string) { + if string(h.ContentType()) != contentType { + t.Fatalf("unexpected content-type: %q. Expecting %q. method=%q", h.ContentType(), contentType, method) + } + if string(h.Method()) != method { + t.Fatalf("unexpected method: %q. Expecting %q", h.Method(), method) + } + if method != "GET" { + if h.ContentLength() != contentLength { + t.Fatalf("unexpected content-length: %d. Expecting %d. method=%q", h.ContentLength(), contentLength, method) + } + } else if h.ContentLength() != 0 { + t.Fatalf("unexpected content-length for GET method: %d. Expecting 0", h.ContentLength()) + } +} diff --git a/vendor/github.com/erikdubbelboer/fasthttp/header_test.go b/vendor/github.com/erikdubbelboer/fasthttp/header_test.go new file mode 100644 index 0000000..5d83848 --- /dev/null +++ b/vendor/github.com/erikdubbelboer/fasthttp/header_test.go @@ -0,0 +1,2294 @@ +package fasthttp + +import ( + "bufio" + "bytes" + "fmt" + "io" + "io/ioutil" + "strings" + "testing" +) + +func TestResponseHeaderEmptyValueFromHeader(t *testing.T) { + var h1 ResponseHeader + h1.SetContentType("foo/bar") + h1.Set("EmptyValue1", "") + h1.Set("EmptyValue2", " ") + s := h1.String() + + var h ResponseHeader + br := bufio.NewReader(bytes.NewBufferString(s)) + if err := h.Read(br); err != nil { + t.Fatalf("unexpected error: %s", err) + } + if string(h.ContentType()) != string(h1.ContentType()) { + t.Fatalf("unexpected content-type: %q. Expecting %q", h.ContentType(), h1.ContentType()) + } + v1 := h.Peek("EmptyValue1") + if len(v1) > 0 { + t.Fatalf("expecting empty value. Got %q", v1) + } + v2 := h.Peek("EmptyValue2") + if len(v2) > 0 { + t.Fatalf("expecting empty value. Got %q", v2) + } +} + +func TestResponseHeaderEmptyValueFromString(t *testing.T) { + s := "HTTP/1.1 200 OK\r\n" + + "EmptyValue1:\r\n" + + "Content-Type: foo/bar\r\n" + + "EmptyValue2: \r\n" + + "\r\n" + + var h ResponseHeader + br := bufio.NewReader(bytes.NewBufferString(s)) + if err := h.Read(br); err != nil { + t.Fatalf("unexpected error: %s", err) + } + if string(h.ContentType()) != "foo/bar" { + t.Fatalf("unexpected content-type: %q. 
Expecting %q", h.ContentType(), "foo/bar") + } + v1 := h.Peek("EmptyValue1") + if len(v1) > 0 { + t.Fatalf("expecting empty value. Got %q", v1) + } + v2 := h.Peek("EmptyValue2") + if len(v2) > 0 { + t.Fatalf("expecting empty value. Got %q", v2) + } +} + +func TestRequestHeaderEmptyValueFromHeader(t *testing.T) { + var h1 RequestHeader + h1.SetRequestURI("/foo/bar") + h1.SetHost("foobar") + h1.Set("EmptyValue1", "") + h1.Set("EmptyValue2", " ") + s := h1.String() + + var h RequestHeader + br := bufio.NewReader(bytes.NewBufferString(s)) + if err := h.Read(br); err != nil { + t.Fatalf("unexpected error: %s", err) + } + if string(h.Host()) != string(h1.Host()) { + t.Fatalf("unexpected host: %q. Expecting %q", h.Host(), h1.Host()) + } + v1 := h.Peek("EmptyValue1") + if len(v1) > 0 { + t.Fatalf("expecting empty value. Got %q", v1) + } + v2 := h.Peek("EmptyValue2") + if len(v2) > 0 { + t.Fatalf("expecting empty value. Got %q", v2) + } +} + +func TestRequestHeaderEmptyValueFromString(t *testing.T) { + s := "GET / HTTP/1.1\r\n" + + "EmptyValue1:\r\n" + + "Host: foobar\r\n" + + "EmptyValue2: \r\n" + + "\r\n" + var h RequestHeader + br := bufio.NewReader(bytes.NewBufferString(s)) + if err := h.Read(br); err != nil { + t.Fatalf("unexpected error: %s", err) + } + if string(h.Host()) != "foobar" { + t.Fatalf("unexpected host: %q. Expecting %q", h.Host(), "foobar") + } + v1 := h.Peek("EmptyValue1") + if len(v1) > 0 { + t.Fatalf("expecting empty value. Got %q", v1) + } + v2 := h.Peek("EmptyValue2") + if len(v2) > 0 { + t.Fatalf("expecting empty value. Got %q", v2) + } +} + +func TestRequestRawHeaders(t *testing.T) { + kvs := "host: foobar\r\n" + + "value: b\r\n" + + "\r\n" + t.Run("normalized", func(t *testing.T) { + s := "GET / HTTP/1.1\r\n" + kvs + exp := "Host: foobar\r\n" + + "Value: b\r\n" + + "\r\n" + var h RequestHeader + br := bufio.NewReader(bytes.NewBufferString(s)) + if err := h.Read(br); err != nil { + t.Fatalf("unexpected error: %s", err) + } + if string(h.Host()) != "foobar" { + t.Fatalf("unexpected host: %q. Expecting %q", h.Host(), "foobar") + } + v2 := h.Peek("Value") + if !bytes.Equal(v2, []byte{'b'}) { + t.Fatalf("expecting non empty value. Got %q", v2) + } + if raw := h.RawHeaders(); string(raw) != exp { + t.Fatalf("expected header %q, got %q", exp, raw) + } + }) + for _, n := range []int{0, 1, 4, 8} { + t.Run(fmt.Sprintf("post-%dk", n), func(t *testing.T) { + l := 1024 * n + body := make([]byte, l) + for i := range body { + body[i] = 'a' + } + cl := fmt.Sprintf("Content-Length: %d\r\n", l) + s := "POST / HTTP/1.1\r\n" + cl + kvs + string(body) + exp := cl + + "Host: foobar\r\n" + + "Value: b\r\n" + + "\r\n" + var h RequestHeader + br := bufio.NewReader(bytes.NewBufferString(s)) + if err := h.Read(br); err != nil { + t.Fatalf("unexpected error: %s", err) + } + if string(h.Host()) != "foobar" { + t.Fatalf("unexpected host: %q. Expecting %q", h.Host(), "foobar") + } + v2 := h.Peek("Value") + if !bytes.Equal(v2, []byte{'b'}) { + t.Fatalf("expecting non empty value. Got %q", v2) + } + if raw := h.RawHeaders(); string(raw) != exp { + t.Fatalf("expected header %q, got %q", exp, raw) + } + }) + } + t.Run("http10", func(t *testing.T) { + s := "GET / HTTP/1.0\r\n" + kvs + exp := "Host: foobar\r\n" + + "Value: b\r\n" + + "\r\n" + var h RequestHeader + br := bufio.NewReader(bytes.NewBufferString(s)) + if err := h.Read(br); err != nil { + t.Fatalf("unexpected error: %s", err) + } + if string(h.Host()) != "foobar" { + t.Fatalf("unexpected host: %q. 
Expecting %q", h.Host(), "foobar") + } + v2 := h.Peek("Value") + if !bytes.Equal(v2, []byte{'b'}) { + t.Fatalf("expecting non empty value. Got %q", v2) + } + if raw := h.RawHeaders(); string(raw) != exp { + t.Fatalf("expected header %q, got %q", exp, raw) + } + }) + t.Run("non-normalized", func(t *testing.T) { + s := "GET / HTTP/1.1\r\n" + kvs + exp := kvs + var h RequestHeader + h.DisableNormalizing() + br := bufio.NewReader(bytes.NewBufferString(s)) + if err := h.Read(br); err != nil { + t.Fatalf("unexpected error: %s", err) + } + if string(h.Host()) != "" { + t.Fatalf("unexpected host: %q. Expecting %q", h.Host(), "") + } + v2 := h.Peek("value") + if !bytes.Equal(v2, []byte{'b'}) { + t.Fatalf("expecting non empty value. Got %q", v2) + } + if raw := h.RawHeaders(); string(raw) != exp { + t.Fatalf("expected header %q, got %q", exp, raw) + } + }) + t.Run("no-kvs", func(t *testing.T) { + s := "GET / HTTP/1.1\r\n\r\n" + exp := "" + var h RequestHeader + h.DisableNormalizing() + br := bufio.NewReader(bytes.NewBufferString(s)) + if err := h.Read(br); err != nil { + t.Fatalf("unexpected error: %s", err) + } + if string(h.Host()) != "" { + t.Fatalf("unexpected host: %q. Expecting %q", h.Host(), "") + } + v1 := h.Peek("NoKey") + if len(v1) > 0 { + t.Fatalf("expecting empty value. Got %q", v1) + } + if raw := h.RawHeaders(); string(raw) != exp { + t.Fatalf("expected header %q, got %q", exp, raw) + } + }) +} + +func TestRequestHeaderSetCookieWithSpecialChars(t *testing.T) { + var h RequestHeader + h.Set("Cookie", "ID&14") + s := h.String() + + if !strings.Contains(s, "Cookie: ID&14") { + t.Fatalf("Missing cookie in request header: [%s]", s) + } + + var h1 RequestHeader + br := bufio.NewReader(bytes.NewBufferString(s)) + if err := h1.Read(br); err != nil { + t.Fatalf("unexpected error: %s", err) + } + cookie := h1.Peek("Cookie") + if string(cookie) != "ID&14" { + t.Fatalf("unexpected cooke: %q. Expecting %q", cookie, "ID&14") + } + + cookie = h1.Cookie("") + if string(cookie) != "ID&14" { + t.Fatalf("unexpected cooke: %q. Expecting %q", cookie, "ID&14") + } +} + +func TestResponseHeaderDefaultStatusCode(t *testing.T) { + var h ResponseHeader + statusCode := h.StatusCode() + if statusCode != StatusOK { + t.Fatalf("unexpected status code: %d. Expecting %d", statusCode, StatusOK) + } +} + +func TestResponseHeaderDelClientCookie(t *testing.T) { + cookieName := "foobar" + + var h ResponseHeader + c := AcquireCookie() + c.SetKey(cookieName) + c.SetValue("aasdfsdaf") + h.SetCookie(c) + + h.DelClientCookieBytes([]byte(cookieName)) + if !h.Cookie(c) { + t.Fatalf("expecting cookie %q", c.Key()) + } + if !c.Expire().Equal(CookieExpireDelete) { + t.Fatalf("unexpected cookie expiration time: %s. Expecting %s", c.Expire(), CookieExpireDelete) + } + if len(c.Value()) > 0 { + t.Fatalf("unexpected cookie value: %q. Expecting empty value", c.Value()) + } + ReleaseCookie(c) +} + +func TestResponseHeaderAdd(t *testing.T) { + m := make(map[string]struct{}) + var h ResponseHeader + h.Add("aaa", "bbb") + m["bbb"] = struct{}{} + for i := 0; i < 10; i++ { + v := fmt.Sprintf("%d", i) + h.Add("Foo-Bar", v) + m[v] = struct{}{} + } + if h.Len() != 12 { + t.Fatalf("unexpected header len %d. Expecting 12", h.Len()) + } + + h.VisitAll(func(k, v []byte) { + switch string(k) { + case "Aaa", "Foo-Bar": + if _, ok := m[string(v)]; !ok { + t.Fatalf("unexpected value found %q. 
key %q", v, k) + } + delete(m, string(v)) + case "Content-Type": + default: + t.Fatalf("unexpected key found: %q", k) + } + }) + if len(m) > 0 { + t.Fatalf("%d headers are missed", len(m)) + } + + s := h.String() + br := bufio.NewReader(bytes.NewBufferString(s)) + var h1 ResponseHeader + if err := h1.Read(br); err != nil { + t.Fatalf("unexpected error: %s", err) + } + + h.VisitAll(func(k, v []byte) { + switch string(k) { + case "Aaa", "Foo-Bar": + m[string(v)] = struct{}{} + case "Content-Type": + default: + t.Fatalf("unexpected key found: %q", k) + } + }) + if len(m) != 11 { + t.Fatalf("unexpected number of headers: %d. Expecting 11", len(m)) + } +} + +func TestRequestHeaderAdd(t *testing.T) { + m := make(map[string]struct{}) + var h RequestHeader + h.Add("aaa", "bbb") + m["bbb"] = struct{}{} + for i := 0; i < 10; i++ { + v := fmt.Sprintf("%d", i) + h.Add("Foo-Bar", v) + m[v] = struct{}{} + } + if h.Len() != 11 { + t.Fatalf("unexpected header len %d. Expecting 11", h.Len()) + } + + h.VisitAll(func(k, v []byte) { + switch string(k) { + case "Aaa", "Foo-Bar": + if _, ok := m[string(v)]; !ok { + t.Fatalf("unexpected value found %q. key %q", v, k) + } + delete(m, string(v)) + default: + t.Fatalf("unexpected key found: %q", k) + } + }) + if len(m) > 0 { + t.Fatalf("%d headers are missed", len(m)) + } + + s := h.String() + br := bufio.NewReader(bytes.NewBufferString(s)) + var h1 RequestHeader + if err := h1.Read(br); err != nil { + t.Fatalf("unexpected error: %s", err) + } + + h.VisitAll(func(k, v []byte) { + switch string(k) { + case "Aaa", "Foo-Bar": + m[string(v)] = struct{}{} + case "User-Agent": + default: + t.Fatalf("unexpected key found: %q", k) + } + }) + if len(m) != 11 { + t.Fatalf("unexpected number of headers: %d. Expecting 11", len(m)) + } + s1 := h1.String() + if s != s1 { + t.Fatalf("unexpected headers %q. Expecting %q", s1, s) + } +} + +func TestHasHeaderValue(t *testing.T) { + testHasHeaderValue(t, "foobar", "foobar", true) + testHasHeaderValue(t, "foobar", "foo", false) + testHasHeaderValue(t, "foobar", "bar", false) + testHasHeaderValue(t, "keep-alive, Upgrade", "keep-alive", true) + testHasHeaderValue(t, "keep-alive , Upgrade", "Upgrade", true) + testHasHeaderValue(t, "keep-alive, Upgrade", "Upgrade-foo", false) + testHasHeaderValue(t, "keep-alive, Upgrade", "Upgr", false) + testHasHeaderValue(t, "foo , bar, baz ,", "foo", true) + testHasHeaderValue(t, "foo , bar, baz ,", "bar", true) + testHasHeaderValue(t, "foo , bar, baz ,", "baz", true) + testHasHeaderValue(t, "foo , bar, baz ,", "ba", false) + testHasHeaderValue(t, "foo, ", "", true) + testHasHeaderValue(t, "foo", "", false) +} + +func testHasHeaderValue(t *testing.T, s, value string, has bool) { + ok := hasHeaderValue([]byte(s), []byte(value)) + if ok != has { + t.Fatalf("unexpected hasHeaderValue(%q, %q)=%v. Expecting %v", s, value, ok, has) + } +} + +func TestRequestHeaderDel(t *testing.T) { + var h RequestHeader + h.Set("Foo-Bar", "baz") + h.Set("aaa", "bbb") + h.Set("Connection", "keep-alive") + h.Set("Content-Type", "aaa") + h.Set("Host", "aaabbb") + h.Set("User-Agent", "asdfas") + h.Set("Content-Length", "1123") + h.Set("Cookie", "foobar=baz") + + h.Del("foo-bar") + h.Del("connection") + h.DelBytes([]byte("content-type")) + h.Del("Host") + h.Del("user-agent") + h.Del("content-length") + h.Del("cookie") + + hv := h.Peek("aaa") + if string(hv) != "bbb" { + t.Fatalf("unexpected header value: %q. 
Expecting %q", hv, "bbb") + } + hv = h.Peek("Foo-Bar") + if len(hv) > 0 { + t.Fatalf("non-zero value: %q", hv) + } + hv = h.Peek("Connection") + if len(hv) > 0 { + t.Fatalf("non-zero value: %q", hv) + } + hv = h.Peek("Content-Type") + if len(hv) > 0 { + t.Fatalf("non-zero value: %q", hv) + } + hv = h.Peek("Host") + if len(hv) > 0 { + t.Fatalf("non-zero value: %q", hv) + } + hv = h.Peek("User-Agent") + if len(hv) > 0 { + t.Fatalf("non-zero value: %q", hv) + } + hv = h.Peek("Content-Length") + if len(hv) > 0 { + t.Fatalf("non-zero value: %q", hv) + } + hv = h.Peek("Cookie") + if len(hv) > 0 { + t.Fatalf("non-zero value: %q", hv) + } + + cv := h.Cookie("foobar") + if len(cv) > 0 { + t.Fatalf("unexpected cookie obtianed: %q", cv) + } + if h.ContentLength() != 0 { + t.Fatalf("unexpected content-length: %d. Expecting 0", h.ContentLength()) + } +} + +func TestResponseHeaderDel(t *testing.T) { + var h ResponseHeader + h.Set("Foo-Bar", "baz") + h.Set("aaa", "bbb") + h.Set("Connection", "keep-alive") + h.Set("Content-Type", "aaa") + h.Set("Server", "aaabbb") + h.Set("Content-Length", "1123") + + var c Cookie + c.SetKey("foo") + c.SetValue("bar") + h.SetCookie(&c) + + h.Del("foo-bar") + h.Del("connection") + h.DelBytes([]byte("content-type")) + h.Del("Server") + h.Del("content-length") + h.Del("set-cookie") + + hv := h.Peek("aaa") + if string(hv) != "bbb" { + t.Fatalf("unexpected header value: %q. Expecting %q", hv, "bbb") + } + hv = h.Peek("Foo-Bar") + if len(hv) > 0 { + t.Fatalf("non-zero header value: %q", hv) + } + hv = h.Peek("Connection") + if len(hv) > 0 { + t.Fatalf("non-zero value: %q", hv) + } + hv = h.Peek("Content-Type") + if string(hv) != string(defaultContentType) { + t.Fatalf("unexpected content-type: %q. Expecting %q", hv, defaultContentType) + } + hv = h.Peek("Server") + if len(hv) > 0 { + t.Fatalf("non-zero value: %q", hv) + } + hv = h.Peek("Content-Length") + if len(hv) > 0 { + t.Fatalf("non-zero value: %q", hv) + } + + if h.Cookie(&c) { + t.Fatalf("unexpected cookie obtianed: %q", &c) + } + if h.ContentLength() != 0 { + t.Fatalf("unexpected content-length: %d. Expecting 0", h.ContentLength()) + } +} + +func TestAppendNormalizedHeaderKeyBytes(t *testing.T) { + testAppendNormalizedHeaderKeyBytes(t, "", "") + testAppendNormalizedHeaderKeyBytes(t, "Content-Type", "Content-Type") + testAppendNormalizedHeaderKeyBytes(t, "foO-bAr-BAZ", "Foo-Bar-Baz") +} + +func testAppendNormalizedHeaderKeyBytes(t *testing.T, key, expectedKey string) { + buf := []byte("foobar") + result := AppendNormalizedHeaderKeyBytes(buf, []byte(key)) + normalizedKey := result[len(buf):] + if string(normalizedKey) != expectedKey { + t.Fatalf("unexpected normalized key %q. 
Expecting %q", normalizedKey, expectedKey) + } +} + +func TestRequestHeaderHTTP10ConnectionClose(t *testing.T) { + s := "GET / HTTP/1.0\r\nHost: foobar\r\n\r\n" + var h RequestHeader + br := bufio.NewReader(bytes.NewBufferString(s)) + if err := h.Read(br); err != nil { + t.Fatalf("unexpected error: %s", err) + } + + if !h.connectionCloseFast() { + t.Fatalf("expecting 'Connection: close' request header") + } + if !h.ConnectionClose() { + t.Fatalf("expecting 'Connection: close' request header") + } +} + +func TestRequestHeaderHTTP10ConnectionKeepAlive(t *testing.T) { + s := "GET / HTTP/1.0\r\nHost: foobar\r\nConnection: keep-alive\r\n\r\n" + var h RequestHeader + br := bufio.NewReader(bytes.NewBufferString(s)) + if err := h.Read(br); err != nil { + t.Fatalf("unexpected error: %s", err) + } + + if h.ConnectionClose() { + t.Fatalf("unexpected 'Connection: close' request header") + } +} + +func TestBufferSnippet(t *testing.T) { + testBufferSnippet(t, "", `""`) + testBufferSnippet(t, "foobar", `"foobar"`) + + b := string(createFixedBody(199)) + bExpected := fmt.Sprintf("%q", b) + testBufferSnippet(t, b, bExpected) + for i := 0; i < 10; i++ { + b += "foobar" + bExpected = fmt.Sprintf("%q", b) + testBufferSnippet(t, b, bExpected) + } + + b = string(createFixedBody(400)) + bExpected = fmt.Sprintf("%q", b) + testBufferSnippet(t, b, bExpected) + for i := 0; i < 10; i++ { + b += "sadfqwer" + bExpected = fmt.Sprintf("%q...%q", b[:200], b[len(b)-200:]) + testBufferSnippet(t, b, bExpected) + } +} + +func testBufferSnippet(t *testing.T, buf, expectedSnippet string) { + snippet := bufferSnippet([]byte(buf)) + if snippet != expectedSnippet { + t.Fatalf("unexpected snippet %s. Expecting %s", snippet, expectedSnippet) + } +} + +func TestResponseHeaderTrailingCRLFSuccess(t *testing.T) { + trailingCRLF := "\r\n\r\n\r\n" + s := "HTTP/1.1 200 OK\r\nContent-Type: aa\r\nContent-Length: 123\r\n\r\n" + trailingCRLF + + var r ResponseHeader + br := bufio.NewReader(bytes.NewBufferString(s)) + if err := r.Read(br); err != nil { + t.Fatalf("unexpected error: %s", err) + } + + // try reading the trailing CRLF. It must return EOF + err := r.Read(br) + if err == nil { + t.Fatalf("expecting error") + } + if err != io.EOF { + t.Fatalf("unexpected error: %s. Expecting %s", err, io.EOF) + } +} + +func TestResponseHeaderTrailingCRLFError(t *testing.T) { + trailingCRLF := "\r\nerror\r\n\r\n" + s := "HTTP/1.1 200 OK\r\nContent-Type: aa\r\nContent-Length: 123\r\n\r\n" + trailingCRLF + + var r ResponseHeader + br := bufio.NewReader(bytes.NewBufferString(s)) + if err := r.Read(br); err != nil { + t.Fatalf("unexpected error: %s", err) + } + + // try reading the trailing CRLF. It must return EOF + err := r.Read(br) + if err == nil { + t.Fatalf("expecting error") + } + if err == io.EOF { + t.Fatalf("unexpected error: %s", err) + } +} + +func TestRequestHeaderTrailingCRLFSuccess(t *testing.T) { + trailingCRLF := "\r\n\r\n\r\n" + s := "GET / HTTP/1.1\r\nHost: aaa.com\r\n\r\n" + trailingCRLF + + var r RequestHeader + br := bufio.NewReader(bytes.NewBufferString(s)) + if err := r.Read(br); err != nil { + t.Fatalf("unexpected error: %s", err) + } + + // try reading the trailing CRLF. It must return EOF + err := r.Read(br) + if err == nil { + t.Fatalf("expecting error") + } + if err != io.EOF { + t.Fatalf("unexpected error: %s. 
Expecting %s", err, io.EOF) + } +} + +func TestRequestHeaderTrailingCRLFError(t *testing.T) { + trailingCRLF := "\r\nerror\r\n\r\n" + s := "GET / HTTP/1.1\r\nHost: aaa.com\r\n\r\n" + trailingCRLF + + var r RequestHeader + br := bufio.NewReader(bytes.NewBufferString(s)) + if err := r.Read(br); err != nil { + t.Fatalf("unexpected error: %s", err) + } + + // try reading the trailing CRLF. It must return EOF + err := r.Read(br) + if err == nil { + t.Fatalf("expecting error") + } + if err == io.EOF { + t.Fatalf("unexpected error: %s", err) + } +} + +func TestRequestHeaderReadEOF(t *testing.T) { + var r RequestHeader + + br := bufio.NewReader(&bytes.Buffer{}) + err := r.Read(br) + if err == nil { + t.Fatalf("expecting error") + } + if err != io.EOF { + t.Fatalf("unexpected error: %s. Expecting %s", err, io.EOF) + } + + // incomplete request header mustn't return io.EOF + br = bufio.NewReader(bytes.NewBufferString("GET ")) + err = r.Read(br) + if err == nil { + t.Fatalf("expecting error") + } + if err == io.EOF { + t.Fatalf("expecting non-EOF error") + } +} + +func TestResponseHeaderReadEOF(t *testing.T) { + var r ResponseHeader + + br := bufio.NewReader(&bytes.Buffer{}) + err := r.Read(br) + if err == nil { + t.Fatalf("expecting error") + } + if err != io.EOF { + t.Fatalf("unexpected error: %s. Expecting %s", err, io.EOF) + } + + // incomplete response header mustn't return io.EOF + br = bufio.NewReader(bytes.NewBufferString("HTTP/1.1 ")) + err = r.Read(br) + if err == nil { + t.Fatalf("expecting error") + } + if err == io.EOF { + t.Fatalf("expecting non-EOF error") + } +} + +func TestResponseHeaderOldVersion(t *testing.T) { + var h ResponseHeader + + s := "HTTP/1.0 200 OK\r\nContent-Length: 5\r\nContent-Type: aaa\r\n\r\n12345" + s += "HTTP/1.0 200 OK\r\nContent-Length: 2\r\nContent-Type: ass\r\nConnection: keep-alive\r\n\r\n42" + br := bufio.NewReader(bytes.NewBufferString(s)) + if err := h.Read(br); err != nil { + t.Fatalf("unexpected error: %s", err) + } + if !h.ConnectionClose() { + t.Fatalf("expecting 'Connection: close' for the response with old http protocol") + } + + if err := h.Read(br); err != nil { + t.Fatalf("unexpected error: %s", err) + } + if h.ConnectionClose() { + t.Fatalf("unexpected 'Connection: close' for keep-alive response with old http protocol") + } +} + +func TestRequestHeaderSetByteRange(t *testing.T) { + testRequestHeaderSetByteRange(t, 0, 10, "bytes=0-10") + testRequestHeaderSetByteRange(t, 123, -1, "bytes=123-") + testRequestHeaderSetByteRange(t, -234, 58349, "bytes=-234") +} + +func testRequestHeaderSetByteRange(t *testing.T, startPos, endPos int, expectedV string) { + var h RequestHeader + h.SetByteRange(startPos, endPos) + v := h.Peek("Range") + if string(v) != expectedV { + t.Fatalf("unexpected range: %q. Expecting %q. startPos=%d, endPos=%d", v, expectedV, startPos, endPos) + } +} + +func TestResponseHeaderSetContentRange(t *testing.T) { + testResponseHeaderSetContentRange(t, 0, 0, 1, "bytes 0-0/1") + testResponseHeaderSetContentRange(t, 123, 456, 789, "bytes 123-456/789") +} + +func testResponseHeaderSetContentRange(t *testing.T, startPos, endPos, contentLength int, expectedV string) { + var h ResponseHeader + h.SetContentRange(startPos, endPos, contentLength) + v := h.Peek("Content-Range") + if string(v) != expectedV { + t.Fatalf("unexpected content-range: %q. Expecting %q. 
startPos=%d, endPos=%d, contentLength=%d", + v, expectedV, startPos, endPos, contentLength) + } +} + +func TestRequestHeaderHasAcceptEncoding(t *testing.T) { + testRequestHeaderHasAcceptEncoding(t, "", "gzip", false) + testRequestHeaderHasAcceptEncoding(t, "gzip", "sdhc", false) + testRequestHeaderHasAcceptEncoding(t, "deflate", "deflate", true) + testRequestHeaderHasAcceptEncoding(t, "gzip, deflate, sdhc", "gzi", false) + testRequestHeaderHasAcceptEncoding(t, "gzip, deflate, sdhc", "dhc", false) + testRequestHeaderHasAcceptEncoding(t, "gzip, deflate, sdhc", "sdh", false) + testRequestHeaderHasAcceptEncoding(t, "gzip, deflate, sdhc", "zip", false) + testRequestHeaderHasAcceptEncoding(t, "gzip, deflate, sdhc", "flat", false) + testRequestHeaderHasAcceptEncoding(t, "gzip, deflate, sdhc", "flate", false) + testRequestHeaderHasAcceptEncoding(t, "gzip, deflate, sdhc", "def", false) + testRequestHeaderHasAcceptEncoding(t, "gzip, deflate, sdhc", "gzip", true) + testRequestHeaderHasAcceptEncoding(t, "gzip, deflate, sdhc", "deflate", true) + testRequestHeaderHasAcceptEncoding(t, "gzip, deflate, sdhc", "sdhc", true) +} + +func testRequestHeaderHasAcceptEncoding(t *testing.T, ae, v string, resultExpected bool) { + var h RequestHeader + h.Set("Accept-Encoding", ae) + result := h.HasAcceptEncoding(v) + if result != resultExpected { + t.Fatalf("unexpected result in HasAcceptEncoding(%q, %q): %v. Expecting %v", ae, v, result, resultExpected) + } +} + +func TestRequestMultipartFormBoundary(t *testing.T) { + testRequestMultipartFormBoundary(t, "POST / HTTP/1.1\r\nContent-Type: multipart/form-data; boundary=foobar\r\n\r\n", "foobar") + + // incorrect content-type + testRequestMultipartFormBoundary(t, "POST / HTTP/1.1\r\nContent-Type: foo/bar\r\n\r\n", "") + + // empty boundary + testRequestMultipartFormBoundary(t, "POST / HTTP/1.1\r\nContent-Type: multipart/form-data; boundary=\r\n\r\n", "") + + // missing boundary + testRequestMultipartFormBoundary(t, "POST / HTTP/1.1\r\nContent-Type: multipart/form-data\r\n\r\n", "") + + // boundary after other content-type params + testRequestMultipartFormBoundary(t, "POST / HTTP/1.1\r\nContent-Type: multipart/form-data; foo=bar; boundary=--aaabb \r\n\r\n", "--aaabb") + + // quoted boundary + testRequestMultipartFormBoundary(t, "POST / HTTP/1.1\r\nContent-Type: multipart/form-data; boundary=\"foobar\"\r\n\r\n", "foobar") + + var h RequestHeader + h.SetMultipartFormBoundary("foobarbaz") + b := h.MultipartFormBoundary() + if string(b) != "foobarbaz" { + t.Fatalf("unexpected boundary %q. Expecting %q", b, "foobarbaz") + } +} + +func testRequestMultipartFormBoundary(t *testing.T, s, boundary string) { + var h RequestHeader + r := bytes.NewBufferString(s) + br := bufio.NewReader(r) + if err := h.Read(br); err != nil { + t.Fatalf("unexpected error: %s. s=%q, boundary=%q", err, s, boundary) + } + + b := h.MultipartFormBoundary() + if string(b) != boundary { + t.Fatalf("unexpected boundary %q. Expecting %q. 
s=%q", b, boundary, s) + } +} + +func TestResponseHeaderConnectionUpgrade(t *testing.T) { + testResponseHeaderConnectionUpgrade(t, "HTTP/1.1 200 OK\r\nContent-Length: 10\r\nConnection: Upgrade, HTTP2-Settings\r\n\r\n", + true, true) + testResponseHeaderConnectionUpgrade(t, "HTTP/1.1 200 OK\r\nContent-Length: 10\r\nConnection: keep-alive, Upgrade\r\n\r\n", + true, true) + + // non-http/1.1 protocol has 'connection: close' by default, which also disables 'connection: upgrade' + testResponseHeaderConnectionUpgrade(t, "HTTP/1.0 200 OK\r\nContent-Length: 10\r\nConnection: Upgrade, HTTP2-Settings\r\n\r\n", + false, false) + + // explicit keep-alive for non-http/1.1, so 'connection: upgrade' works + testResponseHeaderConnectionUpgrade(t, "HTTP/1.0 200 OK\r\nContent-Length: 10\r\nConnection: Upgrade, keep-alive\r\n\r\n", + true, true) + + // implicit keep-alive for http/1.1 + testResponseHeaderConnectionUpgrade(t, "HTTP/1.1 200 OK\r\nContent-Length: 10\r\n\r\n", false, true) + + // no content-length, so 'connection: close' is assumed + testResponseHeaderConnectionUpgrade(t, "HTTP/1.1 200 OK\r\n\r\n", false, false) +} + +func testResponseHeaderConnectionUpgrade(t *testing.T, s string, isUpgrade, isKeepAlive bool) { + var h ResponseHeader + + r := bytes.NewBufferString(s) + br := bufio.NewReader(r) + if err := h.Read(br); err != nil { + t.Fatalf("unexpected error: %s. Response header %q", err, s) + } + upgrade := h.ConnectionUpgrade() + if upgrade != isUpgrade { + t.Fatalf("unexpected 'connection: upgrade' when parsing response header: %v. Expecting %v. header %q. v=%q", + upgrade, isUpgrade, s, h.Peek("Connection")) + } + keepAlive := !h.ConnectionClose() + if keepAlive != isKeepAlive { + t.Fatalf("unexpected 'connection: keep-alive' when parsing response header: %v. Expecting %v. header %q. v=%q", + keepAlive, isKeepAlive, s, &h) + } +} + +func TestRequestHeaderConnectionUpgrade(t *testing.T) { + testRequestHeaderConnectionUpgrade(t, "GET /foobar HTTP/1.1\r\nConnection: Upgrade, HTTP2-Settings\r\nHost: foobar.com\r\n\r\n", + true, true) + testRequestHeaderConnectionUpgrade(t, "GET /foobar HTTP/1.1\r\nConnection: keep-alive,Upgrade\r\nHost: foobar.com\r\n\r\n", + true, true) + + // non-http/1.1 has 'connection: close' by default, which resets 'connection: upgrade' + testRequestHeaderConnectionUpgrade(t, "GET /foobar HTTP/1.0\r\nConnection: Upgrade, HTTP2-Settings\r\nHost: foobar.com\r\n\r\n", + false, false) + + // explicit 'connection: keep-alive' in non-http/1.1 + testRequestHeaderConnectionUpgrade(t, "GET /foobar HTTP/1.0\r\nConnection: foo, Upgrade, keep-alive\r\nHost: foobar.com\r\n\r\n", + true, true) + + // no upgrade + testRequestHeaderConnectionUpgrade(t, "GET /foobar HTTP/1.1\r\nConnection: Upgradess, foobar\r\nHost: foobar.com\r\n\r\n", + false, true) + testRequestHeaderConnectionUpgrade(t, "GET /foobar HTTP/1.1\r\nHost: foobar.com\r\n\r\n", + false, true) + + // explicit connection close + testRequestHeaderConnectionUpgrade(t, "GET /foobar HTTP/1.1\r\nConnection: close\r\nHost: foobar.com\r\n\r\n", + false, false) +} + +func testRequestHeaderConnectionUpgrade(t *testing.T, s string, isUpgrade, isKeepAlive bool) { + var h RequestHeader + + r := bytes.NewBufferString(s) + br := bufio.NewReader(r) + if err := h.Read(br); err != nil { + t.Fatalf("unexpected error: %s. Request header %q", err, s) + } + upgrade := h.ConnectionUpgrade() + if upgrade != isUpgrade { + t.Fatalf("unexpected 'connection: upgrade' when parsing request header: %v. Expecting %v. 
header %q", + upgrade, isUpgrade, s) + } + keepAlive := !h.ConnectionClose() + if keepAlive != isKeepAlive { + t.Fatalf("unexpected 'connection: keep-alive' when parsing request header: %v. Expecting %v. header %q", + keepAlive, isKeepAlive, s) + } +} + +func TestRequestHeaderProxyWithCookie(t *testing.T) { + // Proxy request header (read it, then write it without touching any headers). + var h RequestHeader + r := bytes.NewBufferString("GET /foo HTTP/1.1\r\nFoo: bar\r\nHost: aaa.com\r\nCookie: foo=bar; bazzz=aaaaaaa; x=y\r\nCookie: aqqqqq=123\r\n\r\n") + br := bufio.NewReader(r) + if err := h.Read(br); err != nil { + t.Fatalf("unexpected error: %s", err) + } + w := &bytes.Buffer{} + bw := bufio.NewWriter(w) + if err := h.Write(bw); err != nil { + t.Fatalf("unexpected error: %s", err) + } + if err := bw.Flush(); err != nil { + t.Fatalf("unexpected error: %s", err) + } + + var h1 RequestHeader + br.Reset(w) + if err := h1.Read(br); err != nil { + t.Fatalf("unexpected error: %s", err) + } + if string(h1.RequestURI()) != "/foo" { + t.Fatalf("unexpected requestURI: %q. Expecting %q", h1.RequestURI(), "/foo") + } + if string(h1.Host()) != "aaa.com" { + t.Fatalf("unexpected host: %q. Expecting %q", h1.Host(), "aaa.com") + } + if string(h1.Peek("Foo")) != "bar" { + t.Fatalf("unexpected Foo: %q. Expecting %q", h1.Peek("Foo"), "bar") + } + if string(h1.Cookie("foo")) != "bar" { + t.Fatalf("unexpected coookie foo=%q. Expecting %q", h1.Cookie("foo"), "bar") + } + if string(h1.Cookie("bazzz")) != "aaaaaaa" { + t.Fatalf("unexpected cookie bazzz=%q. Expecting %q", h1.Cookie("bazzz"), "aaaaaaa") + } + if string(h1.Cookie("x")) != "y" { + t.Fatalf("unexpected cookie x=%q. Expecting %q", h1.Cookie("x"), "y") + } + if string(h1.Cookie("aqqqqq")) != "123" { + t.Fatalf("unexpected cookie aqqqqq=%q. Expecting %q", h1.Cookie("aqqqqq"), "123") + } +} + +func TestPeekRawHeader(t *testing.T) { + // empty header + testPeekRawHeader(t, "", "Foo-Bar", "") + + // different case + testPeekRawHeader(t, "Content-Length: 3443\r\n", "content-length", "") + + // no trailing crlf + testPeekRawHeader(t, "Content-Length: 234", "Content-Length", "") + + // single header + testPeekRawHeader(t, "Content-Length: 12345\r\n", "Content-Length", "12345") + + // multiple headers + testPeekRawHeader(t, "Host: foobar\r\nContent-Length: 434\r\nFoo: bar\r\n\r\n", "Content-Length", "434") + + // lf without cr + testPeekRawHeader(t, "Foo: bar\nConnection: close\nAaa: bbb\ncc: ddd\n", "Connection", "close") +} + +func testPeekRawHeader(t *testing.T, rawHeaders, key string, expectedValue string) { + v := peekRawHeader([]byte(rawHeaders), []byte(key)) + if string(v) != expectedValue { + t.Fatalf("unexpected raw headers value %q. Expected %q. key %q, rawHeaders %q", v, expectedValue, key, rawHeaders) + } +} + +func TestResponseHeaderFirstByteReadEOF(t *testing.T) { + var h ResponseHeader + + r := &errorReader{fmt.Errorf("non-eof error")} + br := bufio.NewReader(r) + err := h.Read(br) + if err == nil { + t.Fatalf("expecting error") + } + if err != io.EOF { + t.Fatalf("unexpected error %s. Expecting %s", err, io.EOF) + } +} + +func TestRequestHeaderFirstByteReadEOF(t *testing.T) { + var h RequestHeader + + r := &errorReader{fmt.Errorf("non-eof error")} + br := bufio.NewReader(r) + err := h.Read(br) + if err == nil { + t.Fatalf("expecting error") + } + if err != io.EOF { + t.Fatalf("unexpected error %s. 
Expecting %s", err, io.EOF) + } +} + +type errorReader struct { + err error +} + +func (r *errorReader) Read(p []byte) (int, error) { + return 0, r.err +} + +func TestRequestHeaderEmptyMethod(t *testing.T) { + var h RequestHeader + + if !h.IsGet() { + t.Fatalf("empty method must be equivalent to GET") + } +} + +func TestResponseHeaderHTTPVer(t *testing.T) { + // non-http/1.1 + testResponseHeaderHTTPVer(t, "HTTP/1.0 200 OK\r\nContent-Type: aaa\r\nContent-Length: 123\r\n\r\n", true) + testResponseHeaderHTTPVer(t, "HTTP/0.9 200 OK\r\nContent-Type: aaa\r\nContent-Length: 123\r\n\r\n", true) + testResponseHeaderHTTPVer(t, "foobar 200 OK\r\nContent-Type: aaa\r\nContent-Length: 123\r\n\r\n", true) + + // http/1.1 + testResponseHeaderHTTPVer(t, "HTTP/1.1 200 OK\r\nContent-Type: aaa\r\nContent-Length: 123\r\n\r\n", false) +} + +func TestRequestHeaderHTTPVer(t *testing.T) { + // non-http/1.1 + testRequestHeaderHTTPVer(t, "GET / HTTP/1.0\r\nHost: aa.com\r\n\r\n", true) + testRequestHeaderHTTPVer(t, "GET / HTTP/0.9\r\nHost: aa.com\r\n\r\n", true) + testRequestHeaderHTTPVer(t, "GET / foobar\r\nHost: aa.com\r\n\r\n", true) + + // empty http version + testRequestHeaderHTTPVer(t, "GET /\r\nHost: aaa.com\r\n\r\n", true) + testRequestHeaderHTTPVer(t, "GET / \r\nHost: aaa.com\r\n\r\n", true) + + // http/1.1 + testRequestHeaderHTTPVer(t, "GET / HTTP/1.1\r\nHost: a.com\r\n\r\n", false) +} + +func testResponseHeaderHTTPVer(t *testing.T, s string, connectionClose bool) { + var h ResponseHeader + + r := bytes.NewBufferString(s) + br := bufio.NewReader(r) + if err := h.Read(br); err != nil { + t.Fatalf("unexpected error: %s. response=%q", err, s) + } + if h.ConnectionClose() != connectionClose { + t.Fatalf("unexpected connectionClose %v. Expecting %v. response=%q", h.ConnectionClose(), connectionClose, s) + } +} + +func testRequestHeaderHTTPVer(t *testing.T, s string, connectionClose bool) { + var h RequestHeader + + r := bytes.NewBufferString(s) + br := bufio.NewReader(r) + if err := h.Read(br); err != nil { + t.Fatalf("unexpected error: %s. request=%q", err, s) + } + if h.ConnectionClose() != connectionClose { + t.Fatalf("unexpected connectionClose %v. Expecting %v. request=%q", h.ConnectionClose(), connectionClose, s) + } +} + +func TestResponseHeaderCopyTo(t *testing.T) { + var h ResponseHeader + + h.Set("Set-Cookie", "foo=bar") + h.Set("Content-Type", "foobar") + h.Set("AAA-BBB", "aaaa") + + var h1 ResponseHeader + h.CopyTo(&h1) + if !bytes.Equal(h1.Peek("Set-cookie"), h.Peek("Set-Cookie")) { + t.Fatalf("unexpected cookie %q. Expected %q", h1.Peek("set-cookie"), h.Peek("set-cookie")) + } + if !bytes.Equal(h1.Peek("Content-Type"), h.Peek("Content-Type")) { + t.Fatalf("unexpected content-type %q. Expected %q", h1.Peek("content-type"), h.Peek("content-type")) + } + if !bytes.Equal(h1.Peek("aaa-bbb"), h.Peek("AAA-BBB")) { + t.Fatalf("unexpected aaa-bbb %q. Expected %q", h1.Peek("aaa-bbb"), h.Peek("aaa-bbb")) + } +} + +func TestRequestHeaderCopyTo(t *testing.T) { + var h RequestHeader + + h.Set("Cookie", "aa=bb; cc=dd") + h.Set("Content-Type", "foobar") + h.Set("Host", "aaaa") + h.Set("aaaxxx", "123") + + var h1 RequestHeader + h.CopyTo(&h1) + if !bytes.Equal(h1.Peek("cookie"), h.Peek("Cookie")) { + t.Fatalf("unexpected cookie after copying: %q. Expected %q", h1.Peek("cookie"), h.Peek("cookie")) + } + if !bytes.Equal(h1.Peek("content-type"), h.Peek("Content-Type")) { + t.Fatalf("unexpected content-type %q. 
Expected %q", h1.Peek("content-type"), h.Peek("content-type")) + } + if !bytes.Equal(h1.Peek("host"), h.Peek("host")) { + t.Fatalf("unexpected host %q. Expected %q", h1.Peek("host"), h.Peek("host")) + } + if !bytes.Equal(h1.Peek("aaaxxx"), h.Peek("aaaxxx")) { + t.Fatalf("unexpected aaaxxx %q. Expected %q", h1.Peek("aaaxxx"), h.Peek("aaaxxx")) + } +} + +func TestRequestHeaderConnectionClose(t *testing.T) { + var h RequestHeader + + h.Set("Connection", "close") + h.Set("Host", "foobar") + if !h.ConnectionClose() { + t.Fatalf("connection: close not set") + } + + var w bytes.Buffer + bw := bufio.NewWriter(&w) + if err := h.Write(bw); err != nil { + t.Fatalf("unexpected error: %s", err) + } + if err := bw.Flush(); err != nil { + t.Fatalf("unexpected error: %s", err) + } + + var h1 RequestHeader + br := bufio.NewReader(&w) + if err := h1.Read(br); err != nil { + t.Fatalf("error when reading request header: %s", err) + } + + if !h1.ConnectionClose() { + t.Fatalf("unexpected connection: close value: %v", h1.ConnectionClose()) + } + if string(h1.Peek("Connection")) != "close" { + t.Fatalf("unexpected connection value: %q. Expecting %q", h.Peek("Connection"), "close") + } +} + +func TestRequestHeaderSetCookie(t *testing.T) { + var h RequestHeader + + h.Set("Cookie", "foo=bar; baz=aaa") + h.Set("cOOkie", "xx=yyy") + + if string(h.Cookie("foo")) != "bar" { + t.Fatalf("Unexpected cookie %q. Expecting %q", h.Cookie("foo"), "bar") + } + if string(h.Cookie("baz")) != "aaa" { + t.Fatalf("Unexpected cookie %q. Expecting %q", h.Cookie("baz"), "aaa") + } + if string(h.Cookie("xx")) != "yyy" { + t.Fatalf("unexpected cookie %q. Expecting %q", h.Cookie("xx"), "yyy") + } +} + +func TestResponseHeaderSetCookie(t *testing.T) { + var h ResponseHeader + + h.Set("set-cookie", "foo=bar; path=/aa/bb; domain=aaa.com") + h.Set("Set-Cookie", "aaaaa=bxx") + + var c Cookie + c.SetKey("foo") + if !h.Cookie(&c) { + t.Fatalf("cannot obtain %q cookie", c.Key()) + } + if string(c.Value()) != "bar" { + t.Fatalf("unexpected cookie value %q. Expected %q", c.Value(), "bar") + } + if string(c.Path()) != "/aa/bb" { + t.Fatalf("unexpected cookie path %q. Expected %q", c.Path(), "/aa/bb") + } + if string(c.Domain()) != "aaa.com" { + t.Fatalf("unexpected cookie domain %q. Expected %q", c.Domain(), "aaa.com") + } + + c.SetKey("aaaaa") + if !h.Cookie(&c) { + t.Fatalf("cannot obtain %q cookie", c.Key()) + } + if string(c.Value()) != "bxx" { + t.Fatalf("unexpected cookie value %q. Expecting %q", c.Value(), "bxx") + } +} + +func TestResponseHeaderVisitAll(t *testing.T) { + var h ResponseHeader + + r := bytes.NewBufferString("HTTP/1.1 200 OK\r\nContent-Type: aa\r\nContent-Length: 123\r\nSet-Cookie: aa=bb; path=/foo/bar\r\nSet-Cookie: ccc\r\n\r\n") + br := bufio.NewReader(r) + if err := h.Read(br); err != nil { + t.Fatalf("Unepxected error: %s", err) + } + + if h.Len() != 4 { + t.Fatalf("Unexpected number of headers: %d. Expected 4", h.Len()) + } + contentLengthCount := 0 + contentTypeCount := 0 + cookieCount := 0 + h.VisitAll(func(key, value []byte) { + k := string(key) + v := string(value) + switch k { + case "Content-Length": + if v != string(h.Peek(k)) { + t.Fatalf("unexpected content-length: %q. Expecting %q", v, h.Peek(k)) + } + contentLengthCount++ + case "Content-Type": + if v != string(h.Peek(k)) { + t.Fatalf("Unexpected content-type: %q. Expected %q", v, h.Peek(k)) + } + contentTypeCount++ + case "Set-Cookie": + if cookieCount == 0 && v != "aa=bb; path=/foo/bar" { + t.Fatalf("unexpected cookie header: %q. 
Expected %q", v, "aa=bb; path=/foo/bar") + } + if cookieCount == 1 && v != "ccc" { + t.Fatalf("unexpected cookie header: %q. Expected %q", v, "ccc") + } + cookieCount++ + default: + t.Fatalf("unexpected header %q=%q", k, v) + } + }) + if contentLengthCount != 1 { + t.Fatalf("unexpected number of content-length headers: %d. Expected 1", contentLengthCount) + } + if contentTypeCount != 1 { + t.Fatalf("unexpected number of content-type headers: %d. Expected 1", contentTypeCount) + } + if cookieCount != 2 { + t.Fatalf("unexpected number of cookie header: %d. Expected 2", cookieCount) + } +} + +func TestRequestHeaderVisitAll(t *testing.T) { + var h RequestHeader + + r := bytes.NewBufferString("GET / HTTP/1.1\r\nHost: aa.com\r\nXX: YYY\r\nXX: ZZ\r\nCookie: a=b; c=d\r\n\r\n") + br := bufio.NewReader(r) + if err := h.Read(br); err != nil { + t.Fatalf("Unexpected error: %s", err) + } + + if h.Len() != 4 { + t.Fatalf("Unexpected number of header: %d. Expected 4", h.Len()) + } + hostCount := 0 + xxCount := 0 + cookieCount := 0 + h.VisitAll(func(key, value []byte) { + k := string(key) + v := string(value) + switch k { + case "Host": + if v != string(h.Peek(k)) { + t.Fatalf("Unexpected host value %q. Expected %q", v, h.Peek(k)) + } + hostCount++ + case "Xx": + if xxCount == 0 && v != "YYY" { + t.Fatalf("Unexpected value %q. Expected %q", v, "YYY") + } + if xxCount == 1 && v != "ZZ" { + t.Fatalf("Unexpected value %q. Expected %q", v, "ZZ") + } + xxCount++ + case "Cookie": + if v != "a=b; c=d" { + t.Fatalf("Unexpected cookie %q. Expected %q", v, "a=b; c=d") + } + cookieCount++ + default: + t.Fatalf("Unepxected header %q=%q", k, v) + } + }) + if hostCount != 1 { + t.Fatalf("Unepxected number of host headers detected %d. Expected 1", hostCount) + } + if xxCount != 2 { + t.Fatalf("Unexpected number of xx headers detected %d. Expected 2", xxCount) + } + if cookieCount != 1 { + t.Fatalf("Unexpected number of cookie headers %d. Expected 1", cookieCount) + } +} + +func TestResponseHeaderCookie(t *testing.T) { + var h ResponseHeader + var c Cookie + + c.SetKey("foobar") + c.SetValue("aaa") + h.SetCookie(&c) + + c.SetKey("йцук") + c.SetDomain("foobar.com") + h.SetCookie(&c) + + c.Reset() + c.SetKey("foobar") + if !h.Cookie(&c) { + t.Fatalf("Cannot find cookie %q", c.Key()) + } + + var expectedC1 Cookie + expectedC1.SetKey("foobar") + expectedC1.SetValue("aaa") + if !equalCookie(&expectedC1, &c) { + t.Fatalf("unexpected cookie\n%#v\nExpected\n%#v\n", &c, &expectedC1) + } + + c.SetKey("йцук") + if !h.Cookie(&c) { + t.Fatalf("cannot find cookie %q", c.Key()) + } + + var expectedC2 Cookie + expectedC2.SetKey("йцук") + expectedC2.SetValue("aaa") + expectedC2.SetDomain("foobar.com") + if !equalCookie(&expectedC2, &c) { + t.Fatalf("unexpected cookie\n%v\nExpected\n%v\n", &c, &expectedC2) + } + + h.VisitAllCookie(func(key, value []byte) { + var cc Cookie + cc.ParseBytes(value) + if !bytes.Equal(key, cc.Key()) { + t.Fatalf("Unexpected cookie key %q. 
Expected %q", key, cc.Key()) + } + switch { + case bytes.Equal(key, []byte("foobar")): + if !equalCookie(&expectedC1, &cc) { + t.Fatalf("unexpected cookie\n%v\nExpected\n%v\n", &cc, &expectedC1) + } + case bytes.Equal(key, []byte("йцук")): + if !equalCookie(&expectedC2, &cc) { + t.Fatalf("unexpected cookie\n%v\nExpected\n%v\n", &cc, &expectedC2) + } + default: + t.Fatalf("unexpected cookie key %q", key) + } + }) + + w := &bytes.Buffer{} + bw := bufio.NewWriter(w) + if err := h.Write(bw); err != nil { + t.Fatalf("unexpected error: %s", err) + } + if err := bw.Flush(); err != nil { + t.Fatalf("unexpected error: %s", err) + } + + h.DelAllCookies() + + var h1 ResponseHeader + br := bufio.NewReader(w) + if err := h1.Read(br); err != nil { + t.Fatalf("unexpected error: %s", err) + } + + c.SetKey("foobar") + if !h1.Cookie(&c) { + t.Fatalf("Cannot find cookie %q", c.Key()) + } + if !equalCookie(&expectedC1, &c) { + t.Fatalf("unexpected cookie\n%v\nExpected\n%v\n", &c, &expectedC1) + } + + h1.DelCookie("foobar") + if h.Cookie(&c) { + t.Fatalf("Unexpected cookie found: %v", &c) + } + if h1.Cookie(&c) { + t.Fatalf("Unexpected cookie found: %v", &c) + } + + c.SetKey("йцук") + if !h1.Cookie(&c) { + t.Fatalf("cannot find cookie %q", c.Key()) + } + if !equalCookie(&expectedC2, &c) { + t.Fatalf("unexpected cookie\n%v\nExpected\n%v\n", &c, &expectedC2) + } + + h1.DelCookie("йцук") + if h.Cookie(&c) { + t.Fatalf("Unexpected cookie found: %v", &c) + } + if h1.Cookie(&c) { + t.Fatalf("Unexpected cookie found: %v", &c) + } +} + +func equalCookie(c1, c2 *Cookie) bool { + if !bytes.Equal(c1.Key(), c2.Key()) { + return false + } + if !bytes.Equal(c1.Value(), c2.Value()) { + return false + } + if !c1.Expire().Equal(c2.Expire()) { + return false + } + if !bytes.Equal(c1.Domain(), c2.Domain()) { + return false + } + if !bytes.Equal(c1.Path(), c2.Path()) { + return false + } + return true +} + +func TestRequestHeaderCookie(t *testing.T) { + var h RequestHeader + h.SetRequestURI("/foobar") + h.Set("Host", "foobar.com") + + h.SetCookie("foo", "bar") + h.SetCookie("привет", "мир") + + if string(h.Cookie("foo")) != "bar" { + t.Fatalf("Unexpected cookie value %q. Exepcted %q", h.Cookie("foo"), "bar") + } + if string(h.Cookie("привет")) != "мир" { + t.Fatalf("Unexpected cookie value %q. Expected %q", h.Cookie("привет"), "мир") + } + + w := &bytes.Buffer{} + bw := bufio.NewWriter(w) + if err := h.Write(bw); err != nil { + t.Fatalf("Unexpected error: %s", err) + } + if err := bw.Flush(); err != nil { + t.Fatalf("Unexpected error: %s", err) + } + + var h1 RequestHeader + br := bufio.NewReader(w) + if err := h1.Read(br); err != nil { + t.Fatalf("Unexpected error: %s", err) + } + + if !bytes.Equal(h1.Cookie("foo"), h.Cookie("foo")) { + t.Fatalf("Unexpected cookie value %q. Exepcted %q", h1.Cookie("foo"), h.Cookie("foo")) + } + h1.DelCookie("foo") + if len(h1.Cookie("foo")) > 0 { + t.Fatalf("Unexpected cookie found: %q", h1.Cookie("foo")) + } + if !bytes.Equal(h1.Cookie("привет"), h.Cookie("привет")) { + t.Fatalf("Unexpected cookie value %q. 
Expected %q", h1.Cookie("привет"), h.Cookie("привет")) + } + h1.DelCookie("привет") + if len(h1.Cookie("привет")) > 0 { + t.Fatalf("Unexpected cookie found: %q", h1.Cookie("привет")) + } + + h.DelAllCookies() + if len(h.Cookie("foo")) > 0 { + t.Fatalf("Unexpected cookie found: %q", h.Cookie("foo")) + } + if len(h.Cookie("привет")) > 0 { + t.Fatalf("Unexpected cookie found: %q", h.Cookie("привет")) + } +} + +func TestRequestHeaderCookieIssue313(t *testing.T) { + var h RequestHeader + h.SetRequestURI("/") + h.Set("Host", "foobar.com") + + h.SetCookie("foo", "bar") + + if string(h.Peek("Cookie")) != "foo=bar" { + t.Fatalf("Unexpected Cookie header %q. Expected %q", h.Peek("Cookie"), "foo=bar") + } + cookieSeen := false + h.VisitAll(func(key, value []byte) { + switch string(key) { + case "Cookie": + cookieSeen = true + } + }) + if !cookieSeen { + t.Fatalf("Cookie not present in VisitAll") + } + + if string(h.Cookie("foo")) != "bar" { + t.Fatalf("Unexpected cookie value %q. Exepcted %q", h.Cookie("foo"), "bar") + } + + if string(h.Peek("Cookie")) != "foo=bar" { + t.Fatalf("Unexpected Cookie header %q. Expected %q", h.Peek("Cookie"), "foo=bar") + } + cookieSeen = false + h.VisitAll(func(key, value []byte) { + switch string(key) { + case "Cookie": + cookieSeen = true + } + }) + if !cookieSeen { + t.Fatalf("Cookie not present in VisitAll") + } +} + +func TestResponseHeaderCookieIssue4(t *testing.T) { + var h ResponseHeader + + c := AcquireCookie() + c.SetKey("foo") + c.SetValue("bar") + h.SetCookie(c) + + if string(h.Peek("Set-Cookie")) != "foo=bar" { + t.Fatalf("Unexpected Set-Cookie header %q. Expected %q", h.Peek("Set-Cookie"), "foo=bar") + } + cookieSeen := false + h.VisitAll(func(key, value []byte) { + switch string(key) { + case "Set-Cookie": + cookieSeen = true + } + }) + if !cookieSeen { + t.Fatalf("Set-Cookie not present in VisitAll") + } + + c = AcquireCookie() + c.SetKey("foo") + h.Cookie(c) + if string(c.Value()) != "bar" { + t.Fatalf("Unexpected cookie value %q. Exepcted %q", c.Value(), "bar") + } + + if string(h.Peek("Set-Cookie")) != "foo=bar" { + t.Fatalf("Unexpected Set-Cookie header %q. Expected %q", h.Peek("Set-Cookie"), "foo=bar") + } + cookieSeen = false + h.VisitAll(func(key, value []byte) { + switch string(key) { + case "Set-Cookie": + cookieSeen = true + } + }) + if !cookieSeen { + t.Fatalf("Set-Cookie not present in VisitAll") + } +} + +func TestRequestHeaderMethod(t *testing.T) { + // common http methods + testRequestHeaderMethod(t, "GET") + testRequestHeaderMethod(t, "POST") + testRequestHeaderMethod(t, "HEAD") + testRequestHeaderMethod(t, "DELETE") + + // non-http methods + testRequestHeaderMethod(t, "foobar") + testRequestHeaderMethod(t, "ABC") +} + +func testRequestHeaderMethod(t *testing.T, expectedMethod string) { + var h RequestHeader + h.SetMethod(expectedMethod) + m := h.Method() + if string(m) != expectedMethod { + t.Fatalf("unexpected method: %q. Expecting %q", m, expectedMethod) + } + + s := h.String() + var h1 RequestHeader + br := bufio.NewReader(bytes.NewBufferString(s)) + if err := h1.Read(br); err != nil { + t.Fatalf("unexpected error: %s", err) + } + m1 := h1.Method() + if string(m) != string(m1) { + t.Fatalf("unexpected method: %q. 
Expecting %q", m, m1) + } +} + +func TestRequestHeaderSetGet(t *testing.T) { + h := &RequestHeader{} + h.SetRequestURI("/aa/bbb") + h.SetMethod("POST") + h.Set("foo", "bar") + h.Set("host", "12345") + h.Set("content-type", "aaa/bbb") + h.Set("content-length", "1234") + h.Set("user-agent", "aaabbb") + h.Set("referer", "axcv") + h.Set("baz", "xxxxx") + h.Set("transfer-encoding", "chunked") + h.Set("connection", "close") + + expectRequestHeaderGet(t, h, "Foo", "bar") + expectRequestHeaderGet(t, h, "Host", "12345") + expectRequestHeaderGet(t, h, "Content-Type", "aaa/bbb") + expectRequestHeaderGet(t, h, "Content-Length", "1234") + expectRequestHeaderGet(t, h, "USER-AGent", "aaabbb") + expectRequestHeaderGet(t, h, "Referer", "axcv") + expectRequestHeaderGet(t, h, "baz", "xxxxx") + expectRequestHeaderGet(t, h, "Transfer-Encoding", "") + expectRequestHeaderGet(t, h, "connecTION", "close") + if !h.ConnectionClose() { + t.Fatalf("unset connection: close") + } + + if h.ContentLength() != 1234 { + t.Fatalf("Unexpected content-length %d. Expected %d", h.ContentLength(), 1234) + } + + w := &bytes.Buffer{} + bw := bufio.NewWriter(w) + err := h.Write(bw) + if err != nil { + t.Fatalf("Unexpected error when writing request header: %s", err) + } + if err := bw.Flush(); err != nil { + t.Fatalf("Unexpected error when flushing request header: %s", err) + } + + var h1 RequestHeader + br := bufio.NewReader(w) + if err = h1.Read(br); err != nil { + t.Fatalf("Unexpected error when reading request header: %s", err) + } + + if h1.ContentLength() != h.ContentLength() { + t.Fatalf("Unexpected Content-Length %d. Expected %d", h1.ContentLength(), h.ContentLength()) + } + + expectRequestHeaderGet(t, &h1, "Foo", "bar") + expectRequestHeaderGet(t, &h1, "HOST", "12345") + expectRequestHeaderGet(t, &h1, "Content-Type", "aaa/bbb") + expectRequestHeaderGet(t, &h1, "Content-Length", "1234") + expectRequestHeaderGet(t, &h1, "USER-AGent", "aaabbb") + expectRequestHeaderGet(t, &h1, "Referer", "axcv") + expectRequestHeaderGet(t, &h1, "baz", "xxxxx") + expectRequestHeaderGet(t, &h1, "Transfer-Encoding", "") + expectRequestHeaderGet(t, &h1, "Connection", "close") + if !h1.ConnectionClose() { + t.Fatalf("unset connection: close") + } +} + +func TestResponseHeaderSetGet(t *testing.T) { + h := &ResponseHeader{} + h.Set("foo", "bar") + h.Set("content-type", "aaa/bbb") + h.Set("connection", "close") + h.Set("content-length", "1234") + h.Set("Server", "aaaa") + h.Set("baz", "xxxxx") + h.Set("Transfer-Encoding", "chunked") + + expectResponseHeaderGet(t, h, "Foo", "bar") + expectResponseHeaderGet(t, h, "Content-Type", "aaa/bbb") + expectResponseHeaderGet(t, h, "Connection", "close") + expectResponseHeaderGet(t, h, "Content-Length", "1234") + expectResponseHeaderGet(t, h, "seRVer", "aaaa") + expectResponseHeaderGet(t, h, "baz", "xxxxx") + expectResponseHeaderGet(t, h, "Transfer-Encoding", "") + + if h.ContentLength() != 1234 { + t.Fatalf("Unexpected content-length %d. Expected %d", h.ContentLength(), 1234) + } + if !h.ConnectionClose() { + t.Fatalf("Unexpected Connection: close value %v. 
Expected %v", h.ConnectionClose(), true) + } + + w := &bytes.Buffer{} + bw := bufio.NewWriter(w) + err := h.Write(bw) + if err != nil { + t.Fatalf("Unexpected error when writing response header: %s", err) + } + if err := bw.Flush(); err != nil { + t.Fatalf("Unexpected error when flushing response header: %s", err) + } + + var h1 ResponseHeader + br := bufio.NewReader(w) + if err = h1.Read(br); err != nil { + t.Fatalf("Unexpected error when reading response header: %s", err) + } + + if h1.ContentLength() != h.ContentLength() { + t.Fatalf("Unexpected Content-Length %d. Expected %d", h1.ContentLength(), h.ContentLength()) + } + if h1.ConnectionClose() != h.ConnectionClose() { + t.Fatalf("unexpected connection: close %v. Expected %v", h1.ConnectionClose(), h.ConnectionClose()) + } + + expectResponseHeaderGet(t, &h1, "Foo", "bar") + expectResponseHeaderGet(t, &h1, "Content-Type", "aaa/bbb") + expectResponseHeaderGet(t, &h1, "Connection", "close") + expectResponseHeaderGet(t, &h1, "seRVer", "aaaa") + expectResponseHeaderGet(t, &h1, "baz", "xxxxx") +} + +func expectRequestHeaderGet(t *testing.T, h *RequestHeader, key, expectedValue string) { + if string(h.Peek(key)) != expectedValue { + t.Fatalf("Unexpected value for key %q: %q. Expected %q", key, h.Peek(key), expectedValue) + } +} + +func expectResponseHeaderGet(t *testing.T, h *ResponseHeader, key, expectedValue string) { + if string(h.Peek(key)) != expectedValue { + t.Fatalf("Unexpected value for key %q: %q. Expected %q", key, h.Peek(key), expectedValue) + } +} + +func TestResponseHeaderConnectionClose(t *testing.T) { + testResponseHeaderConnectionClose(t, true) + testResponseHeaderConnectionClose(t, false) +} + +func testResponseHeaderConnectionClose(t *testing.T, connectionClose bool) { + h := &ResponseHeader{} + if connectionClose { + h.SetConnectionClose() + } + h.SetContentLength(123) + + w := &bytes.Buffer{} + bw := bufio.NewWriter(w) + err := h.Write(bw) + if err != nil { + t.Fatalf("Unexpected error when writing response header: %s", err) + } + if err := bw.Flush(); err != nil { + t.Fatalf("Unexpected error when flushing response header: %s", err) + } + + var h1 ResponseHeader + br := bufio.NewReader(w) + err = h1.Read(br) + if err != nil { + t.Fatalf("Unexpected error when reading response header: %s", err) + } + if h1.ConnectionClose() != h.ConnectionClose() { + t.Fatalf("Unexpected value for ConnectionClose: %v. 
Expected %v", h1.ConnectionClose(), h.ConnectionClose()) + } +} + +func TestRequestHeaderTooBig(t *testing.T) { + s := "GET / HTTP/1.1\r\nHost: aaa.com\r\n" + getHeaders(10500) + "\r\n" + r := bytes.NewBufferString(s) + br := bufio.NewReaderSize(r, 4096) + h := &RequestHeader{} + err := h.Read(br) + if err == nil { + t.Fatalf("Expecting error when reading too big header") + } +} + +func TestResponseHeaderTooBig(t *testing.T) { + s := "HTTP/1.1 200 OK\r\nContent-Type: sss\r\nContent-Length: 0\r\n" + getHeaders(100500) + "\r\n" + r := bytes.NewBufferString(s) + br := bufio.NewReaderSize(r, 4096) + h := &ResponseHeader{} + err := h.Read(br) + if err == nil { + t.Fatalf("Expecting error when reading too big header") + } +} + +type bufioPeekReader struct { + s string + n int +} + +func (r *bufioPeekReader) Read(b []byte) (int, error) { + if len(r.s) == 0 { + return 0, io.EOF + } + + r.n++ + n := r.n + if len(r.s) < n { + n = len(r.s) + } + src := []byte(r.s[:n]) + r.s = r.s[n:] + n = copy(b, src) + return n, nil +} + +func TestRequestHeaderBufioPeek(t *testing.T) { + r := &bufioPeekReader{ + s: "GET / HTTP/1.1\r\nHost: foobar.com\r\n" + getHeaders(10) + "\r\naaaa", + } + br := bufio.NewReaderSize(r, 4096) + h := &RequestHeader{} + if err := h.Read(br); err != nil { + t.Fatalf("Unexpected error when reading request: %s", err) + } + verifyRequestHeader(t, h, 0, "/", "foobar.com", "", "") + verifyTrailer(t, br, "aaaa") +} + +func TestResponseHeaderBufioPeek(t *testing.T) { + r := &bufioPeekReader{ + s: "HTTP/1.1 200 OK\r\nContent-Length: 10\r\nContent-Type: aaa\r\n" + getHeaders(10) + "\r\n0123456789", + } + br := bufio.NewReaderSize(r, 4096) + h := &ResponseHeader{} + if err := h.Read(br); err != nil { + t.Fatalf("Unexpected error when reading response: %s", err) + } + verifyResponseHeader(t, h, 200, 10, "aaa") + verifyTrailer(t, br, "0123456789") +} + +func getHeaders(n int) string { + var h []string + for i := 0; i < n; i++ { + h = append(h, fmt.Sprintf("Header_%d: Value_%d\r\n", i, i)) + } + return strings.Join(h, "") +} + +func TestResponseHeaderReadSuccess(t *testing.T) { + h := &ResponseHeader{} + + // straight order of content-length and content-type + testResponseHeaderReadSuccess(t, h, "HTTP/1.1 200 OK\r\nContent-Length: 123\r\nContent-Type: text/html\r\n\r\n", + 200, 123, "text/html", "") + if h.ConnectionClose() { + t.Fatalf("unexpected connection: close") + } + + // reverse order of content-length and content-type + testResponseHeaderReadSuccess(t, h, "HTTP/1.1 202 OK\r\nContent-Type: text/plain; encoding=utf-8\r\nContent-Length: 543\r\nConnection: close\r\n\r\n", + 202, 543, "text/plain; encoding=utf-8", "") + if !h.ConnectionClose() { + t.Fatalf("expecting connection: close") + } + + // tranfer-encoding: chunked + testResponseHeaderReadSuccess(t, h, "HTTP/1.1 505 Internal error\r\nContent-Type: text/html\r\nTransfer-Encoding: chunked\r\n\r\n", + 505, -1, "text/html", "") + if h.ConnectionClose() { + t.Fatalf("unexpected connection: close") + } + + // reverse order of content-type and tranfer-encoding + testResponseHeaderReadSuccess(t, h, "HTTP/1.1 343 foobar\r\nTransfer-Encoding: chunked\r\nContent-Type: text/json\r\n\r\n", + 343, -1, "text/json", "") + + // additional headers + testResponseHeaderReadSuccess(t, h, "HTTP/1.1 100 Continue\r\nFoobar: baz\r\nContent-Type: aaa/bbb\r\nUser-Agent: x\r\nContent-Length: 123\r\nZZZ: werer\r\n\r\n", + 100, 123, "aaa/bbb", "") + + // trailer (aka body) + testResponseHeaderReadSuccess(t, h, "HTTP/1.1 200 OK\r\nContent-Type: 
text/plain\r\nContent-Length: 32245\r\n\r\nqwert aaa", + 200, 32245, "text/plain", "qwert aaa") + + // ancient http protocol + testResponseHeaderReadSuccess(t, h, "HTTP/0.9 300 OK\r\nContent-Length: 123\r\nContent-Type: text/html\r\n\r\nqqqq", + 300, 123, "text/html", "qqqq") + + // lf instead of crlf + testResponseHeaderReadSuccess(t, h, "HTTP/1.1 200 OK\nContent-Length: 123\nContent-Type: text/html\n\n", + 200, 123, "text/html", "") + + // Zero-length headers with mixed crlf and lf + testResponseHeaderReadSuccess(t, h, "HTTP/1.1 400 OK\nContent-Length: 345\nZero-Value: \r\nContent-Type: aaa\n: zero-key\r\n\r\nooa", + 400, 345, "aaa", "ooa") + + // No space after colon + testResponseHeaderReadSuccess(t, h, "HTTP/1.1 200 OK\nContent-Length:34\nContent-Type: sss\n\naaaa", + 200, 34, "sss", "aaaa") + + // invalid case + testResponseHeaderReadSuccess(t, h, "HTTP/1.1 400 OK\nconTEnt-leNGTH: 123\nConTENT-TYPE: ass\n\n", + 400, 123, "ass", "") + + // duplicate content-length + testResponseHeaderReadSuccess(t, h, "HTTP/1.1 200 OK\r\nContent-Length: 456\r\nContent-Type: foo/bar\r\nContent-Length: 321\r\n\r\n", + 200, 321, "foo/bar", "") + + // duplicate content-type + testResponseHeaderReadSuccess(t, h, "HTTP/1.1 200 OK\r\nContent-Length: 234\r\nContent-Type: foo/bar\r\nContent-Type: baz/bar\r\n\r\n", + 200, 234, "baz/bar", "") + + // both transfer-encoding: chunked and content-length + testResponseHeaderReadSuccess(t, h, "HTTP/1.1 200 OK\r\nContent-Type: foo/bar\r\nContent-Length: 123\r\nTransfer-Encoding: chunked\r\n\r\n", + 200, -1, "foo/bar", "") + + testResponseHeaderReadSuccess(t, h, "HTTP/1.1 300 OK\r\nContent-Type: foo/barr\r\nTransfer-Encoding: chunked\r\nContent-Length: 354\r\n\r\n", + 300, -1, "foo/barr", "") + + // duplicate transfer-encoding: chunked + testResponseHeaderReadSuccess(t, h, "HTTP/1.1 200 OK\r\nContent-Type: text/html\r\nTransfer-Encoding: chunked\r\nTransfer-Encoding: chunked\r\n\r\n", + 200, -1, "text/html", "") + + // no reason string in the first line + testResponseHeaderReadSuccess(t, h, "HTTP/1.1 456\r\nContent-Type: xxx/yyy\r\nContent-Length: 134\r\n\r\naaaxxx", + 456, 134, "xxx/yyy", "aaaxxx") + + // blank lines before the first line + testResponseHeaderReadSuccess(t, h, "\r\nHTTP/1.1 200 OK\r\nContent-Type: aa\r\nContent-Length: 0\r\n\r\nsss", + 200, 0, "aa", "sss") + if h.ConnectionClose() { + t.Fatalf("unexpected connection: close") + } + + // no content-length (informational responses) + testResponseHeaderReadSuccess(t, h, "HTTP/1.1 101 OK\r\n\r\n", + 101, -2, "text/plain; charset=utf-8", "") + if h.ConnectionClose() { + t.Fatalf("expecting connection: keep-alive for informational response") + } + + // no content-length (no-content responses) + testResponseHeaderReadSuccess(t, h, "HTTP/1.1 204 OK\r\n\r\n", + 204, -2, "text/plain; charset=utf-8", "") + if h.ConnectionClose() { + t.Fatalf("expecting connection: keep-alive for no-content response") + } + + // no content-length (not-modified responses) + testResponseHeaderReadSuccess(t, h, "HTTP/1.1 304 OK\r\n\r\n", + 304, -2, "text/plain; charset=utf-8", "") + if h.ConnectionClose() { + t.Fatalf("expecting connection: keep-alive for not-modified response") + } + + // no content-length (identity transfer-encoding) + testResponseHeaderReadSuccess(t, h, "HTTP/1.1 200 OK\r\nContent-Type: foo/bar\r\n\r\nabcdefg", + 200, -2, "foo/bar", "abcdefg") + if !h.ConnectionClose() { + t.Fatalf("expecting connection: close for identity response") + } + + // non-numeric content-length + testResponseHeaderReadSuccess(t, h, 
"HTTP/1.1 200 OK\r\nContent-Length: faaa\r\nContent-Type: text/html\r\n\r\nfoobar", + 200, -2, "text/html", "foobar") + testResponseHeaderReadSuccess(t, h, "HTTP/1.1 201 OK\r\nContent-Length: 123aa\r\nContent-Type: text/ht\r\n\r\naaa", + 201, -2, "text/ht", "aaa") + testResponseHeaderReadSuccess(t, h, "HTTP/1.1 200 OK\r\nContent-Length: aa124\r\nContent-Type: html\r\n\r\nxx", + 200, -2, "html", "xx") + + // no content-type + testResponseHeaderReadSuccess(t, h, "HTTP/1.1 400 OK\r\nContent-Length: 123\r\n\r\nfoiaaa", + 400, 123, string(defaultContentType), "foiaaa") + + // no headers + testResponseHeaderReadSuccess(t, h, "HTTP/1.1 200 OK\r\n\r\naaaabbb", + 200, -2, string(defaultContentType), "aaaabbb") + if !h.IsHTTP11() { + t.Fatalf("expecting http/1.1 protocol") + } + + // ancient http protocol + testResponseHeaderReadSuccess(t, h, "HTTP/1.0 203 OK\r\nContent-Length: 123\r\nContent-Type: foobar\r\n\r\naaa", + 203, 123, "foobar", "aaa") + if h.IsHTTP11() { + t.Fatalf("ancient protocol must be non-http/1.1") + } + if !h.ConnectionClose() { + t.Fatalf("expecting connection: close for ancient protocol") + } + + // ancient http protocol with 'Connection: keep-alive' header. + testResponseHeaderReadSuccess(t, h, "HTTP/1.0 403 aa\r\nContent-Length: 0\r\nContent-Type: 2\r\nConnection: Keep-Alive\r\n\r\nww", + 403, 0, "2", "ww") + if h.IsHTTP11() { + t.Fatalf("ancient protocol must be non-http/1.1") + } + if h.ConnectionClose() { + t.Fatalf("expecting connection: keep-alive for ancient protocol") + } +} + +func TestRequestHeaderReadSuccess(t *testing.T) { + h := &RequestHeader{} + + // simple headers + testRequestHeaderReadSuccess(t, h, "GET /foo/bar HTTP/1.1\r\nHost: google.com\r\n\r\n", + 0, "/foo/bar", "google.com", "", "", "") + if h.ConnectionClose() { + t.Fatalf("unexpected connection: close header") + } + + // simple headers with body + testRequestHeaderReadSuccess(t, h, "GET /a/bar HTTP/1.1\r\nHost: gole.com\r\nconneCTION: close\r\n\r\nfoobar", + 0, "/a/bar", "gole.com", "", "", "foobar") + if !h.ConnectionClose() { + t.Fatalf("connection: close unset") + } + + // ancient http protocol + testRequestHeaderReadSuccess(t, h, "GET /bar HTTP/1.0\r\nHost: gole\r\n\r\npppp", + 0, "/bar", "gole", "", "", "pppp") + if h.IsHTTP11() { + t.Fatalf("ancient http protocol cannot be http/1.1") + } + if !h.ConnectionClose() { + t.Fatalf("expecting connectionClose for ancient http protocol") + } + + // ancient http protocol with 'Connection: keep-alive' header + testRequestHeaderReadSuccess(t, h, "GET /aa HTTP/1.0\r\nHost: bb\r\nConnection: keep-alive\r\n\r\nxxx", + 0, "/aa", "bb", "", "", "xxx") + if h.IsHTTP11() { + t.Fatalf("ancient http protocol cannot be http/1.1") + } + if h.ConnectionClose() { + t.Fatalf("unexpected 'connection: close' for ancient http protocol") + } + + // complex headers with body + testRequestHeaderReadSuccess(t, h, "GET /aabar HTTP/1.1\r\nAAA: bbb\r\nHost: ole.com\r\nAA: bb\r\n\r\nzzz", + 0, "/aabar", "ole.com", "", "", "zzz") + if !h.IsHTTP11() { + t.Fatalf("expecting http/1.1 protocol") + } + if h.ConnectionClose() { + t.Fatalf("unexpected connection: close") + } + + // lf instead of crlf + testRequestHeaderReadSuccess(t, h, "GET /foo/bar HTTP/1.1\nHost: google.com\n\n", + 0, "/foo/bar", "google.com", "", "", "") + + // post method + testRequestHeaderReadSuccess(t, h, "POST /aaa?bbb HTTP/1.1\r\nHost: foobar.com\r\nContent-Length: 1235\r\nContent-Type: aaa\r\n\r\nabcdef", + 1235, "/aaa?bbb", "foobar.com", "", "aaa", "abcdef") + + // zero-length headers with mixed crlf and lf + 
testRequestHeaderReadSuccess(t, h, "GET /a HTTP/1.1\nHost: aaa\r\nZero: \n: Zero-Value\n\r\nxccv", + 0, "/a", "aaa", "", "", "xccv") + + // no space after colon + testRequestHeaderReadSuccess(t, h, "GET /a HTTP/1.1\nHost:aaaxd\n\nsdfds", + 0, "/a", "aaaxd", "", "", "sdfds") + + // get with zero content-length + testRequestHeaderReadSuccess(t, h, "GET /xxx HTTP/1.1\nHost: aaa.com\nContent-Length: 0\n\n", + 0, "/xxx", "aaa.com", "", "", "") + + // get with non-zero content-length + testRequestHeaderReadSuccess(t, h, "GET /xxx HTTP/1.1\nHost: aaa.com\nContent-Length: 123\n\n", + 0, "/xxx", "aaa.com", "", "", "") + + // invalid case + testRequestHeaderReadSuccess(t, h, "GET /aaa HTTP/1.1\nhoST: bbb.com\n\naas", + 0, "/aaa", "bbb.com", "", "", "aas") + + // referer + testRequestHeaderReadSuccess(t, h, "GET /asdf HTTP/1.1\nHost: aaa.com\nReferer: bb.com\n\naaa", + 0, "/asdf", "aaa.com", "bb.com", "", "aaa") + + // duplicate host + testRequestHeaderReadSuccess(t, h, "GET /aa HTTP/1.1\r\nHost: aaaaaa.com\r\nHost: bb.com\r\n\r\n", + 0, "/aa", "bb.com", "", "", "") + + // post with duplicate content-type + testRequestHeaderReadSuccess(t, h, "POST /a HTTP/1.1\r\nHost: aa\r\nContent-Type: ab\r\nContent-Length: 123\r\nContent-Type: xx\r\n\r\n", + 123, "/a", "aa", "", "xx", "") + + // post with duplicate content-length + testRequestHeaderReadSuccess(t, h, "POST /xx HTTP/1.1\r\nHost: aa\r\nContent-Type: s\r\nContent-Length: 13\r\nContent-Length: 1\r\n\r\n", + 1, "/xx", "aa", "", "s", "") + + // non-post with content-type + testRequestHeaderReadSuccess(t, h, "GET /aaa HTTP/1.1\r\nHost: bbb.com\r\nContent-Type: aaab\r\n\r\n", + 0, "/aaa", "bbb.com", "", "aaab", "") + + // non-post with content-length + testRequestHeaderReadSuccess(t, h, "HEAD / HTTP/1.1\r\nHost: aaa.com\r\nContent-Length: 123\r\n\r\n", + 0, "/", "aaa.com", "", "", "") + + // non-post with content-type and content-length + testRequestHeaderReadSuccess(t, h, "GET /aa HTTP/1.1\r\nHost: aa.com\r\nContent-Type: abd/test\r\nContent-Length: 123\r\n\r\n", + 0, "/aa", "aa.com", "", "abd/test", "") + + // request uri with hostname + testRequestHeaderReadSuccess(t, h, "GET http://gooGle.com/foO/%20bar?xxx#aaa HTTP/1.1\r\nHost: aa.cOM\r\n\r\ntrail", + 0, "http://gooGle.com/foO/%20bar?xxx#aaa", "aa.cOM", "", "", "trail") + + // no protocol in the first line + testRequestHeaderReadSuccess(t, h, "GET /foo/bar\r\nHost: google.com\r\n\r\nisdD", + 0, "/foo/bar", "google.com", "", "", "isdD") + + // blank lines before the first line + testRequestHeaderReadSuccess(t, h, "\r\n\n\r\nGET /aaa HTTP/1.1\r\nHost: aaa.com\r\n\r\nsss", + 0, "/aaa", "aaa.com", "", "", "sss") + + // request uri with spaces + testRequestHeaderReadSuccess(t, h, "GET /foo/ bar baz HTTP/1.1\r\nHost: aa.com\r\n\r\nxxx", + 0, "/foo/ bar baz", "aa.com", "", "", "xxx") + + // no host + testRequestHeaderReadSuccess(t, h, "GET /foo/bar HTTP/1.1\r\nFOObar: assdfd\r\n\r\naaa", + 0, "/foo/bar", "", "", "", "aaa") + + // no host, no headers + testRequestHeaderReadSuccess(t, h, "GET /foo/bar HTTP/1.1\r\n\r\nfoobar", + 0, "/foo/bar", "", "", "", "foobar") + + // post with invalid content-length + testRequestHeaderReadSuccess(t, h, "POST /a HTTP/1.1\r\nHost: bb\r\nContent-Type: aa\r\nContent-Length: dff\r\n\r\nqwerty", + -2, "/a", "bb", "", "aa", "qwerty") + + // post without content-length and content-type + testRequestHeaderReadSuccess(t, h, "POST /aaa HTTP/1.1\r\nHost: aaa.com\r\n\r\nzxc", + -2, "/aaa", "aaa.com", "", "", "zxc") + + // post without content-type + testRequestHeaderReadSuccess(t, h, 
"POST /abc HTTP/1.1\r\nHost: aa.com\r\nContent-Length: 123\r\n\r\npoiuy", + 123, "/abc", "aa.com", "", "", "poiuy") + + // post without content-length + testRequestHeaderReadSuccess(t, h, "POST /abc HTTP/1.1\r\nHost: aa.com\r\nContent-Type: adv\r\n\r\n123456", + -2, "/abc", "aa.com", "", "adv", "123456") + + // invalid method + testRequestHeaderReadSuccess(t, h, "POST /foo/bar HTTP/1.1\r\nHost: google.com\r\n\r\nmnbv", + -2, "/foo/bar", "google.com", "", "", "mnbv") + + // put request + testRequestHeaderReadSuccess(t, h, "PUT /faa HTTP/1.1\r\nHost: aaa.com\r\nContent-Length: 123\r\nContent-Type: aaa\r\n\r\nxwwere", + 123, "/faa", "aaa.com", "", "aaa", "xwwere") +} + +func TestResponseHeaderReadError(t *testing.T) { + h := &ResponseHeader{} + + // incorrect first line + testResponseHeaderReadError(t, h, "") + testResponseHeaderReadError(t, h, "fo") + testResponseHeaderReadError(t, h, "foobarbaz") + testResponseHeaderReadError(t, h, "HTTP/1.1") + testResponseHeaderReadError(t, h, "HTTP/1.1 ") + testResponseHeaderReadError(t, h, "HTTP/1.1 s") + + // non-numeric status code + testResponseHeaderReadError(t, h, "HTTP/1.1 foobar OK\r\nContent-Length: 123\r\nContent-Type: text/html\r\n\r\n") + testResponseHeaderReadError(t, h, "HTTP/1.1 123foobar OK\r\nContent-Length: 123\r\nContent-Type: text/html\r\n\r\n") + testResponseHeaderReadError(t, h, "HTTP/1.1 foobar344 OK\r\nContent-Length: 123\r\nContent-Type: text/html\r\n\r\n") + + // no headers + testResponseHeaderReadError(t, h, "HTTP/1.1 200 OK\r\n") + + // no trailing crlf + testResponseHeaderReadError(t, h, "HTTP/1.1 200 OK\r\nContent-Length: 123\r\nContent-Type: text/html\r\n") +} + +func TestRequestHeaderReadError(t *testing.T) { + h := &RequestHeader{} + + // incorrect first line + testRequestHeaderReadError(t, h, "") + testRequestHeaderReadError(t, h, "fo") + testRequestHeaderReadError(t, h, "GET ") + testRequestHeaderReadError(t, h, "GET / HTTP/1.1\r") + + // missing RequestURI + testRequestHeaderReadError(t, h, "GET HTTP/1.1\r\nHost: google.com\r\n\r\n") +} + +func testResponseHeaderReadError(t *testing.T, h *ResponseHeader, headers string) { + r := bytes.NewBufferString(headers) + br := bufio.NewReader(r) + err := h.Read(br) + if err == nil { + t.Fatalf("Expecting error when reading response header %q", headers) + } + + // make sure response header works after error + testResponseHeaderReadSuccess(t, h, "HTTP/1.1 200 OK\r\nContent-Type: foo/bar\r\nContent-Length: 12345\r\n\r\nsss", + 200, 12345, "foo/bar", "sss") +} + +func testRequestHeaderReadError(t *testing.T, h *RequestHeader, headers string) { + r := bytes.NewBufferString(headers) + br := bufio.NewReader(r) + err := h.Read(br) + if err == nil { + t.Fatalf("Expecting error when reading request header %q", headers) + } + + // make sure request header works after error + testRequestHeaderReadSuccess(t, h, "GET /foo/bar HTTP/1.1\r\nHost: aaaa\r\n\r\nxxx", + 0, "/foo/bar", "aaaa", "", "", "xxx") +} + +func testResponseHeaderReadSuccess(t *testing.T, h *ResponseHeader, headers string, expectedStatusCode, expectedContentLength int, + expectedContentType, expectedTrailer string) { + r := bytes.NewBufferString(headers) + br := bufio.NewReader(r) + err := h.Read(br) + if err != nil { + t.Fatalf("Unexpected error when parsing response headers: %s. 
headers=%q", err, headers) + } + verifyResponseHeader(t, h, expectedStatusCode, expectedContentLength, expectedContentType) + verifyTrailer(t, br, expectedTrailer) +} + +func testRequestHeaderReadSuccess(t *testing.T, h *RequestHeader, headers string, expectedContentLength int, + expectedRequestURI, expectedHost, expectedReferer, expectedContentType, expectedTrailer string) { + r := bytes.NewBufferString(headers) + br := bufio.NewReader(r) + err := h.Read(br) + if err != nil { + t.Fatalf("Unexpected error when parsing request headers: %s. headers=%q", err, headers) + } + verifyRequestHeader(t, h, expectedContentLength, expectedRequestURI, expectedHost, expectedReferer, expectedContentType) + verifyTrailer(t, br, expectedTrailer) +} + +func verifyResponseHeader(t *testing.T, h *ResponseHeader, expectedStatusCode, expectedContentLength int, expectedContentType string) { + if h.StatusCode() != expectedStatusCode { + t.Fatalf("Unexpected status code %d. Expected %d", h.StatusCode(), expectedStatusCode) + } + if h.ContentLength() != expectedContentLength { + t.Fatalf("Unexpected content length %d. Expected %d", h.ContentLength(), expectedContentLength) + } + if string(h.Peek("Content-Type")) != expectedContentType { + t.Fatalf("Unexpected content type %q. Expected %q", h.Peek("Content-Type"), expectedContentType) + } +} + +func verifyRequestHeader(t *testing.T, h *RequestHeader, expectedContentLength int, + expectedRequestURI, expectedHost, expectedReferer, expectedContentType string) { + if h.ContentLength() != expectedContentLength { + t.Fatalf("Unexpected Content-Length %d. Expected %d", h.ContentLength(), expectedContentLength) + } + if string(h.RequestURI()) != expectedRequestURI { + t.Fatalf("Unexpected RequestURI %q. Expected %q", h.RequestURI(), expectedRequestURI) + } + if string(h.Peek("Host")) != expectedHost { + t.Fatalf("Unexpected host %q. Expected %q", h.Peek("Host"), expectedHost) + } + if string(h.Peek("Referer")) != expectedReferer { + t.Fatalf("Unexpected referer %q. Expected %q", h.Peek("Referer"), expectedReferer) + } + if string(h.Peek("Content-Type")) != expectedContentType { + t.Fatalf("Unexpected content-type %q. Expected %q", h.Peek("Content-Type"), expectedContentType) + } +} + +func verifyTrailer(t *testing.T, r *bufio.Reader, expectedTrailer string) { + trailer, err := ioutil.ReadAll(r) + if err != nil { + t.Fatalf("Cannot read trailer: %s", err) + } + if !bytes.Equal(trailer, []byte(expectedTrailer)) { + t.Fatalf("Unexpected trailer %q. 
Expected %q", trailer, expectedTrailer) + } +} diff --git a/vendor/github.com/erikdubbelboer/fasthttp/header_timing_test.go b/vendor/github.com/erikdubbelboer/fasthttp/header_timing_test.go new file mode 100644 index 0000000..a6f7a11 --- /dev/null +++ b/vendor/github.com/erikdubbelboer/fasthttp/header_timing_test.go @@ -0,0 +1,146 @@ +package fasthttp + +import ( + "bufio" + "bytes" + "io" + "testing" +) + +var strFoobar = []byte("foobar.com") + +type benchReadBuf struct { + s []byte + n int +} + +func (r *benchReadBuf) Read(p []byte) (int, error) { + if r.n == len(r.s) { + return 0, io.EOF + } + + n := copy(p, r.s[r.n:]) + r.n += n + return n, nil +} + +func BenchmarkRequestHeaderRead(b *testing.B) { + b.RunParallel(func(pb *testing.PB) { + var h RequestHeader + buf := &benchReadBuf{ + s: []byte("GET /foo/bar HTTP/1.1\r\nHost: foobar.com\r\nUser-Agent: aaa.bbb\r\nReferer: http://google.com/aaa/bbb\r\n\r\n"), + } + br := bufio.NewReader(buf) + for pb.Next() { + buf.n = 0 + br.Reset(buf) + if err := h.Read(br); err != nil { + b.Fatalf("unexpected error when reading header: %s", err) + } + } + }) +} + +func BenchmarkResponseHeaderRead(b *testing.B) { + b.RunParallel(func(pb *testing.PB) { + var h ResponseHeader + buf := &benchReadBuf{ + s: []byte("HTTP/1.1 200 OK\r\nContent-Type: text/html\r\nContent-Length: 1256\r\nServer: aaa 1/2.3\r\nTest: 1.2.3\r\n\r\n"), + } + br := bufio.NewReader(buf) + for pb.Next() { + buf.n = 0 + br.Reset(buf) + if err := h.Read(br); err != nil { + b.Fatalf("unexpected error when reading header: %s", err) + } + } + }) +} + +func BenchmarkRequestHeaderWrite(b *testing.B) { + b.RunParallel(func(pb *testing.PB) { + var h RequestHeader + h.SetRequestURI("/foo/bar") + h.SetHost("foobar.com") + h.SetUserAgent("aaa.bbb") + h.SetReferer("http://google.com/aaa/bbb") + var w ByteBuffer + for pb.Next() { + if _, err := h.WriteTo(&w); err != nil { + b.Fatalf("unexpected error when writing header: %s", err) + } + w.Reset() + } + }) +} + +func BenchmarkResponseHeaderWrite(b *testing.B) { + b.RunParallel(func(pb *testing.PB) { + var h ResponseHeader + h.SetStatusCode(200) + h.SetContentType("text/html") + h.SetContentLength(1256) + h.SetServer("aaa 1/2.3") + h.Set("Test", "1.2.3") + var w ByteBuffer + for pb.Next() { + if _, err := h.WriteTo(&w); err != nil { + b.Fatalf("unexpected error when writing header: %s", err) + } + w.Reset() + } + }) +} + +func BenchmarkRequestHeaderPeekBytesCanonical(b *testing.B) { + b.RunParallel(func(pb *testing.PB) { + var h RequestHeader + h.SetBytesV("Host", strFoobar) + for pb.Next() { + v := h.PeekBytes(strHost) + if !bytes.Equal(v, strFoobar) { + b.Fatalf("unexpected result: %q. Expected %q", v, strFoobar) + } + } + }) +} + +func BenchmarkRequestHeaderPeekBytesNonCanonical(b *testing.B) { + b.RunParallel(func(pb *testing.PB) { + var h RequestHeader + h.SetBytesV("Host", strFoobar) + hostBytes := []byte("HOST") + for pb.Next() { + v := h.PeekBytes(hostBytes) + if !bytes.Equal(v, strFoobar) { + b.Fatalf("unexpected result: %q. 
Expected %q", v, strFoobar) + } + } + }) +} + +func BenchmarkNormalizeHeaderKeyCommonCase(b *testing.B) { + src := []byte("User-Agent-Host-Content-Type-Content-Length-Server") + benchmarkNormalizeHeaderKey(b, src) +} + +func BenchmarkNormalizeHeaderKeyLowercase(b *testing.B) { + src := []byte("user-agent-host-content-type-content-length-server") + benchmarkNormalizeHeaderKey(b, src) +} + +func BenchmarkNormalizeHeaderKeyUppercase(b *testing.B) { + src := []byte("USER-AGENT-HOST-CONTENT-TYPE-CONTENT-LENGTH-SERVER") + benchmarkNormalizeHeaderKey(b, src) +} + +func benchmarkNormalizeHeaderKey(b *testing.B, src []byte) { + b.RunParallel(func(pb *testing.PB) { + buf := make([]byte, len(src)) + for pb.Next() { + copy(buf, src) + normalizeHeaderKey(buf, false) + } + }) +} diff --git a/vendor/github.com/erikdubbelboer/fasthttp/http.go b/vendor/github.com/erikdubbelboer/fasthttp/http.go new file mode 100644 index 0000000..cdff191 --- /dev/null +++ b/vendor/github.com/erikdubbelboer/fasthttp/http.go @@ -0,0 +1,1717 @@ +package fasthttp + +import ( + "bufio" + "bytes" + "errors" + "fmt" + "io" + "mime/multipart" + "os" + "sync" + + "github.com/valyala/bytebufferpool" +) + +// Request represents HTTP request. +// +// It is forbidden copying Request instances. Create new instances +// and use CopyTo instead. +// +// Request instance MUST NOT be used from concurrently running goroutines. +type Request struct { + noCopy noCopy + + // Request header + // + // Copying Header by value is forbidden. Use pointer to Header instead. + Header RequestHeader + + uri URI + postArgs Args + + bodyStream io.Reader + w requestBodyWriter + body *bytebufferpool.ByteBuffer + + multipartForm *multipart.Form + multipartFormBoundary string + + // Group bool members in order to reduce Request object size. + parsedURI bool + parsedPostArgs bool + + keepBodyBuffer bool + + isTLS bool + + // To detect scheme changes in redirects + schemaUpdate bool +} + +// Response represents HTTP response. +// +// It is forbidden copying Response instances. Create new instances +// and use CopyTo instead. +// +// Response instance MUST NOT be used from concurrently running goroutines. +type Response struct { + noCopy noCopy + + // Response header + // + // Copying Header by value is forbidden. Use pointer to Header instead. + Header ResponseHeader + + bodyStream io.Reader + w responseBodyWriter + body *bytebufferpool.ByteBuffer + + // Response.Read() skips reading body if set to true. + // Use it for reading HEAD responses. + // + // Response.Write() skips writing body if set to true. + // Use it for writing HEAD responses. + SkipBody bool + + keepBodyBuffer bool +} + +// SetHost sets host for the request. +func (req *Request) SetHost(host string) { + req.URI().SetHost(host) +} + +// SetHostBytes sets host for the request. +func (req *Request) SetHostBytes(host []byte) { + req.URI().SetHostBytes(host) +} + +// Host returns the host for the given request. +func (req *Request) Host() []byte { + return req.URI().Host() +} + +// SetRequestURI sets RequestURI. +func (req *Request) SetRequestURI(requestURI string) { + req.Header.SetRequestURI(requestURI) + req.parsedURI = false +} + +// SetRequestURIBytes sets RequestURI. +func (req *Request) SetRequestURIBytes(requestURI []byte) { + req.Header.SetRequestURIBytes(requestURI) + req.parsedURI = false +} + +// RequestURI returns request's URI. 
+func (req *Request) RequestURI() []byte { + if req.parsedURI { + requestURI := req.uri.RequestURI() + req.SetRequestURIBytes(requestURI) + } + return req.Header.RequestURI() +} + +// StatusCode returns response status code. +func (resp *Response) StatusCode() int { + return resp.Header.StatusCode() +} + +// SetStatusCode sets response status code. +func (resp *Response) SetStatusCode(statusCode int) { + resp.Header.SetStatusCode(statusCode) +} + +// ConnectionClose returns true if 'Connection: close' header is set. +func (resp *Response) ConnectionClose() bool { + return resp.Header.ConnectionClose() +} + +// SetConnectionClose sets 'Connection: close' header. +func (resp *Response) SetConnectionClose() { + resp.Header.SetConnectionClose() +} + +// ConnectionClose returns true if 'Connection: close' header is set. +func (req *Request) ConnectionClose() bool { + return req.Header.ConnectionClose() +} + +// SetConnectionClose sets 'Connection: close' header. +func (req *Request) SetConnectionClose() { + req.Header.SetConnectionClose() +} + +// SendFile registers file on the given path to be used as response body +// when Write is called. +// +// Note that SendFile doesn't set Content-Type, so set it yourself +// with Header.SetContentType. +func (resp *Response) SendFile(path string) error { + f, err := os.Open(path) + if err != nil { + return err + } + fileInfo, err := f.Stat() + if err != nil { + f.Close() + return err + } + size64 := fileInfo.Size() + size := int(size64) + if int64(size) != size64 { + size = -1 + } + + resp.Header.SetLastModified(fileInfo.ModTime()) + resp.SetBodyStream(f, size) + return nil +} + +// SetBodyStream sets request body stream and, optionally body size. +// +// If bodySize is >= 0, then the bodyStream must provide exactly bodySize bytes +// before returning io.EOF. +// +// If bodySize < 0, then bodyStream is read until io.EOF. +// +// bodyStream.Close() is called after finishing reading all body data +// if it implements io.Closer. +// +// Note that GET and HEAD requests cannot have body. +// +// See also SetBodyStreamWriter. +func (req *Request) SetBodyStream(bodyStream io.Reader, bodySize int) { + req.ResetBody() + req.bodyStream = bodyStream + req.Header.SetContentLength(bodySize) +} + +// SetBodyStream sets response body stream and, optionally body size. +// +// If bodySize is >= 0, then the bodyStream must provide exactly bodySize bytes +// before returning io.EOF. +// +// If bodySize < 0, then bodyStream is read until io.EOF. +// +// bodyStream.Close() is called after finishing reading all body data +// if it implements io.Closer. +// +// See also SetBodyStreamWriter. +func (resp *Response) SetBodyStream(bodyStream io.Reader, bodySize int) { + resp.ResetBody() + resp.bodyStream = bodyStream + resp.Header.SetContentLength(bodySize) +} + +// IsBodyStream returns true if body is set via SetBodyStream* +func (req *Request) IsBodyStream() bool { + return req.bodyStream != nil +} + +// IsBodyStream returns true if body is set via SetBodyStream* +func (resp *Response) IsBodyStream() bool { + return resp.bodyStream != nil +} + +// SetBodyStreamWriter registers the given sw for populating request body. +// +// This function may be used in the following cases: +// +// * if request body is too big (more than 10MB). +// * if request body is streamed from slow external sources. +// * if request body must be streamed to the server in chunks +// (aka `http client push` or `chunked transfer-encoding`). +// +// Note that GET and HEAD requests cannot have body. 
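SendFile above only opens the file, records Last-Modified and wires the handle in through SetBodyStream, so the Content-Type is left to the caller. A hedged sketch inside a request handler; the handler name, file path and the RequestCtx/Error/status-constant plumbing come from fasthttp's server API and are assumptions here, not part of this file:

func serveReport(ctx *fasthttp.RequestCtx) {
	// SendFile does not set Content-Type; set it explicitly.
	ctx.Response.Header.SetContentType("application/pdf")
	if err := ctx.Response.SendFile("/var/data/report.pdf"); err != nil {
		ctx.Error("cannot send file", fasthttp.StatusInternalServerError)
	}
}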
+// +/// See also SetBodyStream. +func (req *Request) SetBodyStreamWriter(sw StreamWriter) { + sr := NewStreamReader(sw) + req.SetBodyStream(sr, -1) +} + +// SetBodyStreamWriter registers the given sw for populating response body. +// +// This function may be used in the following cases: +// +// * if response body is too big (more than 10MB). +// * if response body is streamed from slow external sources. +// * if response body must be streamed to the client in chunks +// (aka `http server push` or `chunked transfer-encoding`). +// +// See also SetBodyStream. +func (resp *Response) SetBodyStreamWriter(sw StreamWriter) { + sr := NewStreamReader(sw) + resp.SetBodyStream(sr, -1) +} + +// BodyWriter returns writer for populating response body. +// +// If used inside RequestHandler, the returned writer must not be used +// after returning from RequestHandler. Use RequestCtx.Write +// or SetBodyStreamWriter in this case. +func (resp *Response) BodyWriter() io.Writer { + resp.w.r = resp + return &resp.w +} + +// BodyWriter returns writer for populating request body. +func (req *Request) BodyWriter() io.Writer { + req.w.r = req + return &req.w +} + +type responseBodyWriter struct { + r *Response +} + +func (w *responseBodyWriter) Write(p []byte) (int, error) { + w.r.AppendBody(p) + return len(p), nil +} + +type requestBodyWriter struct { + r *Request +} + +func (w *requestBodyWriter) Write(p []byte) (int, error) { + w.r.AppendBody(p) + return len(p), nil +} + +// Body returns response body. +// +// The returned body is valid until the response modification. +func (resp *Response) Body() []byte { + if resp.bodyStream != nil { + bodyBuf := resp.bodyBuffer() + bodyBuf.Reset() + _, err := copyZeroAlloc(bodyBuf, resp.bodyStream) + resp.closeBodyStream() + if err != nil { + bodyBuf.SetString(err.Error()) + } + } + return resp.bodyBytes() +} + +func (resp *Response) bodyBytes() []byte { + if resp.body == nil { + return nil + } + return resp.body.B +} + +func (req *Request) bodyBytes() []byte { + if req.body == nil { + return nil + } + return req.body.B +} + +func (resp *Response) bodyBuffer() *bytebufferpool.ByteBuffer { + if resp.body == nil { + resp.body = responseBodyPool.Get() + } + return resp.body +} + +func (req *Request) bodyBuffer() *bytebufferpool.ByteBuffer { + if req.body == nil { + req.body = requestBodyPool.Get() + } + return req.body +} + +var ( + responseBodyPool bytebufferpool.Pool + requestBodyPool bytebufferpool.Pool +) + +// BodyGunzip returns un-gzipped body data. +// +// This method may be used if the request header contains +// 'Content-Encoding: gzip' for reading un-gzipped body. +// Use Body for reading gzipped request body. +func (req *Request) BodyGunzip() ([]byte, error) { + return gunzipData(req.Body()) +} + +// BodyGunzip returns un-gzipped body data. +// +// This method may be used if the response header contains +// 'Content-Encoding: gzip' for reading un-gzipped body. +// Use Body for reading gzipped response body. +func (resp *Response) BodyGunzip() ([]byte, error) { + return gunzipData(resp.Body()) +} + +func gunzipData(p []byte) ([]byte, error) { + var bb ByteBuffer + _, err := WriteGunzip(&bb, p) + if err != nil { + return nil, err + } + return bb.B, nil +} + +// BodyInflate returns inflated body data. +// +// This method may be used if the response header contains +// 'Content-Encoding: deflate' for reading inflated request body. +// Use Body for reading deflated request body. 
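SetBodyStreamWriter is the push-style counterpart of SetBodyStream: the callback receives a *bufio.Writer, and data flushed there can reach the peer before the whole body has been produced. A small sketch of streaming a response body of unknown length (bufio, fmt and the vendored fasthttp import are assumed):

func streamNumbers(resp *fasthttp.Response) {
	resp.SetBodyStreamWriter(func(w *bufio.Writer) {
		for i := 0; i < 10; i++ {
			fmt.Fprintf(w, "line %d\n", i)
			// Flushing inside the writer lets data go out before the
			// body is complete (chunked transfer-encoding path).
			if err := w.Flush(); err != nil {
				return
			}
		}
	})
}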
+func (req *Request) BodyInflate() ([]byte, error) { + return inflateData(req.Body()) +} + +// BodyInflate returns inflated body data. +// +// This method may be used if the response header contains +// 'Content-Encoding: deflate' for reading inflated response body. +// Use Body for reading deflated response body. +func (resp *Response) BodyInflate() ([]byte, error) { + return inflateData(resp.Body()) +} + +func inflateData(p []byte) ([]byte, error) { + var bb ByteBuffer + _, err := WriteInflate(&bb, p) + if err != nil { + return nil, err + } + return bb.B, nil +} + +// BodyWriteTo writes request body to w. +func (req *Request) BodyWriteTo(w io.Writer) error { + if req.bodyStream != nil { + _, err := copyZeroAlloc(w, req.bodyStream) + req.closeBodyStream() + return err + } + if req.onlyMultipartForm() { + return WriteMultipartForm(w, req.multipartForm, req.multipartFormBoundary) + } + _, err := w.Write(req.bodyBytes()) + return err +} + +// BodyWriteTo writes response body to w. +func (resp *Response) BodyWriteTo(w io.Writer) error { + if resp.bodyStream != nil { + _, err := copyZeroAlloc(w, resp.bodyStream) + resp.closeBodyStream() + return err + } + _, err := w.Write(resp.bodyBytes()) + return err +} + +// AppendBody appends p to response body. +// +// It is safe re-using p after the function returns. +func (resp *Response) AppendBody(p []byte) { + resp.AppendBodyString(b2s(p)) +} + +// AppendBodyString appends s to response body. +func (resp *Response) AppendBodyString(s string) { + resp.closeBodyStream() + resp.bodyBuffer().WriteString(s) +} + +// SetBody sets response body. +// +// It is safe re-using body argument after the function returns. +func (resp *Response) SetBody(body []byte) { + resp.SetBodyString(b2s(body)) +} + +// SetBodyString sets response body. +func (resp *Response) SetBodyString(body string) { + resp.closeBodyStream() + bodyBuf := resp.bodyBuffer() + bodyBuf.Reset() + bodyBuf.WriteString(body) +} + +// ResetBody resets response body. +func (resp *Response) ResetBody() { + resp.closeBodyStream() + if resp.body != nil { + if resp.keepBodyBuffer { + resp.body.Reset() + } else { + responseBodyPool.Put(resp.body) + resp.body = nil + } + } +} + +// ReleaseBody retires the response body if it is greater than "size" bytes. +// +// This permits GC to reclaim the large buffer. If used, must be before +// ReleaseResponse. +// +// Use this method only if you really understand how it works. +// The majority of workloads don't need this method. +func (resp *Response) ReleaseBody(size int) { + if cap(resp.body.B) > size { + resp.closeBodyStream() + resp.body = nil + } +} + +// ReleaseBody retires the request body if it is greater than "size" bytes. +// +// This permits GC to reclaim the large buffer. If used, must be before +// ReleaseRequest. +// +// Use this method only if you really understand how it works. +// The majority of workloads don't need this method. +func (req *Request) ReleaseBody(size int) { + if cap(req.body.B) > size { + req.closeBodyStream() + req.body = nil + } +} + +// SwapBody swaps response body with the given body and returns +// the previous response body. +// +// It is forbidden to use the body passed to SwapBody after +// the function returns. 
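Body always returns the raw, possibly compressed bytes; BodyGunzip and BodyInflate are the accessors for gzip- and deflate-encoded payloads. A brief sketch that picks the right accessor from the Content-Encoding header:

func decodedBody(resp *fasthttp.Response) ([]byte, error) {
	switch string(resp.Header.Peek("Content-Encoding")) {
	case "gzip":
		return resp.BodyGunzip()
	case "deflate":
		return resp.BodyInflate()
	default:
		return resp.Body(), nil
	}
}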
+func (resp *Response) SwapBody(body []byte) []byte { + bb := resp.bodyBuffer() + + if resp.bodyStream != nil { + bb.Reset() + _, err := copyZeroAlloc(bb, resp.bodyStream) + resp.closeBodyStream() + if err != nil { + bb.Reset() + bb.SetString(err.Error()) + } + } + + oldBody := bb.B + bb.B = body + return oldBody +} + +// SwapBody swaps request body with the given body and returns +// the previous request body. +// +// It is forbidden to use the body passed to SwapBody after +// the function returns. +func (req *Request) SwapBody(body []byte) []byte { + bb := req.bodyBuffer() + + if req.bodyStream != nil { + bb.Reset() + _, err := copyZeroAlloc(bb, req.bodyStream) + req.closeBodyStream() + if err != nil { + bb.Reset() + bb.SetString(err.Error()) + } + } + + oldBody := bb.B + bb.B = body + return oldBody +} + +// Body returns request body. +// +// The returned body is valid until the request modification. +func (req *Request) Body() []byte { + if req.bodyStream != nil { + bodyBuf := req.bodyBuffer() + bodyBuf.Reset() + _, err := copyZeroAlloc(bodyBuf, req.bodyStream) + req.closeBodyStream() + if err != nil { + bodyBuf.SetString(err.Error()) + } + } else if req.onlyMultipartForm() { + body, err := marshalMultipartForm(req.multipartForm, req.multipartFormBoundary) + if err != nil { + return []byte(err.Error()) + } + return body + } + return req.bodyBytes() +} + +// AppendBody appends p to request body. +// +// It is safe re-using p after the function returns. +func (req *Request) AppendBody(p []byte) { + req.AppendBodyString(b2s(p)) +} + +// AppendBodyString appends s to request body. +func (req *Request) AppendBodyString(s string) { + req.RemoveMultipartFormFiles() + req.closeBodyStream() + req.bodyBuffer().WriteString(s) +} + +// SetBody sets request body. +// +// It is safe re-using body argument after the function returns. +func (req *Request) SetBody(body []byte) { + req.SetBodyString(b2s(body)) +} + +// SetBodyString sets request body. +func (req *Request) SetBodyString(body string) { + req.RemoveMultipartFormFiles() + req.closeBodyStream() + req.bodyBuffer().SetString(body) +} + +// ResetBody resets request body. +func (req *Request) ResetBody() { + req.RemoveMultipartFormFiles() + req.closeBodyStream() + if req.body != nil { + if req.keepBodyBuffer { + req.body.Reset() + } else { + requestBodyPool.Put(req.body) + req.body = nil + } + } +} + +// CopyTo copies req contents to dst except of body stream. +func (req *Request) CopyTo(dst *Request) { + req.copyToSkipBody(dst) + if req.body != nil { + dst.bodyBuffer().Set(req.body.B) + } else if dst.body != nil { + dst.body.Reset() + } +} + +func (req *Request) copyToSkipBody(dst *Request) { + dst.Reset() + req.Header.CopyTo(&dst.Header) + + req.uri.CopyTo(&dst.uri) + dst.parsedURI = req.parsedURI + + req.postArgs.CopyTo(&dst.postArgs) + dst.parsedPostArgs = req.parsedPostArgs + dst.isTLS = req.isTLS + + // do not copy multipartForm - it will be automatically + // re-created on the first call to MultipartForm. +} + +// CopyTo copies resp contents to dst except of body stream. 
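CopyTo produces an independent copy (headers, URI, POST args and the buffered body) while deliberately skipping body streams and multipart forms, which makes it the sanctioned way to keep a request around, for example before a retry. A brief sketch; AcquireRequest is fasthttp's pool helper and an assumption here:

func withBackup(req *fasthttp.Request) *fasthttp.Request {
	// The copy owns its own buffers, so releasing or mutating req later
	// does not invalidate the backup.
	backup := fasthttp.AcquireRequest()
	req.CopyTo(backup)
	return backup
}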
+func (resp *Response) CopyTo(dst *Response) { + resp.copyToSkipBody(dst) + if resp.body != nil { + dst.bodyBuffer().Set(resp.body.B) + } else if dst.body != nil { + dst.body.Reset() + } +} + +func (resp *Response) copyToSkipBody(dst *Response) { + dst.Reset() + resp.Header.CopyTo(&dst.Header) + dst.SkipBody = resp.SkipBody +} + +func swapRequestBody(a, b *Request) { + a.body, b.body = b.body, a.body + a.bodyStream, b.bodyStream = b.bodyStream, a.bodyStream +} + +func swapResponseBody(a, b *Response) { + a.body, b.body = b.body, a.body + a.bodyStream, b.bodyStream = b.bodyStream, a.bodyStream +} + +// URI returns request URI +func (req *Request) URI() *URI { + req.parseURI() + return &req.uri +} + +func (req *Request) parseURI() { + if req.parsedURI { + return + } + req.parsedURI = true + + req.uri.parseQuick(req.Header.RequestURI(), &req.Header, req.isTLS) +} + +// PostArgs returns POST arguments. +func (req *Request) PostArgs() *Args { + req.parsePostArgs() + return &req.postArgs +} + +func (req *Request) parsePostArgs() { + if req.parsedPostArgs { + return + } + req.parsedPostArgs = true + + if !bytes.HasPrefix(req.Header.ContentType(), strPostArgsContentType) { + return + } + req.postArgs.ParseBytes(req.bodyBytes()) +} + +// ErrNoMultipartForm means that the request's Content-Type +// isn't 'multipart/form-data'. +var ErrNoMultipartForm = errors.New("request has no multipart/form-data Content-Type") + +// MultipartForm returns requests's multipart form. +// +// Returns ErrNoMultipartForm if request's Content-Type +// isn't 'multipart/form-data'. +// +// RemoveMultipartFormFiles must be called after returned multipart form +// is processed. +func (req *Request) MultipartForm() (*multipart.Form, error) { + if req.multipartForm != nil { + return req.multipartForm, nil + } + + req.multipartFormBoundary = string(req.Header.MultipartFormBoundary()) + if len(req.multipartFormBoundary) == 0 { + return nil, ErrNoMultipartForm + } + + ce := req.Header.peek(strContentEncoding) + body := req.bodyBytes() + if bytes.Equal(ce, strGzip) { + // Do not care about memory usage here. + var err error + if body, err = AppendGunzipBytes(nil, body); err != nil { + return nil, fmt.Errorf("cannot gunzip request body: %s", err) + } + } else if len(ce) > 0 { + return nil, fmt.Errorf("unsupported Content-Encoding: %q", ce) + } + + f, err := readMultipartForm(bytes.NewReader(body), req.multipartFormBoundary, len(body), len(body)) + if err != nil { + return nil, err + } + req.multipartForm = f + return f, nil +} + +func marshalMultipartForm(f *multipart.Form, boundary string) ([]byte, error) { + var buf ByteBuffer + if err := WriteMultipartForm(&buf, f, boundary); err != nil { + return nil, err + } + return buf.B, nil +} + +// WriteMultipartForm writes the given multipart form f with the given +// boundary to w. +func WriteMultipartForm(w io.Writer, f *multipart.Form, boundary string) error { + // Do not care about memory allocations here, since multipart + // form processing is slooow. 
+ if len(boundary) == 0 { + panic("BUG: form boundary cannot be empty") + } + + mw := multipart.NewWriter(w) + if err := mw.SetBoundary(boundary); err != nil { + return fmt.Errorf("cannot use form boundary %q: %s", boundary, err) + } + + // marshal values + for k, vv := range f.Value { + for _, v := range vv { + if err := mw.WriteField(k, v); err != nil { + return fmt.Errorf("cannot write form field %q value %q: %s", k, v, err) + } + } + } + + // marshal files + for k, fvv := range f.File { + for _, fv := range fvv { + vw, err := mw.CreateFormFile(k, fv.Filename) + if err != nil { + return fmt.Errorf("cannot create form file %q (%q): %s", k, fv.Filename, err) + } + fh, err := fv.Open() + if err != nil { + return fmt.Errorf("cannot open form file %q (%q): %s", k, fv.Filename, err) + } + if _, err = copyZeroAlloc(vw, fh); err != nil { + return fmt.Errorf("error when copying form file %q (%q): %s", k, fv.Filename, err) + } + if err = fh.Close(); err != nil { + return fmt.Errorf("cannot close form file %q (%q): %s", k, fv.Filename, err) + } + } + } + + if err := mw.Close(); err != nil { + return fmt.Errorf("error when closing multipart form writer: %s", err) + } + + return nil +} + +func readMultipartForm(r io.Reader, boundary string, size, maxInMemoryFileSize int) (*multipart.Form, error) { + // Do not care about memory allocations here, since they are tiny + // compared to multipart data (aka multi-MB files) usually sent + // in multipart/form-data requests. + + if size <= 0 { + panic(fmt.Sprintf("BUG: form size must be greater than 0. Given %d", size)) + } + lr := io.LimitReader(r, int64(size)) + mr := multipart.NewReader(lr, boundary) + f, err := mr.ReadForm(int64(maxInMemoryFileSize)) + if err != nil { + return nil, fmt.Errorf("cannot read multipart/form-data body: %s", err) + } + return f, nil +} + +// Reset clears request contents. +func (req *Request) Reset() { + req.Header.Reset() + req.resetSkipHeader() +} + +func (req *Request) resetSkipHeader() { + req.ResetBody() + req.uri.Reset() + req.parsedURI = false + req.postArgs.Reset() + req.parsedPostArgs = false + req.isTLS = false +} + +// RemoveMultipartFormFiles removes multipart/form-data temporary files +// associated with the request. +func (req *Request) RemoveMultipartFormFiles() { + if req.multipartForm != nil { + // Do not check for error, since these files may be deleted or moved + // to new places by user code. + req.multipartForm.RemoveAll() + req.multipartForm = nil + } + req.multipartFormBoundary = "" +} + +// Reset clears response contents. +func (resp *Response) Reset() { + resp.Header.Reset() + resp.resetSkipHeader() + resp.SkipBody = false +} + +func (resp *Response) resetSkipHeader() { + resp.ResetBody() +} + +// Read reads request (including body) from the given r. +// +// RemoveMultipartFormFiles or Reset must be called after +// reading multipart/form-data request in order to delete temporarily +// uploaded files. +// +// If MayContinue returns true, the caller must: +// +// - Either send StatusExpectationFailed response if request headers don't +// satisfy the caller. +// - Or send StatusContinue response before reading request body +// with ContinueReadBody. +// - Or close the connection. +// +// io.EOF is returned if r is closed before reading the first header byte. 
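MultipartForm (defined a little earlier) parses the body on first use and caches the resulting *multipart.Form, while RemoveMultipartFormFiles above is what actually deletes the temporary files that large uploads are spooled into. A handler-side sketch; the handler name and field handling are illustrative, and RequestCtx/Error/StatusBadRequest come from fasthttp's server API:

func handleUpload(ctx *fasthttp.RequestCtx) {
	form, err := ctx.Request.MultipartForm()
	if err != nil {
		ctx.Error("expected multipart/form-data", fasthttp.StatusBadRequest)
		return
	}
	// Required once the form has been processed: removes temporary files.
	defer ctx.Request.RemoveMultipartFormFiles()

	for name, values := range form.Value {
		fmt.Printf("field %q = %q\n", name, values)
	}
	for name, files := range form.File {
		fmt.Printf("file field %q carries %d file(s)\n", name, len(files))
	}
}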
+func (req *Request) Read(r *bufio.Reader) error { + return req.ReadLimitBody(r, 0) +} + +const defaultMaxInMemoryFileSize = 16 * 1024 * 1024 + +var errGetOnly = errors.New("non-GET request received") + +// ReadLimitBody reads request from the given r, limiting the body size. +// +// If maxBodySize > 0 and the body size exceeds maxBodySize, +// then ErrBodyTooLarge is returned. +// +// RemoveMultipartFormFiles or Reset must be called after +// reading multipart/form-data request in order to delete temporarily +// uploaded files. +// +// If MayContinue returns true, the caller must: +// +// - Either send StatusExpectationFailed response if request headers don't +// satisfy the caller. +// - Or send StatusContinue response before reading request body +// with ContinueReadBody. +// - Or close the connection. +// +// io.EOF is returned if r is closed before reading the first header byte. +func (req *Request) ReadLimitBody(r *bufio.Reader, maxBodySize int) error { + req.resetSkipHeader() + return req.readLimitBody(r, maxBodySize, false) +} + +func (req *Request) readLimitBody(r *bufio.Reader, maxBodySize int, getOnly bool) error { + // Do not reset the request here - the caller must reset it before + // calling this method. + + err := req.Header.Read(r) + if err != nil { + return err + } + if getOnly && !req.Header.IsGet() { + return errGetOnly + } + + if req.MayContinue() { + // 'Expect: 100-continue' header found. Let the caller deciding + // whether to read request body or + // to return StatusExpectationFailed. + return nil + } + + return req.ContinueReadBody(r, maxBodySize) +} + +// MayContinue returns true if the request contains +// 'Expect: 100-continue' header. +// +// The caller must do one of the following actions if MayContinue returns true: +// +// - Either send StatusExpectationFailed response if request headers don't +// satisfy the caller. +// - Or send StatusContinue response before reading request body +// with ContinueReadBody. +// - Or close the connection. +func (req *Request) MayContinue() bool { + return bytes.Equal(req.Header.peek(strExpect), str100Continue) +} + +// ContinueReadBody reads request body if request header contains +// 'Expect: 100-continue'. +// +// The caller must send StatusContinue response before calling this method. +// +// If maxBodySize > 0 and the body size exceeds maxBodySize, +// then ErrBodyTooLarge is returned. +func (req *Request) ContinueReadBody(r *bufio.Reader, maxBodySize int) error { + var err error + contentLength := req.Header.realContentLength() + if contentLength > 0 { + if maxBodySize > 0 && contentLength > maxBodySize { + return ErrBodyTooLarge + } + + // Pre-read multipart form data of known length. + // This way we limit memory usage for large file uploads, since their contents + // is streamed into temporary files if file size exceeds defaultMaxInMemoryFileSize. + req.multipartFormBoundary = string(req.Header.MultipartFormBoundary()) + if len(req.multipartFormBoundary) > 0 && len(req.Header.peek(strContentEncoding)) == 0 { + req.multipartForm, err = readMultipartForm(r, req.multipartFormBoundary, contentLength, defaultMaxInMemoryFileSize) + if err != nil { + req.Reset() + } + return err + } + } + + if contentLength == -2 { + // identity body has no sense for http requests, since + // the end of body is determined by connection close. + // So just ignore request body for requests without + // 'Content-Length' and 'Transfer-Encoding' headers. 
+ req.Header.SetContentLength(0) + return nil + } + + bodyBuf := req.bodyBuffer() + bodyBuf.Reset() + bodyBuf.B, err = readBody(r, contentLength, maxBodySize, bodyBuf.B) + if err != nil { + req.Reset() + return err + } + req.Header.SetContentLength(len(bodyBuf.B)) + return nil +} + +// Read reads response (including body) from the given r. +// +// io.EOF is returned if r is closed before reading the first header byte. +func (resp *Response) Read(r *bufio.Reader) error { + return resp.ReadLimitBody(r, 0) +} + +// ReadLimitBody reads response from the given r, limiting the body size. +// +// If maxBodySize > 0 and the body size exceeds maxBodySize, +// then ErrBodyTooLarge is returned. +// +// io.EOF is returned if r is closed before reading the first header byte. +func (resp *Response) ReadLimitBody(r *bufio.Reader, maxBodySize int) error { + resp.resetSkipHeader() + err := resp.Header.Read(r) + if err != nil { + return err + } + if resp.Header.StatusCode() == StatusContinue { + // Read the next response according to http://www.w3.org/Protocols/rfc2616/rfc2616-sec8.html . + if err = resp.Header.Read(r); err != nil { + return err + } + } + + if !resp.mustSkipBody() { + bodyBuf := resp.bodyBuffer() + bodyBuf.Reset() + bodyBuf.B, err = readBody(r, resp.Header.ContentLength(), maxBodySize, bodyBuf.B) + if err != nil { + resp.Reset() + return err + } + resp.Header.SetContentLength(len(bodyBuf.B)) + } + return nil +} + +func (resp *Response) mustSkipBody() bool { + return resp.SkipBody || resp.Header.mustSkipContentLength() +} + +var errRequestHostRequired = errors.New("missing required Host header in request") + +// WriteTo writes request to w. It implements io.WriterTo. +func (req *Request) WriteTo(w io.Writer) (int64, error) { + return writeBufio(req, w) +} + +// WriteTo writes response to w. It implements io.WriterTo. +func (resp *Response) WriteTo(w io.Writer) (int64, error) { + return writeBufio(resp, w) +} + +func writeBufio(hw httpWriter, w io.Writer) (int64, error) { + sw := acquireStatsWriter(w) + bw := acquireBufioWriter(sw) + err1 := hw.Write(bw) + err2 := bw.Flush() + releaseBufioWriter(bw) + n := sw.bytesWritten + releaseStatsWriter(sw) + + err := err1 + if err == nil { + err = err2 + } + return n, err +} + +type statsWriter struct { + w io.Writer + bytesWritten int64 +} + +func (w *statsWriter) Write(p []byte) (int, error) { + n, err := w.w.Write(p) + w.bytesWritten += int64(n) + return n, err +} + +func acquireStatsWriter(w io.Writer) *statsWriter { + v := statsWriterPool.Get() + if v == nil { + return &statsWriter{ + w: w, + } + } + sw := v.(*statsWriter) + sw.w = w + return sw +} + +func releaseStatsWriter(sw *statsWriter) { + sw.w = nil + sw.bytesWritten = 0 + statsWriterPool.Put(sw) +} + +var statsWriterPool sync.Pool + +func acquireBufioWriter(w io.Writer) *bufio.Writer { + v := bufioWriterPool.Get() + if v == nil { + return bufio.NewWriter(w) + } + bw := v.(*bufio.Writer) + bw.Reset(w) + return bw +} + +func releaseBufioWriter(bw *bufio.Writer) { + bufioWriterPool.Put(bw) +} + +var bufioWriterPool sync.Pool + +func (req *Request) onlyMultipartForm() bool { + return req.multipartForm != nil && (req.body == nil || len(req.body.B) == 0) +} + +// Write writes request to w. +// +// Write doesn't flush request to w for performance reasons. +// +// See also WriteTo. 
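The MayContinue/ContinueReadBody contract documented above splits header and body reading so a server can honour 'Expect: 100-continue'. A condensed sketch of the sequence for code that drives the protocol itself; br and bw are assumed to wrap the same connection, and the 4 MB limit is arbitrary:

func readRequest(br *bufio.Reader, bw *bufio.Writer, req *fasthttp.Request) error {
	const maxBody = 4 * 1024 * 1024

	// Reads the header; the body is deferred when 'Expect: 100-continue' is set.
	if err := req.ReadLimitBody(br, maxBody); err != nil {
		return err
	}
	if req.MayContinue() {
		// Tell the client to proceed, then read the deferred body.
		if _, err := bw.WriteString("HTTP/1.1 100 Continue\r\n\r\n"); err != nil {
			return err
		}
		if err := bw.Flush(); err != nil {
			return err
		}
		return req.ContinueReadBody(br, maxBody)
	}
	return nil
}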
+func (req *Request) Write(w *bufio.Writer) error { + if len(req.Header.Host()) == 0 || req.parsedURI { + uri := req.URI() + host := uri.Host() + if len(host) == 0 { + return errRequestHostRequired + } + req.Header.SetHostBytes(host) + req.Header.SetRequestURIBytes(uri.RequestURI()) + } + + if req.bodyStream != nil { + return req.writeBodyStream(w) + } + + body := req.bodyBytes() + var err error + if req.onlyMultipartForm() { + body, err = marshalMultipartForm(req.multipartForm, req.multipartFormBoundary) + if err != nil { + return fmt.Errorf("error when marshaling multipart form: %s", err) + } + req.Header.SetMultipartFormBoundary(req.multipartFormBoundary) + } + + hasBody := !req.Header.ignoreBody() + if hasBody { + // is POST + if len(body) == 0 { + body = req.postArgs.QueryString() + } + req.Header.SetContentLength(len(body)) + } + if err = req.Header.Write(w); err != nil { + return err + } + if hasBody { + _, err = w.Write(body) + } else if len(body) > 0 { + return fmt.Errorf("non-zero body for non-POST request. body=%q", body) + } + return err +} + +// WriteGzip writes response with gzipped body to w. +// +// The method gzips response body and sets 'Content-Encoding: gzip' +// header before writing response to w. +// +// WriteGzip doesn't flush response to w for performance reasons. +func (resp *Response) WriteGzip(w *bufio.Writer) error { + return resp.WriteGzipLevel(w, CompressDefaultCompression) +} + +// WriteGzipLevel writes response with gzipped body to w. +// +// Level is the desired compression level: +// +// * CompressNoCompression +// * CompressBestSpeed +// * CompressBestCompression +// * CompressDefaultCompression +// * CompressHuffmanOnly +// +// The method gzips response body and sets 'Content-Encoding: gzip' +// header before writing response to w. +// +// WriteGzipLevel doesn't flush response to w for performance reasons. +func (resp *Response) WriteGzipLevel(w *bufio.Writer, level int) error { + if err := resp.gzipBody(level); err != nil { + return err + } + return resp.Write(w) +} + +// WriteDeflate writes response with deflated body to w. +// +// The method deflates response body and sets 'Content-Encoding: deflate' +// header before writing response to w. +// +// WriteDeflate doesn't flush response to w for performance reasons. +func (resp *Response) WriteDeflate(w *bufio.Writer) error { + return resp.WriteDeflateLevel(w, CompressDefaultCompression) +} + +// WriteDeflateLevel writes response with deflated body to w. +// +// Level is the desired compression level: +// +// * CompressNoCompression +// * CompressBestSpeed +// * CompressBestCompression +// * CompressDefaultCompression +// * CompressHuffmanOnly +// +// The method deflates response body and sets 'Content-Encoding: deflate' +// header before writing response to w. +// +// WriteDeflateLevel doesn't flush response to w for performance reasons. +func (resp *Response) WriteDeflateLevel(w *bufio.Writer, level int) error { + if err := resp.deflateBody(level); err != nil { + return err + } + return resp.Write(w) +} + +func (resp *Response) gzipBody(level int) error { + if len(resp.Header.peek(strContentEncoding)) > 0 { + // It looks like the body is already compressed. + // Do not compress it again. + return nil + } + + if !resp.Header.isCompressibleContentType() { + // The content-type cannot be compressed. + return nil + } + + if resp.bodyStream != nil { + // Reset Content-Length to -1, since it is impossible + // to determine body size beforehand of streamed compression. 
+ // For https://github.com/valyala/fasthttp/issues/176 . + resp.Header.SetContentLength(-1) + + // Do not care about memory allocations here, since gzip is slow + // and allocates a lot of memory by itself. + bs := resp.bodyStream + resp.bodyStream = NewStreamReader(func(sw *bufio.Writer) { + zw := acquireStacklessGzipWriter(sw, level) + fw := &flushWriter{ + wf: zw, + bw: sw, + } + copyZeroAlloc(fw, bs) + releaseStacklessGzipWriter(zw, level) + if bsc, ok := bs.(io.Closer); ok { + bsc.Close() + } + }) + } else { + bodyBytes := resp.bodyBytes() + if len(bodyBytes) < minCompressLen { + // There is no sense in spending CPU time on small body compression, + // since there is a very high probability that the compressed + // body size will be bigger than the original body size. + return nil + } + w := responseBodyPool.Get() + w.B = AppendGzipBytesLevel(w.B, bodyBytes, level) + + // Hack: swap resp.body with w. + if resp.body != nil { + responseBodyPool.Put(resp.body) + } + resp.body = w + } + resp.Header.SetCanonical(strContentEncoding, strGzip) + return nil +} + +func (resp *Response) deflateBody(level int) error { + if len(resp.Header.peek(strContentEncoding)) > 0 { + // It looks like the body is already compressed. + // Do not compress it again. + return nil + } + + if !resp.Header.isCompressibleContentType() { + // The content-type cannot be compressed. + return nil + } + + if resp.bodyStream != nil { + // Reset Content-Length to -1, since it is impossible + // to determine body size beforehand of streamed compression. + // For https://github.com/valyala/fasthttp/issues/176 . + resp.Header.SetContentLength(-1) + + // Do not care about memory allocations here, since flate is slow + // and allocates a lot of memory by itself. + bs := resp.bodyStream + resp.bodyStream = NewStreamReader(func(sw *bufio.Writer) { + zw := acquireStacklessDeflateWriter(sw, level) + fw := &flushWriter{ + wf: zw, + bw: sw, + } + copyZeroAlloc(fw, bs) + releaseStacklessDeflateWriter(zw, level) + if bsc, ok := bs.(io.Closer); ok { + bsc.Close() + } + }) + } else { + bodyBytes := resp.bodyBytes() + if len(bodyBytes) < minCompressLen { + // There is no sense in spending CPU time on small body compression, + // since there is a very high probability that the compressed + // body size will be bigger than the original body size. + return nil + } + w := responseBodyPool.Get() + w.B = AppendDeflateBytesLevel(w.B, bodyBytes, level) + + // Hack: swap resp.body with w. + if resp.body != nil { + responseBodyPool.Put(resp.body) + } + resp.body = w + } + resp.Header.SetCanonical(strContentEncoding, strDeflate) + return nil +} + +// Bodies with sizes smaller than minCompressLen aren't compressed at all +const minCompressLen = 200 + +type writeFlusher interface { + io.Writer + Flush() error +} + +type flushWriter struct { + wf writeFlusher + bw *bufio.Writer +} + +func (w *flushWriter) Write(p []byte) (int, error) { + n, err := w.wf.Write(p) + if err != nil { + return 0, err + } + if err = w.wf.Flush(); err != nil { + return 0, err + } + if err = w.bw.Flush(); err != nil { + return 0, err + } + return n, nil +} + +// Write writes response to w. +// +// Write doesn't flush response to w for performance reasons. +// +// See also WriteTo. 
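gzipBody and deflateBody above skip bodies shorter than minCompressLen (200 bytes) and bodies that already carry a Content-Encoding, so WriteGzipLevel can be called unconditionally. A sketch of compressing a buffered response onto a bufio.Writer, which would normally wrap the client connection:

func writeCompressed(resp *fasthttp.Response, bw *bufio.Writer, body string) error {
	resp.Header.SetContentType("text/html; charset=utf-8")
	resp.SetBodyString(body)

	// Bodies under ~200 bytes are written uncompressed by design.
	if err := resp.WriteGzipLevel(bw, fasthttp.CompressBestSpeed); err != nil {
		return err
	}
	// None of the Write* methods flush; flushing is left to the caller.
	return bw.Flush()
}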
+func (resp *Response) Write(w *bufio.Writer) error { + sendBody := !resp.mustSkipBody() + + if resp.bodyStream != nil { + return resp.writeBodyStream(w, sendBody) + } + + body := resp.bodyBytes() + bodyLen := len(body) + if sendBody || bodyLen > 0 { + resp.Header.SetContentLength(bodyLen) + } + if err := resp.Header.Write(w); err != nil { + return err + } + if sendBody { + if _, err := w.Write(body); err != nil { + return err + } + } + return nil +} + +func (req *Request) writeBodyStream(w *bufio.Writer) error { + var err error + + contentLength := req.Header.ContentLength() + if contentLength < 0 { + lrSize := limitedReaderSize(req.bodyStream) + if lrSize >= 0 { + contentLength = int(lrSize) + if int64(contentLength) != lrSize { + contentLength = -1 + } + if contentLength >= 0 { + req.Header.SetContentLength(contentLength) + } + } + } + if contentLength >= 0 { + if err = req.Header.Write(w); err == nil { + err = writeBodyFixedSize(w, req.bodyStream, int64(contentLength)) + } + } else { + req.Header.SetContentLength(-1) + if err = req.Header.Write(w); err == nil { + err = writeBodyChunked(w, req.bodyStream) + } + } + err1 := req.closeBodyStream() + if err == nil { + err = err1 + } + return err +} + +func (resp *Response) writeBodyStream(w *bufio.Writer, sendBody bool) error { + var err error + + contentLength := resp.Header.ContentLength() + if contentLength < 0 { + lrSize := limitedReaderSize(resp.bodyStream) + if lrSize >= 0 { + contentLength = int(lrSize) + if int64(contentLength) != lrSize { + contentLength = -1 + } + if contentLength >= 0 { + resp.Header.SetContentLength(contentLength) + } + } + } + if contentLength >= 0 { + if err = resp.Header.Write(w); err == nil && sendBody { + err = writeBodyFixedSize(w, resp.bodyStream, int64(contentLength)) + } + } else { + resp.Header.SetContentLength(-1) + if err = resp.Header.Write(w); err == nil && sendBody { + err = writeBodyChunked(w, resp.bodyStream) + } + } + err1 := resp.closeBodyStream() + if err == nil { + err = err1 + } + return err +} + +func (req *Request) closeBodyStream() error { + if req.bodyStream == nil { + return nil + } + var err error + if bsc, ok := req.bodyStream.(io.Closer); ok { + err = bsc.Close() + } + req.bodyStream = nil + return err +} + +func (resp *Response) closeBodyStream() error { + if resp.bodyStream == nil { + return nil + } + var err error + if bsc, ok := resp.bodyStream.(io.Closer); ok { + err = bsc.Close() + } + resp.bodyStream = nil + return err +} + +// String returns request representation. +// +// Returns error message instead of request representation on error. +// +// Use Write instead of String for performance-critical code. +func (req *Request) String() string { + return getHTTPString(req) +} + +// String returns response representation. +// +// Returns error message instead of response representation on error. +// +// Use Write instead of String for performance-critical code. 
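String builds the entire wire representation in memory via the ByteBuffer pool below, so it is convenient for logs and tests but, as the comments note, Write should be preferred on hot paths. A one-line sketch; the log import is assumed:

func dumpResponse(resp *fasthttp.Response) {
	// Debugging only: String() serializes the whole response into a string.
	log.Printf("upstream response:\n%s", resp.String())
}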
+func (resp *Response) String() string { + return getHTTPString(resp) +} + +func getHTTPString(hw httpWriter) string { + w := AcquireByteBuffer() + bw := bufio.NewWriter(w) + if err := hw.Write(bw); err != nil { + return err.Error() + } + if err := bw.Flush(); err != nil { + return err.Error() + } + s := string(w.B) + ReleaseByteBuffer(w) + return s +} + +type httpWriter interface { + Write(w *bufio.Writer) error +} + +func writeBodyChunked(w *bufio.Writer, r io.Reader) error { + vbuf := copyBufPool.Get() + buf := vbuf.([]byte) + + var err error + var n int + for { + n, err = r.Read(buf) + if n == 0 { + if err == nil { + panic("BUG: io.Reader returned 0, nil") + } + if err == io.EOF { + if err = writeChunk(w, buf[:0]); err != nil { + break + } + err = nil + } + break + } + if err = writeChunk(w, buf[:n]); err != nil { + break + } + } + + copyBufPool.Put(vbuf) + return err +} + +func limitedReaderSize(r io.Reader) int64 { + lr, ok := r.(*io.LimitedReader) + if !ok { + return -1 + } + return lr.N +} + +func writeBodyFixedSize(w *bufio.Writer, r io.Reader, size int64) error { + if size > maxSmallFileSize { + // w buffer must be empty for triggering + // sendfile path in bufio.Writer.ReadFrom. + if err := w.Flush(); err != nil { + return err + } + } + + // Unwrap a single limited reader for triggering sendfile path + // in net.TCPConn.ReadFrom. + lr, ok := r.(*io.LimitedReader) + if ok { + r = lr.R + } + + n, err := copyZeroAlloc(w, r) + + if ok { + lr.N -= n + } + + if n != size && err == nil { + err = fmt.Errorf("copied %d bytes from body stream instead of %d bytes", n, size) + } + return err +} + +func copyZeroAlloc(w io.Writer, r io.Reader) (int64, error) { + vbuf := copyBufPool.Get() + buf := vbuf.([]byte) + n, err := io.CopyBuffer(w, r, buf) + copyBufPool.Put(vbuf) + return n, err +} + +var copyBufPool = sync.Pool{ + New: func() interface{} { + return make([]byte, 4096) + }, +} + +func writeChunk(w *bufio.Writer, b []byte) error { + n := len(b) + writeHexInt(w, n) + w.Write(strCRLF) + w.Write(b) + _, err := w.Write(strCRLF) + err1 := w.Flush() + if err == nil { + err = err1 + } + return err +} + +// ErrBodyTooLarge is returned if either request or response body exceeds +// the given limit. 
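For reference, writeChunk above frames each chunk as a hexadecimal size, a CRLF, the payload, and another CRLF, and a zero-size chunk terminates the body; parseChunkSize and readBodyChunked below read that same framing back. The standalone sketch that follows only illustrates the wire format; appendChunked and its sample payloads are illustrative names and are not part of the vendored file:

package main

import (
	"bytes"
	"fmt"
)

// appendChunked mimics the framing produced by writeChunk:
// "<hex size>\r\n<payload>\r\n" per chunk, ended by a zero-size chunk.
func appendChunked(dst []byte, chunks ...[]byte) []byte {
	var buf bytes.Buffer
	buf.Write(dst)
	for _, c := range chunks {
		fmt.Fprintf(&buf, "%x\r\n", len(c)) // chunk size in hex
		buf.Write(c)                        // chunk payload
		buf.WriteString("\r\n")             // chunk terminator
	}
	buf.WriteString("0\r\n\r\n") // final zero-size chunk ends the body
	return buf.Bytes()
}

func main() {
	fmt.Printf("%q\n", appendChunked(nil, []byte("foobar"), []byte("baz")))
	// Prints "6\r\nfoobar\r\n3\r\nbaz\r\n0\r\n\r\n", the same framing used
	// by the chunked request and response fixtures in the tests below.
}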
+var ErrBodyTooLarge = errors.New("body size exceeds the given limit") + +func readBody(r *bufio.Reader, contentLength int, maxBodySize int, dst []byte) ([]byte, error) { + dst = dst[:0] + if contentLength >= 0 { + if maxBodySize > 0 && contentLength > maxBodySize { + return dst, ErrBodyTooLarge + } + return appendBodyFixedSize(r, dst, contentLength) + } + if contentLength == -1 { + return readBodyChunked(r, maxBodySize, dst) + } + return readBodyIdentity(r, maxBodySize, dst) +} + +func readBodyIdentity(r *bufio.Reader, maxBodySize int, dst []byte) ([]byte, error) { + dst = dst[:cap(dst)] + if len(dst) == 0 { + dst = make([]byte, 1024) + } + offset := 0 + for { + nn, err := r.Read(dst[offset:]) + if nn <= 0 { + if err != nil { + if err == io.EOF { + return dst[:offset], nil + } + return dst[:offset], err + } + panic(fmt.Sprintf("BUG: bufio.Read() returned (%d, nil)", nn)) + } + offset += nn + if maxBodySize > 0 && offset > maxBodySize { + return dst[:offset], ErrBodyTooLarge + } + if len(dst) == offset { + n := round2(2 * offset) + if maxBodySize > 0 && n > maxBodySize { + n = maxBodySize + 1 + } + b := make([]byte, n) + copy(b, dst) + dst = b + } + } +} + +func appendBodyFixedSize(r *bufio.Reader, dst []byte, n int) ([]byte, error) { + if n == 0 { + return dst, nil + } + + offset := len(dst) + dstLen := offset + n + if cap(dst) < dstLen { + b := make([]byte, round2(dstLen)) + copy(b, dst) + dst = b + } + dst = dst[:dstLen] + + for { + nn, err := r.Read(dst[offset:]) + if nn <= 0 { + if err != nil { + if err == io.EOF { + err = io.ErrUnexpectedEOF + } + return dst[:offset], err + } + panic(fmt.Sprintf("BUG: bufio.Read() returned (%d, nil)", nn)) + } + offset += nn + if offset == dstLen { + return dst, nil + } + } +} + +func readBodyChunked(r *bufio.Reader, maxBodySize int, dst []byte) ([]byte, error) { + if len(dst) > 0 { + panic("BUG: expected zero-length buffer") + } + + strCRLFLen := len(strCRLF) + for { + chunkSize, err := parseChunkSize(r) + if err != nil { + return dst, err + } + if maxBodySize > 0 && len(dst)+chunkSize > maxBodySize { + return dst, ErrBodyTooLarge + } + dst, err = appendBodyFixedSize(r, dst, chunkSize+strCRLFLen) + if err != nil { + return dst, err + } + if !bytes.Equal(dst[len(dst)-strCRLFLen:], strCRLF) { + return dst, fmt.Errorf("cannot find crlf at the end of chunk") + } + dst = dst[:len(dst)-strCRLFLen] + if chunkSize == 0 { + return dst, nil + } + } +} + +func parseChunkSize(r *bufio.Reader) (int, error) { + n, err := readHexInt(r) + if err != nil { + return -1, err + } + c, err := r.ReadByte() + if err != nil { + return -1, fmt.Errorf("cannot read '\r' char at the end of chunk size: %s", err) + } + if c != '\r' { + return -1, fmt.Errorf("unexpected char %q at the end of chunk size. Expected %q", c, '\r') + } + c, err = r.ReadByte() + if err != nil { + return -1, fmt.Errorf("cannot read '\n' char at the end of chunk size: %s", err) + } + if c != '\n' { + return -1, fmt.Errorf("unexpected char %q at the end of chunk size. 
Expected %q", c, '\n') + } + return n, nil +} + +func round2(n int) int { + if n <= 0 { + return 0 + } + n-- + x := uint(0) + for n > 0 { + n >>= 1 + x++ + } + return 1 << x +} diff --git a/vendor/github.com/erikdubbelboer/fasthttp/http_test.go b/vendor/github.com/erikdubbelboer/fasthttp/http_test.go new file mode 100644 index 0000000..28e2028 --- /dev/null +++ b/vendor/github.com/erikdubbelboer/fasthttp/http_test.go @@ -0,0 +1,1794 @@ +package fasthttp + +import ( + "bufio" + "bytes" + "fmt" + "io" + "io/ioutil" + "mime/multipart" + "strings" + "testing" + "time" +) + +func TestResponseBodyStreamDeflate(t *testing.T) { + body := createFixedBody(1e5) + + // Verifies https://github.com/valyala/fasthttp/issues/176 + // when Content-Length is explicitly set. + testResponseBodyStreamDeflate(t, body, len(body)) + + // Verifies that 'transfer-encoding: chunked' works as expected. + testResponseBodyStreamDeflate(t, body, -1) +} + +func TestResponseBodyStreamGzip(t *testing.T) { + body := createFixedBody(1e5) + + // Verifies https://github.com/valyala/fasthttp/issues/176 + // when Content-Length is explicitly set. + testResponseBodyStreamGzip(t, body, len(body)) + + // Verifies that 'transfer-encoding: chunked' works as expected. + testResponseBodyStreamGzip(t, body, -1) +} + +func testResponseBodyStreamDeflate(t *testing.T, body []byte, bodySize int) { + var r Response + r.SetBodyStream(bytes.NewReader(body), bodySize) + + w := &bytes.Buffer{} + bw := bufio.NewWriter(w) + if err := r.WriteDeflate(bw); err != nil { + t.Fatalf("unexpected error: %s", err) + } + if err := bw.Flush(); err != nil { + t.Fatalf("unexpected error: %s", err) + } + + var resp Response + br := bufio.NewReader(w) + if err := resp.Read(br); err != nil { + t.Fatalf("unexpected error: %s", err) + } + + respBody, err := resp.BodyInflate() + if err != nil { + t.Fatalf("unexpected error: %s", err) + } + if !bytes.Equal(respBody, body) { + t.Fatalf("unexpected body: %q. Expecting %q", respBody, body) + } +} + +func testResponseBodyStreamGzip(t *testing.T, body []byte, bodySize int) { + var r Response + r.SetBodyStream(bytes.NewReader(body), bodySize) + + w := &bytes.Buffer{} + bw := bufio.NewWriter(w) + if err := r.WriteGzip(bw); err != nil { + t.Fatalf("unexpected error: %s", err) + } + if err := bw.Flush(); err != nil { + t.Fatalf("unexpected error: %s", err) + } + + var resp Response + br := bufio.NewReader(w) + if err := resp.Read(br); err != nil { + t.Fatalf("unexpected error: %s", err) + } + + respBody, err := resp.BodyGunzip() + if err != nil { + t.Fatalf("unexpected error: %s", err) + } + if !bytes.Equal(respBody, body) { + t.Fatalf("unexpected body: %q. 
Expecting %q", respBody, body) + } +} + +func TestResponseWriteGzipNilBody(t *testing.T) { + var r Response + w := &bytes.Buffer{} + bw := bufio.NewWriter(w) + if err := r.WriteGzip(bw); err != nil { + t.Fatalf("unexpected error: %s", err) + } + if err := bw.Flush(); err != nil { + t.Fatalf("unexpected error: %s", err) + } +} + +func TestResponseWriteDeflateNilBody(t *testing.T) { + var r Response + w := &bytes.Buffer{} + bw := bufio.NewWriter(w) + if err := r.WriteDeflate(bw); err != nil { + t.Fatalf("unexpected error: %s", err) + } + if err := bw.Flush(); err != nil { + t.Fatalf("unexpected error: %s", err) + } +} + +func TestResponseSwapBodySerial(t *testing.T) { + testResponseSwapBody(t) +} + +func TestResponseSwapBodyConcurrent(t *testing.T) { + ch := make(chan struct{}) + for i := 0; i < 10; i++ { + go func() { + testResponseSwapBody(t) + ch <- struct{}{} + }() + } + + for i := 0; i < 10; i++ { + select { + case <-ch: + case <-time.After(time.Second): + t.Fatalf("timeout") + } + } +} + +func testResponseSwapBody(t *testing.T) { + var b []byte + r := AcquireResponse() + for i := 0; i < 20; i++ { + bOrig := r.Body() + b = r.SwapBody(b) + if !bytes.Equal(bOrig, b) { + t.Fatalf("unexpected body returned: %q. Expecting %q", b, bOrig) + } + r.AppendBodyString("foobar") + } + + s := "aaaabbbbcccc" + b = b[:0] + for i := 0; i < 10; i++ { + r.SetBodyStream(bytes.NewBufferString(s), len(s)) + b = r.SwapBody(b) + if string(b) != s { + t.Fatalf("unexpected body returned: %q. Expecting %q", b, s) + } + b = r.SwapBody(b) + if len(b) > 0 { + t.Fatalf("unexpected body with non-zero size returned: %q", b) + } + } + ReleaseResponse(r) +} + +func TestRequestSwapBodySerial(t *testing.T) { + testRequestSwapBody(t) +} + +func TestRequestSwapBodyConcurrent(t *testing.T) { + ch := make(chan struct{}) + for i := 0; i < 10; i++ { + go func() { + testRequestSwapBody(t) + ch <- struct{}{} + }() + } + + for i := 0; i < 10; i++ { + select { + case <-ch: + case <-time.After(time.Second): + t.Fatalf("timeout") + } + } +} + +func testRequestSwapBody(t *testing.T) { + var b []byte + r := AcquireRequest() + for i := 0; i < 20; i++ { + bOrig := r.Body() + b = r.SwapBody(b) + if !bytes.Equal(bOrig, b) { + t.Fatalf("unexpected body returned: %q. Expecting %q", b, bOrig) + } + r.AppendBodyString("foobar") + } + + s := "aaaabbbbcccc" + b = b[:0] + for i := 0; i < 10; i++ { + r.SetBodyStream(bytes.NewBufferString(s), len(s)) + b = r.SwapBody(b) + if string(b) != s { + t.Fatalf("unexpected body returned: %q. Expecting %q", b, s) + } + b = r.SwapBody(b) + if len(b) > 0 { + t.Fatalf("unexpected body with non-zero size returned: %q", b) + } + } + ReleaseRequest(r) +} + +func TestRequestHostFromRequestURI(t *testing.T) { + hExpected := "foobar.com" + var req Request + req.SetRequestURI("http://proxy-host:123/foobar?baz") + req.SetHost(hExpected) + h := req.Host() + if string(h) != hExpected { + t.Fatalf("unexpected host set: %q. Expecting %q", h, hExpected) + } +} + +func TestRequestHostFromHeader(t *testing.T) { + hExpected := "foobar.com" + var req Request + req.Header.SetHost(hExpected) + h := req.Host() + if string(h) != hExpected { + t.Fatalf("unexpected host set: %q. 
Expecting %q", h, hExpected) + } +} + +func TestRequestContentTypeWithCharsetIssue100(t *testing.T) { + expectedContentType := "application/x-www-form-urlencoded; charset=UTF-8" + expectedBody := "0123=56789" + s := fmt.Sprintf("POST / HTTP/1.1\r\nContent-Type: %s\r\nContent-Length: %d\r\n\r\n%s", + expectedContentType, len(expectedBody), expectedBody) + + br := bufio.NewReader(bytes.NewBufferString(s)) + var r Request + if err := r.Read(br); err != nil { + t.Fatalf("unexpected error: %s", err) + } + + body := r.Body() + if string(body) != expectedBody { + t.Fatalf("unexpected body %q. Expecting %q", body, expectedBody) + } + ct := r.Header.ContentType() + if string(ct) != expectedContentType { + t.Fatalf("unexpected content-type %q. Expecting %q", ct, expectedContentType) + } + args := r.PostArgs() + if args.Len() != 1 { + t.Fatalf("unexpected number of POST args: %d. Expecting 1", args.Len()) + } + av := args.Peek("0123") + if string(av) != "56789" { + t.Fatalf("unexpected POST arg value: %q. Expecting %q", av, "56789") + } +} + +func TestRequestReadMultipartFormWithFile(t *testing.T) { + s := `POST /upload HTTP/1.1 +Host: localhost:10000 +Content-Length: 521 +Content-Type: multipart/form-data; boundary=----WebKitFormBoundaryJwfATyF8tmxSJnLg + +------WebKitFormBoundaryJwfATyF8tmxSJnLg +Content-Disposition: form-data; name="f1" + +value1 +------WebKitFormBoundaryJwfATyF8tmxSJnLg +Content-Disposition: form-data; name="fileaaa"; filename="TODO" +Content-Type: application/octet-stream + +- SessionClient with referer and cookies support. +- Client with requests' pipelining support. +- ProxyHandler similar to FSHandler. +- WebSockets. See https://tools.ietf.org/html/rfc6455 . +- HTTP/2.0. See https://tools.ietf.org/html/rfc7540 . + +------WebKitFormBoundaryJwfATyF8tmxSJnLg-- +tailfoobar` + + br := bufio.NewReader(bytes.NewBufferString(s)) + + var r Request + if err := r.Read(br); err != nil { + t.Fatalf("unexpected error: %s", err) + } + + tail, err := ioutil.ReadAll(br) + if err != nil { + t.Fatalf("unexpected error: %s", err) + } + if string(tail) != "tailfoobar" { + t.Fatalf("unexpected tail %q. Expecting %q", tail, "tailfoobar") + } + + f, err := r.MultipartForm() + if err != nil { + t.Fatalf("unexpected error: %s", err) + } + defer r.RemoveMultipartFormFiles() + + // verify values + if len(f.Value) != 1 { + t.Fatalf("unexpected number of values in multipart form: %d. Expecting 1", len(f.Value)) + } + for k, vv := range f.Value { + if k != "f1" { + t.Fatalf("unexpected value name %q. Expecting %q", k, "f1") + } + if len(vv) != 1 { + t.Fatalf("unexpected number of values %d. Expecting 1", len(vv)) + } + v := vv[0] + if v != "value1" { + t.Fatalf("unexpected value %q. Expecting %q", v, "value1") + } + } + + // verify files + if len(f.File) != 1 { + t.Fatalf("unexpected number of file values in multipart form: %d. Expecting 1", len(f.File)) + } + for k, vv := range f.File { + if k != "fileaaa" { + t.Fatalf("unexpected file value name %q. Expecting %q", k, "fileaaa") + } + if len(vv) != 1 { + t.Fatalf("unexpected number of file values %d. Expecting 1", len(vv)) + } + v := vv[0] + if v.Filename != "TODO" { + t.Fatalf("unexpected filename %q. Expecting %q", v.Filename, "TODO") + } + ct := v.Header.Get("Content-Type") + if ct != "application/octet-stream" { + t.Fatalf("unexpected content-type %q. 
Expecting %q", ct, "application/octet-stream") + } + } +} + +func TestRequestRequestURI(t *testing.T) { + var r Request + + // Set request uri via SetRequestURI() + uri := "/foo/bar?baz" + r.SetRequestURI(uri) + if string(r.RequestURI()) != uri { + t.Fatalf("unexpected request uri %q. Expecting %q", r.RequestURI(), uri) + } + + // Set request uri via Request.URI().Update() + r.Reset() + uri = "/aa/bbb?ccc=sdfsdf" + r.URI().Update(uri) + if string(r.RequestURI()) != uri { + t.Fatalf("unexpected request uri %q. Expecting %q", r.RequestURI(), uri) + } + + // update query args in the request uri + qa := r.URI().QueryArgs() + qa.Reset() + qa.Set("foo", "bar") + uri = "/aa/bbb?foo=bar" + if string(r.RequestURI()) != uri { + t.Fatalf("unexpected request uri %q. Expecting %q", r.RequestURI(), uri) + } +} + +func TestRequestUpdateURI(t *testing.T) { + var r Request + r.Header.SetHost("aaa.bbb") + r.SetRequestURI("/lkjkl/kjl") + + // Modify request uri and host via URI() object and make sure + // the requestURI and Host header are properly updated + u := r.URI() + u.SetPath("/123/432.html") + u.SetHost("foobar.com") + a := u.QueryArgs() + a.Set("aaa", "bcse") + + s := r.String() + if !strings.HasPrefix(s, "GET /123/432.html?aaa=bcse") { + t.Fatalf("cannot find %q in %q", "GET /123/432.html?aaa=bcse", s) + } + if strings.Index(s, "\r\nHost: foobar.com\r\n") < 0 { + t.Fatalf("cannot find %q in %q", "\r\nHost: foobar.com\r\n", s) + } +} + +func TestRequestBodyStreamMultipleBodyCalls(t *testing.T) { + var r Request + + s := "foobar baz abc" + if r.IsBodyStream() { + t.Fatalf("IsBodyStream must return false") + } + r.SetBodyStream(bytes.NewBufferString(s), len(s)) + if !r.IsBodyStream() { + t.Fatalf("IsBodyStream must return true") + } + for i := 0; i < 10; i++ { + body := r.Body() + if string(body) != s { + t.Fatalf("unexpected body %q. Expecting %q. iteration %d", body, s, i) + } + } +} + +func TestResponseBodyStreamMultipleBodyCalls(t *testing.T) { + var r Response + + s := "foobar baz abc" + if r.IsBodyStream() { + t.Fatalf("IsBodyStream must return false") + } + r.SetBodyStream(bytes.NewBufferString(s), len(s)) + if !r.IsBodyStream() { + t.Fatalf("IsBodyStream must return true") + } + for i := 0; i < 10; i++ { + body := r.Body() + if string(body) != s { + t.Fatalf("unexpected body %q. Expecting %q. 
iteration %d", body, s, i) + } + } +} + +func TestRequestBodyWriteToPlain(t *testing.T) { + var r Request + + expectedS := "foobarbaz" + r.AppendBodyString(expectedS) + + testBodyWriteTo(t, &r, expectedS, true) +} + +func TestResponseBodyWriteToPlain(t *testing.T) { + var r Response + + expectedS := "foobarbaz" + r.AppendBodyString(expectedS) + + testBodyWriteTo(t, &r, expectedS, true) +} + +func TestResponseBodyWriteToStream(t *testing.T) { + var r Response + + expectedS := "aaabbbccc" + buf := bytes.NewBufferString(expectedS) + if r.IsBodyStream() { + t.Fatalf("IsBodyStream must return false") + } + r.SetBodyStream(buf, len(expectedS)) + if !r.IsBodyStream() { + t.Fatalf("IsBodyStream must return true") + } + + testBodyWriteTo(t, &r, expectedS, false) +} + +func TestRequestBodyWriteToMultipart(t *testing.T) { + expectedS := "--foobar\r\nContent-Disposition: form-data; name=\"key_0\"\r\n\r\nvalue_0\r\n--foobar--\r\n" + s := fmt.Sprintf("POST / HTTP/1.1\r\nHost: aaa\r\nContent-Type: multipart/form-data; boundary=foobar\r\nContent-Length: %d\r\n\r\n%s", + len(expectedS), expectedS) + + var r Request + br := bufio.NewReader(bytes.NewBufferString(s)) + if err := r.Read(br); err != nil { + t.Fatalf("unexpected error: %s", err) + } + + testBodyWriteTo(t, &r, expectedS, true) +} + +type bodyWriterTo interface { + BodyWriteTo(io.Writer) error + Body() []byte +} + +func testBodyWriteTo(t *testing.T, bw bodyWriterTo, expectedS string, isRetainedBody bool) { + var buf ByteBuffer + if err := bw.BodyWriteTo(&buf); err != nil { + t.Fatalf("unexpected error: %s", err) + } + + s := buf.B + if string(s) != expectedS { + t.Fatalf("unexpected result %q. Expecting %q", s, expectedS) + } + + body := bw.Body() + if isRetainedBody { + if string(body) != expectedS { + t.Fatalf("unexpected body %q. Expecting %q", body, expectedS) + } + } else { + if len(body) > 0 { + t.Fatalf("unexpected non-zero body after BodyWriteTo: %q", body) + } + } +} + +func TestRequestReadEOF(t *testing.T) { + var r Request + + br := bufio.NewReader(&bytes.Buffer{}) + err := r.Read(br) + if err == nil { + t.Fatalf("expecting error") + } + if err != io.EOF { + t.Fatalf("unexpected error: %s. Expecting %s", err, io.EOF) + } + + // incomplete request mustn't return io.EOF + br = bufio.NewReader(bytes.NewBufferString("POST / HTTP/1.1\r\nContent-Type: aa\r\nContent-Length: 1234\r\n\r\nIncomplete body")) + err = r.Read(br) + if err == nil { + t.Fatalf("expecting error") + } + if err == io.EOF { + t.Fatalf("expecting non-EOF error") + } +} + +func TestResponseReadEOF(t *testing.T) { + var r Response + + br := bufio.NewReader(&bytes.Buffer{}) + err := r.Read(br) + if err == nil { + t.Fatalf("expecting error") + } + if err != io.EOF { + t.Fatalf("unexpected error: %s. Expecting %s", err, io.EOF) + } + + // incomplete response mustn't return io.EOF + br = bufio.NewReader(bytes.NewBufferString("HTTP/1.1 200 OK\r\nContent-Type: aaa\r\nContent-Length: 123\r\n\r\nIncomplete body")) + err = r.Read(br) + if err == nil { + t.Fatalf("expecting error") + } + if err == io.EOF { + t.Fatalf("expecting non-EOF error") + } +} + +func TestResponseWriteTo(t *testing.T) { + var r Response + + r.SetBodyString("foobar") + + s := r.String() + var buf ByteBuffer + n, err := r.WriteTo(&buf) + if err != nil { + t.Fatalf("unexpected error: %s", err) + } + if n != int64(len(s)) { + t.Fatalf("unexpected response length %d. Expecting %d", n, len(s)) + } + if string(buf.B) != s { + t.Fatalf("unexpected response %q. 
Expecting %q", buf.B, s) + } +} + +func TestRequestWriteTo(t *testing.T) { + var r Request + + r.SetRequestURI("http://foobar.com/aaa/bbb") + + s := r.String() + var buf ByteBuffer + n, err := r.WriteTo(&buf) + if err != nil { + t.Fatalf("unexpected error: %s", err) + } + if n != int64(len(s)) { + t.Fatalf("unexpected request length %d. Expecting %d", n, len(s)) + } + if string(buf.B) != s { + t.Fatalf("unexpected request %q. Expecting %q", buf.B, s) + } +} + +func TestResponseSkipBody(t *testing.T) { + var r Response + + // set StatusNotModified + r.Header.SetStatusCode(StatusNotModified) + r.SetBodyString("foobar") + s := r.String() + if strings.Contains(s, "\r\n\r\nfoobar") { + t.Fatalf("unexpected non-zero body in response %q", s) + } + if strings.Contains(s, "Content-Length: ") { + t.Fatalf("unexpected content-length in response %q", s) + } + if strings.Contains(s, "Content-Type: ") { + t.Fatalf("unexpected content-type in response %q", s) + } + + // set StatusNoContent + r.Header.SetStatusCode(StatusNoContent) + r.SetBodyString("foobar") + s = r.String() + if strings.Contains(s, "\r\n\r\nfoobar") { + t.Fatalf("unexpected non-zero body in response %q", s) + } + if strings.Contains(s, "Content-Length: ") { + t.Fatalf("unexpected content-length in response %q", s) + } + if strings.Contains(s, "Content-Type: ") { + t.Fatalf("unexpected content-type in response %q", s) + } + + // explicitly skip body + r.Header.SetStatusCode(StatusOK) + r.SkipBody = true + r.SetBodyString("foobar") + s = r.String() + if strings.Contains(s, "\r\n\r\nfoobar") { + t.Fatalf("unexpected non-zero body in response %q", s) + } + if !strings.Contains(s, "Content-Length: 6\r\n") { + t.Fatalf("expecting content-length in response %q", s) + } + if !strings.Contains(s, "Content-Type: ") { + t.Fatalf("expecting content-type in response %q", s) + } +} + +func TestRequestNoContentLength(t *testing.T) { + var r Request + + r.Header.SetMethod("HEAD") + r.Header.SetHost("foobar") + + s := r.String() + if strings.Contains(s, "Content-Length: ") { + t.Fatalf("unexpected content-length in HEAD request %q", s) + } + + r.Header.SetMethod("POST") + fmt.Fprintf(r.BodyWriter(), "foobar body") + s = r.String() + if !strings.Contains(s, "Content-Length: ") { + t.Fatalf("missing content-length header in non-GET request %q", s) + } +} + +func TestRequestReadGzippedBody(t *testing.T) { + var r Request + + bodyOriginal := "foo bar baz compress me better!" + body := AppendGzipBytes(nil, []byte(bodyOriginal)) + s := fmt.Sprintf("POST /foobar HTTP/1.1\r\nContent-Type: foo/bar\r\nContent-Encoding: gzip\r\nContent-Length: %d\r\n\r\n%s", + len(body), body) + br := bufio.NewReader(bytes.NewBufferString(s)) + if err := r.Read(br); err != nil { + t.Fatalf("unexpected error: %s", err) + } + + if string(r.Header.Peek("Content-Encoding")) != "gzip" { + t.Fatalf("unexpected content-encoding: %q. Expecting %q", r.Header.Peek("Content-Encoding"), "gzip") + } + if r.Header.ContentLength() != len(body) { + t.Fatalf("unexpected content-length: %d. Expecting %d", r.Header.ContentLength(), len(body)) + } + if string(r.Body()) != string(body) { + t.Fatalf("unexpected body: %q. Expecting %q", r.Body(), body) + } + + bodyGunzipped, err := AppendGunzipBytes(nil, r.Body()) + if err != nil { + t.Fatalf("unexpected error when uncompressing data: %s", err) + } + if string(bodyGunzipped) != bodyOriginal { + t.Fatalf("unexpected uncompressed body %q. 
Expecting %q", bodyGunzipped, bodyOriginal) + } +} + +func TestRequestReadPostNoBody(t *testing.T) { + var r Request + + s := "POST /foo/bar HTTP/1.1\r\nContent-Type: aaa/bbb\r\n\r\naaaa" + br := bufio.NewReader(bytes.NewBufferString(s)) + if err := r.Read(br); err != nil { + t.Fatalf("unexpected error: %s", err) + } + + if string(r.Header.RequestURI()) != "/foo/bar" { + t.Fatalf("unexpected request uri %q. Expecting %q", r.Header.RequestURI(), "/foo/bar") + } + if string(r.Header.ContentType()) != "aaa/bbb" { + t.Fatalf("unexpected content-type %q. Expecting %q", r.Header.ContentType(), "aaa/bbb") + } + if len(r.Body()) != 0 { + t.Fatalf("unexpected body found %q. Expecting empty body", r.Body()) + } + if r.Header.ContentLength() != 0 { + t.Fatalf("unexpected content-length: %d. Expecting 0", r.Header.ContentLength()) + } + + tail, err := ioutil.ReadAll(br) + if err != nil { + t.Fatalf("unexpected error: %s", err) + } + if string(tail) != "aaaa" { + t.Fatalf("unexpected tail %q. Expecting %q", tail, "aaaa") + } +} + +func TestRequestContinueReadBody(t *testing.T) { + s := "PUT /foo/bar HTTP/1.1\r\nExpect: 100-continue\r\nContent-Length: 5\r\nContent-Type: foo/bar\r\n\r\nabcdef4343" + br := bufio.NewReader(bytes.NewBufferString(s)) + + var r Request + if err := r.Read(br); err != nil { + t.Fatalf("unexpected error: %s", err) + } + if !r.MayContinue() { + t.Fatalf("MayContinue must return true") + } + + if err := r.ContinueReadBody(br, 0); err != nil { + t.Fatalf("error when reading request body: %s", err) + } + body := r.Body() + if string(body) != "abcde" { + t.Fatalf("unexpected body %q. Expecting %q", body, "abcde") + } + + tail, err := ioutil.ReadAll(br) + if err != nil { + t.Fatalf("unexpected error: %s", err) + } + if string(tail) != "f4343" { + t.Fatalf("unexpected tail %q. 
Expecting %q", tail, "f4343") + } +} + +func TestRequestMayContinue(t *testing.T) { + var r Request + if r.MayContinue() { + t.Fatalf("MayContinue on empty request must return false") + } + + r.Header.Set("Expect", "123sdfds") + if r.MayContinue() { + t.Fatalf("MayContinue on invalid Expect header must return false") + } + + r.Header.Set("Expect", "100-continue") + if !r.MayContinue() { + t.Fatalf("MayContinue on 'Expect: 100-continue' header must return true") + } +} + +func TestResponseGzipStream(t *testing.T) { + var r Response + if r.IsBodyStream() { + t.Fatalf("IsBodyStream must return false") + } + r.SetBodyStreamWriter(func(w *bufio.Writer) { + fmt.Fprintf(w, "foo") + w.Flush() + time.Sleep(time.Millisecond) + w.Write([]byte("barbaz")) + w.Flush() + time.Sleep(time.Millisecond) + fmt.Fprintf(w, "1234") + if err := w.Flush(); err != nil { + t.Fatalf("unexpected error: %s", err) + } + }) + if !r.IsBodyStream() { + t.Fatalf("IsBodyStream must return true") + } + testResponseGzipExt(t, &r, "foobarbaz1234") +} + +func TestResponseDeflateStream(t *testing.T) { + var r Response + if r.IsBodyStream() { + t.Fatalf("IsBodyStream must return false") + } + r.SetBodyStreamWriter(func(w *bufio.Writer) { + w.Write([]byte("foo")) + w.Flush() + fmt.Fprintf(w, "barbaz") + w.Flush() + w.Write([]byte("1234")) + if err := w.Flush(); err != nil { + t.Fatalf("unexpected error: %s", err) + } + }) + if !r.IsBodyStream() { + t.Fatalf("IsBodyStream must return true") + } + testResponseDeflateExt(t, &r, "foobarbaz1234") +} + +func TestResponseDeflate(t *testing.T) { + for _, s := range compressTestcases { + testResponseDeflate(t, s) + } +} + +func TestResponseGzip(t *testing.T) { + for _, s := range compressTestcases { + testResponseGzip(t, s) + } +} + +func testResponseDeflate(t *testing.T, s string) { + var r Response + r.SetBodyString(s) + testResponseDeflateExt(t, &r, s) + + // make sure the uncompressible Content-Type isn't compressed + r.Reset() + r.Header.SetContentType("image/jpeg") + r.SetBodyString(s) + testResponseDeflateExt(t, &r, s) +} + +func testResponseDeflateExt(t *testing.T, r *Response, s string) { + isCompressible := isCompressibleResponse(r, s) + + var buf bytes.Buffer + var err error + bw := bufio.NewWriter(&buf) + if err = r.WriteDeflate(bw); err != nil { + t.Fatalf("unexpected error: %s", err) + } + if err = bw.Flush(); err != nil { + t.Fatalf("unexpected error: %s", err) + } + + var r1 Response + br := bufio.NewReader(&buf) + if err = r1.Read(br); err != nil { + t.Fatalf("unexpected error: %s", err) + } + + ce := r1.Header.Peek("Content-Encoding") + var body []byte + if isCompressible { + if string(ce) != "deflate" { + t.Fatalf("unexpected Content-Encoding %q. Expecting %q. len(s)=%d, Content-Type: %q", + ce, "deflate", len(s), r.Header.ContentType()) + } + body, err = r1.BodyInflate() + if err != nil { + t.Fatalf("unexpected error: %s", err) + } + } else { + if len(ce) > 0 { + t.Fatalf("expecting empty Content-Encoding. Got %q", ce) + } + body = r1.Body() + } + if string(body) != s { + t.Fatalf("unexpected body %q. 
Expecting %q", body, s) + } +} + +func testResponseGzip(t *testing.T, s string) { + var r Response + r.SetBodyString(s) + testResponseGzipExt(t, &r, s) + + // make sure the uncompressible Content-Type isn't compressed + r.Reset() + r.Header.SetContentType("image/jpeg") + r.SetBodyString(s) + testResponseGzipExt(t, &r, s) +} + +func testResponseGzipExt(t *testing.T, r *Response, s string) { + isCompressible := isCompressibleResponse(r, s) + + var buf bytes.Buffer + var err error + bw := bufio.NewWriter(&buf) + if err = r.WriteGzip(bw); err != nil { + t.Fatalf("unexpected error: %s", err) + } + if err = bw.Flush(); err != nil { + t.Fatalf("unexpected error: %s", err) + } + + var r1 Response + br := bufio.NewReader(&buf) + if err = r1.Read(br); err != nil { + t.Fatalf("unexpected error: %s", err) + } + + ce := r1.Header.Peek("Content-Encoding") + var body []byte + if isCompressible { + if string(ce) != "gzip" { + t.Fatalf("unexpected Content-Encoding %q. Expecting %q. len(s)=%d, Content-Type: %q", + ce, "gzip", len(s), r.Header.ContentType()) + } + body, err = r1.BodyGunzip() + if err != nil { + t.Fatalf("unexpected error: %s", err) + } + } else { + if len(ce) > 0 { + t.Fatalf("Expecting empty Content-Encoding. Got %q", ce) + } + body = r1.Body() + } + if string(body) != s { + t.Fatalf("unexpected body %q. Expecting %q", body, s) + } +} + +func isCompressibleResponse(r *Response, s string) bool { + isCompressible := r.Header.isCompressibleContentType() + if isCompressible && len(s) < minCompressLen && !r.IsBodyStream() { + isCompressible = false + } + return isCompressible +} + +func TestRequestMultipartForm(t *testing.T) { + var w bytes.Buffer + mw := multipart.NewWriter(&w) + for i := 0; i < 10; i++ { + k := fmt.Sprintf("key_%d", i) + v := fmt.Sprintf("value_%d", i) + if err := mw.WriteField(k, v); err != nil { + t.Fatalf("unexpected error: %s", err) + } + } + boundary := mw.Boundary() + if err := mw.Close(); err != nil { + t.Fatalf("unexpected error: %s", err) + } + + formData := w.Bytes() + for i := 0; i < 5; i++ { + formData = testRequestMultipartForm(t, boundary, formData, 10) + } + + // verify request unmarshalling / marshalling + s := "POST / HTTP/1.1\r\nHost: aaa\r\nContent-Type: multipart/form-data; boundary=foobar\r\nContent-Length: 213\r\n\r\n--foobar\r\nContent-Disposition: form-data; name=\"key_0\"\r\n\r\nvalue_0\r\n--foobar\r\nContent-Disposition: form-data; name=\"key_1\"\r\n\r\nvalue_1\r\n--foobar\r\nContent-Disposition: form-data; name=\"key_2\"\r\n\r\nvalue_2\r\n--foobar--\r\n" + + var req Request + br := bufio.NewReader(bytes.NewBufferString(s)) + if err := req.Read(br); err != nil { + t.Fatalf("unexpected error: %s", err) + } + + s = req.String() + br = bufio.NewReader(bytes.NewBufferString(s)) + if err := req.Read(br); err != nil { + t.Fatalf("unexpected error: %s", err) + } + + testRequestMultipartForm(t, "foobar", req.Body(), 3) +} + +func testRequestMultipartForm(t *testing.T, boundary string, formData []byte, partsCount int) []byte { + s := fmt.Sprintf("POST / HTTP/1.1\r\nHost: aaa\r\nContent-Type: multipart/form-data; boundary=%s\r\nContent-Length: %d\r\n\r\n%s", + boundary, len(formData), formData) + + var req Request + + r := bytes.NewBufferString(s) + br := bufio.NewReader(r) + if err := req.Read(br); err != nil { + t.Fatalf("unexpected error: %s", err) + } + + f, err := req.MultipartForm() + if err != nil { + t.Fatalf("unexpected error: %s", err) + } + defer req.RemoveMultipartFormFiles() + + if len(f.File) > 0 { + t.Fatalf("unexpected files found in the 
multipart form: %d", len(f.File)) + } + + if len(f.Value) != partsCount { + t.Fatalf("unexpected number of values found: %d. Expecting %d", len(f.Value), partsCount) + } + + for k, vv := range f.Value { + if len(vv) != 1 { + t.Fatalf("unexpected number of values found for key=%q: %d. Expecting 1", k, len(vv)) + } + if !strings.HasPrefix(k, "key_") { + t.Fatalf("unexpected key prefix=%q. Expecting %q", k, "key_") + } + v := vv[0] + if !strings.HasPrefix(v, "value_") { + t.Fatalf("unexpected value prefix=%q. expecting %q", v, "value_") + } + if k[len("key_"):] != v[len("value_"):] { + t.Fatalf("key and value suffixes don't match: %q vs %q", k, v) + } + } + + return req.Body() +} + +func TestResponseReadLimitBody(t *testing.T) { + // response with content-length + testResponseReadLimitBodySuccess(t, "HTTP/1.1 200 OK\r\nContent-Type: aa\r\nContent-Length: 10\r\n\r\n9876543210", 10) + testResponseReadLimitBodySuccess(t, "HTTP/1.1 200 OK\r\nContent-Type: aa\r\nContent-Length: 10\r\n\r\n9876543210", 100) + testResponseReadLimitBodyError(t, "HTTP/1.1 200 OK\r\nContent-Type: aa\r\nContent-Length: 10\r\n\r\n9876543210", 9) + + // chunked response + testResponseReadLimitBodySuccess(t, "HTTP/1.1 200 OK\r\nContent-Type: aa\r\nTransfer-Encoding: chunked\r\n\r\n6\r\nfoobar\r\n3\r\nbaz\r\n0\r\n\r\n", 9) + testResponseReadLimitBodySuccess(t, "HTTP/1.1 200 OK\r\nContent-Type: aa\r\nTransfer-Encoding: chunked\r\n\r\n6\r\nfoobar\r\n3\r\nbaz\r\n0\r\n\r\n", 100) + testResponseReadLimitBodyError(t, "HTTP/1.1 200 OK\r\nContent-Type: aa\r\nTransfer-Encoding: chunked\r\n\r\n6\r\nfoobar\r\n3\r\nbaz\r\n0\r\n\r\n", 2) + + // identity response + testResponseReadLimitBodySuccess(t, "HTTP/1.1 400 OK\r\nContent-Type: aa\r\n\r\n123456", 6) + testResponseReadLimitBodySuccess(t, "HTTP/1.1 400 OK\r\nContent-Type: aa\r\n\r\n123456", 106) + testResponseReadLimitBodyError(t, "HTTP/1.1 400 OK\r\nContent-Type: aa\r\n\r\n123456", 5) +} + +func TestRequestReadLimitBody(t *testing.T) { + // request with content-length + testRequestReadLimitBodySuccess(t, "POST /foo HTTP/1.1\r\nHost: aaa.com\r\nContent-Length: 9\r\nContent-Type: aaa\r\n\r\n123456789", 9) + testRequestReadLimitBodySuccess(t, "POST /foo HTTP/1.1\r\nHost: aaa.com\r\nContent-Length: 9\r\nContent-Type: aaa\r\n\r\n123456789", 92) + testRequestReadLimitBodyError(t, "POST /foo HTTP/1.1\r\nHost: aaa.com\r\nContent-Length: 9\r\nContent-Type: aaa\r\n\r\n123456789", 5) + + // chunked request + testRequestReadLimitBodySuccess(t, "POST /a HTTP/1.1\r\nHost: a.com\r\nTransfer-Encoding: chunked\r\nContent-Type: aa\r\n\r\n6\r\nfoobar\r\n3\r\nbaz\r\n0\r\n\r\n", 9) + testRequestReadLimitBodySuccess(t, "POST /a HTTP/1.1\r\nHost: a.com\r\nTransfer-Encoding: chunked\r\nContent-Type: aa\r\n\r\n6\r\nfoobar\r\n3\r\nbaz\r\n0\r\n\r\n", 999) + testRequestReadLimitBodyError(t, "POST /a HTTP/1.1\r\nHost: a.com\r\nTransfer-Encoding: chunked\r\nContent-Type: aa\r\n\r\n6\r\nfoobar\r\n3\r\nbaz\r\n0\r\n\r\n", 8) +} + +func testResponseReadLimitBodyError(t *testing.T, s string, maxBodySize int) { + var req Response + r := bytes.NewBufferString(s) + br := bufio.NewReader(r) + err := req.ReadLimitBody(br, maxBodySize) + if err == nil { + t.Fatalf("expecting error. s=%q, maxBodySize=%d", s, maxBodySize) + } + if err != ErrBodyTooLarge { + t.Fatalf("unexpected error: %s. Expecting %s. 
s=%q, maxBodySize=%d", err, ErrBodyTooLarge, s, maxBodySize) + } +} + +func testResponseReadLimitBodySuccess(t *testing.T, s string, maxBodySize int) { + var req Response + r := bytes.NewBufferString(s) + br := bufio.NewReader(r) + if err := req.ReadLimitBody(br, maxBodySize); err != nil { + t.Fatalf("unexpected error: %s. s=%q, maxBodySize=%d", err, s, maxBodySize) + } +} + +func testRequestReadLimitBodyError(t *testing.T, s string, maxBodySize int) { + var req Request + r := bytes.NewBufferString(s) + br := bufio.NewReader(r) + err := req.ReadLimitBody(br, maxBodySize) + if err == nil { + t.Fatalf("expecting error. s=%q, maxBodySize=%d", s, maxBodySize) + } + if err != ErrBodyTooLarge { + t.Fatalf("unexpected error: %s. Expecting %s. s=%q, maxBodySize=%d", err, ErrBodyTooLarge, s, maxBodySize) + } +} + +func testRequestReadLimitBodySuccess(t *testing.T, s string, maxBodySize int) { + var req Request + r := bytes.NewBufferString(s) + br := bufio.NewReader(r) + if err := req.ReadLimitBody(br, maxBodySize); err != nil { + t.Fatalf("unexpected error: %s. s=%q, maxBodySize=%d", err, s, maxBodySize) + } +} + +func TestRequestString(t *testing.T) { + var r Request + r.SetRequestURI("http://foobar.com/aaa") + s := r.String() + expectedS := "GET /aaa HTTP/1.1\r\nUser-Agent: fasthttp\r\nHost: foobar.com\r\n\r\n" + if s != expectedS { + t.Fatalf("unexpected request: %q. Expecting %q", s, expectedS) + } +} + +func TestRequestBodyWriter(t *testing.T) { + var r Request + w := r.BodyWriter() + for i := 0; i < 10; i++ { + fmt.Fprintf(w, "%d", i) + } + if string(r.Body()) != "0123456789" { + t.Fatalf("unexpected body %q. Expecting %q", r.Body(), "0123456789") + } +} + +func TestResponseBodyWriter(t *testing.T) { + var r Response + w := r.BodyWriter() + for i := 0; i < 10; i++ { + fmt.Fprintf(w, "%d", i) + } + if string(r.Body()) != "0123456789" { + t.Fatalf("unexpected body %q. Expecting %q", r.Body(), "0123456789") + } +} + +func TestRequestWriteRequestURINoHost(t *testing.T) { + var req Request + req.Header.SetRequestURI("http://google.com/foo/bar?baz=aaa") + var w bytes.Buffer + bw := bufio.NewWriter(&w) + if err := req.Write(bw); err != nil { + t.Fatalf("unexpected error: %s", err) + } + if err := bw.Flush(); err != nil { + t.Fatalf("unexepcted error: %s", err) + } + + var req1 Request + br := bufio.NewReader(&w) + if err := req1.Read(br); err != nil { + t.Fatalf("unexpected error: %s", err) + } + if string(req1.Header.Host()) != "google.com" { + t.Fatalf("unexpected host: %q. Expecting %q", req1.Header.Host(), "google.com") + } + if string(req.Header.RequestURI()) != "/foo/bar?baz=aaa" { + t.Fatalf("unexpected requestURI: %q. 
Expecting %q", req.Header.RequestURI(), "/foo/bar?baz=aaa") + } + + // verify that Request.Write returns error on non-absolute RequestURI + req.Reset() + req.Header.SetRequestURI("/foo/bar") + w.Reset() + bw.Reset(&w) + if err := req.Write(bw); err == nil { + t.Fatalf("expecting error") + } +} + +func TestSetRequestBodyStreamFixedSize(t *testing.T) { + testSetRequestBodyStream(t, "a", false) + testSetRequestBodyStream(t, string(createFixedBody(4097)), false) + testSetRequestBodyStream(t, string(createFixedBody(100500)), false) +} + +func TestSetResponseBodyStreamFixedSize(t *testing.T) { + testSetResponseBodyStream(t, "a", false) + testSetResponseBodyStream(t, string(createFixedBody(4097)), false) + testSetResponseBodyStream(t, string(createFixedBody(100500)), false) +} + +func TestSetRequestBodyStreamChunked(t *testing.T) { + testSetRequestBodyStream(t, "", true) + + body := "foobar baz aaa bbb ccc" + testSetRequestBodyStream(t, body, true) + + body = string(createFixedBody(10001)) + testSetRequestBodyStream(t, body, true) +} + +func TestSetResponseBodyStreamChunked(t *testing.T) { + testSetResponseBodyStream(t, "", true) + + body := "foobar baz aaa bbb ccc" + testSetResponseBodyStream(t, body, true) + + body = string(createFixedBody(10001)) + testSetResponseBodyStream(t, body, true) +} + +func testSetRequestBodyStream(t *testing.T, body string, chunked bool) { + var req Request + req.Header.SetHost("foobar.com") + req.Header.SetMethod("POST") + + bodySize := len(body) + if chunked { + bodySize = -1 + } + if req.IsBodyStream() { + t.Fatalf("IsBodyStream must return false") + } + req.SetBodyStream(bytes.NewBufferString(body), bodySize) + if !req.IsBodyStream() { + t.Fatalf("IsBodyStream must return true") + } + + var w bytes.Buffer + bw := bufio.NewWriter(&w) + if err := req.Write(bw); err != nil { + t.Fatalf("unexpected error when writing request: %s. body=%q", err, body) + } + if err := bw.Flush(); err != nil { + t.Fatalf("unexpected error when flushing request: %s. body=%q", err, body) + } + + var req1 Request + br := bufio.NewReader(&w) + if err := req1.Read(br); err != nil { + t.Fatalf("unexpected error when reading request: %s. body=%q", err, body) + } + if string(req1.Body()) != body { + t.Fatalf("unexpected body %q. Expecting %q", req1.Body(), body) + } +} + +func testSetResponseBodyStream(t *testing.T, body string, chunked bool) { + var resp Response + bodySize := len(body) + if chunked { + bodySize = -1 + } + if resp.IsBodyStream() { + t.Fatalf("IsBodyStream must return false") + } + resp.SetBodyStream(bytes.NewBufferString(body), bodySize) + if !resp.IsBodyStream() { + t.Fatalf("IsBodyStream must return true") + } + + var w bytes.Buffer + bw := bufio.NewWriter(&w) + if err := resp.Write(bw); err != nil { + t.Fatalf("unexpected error when writing response: %s. body=%q", err, body) + } + if err := bw.Flush(); err != nil { + t.Fatalf("unexpected error when flushing response: %s. body=%q", err, body) + } + + var resp1 Response + br := bufio.NewReader(&w) + if err := resp1.Read(br); err != nil { + t.Fatalf("unexpected error when reading response: %s. body=%q", err, body) + } + if string(resp1.Body()) != body { + t.Fatalf("unexpected body %q. 
Expecting %q", resp1.Body(), body) + } +} + +func TestRound2(t *testing.T) { + testRound2(t, 0, 0) + testRound2(t, 1, 1) + testRound2(t, 2, 2) + testRound2(t, 3, 4) + testRound2(t, 4, 4) + testRound2(t, 5, 8) + testRound2(t, 7, 8) + testRound2(t, 8, 8) + testRound2(t, 9, 16) + testRound2(t, 0x10001, 0x20000) +} + +func testRound2(t *testing.T, n, expectedRound2 int) { + if round2(n) != expectedRound2 { + t.Fatalf("Unexpected round2(%d)=%d. Expected %d", n, round2(n), expectedRound2) + } +} + +func TestRequestReadChunked(t *testing.T) { + var req Request + + s := "POST /foo HTTP/1.1\r\nHost: google.com\r\nTransfer-Encoding: chunked\r\nContent-Type: aa/bb\r\n\r\n3\r\nabc\r\n5\r\n12345\r\n0\r\n\r\ntrail" + r := bytes.NewBufferString(s) + rb := bufio.NewReader(r) + err := req.Read(rb) + if err != nil { + t.Fatalf("Unexpected error when reading chunked request: %s", err) + } + expectedBody := "abc12345" + if string(req.Body()) != expectedBody { + t.Fatalf("Unexpected body %q. Expected %q", req.Body(), expectedBody) + } + verifyRequestHeader(t, &req.Header, 8, "/foo", "google.com", "", "aa/bb") + verifyTrailer(t, rb, "trail") +} + +func TestResponseReadWithoutBody(t *testing.T) { + var resp Response + + testResponseReadWithoutBody(t, &resp, "HTTP/1.1 304 Not Modified\r\nContent-Type: aa\r\nContent-Length: 1235\r\n\r\nfoobar", false, + 304, 1235, "aa", "foobar") + + testResponseReadWithoutBody(t, &resp, "HTTP/1.1 204 Foo Bar\r\nContent-Type: aab\r\nTransfer-Encoding: chunked\r\n\r\n123\r\nss", false, + 204, -1, "aab", "123\r\nss") + + testResponseReadWithoutBody(t, &resp, "HTTP/1.1 123 AAA\r\nContent-Type: xxx\r\nContent-Length: 3434\r\n\r\naaaa", false, + 123, 3434, "xxx", "aaaa") + + testResponseReadWithoutBody(t, &resp, "HTTP 200 OK\r\nContent-Type: text/xml\r\nContent-Length: 123\r\n\r\nxxxx", true, + 200, 123, "text/xml", "xxxx") + + // '100 Continue' must be skipped. + testResponseReadWithoutBody(t, &resp, "HTTP/1.1 100 Continue\r\nFoo-bar: baz\r\n\r\nHTTP/1.1 329 aaa\r\nContent-Type: qwe\r\nContent-Length: 894\r\n\r\nfoobar", true, + 329, 894, "qwe", "foobar") +} + +func testResponseReadWithoutBody(t *testing.T, resp *Response, s string, skipBody bool, + expectedStatusCode, expectedContentLength int, expectedContentType, expectedTrailer string) { + r := bytes.NewBufferString(s) + rb := bufio.NewReader(r) + resp.SkipBody = skipBody + err := resp.Read(rb) + if err != nil { + t.Fatalf("Unexpected error when reading response without body: %s. response=%q", err, s) + } + if len(resp.Body()) != 0 { + t.Fatalf("Unexpected response body %q. Expected %q. 
response=%q", resp.Body(), "", s) + } + verifyResponseHeader(t, &resp.Header, expectedStatusCode, expectedContentLength, expectedContentType) + verifyTrailer(t, rb, expectedTrailer) + + // verify that ordinal response is read after null-body response + resp.SkipBody = false + testResponseReadSuccess(t, resp, "HTTP/1.1 300 OK\r\nContent-Length: 5\r\nContent-Type: bar\r\n\r\n56789aaa", + 300, 5, "bar", "56789", "aaa") +} + +func TestRequestSuccess(t *testing.T) { + // empty method, user-agent and body + testRequestSuccess(t, "", "/foo/bar", "google.com", "", "", "GET") + + // non-empty user-agent + testRequestSuccess(t, "GET", "/foo/bar", "google.com", "MSIE", "", "GET") + + // non-empty method + testRequestSuccess(t, "HEAD", "/aaa", "fobar", "", "", "HEAD") + + // POST method with body + testRequestSuccess(t, "POST", "/bbb", "aaa.com", "Chrome aaa", "post body", "POST") + + // PUT method with body + testRequestSuccess(t, "PUT", "/aa/bb", "a.com", "ome aaa", "put body", "PUT") + + // only host is set + testRequestSuccess(t, "", "", "gooble.com", "", "", "GET") +} + +func TestResponseSuccess(t *testing.T) { + // 200 response + testResponseSuccess(t, 200, "test/plain", "server", "foobar", + 200, "test/plain", "server") + + // response with missing statusCode + testResponseSuccess(t, 0, "text/plain", "server", "foobar", + 200, "text/plain", "server") + + // response with missing server + testResponseSuccess(t, 500, "aaa", "", "aaadfsd", + 500, "aaa", "") + + // empty body + testResponseSuccess(t, 200, "bbb", "qwer", "", + 200, "bbb", "qwer") + + // missing content-type + testResponseSuccess(t, 200, "", "asdfsd", "asdf", + 200, string(defaultContentType), "asdfsd") +} + +func testResponseSuccess(t *testing.T, statusCode int, contentType, serverName, body string, + expectedStatusCode int, expectedContentType, expectedServerName string) { + var resp Response + resp.SetStatusCode(statusCode) + resp.Header.Set("Content-Type", contentType) + resp.Header.Set("Server", serverName) + resp.SetBody([]byte(body)) + + w := &bytes.Buffer{} + bw := bufio.NewWriter(w) + err := resp.Write(bw) + if err != nil { + t.Fatalf("Unexpected error when calling Response.Write(): %s", err) + } + if err = bw.Flush(); err != nil { + t.Fatalf("Unexpected error when flushing bufio.Writer: %s", err) + } + + var resp1 Response + br := bufio.NewReader(w) + if err = resp1.Read(br); err != nil { + t.Fatalf("Unexpected error when calling Response.Read(): %s", err) + } + if resp1.StatusCode() != expectedStatusCode { + t.Fatalf("Unexpected status code: %d. Expected %d", resp1.StatusCode(), expectedStatusCode) + } + if resp1.Header.ContentLength() != len(body) { + t.Fatalf("Unexpected content-length: %d. Expected %d", resp1.Header.ContentLength(), len(body)) + } + if string(resp1.Header.Peek("Content-Type")) != expectedContentType { + t.Fatalf("Unexpected content-type: %q. Expected %q", resp1.Header.Peek("Content-Type"), expectedContentType) + } + if string(resp1.Header.Peek("Server")) != expectedServerName { + t.Fatalf("Unexpected server: %q. Expected %q", resp1.Header.Peek("Server"), expectedServerName) + } + if !bytes.Equal(resp1.Body(), []byte(body)) { + t.Fatalf("Unexpected body: %q. 
Expected %q", resp1.Body(), body) + } +} + +func TestRequestWriteError(t *testing.T) { + // no host + testRequestWriteError(t, "", "/foo/bar", "", "", "") + + // get with body + testRequestWriteError(t, "GET", "/foo/bar", "aaa.com", "", "foobar") +} + +func testRequestWriteError(t *testing.T, method, requestURI, host, userAgent, body string) { + var req Request + + req.Header.SetMethod(method) + req.Header.SetRequestURI(requestURI) + req.Header.Set("Host", host) + req.Header.Set("User-Agent", userAgent) + req.SetBody([]byte(body)) + + w := &ByteBuffer{} + bw := bufio.NewWriter(w) + err := req.Write(bw) + if err == nil { + t.Fatalf("Expecting error when writing request=%#v", &req) + } +} + +func testRequestSuccess(t *testing.T, method, requestURI, host, userAgent, body, expectedMethod string) { + var req Request + + req.Header.SetMethod(method) + req.Header.SetRequestURI(requestURI) + req.Header.Set("Host", host) + req.Header.Set("User-Agent", userAgent) + req.SetBody([]byte(body)) + + contentType := "foobar" + if method == "POST" { + req.Header.Set("Content-Type", contentType) + } + + w := &bytes.Buffer{} + bw := bufio.NewWriter(w) + err := req.Write(bw) + if err != nil { + t.Fatalf("Unexpected error when calling Request.Write(): %s", err) + } + if err = bw.Flush(); err != nil { + t.Fatalf("Unexpected error when flushing bufio.Writer: %s", err) + } + + var req1 Request + br := bufio.NewReader(w) + if err = req1.Read(br); err != nil { + t.Fatalf("Unexpected error when calling Request.Read(): %s", err) + } + if string(req1.Header.Method()) != expectedMethod { + t.Fatalf("Unexpected method: %q. Expected %q", req1.Header.Method(), expectedMethod) + } + if len(requestURI) == 0 { + requestURI = "/" + } + if string(req1.Header.RequestURI()) != requestURI { + t.Fatalf("Unexpected RequestURI: %q. Expected %q", req1.Header.RequestURI(), requestURI) + } + if string(req1.Header.Peek("Host")) != host { + t.Fatalf("Unexpected host: %q. Expected %q", req1.Header.Peek("Host"), host) + } + if len(userAgent) == 0 { + userAgent = string(defaultUserAgent) + } + if string(req1.Header.Peek("User-Agent")) != userAgent { + t.Fatalf("Unexpected user-agent: %q. Expected %q", req1.Header.Peek("User-Agent"), userAgent) + } + if !bytes.Equal(req1.Body(), []byte(body)) { + t.Fatalf("Unexpected body: %q. Expected %q", req1.Body(), body) + } + + if method == "POST" && string(req1.Header.Peek("Content-Type")) != contentType { + t.Fatalf("Unexpected content-type: %q. 
Expected %q", req1.Header.Peek("Content-Type"), contentType) + } +} + +func TestResponseReadSuccess(t *testing.T) { + resp := &Response{} + + // usual response + testResponseReadSuccess(t, resp, "HTTP/1.1 200 OK\r\nContent-Length: 10\r\nContent-Type: foo/bar\r\n\r\n0123456789", + 200, 10, "foo/bar", "0123456789", "") + + // zero response + testResponseReadSuccess(t, resp, "HTTP/1.1 500 OK\r\nContent-Length: 0\r\nContent-Type: foo/bar\r\n\r\n", + 500, 0, "foo/bar", "", "") + + // response with trailer + testResponseReadSuccess(t, resp, "HTTP/1.1 300 OK\r\nContent-Length: 5\r\nContent-Type: bar\r\n\r\n56789aaa", + 300, 5, "bar", "56789", "aaa") + + // no conent-length ('identity' transfer-encoding) + testResponseReadSuccess(t, resp, "HTTP/1.1 200 OK\r\nContent-Type: foobar\r\n\r\nzxxc", + 200, 4, "foobar", "zxxc", "") + + // explicitly stated 'Transfer-Encoding: identity' + testResponseReadSuccess(t, resp, "HTTP/1.1 234 ss\r\nContent-Type: xxx\r\n\r\nxag", + 234, 3, "xxx", "xag", "") + + // big 'identity' response + body := string(createFixedBody(100500)) + testResponseReadSuccess(t, resp, "HTTP/1.1 200 OK\r\nContent-Type: aa\r\n\r\n"+body, + 200, 100500, "aa", body, "") + + // chunked response + testResponseReadSuccess(t, resp, "HTTP/1.1 200 OK\r\nContent-Type: text/html\r\nTransfer-Encoding: chunked\r\n\r\n4\r\nqwer\r\n2\r\nty\r\n0\r\n\r\nzzzzz", + 200, 6, "text/html", "qwerty", "zzzzz") + + // chunked response with non-chunked Transfer-Encoding. + testResponseReadSuccess(t, resp, "HTTP/1.1 230 OK\r\nContent-Type: text\r\nTransfer-Encoding: aaabbb\r\n\r\n2\r\ner\r\n2\r\nty\r\n0\r\n\r\nwe", + 230, 4, "text", "erty", "we") + + // zero chunked response + testResponseReadSuccess(t, resp, "HTTP/1.1 200 OK\r\nContent-Type: text/html\r\nTransfer-Encoding: chunked\r\n\r\n0\r\n\r\nzzz", + 200, 0, "text/html", "", "zzz") +} + +func TestResponseReadError(t *testing.T) { + resp := &Response{} + + // empty response + testResponseReadError(t, resp, "") + + // invalid header + testResponseReadError(t, resp, "foobar") + + // empty body + testResponseReadError(t, resp, "HTTP/1.1 200 OK\r\nContent-Type: aaa\r\nContent-Length: 1234\r\n\r\n") + + // short body + testResponseReadError(t, resp, "HTTP/1.1 200 OK\r\nContent-Type: aaa\r\nContent-Length: 1234\r\n\r\nshort") +} + +func testResponseReadError(t *testing.T, resp *Response, response string) { + r := bytes.NewBufferString(response) + rb := bufio.NewReader(r) + err := resp.Read(rb) + if err == nil { + t.Fatalf("Expecting error for response=%q", response) + } + + testResponseReadSuccess(t, resp, "HTTP/1.1 303 Redisred sedfs sdf\r\nContent-Type: aaa\r\nContent-Length: 5\r\n\r\nHELLOaaa", + 303, 5, "aaa", "HELLO", "aaa") +} + +func testResponseReadSuccess(t *testing.T, resp *Response, response string, expectedStatusCode, expectedContentLength int, + expectedContenType, expectedBody, expectedTrailer string) { + + r := bytes.NewBufferString(response) + rb := bufio.NewReader(r) + err := resp.Read(rb) + if err != nil { + t.Fatalf("Unexpected error: %s", err) + } + + verifyResponseHeader(t, &resp.Header, expectedStatusCode, expectedContentLength, expectedContenType) + if !bytes.Equal(resp.Body(), []byte(expectedBody)) { + t.Fatalf("Unexpected body %q. 
Expected %q", resp.Body(), []byte(expectedBody)) + } + verifyTrailer(t, rb, expectedTrailer) +} + +func TestReadBodyFixedSize(t *testing.T) { + var b []byte + + // zero-size body + testReadBodyFixedSize(t, b, 0) + + // small-size body + testReadBodyFixedSize(t, b, 3) + + // medium-size body + testReadBodyFixedSize(t, b, 1024) + + // large-size body + testReadBodyFixedSize(t, b, 1024*1024) + + // smaller body after big one + testReadBodyFixedSize(t, b, 34345) +} + +func TestReadBodyChunked(t *testing.T) { + var b []byte + + // zero-size body + testReadBodyChunked(t, b, 0) + + // small-size body + testReadBodyChunked(t, b, 5) + + // medium-size body + testReadBodyChunked(t, b, 43488) + + // big body + testReadBodyChunked(t, b, 3*1024*1024) + + // smaler body after big one + testReadBodyChunked(t, b, 12343) +} + +func TestRequestURITLS(t *testing.T) { + uriNoScheme := "//foobar.com/baz/aa?bb=dd&dd#sdf" + requestURI := "http:" + uriNoScheme + requestURITLS := "https:" + uriNoScheme + + var req Request + + req.isTLS = true + req.SetRequestURI(requestURI) + uri := req.URI().String() + if uri != requestURITLS { + t.Fatalf("unexpected request uri: %q. Expecting %q", uri, requestURITLS) + } + + req.Reset() + req.SetRequestURI(requestURI) + uri = req.URI().String() + if uri != requestURI { + t.Fatalf("unexpected request uri: %q. Expecting %q", uri, requestURI) + } +} + +func TestRequestURI(t *testing.T) { + host := "foobar.com" + requestURI := "/aaa/bb+b%20d?ccc=ddd&qqq#1334dfds&=d" + expectedPathOriginal := "/aaa/bb+b%20d" + expectedPath := "/aaa/bb+b d" + expectedQueryString := "ccc=ddd&qqq" + expectedHash := "1334dfds&=d" + + var req Request + req.Header.Set("Host", host) + req.Header.SetRequestURI(requestURI) + + uri := req.URI() + if string(uri.Host()) != host { + t.Fatalf("Unexpected host %q. Expected %q", uri.Host(), host) + } + if string(uri.PathOriginal()) != expectedPathOriginal { + t.Fatalf("Unexpected source path %q. Expected %q", uri.PathOriginal(), expectedPathOriginal) + } + if string(uri.Path()) != expectedPath { + t.Fatalf("Unexpected path %q. Expected %q", uri.Path(), expectedPath) + } + if string(uri.QueryString()) != expectedQueryString { + t.Fatalf("Unexpected query string %q. Expected %q", uri.QueryString(), expectedQueryString) + } + if string(uri.Hash()) != expectedHash { + t.Fatalf("Unexpected hash %q. Expected %q", uri.Hash(), expectedHash) + } +} + +func TestRequestPostArgsSuccess(t *testing.T) { + var req Request + + testRequestPostArgsSuccess(t, &req, "POST / HTTP/1.1\r\nHost: aaa.com\r\nContent-Type: application/x-www-form-urlencoded\r\nContent-Length: 0\r\n\r\n", 0, "foo=", "=") + + testRequestPostArgsSuccess(t, &req, "POST / HTTP/1.1\r\nHost: aaa.com\r\nContent-Type: application/x-www-form-urlencoded\r\nContent-Length: 18\r\n\r\nfoo&b%20r=b+z=&qwe", 3, "foo=", "b r=b z=", "qwe=") +} + +func TestRequestPostArgsError(t *testing.T) { + var req Request + + // non-post + testRequestPostArgsError(t, &req, "GET /aa HTTP/1.1\r\nHost: aaa\r\n\r\n") + + // invalid content-type + testRequestPostArgsError(t, &req, "POST /aa HTTP/1.1\r\nHost: aaa\r\nContent-Type: text/html\r\nContent-Length: 5\r\n\r\nabcde") +} + +func testRequestPostArgsError(t *testing.T, req *Request, s string) { + r := bytes.NewBufferString(s) + br := bufio.NewReader(r) + err := req.Read(br) + if err != nil { + t.Fatalf("Unexpected error when reading %q: %s", s, err) + } + ss := req.PostArgs().String() + if len(ss) != 0 { + t.Fatalf("unexpected post args: %q. 
Expecting empty post args", ss) + } +} + +func testRequestPostArgsSuccess(t *testing.T, req *Request, s string, expectedArgsLen int, expectedArgs ...string) { + r := bytes.NewBufferString(s) + br := bufio.NewReader(r) + err := req.Read(br) + if err != nil { + t.Fatalf("Unexpected error when reading %q: %s", s, err) + } + + args := req.PostArgs() + if args.Len() != expectedArgsLen { + t.Fatalf("Unexpected args len %d. Expected %d for %q", args.Len(), expectedArgsLen, s) + } + for _, x := range expectedArgs { + tmp := strings.SplitN(x, "=", 2) + k := tmp[0] + v := tmp[1] + vv := string(args.Peek(k)) + if vv != v { + t.Fatalf("Unexpected value for key %q: %q. Expected %q for %q", k, vv, v, s) + } + } +} + +func testReadBodyChunked(t *testing.T, b []byte, bodySize int) { + body := createFixedBody(bodySize) + chunkedBody := createChunkedBody(body) + expectedTrailer := []byte("chunked shit") + chunkedBody = append(chunkedBody, expectedTrailer...) + + r := bytes.NewBuffer(chunkedBody) + br := bufio.NewReader(r) + b, err := readBody(br, -1, 0, nil) + if err != nil { + t.Fatalf("Unexpected error for bodySize=%d: %s. body=%q, chunkedBody=%q", bodySize, err, body, chunkedBody) + } + if !bytes.Equal(b, body) { + t.Fatalf("Unexpected response read for bodySize=%d: %q. Expected %q. chunkedBody=%q", bodySize, b, body, chunkedBody) + } + verifyTrailer(t, br, string(expectedTrailer)) +} + +func testReadBodyFixedSize(t *testing.T, b []byte, bodySize int) { + body := createFixedBody(bodySize) + expectedTrailer := []byte("traler aaaa") + bodyWithTrailer := append(body, expectedTrailer...) + + r := bytes.NewBuffer(bodyWithTrailer) + br := bufio.NewReader(r) + b, err := readBody(br, bodySize, 0, nil) + if err != nil { + t.Fatalf("Unexpected error in ReadResponseBody(%d): %s", bodySize, err) + } + if !bytes.Equal(b, body) { + t.Fatalf("Unexpected response read for bodySize=%d: %q. Expected %q", bodySize, b, body) + } + verifyTrailer(t, br, string(expectedTrailer)) +} + +func createFixedBody(bodySize int) []byte { + var b []byte + for i := 0; i < bodySize; i++ { + b = append(b, byte(i%10)+'0') + } + return b +} + +func createChunkedBody(body []byte) []byte { + var b []byte + chunkSize := 1 + for len(body) > 0 { + if chunkSize > len(body) { + chunkSize = len(body) + } + b = append(b, []byte(fmt.Sprintf("%x\r\n", chunkSize))...) + b = append(b, body[:chunkSize]...) + b = append(b, []byte("\r\n")...) + body = body[chunkSize:] + chunkSize++ + } + return append(b, []byte("0\r\n\r\n")...) +} diff --git a/vendor/github.com/erikdubbelboer/fasthttp/lbclient.go b/vendor/github.com/erikdubbelboer/fasthttp/lbclient.go new file mode 100644 index 0000000..12418b6 --- /dev/null +++ b/vendor/github.com/erikdubbelboer/fasthttp/lbclient.go @@ -0,0 +1,183 @@ +package fasthttp + +import ( + "sync" + "sync/atomic" + "time" +) + +// BalancingClient is the interface for clients, which may be passed +// to LBClient.Clients. +type BalancingClient interface { + DoDeadline(req *Request, resp *Response, deadline time.Time) error + PendingRequests() int +} + +// LBClient balances requests among available LBClient.Clients. +// +// It has the following features: +// +// - Balances load among available clients using 'least loaded' + 'round robin' +// hybrid technique. +// - Dynamically decreases load on unhealthy clients. +// +// It is forbidden copying LBClient instances. Create new instances instead. +// +// It is safe calling LBClient methods from concurrently running goroutines. 
+type LBClient struct { + noCopy noCopy + + // Clients must contain non-zero clients list. + // Incoming requests are balanced among these clients. + Clients []BalancingClient + + // HealthCheck is a callback called after each request. + // + // The request, response and the error returned by the client + // is passed to HealthCheck, so the callback may determine whether + // the client is healthy. + // + // Load on the current client is decreased if HealthCheck returns false. + // + // By default HealthCheck returns false if err != nil. + HealthCheck func(req *Request, resp *Response, err error) bool + + // Timeout is the request timeout used when calling LBClient.Do. + // + // DefaultLBClientTimeout is used by default. + Timeout time.Duration + + cs []*lbClient + + // nextIdx is for spreading requests among equally loaded clients + // in a round-robin fashion. + nextIdx uint32 + + once sync.Once +} + +// DefaultLBClientTimeout is the default request timeout used by LBClient +// when calling LBClient.Do. +// +// The timeout may be overridden via LBClient.Timeout. +const DefaultLBClientTimeout = time.Second + +// DoDeadline calls DoDeadline on the least loaded client +func (cc *LBClient) DoDeadline(req *Request, resp *Response, deadline time.Time) error { + return cc.get().DoDeadline(req, resp, deadline) +} + +// DoTimeout calculates deadline and calls DoDeadline on the least loaded client +func (cc *LBClient) DoTimeout(req *Request, resp *Response, timeout time.Duration) error { + deadline := time.Now().Add(timeout) + return cc.get().DoDeadline(req, resp, deadline) +} + +// Do calls calculates deadline using LBClient.Timeout and calls DoDeadline +// on the least loaded client. +func (cc *LBClient) Do(req *Request, resp *Response) error { + timeout := cc.Timeout + if timeout <= 0 { + timeout = DefaultLBClientTimeout + } + return cc.DoTimeout(req, resp, timeout) +} + +func (cc *LBClient) init() { + if len(cc.Clients) == 0 { + panic("BUG: LBClient.Clients cannot be empty") + } + for _, c := range cc.Clients { + cc.cs = append(cc.cs, &lbClient{ + c: c, + healthCheck: cc.HealthCheck, + }) + } + + // Randomize nextIdx in order to prevent initial servers' + // hammering from a cluster of identical LBClients. + cc.nextIdx = uint32(time.Now().UnixNano()) +} + +func (cc *LBClient) get() *lbClient { + cc.once.Do(cc.init) + + cs := cc.cs + idx := atomic.AddUint32(&cc.nextIdx, 1) + idx %= uint32(len(cs)) + + minC := cs[idx] + minN := minC.PendingRequests() + if minN == 0 { + return minC + } + for _, c := range cs[idx+1:] { + n := c.PendingRequests() + if n == 0 { + return c + } + if n < minN { + minC = c + minN = n + } + } + for _, c := range cs[:idx] { + n := c.PendingRequests() + if n == 0 { + return c + } + if n < minN { + minC = c + minN = n + } + } + return minC +} + +type lbClient struct { + c BalancingClient + healthCheck func(req *Request, resp *Response, err error) bool + penalty uint32 +} + +func (c *lbClient) DoDeadline(req *Request, resp *Response, deadline time.Time) error { + err := c.c.DoDeadline(req, resp, deadline) + if !c.isHealthy(req, resp, err) && c.incPenalty() { + // Penalize the client returning error, so the next requests + // are routed to another clients. 
+ time.AfterFunc(penaltyDuration, c.decPenalty) + } + return err +} + +func (c *lbClient) PendingRequests() int { + n := c.c.PendingRequests() + m := atomic.LoadUint32(&c.penalty) + return n + int(m) +} + +func (c *lbClient) isHealthy(req *Request, resp *Response, err error) bool { + if c.healthCheck == nil { + return err == nil + } + return c.healthCheck(req, resp, err) +} + +func (c *lbClient) incPenalty() bool { + m := atomic.AddUint32(&c.penalty, 1) + if m > maxPenalty { + c.decPenalty() + return false + } + return true +} + +func (c *lbClient) decPenalty() { + atomic.AddUint32(&c.penalty, ^uint32(0)) +} + +const ( + maxPenalty = 300 + + penaltyDuration = 3 * time.Second +) diff --git a/vendor/github.com/erikdubbelboer/fasthttp/lbclient_example_test.go b/vendor/github.com/erikdubbelboer/fasthttp/lbclient_example_test.go new file mode 100644 index 0000000..d24fd59 --- /dev/null +++ b/vendor/github.com/erikdubbelboer/fasthttp/lbclient_example_test.go @@ -0,0 +1,42 @@ +package fasthttp_test + +import ( + "fmt" + "log" + + "github.com/erikdubbelboer/fasthttp" +) + +func ExampleLBClient() { + // Requests will be spread among these servers. + servers := []string{ + "google.com:80", + "foobar.com:8080", + "127.0.0.1:123", + } + + // Prepare clients for each server + var lbc fasthttp.LBClient + for _, addr := range servers { + c := &fasthttp.HostClient{ + Addr: addr, + } + lbc.Clients = append(lbc.Clients, c) + } + + // Send requests to load-balanced servers + var req fasthttp.Request + var resp fasthttp.Response + for i := 0; i < 10; i++ { + url := fmt.Sprintf("http://abcedfg/foo/bar/%d", i) + req.SetRequestURI(url) + if err := lbc.Do(&req, &resp); err != nil { + log.Fatalf("Error when sending request: %s", err) + } + if resp.StatusCode() != fasthttp.StatusOK { + log.Fatalf("unexpected status code: %d. Expecting %d", resp.StatusCode(), fasthttp.StatusOK) + } + + useResponseBody(resp.Body()) + } +} diff --git a/vendor/github.com/erikdubbelboer/fasthttp/nocopy.go b/vendor/github.com/erikdubbelboer/fasthttp/nocopy.go new file mode 100644 index 0000000..32af52e --- /dev/null +++ b/vendor/github.com/erikdubbelboer/fasthttp/nocopy.go @@ -0,0 +1,9 @@ +package fasthttp + +// Embed this type into a struct, which mustn't be copied, +// so `go vet` gives a warning if this struct is copied. +// +// See https://github.com/golang/go/issues/8005#issuecomment-190753527 for details. 
+type noCopy struct{} + +func (*noCopy) Lock() {} diff --git a/vendor/github.com/erikdubbelboer/fasthttp/peripconn.go b/vendor/github.com/erikdubbelboer/fasthttp/peripconn.go new file mode 100644 index 0000000..afd2a92 --- /dev/null +++ b/vendor/github.com/erikdubbelboer/fasthttp/peripconn.go @@ -0,0 +1,100 @@ +package fasthttp + +import ( + "fmt" + "net" + "sync" +) + +type perIPConnCounter struct { + pool sync.Pool + lock sync.Mutex + m map[uint32]int +} + +func (cc *perIPConnCounter) Register(ip uint32) int { + cc.lock.Lock() + if cc.m == nil { + cc.m = make(map[uint32]int) + } + n := cc.m[ip] + 1 + cc.m[ip] = n + cc.lock.Unlock() + return n +} + +func (cc *perIPConnCounter) Unregister(ip uint32) { + cc.lock.Lock() + if cc.m == nil { + cc.lock.Unlock() + panic("BUG: perIPConnCounter.Register() wasn't called") + } + n := cc.m[ip] - 1 + if n < 0 { + cc.lock.Unlock() + panic(fmt.Sprintf("BUG: negative per-ip counter=%d for ip=%d", n, ip)) + } + cc.m[ip] = n + cc.lock.Unlock() +} + +type perIPConn struct { + net.Conn + + ip uint32 + perIPConnCounter *perIPConnCounter +} + +func acquirePerIPConn(conn net.Conn, ip uint32, counter *perIPConnCounter) *perIPConn { + v := counter.pool.Get() + if v == nil { + v = &perIPConn{ + perIPConnCounter: counter, + } + } + c := v.(*perIPConn) + c.Conn = conn + c.ip = ip + return c +} + +func releasePerIPConn(c *perIPConn) { + c.Conn = nil + c.perIPConnCounter.pool.Put(c) +} + +func (c *perIPConn) Close() error { + err := c.Conn.Close() + c.perIPConnCounter.Unregister(c.ip) + releasePerIPConn(c) + return err +} + +func getUint32IP(c net.Conn) uint32 { + return ip2uint32(getConnIP4(c)) +} + +func getConnIP4(c net.Conn) net.IP { + addr := c.RemoteAddr() + ipAddr, ok := addr.(*net.TCPAddr) + if !ok { + return net.IPv4zero + } + return ipAddr.IP.To4() +} + +func ip2uint32(ip net.IP) uint32 { + if len(ip) != 4 { + return 0 + } + return uint32(ip[0])<<24 | uint32(ip[1])<<16 | uint32(ip[2])<<8 | uint32(ip[3]) +} + +func uint322ip(ip uint32) net.IP { + b := make([]byte, 4) + b[0] = byte(ip >> 24) + b[1] = byte(ip >> 16) + b[2] = byte(ip >> 8) + b[3] = byte(ip) + return b +} diff --git a/vendor/github.com/erikdubbelboer/fasthttp/peripconn_test.go b/vendor/github.com/erikdubbelboer/fasthttp/peripconn_test.go new file mode 100644 index 0000000..7cfb0d8 --- /dev/null +++ b/vendor/github.com/erikdubbelboer/fasthttp/peripconn_test.go @@ -0,0 +1,59 @@ +package fasthttp + +import ( + "testing" +) + +func TestIPxUint32(t *testing.T) { + testIPxUint32(t, 0) + testIPxUint32(t, 10) + testIPxUint32(t, 0x12892392) +} + +func testIPxUint32(t *testing.T, n uint32) { + ip := uint322ip(n) + nn := ip2uint32(ip) + if n != nn { + t.Fatalf("Unexpected value=%d for ip=%s. Expected %d", nn, ip, n) + } +} + +func TestPerIPConnCounter(t *testing.T) { + var cc perIPConnCounter + + expectPanic(t, func() { cc.Unregister(123) }) + + for i := 1; i < 100; i++ { + if n := cc.Register(123); n != i { + t.Fatalf("Unexpected counter value=%d. Expected %d", n, i) + } + } + + n := cc.Register(456) + if n != 1 { + t.Fatalf("Unexpected counter value=%d. Expected 1", n) + } + + for i := 1; i < 100; i++ { + cc.Unregister(123) + } + cc.Unregister(456) + + expectPanic(t, func() { cc.Unregister(123) }) + expectPanic(t, func() { cc.Unregister(456) }) + + n = cc.Register(123) + if n != 1 { + t.Fatalf("Unexpected counter value=%d. 
Expected 1", n) + } + cc.Unregister(123) +} + +func expectPanic(t *testing.T, f func()) { + defer func() { + if r := recover(); r == nil { + t.Fatalf("Expecting panic") + } + }() + f() +} diff --git a/vendor/github.com/erikdubbelboer/fasthttp/pprofhandler/pprof.go b/vendor/github.com/erikdubbelboer/fasthttp/pprofhandler/pprof.go new file mode 100644 index 0000000..5ec161a --- /dev/null +++ b/vendor/github.com/erikdubbelboer/fasthttp/pprofhandler/pprof.go @@ -0,0 +1,35 @@ +package pprofhandler + +import ( + "net/http/pprof" + "strings" + + "github.com/erikdubbelboer/fasthttp" + "github.com/erikdubbelboer/fasthttp/fasthttpadaptor" +) + +var ( + cmdline = fasthttpadaptor.NewFastHTTPHandlerFunc(pprof.Cmdline) + profile = fasthttpadaptor.NewFastHTTPHandlerFunc(pprof.Profile) + symbol = fasthttpadaptor.NewFastHTTPHandlerFunc(pprof.Symbol) + trace = fasthttpadaptor.NewFastHTTPHandlerFunc(pprof.Trace) + index = fasthttpadaptor.NewFastHTTPHandlerFunc(pprof.Index) +) + +// PprofHandler serves server runtime profiling data in the format expected by the pprof visualization tool. +// +// See https://golang.org/pkg/net/http/pprof/ for details. +func PprofHandler(ctx *fasthttp.RequestCtx) { + ctx.Response.Header.Set("Content-Type", "text/html") + if strings.HasPrefix(string(ctx.Path()), "/debug/pprof/cmdline") { + cmdline(ctx) + } else if strings.HasPrefix(string(ctx.Path()), "/debug/pprof/profile") { + profile(ctx) + } else if strings.HasPrefix(string(ctx.Path()), "/debug/pprof/symbol") { + symbol(ctx) + } else if strings.HasPrefix(string(ctx.Path()), "/debug/pprof/trace") { + trace(ctx) + } else { + index(ctx) + } +} diff --git a/vendor/github.com/erikdubbelboer/fasthttp/requestctx_setbodystreamwriter_example_test.go b/vendor/github.com/erikdubbelboer/fasthttp/requestctx_setbodystreamwriter_example_test.go new file mode 100644 index 0000000..da860be --- /dev/null +++ b/vendor/github.com/erikdubbelboer/fasthttp/requestctx_setbodystreamwriter_example_test.go @@ -0,0 +1,32 @@ +package fasthttp_test + +import ( + "bufio" + "fmt" + "log" + "time" + + "github.com/erikdubbelboer/fasthttp" +) + +func ExampleRequestCtx_SetBodyStreamWriter() { + // Start fasthttp server for streaming responses. + if err := fasthttp.ListenAndServe(":8080", responseStreamHandler); err != nil { + log.Fatalf("unexpected error in server: %s", err) + } +} + +func responseStreamHandler(ctx *fasthttp.RequestCtx) { + // Send the response in chunks and wait for a second between each chunk. + ctx.SetBodyStreamWriter(func(w *bufio.Writer) { + for i := 0; i < 10; i++ { + fmt.Fprintf(w, "this is a message number %d", i) + + // Do not forget flushing streamed data to the client. 
+ if err := w.Flush(); err != nil { + return + } + time.Sleep(time.Second) + } + }) +} diff --git a/vendor/github.com/erikdubbelboer/fasthttp/reuseport/LICENSE b/vendor/github.com/erikdubbelboer/fasthttp/reuseport/LICENSE new file mode 100644 index 0000000..5f25159 --- /dev/null +++ b/vendor/github.com/erikdubbelboer/fasthttp/reuseport/LICENSE @@ -0,0 +1,21 @@ +The MIT License (MIT) + +Copyright (c) 2014 Max Riveiro + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. \ No newline at end of file diff --git a/vendor/github.com/erikdubbelboer/fasthttp/reuseport/reuseport.go b/vendor/github.com/erikdubbelboer/fasthttp/reuseport/reuseport.go new file mode 100644 index 0000000..fe80eae --- /dev/null +++ b/vendor/github.com/erikdubbelboer/fasthttp/reuseport/reuseport.go @@ -0,0 +1,56 @@ +// +build linux darwin dragonfly freebsd netbsd openbsd rumprun + +// Package reuseport provides TCP net.Listener with SO_REUSEPORT support. +// +// SO_REUSEPORT allows linear scaling server performance on multi-CPU servers. +// See https://www.nginx.com/blog/socket-sharding-nginx-release-1-9-1/ for more details :) +// +// The package is based on https://github.com/kavu/go_reuseport . +package reuseport + +import ( + "fmt" + "github.com/valyala/tcplisten" + "net" + "strings" +) + +// ErrNoReusePort is returned if the OS doesn't support SO_REUSEPORT. +type ErrNoReusePort struct { + err error +} + +// Error implements error interface. +func (e *ErrNoReusePort) Error() string { + return fmt.Sprintf("The OS doesn't support SO_REUSEPORT: %s", e.err) +} + +// Listen returns TCP listener with SO_REUSEPORT option set. +// +// The returned listener tries enabling the following TCP options, which usually +// have positive impact on performance: +// +// - TCP_DEFER_ACCEPT. This option expects that the server reads from accepted +// connections before writing to them. +// +// - TCP_FASTOPEN. See https://lwn.net/Articles/508865/ for details. +// +// Use https://github.com/valyala/tcplisten if you want customizing +// these options. +// +// Only tcp4 and tcp6 networks are supported. +// +// ErrNoReusePort error is returned if the system doesn't support SO_REUSEPORT. 
+func Listen(network, addr string) (net.Listener, error) { + ln, err := cfg.NewListener(network, addr) + if err != nil && strings.Contains(err.Error(), "SO_REUSEPORT") { + return nil, &ErrNoReusePort{err} + } + return ln, err +} + +var cfg = &tcplisten.Config{ + ReusePort: true, + DeferAccept: true, + FastOpen: true, +} diff --git a/vendor/github.com/erikdubbelboer/fasthttp/reuseport/reuseport_example_test.go b/vendor/github.com/erikdubbelboer/fasthttp/reuseport/reuseport_example_test.go new file mode 100644 index 0000000..55121c7 --- /dev/null +++ b/vendor/github.com/erikdubbelboer/fasthttp/reuseport/reuseport_example_test.go @@ -0,0 +1,24 @@ +package reuseport_test + +import ( + "fmt" + "log" + + "github.com/erikdubbelboer/fasthttp" + "github.com/erikdubbelboer/fasthttp/reuseport" +) + +func ExampleListen() { + ln, err := reuseport.Listen("tcp4", "localhost:12345") + if err != nil { + log.Fatalf("error in reuseport listener: %s", err) + } + + if err = fasthttp.Serve(ln, requestHandler); err != nil { + log.Fatalf("error in fasthttp Server: %s", err) + } +} + +func requestHandler(ctx *fasthttp.RequestCtx) { + fmt.Fprintf(ctx, "Hello, world!") +} diff --git a/vendor/github.com/erikdubbelboer/fasthttp/reuseport/reuseport_test.go b/vendor/github.com/erikdubbelboer/fasthttp/reuseport/reuseport_test.go new file mode 100644 index 0000000..0c70e83 --- /dev/null +++ b/vendor/github.com/erikdubbelboer/fasthttp/reuseport/reuseport_test.go @@ -0,0 +1,119 @@ +package reuseport + +import ( + "fmt" + "io/ioutil" + "net" + "testing" + "time" +) + +func TestTCP4(t *testing.T) { + testNewListener(t, "tcp4", "localhost:10081", 20, 1000) +} + +func TestTCP6(t *testing.T) { + // Run this test only if tcp6 interface exists. + if hasLocalIPv6(t) { + testNewListener(t, "tcp6", "[::1]:10082", 20, 1000) + } +} + +func hasLocalIPv6(t *testing.T) bool { + addrs, err := net.InterfaceAddrs() + if err != nil { + t.Fatalf("cannot obtain local interfaces: %s", err) + } + for _, a := range addrs { + if a.String() == "::1/128" { + return true + } + } + return false +} + +func testNewListener(t *testing.T, network, addr string, serversCount, requestsCount int) { + + var lns []net.Listener + doneCh := make(chan struct{}, serversCount) + + for i := 0; i < serversCount; i++ { + ln, err := Listen(network, addr) + if err != nil { + t.Fatalf("cannot create listener %d: %s", i, err) + } + go func() { + serveEcho(t, ln) + doneCh <- struct{}{} + }() + lns = append(lns, ln) + } + + for i := 0; i < requestsCount; i++ { + c, err := net.Dial(network, addr) + if err != nil { + t.Fatalf("%d. unexpected error when dialing: %s", i, err) + } + req := fmt.Sprintf("request number %d", i) + if _, err = c.Write([]byte(req)); err != nil { + t.Fatalf("%d. unexpected error when writing request: %s", i, err) + } + if err = c.(*net.TCPConn).CloseWrite(); err != nil { + t.Fatalf("%d. unexpected error when closing write end of the connection: %s", i, err) + } + + var resp []byte + ch := make(chan struct{}) + go func() { + if resp, err = ioutil.ReadAll(c); err != nil { + t.Fatalf("%d. unexpected error when reading response: %s", i, err) + } + close(ch) + }() + select { + case <-ch: + case <-time.After(200 * time.Millisecond): + t.Fatalf("%d. timeout when waiting for response: %s", i, err) + } + + if string(resp) != req { + t.Fatalf("%d. unexpected response %q. Expecting %q", i, resp, req) + } + if err = c.Close(); err != nil { + t.Fatalf("%d. 
unexpected error when closing connection: %s", i, err) + } + } + + for _, ln := range lns { + if err := ln.Close(); err != nil { + t.Fatalf("unexpected error when closing listener: %s", err) + } + } + + for i := 0; i < serversCount; i++ { + select { + case <-doneCh: + case <-time.After(200 * time.Millisecond): + t.Fatalf("timeout when waiting for servers to be closed") + } + } +} + +func serveEcho(t *testing.T, ln net.Listener) { + for { + c, err := ln.Accept() + if err != nil { + break + } + req, err := ioutil.ReadAll(c) + if err != nil { + t.Fatalf("unepxected error when reading request: %s", err) + } + if _, err = c.Write(req); err != nil { + t.Fatalf("unexpected error when writing response: %s", err) + } + if err = c.Close(); err != nil { + t.Fatalf("unexpected error when closing connection: %s", err) + } + } +} diff --git a/vendor/github.com/erikdubbelboer/fasthttp/server.go b/vendor/github.com/erikdubbelboer/fasthttp/server.go new file mode 100644 index 0000000..3c784d5 --- /dev/null +++ b/vendor/github.com/erikdubbelboer/fasthttp/server.go @@ -0,0 +1,2082 @@ +package fasthttp + +import ( + "bufio" + "crypto/tls" + "errors" + "fmt" + "io" + "log" + "mime/multipart" + "net" + "os" + "strings" + "sync" + "sync/atomic" + "time" +) + +var errNoCertOrKeyProvided = errors.New("Cert or key has not provided") + +// ServeConn serves HTTP requests from the given connection +// using the given handler. +// +// ServeConn returns nil if all requests from the c are successfully served. +// It returns non-nil error otherwise. +// +// Connection c must immediately propagate all the data passed to Write() +// to the client. Otherwise requests' processing may hang. +// +// ServeConn closes c before returning. +func ServeConn(c net.Conn, handler RequestHandler) error { + v := serverPool.Get() + if v == nil { + v = &Server{} + } + s := v.(*Server) + s.Handler = handler + err := s.ServeConn(c) + s.Handler = nil + serverPool.Put(v) + return err +} + +var serverPool sync.Pool + +// Serve serves incoming connections from the given listener +// using the given handler. +// +// Serve blocks until the given listener returns permanent error. +func Serve(ln net.Listener, handler RequestHandler) error { + s := &Server{ + Handler: handler, + } + return s.Serve(ln) +} + +// ServeTLS serves HTTPS requests from the given net.Listener +// using the given handler. +// +// certFile and keyFile are paths to TLS certificate and key files. +func ServeTLS(ln net.Listener, certFile, keyFile string, handler RequestHandler) error { + s := &Server{ + Handler: handler, + } + return s.ServeTLS(ln, certFile, keyFile) +} + +// ServeTLSEmbed serves HTTPS requests from the given net.Listener +// using the given handler. +// +// certData and keyData must contain valid TLS certificate and key data. +func ServeTLSEmbed(ln net.Listener, certData, keyData []byte, handler RequestHandler) error { + s := &Server{ + Handler: handler, + } + return s.ServeTLSEmbed(ln, certData, keyData) +} + +// ListenAndServe serves HTTP requests from the given TCP addr +// using the given handler. +func ListenAndServe(addr string, handler RequestHandler) error { + s := &Server{ + Handler: handler, + } + return s.ListenAndServe(addr) +} + +// ListenAndServeUNIX serves HTTP requests from the given UNIX addr +// using the given handler. +// +// The function deletes existing file at addr before starting serving. +// +// The server sets the given file mode for the UNIX addr. 
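+//
+// Editorial note (not part of the upstream fasthttp sources): an illustrative
+// call, where the socket path, the 0660 mode and requestHandler are all assumed
+// placeholders for the application's own values.
+//
+//	if err := fasthttp.ListenAndServeUNIX("/tmp/app.sock", 0660, requestHandler); err != nil {
+//		log.Fatalf("error serving on unix socket: %s", err)
+//	}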
+func ListenAndServeUNIX(addr string, mode os.FileMode, handler RequestHandler) error { + s := &Server{ + Handler: handler, + } + return s.ListenAndServeUNIX(addr, mode) +} + +// ListenAndServeTLS serves HTTPS requests from the given TCP addr +// using the given handler. +// +// certFile and keyFile are paths to TLS certificate and key files. +func ListenAndServeTLS(addr, certFile, keyFile string, handler RequestHandler) error { + s := &Server{ + Handler: handler, + } + return s.ListenAndServeTLS(addr, certFile, keyFile) +} + +// ListenAndServeTLSEmbed serves HTTPS requests from the given TCP addr +// using the given handler. +// +// certData and keyData must contain valid TLS certificate and key data. +func ListenAndServeTLSEmbed(addr string, certData, keyData []byte, handler RequestHandler) error { + s := &Server{ + Handler: handler, + } + return s.ListenAndServeTLSEmbed(addr, certData, keyData) +} + +// RequestHandler must process incoming requests. +// +// RequestHandler must call ctx.TimeoutError() before returning +// if it keeps references to ctx and/or its' members after the return. +// Consider wrapping RequestHandler into TimeoutHandler if response time +// must be limited. +type RequestHandler func(ctx *RequestCtx) + +// Server implements HTTP server. +// +// Default Server settings should satisfy the majority of Server users. +// Adjust Server settings only if you really understand the consequences. +// +// It is forbidden copying Server instances. Create new Server instances +// instead. +// +// It is safe to call Server methods from concurrently running goroutines. +type Server struct { + noCopy noCopy + + // Handler for processing incoming requests. + Handler RequestHandler + + // Server name for sending in response headers. + // + // Default server name is used if left blank. + Name string + + // The maximum number of concurrent connections the server may serve. + // + // DefaultConcurrency is used if not set. + Concurrency int + + // Whether to disable keep-alive connections. + // + // The server will close all the incoming connections after sending + // the first response to client if this option is set to true. + // + // By default keep-alive connections are enabled. + DisableKeepalive bool + + // Per-connection buffer size for requests' reading. + // This also limits the maximum header size. + // + // Increase this buffer if your clients send multi-KB RequestURIs + // and/or multi-KB headers (for example, BIG cookies). + // + // Default buffer size is used if not set. + ReadBufferSize int + + // Per-connection buffer size for responses' writing. + // + // Default buffer size is used if not set. + WriteBufferSize int + + // Maximum duration for reading the full request (including body). + // + // This also limits the maximum duration for idle keep-alive + // connections. + // + // By default request read timeout is unlimited. + ReadTimeout time.Duration + + // Maximum duration for writing the full response (including body). + // + // By default response write timeout is unlimited. + WriteTimeout time.Duration + + // Maximum number of concurrent client connections allowed per IP. + // + // By default unlimited number of concurrent connections + // may be established to the server from a single IP address. + MaxConnsPerIP int + + // Maximum number of requests served per connection. + // + // The server closes connection after the last request. + // 'Connection: close' header is added to the last response. 
+ // + // By default unlimited number of requests may be served per connection. + MaxRequestsPerConn int + + // Maximum keep-alive connection lifetime. + // + // The server closes keep-alive connection after its' lifetime + // expiration. + // + // See also ReadTimeout for limiting the duration of idle keep-alive + // connections. + // + // By default keep-alive connection lifetime is unlimited. + MaxKeepaliveDuration time.Duration + + // Maximum request body size. + // + // The server rejects requests with bodies exceeding this limit. + // + // Request body size is limited by DefaultMaxRequestBodySize by default. + MaxRequestBodySize int + + // Aggressively reduces memory usage at the cost of higher CPU usage + // if set to true. + // + // Try enabling this option only if the server consumes too much memory + // serving mostly idle keep-alive connections. This may reduce memory + // usage by more than 50%. + // + // Aggressive memory usage reduction is disabled by default. + ReduceMemoryUsage bool + + // Rejects all non-GET requests if set to true. + // + // This option is useful as anti-DoS protection for servers + // accepting only GET requests. The request size is limited + // by ReadBufferSize if GetOnly is set. + // + // Server accepts all the requests by default. + GetOnly bool + + // Logs all errors, including the most frequent + // 'connection reset by peer', 'broken pipe' and 'connection timeout' + // errors. Such errors are common in production serving real-world + // clients. + // + // By default the most frequent errors such as + // 'connection reset by peer', 'broken pipe' and 'connection timeout' + // are suppressed in order to limit output log traffic. + LogAllErrors bool + + // Header names are passed as-is without normalization + // if this option is set. + // + // Disabled header names' normalization may be useful only for proxying + // incoming requests to other servers expecting case-sensitive + // header names. See https://github.com/valyala/fasthttp/issues/57 + // for details. + // + // By default request and response header names are normalized, i.e. + // The first letter and the first letters following dashes + // are uppercased, while all the other letters are lowercased. + // Examples: + // + // * HOST -> Host + // * content-type -> Content-Type + // * cONTENT-lenGTH -> Content-Length + DisableHeaderNamesNormalizing bool + + // NoDefaultServerHeader, when set to true, causes the default Server header + // to be excluded from the Response. + // + // The default Server header value is the value of the Name field or an + // internal default value in its absence. With this option set to true, + // the only time a Server header will be sent is if a non-zero length + // value is explicitly provided during a request. + NoDefaultServerHeader bool + + // Logger, which is used by RequestCtx.Logger(). + // + // By default standard logger from log package is used. + Logger Logger + + tlsConfig *tls.Config + + concurrency uint32 + concurrencyCh chan struct{} + perIPConnCounter perIPConnCounter + serverName atomic.Value + + ctxPool sync.Pool + readerPool sync.Pool + writerPool sync.Pool + hijackConnPool sync.Pool + bytePool sync.Pool +} + +// TimeoutHandler creates RequestHandler, which returns StatusRequestTimeout +// error with the given msg to the client if h didn't return during +// the given duration. 
+// +// The returned handler may return StatusTooManyRequests error with the given +// msg to the client if there are more than Server.Concurrency concurrent +// handlers h are running at the moment. +func TimeoutHandler(h RequestHandler, timeout time.Duration, msg string) RequestHandler { + if timeout <= 0 { + return h + } + + return func(ctx *RequestCtx) { + concurrencyCh := ctx.s.concurrencyCh + select { + case concurrencyCh <- struct{}{}: + default: + ctx.Error(msg, StatusTooManyRequests) + return + } + + ch := ctx.timeoutCh + if ch == nil { + ch = make(chan struct{}, 1) + ctx.timeoutCh = ch + } + go func() { + h(ctx) + ch <- struct{}{} + <-concurrencyCh + }() + ctx.timeoutTimer = initTimer(ctx.timeoutTimer, timeout) + select { + case <-ch: + case <-ctx.timeoutTimer.C: + ctx.TimeoutError(msg) + } + stopTimer(ctx.timeoutTimer) + } +} + +// CompressHandler returns RequestHandler that transparently compresses +// response body generated by h if the request contains 'gzip' or 'deflate' +// 'Accept-Encoding' header. +func CompressHandler(h RequestHandler) RequestHandler { + return CompressHandlerLevel(h, CompressDefaultCompression) +} + +// CompressHandlerLevel returns RequestHandler that transparently compresses +// response body generated by h if the request contains 'gzip' or 'deflate' +// 'Accept-Encoding' header. +// +// Level is the desired compression level: +// +// * CompressNoCompression +// * CompressBestSpeed +// * CompressBestCompression +// * CompressDefaultCompression +// * CompressHuffmanOnly +func CompressHandlerLevel(h RequestHandler, level int) RequestHandler { + return func(ctx *RequestCtx) { + h(ctx) + if ctx.Request.Header.HasAcceptEncodingBytes(strGzip) { + ctx.Response.gzipBody(level) + } else if ctx.Request.Header.HasAcceptEncodingBytes(strDeflate) { + ctx.Response.deflateBody(level) + } + } +} + +// RequestCtx contains incoming request and manages outgoing response. +// +// It is forbidden copying RequestCtx instances. +// +// RequestHandler should avoid holding references to incoming RequestCtx and/or +// its' members after the return. +// If holding RequestCtx references after the return is unavoidable +// (for instance, ctx is passed to a separate goroutine and ctx lifetime cannot +// be controlled), then the RequestHandler MUST call ctx.TimeoutError() +// before return. +// +// It is unsafe modifying/reading RequestCtx instance from concurrently +// running goroutines. The only exception is TimeoutError*, which may be called +// while other goroutines accessing RequestCtx. +type RequestCtx struct { + noCopy noCopy + + // Incoming request. + // + // Copying Request by value is forbidden. Use pointer to Request instead. + Request Request + + // Outgoing response. + // + // Copying Response by value is forbidden. Use pointer to Response instead. + Response Response + + userValues userData + + lastReadDuration time.Duration + + connID uint64 + connRequestNum uint64 + connTime time.Time + + time time.Time + + logger ctxLogger + s *Server + c net.Conn + fbr firstByteReader + + timeoutResponse *Response + timeoutCh chan struct{} + timeoutTimer *time.Timer + + hijackHandler HijackHandler +} + +// HijackHandler must process the hijacked connection c. +// +// The connection c is automatically closed after returning from HijackHandler. +// +// The connection c must not be used after returning from the handler. +type HijackHandler func(c net.Conn) + +// Hijack registers the given handler for connection hijacking. 
+// +// The handler is called after returning from RequestHandler +// and sending http response. The current connection is passed +// to the handler. The connection is automatically closed after +// returning from the handler. +// +// The server skips calling the handler in the following cases: +// +// * 'Connection: close' header exists in either request or response. +// * Unexpected error during response writing to the connection. +// +// The server stops processing requests from hijacked connections. +// Server limits such as Concurrency, ReadTimeout, WriteTimeout, etc. +// aren't applied to hijacked connections. +// +// The handler must not retain references to ctx members. +// +// Arbitrary 'Connection: Upgrade' protocols may be implemented +// with HijackHandler. For instance, +// +// * WebSocket ( https://en.wikipedia.org/wiki/WebSocket ) +// * HTTP/2.0 ( https://en.wikipedia.org/wiki/HTTP/2 ) +// +func (ctx *RequestCtx) Hijack(handler HijackHandler) { + ctx.hijackHandler = handler +} + +// Hijacked returns true after Hijack is called. +func (ctx *RequestCtx) Hijacked() bool { + return ctx.hijackHandler != nil +} + +// SetUserValue stores the given value (arbitrary object) +// under the given key in ctx. +// +// The value stored in ctx may be obtained by UserValue*. +// +// This functionality may be useful for passing arbitrary values between +// functions involved in request processing. +// +// All the values are removed from ctx after returning from the top +// RequestHandler. Additionally, Close method is called on each value +// implementing io.Closer before removing the value from ctx. +func (ctx *RequestCtx) SetUserValue(key string, value interface{}) { + ctx.userValues.Set(key, value) +} + +// SetUserValueBytes stores the given value (arbitrary object) +// under the given key in ctx. +// +// The value stored in ctx may be obtained by UserValue*. +// +// This functionality may be useful for passing arbitrary values between +// functions involved in request processing. +// +// All the values stored in ctx are deleted after returning from RequestHandler. +func (ctx *RequestCtx) SetUserValueBytes(key []byte, value interface{}) { + ctx.userValues.SetBytes(key, value) +} + +// UserValue returns the value stored via SetUserValue* under the given key. +func (ctx *RequestCtx) UserValue(key string) interface{} { + return ctx.userValues.Get(key) +} + +// UserValueBytes returns the value stored via SetUserValue* +// under the given key. +func (ctx *RequestCtx) UserValueBytes(key []byte) interface{} { + return ctx.userValues.GetBytes(key) +} + +// VisitUserValues calls visitor for each existing userValue. +// +// visitor must not retain references to key and value after returning. +// Make key and/or value copies if you need storing them after returning. +func (ctx *RequestCtx) VisitUserValues(visitor func([]byte, interface{})) { + for i, n := 0, len(ctx.userValues); i < n; i++ { + kv := &ctx.userValues[i] + visitor(kv.key, kv.value) + } +} + +type connTLSer interface { + ConnectionState() tls.ConnectionState +} + +// IsTLS returns true if the underlying connection is tls.Conn. +// +// tls.Conn is an encrypted connection (aka SSL, HTTPS). +func (ctx *RequestCtx) IsTLS() bool { + // cast to (connTLSer) instead of (*tls.Conn), since it catches + // cases with overridden tls.Conn such as: + // + // type customConn struct { + // *tls.Conn + // + // // other custom fields here + // } + _, ok := ctx.c.(connTLSer) + return ok +} + +// TLSConnectionState returns TLS connection state. 
+// +// The function returns nil if the underlying connection isn't tls.Conn. +// +// The returned state may be used for verifying TLS version, client certificates, +// etc. +func (ctx *RequestCtx) TLSConnectionState() *tls.ConnectionState { + tlsConn, ok := ctx.c.(connTLSer) + if !ok { + return nil + } + state := tlsConn.ConnectionState() + return &state +} + +// Conn returns reference to an underlying net.Conn. +// +// It should be used to expose connection-level data from a custom net.Conn, +// reading from or writing to a connection will end badly. +func (ctx *RequestCtx) Conn() net.Conn { + return ctx.c +} + +type firstByteReader struct { + c net.Conn + ch byte + byteRead bool +} + +func (r *firstByteReader) Read(b []byte) (int, error) { + if len(b) == 0 { + return 0, nil + } + nn := 0 + if !r.byteRead { + b[0] = r.ch + b = b[1:] + r.byteRead = true + nn = 1 + } + n, err := r.c.Read(b) + return n + nn, err +} + +// Logger is used for logging formatted messages. +type Logger interface { + // Printf must have the same semantics as log.Printf. + Printf(format string, args ...interface{}) +} + +var ctxLoggerLock sync.Mutex + +type ctxLogger struct { + ctx *RequestCtx + logger Logger +} + +func (cl *ctxLogger) Printf(format string, args ...interface{}) { + ctxLoggerLock.Lock() + msg := fmt.Sprintf(format, args...) + ctx := cl.ctx + cl.logger.Printf("%.3f %s - %s", time.Since(ctx.Time()).Seconds(), ctx.String(), msg) + ctxLoggerLock.Unlock() +} + +var zeroTCPAddr = &net.TCPAddr{ + IP: net.IPv4zero, +} + +// String returns unique string representation of the ctx. +// +// The returned value may be useful for logging. +func (ctx *RequestCtx) String() string { + return fmt.Sprintf("#%016X - %s<->%s - %s %s", ctx.ID(), ctx.LocalAddr(), ctx.RemoteAddr(), ctx.Request.Header.Method(), ctx.URI().FullURI()) +} + +// ID returns unique ID of the request. +func (ctx *RequestCtx) ID() uint64 { + return (ctx.connID << 32) | ctx.connRequestNum +} + +// ConnID returns unique connection ID. +// +// This ID may be used to match distinct requests to the same incoming +// connection. +func (ctx *RequestCtx) ConnID() uint64 { + return ctx.connID +} + +// Time returns RequestHandler call time. +func (ctx *RequestCtx) Time() time.Time { + return ctx.time +} + +// ConnTime returns the time server starts serving the connection +// the current request came from. +func (ctx *RequestCtx) ConnTime() time.Time { + return ctx.connTime +} + +// ConnRequestNum returns request sequence number +// for the current connection. +// +// Sequence starts with 1. +func (ctx *RequestCtx) ConnRequestNum() uint64 { + return ctx.connRequestNum +} + +// SetConnectionClose sets 'Connection: close' response header and closes +// connection after the RequestHandler returns. +func (ctx *RequestCtx) SetConnectionClose() { + ctx.Response.SetConnectionClose() +} + +// SetStatusCode sets response status code. +func (ctx *RequestCtx) SetStatusCode(statusCode int) { + ctx.Response.SetStatusCode(statusCode) +} + +// SetContentType sets response Content-Type. +func (ctx *RequestCtx) SetContentType(contentType string) { + ctx.Response.Header.SetContentType(contentType) +} + +// SetContentTypeBytes sets response Content-Type. +// +// It is safe modifying contentType buffer after function return. +func (ctx *RequestCtx) SetContentTypeBytes(contentType []byte) { + ctx.Response.Header.SetContentTypeBytes(contentType) +} + +// RequestURI returns RequestURI. +// +// This uri is valid until returning from RequestHandler. 
+func (ctx *RequestCtx) RequestURI() []byte { + return ctx.Request.Header.RequestURI() +} + +// URI returns requested uri. +// +// The uri is valid until returning from RequestHandler. +func (ctx *RequestCtx) URI() *URI { + return ctx.Request.URI() +} + +// Referer returns request referer. +// +// The referer is valid until returning from RequestHandler. +func (ctx *RequestCtx) Referer() []byte { + return ctx.Request.Header.Referer() +} + +// UserAgent returns User-Agent header value from the request. +func (ctx *RequestCtx) UserAgent() []byte { + return ctx.Request.Header.UserAgent() +} + +// Path returns requested path. +// +// The path is valid until returning from RequestHandler. +func (ctx *RequestCtx) Path() []byte { + return ctx.URI().Path() +} + +// Host returns requested host. +// +// The host is valid until returning from RequestHandler. +func (ctx *RequestCtx) Host() []byte { + return ctx.URI().Host() +} + +// QueryArgs returns query arguments from RequestURI. +// +// It doesn't return POST'ed arguments - use PostArgs() for this. +// +// Returned arguments are valid until returning from RequestHandler. +// +// See also PostArgs, FormValue and FormFile. +func (ctx *RequestCtx) QueryArgs() *Args { + return ctx.URI().QueryArgs() +} + +// PostArgs returns POST arguments. +// +// It doesn't return query arguments from RequestURI - use QueryArgs for this. +// +// Returned arguments are valid until returning from RequestHandler. +// +// See also QueryArgs, FormValue and FormFile. +func (ctx *RequestCtx) PostArgs() *Args { + return ctx.Request.PostArgs() +} + +// MultipartForm returns requests's multipart form. +// +// Returns ErrNoMultipartForm if request's content-type +// isn't 'multipart/form-data'. +// +// All uploaded temporary files are automatically deleted after +// returning from RequestHandler. Either move or copy uploaded files +// into new place if you want retaining them. +// +// Use SaveMultipartFile function for permanently saving uploaded file. +// +// The returned form is valid until returning from RequestHandler. +// +// See also FormFile and FormValue. +func (ctx *RequestCtx) MultipartForm() (*multipart.Form, error) { + return ctx.Request.MultipartForm() +} + +// FormFile returns uploaded file associated with the given multipart form key. +// +// The file is automatically deleted after returning from RequestHandler, +// so either move or copy uploaded file into new place if you want retaining it. +// +// Use SaveMultipartFile function for permanently saving uploaded file. +// +// The returned file header is valid until returning from RequestHandler. +func (ctx *RequestCtx) FormFile(key string) (*multipart.FileHeader, error) { + mf, err := ctx.MultipartForm() + if err != nil { + return nil, err + } + if mf.File == nil { + return nil, err + } + fhh := mf.File[key] + if fhh == nil { + return nil, ErrMissingFile + } + return fhh[0], nil +} + +// ErrMissingFile may be returned from FormFile when the is no uploaded file +// associated with the given multipart form key. +var ErrMissingFile = errors.New("there is no uploaded file associated with the given key") + +// SaveMultipartFile saves multipart file fh under the given filename path. 
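+//
+// Editorial note (not part of the upstream fasthttp sources): a hedged sketch
+// of FormFile + SaveMultipartFile inside a handler; the "file" form key and the
+// destination path are assumptions, not values mandated by the API.
+//
+//	func uploadHandler(ctx *fasthttp.RequestCtx) {
+//		fh, err := ctx.FormFile("file")
+//		if err != nil {
+//			ctx.Error("missing upload", fasthttp.StatusBadRequest)
+//			return
+//		}
+//		if err := fasthttp.SaveMultipartFile(fh, "/tmp/upload.bin"); err != nil {
+//			ctx.Error("cannot save upload", fasthttp.StatusInternalServerError)
+//			return
+//		}
+//		ctx.SetBodyString("saved")
+//	}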
+func SaveMultipartFile(fh *multipart.FileHeader, path string) error { + f, err := fh.Open() + if err != nil { + return err + } + defer f.Close() + + if ff, ok := f.(*os.File); ok { + return os.Rename(ff.Name(), path) + } + + ff, err := os.Create(path) + if err != nil { + return err + } + defer ff.Close() + _, err = copyZeroAlloc(ff, f) + return err +} + +// FormValue returns form value associated with the given key. +// +// The value is searched in the following places: +// +// * Query string. +// * POST or PUT body. +// +// There are more fine-grained methods for obtaining form values: +// +// * QueryArgs for obtaining values from query string. +// * PostArgs for obtaining values from POST or PUT body. +// * MultipartForm for obtaining values from multipart form. +// * FormFile for obtaining uploaded files. +// +// The returned value is valid until returning from RequestHandler. +func (ctx *RequestCtx) FormValue(key string) []byte { + v := ctx.QueryArgs().Peek(key) + if len(v) > 0 { + return v + } + v = ctx.PostArgs().Peek(key) + if len(v) > 0 { + return v + } + mf, err := ctx.MultipartForm() + if err == nil && mf.Value != nil { + vv := mf.Value[key] + if len(vv) > 0 { + return []byte(vv[0]) + } + } + return nil +} + +// IsGet returns true if request method is GET. +func (ctx *RequestCtx) IsGet() bool { + return ctx.Request.Header.IsGet() +} + +// IsPost returns true if request method is POST. +func (ctx *RequestCtx) IsPost() bool { + return ctx.Request.Header.IsPost() +} + +// IsPut returns true if request method is PUT. +func (ctx *RequestCtx) IsPut() bool { + return ctx.Request.Header.IsPut() +} + +// IsDelete returns true if request method is DELETE. +func (ctx *RequestCtx) IsDelete() bool { + return ctx.Request.Header.IsDelete() +} + +// Method return request method. +// +// Returned value is valid until returning from RequestHandler. +func (ctx *RequestCtx) Method() []byte { + return ctx.Request.Header.Method() +} + +// IsHead returns true if request method is HEAD. +func (ctx *RequestCtx) IsHead() bool { + return ctx.Request.Header.IsHead() +} + +// RemoteAddr returns client address for the given request. +// +// Always returns non-nil result. +func (ctx *RequestCtx) RemoteAddr() net.Addr { + if ctx.c == nil { + return zeroTCPAddr + } + addr := ctx.c.RemoteAddr() + if addr == nil { + return zeroTCPAddr + } + return addr +} + +// LocalAddr returns server address for the given request. +// +// Always returns non-nil result. +func (ctx *RequestCtx) LocalAddr() net.Addr { + if ctx.c == nil { + return zeroTCPAddr + } + addr := ctx.c.LocalAddr() + if addr == nil { + return zeroTCPAddr + } + return addr +} + +// RemoteIP returns the client ip the request came from. +// +// Always returns non-nil result. +func (ctx *RequestCtx) RemoteIP() net.IP { + return addrToIP(ctx.RemoteAddr()) +} + +// LocalIP returns the server ip the request came to. +// +// Always returns non-nil result. +func (ctx *RequestCtx) LocalIP() net.IP { + return addrToIP(ctx.LocalAddr()) +} + +func addrToIP(addr net.Addr) net.IP { + x, ok := addr.(*net.TCPAddr) + if !ok { + return net.IPv4zero + } + return x.IP +} + +// Error sets response status code to the given value and sets response body +// to the given message. +func (ctx *RequestCtx) Error(msg string, statusCode int) { + ctx.Response.Reset() + ctx.SetStatusCode(statusCode) + ctx.SetContentTypeBytes(defaultContentType) + ctx.SetBodyString(msg) +} + +// Success sets response Content-Type and body to the given values. 
+func (ctx *RequestCtx) Success(contentType string, body []byte) { + ctx.SetContentType(contentType) + ctx.SetBody(body) +} + +// SuccessString sets response Content-Type and body to the given values. +func (ctx *RequestCtx) SuccessString(contentType, body string) { + ctx.SetContentType(contentType) + ctx.SetBodyString(body) +} + +// Redirect sets 'Location: uri' response header and sets the given statusCode. +// +// statusCode must have one of the following values: +// +// * StatusMovedPermanently (301) +// * StatusFound (302) +// * StatusSeeOther (303) +// * StatusTemporaryRedirect (307) +// +// All other statusCode values are replaced by StatusFound (302). +// +// The redirect uri may be either absolute or relative to the current +// request uri. +func (ctx *RequestCtx) Redirect(uri string, statusCode int) { + u := AcquireURI() + ctx.URI().CopyTo(u) + u.Update(uri) + ctx.redirect(u.FullURI(), statusCode) + ReleaseURI(u) +} + +// RedirectBytes sets 'Location: uri' response header and sets +// the given statusCode. +// +// statusCode must have one of the following values: +// +// * StatusMovedPermanently (301) +// * StatusFound (302) +// * StatusSeeOther (303) +// * StatusTemporaryRedirect (307) +// +// All other statusCode values are replaced by StatusFound (302). +// +// The redirect uri may be either absolute or relative to the current +// request uri. +func (ctx *RequestCtx) RedirectBytes(uri []byte, statusCode int) { + s := b2s(uri) + ctx.Redirect(s, statusCode) +} + +func (ctx *RequestCtx) redirect(uri []byte, statusCode int) { + ctx.Response.Header.SetCanonical(strLocation, uri) + statusCode = getRedirectStatusCode(statusCode) + ctx.Response.SetStatusCode(statusCode) +} + +func getRedirectStatusCode(statusCode int) int { + if statusCode == StatusMovedPermanently || statusCode == StatusFound || + statusCode == StatusSeeOther || statusCode == StatusTemporaryRedirect { + return statusCode + } + return StatusFound +} + +// SetBody sets response body to the given value. +// +// It is safe re-using body argument after the function returns. +func (ctx *RequestCtx) SetBody(body []byte) { + ctx.Response.SetBody(body) +} + +// SetBodyString sets response body to the given value. +func (ctx *RequestCtx) SetBodyString(body string) { + ctx.Response.SetBodyString(body) +} + +// ResetBody resets response body contents. +func (ctx *RequestCtx) ResetBody() { + ctx.Response.ResetBody() +} + +// SendFile sends local file contents from the given path as response body. +// +// This is a shortcut to ServeFile(ctx, path). +// +// SendFile logs all the errors via ctx.Logger. +// +// See also ServeFile, FSHandler and FS. +func (ctx *RequestCtx) SendFile(path string) { + ServeFile(ctx, path) +} + +// SendFileBytes sends local file contents from the given path as response body. +// +// This is a shortcut to ServeFileBytes(ctx, path). +// +// SendFileBytes logs all the errors via ctx.Logger. +// +// See also ServeFileBytes, FSHandler and FS. +func (ctx *RequestCtx) SendFileBytes(path []byte) { + ServeFileBytes(ctx, path) +} + +// IfModifiedSince returns true if lastModified exceeds 'If-Modified-Since' +// value from the request header. +// +// The function returns true also 'If-Modified-Since' request header is missing. 
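+//
+// Editorial note (not part of the upstream fasthttp sources): a sketch of a
+// conditional-GET handler built on IfModifiedSince and NotModified; lastModified
+// and the response body are placeholders.
+//
+//	func cachedHandler(ctx *fasthttp.RequestCtx) {
+//		if !ctx.IfModifiedSince(lastModified) {
+//			ctx.NotModified()
+//			return
+//		}
+//		ctx.Response.Header.Set("Last-Modified", lastModified.UTC().Format("Mon, 02 Jan 2006 15:04:05 GMT"))
+//		ctx.SetBodyString("fresh content")
+//	}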
+func (ctx *RequestCtx) IfModifiedSince(lastModified time.Time) bool { + ifModStr := ctx.Request.Header.peek(strIfModifiedSince) + if len(ifModStr) == 0 { + return true + } + ifMod, err := ParseHTTPDate(ifModStr) + if err != nil { + return true + } + lastModified = lastModified.Truncate(time.Second) + return ifMod.Before(lastModified) +} + +// NotModified resets response and sets '304 Not Modified' response status code. +func (ctx *RequestCtx) NotModified() { + ctx.Response.Reset() + ctx.SetStatusCode(StatusNotModified) +} + +// NotFound resets response and sets '404 Not Found' response status code. +func (ctx *RequestCtx) NotFound() { + ctx.Response.Reset() + ctx.SetStatusCode(StatusNotFound) + ctx.SetBodyString("404 Page not found") +} + +// Write writes p into response body. +func (ctx *RequestCtx) Write(p []byte) (int, error) { + ctx.Response.AppendBody(p) + return len(p), nil +} + +// WriteString appends s to response body. +func (ctx *RequestCtx) WriteString(s string) (int, error) { + ctx.Response.AppendBodyString(s) + return len(s), nil +} + +// PostBody returns POST request body. +// +// The returned value is valid until RequestHandler return. +func (ctx *RequestCtx) PostBody() []byte { + return ctx.Request.Body() +} + +// SetBodyStream sets response body stream and, optionally body size. +// +// bodyStream.Close() is called after finishing reading all body data +// if it implements io.Closer. +// +// If bodySize is >= 0, then bodySize bytes must be provided by bodyStream +// before returning io.EOF. +// +// If bodySize < 0, then bodyStream is read until io.EOF. +// +// See also SetBodyStreamWriter. +func (ctx *RequestCtx) SetBodyStream(bodyStream io.Reader, bodySize int) { + ctx.Response.SetBodyStream(bodyStream, bodySize) +} + +// SetBodyStreamWriter registers the given stream writer for populating +// response body. +// +// Access to RequestCtx and/or its' members is forbidden from sw. +// +// This function may be used in the following cases: +// +// * if response body is too big (more than 10MB). +// * if response body is streamed from slow external sources. +// * if response body must be streamed to the client in chunks. +// (aka `http server push`). +func (ctx *RequestCtx) SetBodyStreamWriter(sw StreamWriter) { + ctx.Response.SetBodyStreamWriter(sw) +} + +// IsBodyStream returns true if response body is set via SetBodyStream*. +func (ctx *RequestCtx) IsBodyStream() bool { + return ctx.Response.IsBodyStream() +} + +// Logger returns logger, which may be used for logging arbitrary +// request-specific messages inside RequestHandler. +// +// Each message logged via returned logger contains request-specific information +// such as request id, request duration, local address, remote address, +// request method and request url. +// +// It is safe re-using returned logger for logging multiple messages +// for the current request. +// +// The returned logger is valid until returning from RequestHandler. +func (ctx *RequestCtx) Logger() Logger { + if ctx.logger.ctx == nil { + ctx.logger.ctx = ctx + } + if ctx.logger.logger == nil { + ctx.logger.logger = ctx.s.logger() + } + return &ctx.logger +} + +// TimeoutError sets response status code to StatusRequestTimeout and sets +// body to the given msg. +// +// All response modifications after TimeoutError call are ignored. +// +// TimeoutError MUST be called before returning from RequestHandler if there are +// references to ctx and/or its members in other goroutines remain. +// +// Usage of this function is discouraged. 
Prefer eliminating ctx references +// from pending goroutines instead of using this function. +func (ctx *RequestCtx) TimeoutError(msg string) { + ctx.TimeoutErrorWithCode(msg, StatusRequestTimeout) +} + +// TimeoutErrorWithCode sets response body to msg and response status +// code to statusCode. +// +// All response modifications after TimeoutErrorWithCode call are ignored. +// +// TimeoutErrorWithCode MUST be called before returning from RequestHandler +// if there are references to ctx and/or its members in other goroutines remain. +// +// Usage of this function is discouraged. Prefer eliminating ctx references +// from pending goroutines instead of using this function. +func (ctx *RequestCtx) TimeoutErrorWithCode(msg string, statusCode int) { + var resp Response + resp.SetStatusCode(statusCode) + resp.SetBodyString(msg) + ctx.TimeoutErrorWithResponse(&resp) +} + +// TimeoutErrorWithResponse marks the ctx as timed out and sends the given +// response to the client. +// +// All ctx modifications after TimeoutErrorWithResponse call are ignored. +// +// TimeoutErrorWithResponse MUST be called before returning from RequestHandler +// if there are references to ctx and/or its members in other goroutines remain. +// +// Usage of this function is discouraged. Prefer eliminating ctx references +// from pending goroutines instead of using this function. +func (ctx *RequestCtx) TimeoutErrorWithResponse(resp *Response) { + respCopy := &Response{} + resp.CopyTo(respCopy) + ctx.timeoutResponse = respCopy +} + +// ListenAndServe serves HTTP requests from the given TCP4 addr. +// +// Pass custom listener to Serve if you need listening on non-TCP4 media +// such as IPv6. +func (s *Server) ListenAndServe(addr string) error { + ln, err := net.Listen("tcp4", addr) + if err != nil { + return err + } + return s.Serve(ln) +} + +// ListenAndServeUNIX serves HTTP requests from the given UNIX addr. +// +// The function deletes existing file at addr before starting serving. +// +// The server sets the given file mode for the UNIX addr. +func (s *Server) ListenAndServeUNIX(addr string, mode os.FileMode) error { + if err := os.Remove(addr); err != nil && !os.IsNotExist(err) { + return fmt.Errorf("unexpected error when trying to remove unix socket file %q: %s", addr, err) + } + ln, err := net.Listen("unix", addr) + if err != nil { + return err + } + if err = os.Chmod(addr, mode); err != nil { + return fmt.Errorf("cannot chmod %#o for %q: %s", mode, addr, err) + } + return s.Serve(ln) +} + +// ListenAndServeTLS serves HTTPS requests from the given TCP4 addr. +// +// certFile and keyFile are paths to TLS certificate and key files. +// +// Pass custom listener to Serve if you need listening on non-TCP4 media +// such as IPv6. +// +// If the certFile or keyFile has not been provided to the server structure, +// the function will use the previously added TLS configuration. +func (s *Server) ListenAndServeTLS(addr, certFile, keyFile string) error { + ln, err := net.Listen("tcp4", addr) + if err != nil { + return err + } + return s.ServeTLS(ln, certFile, keyFile) +} + +// ListenAndServeTLSEmbed serves HTTPS requests from the given TCP4 addr. +// +// certData and keyData must contain valid TLS certificate and key data. +// +// Pass custom listener to Serve if you need listening on arbitrary media +// such as IPv6. +// +// If the certFile or keyFile has not been provided the server structure, +// the function will use previously added TLS configuration. 
+func (s *Server) ListenAndServeTLSEmbed(addr string, certData, keyData []byte) error { + ln, err := net.Listen("tcp4", addr) + if err != nil { + return err + } + return s.ServeTLSEmbed(ln, certData, keyData) +} + +// ServeTLS serves HTTPS requests from the given listener. +// +// certFile and keyFile are paths to TLS certificate and key files. +// +// If the certFile or keyFile has not been provided the server structure, +// the function will use previously added TLS configuration. +func (s *Server) ServeTLS(ln net.Listener, certFile, keyFile string) error { + err := s.AppendCert(certFile, keyFile) + if err != nil && err != errNoCertOrKeyProvided { + return err + } + if s.tlsConfig == nil { + return errNoCertOrKeyProvided + } + s.tlsConfig.BuildNameToCertificate() + + return s.Serve( + tls.NewListener(ln, s.tlsConfig), + ) +} + +// ServeTLSEmbed serves HTTPS requests from the given listener. +// +// certData and keyData must contain valid TLS certificate and key data. +// +// If the certFile or keyFile has not been provided the server structure, +// the function will use previously added TLS configuration. +func (s *Server) ServeTLSEmbed(ln net.Listener, certData, keyData []byte) error { + err := s.AppendCertEmbed(certData, keyData) + if err != nil && err != errNoCertOrKeyProvided { + return err + } + if s.tlsConfig == nil { + return errNoCertOrKeyProvided + } + s.tlsConfig.BuildNameToCertificate() + + return s.Serve( + tls.NewListener(ln, s.tlsConfig), + ) +} + +// AppendCert appends certificate and keyfile to TLS Configuration. +// +// This function allows programmer to handle multiple domains +// in one server structure. See examples/multidomain +func (s *Server) AppendCert(certFile, keyFile string) error { + if len(certFile) == 0 && len(keyFile) == 0 { + return errNoCertOrKeyProvided + } + + cert, err := tls.LoadX509KeyPair(certFile, keyFile) + if err != nil { + return fmt.Errorf("cannot load TLS key pair from certFile=%q and keyFile=%q: %s", certFile, keyFile, err) + } + + if s.tlsConfig == nil { + s.tlsConfig = &tls.Config{ + Certificates: []tls.Certificate{cert}, + PreferServerCipherSuites: true, + } + return nil + } + + s.tlsConfig.Certificates = append(s.tlsConfig.Certificates, cert) + return nil +} + +// AppendCertEmbed does the same as AppendCert but using in-memory data. +func (s *Server) AppendCertEmbed(certData, keyData []byte) error { + if len(certData) == 0 && len(keyData) == 0 { + return errNoCertOrKeyProvided + } + + cert, err := tls.X509KeyPair(certData, keyData) + if err != nil { + return fmt.Errorf("cannot load TLS key pair from the provided certData(%d) and keyData(%d): %s", + len(certData), len(keyData), err) + } + + if s.tlsConfig == nil { + s.tlsConfig = &tls.Config{ + Certificates: []tls.Certificate{cert}, + PreferServerCipherSuites: true, + } + return nil + } + + s.tlsConfig.Certificates = append(s.tlsConfig.Certificates, cert) + return nil +} + +// DefaultConcurrency is the maximum number of concurrent connections +// the Server may serve by default (i.e. if Server.Concurrency isn't set). +const DefaultConcurrency = 256 * 1024 + +// Serve serves incoming connections from the given listener. +// +// Serve blocks until the given listener returns permanent error. 
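+//
+// Editorial note (not part of the upstream fasthttp sources): the ListenAndServe*
+// helpers above bind to "tcp4" only, so one way to serve IPv6 traffic is to
+// create the listener yourself and hand it to Serve; the address, timeout and
+// requestHandler below are assumed placeholders.
+//
+//	ln, err := net.Listen("tcp6", "[::1]:8080")
+//	if err != nil {
+//		log.Fatalf("cannot listen on tcp6: %s", err)
+//	}
+//	s := &fasthttp.Server{
+//		Handler:     requestHandler,
+//		ReadTimeout: 10 * time.Second,
+//	}
+//	if err := s.Serve(ln); err != nil {
+//		log.Fatalf("error in Serve: %s", err)
+//	}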
+func (s *Server) Serve(ln net.Listener) error { + var lastOverflowErrorTime time.Time + var lastPerIPErrorTime time.Time + var c net.Conn + var err error + + maxWorkersCount := s.getConcurrency() + s.concurrencyCh = make(chan struct{}, maxWorkersCount) + wp := &workerPool{ + WorkerFunc: s.serveConn, + MaxWorkersCount: maxWorkersCount, + LogAllErrors: s.LogAllErrors, + Logger: s.logger(), + } + wp.Start() + + for { + if c, err = acceptConn(s, ln, &lastPerIPErrorTime); err != nil { + wp.Stop() + if err == io.EOF { + return nil + } + return err + } + if !wp.Serve(c) { + s.writeFastError(c, StatusServiceUnavailable, + "The connection cannot be served because Server.Concurrency limit exceeded") + c.Close() + if time.Since(lastOverflowErrorTime) > time.Minute { + s.logger().Printf("The incoming connection cannot be served, because %d concurrent connections are served. "+ + "Try increasing Server.Concurrency", maxWorkersCount) + lastOverflowErrorTime = time.Now() + } + + // The current server reached concurrency limit, + // so give other concurrently running servers a chance + // accepting incoming connections on the same address. + // + // There is a hope other servers didn't reach their + // concurrency limits yet :) + time.Sleep(100 * time.Millisecond) + } + c = nil + } +} + +func acceptConn(s *Server, ln net.Listener, lastPerIPErrorTime *time.Time) (net.Conn, error) { + for { + c, err := ln.Accept() + if err != nil { + if c != nil { + panic("BUG: net.Listener returned non-nil conn and non-nil error") + } + if netErr, ok := err.(net.Error); ok && netErr.Temporary() { + s.logger().Printf("Temporary error when accepting new connections: %s", netErr) + time.Sleep(time.Second) + continue + } + if err != io.EOF && !strings.Contains(err.Error(), "use of closed network connection") { + s.logger().Printf("Permanent error when accepting new connections: %s", err) + return nil, err + } + return nil, io.EOF + } + if c == nil { + panic("BUG: net.Listener returned (nil, nil)") + } + if s.MaxConnsPerIP > 0 { + pic := wrapPerIPConn(s, c) + if pic == nil { + if time.Since(*lastPerIPErrorTime) > time.Minute { + s.logger().Printf("The number of connections from %s exceeds MaxConnsPerIP=%d", + getConnIP4(c), s.MaxConnsPerIP) + *lastPerIPErrorTime = time.Now() + } + continue + } + c = pic + } + return c, nil + } +} + +func wrapPerIPConn(s *Server, c net.Conn) net.Conn { + ip := getUint32IP(c) + if ip == 0 { + return c + } + n := s.perIPConnCounter.Register(ip) + if n > s.MaxConnsPerIP { + s.perIPConnCounter.Unregister(ip) + s.writeFastError(c, StatusTooManyRequests, "The number of connections from your ip exceeds MaxConnsPerIP") + c.Close() + return nil + } + return acquirePerIPConn(c, ip, &s.perIPConnCounter) +} + +var defaultLogger = Logger(log.New(os.Stderr, "", log.LstdFlags)) + +func (s *Server) logger() Logger { + if s.Logger != nil { + return s.Logger + } + return defaultLogger +} + +var ( + // ErrPerIPConnLimit may be returned from ServeConn if the number of connections + // per ip exceeds Server.MaxConnsPerIP. + ErrPerIPConnLimit = errors.New("too many connections per ip") + + // ErrConcurrencyLimit may be returned from ServeConn if the number + // of concurrenty served connections exceeds Server.Concurrency. + ErrConcurrencyLimit = errors.New("canot serve the connection because Server.Concurrency concurrent connections are served") + + // ErrKeepaliveTimeout is returned from ServeConn + // if the connection lifetime exceeds MaxKeepaliveDuration. 
+ ErrKeepaliveTimeout = errors.New("exceeded MaxKeepaliveDuration") +) + +// ServeConn serves HTTP requests from the given connection. +// +// ServeConn returns nil if all requests from the c are successfully served. +// It returns non-nil error otherwise. +// +// Connection c must immediately propagate all the data passed to Write() +// to the client. Otherwise requests' processing may hang. +// +// ServeConn closes c before returning. +func (s *Server) ServeConn(c net.Conn) error { + if s.MaxConnsPerIP > 0 { + pic := wrapPerIPConn(s, c) + if pic == nil { + return ErrPerIPConnLimit + } + c = pic + } + + n := atomic.AddUint32(&s.concurrency, 1) + if n > uint32(s.getConcurrency()) { + atomic.AddUint32(&s.concurrency, ^uint32(0)) + s.writeFastError(c, StatusServiceUnavailable, "The connection cannot be served because Server.Concurrency limit exceeded") + c.Close() + return ErrConcurrencyLimit + } + + err := s.serveConn(c) + + atomic.AddUint32(&s.concurrency, ^uint32(0)) + + if err != errHijacked { + err1 := c.Close() + if err == nil { + err = err1 + } + } else { + err = nil + } + return err +} + +var errHijacked = errors.New("connection has been hijacked") + +func (s *Server) getConcurrency() int { + n := s.Concurrency + if n <= 0 { + n = DefaultConcurrency + } + return n +} + +var globalConnID uint64 + +func nextConnID() uint64 { + return atomic.AddUint64(&globalConnID, 1) +} + +// DefaultMaxRequestBodySize is the maximum request body size the server +// reads by default. +// +// See Server.MaxRequestBodySize for details. +const DefaultMaxRequestBodySize = 4 * 1024 * 1024 + +func (s *Server) serveConn(c net.Conn) error { + var serverName []byte + if !s.NoDefaultServerHeader { + serverName = s.getServerName() + } + + connRequestNum := uint64(0) + connID := nextConnID() + currentTime := time.Now() + connTime := currentTime + maxRequestBodySize := s.MaxRequestBodySize + if maxRequestBodySize <= 0 { + maxRequestBodySize = DefaultMaxRequestBodySize + } + + ctx := s.acquireCtx(c) + ctx.connTime = connTime + isTLS := ctx.IsTLS() + var ( + br *bufio.Reader + bw *bufio.Writer + + err error + timeoutResponse *Response + hijackHandler HijackHandler + + lastReadDeadlineTime time.Time + lastWriteDeadlineTime time.Time + + connectionClose bool + isHTTP11 bool + ) + for { + connRequestNum++ + ctx.time = currentTime + + if s.ReadTimeout > 0 || s.MaxKeepaliveDuration > 0 { + lastReadDeadlineTime = s.updateReadDeadline(c, ctx, lastReadDeadlineTime) + if lastReadDeadlineTime.IsZero() { + err = ErrKeepaliveTimeout + break + } + } + + if !(s.ReduceMemoryUsage || ctx.lastReadDuration > time.Second) || br != nil { + if br == nil { + br = acquireReader(ctx) + } + } else { + br, err = acquireByteReader(&ctx) + } + ctx.Request.isTLS = isTLS + + if err == nil { + if s.DisableHeaderNamesNormalizing { + ctx.Request.Header.DisableNormalizing() + ctx.Response.Header.DisableNormalizing() + } + // reading Headers and Body + err = ctx.Request.readLimitBody(br, maxRequestBodySize, s.GetOnly) + if br.Buffered() == 0 || err != nil { + releaseReader(s, br) + br = nil + } + } + + currentTime = time.Now() + ctx.lastReadDuration = currentTime.Sub(ctx.time) + + if err != nil { + if err == io.EOF { + err = nil + } else { + bw = writeErrorResponse(bw, ctx, serverName, err) + } + break + } + + // 'Expect: 100-continue' request handling. + // See http://www.w3.org/Protocols/rfc2616/rfc2616-sec8.html for details. + if !ctx.Request.Header.ignoreBody() && ctx.Request.MayContinue() { + // Send 'HTTP/1.1 100 Continue' response. 
+ if bw == nil { + bw = acquireWriter(ctx) + } + bw.Write(strResponseContinue) + err = bw.Flush() + releaseWriter(s, bw) + bw = nil + if err != nil { + break + } + + // Read request body. + if br == nil { + br = acquireReader(ctx) + } + err = ctx.Request.ContinueReadBody(br, maxRequestBodySize) + if br.Buffered() == 0 || err != nil { + releaseReader(s, br) + br = nil + } + if err != nil { + bw = writeErrorResponse(bw, ctx, serverName, err) + break + } + } + + connectionClose = s.DisableKeepalive || ctx.Request.Header.connectionCloseFast() + isHTTP11 = ctx.Request.Header.IsHTTP11() + + if serverName != nil { + ctx.Response.Header.SetServerBytes(serverName) + } + ctx.connID = connID + ctx.connRequestNum = connRequestNum + ctx.connTime = connTime + ctx.time = currentTime + s.Handler(ctx) + + timeoutResponse = ctx.timeoutResponse + if timeoutResponse != nil { + ctx = s.acquireCtx(c) + timeoutResponse.CopyTo(&ctx.Response) + if br != nil { + // Close connection, since br may be attached to the old ctx via ctx.fbr. + ctx.SetConnectionClose() + } + } + + if !ctx.IsGet() && ctx.IsHead() { + ctx.Response.SkipBody = true + } + ctx.Request.Reset() + + hijackHandler = ctx.hijackHandler + ctx.hijackHandler = nil + + ctx.userValues.Reset() + + if s.MaxRequestsPerConn > 0 && connRequestNum >= uint64(s.MaxRequestsPerConn) { + ctx.SetConnectionClose() + } + + if s.WriteTimeout > 0 || s.MaxKeepaliveDuration > 0 { + lastWriteDeadlineTime = s.updateWriteDeadline(c, ctx, lastWriteDeadlineTime) + } + + // Verify Request.Header.connectionCloseFast() again, + // since request handler might trigger full headers' parsing. + connectionClose = connectionClose || ctx.Request.Header.connectionCloseFast() || ctx.Response.ConnectionClose() + if connectionClose { + ctx.Response.Header.SetCanonical(strConnection, strClose) + } else if !isHTTP11 { + // Set 'Connection: keep-alive' response header for non-HTTP/1.1 request. + // There is no need in setting this header for http/1.1, since in http/1.1 + // connections are keep-alive by default. + ctx.Response.Header.SetCanonical(strConnection, strKeepAlive) + } + + if serverName != nil && len(ctx.Response.Header.Server()) == 0 { + ctx.Response.Header.SetServerBytes(serverName) + } + + if bw == nil { + bw = acquireWriter(ctx) + } + if err = writeResponse(ctx, bw); err != nil { + break + } + + if br == nil || connectionClose { + err = bw.Flush() + releaseWriter(s, bw) + bw = nil + if err != nil { + break + } + if connectionClose { + break + } + } + + if hijackHandler != nil { + var hjr io.Reader + hjr = c + if br != nil { + hjr = br + br = nil + + // br may point to ctx.fbr, so do not return ctx into pool. 
+ ctx = s.acquireCtx(c) + } + if bw != nil { + err = bw.Flush() + releaseWriter(s, bw) + bw = nil + if err != nil { + break + } + } + c.SetReadDeadline(zeroTime) + c.SetWriteDeadline(zeroTime) + go hijackConnHandler(hjr, c, s, hijackHandler) + hijackHandler = nil + err = errHijacked + break + } + + currentTime = time.Now() + } + + if br != nil { + releaseReader(s, br) + } + if bw != nil { + releaseWriter(s, bw) + } + s.releaseCtx(ctx) + return err +} + +func (s *Server) updateReadDeadline(c net.Conn, ctx *RequestCtx, lastDeadlineTime time.Time) time.Time { + readTimeout := s.ReadTimeout + currentTime := ctx.time + if s.MaxKeepaliveDuration > 0 { + connTimeout := s.MaxKeepaliveDuration - currentTime.Sub(ctx.connTime) + if connTimeout <= 0 { + return zeroTime + } + if connTimeout < readTimeout { + readTimeout = connTimeout + } + } + + // Optimization: update read deadline only if more than 25% + // of the last read deadline exceeded. + // See https://github.com/golang/go/issues/15133 for details. + if currentTime.Sub(lastDeadlineTime) > (readTimeout >> 2) { + if err := c.SetReadDeadline(currentTime.Add(readTimeout)); err != nil { + panic(fmt.Sprintf("BUG: error in SetReadDeadline(%s): %s", readTimeout, err)) + } + lastDeadlineTime = currentTime + } + return lastDeadlineTime +} + +func (s *Server) updateWriteDeadline(c net.Conn, ctx *RequestCtx, lastDeadlineTime time.Time) time.Time { + writeTimeout := s.WriteTimeout + if s.MaxKeepaliveDuration > 0 { + connTimeout := s.MaxKeepaliveDuration - time.Since(ctx.connTime) + if connTimeout <= 0 { + // MaxKeepAliveDuration exceeded, but let's try sending response anyway + // in 100ms with 'Connection: close' header. + ctx.SetConnectionClose() + connTimeout = 100 * time.Millisecond + } + if connTimeout < writeTimeout { + writeTimeout = connTimeout + } + } + + // Optimization: update write deadline only if more than 25% + // of the last write deadline exceeded. + // See https://github.com/golang/go/issues/15133 for details. + currentTime := time.Now() + if currentTime.Sub(lastDeadlineTime) > (writeTimeout >> 2) { + if err := c.SetWriteDeadline(currentTime.Add(writeTimeout)); err != nil { + panic(fmt.Sprintf("BUG: error in SetWriteDeadline(%s): %s", writeTimeout, err)) + } + lastDeadlineTime = currentTime + } + return lastDeadlineTime +} + +func hijackConnHandler(r io.Reader, c net.Conn, s *Server, h HijackHandler) { + hjc := s.acquireHijackConn(r, c) + h(hjc) + + if br, ok := r.(*bufio.Reader); ok { + releaseReader(s, br) + } + c.Close() + s.releaseHijackConn(hjc) +} + +func (s *Server) acquireHijackConn(r io.Reader, c net.Conn) *hijackConn { + v := s.hijackConnPool.Get() + if v == nil { + hjc := &hijackConn{ + Conn: c, + r: r, + } + return hjc + } + hjc := v.(*hijackConn) + hjc.Conn = c + hjc.r = r + return hjc +} + +func (s *Server) releaseHijackConn(hjc *hijackConn) { + hjc.Conn = nil + hjc.r = nil + s.hijackConnPool.Put(hjc) +} + +type hijackConn struct { + net.Conn + r io.Reader +} + +func (c hijackConn) Read(p []byte) (int, error) { + return c.r.Read(p) +} + +func (c hijackConn) Close() error { + // hijacked conn is closed in hijackConnHandler. + return nil +} + +// LastTimeoutErrorResponse returns the last timeout response set +// via TimeoutError* call. +// +// This function is intended for custom server implementations. 
+func (ctx *RequestCtx) LastTimeoutErrorResponse() *Response { + return ctx.timeoutResponse +} + +func writeResponse(ctx *RequestCtx, w *bufio.Writer) error { + if ctx.timeoutResponse != nil { + panic("BUG: cannot write timed out response") + } + err := ctx.Response.Write(w) + ctx.Response.Reset() + return err +} + +const ( + defaultReadBufferSize = 4096 + defaultWriteBufferSize = 4096 +) + +func acquireByteReader(ctxP **RequestCtx) (*bufio.Reader, error) { + ctx := *ctxP + s := ctx.s + c := ctx.c + t := ctx.time + s.releaseCtx(ctx) + + // Make GC happy, so it could garbage collect ctx + // while we waiting for the next request. + ctx = nil + *ctxP = nil + + v := s.bytePool.Get() + if v == nil { + v = make([]byte, 1) + } + b := v.([]byte) + n, err := c.Read(b) + ch := b[0] + s.bytePool.Put(v) + ctx = s.acquireCtx(c) + ctx.time = t + *ctxP = ctx + if err != nil { + // Treat all errors as EOF on unsuccessful read + // of the first request byte. + return nil, io.EOF + } + if n != 1 { + panic("BUG: Reader must return at least one byte") + } + + ctx.fbr.c = c + ctx.fbr.ch = ch + ctx.fbr.byteRead = false + r := acquireReader(ctx) + r.Reset(&ctx.fbr) + return r, nil +} + +func acquireReader(ctx *RequestCtx) *bufio.Reader { + v := ctx.s.readerPool.Get() + if v == nil { + n := ctx.s.ReadBufferSize + if n <= 0 { + n = defaultReadBufferSize + } + return bufio.NewReaderSize(ctx.c, n) + } + r := v.(*bufio.Reader) + r.Reset(ctx.c) + return r +} + +func releaseReader(s *Server, r *bufio.Reader) { + s.readerPool.Put(r) +} + +func acquireWriter(ctx *RequestCtx) *bufio.Writer { + v := ctx.s.writerPool.Get() + if v == nil { + n := ctx.s.WriteBufferSize + if n <= 0 { + n = defaultWriteBufferSize + } + return bufio.NewWriterSize(ctx.c, n) + } + w := v.(*bufio.Writer) + w.Reset(ctx.c) + return w +} + +func releaseWriter(s *Server, w *bufio.Writer) { + s.writerPool.Put(w) +} + +func (s *Server) acquireCtx(c net.Conn) (ctx *RequestCtx) { + v := s.ctxPool.Get() + if v == nil { + ctx = &RequestCtx{ + s: s, + } + keepBodyBuffer := !s.ReduceMemoryUsage + ctx.Request.keepBodyBuffer = keepBodyBuffer + ctx.Response.keepBodyBuffer = keepBodyBuffer + } else { + ctx = v.(*RequestCtx) + } + ctx.c = c + return +} + +// Init2 prepares ctx for passing to RequestHandler. +// +// conn is used only for determining local and remote addresses. +// +// This function is intended for custom Server implementations. +// See https://github.com/valyala/httpteleport for details. +func (ctx *RequestCtx) Init2(conn net.Conn, logger Logger, reduceMemoryUsage bool) { + ctx.c = conn + ctx.logger.logger = logger + ctx.connID = nextConnID() + ctx.s = fakeServer + ctx.connRequestNum = 0 + ctx.connTime = time.Now() + ctx.time = ctx.connTime + + keepBodyBuffer := !reduceMemoryUsage + ctx.Request.keepBodyBuffer = keepBodyBuffer + ctx.Response.keepBodyBuffer = keepBodyBuffer +} + +// Init prepares ctx for passing to RequestHandler. +// +// remoteAddr and logger are optional. They are used by RequestCtx.Logger(). +// +// This function is intended for custom Server implementations. 
+func (ctx *RequestCtx) Init(req *Request, remoteAddr net.Addr, logger Logger) { + if remoteAddr == nil { + remoteAddr = zeroTCPAddr + } + c := &fakeAddrer{ + laddr: zeroTCPAddr, + raddr: remoteAddr, + } + if logger == nil { + logger = defaultLogger + } + ctx.Init2(c, logger, true) + req.CopyTo(&ctx.Request) +} + +var fakeServer = &Server{ + // Initialize concurrencyCh for TimeoutHandler + concurrencyCh: make(chan struct{}, DefaultConcurrency), +} + +type fakeAddrer struct { + net.Conn + laddr net.Addr + raddr net.Addr +} + +func (fa *fakeAddrer) RemoteAddr() net.Addr { + return fa.raddr +} + +func (fa *fakeAddrer) LocalAddr() net.Addr { + return fa.laddr +} + +func (fa *fakeAddrer) Read(p []byte) (int, error) { + panic("BUG: unexpected Read call") +} + +func (fa *fakeAddrer) Write(p []byte) (int, error) { + panic("BUG: unexpected Write call") +} + +func (fa *fakeAddrer) Close() error { + panic("BUG: unexpected Close call") +} + +func (s *Server) releaseCtx(ctx *RequestCtx) { + if ctx.timeoutResponse != nil { + panic("BUG: cannot release timed out RequestCtx") + } + ctx.c = nil + ctx.fbr.c = nil + s.ctxPool.Put(ctx) +} + +func (s *Server) getServerName() []byte { + v := s.serverName.Load() + var serverName []byte + if v == nil { + serverName = []byte(s.Name) + if len(serverName) == 0 { + serverName = defaultServerName + } + s.serverName.Store(serverName) + } else { + serverName = v.([]byte) + } + return serverName +} + +func (s *Server) writeFastError(w io.Writer, statusCode int, msg string) { + w.Write(statusLine(statusCode)) + + server := "" + if !s.NoDefaultServerHeader { + server = fmt.Sprintf("Server: %s\r\n", s.getServerName()) + } + + fmt.Fprintf(w, "Connection: close\r\n"+ + server+ + "Date: %s\r\n"+ + "Content-Type: text/plain\r\n"+ + "Content-Length: %d\r\n"+ + "\r\n"+ + "%s", + serverDate.Load(), len(msg), msg) +} + +func writeErrorResponse(bw *bufio.Writer, ctx *RequestCtx, serverName []byte, + err error) *bufio.Writer { + if _, ok := err.(*ErrSmallBuffer); ok { + ctx.Error("Too big request header", StatusRequestHeaderFieldsTooLarge) + } else { + ctx.Error("Error when parsing request", StatusBadRequest) + } + if serverName != nil { + ctx.Response.Header.SetServerBytes(serverName) + } + + ctx.SetConnectionClose() + if bw == nil { + bw = acquireWriter(ctx) + } + writeResponse(ctx, bw) + bw.Flush() + return bw +} diff --git a/vendor/github.com/erikdubbelboer/fasthttp/server_example_test.go b/vendor/github.com/erikdubbelboer/fasthttp/server_example_test.go new file mode 100644 index 0000000..16c93ae --- /dev/null +++ b/vendor/github.com/erikdubbelboer/fasthttp/server_example_test.go @@ -0,0 +1,177 @@ +package fasthttp_test + +import ( + "fmt" + "log" + "math/rand" + "net" + "time" + + "github.com/erikdubbelboer/fasthttp" +) + +func ExampleListenAndServe() { + // The server will listen for incoming requests on this address. + listenAddr := "127.0.0.1:80" + + // This function will be called by the server for each incoming request. + // + // RequestCtx provides a lot of functionality related to http request + // processing. See RequestCtx docs for details. + requestHandler := func(ctx *fasthttp.RequestCtx) { + fmt.Fprintf(ctx, "Hello, world! Requested path is %q", ctx.Path()) + } + + // Start the server with default settings. + // Create Server instance for adjusting server settings. + // + // ListenAndServe returns only on error, so usually it blocks forever. 
+ if err := fasthttp.ListenAndServe(listenAddr, requestHandler); err != nil { + log.Fatalf("error in ListenAndServe: %s", err) + } +} + +func ExampleServe() { + // Create network listener for accepting incoming requests. + // + // Note that you are not limited by TCP listener - arbitrary + // net.Listener may be used by the server. + // For example, unix socket listener or TLS listener. + ln, err := net.Listen("tcp4", "127.0.0.1:8080") + if err != nil { + log.Fatalf("error in net.Listen: %s", err) + } + + // This function will be called by the server for each incoming request. + // + // RequestCtx provides a lot of functionality related to http request + // processing. See RequestCtx docs for details. + requestHandler := func(ctx *fasthttp.RequestCtx) { + fmt.Fprintf(ctx, "Hello, world! Requested path is %q", ctx.Path()) + } + + // Start the server with default settings. + // Create Server instance for adjusting server settings. + // + // Serve returns on ln.Close() or error, so usually it blocks forever. + if err := fasthttp.Serve(ln, requestHandler); err != nil { + log.Fatalf("error in Serve: %s", err) + } +} + +func ExampleServer() { + // This function will be called by the server for each incoming request. + // + // RequestCtx provides a lot of functionality related to http request + // processing. See RequestCtx docs for details. + requestHandler := func(ctx *fasthttp.RequestCtx) { + fmt.Fprintf(ctx, "Hello, world! Requested path is %q", ctx.Path()) + } + + // Create custom server. + s := &fasthttp.Server{ + Handler: requestHandler, + + // Every response will contain 'Server: My super server' header. + Name: "My super server", + + // Other Server settings may be set here. + } + + // Start the server listening for incoming requests on the given address. + // + // ListenAndServe returns only on error, so usually it blocks forever. + if err := s.ListenAndServe("127.0.0.1:80"); err != nil { + log.Fatalf("error in ListenAndServe: %s", err) + } +} + +func ExampleRequestCtx_Hijack() { + // hijackHandler is called on hijacked connection. + hijackHandler := func(c net.Conn) { + fmt.Fprintf(c, "This message is sent over a hijacked connection to the client %s\n", c.RemoteAddr()) + fmt.Fprintf(c, "Send me something and I'll echo it to you\n") + var buf [1]byte + for { + if _, err := c.Read(buf[:]); err != nil { + log.Printf("error when reading from hijacked connection: %s", err) + return + } + fmt.Fprintf(c, "You sent me %q. Waiting for new data\n", buf[:]) + } + } + + // requestHandler is called for each incoming request. + requestHandler := func(ctx *fasthttp.RequestCtx) { + path := ctx.Path() + switch { + case string(path) == "/hijack": + // Note that the connection is hijacked only after + // returning from requestHandler and sending http response. + ctx.Hijack(hijackHandler) + + // The connection will be hijacked after sending this response. + fmt.Fprintf(ctx, "Hijacked the connection!") + case string(path) == "/": + fmt.Fprintf(ctx, "Root directory requested") + default: + fmt.Fprintf(ctx, "Requested path is %q", path) + } + } + + if err := fasthttp.ListenAndServe(":80", requestHandler); err != nil { + log.Fatalf("error in ListenAndServe: %s", err) + } +} + +func ExampleRequestCtx_TimeoutError() { + requestHandler := func(ctx *fasthttp.RequestCtx) { + // Emulate long-running task, which touches ctx. 
+ doneCh := make(chan struct{}) + go func() { + workDuration := time.Millisecond * time.Duration(rand.Intn(2000)) + time.Sleep(workDuration) + + fmt.Fprintf(ctx, "ctx has been accessed by long-running task\n") + fmt.Fprintf(ctx, "The reuqestHandler may be finished by this time.\n") + + close(doneCh) + }() + + select { + case <-doneCh: + fmt.Fprintf(ctx, "The task has been finished in less than a second") + case <-time.After(time.Second): + // Since the long-running task is still running and may access ctx, + // we must call TimeoutError before returning from requestHandler. + // + // Otherwise the program will suffer from data races. + ctx.TimeoutError("Timeout!") + } + } + + if err := fasthttp.ListenAndServe(":80", requestHandler); err != nil { + log.Fatalf("error in ListenAndServe: %s", err) + } +} + +func ExampleRequestCtx_Logger() { + requestHandler := func(ctx *fasthttp.RequestCtx) { + if string(ctx.Path()) == "/top-secret" { + ctx.Logger().Printf("Alarm! Alien intrusion detected!") + ctx.Error("Access denied!", fasthttp.StatusForbidden) + return + } + + // Logger may be cached in local variables. + logger := ctx.Logger() + + logger.Printf("Good request from User-Agent %q", ctx.Request.Header.UserAgent()) + fmt.Fprintf(ctx, "Good request to %q", ctx.Path()) + logger.Printf("Multiple log messages may be written during a single request") + } + + if err := fasthttp.ListenAndServe(":80", requestHandler); err != nil { + log.Fatalf("error in ListenAndServe: %s", err) + } +} diff --git a/vendor/github.com/erikdubbelboer/fasthttp/server_test.go b/vendor/github.com/erikdubbelboer/fasthttp/server_test.go new file mode 100644 index 0000000..779149b --- /dev/null +++ b/vendor/github.com/erikdubbelboer/fasthttp/server_test.go @@ -0,0 +1,2666 @@ +package fasthttp + +import ( + "bufio" + "bytes" + "crypto/tls" + "fmt" + "io" + "io/ioutil" + "net" + "os" + "strings" + "sync" + "testing" + "time" + + "github.com/erikdubbelboer/fasthttp/fasthttputil" +) + +func TestRequestCtxString(t *testing.T) { + var ctx RequestCtx + + s := ctx.String() + expectedS := "#0000000000000000 - 0.0.0.0:0<->0.0.0.0:0 - GET http:///" + if s != expectedS { + t.Fatalf("unexpected ctx.String: %q. Expecting %q", s, expectedS) + } + + ctx.Request.SetRequestURI("https://foobar.com/aaa?bb=c") + s = ctx.String() + expectedS = "#0000000000000000 - 0.0.0.0:0<->0.0.0.0:0 - GET https://foobar.com/aaa?bb=c" + if s != expectedS { + t.Fatalf("unexpected ctx.String: %q. Expecting %q", s, expectedS) + } +} + +func TestServerErrSmallBufferLogged(t *testing.T) { + logger := &customLogger{} + s := &Server{ + Handler: func(ctx *RequestCtx) { + ctx.WriteString("shouldn't be never called") + }, + ReadBufferSize: 20, + Logger: logger, + LogAllErrors: true, + } + + testServerErrSmallBuffer(t, s, logger) + + expectedErr := errSmallBuffer.Error() + if !strings.Contains(logger.out, expectedErr) { + t.Fatalf("unexpected log output: %q. Expecting %q", logger.out, expectedErr) + } +} + +func TestServerErrSmallBufferNotLogged(t *testing.T) { + logger := &customLogger{} + s := &Server{ + Handler: func(ctx *RequestCtx) { + ctx.WriteString("shouldn't be never called") + }, + ReadBufferSize: 20, + Logger: logger, + } + + testServerErrSmallBuffer(t, s, logger) + + if len(logger.out) > 0 { + t.Fatalf("unexpected log output: %q. 
Expecting no output", logger.out) + } +} + +func testServerErrSmallBuffer(t *testing.T, s *Server, logger *customLogger) { + ln := fasthttputil.NewInmemoryListener() + + serverCh := make(chan error, 1) + go func() { + err := s.Serve(ln) + serverCh <- err + }() + + clientCh := make(chan error, 1) + go func() { + c, err := ln.Dial() + if err != nil { + clientCh <- fmt.Errorf("unexpected error: %s", err) + return + } + _, err = c.Write([]byte("GET / HTTP/1.1\r\nHost: aabb.com\r\nVERY-long-Header: sdfdfsd dsf dsaf dsf df fsd\r\n\r\n")) + if err != nil { + clientCh <- fmt.Errorf("unexpected error when sending request: %s", err) + return + } + br := bufio.NewReader(c) + var resp Response + if err = resp.Read(br); err != nil { + clientCh <- fmt.Errorf("unexpected error: %s", err) + return + } + statusCode := resp.StatusCode() + if statusCode != StatusRequestHeaderFieldsTooLarge { + clientCh <- fmt.Errorf("unexpected status code: %d. Expecting %d", statusCode, StatusRequestHeaderFieldsTooLarge) + return + } + if !resp.ConnectionClose() { + clientCh <- fmt.Errorf("missing 'Connection: close' response header") + return + } + clientCh <- nil + }() + + var err error + + // wait for the client + select { + case <-time.After(time.Second): + t.Fatalf("timeout when waiting for the client. Server log: %q", logger.out) + case err = <-clientCh: + if err != nil { + t.Fatalf("unexpected client error: %s. Server log: %q", err, logger.out) + } + } + + // wait for the server + if err := ln.Close(); err != nil { + t.Fatalf("unexpected error: %s. Server log: %q", err, logger.out) + } + select { + case <-time.After(time.Second): + t.Fatalf("timeout when waiting for the server. Server log: %q", logger.out) + case err = <-serverCh: + if err != nil { + t.Fatalf("unexpected server error: %s. Server log: %q", err, logger.out) + } + } +} + +func TestRequestCtxIsTLS(t *testing.T) { + var ctx RequestCtx + + // tls.Conn + ctx.c = &tls.Conn{} + if !ctx.IsTLS() { + t.Fatalf("IsTLS must return true") + } + + // non-tls.Conn + ctx.c = &readWriter{} + if ctx.IsTLS() { + t.Fatalf("IsTLS must return false") + } + + // overridden tls.Conn + ctx.c = &struct { + *tls.Conn + fooBar bool + }{} + if !ctx.IsTLS() { + t.Fatalf("IsTLS must return true") + } +} + +func TestRequestCtxConn(t *testing.T) { + var ctx RequestCtx + ctx.c = &struct { + *tls.Conn + value bool + }{value: true} + + tc, ok := ctx.Conn().(*struct { + *tls.Conn + value bool + }) + + if !ok || !tc.value { + t.Fatalf("Conn must return underlying connection") + } +} + +func TestRequestCtxRedirectHTTPSSchemeless(t *testing.T) { + var ctx RequestCtx + + s := "GET /foo/bar?baz HTTP/1.1\nHost: aaa.com\n\n" + br := bufio.NewReader(bytes.NewBufferString(s)) + if err := ctx.Request.Read(br); err != nil { + t.Fatalf("cannot read request: %s", err) + } + ctx.Request.isTLS = true + + ctx.Redirect("//foobar.com/aa/bbb", StatusFound) + location := ctx.Response.Header.Peek("Location") + expectedLocation := "https://foobar.com/aa/bbb" + if string(location) != expectedLocation { + t.Fatalf("Unexpected location: %q. 
Expecting %q", location, expectedLocation) + } +} + +func TestRequestCtxRedirect(t *testing.T) { + testRequestCtxRedirect(t, "http://qqq/", "", "http://qqq/") + testRequestCtxRedirect(t, "http://qqq/foo/bar?baz=111", "", "http://qqq/foo/bar?baz=111") + testRequestCtxRedirect(t, "http://qqq/foo/bar?baz=111", "#aaa", "http://qqq/foo/bar?baz=111#aaa") + testRequestCtxRedirect(t, "http://qqq/foo/bar?baz=111", "?abc=de&f", "http://qqq/foo/bar?abc=de&f") + testRequestCtxRedirect(t, "http://qqq/foo/bar?baz=111", "?abc=de&f#sf", "http://qqq/foo/bar?abc=de&f#sf") + testRequestCtxRedirect(t, "http://qqq/foo/bar?baz=111", "x.html", "http://qqq/foo/x.html") + testRequestCtxRedirect(t, "http://qqq/foo/bar?baz=111", "x.html?a=1", "http://qqq/foo/x.html?a=1") + testRequestCtxRedirect(t, "http://qqq/foo/bar?baz=111", "x.html#aaa=bbb&cc=ddd", "http://qqq/foo/x.html#aaa=bbb&cc=ddd") + testRequestCtxRedirect(t, "http://qqq/foo/bar?baz=111", "x.html?b=1#aaa=bbb&cc=ddd", "http://qqq/foo/x.html?b=1#aaa=bbb&cc=ddd") + testRequestCtxRedirect(t, "http://qqq/foo/bar?baz=111", "/x.html", "http://qqq/x.html") + testRequestCtxRedirect(t, "http://qqq/foo/bar?baz=111", "/x.html#aaa=bbb&cc=ddd", "http://qqq/x.html#aaa=bbb&cc=ddd") + testRequestCtxRedirect(t, "http://qqq/foo/bar?baz=111", "../x.html", "http://qqq/x.html") + testRequestCtxRedirect(t, "http://qqq/foo/bar?baz=111", "../../x.html", "http://qqq/x.html") + testRequestCtxRedirect(t, "http://qqq/foo/bar?baz=111", "./.././../x.html", "http://qqq/x.html") + testRequestCtxRedirect(t, "http://qqq/foo/bar?baz=111", "http://foo.bar/baz", "http://foo.bar/baz") + testRequestCtxRedirect(t, "http://qqq/foo/bar?baz=111", "https://foo.bar/baz", "https://foo.bar/baz") + testRequestCtxRedirect(t, "https://foo.com/bar?aaa", "//google.com/aaa?bb", "https://google.com/aaa?bb") +} + +func testRequestCtxRedirect(t *testing.T, origURL, redirectURL, expectedURL string) { + var ctx RequestCtx + var req Request + req.SetRequestURI(origURL) + ctx.Init(&req, nil, nil) + + ctx.Redirect(redirectURL, StatusFound) + loc := ctx.Response.Header.Peek("Location") + if string(loc) != expectedURL { + t.Fatalf("unexpected redirect url %q. Expecting %q. origURL=%q, redirectURL=%q", loc, expectedURL, origURL, redirectURL) + } +} + +func TestServerResponseServerHeader(t *testing.T) { + serverName := "foobar serv" + + s := &Server{ + Handler: func(ctx *RequestCtx) { + name := ctx.Response.Header.Server() + if string(name) != serverName { + fmt.Fprintf(ctx, "unexpected server name: %q. Expecting %q", name, serverName) + } else { + ctx.WriteString("OK") + } + + // make sure the server name is sent to the client after ctx.Response.Reset() + ctx.NotFound() + }, + Name: serverName, + } + + ln := fasthttputil.NewInmemoryListener() + + serverCh := make(chan struct{}) + go func() { + if err := s.Serve(ln); err != nil { + t.Fatalf("unexpected error: %s", err) + } + close(serverCh) + }() + + clientCh := make(chan struct{}) + go func() { + c, err := ln.Dial() + if err != nil { + t.Fatalf("unexpected error: %s", err) + } + if _, err = c.Write([]byte("GET / HTTP/1.1\r\nHost: aa\r\n\r\n")); err != nil { + t.Fatalf("unexpected error: %s", err) + } + br := bufio.NewReader(c) + var resp Response + if err = resp.Read(br); err != nil { + t.Fatalf("unexpected error: %s", err) + } + + if resp.StatusCode() != StatusNotFound { + t.Fatalf("unexpected status code: %d. Expecting %d", resp.StatusCode(), StatusNotFound) + } + if string(resp.Body()) != "404 Page not found" { + t.Fatalf("unexpected body: %q. 
Expecting %q", resp.Body(), "404 Page not found") + } + if string(resp.Header.Server()) != serverName { + t.Fatalf("unexpected server header: %q. Expecting %q", resp.Header.Server(), serverName) + } + if err = c.Close(); err != nil { + t.Fatalf("unexpected error: %s", err) + } + close(clientCh) + }() + + select { + case <-clientCh: + case <-time.After(time.Second): + t.Fatalf("timeout") + } + + if err := ln.Close(); err != nil { + t.Fatalf("unexpected error: %s", err) + } + + select { + case <-serverCh: + case <-time.After(time.Second): + t.Fatalf("timeout") + } +} + +func TestServerResponseBodyStream(t *testing.T) { + ln := fasthttputil.NewInmemoryListener() + + readyCh := make(chan struct{}) + h := func(ctx *RequestCtx) { + ctx.SetConnectionClose() + if ctx.IsBodyStream() { + t.Fatalf("IsBodyStream must return false") + } + ctx.SetBodyStreamWriter(func(w *bufio.Writer) { + fmt.Fprintf(w, "first") + if err := w.Flush(); err != nil { + return + } + <-readyCh + fmt.Fprintf(w, "second") + // there is no need to flush w here, since it will + // be flushed automatically after returning from StreamWriter. + }) + if !ctx.IsBodyStream() { + t.Fatalf("IsBodyStream must return true") + } + } + + serverCh := make(chan struct{}) + go func() { + if err := Serve(ln, h); err != nil { + t.Fatalf("unexpected error: %s", err) + } + close(serverCh) + }() + + clientCh := make(chan struct{}) + go func() { + c, err := ln.Dial() + if err != nil { + t.Fatalf("unexpected error: %s", err) + } + if _, err = c.Write([]byte("GET / HTTP/1.1\r\nHost: aa\r\n\r\n")); err != nil { + t.Fatalf("unexpected error: %s", err) + } + br := bufio.NewReader(c) + var respH ResponseHeader + if err = respH.Read(br); err != nil { + t.Fatalf("unexpected error: %s", err) + } + if respH.StatusCode() != StatusOK { + t.Fatalf("unexpected status code: %d. Expecting %d", respH.StatusCode(), StatusOK) + } + + buf := make([]byte, 1024) + n, err := br.Read(buf) + if err != nil { + t.Fatalf("unexpected error: %s", err) + } + b := buf[:n] + if string(b) != "5\r\nfirst\r\n" { + t.Fatalf("unexpected result %q. Expecting %q", b, "5\r\nfirst\r\n") + } + close(readyCh) + + tail, err := ioutil.ReadAll(br) + if err != nil { + t.Fatalf("unexpected error: %s", err) + } + if string(tail) != "6\r\nsecond\r\n0\r\n\r\n" { + t.Fatalf("unexpected tail %q. Expecting %q", tail, "6\r\nsecond\r\n0\r\n\r\n") + } + + close(clientCh) + }() + + select { + case <-clientCh: + case <-time.After(time.Second): + t.Fatalf("timeout") + } + + if err := ln.Close(); err != nil { + t.Fatalf("unexpected error: %s", err) + } + + select { + case <-serverCh: + case <-time.After(time.Second): + t.Fatalf("timeout") + } +} + +func TestServerDisableKeepalive(t *testing.T) { + s := &Server{ + Handler: func(ctx *RequestCtx) { + ctx.WriteString("OK") + }, + DisableKeepalive: true, + } + + ln := fasthttputil.NewInmemoryListener() + + serverCh := make(chan struct{}) + go func() { + if err := s.Serve(ln); err != nil { + t.Fatalf("unexpected error: %s", err) + } + close(serverCh) + }() + + clientCh := make(chan struct{}) + go func() { + c, err := ln.Dial() + if err != nil { + t.Fatalf("unexpected error: %s", err) + } + if _, err = c.Write([]byte("GET / HTTP/1.1\r\nHost: aa\r\n\r\n")); err != nil { + t.Fatalf("unexpected error: %s", err) + } + br := bufio.NewReader(c) + var resp Response + if err = resp.Read(br); err != nil { + t.Fatalf("unexpected error: %s", err) + } + if resp.StatusCode() != StatusOK { + t.Fatalf("unexpected status code: %d. 
Expecting %d", resp.StatusCode(), StatusOK) + } + if !resp.ConnectionClose() { + t.Fatalf("expecting 'Connection: close' response header") + } + if string(resp.Body()) != "OK" { + t.Fatalf("unexpected body: %q. Expecting %q", resp.Body(), "OK") + } + + // make sure the connection is closed + data, err := ioutil.ReadAll(br) + if err != nil { + t.Fatalf("unexpected error: %s", err) + } + if len(data) > 0 { + t.Fatalf("unexpected data read from the connection: %q. Expecting empty data", data) + } + + close(clientCh) + }() + + select { + case <-clientCh: + case <-time.After(time.Second): + t.Fatalf("timeout") + } + + if err := ln.Close(); err != nil { + t.Fatalf("unexpected error: %s", err) + } + + select { + case <-serverCh: + case <-time.After(time.Second): + t.Fatalf("timeout") + } +} + +func TestServerMaxConnsPerIPLimit(t *testing.T) { + s := &Server{ + Handler: func(ctx *RequestCtx) { + ctx.WriteString("OK") + }, + MaxConnsPerIP: 1, + Logger: &customLogger{}, + } + + ln := fasthttputil.NewInmemoryListener() + + serverCh := make(chan struct{}) + go func() { + fakeLN := &fakeIPListener{ + Listener: ln, + } + if err := s.Serve(fakeLN); err != nil { + t.Fatalf("unexpected error: %s", err) + } + close(serverCh) + }() + + clientCh := make(chan struct{}) + go func() { + c1, err := ln.Dial() + if err != nil { + t.Fatalf("unexpected error: %s", err) + } + c2, err := ln.Dial() + if err != nil { + t.Fatalf("unexpected error: %s", err) + } + br := bufio.NewReader(c2) + var resp Response + if err = resp.Read(br); err != nil { + t.Fatalf("unexpected error: %s", err) + } + if resp.StatusCode() != StatusTooManyRequests { + t.Fatalf("unexpected status code for the second connection: %d. Expecting %d", + resp.StatusCode(), StatusTooManyRequests) + } + + if _, err = c1.Write([]byte("GET / HTTP/1.1\r\nHost: aa\r\n\r\n")); err != nil { + t.Fatalf("unexpected error when writing to the first connection: %s", err) + } + br = bufio.NewReader(c1) + if err = resp.Read(br); err != nil { + t.Fatalf("unexpected error: %s", err) + } + if resp.StatusCode() != StatusOK { + t.Fatalf("unexpected status code for the first connection: %d. Expecting %d", + resp.StatusCode(), StatusOK) + } + if string(resp.Body()) != "OK" { + t.Fatalf("unexpected body for the first connection: %q. 
Expecting %q", resp.Body(), "OK") + } + close(clientCh) + }() + + select { + case <-clientCh: + case <-time.After(time.Second): + t.Fatalf("timeout") + } + + if err := ln.Close(); err != nil { + t.Fatalf("unexpected error: %s", err) + } + + select { + case <-serverCh: + case <-time.After(time.Second): + t.Fatalf("timeout") + } +} + +type fakeIPListener struct { + net.Listener +} + +func (ln *fakeIPListener) Accept() (net.Conn, error) { + conn, err := ln.Listener.Accept() + if err != nil { + return nil, err + } + return &fakeIPConn{ + Conn: conn, + }, nil +} + +type fakeIPConn struct { + net.Conn +} + +func (conn *fakeIPConn) RemoteAddr() net.Addr { + addr, err := net.ResolveTCPAddr("tcp4", "1.2.3.4:5789") + if err != nil { + panic(fmt.Sprintf("BUG: unexpected error: %s", err)) + } + return addr +} + +func TestServerConcurrencyLimit(t *testing.T) { + s := &Server{ + Handler: func(ctx *RequestCtx) { + ctx.WriteString("OK") + }, + Concurrency: 1, + Logger: &customLogger{}, + } + + ln := fasthttputil.NewInmemoryListener() + + serverCh := make(chan struct{}) + go func() { + if err := s.Serve(ln); err != nil { + t.Fatalf("unexpected error: %s", err) + } + close(serverCh) + }() + + clientCh := make(chan struct{}) + go func() { + c1, err := ln.Dial() + if err != nil { + t.Fatalf("unexpected error: %s", err) + } + c2, err := ln.Dial() + if err != nil { + t.Fatalf("unexpected error: %s", err) + } + br := bufio.NewReader(c2) + var resp Response + if err = resp.Read(br); err != nil { + t.Fatalf("unexpected error: %s", err) + } + if resp.StatusCode() != StatusServiceUnavailable { + t.Fatalf("unexpected status code for the second connection: %d. Expecting %d", + resp.StatusCode(), StatusServiceUnavailable) + } + + if _, err = c1.Write([]byte("GET / HTTP/1.1\r\nHost: aa\r\n\r\n")); err != nil { + t.Fatalf("unexpected error when writing to the first connection: %s", err) + } + br = bufio.NewReader(c1) + if err = resp.Read(br); err != nil { + t.Fatalf("unexpected error: %s", err) + } + if resp.StatusCode() != StatusOK { + t.Fatalf("unexpected status code for the first connection: %d. Expecting %d", + resp.StatusCode(), StatusOK) + } + if string(resp.Body()) != "OK" { + t.Fatalf("unexpected body for the first connection: %q. Expecting %q", resp.Body(), "OK") + } + close(clientCh) + }() + + select { + case <-clientCh: + case <-time.After(time.Second): + t.Fatalf("timeout") + } + + if err := ln.Close(); err != nil { + t.Fatalf("unexpected error: %s", err) + } + + select { + case <-serverCh: + case <-time.After(time.Second): + t.Fatalf("timeout") + } +} + +func TestServerWriteFastError(t *testing.T) { + s := &Server{ + Name: "foobar", + } + var buf bytes.Buffer + expectedBody := "access denied" + s.writeFastError(&buf, StatusForbidden, expectedBody) + + br := bufio.NewReader(&buf) + var resp Response + if err := resp.Read(br); err != nil { + t.Fatalf("unexpected error: %s", err) + } + if resp.StatusCode() != StatusForbidden { + t.Fatalf("unexpected status code: %d. Expecting %d", resp.StatusCode(), StatusForbidden) + } + body := resp.Body() + if string(body) != expectedBody { + t.Fatalf("unexpected body: %q. Expecting %q", body, expectedBody) + } + server := string(resp.Header.Server()) + if server != s.Name { + t.Fatalf("unexpected server: %q. Expecting %q", server, s.Name) + } + contentType := string(resp.Header.ContentType()) + if contentType != "text/plain" { + t.Fatalf("unexpected content-type: %q. 
Expecting %q", contentType, "text/plain") + } + if !resp.Header.ConnectionClose() { + t.Fatalf("expecting 'Connection: close' response header") + } +} + +func TestServerTLS(t *testing.T) { + text := []byte("Make fasthttp great again") + ln := fasthttputil.NewInmemoryListener() + + certFile := "./ssl-cert-snakeoil.pem" + keyFile := "./ssl-cert-snakeoil.key" + + s := &Server{ + Handler: func(ctx *RequestCtx) { + ctx.Write(text) + }, + } + + err := s.AppendCert(certFile, keyFile) + if err != nil { + t.Fatal(err) + } + go func() { + err = s.ServeTLS(ln, "", "") + if err != nil { + t.Fatal(err) + } + }() + + c := &Client{ + ReadTimeout: time.Second * 2, + Dial: func(addr string) (net.Conn, error) { + return ln.Dial() + }, + TLSConfig: &tls.Config{ + InsecureSkipVerify: true, + }, + } + + req, res := AcquireRequest(), AcquireResponse() + req.SetRequestURI("https://some.url") + + err = c.Do(req, res) + if err != nil { + t.Fatal(err) + } + if !bytes.Equal(text, res.Body()) { + t.Fatal("error transmitting information") + } +} + +func TestServerServeTLSEmbed(t *testing.T) { + ln := fasthttputil.NewInmemoryListener() + + certFile := "./ssl-cert-snakeoil.pem" + keyFile := "./ssl-cert-snakeoil.key" + + certData, err := ioutil.ReadFile(certFile) + if err != nil { + t.Fatalf("unexpected error when reading %q: %s", certFile, err) + } + keyData, err := ioutil.ReadFile(keyFile) + if err != nil { + t.Fatalf("unexpected error when reading %q: %s", keyFile, err) + } + + // start the server + ch := make(chan struct{}) + go func() { + err := ServeTLSEmbed(ln, certData, keyData, func(ctx *RequestCtx) { + if !ctx.IsTLS() { + ctx.Error("expecting tls", StatusBadRequest) + return + } + scheme := ctx.URI().Scheme() + if string(scheme) != "https" { + ctx.Error(fmt.Sprintf("unexpected scheme=%q. Expecting %q", scheme, "https"), StatusBadRequest) + return + } + ctx.WriteString("success") + }) + if err != nil { + t.Fatalf("unexpected error: %s", err) + } + close(ch) + }() + + // establish connection to the server + conn, err := ln.Dial() + if err != nil { + t.Fatalf("unexpected error: %s", err) + } + tlsConn := tls.Client(conn, &tls.Config{ + InsecureSkipVerify: true, + }) + + // send request + if _, err = tlsConn.Write([]byte("GET / HTTP/1.1\r\nHost: aaa\r\n\r\n")); err != nil { + t.Fatalf("unexpected error: %s", err) + } + + // read response + respCh := make(chan struct{}) + go func() { + br := bufio.NewReader(tlsConn) + var resp Response + if err := resp.Read(br); err != nil { + t.Fatalf("unexpected error") + } + body := resp.Body() + if string(body) != "success" { + t.Fatalf("unexpected response body %q. Expecting %q", body, "success") + } + close(respCh) + }() + select { + case <-respCh: + case <-time.After(time.Second): + t.Fatalf("timeout") + } + + // close the server + if err = ln.Close(); err != nil { + t.Fatalf("unexpected error: %s", err) + } + select { + case <-ch: + case <-time.After(time.Second): + t.Fatalf("timeout") + } +} + +func TestServerMultipartFormDataRequest(t *testing.T) { + reqS := `POST /upload HTTP/1.1 +Host: qwerty.com +Content-Length: 521 +Content-Type: multipart/form-data; boundary=----WebKitFormBoundaryJwfATyF8tmxSJnLg + +------WebKitFormBoundaryJwfATyF8tmxSJnLg +Content-Disposition: form-data; name="f1" + +value1 +------WebKitFormBoundaryJwfATyF8tmxSJnLg +Content-Disposition: form-data; name="fileaaa"; filename="TODO" +Content-Type: application/octet-stream + +- SessionClient with referer and cookies support. +- Client with requests' pipelining support. 
+- ProxyHandler similar to FSHandler. +- WebSockets. See https://tools.ietf.org/html/rfc6455 . +- HTTP/2.0. See https://tools.ietf.org/html/rfc7540 . + +------WebKitFormBoundaryJwfATyF8tmxSJnLg-- + +GET / HTTP/1.1 +Host: asbd +Connection: close + +` + + ln := fasthttputil.NewInmemoryListener() + + s := &Server{ + Handler: func(ctx *RequestCtx) { + switch string(ctx.Path()) { + case "/upload": + f, err := ctx.MultipartForm() + if err != nil { + t.Fatalf("unexpected error: %s", err) + } + if len(f.Value) != 1 { + t.Fatalf("unexpected values %d. Expecting %d", len(f.Value), 1) + } + if len(f.File) != 1 { + t.Fatalf("unexpected file values %d. Expecting %d", len(f.File), 1) + } + fv := ctx.FormValue("f1") + if string(fv) != "value1" { + t.Fatalf("unexpected form value: %q. Expecting %q", fv, "value1") + } + ctx.Redirect("/", StatusSeeOther) + default: + ctx.WriteString("non-upload") + } + }, + } + + ch := make(chan struct{}) + go func() { + if err := s.Serve(ln); err != nil { + t.Fatalf("unexpected error: %s", err) + } + close(ch) + }() + + conn, err := ln.Dial() + if err != nil { + t.Fatalf("unexpected error: %s", err) + } + if _, err = conn.Write([]byte(reqS)); err != nil { + t.Fatalf("unexpected error: %s", err) + } + + var resp Response + br := bufio.NewReader(conn) + respCh := make(chan struct{}) + go func() { + if err := resp.Read(br); err != nil { + t.Fatalf("error when reading response: %s", err) + } + if resp.StatusCode() != StatusSeeOther { + t.Fatalf("unexpected status code %d. Expecting %d", resp.StatusCode(), StatusSeeOther) + } + loc := resp.Header.Peek("Location") + if string(loc) != "http://qwerty.com/" { + t.Fatalf("unexpected location %q. Expecting %q", loc, "http://qwerty.com/") + } + + if err := resp.Read(br); err != nil { + t.Fatalf("error when reading the second response: %s", err) + } + if resp.StatusCode() != StatusOK { + t.Fatalf("unexpected status code: %d. Expecting %d", resp.StatusCode(), StatusOK) + } + body := resp.Body() + if string(body) != "non-upload" { + t.Fatalf("unexpected body %q. Expecting %q", body, "non-upload") + } + close(respCh) + }() + + select { + case <-respCh: + case <-time.After(time.Second): + t.Fatalf("timeout") + } + + if err := ln.Close(); err != nil { + t.Fatalf("error when closing listener: %s", err) + } + + select { + case <-ch: + case <-time.After(time.Second): + t.Fatalf("timeout when waiting for the server to stop") + } +} + +func TestServerGetWithContent(t *testing.T) { + h := func(ctx *RequestCtx) { + ctx.Success("foo/bar", []byte("success")) + } + s := &Server{ + Handler: h, + } + + rw := &readWriter{} + rw.r.WriteString("GET / HTTP/1.1\r\nHost: mm.com\r\nContent-Length: 5\r\n\r\nabcde") + + ch := make(chan error) + go func() { + ch <- s.ServeConn(rw) + }() + + select { + case err := <-ch: + if err != nil { + t.Fatalf("Unexpected error from serveConn: %s.", err) + } + case <-time.After(100 * time.Millisecond): + t.Fatalf("timeout") + } + + resp := rw.w.String() + if !strings.HasSuffix(resp, "success") { + t.Fatalf("unexpected response %s.", resp) + } +} + +func TestServerDisableHeaderNamesNormalizing(t *testing.T) { + headerName := "CASE-senSITive-HEAder-NAME" + headerNameLower := strings.ToLower(headerName) + headerValue := "foobar baz" + s := &Server{ + Handler: func(ctx *RequestCtx) { + hv := ctx.Request.Header.Peek(headerName) + if string(hv) != headerValue { + t.Fatalf("unexpected header value for %q: %q. 
Expecting %q", headerName, hv, headerValue) + } + hv = ctx.Request.Header.Peek(headerNameLower) + if len(hv) > 0 { + t.Fatalf("unexpected header value for %q: %q. Expecting empty value", headerNameLower, hv) + } + ctx.Response.Header.Set(headerName, headerValue) + ctx.WriteString("ok") + ctx.SetContentType("aaa") + }, + DisableHeaderNamesNormalizing: true, + } + + rw := &readWriter{} + rw.r.WriteString(fmt.Sprintf("GET / HTTP/1.1\r\n%s: %s\r\nHost: google.com\r\n\r\n", headerName, headerValue)) + + ch := make(chan error) + go func() { + ch <- s.ServeConn(rw) + }() + + select { + case err := <-ch: + if err != nil { + t.Fatalf("Unexpected error from serveConn: %s", err) + } + case <-time.After(100 * time.Millisecond): + t.Fatalf("timeout") + } + + br := bufio.NewReader(&rw.w) + var resp Response + resp.Header.DisableNormalizing() + if err := resp.Read(br); err != nil { + t.Fatalf("unexpected error: %s", err) + } + + hv := resp.Header.Peek(headerName) + if string(hv) != headerValue { + t.Fatalf("unexpected header value for %q: %q. Expecting %q", headerName, hv, headerValue) + } + hv = resp.Header.Peek(headerNameLower) + if len(hv) > 0 { + t.Fatalf("unexpected header value for %q: %q. Expecting empty value", headerNameLower, hv) + } +} + +func TestServerReduceMemoryUsageSerial(t *testing.T) { + ln := fasthttputil.NewInmemoryListener() + + s := &Server{ + Handler: func(ctx *RequestCtx) {}, + ReduceMemoryUsage: true, + } + + ch := make(chan struct{}) + go func() { + if err := s.Serve(ln); err != nil { + t.Fatalf("unexpected error: %s", err) + } + close(ch) + }() + + testServerRequests(t, ln) + + if err := ln.Close(); err != nil { + t.Fatalf("error when closing listener: %s", err) + } + + select { + case <-ch: + case <-time.After(time.Second): + t.Fatalf("timeout when waiting for the server to stop") + } +} + +func TestServerReduceMemoryUsageConcurrent(t *testing.T) { + ln := fasthttputil.NewInmemoryListener() + + s := &Server{ + Handler: func(ctx *RequestCtx) {}, + ReduceMemoryUsage: true, + } + + ch := make(chan struct{}) + go func() { + if err := s.Serve(ln); err != nil { + t.Fatalf("unexpected error: %s", err) + } + close(ch) + }() + + gCh := make(chan struct{}) + for i := 0; i < 10; i++ { + go func() { + testServerRequests(t, ln) + gCh <- struct{}{} + }() + } + for i := 0; i < 10; i++ { + select { + case <-gCh: + case <-time.After(time.Second): + t.Fatalf("timeout on goroutine %d", i) + } + } + + if err := ln.Close(); err != nil { + t.Fatalf("error when closing listener: %s", err) + } + + select { + case <-ch: + case <-time.After(time.Second): + t.Fatalf("timeout when waiting for the server to stop") + } +} + +func testServerRequests(t *testing.T, ln *fasthttputil.InmemoryListener) { + conn, err := ln.Dial() + if err != nil { + t.Fatalf("unexpected error: %s", err) + } + + br := bufio.NewReader(conn) + var resp Response + for i := 0; i < 10; i++ { + if _, err = fmt.Fprintf(conn, "GET / HTTP/1.1\r\nHost: aaa\r\n\r\n"); err != nil { + t.Fatalf("unexpected error on iteration %d: %s", i, err) + } + + respCh := make(chan struct{}) + go func() { + if err = resp.Read(br); err != nil { + t.Fatalf("unexpected error when reading response on iteration %d: %s", i, err) + } + close(respCh) + }() + select { + case <-respCh: + case <-time.After(time.Second): + t.Fatalf("timeout on iteration %d", i) + } + } + + if err = conn.Close(); err != nil { + t.Fatalf("error when closing the connection: %s", err) + } +} + +func TestServerHTTP10ConnectionKeepAlive(t *testing.T) { + ln := 
fasthttputil.NewInmemoryListener() + + ch := make(chan struct{}) + go func() { + err := Serve(ln, func(ctx *RequestCtx) { + if string(ctx.Path()) == "/close" { + ctx.SetConnectionClose() + } + }) + if err != nil { + t.Fatalf("unexpected error: %s", err) + } + close(ch) + }() + + conn, err := ln.Dial() + if err != nil { + t.Fatalf("unexpected error: %s", err) + } + _, err = fmt.Fprintf(conn, "%s", "GET / HTTP/1.0\r\nHost: aaa\r\nConnection: keep-alive\r\n\r\n") + if err != nil { + t.Fatalf("error when writing request: %s", err) + } + _, err = fmt.Fprintf(conn, "%s", "GET /close HTTP/1.0\r\nHost: aaa\r\nConnection: keep-alive\r\n\r\n") + if err != nil { + t.Fatalf("error when writing request: %s", err) + } + + br := bufio.NewReader(conn) + var resp Response + if err = resp.Read(br); err != nil { + t.Fatalf("error when reading response: %s", err) + } + if resp.ConnectionClose() { + t.Fatalf("response mustn't have 'Connection: close' header") + } + if err = resp.Read(br); err != nil { + t.Fatalf("error when reading response: %s", err) + } + if !resp.ConnectionClose() { + t.Fatalf("response must have 'Connection: close' header") + } + + tailCh := make(chan struct{}) + go func() { + tail, err := ioutil.ReadAll(br) + if err != nil { + t.Fatalf("error when reading tail: %s", err) + } + if len(tail) > 0 { + t.Fatalf("unexpected non-zero tail %q", tail) + } + close(tailCh) + }() + + select { + case <-tailCh: + case <-time.After(time.Second): + t.Fatalf("timeout when reading tail") + } + + if err = conn.Close(); err != nil { + t.Fatalf("error when closing the connection: %s", err) + } + + if err = ln.Close(); err != nil { + t.Fatalf("error when closing listener: %s", err) + } + + select { + case <-ch: + case <-time.After(time.Second): + t.Fatalf("timeout when waiting for the server to stop") + } +} + +func TestServerHTTP10ConnectionClose(t *testing.T) { + ln := fasthttputil.NewInmemoryListener() + + ch := make(chan struct{}) + go func() { + err := Serve(ln, func(ctx *RequestCtx) { + // The server must close the connection irregardless + // of request and response state set inside request + // handler, since the HTTP/1.0 request + // had no 'Connection: keep-alive' header. 
+ ctx.Request.Header.ResetConnectionClose() + ctx.Request.Header.Set("Connection", "keep-alive") + ctx.Response.Header.ResetConnectionClose() + ctx.Response.Header.Set("Connection", "keep-alive") + }) + if err != nil { + t.Fatalf("unexpected error: %s", err) + } + close(ch) + }() + + conn, err := ln.Dial() + if err != nil { + t.Fatalf("unexpected error: %s", err) + } + _, err = fmt.Fprintf(conn, "%s", "GET / HTTP/1.0\r\nHost: aaa\r\n\r\n") + if err != nil { + t.Fatalf("error when writing request: %s", err) + } + + br := bufio.NewReader(conn) + var resp Response + if err = resp.Read(br); err != nil { + t.Fatalf("error when reading response: %s", err) + } + + if !resp.ConnectionClose() { + t.Fatalf("HTTP1.0 response must have 'Connection: close' header") + } + + tailCh := make(chan struct{}) + go func() { + tail, err := ioutil.ReadAll(br) + if err != nil { + t.Fatalf("error when reading tail: %s", err) + } + if len(tail) > 0 { + t.Fatalf("unexpected non-zero tail %q", tail) + } + close(tailCh) + }() + + select { + case <-tailCh: + case <-time.After(time.Second): + t.Fatalf("timeout when reading tail") + } + + if err = conn.Close(); err != nil { + t.Fatalf("error when closing the connection: %s", err) + } + + if err = ln.Close(); err != nil { + t.Fatalf("error when closing listener: %s", err) + } + + select { + case <-ch: + case <-time.After(time.Second): + t.Fatalf("timeout when waiting for the server to stop") + } +} + +func TestRequestCtxFormValue(t *testing.T) { + var ctx RequestCtx + var req Request + req.SetRequestURI("/foo/bar?baz=123&aaa=bbb") + req.SetBodyString("qqq=port&mmm=sddd") + req.Header.SetContentType("application/x-www-form-urlencoded") + + ctx.Init(&req, nil, nil) + + v := ctx.FormValue("baz") + if string(v) != "123" { + t.Fatalf("unexpected value %q. Expecting %q", v, "123") + } + v = ctx.FormValue("mmm") + if string(v) != "sddd" { + t.Fatalf("unexpected value %q. Expecting %q", v, "sddd") + } + v = ctx.FormValue("aaaasdfsdf") + if len(v) > 0 { + t.Fatalf("unexpected value for unknown key %q", v) + } +} + +func TestRequestCtxUserValue(t *testing.T) { + var ctx RequestCtx + + for i := 0; i < 5; i++ { + k := fmt.Sprintf("key-%d", i) + ctx.SetUserValue(k, i) + } + for i := 5; i < 10; i++ { + k := fmt.Sprintf("key-%d", i) + ctx.SetUserValueBytes([]byte(k), i) + } + + for i := 0; i < 10; i++ { + k := fmt.Sprintf("key-%d", i) + v := ctx.UserValue(k) + n, ok := v.(int) + if !ok || n != i { + t.Fatalf("unexpected value obtained for key %q: %v. 
Expecting %d", k, v, i) + } + } + vlen := 0 + ctx.VisitUserValues(func(key []byte, value interface{}) { + vlen++ + v := ctx.UserValueBytes(key) + if v != value { + t.Fatalf("unexpected value obtained from VisitUserValues for key: %q, expecting: %#v but got: %#v", key, v, value) + } + }) + if len(ctx.userValues) != vlen { + t.Fatalf("the length of user values returned from VisitUserValues is not equal to the length of the userValues, expecting: %d but got: %d", len(ctx.userValues), vlen) + } +} + +func TestServerHeadRequest(t *testing.T) { + s := &Server{ + Handler: func(ctx *RequestCtx) { + fmt.Fprintf(ctx, "Request method is %q", ctx.Method()) + ctx.SetContentType("aaa/bbb") + }, + } + + rw := &readWriter{} + rw.r.WriteString("HEAD /foobar HTTP/1.1\r\nHost: aaa.com\r\n\r\n") + + ch := make(chan error) + go func() { + ch <- s.ServeConn(rw) + }() + + select { + case err := <-ch: + if err != nil { + t.Fatalf("Unexpected error from serveConn: %s", err) + } + case <-time.After(100 * time.Millisecond): + t.Fatalf("timeout") + } + + br := bufio.NewReader(&rw.w) + var resp Response + resp.SkipBody = true + if err := resp.Read(br); err != nil { + t.Fatalf("Unexpected error when parsing response: %s", err) + } + if resp.Header.StatusCode() != StatusOK { + t.Fatalf("unexpected status code: %d. Expecting %d", resp.Header.StatusCode(), StatusOK) + } + if len(resp.Body()) > 0 { + t.Fatalf("Unexpected non-zero body %q", resp.Body()) + } + if resp.Header.ContentLength() != 24 { + t.Fatalf("unexpected content-length %d. Expecting %d", resp.Header.ContentLength(), 24) + } + if string(resp.Header.ContentType()) != "aaa/bbb" { + t.Fatalf("unexpected content-type %q. Expecting %q", resp.Header.ContentType(), "aaa/bbb") + } + + data, err := ioutil.ReadAll(br) + if err != nil { + t.Fatalf("Unexpected error when reading remaining data: %s", err) + } + if len(data) > 0 { + t.Fatalf("unexpected remaining data %q", data) + } +} + +func TestServerExpect100Continue(t *testing.T) { + s := &Server{ + Handler: func(ctx *RequestCtx) { + if !ctx.IsPost() { + t.Fatalf("unexpected method %q. Expecting POST", ctx.Method()) + } + if string(ctx.Path()) != "/foo" { + t.Fatalf("unexpected path %q. Expecting %q", ctx.Path(), "/foo") + } + ct := ctx.Request.Header.ContentType() + if string(ct) != "a/b" { + t.Fatalf("unexpectected content-type: %q. Expecting %q", ct, "a/b") + } + if string(ctx.PostBody()) != "12345" { + t.Fatalf("unexpected body: %q. 
Expecting %q", ctx.PostBody(), "12345") + } + ctx.WriteString("foobar") + }, + } + + rw := &readWriter{} + rw.r.WriteString("POST /foo HTTP/1.1\r\nHost: gle.com\r\nExpect: 100-continue\r\nContent-Length: 5\r\nContent-Type: a/b\r\n\r\n12345") + + ch := make(chan error) + go func() { + ch <- s.ServeConn(rw) + }() + + select { + case err := <-ch: + if err != nil { + t.Fatalf("Unexpected error from serveConn: %s", err) + } + case <-time.After(100 * time.Millisecond): + t.Fatalf("timeout") + } + + br := bufio.NewReader(&rw.w) + verifyResponse(t, br, StatusOK, string(defaultContentType), "foobar") + + data, err := ioutil.ReadAll(br) + if err != nil { + t.Fatalf("Unexpected error when reading remaining data: %s", err) + } + if len(data) > 0 { + t.Fatalf("unexpected remaining data %q", data) + } +} + +func TestCompressHandler(t *testing.T) { + expectedBody := string(createFixedBody(2e4)) + h := CompressHandler(func(ctx *RequestCtx) { + ctx.Write([]byte(expectedBody)) + }) + + var ctx RequestCtx + var resp Response + + // verify uncompressed response + h(&ctx) + s := ctx.Response.String() + br := bufio.NewReader(bytes.NewBufferString(s)) + if err := resp.Read(br); err != nil { + t.Fatalf("unexpected error: %s", err) + } + ce := resp.Header.Peek("Content-Encoding") + if string(ce) != "" { + t.Fatalf("unexpected Content-Encoding: %q. Expecting %q", ce, "") + } + body := resp.Body() + if string(body) != expectedBody { + t.Fatalf("unexpected body %q. Expecting %q", body, expectedBody) + } + + // verify gzip-compressed response + ctx.Request.Reset() + ctx.Response.Reset() + ctx.Request.Header.Set("Accept-Encoding", "gzip, deflate, sdhc") + + h(&ctx) + s = ctx.Response.String() + br = bufio.NewReader(bytes.NewBufferString(s)) + if err := resp.Read(br); err != nil { + t.Fatalf("unexpected error: %s", err) + } + ce = resp.Header.Peek("Content-Encoding") + if string(ce) != "gzip" { + t.Fatalf("unexpected Content-Encoding: %q. Expecting %q", ce, "gzip") + } + body, err := resp.BodyGunzip() + if err != nil { + t.Fatalf("unexpected error: %s", err) + } + if string(body) != expectedBody { + t.Fatalf("unexpected body %q. Expecting %q", body, expectedBody) + } + + // an attempt to compress already compressed response + ctx.Request.Reset() + ctx.Response.Reset() + ctx.Request.Header.Set("Accept-Encoding", "gzip, deflate, sdhc") + hh := CompressHandler(h) + hh(&ctx) + s = ctx.Response.String() + br = bufio.NewReader(bytes.NewBufferString(s)) + if err := resp.Read(br); err != nil { + t.Fatalf("unexpected error: %s", err) + } + ce = resp.Header.Peek("Content-Encoding") + if string(ce) != "gzip" { + t.Fatalf("unexpected Content-Encoding: %q. Expecting %q", ce, "gzip") + } + body, err = resp.BodyGunzip() + if err != nil { + t.Fatalf("unexpected error: %s", err) + } + if string(body) != expectedBody { + t.Fatalf("unexpected body %q. Expecting %q", body, expectedBody) + } + + // verify deflate-compressed response + ctx.Request.Reset() + ctx.Response.Reset() + ctx.Request.Header.Set("Accept-Encoding", "foobar, deflate, sdhc") + + h(&ctx) + s = ctx.Response.String() + br = bufio.NewReader(bytes.NewBufferString(s)) + if err := resp.Read(br); err != nil { + t.Fatalf("unexpected error: %s", err) + } + ce = resp.Header.Peek("Content-Encoding") + if string(ce) != "deflate" { + t.Fatalf("unexpected Content-Encoding: %q. Expecting %q", ce, "deflate") + } + body, err = resp.BodyInflate() + if err != nil { + t.Fatalf("unexpected error: %s", err) + } + if string(body) != expectedBody { + t.Fatalf("unexpected body %q. 
Expecting %q", body, expectedBody) + } +} + +func TestRequestCtxWriteString(t *testing.T) { + var ctx RequestCtx + n, err := ctx.WriteString("foo") + if err != nil { + t.Fatalf("unexpected error: %s", err) + } + if n != 3 { + t.Fatalf("unexpected n %d. Expecting 3", n) + } + n, err = ctx.WriteString("привет") + if err != nil { + t.Fatalf("unexpected error: %s", err) + } + if n != 12 { + t.Fatalf("unexpected n=%d. Expecting 12", n) + } + + s := ctx.Response.Body() + if string(s) != "fooпривет" { + t.Fatalf("unexpected response body %q. Expecting %q", s, "fooпривет") + } +} + +func TestServeConnNonHTTP11KeepAlive(t *testing.T) { + rw := &readWriter{} + rw.r.WriteString("GET /foo HTTP/1.0\r\nConnection: keep-alive\r\nHost: google.com\r\n\r\n") + rw.r.WriteString("GET /bar HTTP/1.0\r\nHost: google.com\r\n\r\n") + rw.r.WriteString("GET /must/be/ignored HTTP/1.0\r\nHost: google.com\r\n\r\n") + + requestsServed := 0 + + ch := make(chan struct{}) + go func() { + err := ServeConn(rw, func(ctx *RequestCtx) { + requestsServed++ + ctx.SuccessString("aaa/bbb", "foobar") + }) + if err != nil { + t.Fatalf("unexpected error in ServeConn: %s", err) + } + close(ch) + }() + + select { + case <-ch: + case <-time.After(time.Second): + t.Fatalf("timeout") + } + + br := bufio.NewReader(&rw.w) + + var resp Response + + // verify the first response + if err := resp.Read(br); err != nil { + t.Fatalf("Unexpected error when parsing response: %s", err) + } + if string(resp.Header.Peek("Connection")) != "keep-alive" { + t.Fatalf("unexpected Connection header %q. Expecting %q", resp.Header.Peek("Connection"), "keep-alive") + } + if resp.Header.ConnectionClose() { + t.Fatalf("unexpected Connection: close") + } + + // verify the second response + if err := resp.Read(br); err != nil { + t.Fatalf("Unexpected error when parsing response: %s", err) + } + if string(resp.Header.Peek("Connection")) != "close" { + t.Fatalf("unexpected Connection header %q. Expecting %q", resp.Header.Peek("Connection"), "close") + } + if !resp.Header.ConnectionClose() { + t.Fatalf("expecting Connection: close") + } + + data, err := ioutil.ReadAll(br) + if err != nil { + t.Fatalf("Unexpected error when reading remaining data: %s", err) + } + if len(data) != 0 { + t.Fatalf("Unexpected data read after responses %q", data) + } + + if requestsServed != 2 { + t.Fatalf("unexpected number of requests served: %d. Expecting 2", requestsServed) + } +} + +func TestRequestCtxSetBodyStreamWriter(t *testing.T) { + var ctx RequestCtx + var req Request + ctx.Init(&req, nil, defaultLogger) + + if ctx.IsBodyStream() { + t.Fatalf("IsBodyStream must return false") + } + ctx.SetBodyStreamWriter(func(w *bufio.Writer) { + fmt.Fprintf(w, "body writer line 1\n") + if err := w.Flush(); err != nil { + t.Fatalf("unexpected error: %s", err) + } + fmt.Fprintf(w, "body writer line 2\n") + }) + if !ctx.IsBodyStream() { + t.Fatalf("IsBodyStream must return true") + } + + s := ctx.Response.String() + + br := bufio.NewReader(bytes.NewBufferString(s)) + var resp Response + if err := resp.Read(br); err != nil { + t.Fatalf("Error when reading response: %s", err) + } + + body := string(resp.Body()) + expectedBody := "body writer line 1\nbody writer line 2\n" + if body != expectedBody { + t.Fatalf("unexpected body: %q. 
Expecting %q", body, expectedBody) + } +} + +func TestRequestCtxIfModifiedSince(t *testing.T) { + var ctx RequestCtx + var req Request + ctx.Init(&req, nil, defaultLogger) + + lastModified := time.Now().Add(-time.Hour) + + if !ctx.IfModifiedSince(lastModified) { + t.Fatalf("IfModifiedSince must return true for non-existing If-Modified-Since header") + } + + ctx.Request.Header.Set("If-Modified-Since", string(AppendHTTPDate(nil, lastModified))) + + if ctx.IfModifiedSince(lastModified) { + t.Fatalf("If-Modified-Since current time must return false") + } + + past := lastModified.Add(-time.Hour) + if ctx.IfModifiedSince(past) { + t.Fatalf("If-Modified-Since past time must return false") + } + + future := lastModified.Add(time.Hour) + if !ctx.IfModifiedSince(future) { + t.Fatalf("If-Modified-Since future time must return true") + } +} + +func TestRequestCtxSendFileNotModified(t *testing.T) { + var ctx RequestCtx + var req Request + ctx.Init(&req, nil, defaultLogger) + + filePath := "./server_test.go" + lastModified, err := FileLastModified(filePath) + if err != nil { + t.Fatalf("unexpected error: %s", err) + } + ctx.Request.Header.Set("If-Modified-Since", string(AppendHTTPDate(nil, lastModified))) + + ctx.SendFile(filePath) + + s := ctx.Response.String() + + var resp Response + br := bufio.NewReader(bytes.NewBufferString(s)) + if err := resp.Read(br); err != nil { + t.Fatalf("error when reading response: %s", err) + } + if resp.StatusCode() != StatusNotModified { + t.Fatalf("unexpected status code: %d. Expecting %d", resp.StatusCode(), StatusNotModified) + } + if len(resp.Body()) > 0 { + t.Fatalf("unexpected non-zero response body: %q", resp.Body()) + } +} + +func TestRequestCtxSendFileModified(t *testing.T) { + var ctx RequestCtx + var req Request + ctx.Init(&req, nil, defaultLogger) + + filePath := "./server_test.go" + lastModified, err := FileLastModified(filePath) + if err != nil { + t.Fatalf("unexpected error: %s", err) + } + lastModified = lastModified.Add(-time.Hour) + ctx.Request.Header.Set("If-Modified-Since", string(AppendHTTPDate(nil, lastModified))) + + ctx.SendFile(filePath) + + s := ctx.Response.String() + + var resp Response + br := bufio.NewReader(bytes.NewBufferString(s)) + if err := resp.Read(br); err != nil { + t.Fatalf("error when reading response: %s", err) + } + if resp.StatusCode() != StatusOK { + t.Fatalf("unexpected status code: %d. Expecting %d", resp.StatusCode(), StatusOK) + } + + f, err := os.Open(filePath) + if err != nil { + t.Fatalf("cannot open file: %s", err) + } + body, err := ioutil.ReadAll(f) + f.Close() + if err != nil { + t.Fatalf("error when reading file: %s", err) + } + + if !bytes.Equal(resp.Body(), body) { + t.Fatalf("unexpected response body: %q. Expecting %q", resp.Body(), body) + } +} + +func TestRequestCtxSendFile(t *testing.T) { + var ctx RequestCtx + var req Request + ctx.Init(&req, nil, defaultLogger) + + filePath := "./server_test.go" + ctx.SendFile(filePath) + + w := &bytes.Buffer{} + bw := bufio.NewWriter(w) + if err := ctx.Response.Write(bw); err != nil { + t.Fatalf("error when writing response: %s", err) + } + if err := bw.Flush(); err != nil { + t.Fatalf("error when flushing response: %s", err) + } + + var resp Response + br := bufio.NewReader(w) + if err := resp.Read(br); err != nil { + t.Fatalf("error when reading response: %s", err) + } + if resp.StatusCode() != StatusOK { + t.Fatalf("unexpected status code: %d. 
Expecting %d", resp.StatusCode(), StatusOK) + } + + f, err := os.Open(filePath) + if err != nil { + t.Fatalf("cannot open file: %s", err) + } + body, err := ioutil.ReadAll(f) + f.Close() + if err != nil { + t.Fatalf("error when reading file: %s", err) + } + + if !bytes.Equal(resp.Body(), body) { + t.Fatalf("unexpected response body: %q. Expecting %q", resp.Body(), body) + } +} + +func TestRequestCtxHijack(t *testing.T) { + hijackStartCh := make(chan struct{}) + hijackStopCh := make(chan struct{}) + s := &Server{ + Handler: func(ctx *RequestCtx) { + if ctx.Hijacked() { + t.Fatalf("connection mustn't be hijacked") + } + ctx.Hijack(func(c net.Conn) { + <-hijackStartCh + + b := make([]byte, 1) + // ping-pong echo via hijacked conn + for { + n, err := c.Read(b) + if n != 1 { + if err == io.EOF { + close(hijackStopCh) + return + } + if err != nil { + t.Fatalf("unexpected error: %s", err) + } + t.Fatalf("unexpected number of bytes read: %d. Expecting 1", n) + } + if _, err = c.Write(b); err != nil { + t.Fatalf("unexpected error when writing data: %s", err) + } + } + }) + if !ctx.Hijacked() { + t.Fatalf("connection must be hijacked") + } + ctx.Success("foo/bar", []byte("hijack it!")) + }, + } + + hijackedString := "foobar baz hijacked!!!" + rw := &readWriter{} + rw.r.WriteString("GET /foo HTTP/1.1\r\nHost: google.com\r\n\r\n") + rw.r.WriteString(hijackedString) + + ch := make(chan error) + go func() { + ch <- s.ServeConn(rw) + }() + + select { + case err := <-ch: + if err != nil { + t.Fatalf("Unexpected error from serveConn: %s", err) + } + case <-time.After(100 * time.Millisecond): + t.Fatalf("timeout") + } + + br := bufio.NewReader(&rw.w) + verifyResponse(t, br, StatusOK, "foo/bar", "hijack it!") + + close(hijackStartCh) + select { + case <-hijackStopCh: + case <-time.After(100 * time.Millisecond): + t.Fatalf("timeout") + } + + data, err := ioutil.ReadAll(br) + if err != nil { + t.Fatalf("Unexpected error when reading remaining data: %s", err) + } + if string(data) != hijackedString { + t.Fatalf("Unexpected data read after the first response %q. Expecting %q", data, hijackedString) + } +} + +func TestRequestCtxInit(t *testing.T) { + var ctx RequestCtx + var logger customLogger + globalConnID = 0x123456 + ctx.Init(&ctx.Request, zeroTCPAddr, &logger) + ip := ctx.RemoteIP() + if !ip.IsUnspecified() { + t.Fatalf("unexpected ip for bare RequestCtx: %q. Expected 0.0.0.0", ip) + } + ctx.Logger().Printf("foo bar %d", 10) + + expectedLog := "#0012345700000000 - 0.0.0.0:0<->0.0.0.0:0 - GET http:/// - foo bar 10\n" + if logger.out != expectedLog { + t.Fatalf("Unexpected log output: %q. 
Expected %q", logger.out, expectedLog) + } +} + +func TestTimeoutHandlerSuccess(t *testing.T) { + ln := fasthttputil.NewInmemoryListener() + h := func(ctx *RequestCtx) { + if string(ctx.Path()) == "/" { + ctx.Success("aaa/bbb", []byte("real response")) + } + } + s := &Server{ + Handler: TimeoutHandler(h, 10*time.Second, "timeout!!!"), + } + serverCh := make(chan struct{}) + go func() { + if err := s.Serve(ln); err != nil { + t.Fatalf("unexepcted error: %s", err) + } + close(serverCh) + }() + + concurrency := 20 + clientCh := make(chan struct{}, concurrency) + for i := 0; i < concurrency; i++ { + go func() { + conn, err := ln.Dial() + if err != nil { + t.Fatalf("unexepcted error: %s", err) + } + if _, err = conn.Write([]byte("GET / HTTP/1.1\r\nHost: google.com\r\n\r\n")); err != nil { + t.Fatalf("unexpected error: %s", err) + } + br := bufio.NewReader(conn) + verifyResponse(t, br, StatusOK, "aaa/bbb", "real response") + clientCh <- struct{}{} + }() + } + + for i := 0; i < concurrency; i++ { + select { + case <-clientCh: + case <-time.After(time.Second): + t.Fatalf("timeout") + } + } + + if err := ln.Close(); err != nil { + t.Fatalf("unexpected error: %s", err) + } + + select { + case <-serverCh: + case <-time.After(time.Second): + t.Fatalf("timeout") + } +} + +func TestTimeoutHandlerTimeout(t *testing.T) { + ln := fasthttputil.NewInmemoryListener() + readyCh := make(chan struct{}) + doneCh := make(chan struct{}) + h := func(ctx *RequestCtx) { + ctx.Success("aaa/bbb", []byte("real response")) + <-readyCh + doneCh <- struct{}{} + } + s := &Server{ + Handler: TimeoutHandler(h, 20*time.Millisecond, "timeout!!!"), + } + serverCh := make(chan struct{}) + go func() { + if err := s.Serve(ln); err != nil { + t.Fatalf("unexepcted error: %s", err) + } + close(serverCh) + }() + + concurrency := 20 + clientCh := make(chan struct{}, concurrency) + for i := 0; i < concurrency; i++ { + go func() { + conn, err := ln.Dial() + if err != nil { + t.Fatalf("unexepcted error: %s", err) + } + if _, err = conn.Write([]byte("GET / HTTP/1.1\r\nHost: google.com\r\n\r\n")); err != nil { + t.Fatalf("unexpected error: %s", err) + } + br := bufio.NewReader(conn) + verifyResponse(t, br, StatusRequestTimeout, string(defaultContentType), "timeout!!!") + clientCh <- struct{}{} + }() + } + + for i := 0; i < concurrency; i++ { + select { + case <-clientCh: + case <-time.After(time.Second): + t.Fatalf("timeout") + } + } + + close(readyCh) + for i := 0; i < concurrency; i++ { + select { + case <-doneCh: + case <-time.After(time.Second): + t.Fatalf("timeout") + } + } + + if err := ln.Close(); err != nil { + t.Fatalf("unexpected error: %s", err) + } + + select { + case <-serverCh: + case <-time.After(time.Second): + t.Fatalf("timeout") + } +} + +func TestServerGetOnly(t *testing.T) { + h := func(ctx *RequestCtx) { + if !ctx.IsGet() { + t.Fatalf("non-get request: %q", ctx.Method()) + } + ctx.Success("foo/bar", []byte("success")) + } + s := &Server{ + Handler: h, + GetOnly: true, + } + + rw := &readWriter{} + rw.r.WriteString("POST /foo HTTP/1.1\r\nHost: google.com\r\nContent-Length: 5\r\nContent-Type: aaa\r\n\r\n12345") + + ch := make(chan error) + go func() { + ch <- s.ServeConn(rw) + }() + + select { + case err := <-ch: + if err == nil { + t.Fatalf("expecting error") + } + if err != errGetOnly { + t.Fatalf("Unexpected error from serveConn: %s. 
Expecting %s", err, errGetOnly) + } + case <-time.After(100 * time.Millisecond): + t.Fatalf("timeout") + } + + br := bufio.NewReader(&rw.w) + var resp Response + if err := resp.Read(br); err != nil { + t.Fatalf("unexpected error: %s", err) + } + statusCode := resp.StatusCode() + if statusCode != StatusBadRequest { + t.Fatalf("unexpected status code: %d. Expecting %d", statusCode, StatusBadRequest) + } + if !resp.ConnectionClose() { + t.Fatalf("missing 'Connection: close' response header") + } +} + +func TestServerTimeoutErrorWithResponse(t *testing.T) { + s := &Server{ + Handler: func(ctx *RequestCtx) { + go func() { + ctx.Success("aaa/bbb", []byte("xxxyyy")) + }() + + var resp Response + + resp.SetStatusCode(123) + resp.SetBodyString("foobar. Should be ignored") + ctx.TimeoutErrorWithResponse(&resp) + + resp.SetStatusCode(456) + resp.ResetBody() + fmt.Fprintf(resp.BodyWriter(), "path=%s", ctx.Path()) + resp.Header.SetContentType("foo/bar") + ctx.TimeoutErrorWithResponse(&resp) + }, + } + + rw := &readWriter{} + rw.r.WriteString("GET /foo HTTP/1.1\r\nHost: google.com\r\n\r\n") + rw.r.WriteString("GET /bar HTTP/1.1\r\nHost: google.com\r\n\r\n") + + ch := make(chan error) + go func() { + ch <- s.ServeConn(rw) + }() + + select { + case err := <-ch: + if err != nil { + t.Fatalf("Unexpected error from serveConn: %s", err) + } + case <-time.After(100 * time.Millisecond): + t.Fatalf("timeout") + } + + br := bufio.NewReader(&rw.w) + verifyResponse(t, br, 456, "foo/bar", "path=/foo") + + data, err := ioutil.ReadAll(br) + if err != nil { + t.Fatalf("Unexpected error when reading remaining data: %s", err) + } + if len(data) != 0 { + t.Fatalf("Unexpected data read after the first response %q. Expecting %q", data, "") + } +} + +func TestServerTimeoutErrorWithCode(t *testing.T) { + s := &Server{ + Handler: func(ctx *RequestCtx) { + go func() { + ctx.Success("aaa/bbb", []byte("xxxyyy")) + }() + ctx.TimeoutErrorWithCode("should be ignored", 234) + ctx.TimeoutErrorWithCode("stolen ctx", StatusBadRequest) + }, + } + + rw := &readWriter{} + rw.r.WriteString("GET /foo HTTP/1.1\r\nHost: google.com\r\n\r\n") + rw.r.WriteString("GET /foo HTTP/1.1\r\nHost: google.com\r\n\r\n") + + ch := make(chan error) + go func() { + ch <- s.ServeConn(rw) + }() + + select { + case err := <-ch: + if err != nil { + t.Fatalf("Unexpected error from serveConn: %s", err) + } + case <-time.After(100 * time.Millisecond): + t.Fatalf("timeout") + } + + br := bufio.NewReader(&rw.w) + verifyResponse(t, br, StatusBadRequest, string(defaultContentType), "stolen ctx") + + data, err := ioutil.ReadAll(br) + if err != nil { + t.Fatalf("Unexpected error when reading remaining data: %s", err) + } + if len(data) != 0 { + t.Fatalf("Unexpected data read after the first response %q. 
Expecting %q", data, "") + } +} + +func TestServerTimeoutError(t *testing.T) { + s := &Server{ + Handler: func(ctx *RequestCtx) { + go func() { + ctx.Success("aaa/bbb", []byte("xxxyyy")) + }() + ctx.TimeoutError("should be ignored") + ctx.TimeoutError("stolen ctx") + }, + } + + rw := &readWriter{} + rw.r.WriteString("GET /foo HTTP/1.1\r\nHost: google.com\r\n\r\n") + rw.r.WriteString("GET /foo HTTP/1.1\r\nHost: google.com\r\n\r\n") + + ch := make(chan error) + go func() { + ch <- s.ServeConn(rw) + }() + + select { + case err := <-ch: + if err != nil { + t.Fatalf("Unexpected error from serveConn: %s", err) + } + case <-time.After(100 * time.Millisecond): + t.Fatalf("timeout") + } + + br := bufio.NewReader(&rw.w) + verifyResponse(t, br, StatusRequestTimeout, string(defaultContentType), "stolen ctx") + + data, err := ioutil.ReadAll(br) + if err != nil { + t.Fatalf("Unexpected error when reading remaining data: %s", err) + } + if len(data) != 0 { + t.Fatalf("Unexpected data read after the first response %q. Expecting %q", data, "") + } +} + +func TestServerMaxKeepaliveDuration(t *testing.T) { + s := &Server{ + Handler: func(ctx *RequestCtx) { + time.Sleep(20 * time.Millisecond) + }, + MaxKeepaliveDuration: 10 * time.Millisecond, + } + + rw := &readWriter{} + rw.r.WriteString("GET /aaa HTTP/1.1\r\nHost: aa.com\r\n\r\n") + rw.r.WriteString("GET /bbbb HTTP/1.1\r\nHost: bbb.com\r\n\r\n") + + ch := make(chan error) + go func() { + ch <- s.ServeConn(rw) + }() + + select { + case err := <-ch: + if err != nil { + t.Fatalf("Unexpected error from serveConn: %s", err) + } + case <-time.After(100 * time.Millisecond): + t.Fatalf("timeout") + } + + br := bufio.NewReader(&rw.w) + var resp Response + if err := resp.Read(br); err != nil { + t.Fatalf("Unexpected error when parsing response: %s", err) + } + if !resp.ConnectionClose() { + t.Fatalf("Response must have 'connection: close' header") + } + verifyResponseHeader(t, &resp.Header, 200, 0, string(defaultContentType)) + + data, err := ioutil.ReadAll(br) + if err != nil { + t.Fatalf("Unexpected error when reading remaining data: %s", err) + } + if len(data) != 0 { + t.Fatalf("Unexpected data read after the first response %q. Expecting %q", data, "") + } +} + +func TestServerMaxRequestsPerConn(t *testing.T) { + s := &Server{ + Handler: func(ctx *RequestCtx) {}, + MaxRequestsPerConn: 1, + } + + rw := &readWriter{} + rw.r.WriteString("GET /foo1 HTTP/1.1\r\nHost: google.com\r\n\r\n") + rw.r.WriteString("GET /bar HTTP/1.1\r\nHost: aaa.com\r\n\r\n") + + ch := make(chan error) + go func() { + ch <- s.ServeConn(rw) + }() + + select { + case err := <-ch: + if err != nil { + t.Fatalf("Unexpected error from serveConn: %s", err) + } + case <-time.After(100 * time.Millisecond): + t.Fatalf("timeout") + } + + br := bufio.NewReader(&rw.w) + var resp Response + if err := resp.Read(br); err != nil { + t.Fatalf("Unexpected error when parsing response: %s", err) + } + if !resp.ConnectionClose() { + t.Fatalf("Response must have 'connection: close' header") + } + verifyResponseHeader(t, &resp.Header, 200, 0, string(defaultContentType)) + + data, err := ioutil.ReadAll(br) + if err != nil { + t.Fatalf("Unexpected error when reading remaining data: %s", err) + } + if len(data) != 0 { + t.Fatalf("Unexpected data read after the first response %q. 
Expecting %q", data, "") + } +} + +func TestServerConnectionClose(t *testing.T) { + s := &Server{ + Handler: func(ctx *RequestCtx) { + ctx.SetConnectionClose() + }, + } + + rw := &readWriter{} + rw.r.WriteString("GET /foo1 HTTP/1.1\r\nHost: google.com\r\n\r\n") + rw.r.WriteString("GET /must/be/ignored HTTP/1.1\r\nHost: aaa.com\r\n\r\n") + + ch := make(chan error) + go func() { + ch <- s.ServeConn(rw) + }() + + select { + case err := <-ch: + if err != nil { + t.Fatalf("Unexpected error from serveConn: %s", err) + } + case <-time.After(100 * time.Millisecond): + t.Fatalf("timeout") + } + + br := bufio.NewReader(&rw.w) + var resp Response + + if err := resp.Read(br); err != nil { + t.Fatalf("Unexpected error when parsing response: %s", err) + } + if !resp.ConnectionClose() { + t.Fatalf("expecting Connection: close header") + } + + data, err := ioutil.ReadAll(br) + if err != nil { + t.Fatalf("Unexpected error when reading remaining data: %s", err) + } + if len(data) != 0 { + t.Fatalf("Unexpected data read after the first response %q. Expecting %q", data, "") + } +} + +func TestServerRequestNumAndTime(t *testing.T) { + n := uint64(0) + var connT time.Time + s := &Server{ + Handler: func(ctx *RequestCtx) { + n++ + if ctx.ConnRequestNum() != n { + t.Fatalf("unexpected request number: %d. Expecting %d", ctx.ConnRequestNum(), n) + } + if connT.IsZero() { + connT = ctx.ConnTime() + } + if ctx.ConnTime() != connT { + t.Fatalf("unexpected serve conn time: %s. Expecting %s", ctx.ConnTime(), connT) + } + }, + } + + rw := &readWriter{} + rw.r.WriteString("GET /foo1 HTTP/1.1\r\nHost: google.com\r\n\r\n") + rw.r.WriteString("GET /bar HTTP/1.1\r\nHost: google.com\r\n\r\n") + rw.r.WriteString("GET /baz HTTP/1.1\r\nHost: google.com\r\n\r\n") + + ch := make(chan error) + go func() { + ch <- s.ServeConn(rw) + }() + + select { + case err := <-ch: + if err != nil { + t.Fatalf("Unexpected error from serveConn: %s", err) + } + case <-time.After(100 * time.Millisecond): + t.Fatalf("timeout") + } + + if n != 3 { + t.Fatalf("unexpected number of requests served: %d. 
Expecting %d", n, 3) + } + + br := bufio.NewReader(&rw.w) + verifyResponse(t, br, 200, string(defaultContentType), "") +} + +func TestServerEmptyResponse(t *testing.T) { + s := &Server{ + Handler: func(ctx *RequestCtx) { + // do nothing :) + }, + } + + rw := &readWriter{} + rw.r.WriteString("GET /foo1 HTTP/1.1\r\nHost: google.com\r\n\r\n") + + ch := make(chan error) + go func() { + ch <- s.ServeConn(rw) + }() + + select { + case err := <-ch: + if err != nil { + t.Fatalf("Unexpected error from serveConn: %s", err) + } + case <-time.After(100 * time.Millisecond): + t.Fatalf("timeout") + } + + br := bufio.NewReader(&rw.w) + verifyResponse(t, br, 200, string(defaultContentType), "") +} + +type customLogger struct { + lock sync.Mutex + out string +} + +func (cl *customLogger) Printf(format string, args ...interface{}) { + cl.lock.Lock() + cl.out += fmt.Sprintf(format, args...)[6:] + "\n" + cl.lock.Unlock() +} + +func TestServerLogger(t *testing.T) { + cl := &customLogger{} + s := &Server{ + Handler: func(ctx *RequestCtx) { + logger := ctx.Logger() + h := &ctx.Request.Header + logger.Printf("begin") + ctx.Success("text/html", []byte(fmt.Sprintf("requestURI=%s, body=%q, remoteAddr=%s", + h.RequestURI(), ctx.Request.Body(), ctx.RemoteAddr()))) + logger.Printf("end") + }, + Logger: cl, + } + + rw := &readWriter{} + rw.r.WriteString("GET /foo1 HTTP/1.1\r\nHost: google.com\r\n\r\n") + rw.r.WriteString("POST /foo2 HTTP/1.1\r\nHost: aaa.com\r\nContent-Length: 5\r\nContent-Type: aa\r\n\r\nabcde") + + rwx := &readWriterRemoteAddr{ + rw: rw, + addr: &net.TCPAddr{ + IP: []byte{1, 2, 3, 4}, + Port: 8765, + }, + } + + globalConnID = 0 + ch := make(chan error) + go func() { + ch <- s.ServeConn(rwx) + }() + + select { + case err := <-ch: + if err != nil { + t.Fatalf("Unexpected error from serveConn: %s", err) + } + case <-time.After(100 * time.Millisecond): + t.Fatalf("timeout") + } + + br := bufio.NewReader(&rw.w) + verifyResponse(t, br, 200, "text/html", "requestURI=/foo1, body=\"\", remoteAddr=1.2.3.4:8765") + verifyResponse(t, br, 200, "text/html", "requestURI=/foo2, body=\"abcde\", remoteAddr=1.2.3.4:8765") + + expectedLogOut := `#0000000100000001 - 1.2.3.4:8765<->1.2.3.4:8765 - GET http://google.com/foo1 - begin +#0000000100000001 - 1.2.3.4:8765<->1.2.3.4:8765 - GET http://google.com/foo1 - end +#0000000100000002 - 1.2.3.4:8765<->1.2.3.4:8765 - POST http://aaa.com/foo2 - begin +#0000000100000002 - 1.2.3.4:8765<->1.2.3.4:8765 - POST http://aaa.com/foo2 - end +` + if cl.out != expectedLogOut { + t.Fatalf("Unexpected logger output: %q. 
Expected %q", cl.out, expectedLogOut) + } +} + +func TestServerRemoteAddr(t *testing.T) { + s := &Server{ + Handler: func(ctx *RequestCtx) { + h := &ctx.Request.Header + ctx.Success("text/html", []byte(fmt.Sprintf("requestURI=%s, remoteAddr=%s, remoteIP=%s", + h.RequestURI(), ctx.RemoteAddr(), ctx.RemoteIP()))) + }, + } + + rw := &readWriter{} + rw.r.WriteString("GET /foo1 HTTP/1.1\r\nHost: google.com\r\n\r\n") + + rwx := &readWriterRemoteAddr{ + rw: rw, + addr: &net.TCPAddr{ + IP: []byte{1, 2, 3, 4}, + Port: 8765, + }, + } + + ch := make(chan error) + go func() { + ch <- s.ServeConn(rwx) + }() + + select { + case err := <-ch: + if err != nil { + t.Fatalf("Unexpected error from serveConn: %s", err) + } + case <-time.After(100 * time.Millisecond): + t.Fatalf("timeout") + } + + br := bufio.NewReader(&rw.w) + verifyResponse(t, br, 200, "text/html", "requestURI=/foo1, remoteAddr=1.2.3.4:8765, remoteIP=1.2.3.4") +} + +type readWriterRemoteAddr struct { + net.Conn + rw io.ReadWriteCloser + addr net.Addr +} + +func (rw *readWriterRemoteAddr) Close() error { + return rw.rw.Close() +} + +func (rw *readWriterRemoteAddr) Read(b []byte) (int, error) { + return rw.rw.Read(b) +} + +func (rw *readWriterRemoteAddr) Write(b []byte) (int, error) { + return rw.rw.Write(b) +} + +func (rw *readWriterRemoteAddr) RemoteAddr() net.Addr { + return rw.addr +} + +func (rw *readWriterRemoteAddr) LocalAddr() net.Addr { + return rw.addr +} + +func TestServerConnError(t *testing.T) { + s := &Server{ + Handler: func(ctx *RequestCtx) { + ctx.Error("foobar", 423) + }, + } + + rw := &readWriter{} + rw.r.WriteString("GET /foo/bar?baz HTTP/1.1\r\nHost: google.com\r\n\r\n") + + ch := make(chan error) + go func() { + ch <- s.ServeConn(rw) + }() + + select { + case err := <-ch: + if err != nil { + t.Fatalf("Unexpected error from serveConn: %s", err) + } + case <-time.After(100 * time.Millisecond): + t.Fatalf("timeout") + } + + br := bufio.NewReader(&rw.w) + var resp Response + if err := resp.Read(br); err != nil { + t.Fatalf("Unexpected error when reading response: %s", err) + } + if resp.Header.StatusCode() != 423 { + t.Fatalf("Unexpected status code %d. Expected %d", resp.Header.StatusCode(), 423) + } + if resp.Header.ContentLength() != 6 { + t.Fatalf("Unexpected Content-Length %d. Expected %d", resp.Header.ContentLength(), 6) + } + if !bytes.Equal(resp.Header.Peek("Content-Type"), defaultContentType) { + t.Fatalf("Unexpected Content-Type %q. Expected %q", resp.Header.Peek("Content-Type"), defaultContentType) + } + if !bytes.Equal(resp.Body(), []byte("foobar")) { + t.Fatalf("Unexpected body %q. 
Expected %q", resp.Body(), "foobar") + } +} + +func TestServeConnHex2intTable(t *testing.T) { + s := &Server{ + Handler: func(ctx *RequestCtx) { + }, + } + + rw := &readWriter{} + rw.r.WriteString("GET / HTTP/1.1\r\nHost: google.com\r\nTransfer-Encoding: chunked\r\n\r\n\xff") + + ch := make(chan error) + go func() { + ch <- s.ServeConn(rw) + }() + + var err error + select { + case err = <-ch: + case <-time.After(100 * time.Millisecond): + t.Fatalf("timeout") + } + if err.Error() != "empty hex number" { + t.Fatalf("expected: empty hex number") + } +} + +func TestServeConnSingleRequest(t *testing.T) { + s := &Server{ + Handler: func(ctx *RequestCtx) { + h := &ctx.Request.Header + ctx.Success("aaa", []byte(fmt.Sprintf("requestURI=%s, host=%s", h.RequestURI(), h.Peek("Host")))) + }, + } + + rw := &readWriter{} + rw.r.WriteString("GET /foo/bar?baz HTTP/1.1\r\nHost: google.com\r\n\r\n") + + ch := make(chan error) + go func() { + ch <- s.ServeConn(rw) + }() + + select { + case err := <-ch: + if err != nil { + t.Fatalf("Unexpected error from serveConn: %s", err) + } + case <-time.After(100 * time.Millisecond): + t.Fatalf("timeout") + } + + br := bufio.NewReader(&rw.w) + verifyResponse(t, br, 200, "aaa", "requestURI=/foo/bar?baz, host=google.com") +} + +func TestServeConnMultiRequests(t *testing.T) { + s := &Server{ + Handler: func(ctx *RequestCtx) { + h := &ctx.Request.Header + ctx.Success("aaa", []byte(fmt.Sprintf("requestURI=%s, host=%s", h.RequestURI(), h.Peek("Host")))) + }, + } + + rw := &readWriter{} + rw.r.WriteString("GET /foo/bar?baz HTTP/1.1\r\nHost: google.com\r\n\r\nGET /abc HTTP/1.1\r\nHost: foobar.com\r\n\r\n") + + ch := make(chan error) + go func() { + ch <- s.ServeConn(rw) + }() + + select { + case err := <-ch: + if err != nil { + t.Fatalf("Unexpected error from serveConn: %s", err) + } + case <-time.After(100 * time.Millisecond): + t.Fatalf("timeout") + } + + br := bufio.NewReader(&rw.w) + verifyResponse(t, br, 200, "aaa", "requestURI=/foo/bar?baz, host=google.com") + verifyResponse(t, br, 200, "aaa", "requestURI=/abc, host=foobar.com") +} + +func verifyResponse(t *testing.T, r *bufio.Reader, expectedStatusCode int, expectedContentType, expectedBody string) { + var resp Response + if err := resp.Read(r); err != nil { + t.Fatalf("Unexpected error when parsing response: %s", err) + } + + if !bytes.Equal(resp.Body(), []byte(expectedBody)) { + t.Fatalf("Unexpected body %q. 
Expected %q", resp.Body(), []byte(expectedBody)) + } + verifyResponseHeader(t, &resp.Header, expectedStatusCode, len(resp.Body()), expectedContentType) +} + +type readWriter struct { + net.Conn + r bytes.Buffer + w bytes.Buffer +} + +func (rw *readWriter) Close() error { + return nil +} + +func (rw *readWriter) Read(b []byte) (int, error) { + return rw.r.Read(b) +} + +func (rw *readWriter) Write(b []byte) (int, error) { + return rw.w.Write(b) +} + +func (rw *readWriter) RemoteAddr() net.Addr { + return zeroTCPAddr +} + +func (rw *readWriter) LocalAddr() net.Addr { + return zeroTCPAddr +} + +func (rw *readWriter) SetReadDeadline(t time.Time) error { + return nil +} + +func (rw *readWriter) SetWriteDeadline(t time.Time) error { + return nil +} diff --git a/vendor/github.com/erikdubbelboer/fasthttp/server_timing_test.go b/vendor/github.com/erikdubbelboer/fasthttp/server_timing_test.go new file mode 100644 index 0000000..5c935e8 --- /dev/null +++ b/vendor/github.com/erikdubbelboer/fasthttp/server_timing_test.go @@ -0,0 +1,461 @@ +package fasthttp + +import ( + "bytes" + "fmt" + "io" + "io/ioutil" + "net" + "net/http" + "runtime" + "sync" + "sync/atomic" + "testing" + "time" +) + +var defaultClientsCount = runtime.NumCPU() + +func BenchmarkRequestCtxRedirect(b *testing.B) { + b.RunParallel(func(pb *testing.PB) { + var ctx RequestCtx + for pb.Next() { + ctx.Request.SetRequestURI("http://aaa.com/fff/ss.html?sdf") + ctx.Redirect("/foo/bar?baz=111", StatusFound) + } + }) +} + +func BenchmarkServerGet1ReqPerConn(b *testing.B) { + benchmarkServerGet(b, defaultClientsCount, 1) +} + +func BenchmarkServerGet2ReqPerConn(b *testing.B) { + benchmarkServerGet(b, defaultClientsCount, 2) +} + +func BenchmarkServerGet10ReqPerConn(b *testing.B) { + benchmarkServerGet(b, defaultClientsCount, 10) +} + +func BenchmarkServerGet10KReqPerConn(b *testing.B) { + benchmarkServerGet(b, defaultClientsCount, 10000) +} + +func BenchmarkNetHTTPServerGet1ReqPerConn(b *testing.B) { + benchmarkNetHTTPServerGet(b, defaultClientsCount, 1) +} + +func BenchmarkNetHTTPServerGet2ReqPerConn(b *testing.B) { + benchmarkNetHTTPServerGet(b, defaultClientsCount, 2) +} + +func BenchmarkNetHTTPServerGet10ReqPerConn(b *testing.B) { + benchmarkNetHTTPServerGet(b, defaultClientsCount, 10) +} + +func BenchmarkNetHTTPServerGet10KReqPerConn(b *testing.B) { + benchmarkNetHTTPServerGet(b, defaultClientsCount, 10000) +} + +func BenchmarkServerPost1ReqPerConn(b *testing.B) { + benchmarkServerPost(b, defaultClientsCount, 1) +} + +func BenchmarkServerPost2ReqPerConn(b *testing.B) { + benchmarkServerPost(b, defaultClientsCount, 2) +} + +func BenchmarkServerPost10ReqPerConn(b *testing.B) { + benchmarkServerPost(b, defaultClientsCount, 10) +} + +func BenchmarkServerPost10KReqPerConn(b *testing.B) { + benchmarkServerPost(b, defaultClientsCount, 10000) +} + +func BenchmarkNetHTTPServerPost1ReqPerConn(b *testing.B) { + benchmarkNetHTTPServerPost(b, defaultClientsCount, 1) +} + +func BenchmarkNetHTTPServerPost2ReqPerConn(b *testing.B) { + benchmarkNetHTTPServerPost(b, defaultClientsCount, 2) +} + +func BenchmarkNetHTTPServerPost10ReqPerConn(b *testing.B) { + benchmarkNetHTTPServerPost(b, defaultClientsCount, 10) +} + +func BenchmarkNetHTTPServerPost10KReqPerConn(b *testing.B) { + benchmarkNetHTTPServerPost(b, defaultClientsCount, 10000) +} + +func BenchmarkServerGet1ReqPerConn10KClients(b *testing.B) { + benchmarkServerGet(b, 10000, 1) +} + +func BenchmarkServerGet2ReqPerConn10KClients(b *testing.B) { + benchmarkServerGet(b, 10000, 2) +} + +func 
BenchmarkServerGet10ReqPerConn10KClients(b *testing.B) { + benchmarkServerGet(b, 10000, 10) +} + +func BenchmarkServerGet100ReqPerConn10KClients(b *testing.B) { + benchmarkServerGet(b, 10000, 100) +} + +func BenchmarkNetHTTPServerGet1ReqPerConn10KClients(b *testing.B) { + benchmarkNetHTTPServerGet(b, 10000, 1) +} + +func BenchmarkNetHTTPServerGet2ReqPerConn10KClients(b *testing.B) { + benchmarkNetHTTPServerGet(b, 10000, 2) +} + +func BenchmarkNetHTTPServerGet10ReqPerConn10KClients(b *testing.B) { + benchmarkNetHTTPServerGet(b, 10000, 10) +} + +func BenchmarkNetHTTPServerGet100ReqPerConn10KClients(b *testing.B) { + benchmarkNetHTTPServerGet(b, 10000, 100) +} + +func BenchmarkServerHijack(b *testing.B) { + clientsCount := 1000 + requestsPerConn := 10000 + ch := make(chan struct{}, b.N) + responseBody := []byte("123") + s := &Server{ + Handler: func(ctx *RequestCtx) { + ctx.Hijack(func(c net.Conn) { + // emulate server loop :) + err := ServeConn(c, func(ctx *RequestCtx) { + ctx.Success("foobar", responseBody) + registerServedRequest(b, ch) + }) + if err != nil { + b.Fatalf("error when serving connection") + } + }) + ctx.Success("foobar", responseBody) + registerServedRequest(b, ch) + }, + Concurrency: 16 * clientsCount, + } + req := "GET /foo HTTP/1.1\r\nHost: google.com\r\n\r\n" + benchmarkServer(b, s, clientsCount, requestsPerConn, req) + verifyRequestsServed(b, ch) +} + +func BenchmarkServerMaxConnsPerIP(b *testing.B) { + clientsCount := 1000 + requestsPerConn := 10 + ch := make(chan struct{}, b.N) + responseBody := []byte("123") + s := &Server{ + Handler: func(ctx *RequestCtx) { + ctx.Success("foobar", responseBody) + registerServedRequest(b, ch) + }, + MaxConnsPerIP: clientsCount * 2, + Concurrency: 16 * clientsCount, + } + req := "GET /foo HTTP/1.1\r\nHost: google.com\r\n\r\n" + benchmarkServer(b, s, clientsCount, requestsPerConn, req) + verifyRequestsServed(b, ch) +} + +func BenchmarkServerTimeoutError(b *testing.B) { + clientsCount := 10 + requestsPerConn := 1 + ch := make(chan struct{}, b.N) + n := uint32(0) + responseBody := []byte("123") + s := &Server{ + Handler: func(ctx *RequestCtx) { + if atomic.AddUint32(&n, 1)&7 == 0 { + ctx.TimeoutError("xxx") + go func() { + ctx.Success("foobar", responseBody) + }() + } else { + ctx.Success("foobar", responseBody) + } + registerServedRequest(b, ch) + }, + Concurrency: 16 * clientsCount, + } + req := "GET /foo HTTP/1.1\r\nHost: google.com\r\n\r\n" + benchmarkServer(b, s, clientsCount, requestsPerConn, req) + verifyRequestsServed(b, ch) +} + +type fakeServerConn struct { + net.TCPConn + ln *fakeListener + requestsCount int + pos int + closed uint32 +} + +func (c *fakeServerConn) Read(b []byte) (int, error) { + nn := 0 + reqLen := len(c.ln.request) + for len(b) > 0 { + if c.requestsCount == 0 { + if nn == 0 { + return 0, io.EOF + } + return nn, nil + } + pos := c.pos % reqLen + n := copy(b, c.ln.request[pos:]) + b = b[n:] + nn += n + c.pos += n + if n+pos == reqLen { + c.requestsCount-- + } + } + return nn, nil +} + +func (c *fakeServerConn) Write(b []byte) (int, error) { + return len(b), nil +} + +var fakeAddr = net.TCPAddr{ + IP: []byte{1, 2, 3, 4}, + Port: 12345, +} + +func (c *fakeServerConn) RemoteAddr() net.Addr { + return &fakeAddr +} + +func (c *fakeServerConn) Close() error { + if atomic.AddUint32(&c.closed, 1) == 1 { + c.ln.ch <- c + } + return nil +} + +func (c *fakeServerConn) SetReadDeadline(t time.Time) error { + return nil +} + +func (c *fakeServerConn) SetWriteDeadline(t time.Time) error { + return nil +} + +type fakeListener 
struct { + lock sync.Mutex + requestsCount int + requestsPerConn int + request []byte + ch chan *fakeServerConn + done chan struct{} + closed bool +} + +func (ln *fakeListener) Accept() (net.Conn, error) { + ln.lock.Lock() + if ln.requestsCount == 0 { + ln.lock.Unlock() + for len(ln.ch) < cap(ln.ch) { + time.Sleep(10 * time.Millisecond) + } + ln.lock.Lock() + if !ln.closed { + close(ln.done) + ln.closed = true + } + ln.lock.Unlock() + return nil, io.EOF + } + requestsCount := ln.requestsPerConn + if requestsCount > ln.requestsCount { + requestsCount = ln.requestsCount + } + ln.requestsCount -= requestsCount + ln.lock.Unlock() + + c := <-ln.ch + c.requestsCount = requestsCount + c.closed = 0 + c.pos = 0 + + return c, nil +} + +func (ln *fakeListener) Close() error { + return nil +} + +func (ln *fakeListener) Addr() net.Addr { + return &fakeAddr +} + +func newFakeListener(requestsCount, clientsCount, requestsPerConn int, request string) *fakeListener { + ln := &fakeListener{ + requestsCount: requestsCount, + requestsPerConn: requestsPerConn, + request: []byte(request), + ch: make(chan *fakeServerConn, clientsCount), + done: make(chan struct{}), + } + for i := 0; i < clientsCount; i++ { + ln.ch <- &fakeServerConn{ + ln: ln, + } + } + return ln +} + +var ( + fakeResponse = []byte("Hello, world!") + getRequest = "GET /foobar?baz HTTP/1.1\r\nHost: google.com\r\nUser-Agent: aaa/bbb/ccc/ddd/eee Firefox Chrome MSIE Opera\r\n" + + "Referer: http://xxx.com/aaa?bbb=ccc\r\nCookie: foo=bar; baz=baraz; aa=aakslsdweriwereowriewroire\r\n\r\n" + postRequest = fmt.Sprintf("POST /foobar?baz HTTP/1.1\r\nHost: google.com\r\nContent-Type: foo/bar\r\nContent-Length: %d\r\n"+ + "User-Agent: Opera Chrome MSIE Firefox and other/1.2.34\r\nReferer: http://google.com/aaaa/bbb/ccc\r\n"+ + "Cookie: foo=bar; baz=baraz; aa=aakslsdweriwereowriewroire\r\n\r\n%s", + len(fakeResponse), fakeResponse) +) + +func benchmarkServerGet(b *testing.B, clientsCount, requestsPerConn int) { + ch := make(chan struct{}, b.N) + s := &Server{ + Handler: func(ctx *RequestCtx) { + if !ctx.IsGet() { + b.Fatalf("Unexpected request method: %s", ctx.Method()) + } + ctx.Success("text/plain", fakeResponse) + if requestsPerConn == 1 { + ctx.SetConnectionClose() + } + registerServedRequest(b, ch) + }, + Concurrency: 16 * clientsCount, + } + benchmarkServer(b, s, clientsCount, requestsPerConn, getRequest) + verifyRequestsServed(b, ch) +} + +func benchmarkNetHTTPServerGet(b *testing.B, clientsCount, requestsPerConn int) { + ch := make(chan struct{}, b.N) + s := &http.Server{ + Handler: http.HandlerFunc(func(w http.ResponseWriter, req *http.Request) { + if req.Method != "GET" { + b.Fatalf("Unexpected request method: %s", req.Method) + } + h := w.Header() + h.Set("Content-Type", "text/plain") + if requestsPerConn == 1 { + h.Set("Connection", "close") + } + w.Write(fakeResponse) + registerServedRequest(b, ch) + }), + } + benchmarkServer(b, s, clientsCount, requestsPerConn, getRequest) + verifyRequestsServed(b, ch) +} + +func benchmarkServerPost(b *testing.B, clientsCount, requestsPerConn int) { + ch := make(chan struct{}, b.N) + s := &Server{ + Handler: func(ctx *RequestCtx) { + if !ctx.IsPost() { + b.Fatalf("Unexpected request method: %s", ctx.Method()) + } + body := ctx.Request.Body() + if !bytes.Equal(body, fakeResponse) { + b.Fatalf("Unexpected body %q. 
Expected %q", body, fakeResponse) + } + ctx.Success("text/plain", body) + if requestsPerConn == 1 { + ctx.SetConnectionClose() + } + registerServedRequest(b, ch) + }, + Concurrency: 16 * clientsCount, + } + benchmarkServer(b, s, clientsCount, requestsPerConn, postRequest) + verifyRequestsServed(b, ch) +} + +func benchmarkNetHTTPServerPost(b *testing.B, clientsCount, requestsPerConn int) { + ch := make(chan struct{}, b.N) + s := &http.Server{ + Handler: http.HandlerFunc(func(w http.ResponseWriter, req *http.Request) { + if req.Method != "POST" { + b.Fatalf("Unexpected request method: %s", req.Method) + } + body, err := ioutil.ReadAll(req.Body) + if err != nil { + b.Fatalf("Unexpected error: %s", err) + } + req.Body.Close() + if !bytes.Equal(body, fakeResponse) { + b.Fatalf("Unexpected body %q. Expected %q", body, fakeResponse) + } + h := w.Header() + h.Set("Content-Type", "text/plain") + if requestsPerConn == 1 { + h.Set("Connection", "close") + } + w.Write(body) + registerServedRequest(b, ch) + }), + } + benchmarkServer(b, s, clientsCount, requestsPerConn, postRequest) + verifyRequestsServed(b, ch) +} + +func registerServedRequest(b *testing.B, ch chan<- struct{}) { + select { + case ch <- struct{}{}: + default: + b.Fatalf("More than %d requests served", cap(ch)) + } +} + +func verifyRequestsServed(b *testing.B, ch <-chan struct{}) { + requestsServed := 0 + for len(ch) > 0 { + <-ch + requestsServed++ + } + requestsSent := b.N + for requestsServed < requestsSent { + select { + case <-ch: + requestsServed++ + case <-time.After(100 * time.Millisecond): + b.Fatalf("Unexpected number of requests served %d. Expected %d", requestsServed, requestsSent) + } + } +} + +type realServer interface { + Serve(ln net.Listener) error +} + +func benchmarkServer(b *testing.B, s realServer, clientsCount, requestsPerConn int, request string) { + ln := newFakeListener(b.N, clientsCount, requestsPerConn, request) + ch := make(chan struct{}) + go func() { + s.Serve(ln) + ch <- struct{}{} + }() + + <-ln.done + + select { + case <-ch: + case <-time.After(10 * time.Second): + b.Fatalf("Server.Serve() didn't stop") + } +} diff --git a/vendor/github.com/erikdubbelboer/fasthttp/ssl-cert-snakeoil.key b/vendor/github.com/erikdubbelboer/fasthttp/ssl-cert-snakeoil.key new file mode 100644 index 0000000..00a79a3 --- /dev/null +++ b/vendor/github.com/erikdubbelboer/fasthttp/ssl-cert-snakeoil.key @@ -0,0 +1,28 @@ +-----BEGIN PRIVATE KEY----- +MIIEvgIBADANBgkqhkiG9w0BAQEFAASCBKgwggSkAgEAAoIBAQD4IQusAs8PJdnG +3mURt/AXtgC+ceqLOatJ49JJE1VPTkMAy+oE1f1XvkMrYsHqmDf6GWVzgVXryL4U +wq2/nJSm56ddhN55nI8oSN3dtywUB8/ShelEN73nlN77PeD9tl6NksPwWaKrqxq0 +FlabRPZSQCfmgZbhDV8Sa8mfCkFU0G0lit6kLGceCKMvmW+9Bz7ebsYmVdmVMxmf +IJStFD44lWFTdUc65WISKEdW2ELcUefb0zOLw+0PCbXFGJH5x5ktksW8+BBk2Hkg +GeQRL/qPCccthbScO0VgNj3zJ3ZZL0ObSDAbvNDG85joeNjDNq5DT/BAZ0bOSbEF +sh+f9BAzAgMBAAECggEBAJWv2cq7Jw6MVwSRxYca38xuD6TUNBopgBvjREixURW2 +sNUaLuMb9Omp7fuOaE2N5rcJ+xnjPGIxh/oeN5MQctz9gwn3zf6vY+15h97pUb4D +uGvYPRDaT8YVGS+X9NMZ4ZCmqW2lpWzKnCFoGHcy8yZLbcaxBsRdvKzwOYGoPiFb +K2QuhXZ/1UPmqK9i2DFKtj40X6vBszTNboFxOVpXrPu0FJwLVSDf2hSZ4fMM0DH3 +YqwKcYf5te+hxGKgrqRA3tn0NCWii0in6QIwXMC+kMw1ebg/tZKqyDLMNptAK8J+ +DVw9m5X1seUHS5ehU/g2jrQrtK5WYn7MrFK4lBzlRwECgYEA/d1TeANYECDWRRDk +B0aaRZs87Rwl/J9PsvbsKvtU/bX+OfSOUjOa9iQBqn0LmU8GqusEET/QVUfocVwV +Bggf/5qDLxz100Rj0ags/yE/kNr0Bb31kkkKHFMnCT06YasR7qKllwrAlPJvQv9x +IzBKq+T/Dx08Wep9bCRSFhzRCnsCgYEA+jdeZXTDr/Vz+D2B3nAw1frqYFfGnEVY +wqmoK3VXMDkGuxsloO2rN+SyiUo3JNiQNPDub/t7175GH5pmKtZOlftePANsUjBj 
+wZ1D0rI5Bxu/71ibIUYIRVmXsTEQkh/ozoh3jXCZ9+bLgYiYx7789IUZZSokFQ3D +FICUT9KJ36kCgYAGoq9Y1rWJjmIrYfqj2guUQC+CfxbbGIrrwZqAsRsSmpwvhZ3m +tiSZxG0quKQB+NfSxdvQW5ulbwC7Xc3K35F+i9pb8+TVBdeaFkw+yu6vaZmxQLrX +fQM/pEjD7A7HmMIaO7QaU5SfEAsqdCTP56Y8AftMuNXn/8IRfo2KuGwaWwKBgFpU +ILzJoVdlad9E/Rw7LjYhZfkv1uBVXIyxyKcfrkEXZSmozDXDdxsvcZCEfVHM6Ipk +K/+7LuMcqp4AFEAEq8wTOdq6daFaHLkpt/FZK6M4TlruhtpFOPkoNc3e45eM83OT +6mziKINJC1CQ6m65sQHpBtjxlKMRG8rL/D6wx9s5AoGBAMRlqNPMwglT3hvDmsAt +9Lf9pdmhERUlHhD8bj8mDaBj2Aqv7f6VRJaYZqP403pKKQexuqcn80mtjkSAPFkN +Cj7BVt/RXm5uoxDTnfi26RF9F6yNDEJ7UU9+peBr99aazF/fTgW/1GcMkQnum8uV +c257YgaWmjK9uB0Y2r2VxS0G +-----END PRIVATE KEY----- diff --git a/vendor/github.com/erikdubbelboer/fasthttp/ssl-cert-snakeoil.pem b/vendor/github.com/erikdubbelboer/fasthttp/ssl-cert-snakeoil.pem new file mode 100644 index 0000000..93e77cd --- /dev/null +++ b/vendor/github.com/erikdubbelboer/fasthttp/ssl-cert-snakeoil.pem @@ -0,0 +1,17 @@ +-----BEGIN CERTIFICATE----- +MIICujCCAaKgAwIBAgIJAMbXnKZ/cikUMA0GCSqGSIb3DQEBCwUAMBUxEzARBgNV +BAMTCnVidW50dS5uYW4wHhcNMTUwMjA0MDgwMTM5WhcNMjUwMjAxMDgwMTM5WjAV +MRMwEQYDVQQDEwp1YnVudHUubmFuMIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIB +CgKCAQEA+CELrALPDyXZxt5lEbfwF7YAvnHqizmrSePSSRNVT05DAMvqBNX9V75D +K2LB6pg3+hllc4FV68i+FMKtv5yUpuenXYTeeZyPKEjd3bcsFAfP0oXpRDe955Te ++z3g/bZejZLD8Fmiq6satBZWm0T2UkAn5oGW4Q1fEmvJnwpBVNBtJYrepCxnHgij +L5lvvQc+3m7GJlXZlTMZnyCUrRQ+OJVhU3VHOuViEihHVthC3FHn29Mzi8PtDwm1 +xRiR+ceZLZLFvPgQZNh5IBnkES/6jwnHLYW0nDtFYDY98yd2WS9Dm0gwG7zQxvOY +6HjYwzauQ0/wQGdGzkmxBbIfn/QQMwIDAQABow0wCzAJBgNVHRMEAjAAMA0GCSqG +SIb3DQEBCwUAA4IBAQBQjKm/4KN/iTgXbLTL3i7zaxYXFLXsnT1tF+ay4VA8aj98 +L3JwRTciZ3A5iy/W4VSCt3eASwOaPWHKqDBB5RTtL73LoAqsWmO3APOGQAbixcQ2 +45GXi05OKeyiYRi1Nvq7Unv9jUkRDHUYVPZVSAjCpsXzPhFkmZoTRxmx5l0ZF7Li +K91lI5h+eFq0dwZwrmlPambyh1vQUi70VHv8DNToVU29kel7YLbxGbuqETfhrcy6 +X+Mha6RYITkAn5FqsZcKMsc9eYGEF4l3XV+oS7q6xfTxktYJMFTI18J0lQ2Lv/CI +whdMnYGntDQBE/iFCrJEGNsKGc38796GBOb5j+zd +-----END CERTIFICATE----- diff --git a/vendor/github.com/erikdubbelboer/fasthttp/stackless/doc.go b/vendor/github.com/erikdubbelboer/fasthttp/stackless/doc.go new file mode 100644 index 0000000..8c0cc49 --- /dev/null +++ b/vendor/github.com/erikdubbelboer/fasthttp/stackless/doc.go @@ -0,0 +1,3 @@ +// Package stackless provides functionality that may save stack space +// for high number of concurrently running goroutines. +package stackless diff --git a/vendor/github.com/erikdubbelboer/fasthttp/stackless/func.go b/vendor/github.com/erikdubbelboer/fasthttp/stackless/func.go new file mode 100644 index 0000000..9a49bcc --- /dev/null +++ b/vendor/github.com/erikdubbelboer/fasthttp/stackless/func.go @@ -0,0 +1,79 @@ +package stackless + +import ( + "runtime" + "sync" +) + +// NewFunc returns stackless wrapper for the function f. +// +// Unlike f, the returned stackless wrapper doesn't use stack space +// on the goroutine that calls it. +// The wrapper may save a lot of stack space if the following conditions +// are met: +// +// - f doesn't contain blocking calls on network, I/O or channels; +// - f uses a lot of stack space; +// - the wrapper is called from high number of concurrent goroutines. +// +// The stackless wrapper returns false if the call cannot be processed +// at the moment due to high load. 
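+//
+// Illustrative usage sketch (not part of the upstream docs; heavyParse and
+// payload are hypothetical placeholders for a stack-hungry function and its input):
+//
+//	run := NewFunc(func(ctx interface{}) {
+//	    heavyParse(ctx.([]byte))
+//	})
+//	if !run(payload) {
+//	    // the wrapper is overloaded; the caller may retry or fall back to a direct call
+//	}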
+func NewFunc(f func(ctx interface{})) func(ctx interface{}) bool { + if f == nil { + panic("BUG: f cannot be nil") + } + + funcWorkCh := make(chan *funcWork, runtime.GOMAXPROCS(-1)*2048) + onceInit := func() { + n := runtime.GOMAXPROCS(-1) + for i := 0; i < n; i++ { + go funcWorker(funcWorkCh, f) + } + } + var once sync.Once + + return func(ctx interface{}) bool { + once.Do(onceInit) + fw := getFuncWork() + fw.ctx = ctx + + select { + case funcWorkCh <- fw: + default: + putFuncWork(fw) + return false + } + <-fw.done + putFuncWork(fw) + return true + } +} + +func funcWorker(funcWorkCh <-chan *funcWork, f func(ctx interface{})) { + for fw := range funcWorkCh { + f(fw.ctx) + fw.done <- struct{}{} + } +} + +func getFuncWork() *funcWork { + v := funcWorkPool.Get() + if v == nil { + v = &funcWork{ + done: make(chan struct{}, 1), + } + } + return v.(*funcWork) +} + +func putFuncWork(fw *funcWork) { + fw.ctx = nil + funcWorkPool.Put(fw) +} + +var funcWorkPool sync.Pool + +type funcWork struct { + ctx interface{} + done chan struct{} +} diff --git a/vendor/github.com/erikdubbelboer/fasthttp/stackless/func_test.go b/vendor/github.com/erikdubbelboer/fasthttp/stackless/func_test.go new file mode 100644 index 0000000..4f2c492 --- /dev/null +++ b/vendor/github.com/erikdubbelboer/fasthttp/stackless/func_test.go @@ -0,0 +1,86 @@ +package stackless + +import ( + "fmt" + "sync/atomic" + "testing" + "time" +) + +func TestNewFuncSimple(t *testing.T) { + var n uint64 + f := NewFunc(func(ctx interface{}) { + atomic.AddUint64(&n, uint64(ctx.(int))) + }) + + iterations := 4 * 1024 + for i := 0; i < iterations; i++ { + if !f(2) { + t.Fatalf("f mustn't return false") + } + } + if n != uint64(2*iterations) { + t.Fatalf("Unexpected n: %d. Expecting %d", n, 2*iterations) + } +} + +func TestNewFuncMulti(t *testing.T) { + var n1, n2 uint64 + f1 := NewFunc(func(ctx interface{}) { + atomic.AddUint64(&n1, uint64(ctx.(int))) + }) + f2 := NewFunc(func(ctx interface{}) { + atomic.AddUint64(&n2, uint64(ctx.(int))) + }) + + iterations := 4 * 1024 + + f1Done := make(chan error, 1) + go func() { + var err error + for i := 0; i < iterations; i++ { + if !f1(3) { + err = fmt.Errorf("f1 mustn't return false") + break + } + } + f1Done <- err + }() + + f2Done := make(chan error, 1) + go func() { + var err error + for i := 0; i < iterations; i++ { + if !f2(5) { + err = fmt.Errorf("f2 mustn't return false") + break + } + } + f2Done <- err + }() + + select { + case err := <-f1Done: + if err != nil { + t.Fatalf("unexpected error: %s", err) + } + case <-time.After(time.Second): + t.Fatalf("timeout") + } + + select { + case err := <-f2Done: + if err != nil { + t.Fatalf("unexpected error: %s", err) + } + case <-time.After(time.Second): + t.Fatalf("timeout") + } + + if n1 != uint64(3*iterations) { + t.Fatalf("unexpected n1: %d. Expecting %d", n1, 3*iterations) + } + if n2 != uint64(5*iterations) { + t.Fatalf("unexpected n2: %d. 
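+//
+// Illustrative sketch (not from the upstream docs), wrapping a gzip writer and
+// assuming compress/gzip is imported; dst and data are hypothetical placeholders:
+//
+//	zw := NewWriter(dst, func(w io.Writer) Writer {
+//	    return gzip.NewWriter(w)
+//	})
+//	zw.Write(data)
+//	zw.Flush()
+//	zw.Close()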
Expecting %d", n2, 5*iterations) + } +} diff --git a/vendor/github.com/erikdubbelboer/fasthttp/stackless/func_timing_test.go b/vendor/github.com/erikdubbelboer/fasthttp/stackless/func_timing_test.go new file mode 100644 index 0000000..cd4e463 --- /dev/null +++ b/vendor/github.com/erikdubbelboer/fasthttp/stackless/func_timing_test.go @@ -0,0 +1,40 @@ +package stackless + +import ( + "sync/atomic" + "testing" +) + +func BenchmarkFuncOverhead(b *testing.B) { + var n uint64 + f := NewFunc(func(ctx interface{}) { + atomic.AddUint64(&n, *(ctx.(*uint64))) + }) + b.RunParallel(func(pb *testing.PB) { + x := uint64(1) + for pb.Next() { + if !f(&x) { + b.Fatalf("f mustn't return false") + } + } + }) + if n != uint64(b.N) { + b.Fatalf("unexected n: %d. Expecting %d", n, b.N) + } +} + +func BenchmarkFuncPure(b *testing.B) { + var n uint64 + f := func(x *uint64) { + atomic.AddUint64(&n, *x) + } + b.RunParallel(func(pb *testing.PB) { + x := uint64(1) + for pb.Next() { + f(&x) + } + }) + if n != uint64(b.N) { + b.Fatalf("unexected n: %d. Expecting %d", n, b.N) + } +} diff --git a/vendor/github.com/erikdubbelboer/fasthttp/stackless/writer.go b/vendor/github.com/erikdubbelboer/fasthttp/stackless/writer.go new file mode 100644 index 0000000..9b9ff09 --- /dev/null +++ b/vendor/github.com/erikdubbelboer/fasthttp/stackless/writer.go @@ -0,0 +1,138 @@ +package stackless + +import ( + "errors" + "fmt" + "github.com/valyala/bytebufferpool" + "io" +) + +// Writer is an interface stackless writer must conform to. +// +// The interface contains common subset for Writers from compress/* packages. +type Writer interface { + Write(p []byte) (int, error) + Flush() error + Close() error + Reset(w io.Writer) +} + +// NewWriterFunc must return new writer that will be wrapped into +// stackless writer. +type NewWriterFunc func(w io.Writer) Writer + +// NewWriter creates a stackless writer around a writer returned +// from newWriter. +// +// The returned writer writes data to dstW. +// +// Writers that use a lot of stack space may be wrapped into stackless writer, +// thus saving stack space for high number of concurrently running goroutines. 
+func NewWriter(dstW io.Writer, newWriter NewWriterFunc) Writer { + w := &writer{ + dstW: dstW, + } + w.zw = newWriter(&w.xw) + return w +} + +type writer struct { + dstW io.Writer + zw Writer + xw xWriter + + err error + n int + + p []byte + op op +} + +type op int + +const ( + opWrite op = iota + opFlush + opClose + opReset +) + +func (w *writer) Write(p []byte) (int, error) { + w.p = p + err := w.do(opWrite) + w.p = nil + return w.n, err +} + +func (w *writer) Flush() error { + return w.do(opFlush) +} + +func (w *writer) Close() error { + return w.do(opClose) +} + +func (w *writer) Reset(dstW io.Writer) { + w.xw.Reset() + w.do(opReset) + w.dstW = dstW +} + +func (w *writer) do(op op) error { + w.op = op + if !stacklessWriterFunc(w) { + return errHighLoad + } + err := w.err + if err != nil { + return err + } + if w.xw.bb != nil && len(w.xw.bb.B) > 0 { + _, err = w.dstW.Write(w.xw.bb.B) + } + w.xw.Reset() + + return err +} + +var errHighLoad = errors.New("cannot compress data due to high load") + +var stacklessWriterFunc = NewFunc(writerFunc) + +func writerFunc(ctx interface{}) { + w := ctx.(*writer) + switch w.op { + case opWrite: + w.n, w.err = w.zw.Write(w.p) + case opFlush: + w.err = w.zw.Flush() + case opClose: + w.err = w.zw.Close() + case opReset: + w.zw.Reset(&w.xw) + w.err = nil + default: + panic(fmt.Sprintf("BUG: unexpected op: %d", w.op)) + } +} + +type xWriter struct { + bb *bytebufferpool.ByteBuffer +} + +func (w *xWriter) Write(p []byte) (int, error) { + if w.bb == nil { + w.bb = bufferPool.Get() + } + w.bb.Write(p) + return len(p), nil +} + +func (w *xWriter) Reset() { + if w.bb != nil { + bufferPool.Put(w.bb) + w.bb = nil + } +} + +var bufferPool bytebufferpool.Pool diff --git a/vendor/github.com/erikdubbelboer/fasthttp/stackless/writer_test.go b/vendor/github.com/erikdubbelboer/fasthttp/stackless/writer_test.go new file mode 100644 index 0000000..f36f18d --- /dev/null +++ b/vendor/github.com/erikdubbelboer/fasthttp/stackless/writer_test.go @@ -0,0 +1,122 @@ +package stackless + +import ( + "bytes" + "compress/flate" + "compress/gzip" + "fmt" + "io" + "io/ioutil" + "testing" + "time" +) + +func TestCompressFlateSerial(t *testing.T) { + if err := testCompressFlate(); err != nil { + t.Fatalf("unexpected error: %s", err) + } +} + +func TestCompressFlateConcurrent(t *testing.T) { + if err := testConcurrent(testCompressFlate, 10); err != nil { + t.Fatalf("unexpected error: %s", err) + } +} + +func testCompressFlate() error { + return testWriter(func(w io.Writer) Writer { + zw, err := flate.NewWriter(w, flate.DefaultCompression) + if err != nil { + panic(fmt.Sprintf("BUG: unexpected error: %s", err)) + } + return zw + }, func(r io.Reader) io.Reader { + return flate.NewReader(r) + }) +} + +func TestCompressGzipSerial(t *testing.T) { + if err := testCompressGzip(); err != nil { + t.Fatalf("unexpected error: %s", err) + } +} + +func TestCompressGzipConcurrent(t *testing.T) { + if err := testConcurrent(testCompressGzip, 10); err != nil { + t.Fatalf("unexpected error: %s", err) + } +} + +func testCompressGzip() error { + return testWriter(func(w io.Writer) Writer { + return gzip.NewWriter(w) + }, func(r io.Reader) io.Reader { + zr, err := gzip.NewReader(r) + if err != nil { + panic(fmt.Sprintf("BUG: cannot create gzip reader: %s", err)) + } + return zr + }) +} + +func testWriter(newWriter NewWriterFunc, newReader func(io.Reader) io.Reader) error { + dstW := &bytes.Buffer{} + w := NewWriter(dstW, newWriter) + + for i := 0; i < 5; i++ { + if err := testWriterReuse(w, dstW, newReader); 
err != nil { + return fmt.Errorf("unepxected error when re-using writer on iteration %d: %s", i, err) + } + dstW = &bytes.Buffer{} + w.Reset(dstW) + } + + return nil +} + +func testWriterReuse(w Writer, r io.Reader, newReader func(io.Reader) io.Reader) error { + wantW := &bytes.Buffer{} + mw := io.MultiWriter(w, wantW) + for i := 0; i < 30; i++ { + fmt.Fprintf(mw, "foobar %d\n", i) + if i%13 == 0 { + if err := w.Flush(); err != nil { + return fmt.Errorf("error on flush: %s", err) + } + } + } + w.Close() + + zr := newReader(r) + data, err := ioutil.ReadAll(zr) + if err != nil { + return fmt.Errorf("unexpected error: %s, data=%q", err, data) + } + + wantData := wantW.Bytes() + if !bytes.Equal(data, wantData) { + return fmt.Errorf("unexpected data: %q. Expecting %q", data, wantData) + } + + return nil +} + +func testConcurrent(testFunc func() error, concurrency int) error { + ch := make(chan error, concurrency) + for i := 0; i < concurrency; i++ { + go func() { + ch <- testFunc() + }() + } + for i := 0; i < concurrency; i++ { + select { + case err := <-ch: + if err != nil { + return fmt.Errorf("unexpected error on goroutine %d: %s", i, err) + } + case <-time.After(time.Second): + return fmt.Errorf("timeout on goroutine %d", i) + } + } + return nil +} diff --git a/vendor/github.com/erikdubbelboer/fasthttp/status.go b/vendor/github.com/erikdubbelboer/fasthttp/status.go new file mode 100644 index 0000000..6687efb --- /dev/null +++ b/vendor/github.com/erikdubbelboer/fasthttp/status.go @@ -0,0 +1,176 @@ +package fasthttp + +import ( + "fmt" + "sync/atomic" +) + +// HTTP status codes were stolen from net/http. +const ( + StatusContinue = 100 // RFC 7231, 6.2.1 + StatusSwitchingProtocols = 101 // RFC 7231, 6.2.2 + StatusProcessing = 102 // RFC 2518, 10.1 + + StatusOK = 200 // RFC 7231, 6.3.1 + StatusCreated = 201 // RFC 7231, 6.3.2 + StatusAccepted = 202 // RFC 7231, 6.3.3 + StatusNonAuthoritativeInfo = 203 // RFC 7231, 6.3.4 + StatusNoContent = 204 // RFC 7231, 6.3.5 + StatusResetContent = 205 // RFC 7231, 6.3.6 + StatusPartialContent = 206 // RFC 7233, 4.1 + StatusMultiStatus = 207 // RFC 4918, 11.1 + StatusAlreadyReported = 208 // RFC 5842, 7.1 + StatusIMUsed = 226 // RFC 3229, 10.4.1 + + StatusMultipleChoices = 300 // RFC 7231, 6.4.1 + StatusMovedPermanently = 301 // RFC 7231, 6.4.2 + StatusFound = 302 // RFC 7231, 6.4.3 + StatusSeeOther = 303 // RFC 7231, 6.4.4 + StatusNotModified = 304 // RFC 7232, 4.1 + StatusUseProxy = 305 // RFC 7231, 6.4.5 + _ = 306 // RFC 7231, 6.4.6 (Unused) + StatusTemporaryRedirect = 307 // RFC 7231, 6.4.7 + StatusPermanentRedirect = 308 // RFC 7538, 3 + + StatusBadRequest = 400 // RFC 7231, 6.5.1 + StatusUnauthorized = 401 // RFC 7235, 3.1 + StatusPaymentRequired = 402 // RFC 7231, 6.5.2 + StatusForbidden = 403 // RFC 7231, 6.5.3 + StatusNotFound = 404 // RFC 7231, 6.5.4 + StatusMethodNotAllowed = 405 // RFC 7231, 6.5.5 + StatusNotAcceptable = 406 // RFC 7231, 6.5.6 + StatusProxyAuthRequired = 407 // RFC 7235, 3.2 + StatusRequestTimeout = 408 // RFC 7231, 6.5.7 + StatusConflict = 409 // RFC 7231, 6.5.8 + StatusGone = 410 // RFC 7231, 6.5.9 + StatusLengthRequired = 411 // RFC 7231, 6.5.10 + StatusPreconditionFailed = 412 // RFC 7232, 4.2 + StatusRequestEntityTooLarge = 413 // RFC 7231, 6.5.11 + StatusRequestURITooLong = 414 // RFC 7231, 6.5.12 + StatusUnsupportedMediaType = 415 // RFC 7231, 6.5.13 + StatusRequestedRangeNotSatisfiable = 416 // RFC 7233, 4.4 + StatusExpectationFailed = 417 // RFC 7231, 6.5.14 + StatusTeapot = 418 // RFC 7168, 2.3.3 + 
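As a side note on the status-code block being vendored here: these constants pair with the `StatusMessage` helper defined further down in this same file. A minimal sketch of how an application might use them (the `main` wrapper and printed values are mine, not part of the vendored code):

```go
package main

import (
	"fmt"

	"github.com/erikdubbelboer/fasthttp"
)

func main() {
	// StatusMessage maps a code to its reason phrase; unknown codes
	// fall back to "Unknown Status Code".
	fmt.Println(fasthttp.StatusTeapot, fasthttp.StatusMessage(fasthttp.StatusTeapot)) // 418 I'm a teapot
	fmt.Println(999, fasthttp.StatusMessage(999))                                     // 999 Unknown Status Code
}
```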
StatusUnprocessableEntity = 422 // RFC 4918, 11.2 + StatusLocked = 423 // RFC 4918, 11.3 + StatusFailedDependency = 424 // RFC 4918, 11.4 + StatusUpgradeRequired = 426 // RFC 7231, 6.5.15 + StatusPreconditionRequired = 428 // RFC 6585, 3 + StatusTooManyRequests = 429 // RFC 6585, 4 + StatusRequestHeaderFieldsTooLarge = 431 // RFC 6585, 5 + StatusUnavailableForLegalReasons = 451 // RFC 7725, 3 + + StatusInternalServerError = 500 // RFC 7231, 6.6.1 + StatusNotImplemented = 501 // RFC 7231, 6.6.2 + StatusBadGateway = 502 // RFC 7231, 6.6.3 + StatusServiceUnavailable = 503 // RFC 7231, 6.6.4 + StatusGatewayTimeout = 504 // RFC 7231, 6.6.5 + StatusHTTPVersionNotSupported = 505 // RFC 7231, 6.6.6 + StatusVariantAlsoNegotiates = 506 // RFC 2295, 8.1 + StatusInsufficientStorage = 507 // RFC 4918, 11.5 + StatusLoopDetected = 508 // RFC 5842, 7.2 + StatusNotExtended = 510 // RFC 2774, 7 + StatusNetworkAuthenticationRequired = 511 // RFC 6585, 6 +) + +var ( + statusLines atomic.Value + + statusMessages = map[int]string{ + StatusContinue: "Continue", + StatusSwitchingProtocols: "Switching Protocols", + StatusProcessing: "Processing", + + StatusOK: "OK", + StatusCreated: "Created", + StatusAccepted: "Accepted", + StatusNonAuthoritativeInfo: "Non-Authoritative Information", + StatusNoContent: "No Content", + StatusResetContent: "Reset Content", + StatusPartialContent: "Partial Content", + StatusMultiStatus: "Multi-Status", + StatusAlreadyReported: "Already Reported", + StatusIMUsed: "IM Used", + + StatusMultipleChoices: "Multiple Choices", + StatusMovedPermanently: "Moved Permanently", + StatusFound: "Found", + StatusSeeOther: "See Other", + StatusNotModified: "Not Modified", + StatusUseProxy: "Use Proxy", + StatusTemporaryRedirect: "Temporary Redirect", + StatusPermanentRedirect: "Permanent Redirect", + + StatusBadRequest: "Bad Request", + StatusUnauthorized: "Unauthorized", + StatusPaymentRequired: "Payment Required", + StatusForbidden: "Forbidden", + StatusNotFound: "Not Found", + StatusMethodNotAllowed: "Method Not Allowed", + StatusNotAcceptable: "Not Acceptable", + StatusProxyAuthRequired: "Proxy Authentication Required", + StatusRequestTimeout: "Request Timeout", + StatusConflict: "Conflict", + StatusGone: "Gone", + StatusLengthRequired: "Length Required", + StatusPreconditionFailed: "Precondition Failed", + StatusRequestEntityTooLarge: "Request Entity Too Large", + StatusRequestURITooLong: "Request URI Too Long", + StatusUnsupportedMediaType: "Unsupported Media Type", + StatusRequestedRangeNotSatisfiable: "Requested Range Not Satisfiable", + StatusExpectationFailed: "Expectation Failed", + StatusTeapot: "I'm a teapot", + StatusUnprocessableEntity: "Unprocessable Entity", + StatusLocked: "Locked", + StatusFailedDependency: "Failed Dependency", + StatusUpgradeRequired: "Upgrade Required", + StatusPreconditionRequired: "Precondition Required", + StatusTooManyRequests: "Too Many Requests", + StatusRequestHeaderFieldsTooLarge: "Request Header Fields Too Large", + StatusUnavailableForLegalReasons: "Unavailable For Legal Reasons", + + StatusInternalServerError: "Internal Server Error", + StatusNotImplemented: "Not Implemented", + StatusBadGateway: "Bad Gateway", + StatusServiceUnavailable: "Service Unavailable", + StatusGatewayTimeout: "Gateway Timeout", + StatusHTTPVersionNotSupported: "HTTP Version Not Supported", + StatusVariantAlsoNegotiates: "Variant Also Negotiates", + StatusInsufficientStorage: "Insufficient Storage", + StatusLoopDetected: "Loop Detected", + StatusNotExtended: "Not Extended", + 
StatusNetworkAuthenticationRequired: "Network Authentication Required", + } +) + +// StatusMessage returns HTTP status message for the given status code. +func StatusMessage(statusCode int) string { + s := statusMessages[statusCode] + if s == "" { + s = "Unknown Status Code" + } + return s +} + +func init() { + statusLines.Store(make(map[int][]byte)) +} + +func statusLine(statusCode int) []byte { + m := statusLines.Load().(map[int][]byte) + h := m[statusCode] + if h != nil { + return h + } + + statusText := StatusMessage(statusCode) + + h = []byte(fmt.Sprintf("HTTP/1.1 %d %s\r\n", statusCode, statusText)) + newM := make(map[int][]byte, len(m)+1) + for k, v := range m { + newM[k] = v + } + newM[statusCode] = h + statusLines.Store(newM) + return h +} diff --git a/vendor/github.com/erikdubbelboer/fasthttp/stream.go b/vendor/github.com/erikdubbelboer/fasthttp/stream.go new file mode 100644 index 0000000..801f2bb --- /dev/null +++ b/vendor/github.com/erikdubbelboer/fasthttp/stream.go @@ -0,0 +1,54 @@ +package fasthttp + +import ( + "bufio" + "io" + "sync" + + "github.com/erikdubbelboer/fasthttp/fasthttputil" +) + +// StreamWriter must write data to w. +// +// Usually StreamWriter writes data to w in a loop (aka 'data streaming'). +// +// StreamWriter must return immediately if w returns error. +// +// Since the written data is buffered, do not forget calling w.Flush +// when the data must be propagated to reader. +type StreamWriter func(w *bufio.Writer) + +// NewStreamReader returns a reader, which replays all the data generated by sw. +// +// The returned reader may be passed to Response.SetBodyStream. +// +// Close must be called on the returned reader after all the required data +// has been read. Otherwise goroutine leak may occur. +// +// See also Response.SetBodyStreamWriter. +func NewStreamReader(sw StreamWriter) io.ReadCloser { + pc := fasthttputil.NewPipeConns() + pw := pc.Conn1() + pr := pc.Conn2() + + var bw *bufio.Writer + v := streamWriterBufPool.Get() + if v == nil { + bw = bufio.NewWriter(pw) + } else { + bw = v.(*bufio.Writer) + bw.Reset(pw) + } + + go func() { + sw(bw) + bw.Flush() + pw.Close() + + streamWriterBufPool.Put(bw) + }() + + return pr +} + +var streamWriterBufPool sync.Pool diff --git a/vendor/github.com/erikdubbelboer/fasthttp/stream_test.go b/vendor/github.com/erikdubbelboer/fasthttp/stream_test.go new file mode 100644 index 0000000..a61d3fd --- /dev/null +++ b/vendor/github.com/erikdubbelboer/fasthttp/stream_test.go @@ -0,0 +1,102 @@ +package fasthttp + +import ( + "bufio" + "fmt" + "io" + "io/ioutil" + "testing" + "time" +) + +func TestNewStreamReader(t *testing.T) { + ch := make(chan struct{}) + r := NewStreamReader(func(w *bufio.Writer) { + fmt.Fprintf(w, "Hello, world\n") + fmt.Fprintf(w, "Line #2\n") + close(ch) + }) + + data, err := ioutil.ReadAll(r) + if err != nil { + t.Fatalf("unexpected error: %s", err) + } + expectedData := "Hello, world\nLine #2\n" + if string(data) != expectedData { + t.Fatalf("unexpected data %q. 
Expecting %q", data, expectedData) + } + + if err = r.Close(); err != nil { + t.Fatalf("unexpected error") + } + + select { + case <-ch: + case <-time.After(time.Second): + t.Fatalf("timeout") + } +} + +func TestStreamReaderClose(t *testing.T) { + firstLine := "the first line must pass" + ch := make(chan error, 1) + r := NewStreamReader(func(w *bufio.Writer) { + fmt.Fprintf(w, "%s", firstLine) + if err := w.Flush(); err != nil { + ch <- fmt.Errorf("unexpected error on first flush: %s", err) + return + } + + data := createFixedBody(4000) + for i := 0; i < 100; i++ { + w.Write(data) + } + if err := w.Flush(); err == nil { + ch <- fmt.Errorf("expecting error on the second flush") + } + ch <- nil + }) + + buf := make([]byte, len(firstLine)) + n, err := io.ReadFull(r, buf) + if err != nil { + t.Fatalf("unexpected error: %s", err) + } + if n != len(buf) { + t.Fatalf("unexpected number of bytes read: %d. Expecting %d", n, len(buf)) + } + if string(buf) != firstLine { + t.Fatalf("unexpected result: %q. Expecting %q", buf, firstLine) + } + + if err := r.Close(); err != nil { + t.Fatalf("unexpected error: %s", err) + } + + select { + case err := <-ch: + if err != nil { + t.Fatalf("error returned from stream reader: %s", err) + } + case <-time.After(time.Second): + t.Fatalf("timeout when waiting for stream reader") + } + + // read trailing data + go func() { + if _, err := ioutil.ReadAll(r); err != nil { + ch <- fmt.Errorf("unexpected error when reading trailing data: %s", err) + return + } + ch <- nil + }() + + select { + case err := <-ch: + if err != nil { + t.Fatalf("error returned when reading tail data: %s", err) + } + case <-time.After(time.Second): + t.Fatalf("timeout when reading tail data") + } +} diff --git a/vendor/github.com/erikdubbelboer/fasthttp/stream_timing_test.go b/vendor/github.com/erikdubbelboer/fasthttp/stream_timing_test.go new file mode 100644 index 0000000..facca3a --- /dev/null +++ b/vendor/github.com/erikdubbelboer/fasthttp/stream_timing_test.go @@ -0,0 +1,70 @@ +package fasthttp + +import ( + "bufio" + "io" + "testing" + "time" +) + +func BenchmarkStreamReader1(b *testing.B) { + benchmarkStreamReader(b, 1) +} + +func BenchmarkStreamReader10(b *testing.B) { + benchmarkStreamReader(b, 10) +} + +func BenchmarkStreamReader100(b *testing.B) { + benchmarkStreamReader(b, 100) +} + +func BenchmarkStreamReader1K(b *testing.B) { + benchmarkStreamReader(b, 1000) +} + +func BenchmarkStreamReader10K(b *testing.B) { + benchmarkStreamReader(b, 10000) +} + +func benchmarkStreamReader(b *testing.B, size int) { + src := createFixedBody(size) + b.SetBytes(int64(size)) + + b.RunParallel(func(pb *testing.PB) { + dst := make([]byte, size) + ch := make(chan error, 1) + sr := NewStreamReader(func(w *bufio.Writer) { + for pb.Next() { + if _, err := w.Write(src); err != nil { + ch <- err + return + } + if err := w.Flush(); err != nil { + ch <- err + return + } + } + ch <- nil + }) + for { + if _, err := sr.Read(dst); err != nil { + if err == io.EOF { + break + } + b.Fatalf("unexpected error when reading from stream reader: %s", err) + } + } + if err := sr.Close(); err != nil { + b.Fatalf("unexpected error when closing stream reader: %s", err) + } + select { + case err := <-ch: + if err != nil { + b.Fatalf("unexpected error from stream reader: %s", err) + } + case <-time.After(time.Second): + b.Fatalf("timeout") + } + }) +} diff --git a/vendor/github.com/erikdubbelboer/fasthttp/strings.go b/vendor/github.com/erikdubbelboer/fasthttp/strings.go new file mode 100644 index 0000000..734701d --- /dev/null 
+++ b/vendor/github.com/erikdubbelboer/fasthttp/strings.go @@ -0,0 +1,74 @@ +package fasthttp + +var ( + defaultServerName = []byte("fasthttp") + defaultUserAgent = []byte("fasthttp") + defaultContentType = []byte("text/plain; charset=utf-8") +) + +var ( + strSlash = []byte("/") + strSlashSlash = []byte("//") + strSlashDotDot = []byte("/..") + strSlashDotSlash = []byte("/./") + strSlashDotDotSlash = []byte("/../") + strCRLF = []byte("\r\n") + strHTTP = []byte("http") + strHTTPS = []byte("https") + strHTTP11 = []byte("HTTP/1.1") + strColonSlashSlash = []byte("://") + strColonSpace = []byte(": ") + strGMT = []byte("GMT") + + strResponseContinue = []byte("HTTP/1.1 100 Continue\r\n\r\n") + + strGet = []byte("GET") + strHead = []byte("HEAD") + strPost = []byte("POST") + strPut = []byte("PUT") + strDelete = []byte("DELETE") + strConnect = []byte("CONNECT") + + strExpect = []byte("Expect") + strConnection = []byte("Connection") + strContentLength = []byte("Content-Length") + strContentType = []byte("Content-Type") + strDate = []byte("Date") + strHost = []byte("Host") + strReferer = []byte("Referer") + strServer = []byte("Server") + strTransferEncoding = []byte("Transfer-Encoding") + strContentEncoding = []byte("Content-Encoding") + strAcceptEncoding = []byte("Accept-Encoding") + strUserAgent = []byte("User-Agent") + strCookie = []byte("Cookie") + strSetCookie = []byte("Set-Cookie") + strLocation = []byte("Location") + strIfModifiedSince = []byte("If-Modified-Since") + strLastModified = []byte("Last-Modified") + strAcceptRanges = []byte("Accept-Ranges") + strRange = []byte("Range") + strContentRange = []byte("Content-Range") + + strCookieExpires = []byte("expires") + strCookieDomain = []byte("domain") + strCookiePath = []byte("path") + strCookieHTTPOnly = []byte("HttpOnly") + strCookieSecure = []byte("secure") + + strClose = []byte("close") + strGzip = []byte("gzip") + strDeflate = []byte("deflate") + strKeepAlive = []byte("keep-alive") + strKeepAliveCamelCase = []byte("Keep-Alive") + strUpgrade = []byte("Upgrade") + strChunked = []byte("chunked") + strIdentity = []byte("identity") + str100Continue = []byte("100-continue") + strPostArgsContentType = []byte("application/x-www-form-urlencoded") + strMultipartFormData = []byte("multipart/form-data") + strBoundary = []byte("boundary") + strBytes = []byte("bytes") + strTextSlash = []byte("text/") + strApplicationSlash = []byte("application/") +) diff --git a/vendor/github.com/erikdubbelboer/fasthttp/tcpdialer.go b/vendor/github.com/erikdubbelboer/fasthttp/tcpdialer.go new file mode 100644 index 0000000..e31fd75 --- /dev/null +++ b/vendor/github.com/erikdubbelboer/fasthttp/tcpdialer.go @@ -0,0 +1,369 @@ +package fasthttp + +import ( + "errors" + "net" + "strconv" + "sync" + "sync/atomic" + "time" +) + +// Dial dials the given TCP addr using tcp4. +// +// This function has the following additional features comparing to net.Dial: +// +// * It reduces load on DNS resolver by caching resolved TCP addressed +// for DefaultDNSCacheDuration. +// * It dials all the resolved TCP addresses in round-robin manner until +// connection is established. This may be useful if certain addresses +// are temporarily unreachable. +// * It returns ErrDialTimeout if connection cannot be established during +// DefaultDialTimeout seconds. Use DialTimeout for customizing dial timeout. +// +// This dialer is intended for custom code wrapping before passing +// to Client.Dial or HostClient.Dial. 
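A sketch of the "custom code wrapping" mentioned here: plugging a timeout-bound dialer into `fasthttp.Client` via its `Dial` field (the host and the 5-second timeout are illustrative values, not defaults from this package):

```go
package main

import (
	"fmt"
	"net"
	"time"

	"github.com/erikdubbelboer/fasthttp"
)

func main() {
	c := &fasthttp.Client{
		// Wrap DialTimeout so every outgoing connection is bounded
		// by a 5 second dial timeout.
		Dial: func(addr string) (net.Conn, error) {
			return fasthttp.DialTimeout(addr, 5*time.Second)
		},
	}

	statusCode, body, err := c.Get(nil, "http://example.com/")
	if err != nil {
		panic(err)
	}
	fmt.Println(statusCode, len(body))
}
```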
+// +// For instance, per-host counters and/or limits may be implemented +// by such wrappers. +// +// The addr passed to the function must contain port. Example addr values: +// +// * foobar.baz:443 +// * foo.bar:80 +// * aaa.com:8080 +func Dial(addr string) (net.Conn, error) { + return getDialer(DefaultDialTimeout, false)(addr) +} + +// DialTimeout dials the given TCP addr using tcp4 using the given timeout. +// +// This function has the following additional features comparing to net.Dial: +// +// * It reduces load on DNS resolver by caching resolved TCP addressed +// for DefaultDNSCacheDuration. +// * It dials all the resolved TCP addresses in round-robin manner until +// connection is established. This may be useful if certain addresses +// are temporarily unreachable. +// +// This dialer is intended for custom code wrapping before passing +// to Client.Dial or HostClient.Dial. +// +// For instance, per-host counters and/or limits may be implemented +// by such wrappers. +// +// The addr passed to the function must contain port. Example addr values: +// +// * foobar.baz:443 +// * foo.bar:80 +// * aaa.com:8080 +func DialTimeout(addr string, timeout time.Duration) (net.Conn, error) { + return getDialer(timeout, false)(addr) +} + +// DialDualStack dials the given TCP addr using both tcp4 and tcp6. +// +// This function has the following additional features comparing to net.Dial: +// +// * It reduces load on DNS resolver by caching resolved TCP addressed +// for DefaultDNSCacheDuration. +// * It dials all the resolved TCP addresses in round-robin manner until +// connection is established. This may be useful if certain addresses +// are temporarily unreachable. +// * It returns ErrDialTimeout if connection cannot be established during +// DefaultDialTimeout seconds. Use DialDualStackTimeout for custom dial +// timeout. +// +// This dialer is intended for custom code wrapping before passing +// to Client.Dial or HostClient.Dial. +// +// For instance, per-host counters and/or limits may be implemented +// by such wrappers. +// +// The addr passed to the function must contain port. Example addr values: +// +// * foobar.baz:443 +// * foo.bar:80 +// * aaa.com:8080 +func DialDualStack(addr string) (net.Conn, error) { + return getDialer(DefaultDialTimeout, true)(addr) +} + +// DialDualStackTimeout dials the given TCP addr using both tcp4 and tcp6 +// using the given timeout. +// +// This function has the following additional features comparing to net.Dial: +// +// * It reduces load on DNS resolver by caching resolved TCP addressed +// for DefaultDNSCacheDuration. +// * It dials all the resolved TCP addresses in round-robin manner until +// connection is established. This may be useful if certain addresses +// are temporarily unreachable. +// +// This dialer is intended for custom code wrapping before passing +// to Client.Dial or HostClient.Dial. +// +// For instance, per-host counters and/or limits may be implemented +// by such wrappers. +// +// The addr passed to the function must contain port. 
Example addr values: +// +// * foobar.baz:443 +// * foo.bar:80 +// * aaa.com:8080 +func DialDualStackTimeout(addr string, timeout time.Duration) (net.Conn, error) { + return getDialer(timeout, true)(addr) +} + +func getDialer(timeout time.Duration, dualStack bool) DialFunc { + if timeout <= 0 { + timeout = DefaultDialTimeout + } + timeoutRounded := int(timeout.Seconds()*10 + 9) + + m := dialMap + if dualStack { + m = dialDualStackMap + } + + dialMapLock.Lock() + d := m[timeoutRounded] + if d == nil { + dialer := dialerStd + if dualStack { + dialer = dialerDualStack + } + d = dialer.NewDial(timeout) + m[timeoutRounded] = d + } + dialMapLock.Unlock() + return d +} + +var ( + dialerStd = &tcpDialer{} + dialerDualStack = &tcpDialer{DualStack: true} + + dialMap = make(map[int]DialFunc) + dialDualStackMap = make(map[int]DialFunc) + dialMapLock sync.Mutex +) + +type tcpDialer struct { + DualStack bool + + tcpAddrsLock sync.Mutex + tcpAddrsMap map[string]*tcpAddrEntry + + concurrencyCh chan struct{} + + once sync.Once +} + +const maxDialConcurrency = 1000 + +func (d *tcpDialer) NewDial(timeout time.Duration) DialFunc { + d.once.Do(func() { + d.concurrencyCh = make(chan struct{}, maxDialConcurrency) + d.tcpAddrsMap = make(map[string]*tcpAddrEntry) + go d.tcpAddrsClean() + }) + + return func(addr string) (net.Conn, error) { + addrs, idx, err := d.getTCPAddrs(addr) + if err != nil { + return nil, err + } + network := "tcp4" + if d.DualStack { + network = "tcp" + } + + var conn net.Conn + n := uint32(len(addrs)) + deadline := time.Now().Add(timeout) + for n > 0 { + conn, err = tryDial(network, &addrs[idx%n], deadline, d.concurrencyCh) + if err == nil { + return conn, nil + } + if err == ErrDialTimeout { + return nil, err + } + idx++ + n-- + } + return nil, err + } +} + +func tryDial(network string, addr *net.TCPAddr, deadline time.Time, concurrencyCh chan struct{}) (net.Conn, error) { + timeout := -time.Since(deadline) + if timeout <= 0 { + return nil, ErrDialTimeout + } + + select { + case concurrencyCh <- struct{}{}: + default: + tc := acquireTimer(timeout) + isTimeout := false + select { + case concurrencyCh <- struct{}{}: + case <-tc.C: + isTimeout = true + } + releaseTimer(tc) + if isTimeout { + return nil, ErrDialTimeout + } + } + + timeout = -time.Since(deadline) + if timeout <= 0 { + <-concurrencyCh + return nil, ErrDialTimeout + } + + chv := dialResultChanPool.Get() + if chv == nil { + chv = make(chan dialResult, 1) + } + ch := chv.(chan dialResult) + go func() { + var dr dialResult + dr.conn, dr.err = net.DialTCP(network, nil, addr) + ch <- dr + <-concurrencyCh + }() + + var ( + conn net.Conn + err error + ) + + tc := acquireTimer(timeout) + select { + case dr := <-ch: + conn = dr.conn + err = dr.err + dialResultChanPool.Put(ch) + case <-tc.C: + err = ErrDialTimeout + } + releaseTimer(tc) + + return conn, err +} + +var dialResultChanPool sync.Pool + +type dialResult struct { + conn net.Conn + err error +} + +// ErrDialTimeout is returned when TCP dialing is timed out. +var ErrDialTimeout = errors.New("dialing to the given TCP address timed out") + +// DefaultDialTimeout is timeout used by Dial and DialDualStack +// for establishing TCP connections. +const DefaultDialTimeout = 3 * time.Second + +type tcpAddrEntry struct { + addrs []net.TCPAddr + addrsIdx uint32 + + resolveTime time.Time + pending bool +} + +// DefaultDNSCacheDuration is the duration for caching resolved TCP addresses +// by Dial* functions. 
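A small direct-dial sketch for the helpers above (address and timeout are illustrative). Resolved addresses are cached for `DefaultDNSCacheDuration`, so repeated dials to the same host skip the DNS lookup:

```go
package main

import (
	"fmt"
	"time"

	"github.com/erikdubbelboer/fasthttp"
)

func main() {
	// Dial using both tcp4 and tcp6, giving up after two seconds.
	conn, err := fasthttp.DialDualStackTimeout("example.com:443", 2*time.Second)
	if err != nil {
		if err == fasthttp.ErrDialTimeout {
			fmt.Println("dial timed out")
			return
		}
		panic(err)
	}
	defer conn.Close()
	fmt.Println("connected to", conn.RemoteAddr())
}
```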
+const DefaultDNSCacheDuration = time.Minute + +func (d *tcpDialer) tcpAddrsClean() { + expireDuration := 2 * DefaultDNSCacheDuration + for { + time.Sleep(time.Second) + t := time.Now() + + d.tcpAddrsLock.Lock() + for k, e := range d.tcpAddrsMap { + if t.Sub(e.resolveTime) > expireDuration { + delete(d.tcpAddrsMap, k) + } + } + d.tcpAddrsLock.Unlock() + } +} + +func (d *tcpDialer) getTCPAddrs(addr string) ([]net.TCPAddr, uint32, error) { + d.tcpAddrsLock.Lock() + e := d.tcpAddrsMap[addr] + if e != nil && !e.pending && time.Since(e.resolveTime) > DefaultDNSCacheDuration { + e.pending = true + e = nil + } + d.tcpAddrsLock.Unlock() + + if e == nil { + addrs, err := resolveTCPAddrs(addr, d.DualStack) + if err != nil { + d.tcpAddrsLock.Lock() + e = d.tcpAddrsMap[addr] + if e != nil && e.pending { + e.pending = false + } + d.tcpAddrsLock.Unlock() + return nil, 0, err + } + + e = &tcpAddrEntry{ + addrs: addrs, + resolveTime: time.Now(), + } + + d.tcpAddrsLock.Lock() + d.tcpAddrsMap[addr] = e + d.tcpAddrsLock.Unlock() + } + + idx := atomic.AddUint32(&e.addrsIdx, 1) + return e.addrs, idx, nil +} + +func resolveTCPAddrs(addr string, dualStack bool) ([]net.TCPAddr, error) { + host, portS, err := net.SplitHostPort(addr) + if err != nil { + return nil, err + } + port, err := strconv.Atoi(portS) + if err != nil { + return nil, err + } + + ips, err := net.LookupIP(host) + if err != nil { + return nil, err + } + + n := len(ips) + addrs := make([]net.TCPAddr, 0, n) + for i := 0; i < n; i++ { + ip := ips[i] + if !dualStack && ip.To4() == nil { + continue + } + addrs = append(addrs, net.TCPAddr{ + IP: ip, + Port: port, + }) + } + if len(addrs) == 0 { + return nil, errNoDNSEntries + } + return addrs, nil +} + +var errNoDNSEntries = errors.New("couldn't find DNS entries for the given domain. Try using DialDualStack") diff --git a/vendor/github.com/erikdubbelboer/fasthttp/timer.go b/vendor/github.com/erikdubbelboer/fasthttp/timer.go new file mode 100644 index 0000000..bb12acb --- /dev/null +++ b/vendor/github.com/erikdubbelboer/fasthttp/timer.go @@ -0,0 +1,44 @@ +package fasthttp + +import ( + "sync" + "time" +) + +func initTimer(t *time.Timer, timeout time.Duration) *time.Timer { + if t == nil { + return time.NewTimer(timeout) + } + if t.Reset(timeout) { + panic("BUG: active timer trapped into initTimer()") + } + return t +} + +func stopTimer(t *time.Timer) { + if !t.Stop() { + // Collect possibly added time from the channel + // if timer has been stopped and nobody collected its' value. + select { + case <-t.C: + default: + } + } +} + +func acquireTimer(timeout time.Duration) *time.Timer { + v := timerPool.Get() + if v == nil { + return time.NewTimer(timeout) + } + t := v.(*time.Timer) + initTimer(t, timeout) + return t +} + +func releaseTimer(t *time.Timer) { + stopTimer(t) + timerPool.Put(t) +} + +var timerPool sync.Pool diff --git a/vendor/github.com/erikdubbelboer/fasthttp/uri.go b/vendor/github.com/erikdubbelboer/fasthttp/uri.go new file mode 100644 index 0000000..37572f5 --- /dev/null +++ b/vendor/github.com/erikdubbelboer/fasthttp/uri.go @@ -0,0 +1,525 @@ +package fasthttp + +import ( + "bytes" + "io" + "sync" +) + +// AcquireURI returns an empty URI instance from the pool. +// +// Release the URI with ReleaseURI after the URI is no longer needed. +// This allows reducing GC load. +func AcquireURI() *URI { + return uriPool.Get().(*URI) +} + +// ReleaseURI releases the URI acquired via AcquireURI. +// +// The released URI mustn't be used after releasing it, otherwise data races +// may occur. 
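A minimal sketch of the acquire/parse/release cycle for pooled URIs, modelled on this package's own tests (host, path and query values are illustrative):

```go
package main

import (
	"fmt"

	"github.com/erikdubbelboer/fasthttp"
)

func main() {
	u := fasthttp.AcquireURI()

	// Parse accepts a separate host and a request URI; the scheme
	// defaults to http when omitted.
	u.Parse([]byte("example.com"), []byte("/foo/bar?baz=123#qwe"))
	fmt.Println(string(u.FullURI())) // http://example.com/foo/bar?baz=123#qwe
	fmt.Println(string(u.Hash()))    // qwe

	// Return the URI to the pool; it must not be used afterwards.
	fasthttp.ReleaseURI(u)
}
```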
+func ReleaseURI(u *URI) { + u.Reset() + uriPool.Put(u) +} + +var uriPool = &sync.Pool{ + New: func() interface{} { + return &URI{} + }, +} + +// URI represents URI :) . +// +// It is forbidden copying URI instances. Create new instance and use CopyTo +// instead. +// +// URI instance MUST NOT be used from concurrently running goroutines. +type URI struct { + noCopy noCopy + + pathOriginal []byte + scheme []byte + path []byte + queryString []byte + hash []byte + host []byte + + queryArgs Args + parsedQueryArgs bool + + fullURI []byte + requestURI []byte + + h *RequestHeader +} + +// CopyTo copies uri contents to dst. +func (u *URI) CopyTo(dst *URI) { + dst.Reset() + dst.pathOriginal = append(dst.pathOriginal[:0], u.pathOriginal...) + dst.scheme = append(dst.scheme[:0], u.scheme...) + dst.path = append(dst.path[:0], u.path...) + dst.queryString = append(dst.queryString[:0], u.queryString...) + dst.hash = append(dst.hash[:0], u.hash...) + dst.host = append(dst.host[:0], u.host...) + + u.queryArgs.CopyTo(&dst.queryArgs) + dst.parsedQueryArgs = u.parsedQueryArgs + + // fullURI and requestURI shouldn't be copied, since they are created + // from scratch on each FullURI() and RequestURI() call. + dst.h = u.h +} + +// Hash returns URI hash, i.e. qwe of http://aaa.com/foo/bar?baz=123#qwe . +// +// The returned value is valid until the next URI method call. +func (u *URI) Hash() []byte { + return u.hash +} + +// SetHash sets URI hash. +func (u *URI) SetHash(hash string) { + u.hash = append(u.hash[:0], hash...) +} + +// SetHashBytes sets URI hash. +func (u *URI) SetHashBytes(hash []byte) { + u.hash = append(u.hash[:0], hash...) +} + +// QueryString returns URI query string, +// i.e. baz=123 of http://aaa.com/foo/bar?baz=123#qwe . +// +// The returned value is valid until the next URI method call. +func (u *URI) QueryString() []byte { + return u.queryString +} + +// SetQueryString sets URI query string. +func (u *URI) SetQueryString(queryString string) { + u.queryString = append(u.queryString[:0], queryString...) + u.parsedQueryArgs = false +} + +// SetQueryStringBytes sets URI query string. +func (u *URI) SetQueryStringBytes(queryString []byte) { + u.queryString = append(u.queryString[:0], queryString...) + u.parsedQueryArgs = false +} + +// Path returns URI path, i.e. /foo/bar of http://aaa.com/foo/bar?baz=123#qwe . +// +// The returned path is always urldecoded and normalized, +// i.e. '//f%20obar/baz/../zzz' becomes '/f obar/zzz'. +// +// The returned value is valid until the next URI method call. +func (u *URI) Path() []byte { + path := u.path + if len(path) == 0 { + path = strSlash + } + return path +} + +// SetPath sets URI path. +func (u *URI) SetPath(path string) { + u.pathOriginal = append(u.pathOriginal[:0], path...) + u.path = normalizePath(u.path, u.pathOriginal) +} + +// SetPathBytes sets URI path. +func (u *URI) SetPathBytes(path []byte) { + u.pathOriginal = append(u.pathOriginal[:0], path...) + u.path = normalizePath(u.path, u.pathOriginal) +} + +// PathOriginal returns the original path from requestURI passed to URI.Parse(). +// +// The returned value is valid until the next URI method call. +func (u *URI) PathOriginal() []byte { + return u.pathOriginal +} + +// Scheme returns URI scheme, i.e. http of http://aaa.com/foo/bar?baz=123#qwe . +// +// Returned scheme is always lowercased. +// +// The returned value is valid until the next URI method call. 
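To make the path normalization and scheme handling documented above concrete, a short sketch (the path value is taken straight from the `Path` documentation in this file):

```go
package main

import (
	"fmt"

	"github.com/erikdubbelboer/fasthttp"
)

func main() {
	var u fasthttp.URI

	// SetPath url-decodes and normalizes the path, as documented for Path().
	u.SetPath("//f%20obar/baz/../zzz")
	fmt.Println(string(u.Path())) // /f obar/zzz

	// SetScheme lowercases its argument; an unset scheme reads back as "http".
	u.SetScheme("HTTPS")
	fmt.Println(string(u.Scheme())) // https
}
```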
+func (u *URI) Scheme() []byte { + scheme := u.scheme + if len(scheme) == 0 { + scheme = strHTTP + } + return scheme +} + +// SetScheme sets URI scheme, i.e. http, https, ftp, etc. +func (u *URI) SetScheme(scheme string) { + u.scheme = append(u.scheme[:0], scheme...) + lowercaseBytes(u.scheme) +} + +// SetSchemeBytes sets URI scheme, i.e. http, https, ftp, etc. +func (u *URI) SetSchemeBytes(scheme []byte) { + u.scheme = append(u.scheme[:0], scheme...) + lowercaseBytes(u.scheme) +} + +// Reset clears uri. +func (u *URI) Reset() { + u.pathOriginal = u.pathOriginal[:0] + u.scheme = u.scheme[:0] + u.path = u.path[:0] + u.queryString = u.queryString[:0] + u.hash = u.hash[:0] + + u.host = u.host[:0] + u.queryArgs.Reset() + u.parsedQueryArgs = false + + // There is no need in u.fullURI = u.fullURI[:0], since full uri + // is calucalted on each call to FullURI(). + + // There is no need in u.requestURI = u.requestURI[:0], since requestURI + // is calculated on each call to RequestURI(). + + u.h = nil +} + +// Host returns host part, i.e. aaa.com of http://aaa.com/foo/bar?baz=123#qwe . +// +// Host is always lowercased. +func (u *URI) Host() []byte { + if len(u.host) == 0 && u.h != nil { + u.host = append(u.host[:0], u.h.Host()...) + lowercaseBytes(u.host) + u.h = nil + } + return u.host +} + +// SetHost sets host for the uri. +func (u *URI) SetHost(host string) { + u.host = append(u.host[:0], host...) + lowercaseBytes(u.host) +} + +// SetHostBytes sets host for the uri. +func (u *URI) SetHostBytes(host []byte) { + u.host = append(u.host[:0], host...) + lowercaseBytes(u.host) +} + +// Parse initializes URI from the given host and uri. +// +// host may be nil. In this case uri must contain fully qualified uri, +// i.e. with scheme and host. http is assumed if scheme is omitted. +// +// uri may contain e.g. RequestURI without scheme and host if host is non-empty. +func (u *URI) Parse(host, uri []byte) { + u.parse(host, uri, nil) +} + +func (u *URI) parseQuick(uri []byte, h *RequestHeader, isTLS bool) { + u.parse(nil, uri, h) + if isTLS { + u.scheme = append(u.scheme[:0], strHTTPS...) + } +} + +func (u *URI) parse(host, uri []byte, h *RequestHeader) { + u.Reset() + u.h = h + + scheme, host, uri := splitHostURI(host, uri) + u.scheme = append(u.scheme, scheme...) + lowercaseBytes(u.scheme) + u.host = append(u.host, host...) + lowercaseBytes(u.host) + + b := uri + queryIndex := bytes.IndexByte(b, '?') + fragmentIndex := bytes.IndexByte(b, '#') + // Ignore query in fragment part + if fragmentIndex >= 0 && queryIndex > fragmentIndex { + queryIndex = -1 + } + + if queryIndex < 0 && fragmentIndex < 0 { + u.pathOriginal = append(u.pathOriginal, b...) + u.path = normalizePath(u.path, u.pathOriginal) + return + } + + if queryIndex >= 0 { + // Path is everything up to the start of the query + u.pathOriginal = append(u.pathOriginal, b[:queryIndex]...) + u.path = normalizePath(u.path, u.pathOriginal) + + if fragmentIndex < 0 { + u.queryString = append(u.queryString, b[queryIndex+1:]...) + } else { + u.queryString = append(u.queryString, b[queryIndex+1:fragmentIndex]...) + u.hash = append(u.hash, b[fragmentIndex+1:]...) + } + return + } + + // fragmentIndex >= 0 && queryIndex < 0 + // Path is up to the start of fragment + u.pathOriginal = append(u.pathOriginal, b[:fragmentIndex]...) + u.path = normalizePath(u.path, u.pathOriginal) + u.hash = append(u.hash, b[fragmentIndex+1:]...) 
+} + +func normalizePath(dst, src []byte) []byte { + dst = dst[:0] + dst = addLeadingSlash(dst, src) + dst = decodeArgAppendNoPlus(dst, src) + + // remove duplicate slashes + b := dst + bSize := len(b) + for { + n := bytes.Index(b, strSlashSlash) + if n < 0 { + break + } + b = b[n:] + copy(b, b[1:]) + b = b[:len(b)-1] + bSize-- + } + dst = dst[:bSize] + + // remove /./ parts + b = dst + for { + n := bytes.Index(b, strSlashDotSlash) + if n < 0 { + break + } + nn := n + len(strSlashDotSlash) - 1 + copy(b[n:], b[nn:]) + b = b[:len(b)-nn+n] + } + + // remove /foo/../ parts + for { + n := bytes.Index(b, strSlashDotDotSlash) + if n < 0 { + break + } + nn := bytes.LastIndexByte(b[:n], '/') + if nn < 0 { + nn = 0 + } + n += len(strSlashDotDotSlash) - 1 + copy(b[nn:], b[n:]) + b = b[:len(b)-n+nn] + } + + // remove trailing /foo/.. + n := bytes.LastIndex(b, strSlashDotDot) + if n >= 0 && n+len(strSlashDotDot) == len(b) { + nn := bytes.LastIndexByte(b[:n], '/') + if nn < 0 { + return strSlash + } + b = b[:nn+1] + } + + return b +} + +// RequestURI returns RequestURI - i.e. URI without Scheme and Host. +func (u *URI) RequestURI() []byte { + dst := appendQuotedPath(u.requestURI[:0], u.Path()) + if u.queryArgs.Len() > 0 { + dst = append(dst, '?') + dst = u.queryArgs.AppendBytes(dst) + } else if len(u.queryString) > 0 { + dst = append(dst, '?') + dst = append(dst, u.queryString...) + } + if len(u.hash) > 0 { + dst = append(dst, '#') + dst = append(dst, u.hash...) + } + u.requestURI = dst + return u.requestURI +} + +// LastPathSegment returns the last part of uri path after '/'. +// +// Examples: +// +// * For /foo/bar/baz.html path returns baz.html. +// * For /foo/bar/ returns empty byte slice. +// * For /foobar.js returns foobar.js. +func (u *URI) LastPathSegment() []byte { + path := u.Path() + n := bytes.LastIndexByte(path, '/') + if n < 0 { + return path + } + return path[n+1:] +} + +// Update updates uri. +// +// The following newURI types are accepted: +// +// * Absolute, i.e. http://foobar.com/aaa/bb?cc . In this case the original +// uri is replaced by newURI. +// * Absolute without scheme, i.e. //foobar.com/aaa/bb?cc. In this case +// the original scheme is preserved. +// * Missing host, i.e. /aaa/bb?cc . In this case only RequestURI part +// of the original uri is replaced. +// * Relative path, i.e. xx?yy=abc . In this case the original RequestURI +// is updated according to the new relative path. +func (u *URI) Update(newURI string) { + u.UpdateBytes(s2b(newURI)) +} + +// UpdateBytes updates uri. +// +// The following newURI types are accepted: +// +// * Absolute, i.e. http://foobar.com/aaa/bb?cc . In this case the original +// uri is replaced by newURI. +// * Absolute without scheme, i.e. //foobar.com/aaa/bb?cc. In this case +// the original scheme is preserved. +// * Missing host, i.e. /aaa/bb?cc . In this case only RequestURI part +// of the original uri is replaced. +// * Relative path, i.e. xx?yy=abc . In this case the original RequestURI +// is updated according to the new relative path. +func (u *URI) UpdateBytes(newURI []byte) { + u.requestURI = u.updateBytes(newURI, u.requestURI) +} + +func (u *URI) updateBytes(newURI, buf []byte) []byte { + if len(newURI) == 0 { + return buf + } + + n := bytes.Index(newURI, strSlashSlash) + if n >= 0 { + // absolute uri + var b [32]byte + schemeOriginal := b[:0] + if len(u.scheme) > 0 { + schemeOriginal = append([]byte(nil), u.scheme...) 
+ } + u.Parse(nil, newURI) + if len(schemeOriginal) > 0 && len(u.scheme) == 0 { + u.scheme = append(u.scheme[:0], schemeOriginal...) + } + return buf + } + + if newURI[0] == '/' { + // uri without host + buf = u.appendSchemeHost(buf[:0]) + buf = append(buf, newURI...) + u.Parse(nil, buf) + return buf + } + + // relative path + switch newURI[0] { + case '?': + // query string only update + u.SetQueryStringBytes(newURI[1:]) + return append(buf[:0], u.FullURI()...) + case '#': + // update only hash + u.SetHashBytes(newURI[1:]) + return append(buf[:0], u.FullURI()...) + default: + // update the last path part after the slash + path := u.Path() + n = bytes.LastIndexByte(path, '/') + if n < 0 { + panic("BUG: path must contain at least one slash") + } + buf = u.appendSchemeHost(buf[:0]) + buf = appendQuotedPath(buf, path[:n+1]) + buf = append(buf, newURI...) + u.Parse(nil, buf) + return buf + } +} + +// FullURI returns full uri in the form {Scheme}://{Host}{RequestURI}#{Hash}. +func (u *URI) FullURI() []byte { + u.fullURI = u.AppendBytes(u.fullURI[:0]) + return u.fullURI +} + +// AppendBytes appends full uri to dst and returns the extended dst. +func (u *URI) AppendBytes(dst []byte) []byte { + dst = u.appendSchemeHost(dst) + return append(dst, u.RequestURI()...) +} + +func (u *URI) appendSchemeHost(dst []byte) []byte { + dst = append(dst, u.Scheme()...) + dst = append(dst, strColonSlashSlash...) + return append(dst, u.Host()...) +} + +// WriteTo writes full uri to w. +// +// WriteTo implements io.WriterTo interface. +func (u *URI) WriteTo(w io.Writer) (int64, error) { + n, err := w.Write(u.FullURI()) + return int64(n), err +} + +// String returns full uri. +func (u *URI) String() string { + return string(u.FullURI()) +} + +func splitHostURI(host, uri []byte) ([]byte, []byte, []byte) { + n := bytes.Index(uri, strSlashSlash) + if n < 0 { + return strHTTP, host, uri + } + scheme := uri[:n] + if bytes.IndexByte(scheme, '/') >= 0 { + return strHTTP, host, uri + } + if len(scheme) > 0 && scheme[len(scheme)-1] == ':' { + scheme = scheme[:len(scheme)-1] + } + n += len(strSlashSlash) + uri = uri[n:] + n = bytes.IndexByte(uri, '/') + if n < 0 { + // A hack for bogus urls like foobar.com?a=b without + // slash after host. + if n = bytes.IndexByte(uri, '?'); n >= 0 { + return scheme, uri[:n], uri[n:] + } + return scheme, uri, strSlash + } + return scheme, uri[:n], uri[n:] +} + +// QueryArgs returns query args. +func (u *URI) QueryArgs() *Args { + u.parseQueryArgs() + return &u.queryArgs +} + +func (u *URI) parseQueryArgs() { + if u.parsedQueryArgs { + return + } + u.queryArgs.ParseBytes(u.queryString) + u.parsedQueryArgs = true +} diff --git a/vendor/github.com/erikdubbelboer/fasthttp/uri_test.go b/vendor/github.com/erikdubbelboer/fasthttp/uri_test.go new file mode 100644 index 0000000..beb4f7d --- /dev/null +++ b/vendor/github.com/erikdubbelboer/fasthttp/uri_test.go @@ -0,0 +1,325 @@ +package fasthttp + +import ( + "bytes" + "fmt" + "testing" + "time" +) + +func TestURICopyToQueryArgs(t *testing.T) { + var u URI + a := u.QueryArgs() + a.Set("foo", "bar") + + var u1 URI + u.CopyTo(&u1) + a1 := u1.QueryArgs() + + if string(a1.Peek("foo")) != "bar" { + t.Fatalf("unexpected query args value %q. 
Expecting %q", a1.Peek("foo"), "bar") + } +} + +func TestURIAcquireReleaseSequential(t *testing.T) { + testURIAcquireRelease(t) +} + +func TestURIAcquireReleaseConcurrent(t *testing.T) { + ch := make(chan struct{}, 10) + for i := 0; i < 10; i++ { + go func() { + testURIAcquireRelease(t) + ch <- struct{}{} + }() + } + + for i := 0; i < 10; i++ { + select { + case <-ch: + case <-time.After(time.Second): + t.Fatalf("timeout") + } + } +} + +func testURIAcquireRelease(t *testing.T) { + for i := 0; i < 10; i++ { + u := AcquireURI() + host := fmt.Sprintf("host.%d.com", i*23) + path := fmt.Sprintf("/foo/%d/bar", i*17) + queryArgs := "?foo=bar&baz=aass" + u.Parse([]byte(host), []byte(path+queryArgs)) + if string(u.Host()) != host { + t.Fatalf("unexpected host %q. Expecting %q", u.Host(), host) + } + if string(u.Path()) != path { + t.Fatalf("unexpected path %q. Expecting %q", u.Path(), path) + } + ReleaseURI(u) + } +} + +func TestURILastPathSegment(t *testing.T) { + testURILastPathSegment(t, "", "") + testURILastPathSegment(t, "/", "") + testURILastPathSegment(t, "/foo/bar/", "") + testURILastPathSegment(t, "/foobar.js", "foobar.js") + testURILastPathSegment(t, "/foo/bar/baz.html", "baz.html") +} + +func testURILastPathSegment(t *testing.T, path, expectedSegment string) { + var u URI + u.SetPath(path) + segment := u.LastPathSegment() + if string(segment) != expectedSegment { + t.Fatalf("unexpected last path segment for path %q: %q. Expecting %q", path, segment, expectedSegment) + } +} + +func TestURIPathEscape(t *testing.T) { + testURIPathEscape(t, "/foo/bar", "/foo/bar") + testURIPathEscape(t, "/f_o-o=b:ar,b.c&q", "/f_o-o=b:ar,b.c&q") + testURIPathEscape(t, "/aa?bb.тест~qq", "/aa%3Fbb.%D1%82%D0%B5%D1%81%D1%82~qq") +} + +func testURIPathEscape(t *testing.T, path, expectedRequestURI string) { + var u URI + u.SetPath(path) + requestURI := u.RequestURI() + if string(requestURI) != expectedRequestURI { + t.Fatalf("unexpected requestURI %q. Expecting %q. 
path %q", requestURI, expectedRequestURI, path) + } +} + +func TestURIUpdate(t *testing.T) { + // full uri + testURIUpdate(t, "http://foo.bar/baz?aaa=22#aaa", "https://aa.com/bb", "https://aa.com/bb") + + // empty uri + testURIUpdate(t, "http://aaa.com/aaa.html?234=234#add", "", "http://aaa.com/aaa.html?234=234#add") + + // request uri + testURIUpdate(t, "ftp://aaa/xxx/yyy?aaa=bb#aa", "/boo/bar?xx", "ftp://aaa/boo/bar?xx") + + // relative uri + testURIUpdate(t, "http://foo.bar/baz/xxx.html?aaa=22#aaa", "bb.html?xx=12#pp", "http://foo.bar/baz/bb.html?xx=12#pp") + testURIUpdate(t, "http://xx/a/b/c/d", "../qwe/p?zx=34", "http://xx/a/b/qwe/p?zx=34") + testURIUpdate(t, "https://qqq/aaa.html?foo=bar", "?baz=434&aaa#xcv", "https://qqq/aaa.html?baz=434&aaa#xcv") + testURIUpdate(t, "http://foo.bar/baz", "~a/%20b=c,тест?йцу=ке", "http://foo.bar/~a/%20b=c,%D1%82%D0%B5%D1%81%D1%82?йцу=ке") + testURIUpdate(t, "http://foo.bar/baz", "/qwe#fragment", "http://foo.bar/qwe#fragment") + testURIUpdate(t, "http://foobar/baz/xxx", "aaa.html#bb?cc=dd&ee=dfd", "http://foobar/baz/aaa.html#bb?cc=dd&ee=dfd") + + // hash + testURIUpdate(t, "http://foo.bar/baz#aaa", "#fragment", "http://foo.bar/baz#fragment") + + // uri without scheme + testURIUpdate(t, "https://foo.bar/baz", "//aaa.bbb/cc?dd", "https://aaa.bbb/cc?dd") + testURIUpdate(t, "http://foo.bar/baz", "//aaa.bbb/cc?dd", "http://aaa.bbb/cc?dd") +} + +func testURIUpdate(t *testing.T, base, update, result string) { + var u URI + u.Parse(nil, []byte(base)) + u.Update(update) + s := u.String() + if s != result { + t.Fatalf("unexpected result %q. Expecting %q. base=%q, update=%q", s, result, base, update) + } +} + +func TestURIPathNormalize(t *testing.T) { + var u URI + + // double slash + testURIPathNormalize(t, &u, "/aa//bb", "/aa/bb") + + // triple slash + testURIPathNormalize(t, &u, "/x///y/", "/x/y/") + + // multi slashes + testURIPathNormalize(t, &u, "/abc//de///fg////", "/abc/de/fg/") + + // encoded slashes + testURIPathNormalize(t, &u, "/xxxx%2fyyy%2f%2F%2F", "/xxxx/yyy/") + + // dotdot + testURIPathNormalize(t, &u, "/aaa/..", "/") + + // dotdot with trailing slash + testURIPathNormalize(t, &u, "/xxx/yyy/../", "/xxx/") + + // multi dotdots + testURIPathNormalize(t, &u, "/aaa/bbb/ccc/../../ddd", "/aaa/ddd") + + // dotdots separated by other data + testURIPathNormalize(t, &u, "/a/b/../c/d/../e/..", "/a/c/") + + // too many dotdots + testURIPathNormalize(t, &u, "/aaa/../../../../xxx", "/xxx") + testURIPathNormalize(t, &u, "/../../../../../..", "/") + testURIPathNormalize(t, &u, "/../../../../../../", "/") + + // encoded dotdots + testURIPathNormalize(t, &u, "/aaa%2Fbbb%2F%2E.%2Fxxx", "/aaa/xxx") + + // double slash with dotdots + testURIPathNormalize(t, &u, "/aaa////..//b", "/b") + + // fake dotdot + testURIPathNormalize(t, &u, "/aaa/..bbb/ccc/..", "/aaa/..bbb/") + + // single dot + testURIPathNormalize(t, &u, "/a/./b/././c/./d.html", "/a/b/c/d.html") + testURIPathNormalize(t, &u, "./foo/", "/foo/") + testURIPathNormalize(t, &u, "./../.././../../aaa/bbb/../../../././../", "/") + testURIPathNormalize(t, &u, "./a/./.././../b/./foo.html", "/b/foo.html") +} + +func testURIPathNormalize(t *testing.T, u *URI, requestURI, expectedPath string) { + u.Parse(nil, []byte(requestURI)) + if string(u.Path()) != expectedPath { + t.Fatalf("Unexpected path %q. Expected %q. 
requestURI=%q", u.Path(), expectedPath, requestURI) + } +} + +func TestURIFullURI(t *testing.T) { + var args Args + + // empty scheme, path and hash + testURIFullURI(t, "", "foobar.com", "", "", &args, "http://foobar.com/") + + // empty scheme and hash + testURIFullURI(t, "", "aa.com", "/foo/bar", "", &args, "http://aa.com/foo/bar") + + // empty hash + testURIFullURI(t, "fTP", "XXx.com", "/foo", "", &args, "ftp://xxx.com/foo") + + // empty args + testURIFullURI(t, "https", "xx.com", "/", "aaa", &args, "https://xx.com/#aaa") + + // non-empty args and non-ASCII path + args.Set("foo", "bar") + args.Set("xxx", "йух") + testURIFullURI(t, "", "xxx.com", "/тест123", "2er", &args, "http://xxx.com/%D1%82%D0%B5%D1%81%D1%82123?foo=bar&xxx=%D0%B9%D1%83%D1%85#2er") + + // test with empty args and non-empty query string + var u URI + u.Parse([]byte("google.com"), []byte("/foo?bar=baz&baraz#qqqq")) + uri := u.FullURI() + expectedURI := "http://google.com/foo?bar=baz&baraz#qqqq" + if string(uri) != expectedURI { + t.Fatalf("Unexpected URI: %q. Expected %q", uri, expectedURI) + } +} + +func testURIFullURI(t *testing.T, scheme, host, path, hash string, args *Args, expectedURI string) { + var u URI + + u.SetScheme(scheme) + u.SetHost(host) + u.SetPath(path) + u.SetHash(hash) + args.CopyTo(u.QueryArgs()) + + uri := u.FullURI() + if string(uri) != expectedURI { + t.Fatalf("Unexpected URI: %q. Expected %q", uri, expectedURI) + } +} + +func TestURIParseNilHost(t *testing.T) { + testURIParseScheme(t, "http://google.com/foo?bar#baz", "http", "google.com", "/foo?bar#baz") + testURIParseScheme(t, "HTtP://google.com/", "http", "google.com", "/") + testURIParseScheme(t, "://google.com/xyz", "http", "google.com", "/xyz") + testURIParseScheme(t, "//google.com/foobar", "http", "google.com", "/foobar") + testURIParseScheme(t, "fTP://aaa.com", "ftp", "aaa.com", "/") + testURIParseScheme(t, "httPS://aaa.com", "https", "aaa.com", "/") + + // missing slash after hostname + testURIParseScheme(t, "http://foobar.com?baz=111", "http", "foobar.com", "/?baz=111") +} + +func testURIParseScheme(t *testing.T, uri, expectedScheme, expectedHost, expectedRequestURI string) { + var u URI + u.Parse(nil, []byte(uri)) + if string(u.Scheme()) != expectedScheme { + t.Fatalf("Unexpected scheme %q. Expecting %q for uri %q", u.Scheme(), expectedScheme, uri) + } + if string(u.Host()) != expectedHost { + t.Fatalf("Unexepcted host %q. Expecting %q for uri %q", u.Host(), expectedHost, uri) + } + if string(u.RequestURI()) != expectedRequestURI { + t.Fatalf("Unexepcted requestURI %q. Expecting %q for uri %q", u.RequestURI(), expectedRequestURI, uri) + } +} + +func TestURIParse(t *testing.T) { + var u URI + + // no args + testURIParse(t, &u, "aaa", "sdfdsf", + "http://aaa/sdfdsf", "aaa", "/sdfdsf", "sdfdsf", "", "") + + // args + testURIParse(t, &u, "xx", "/aa?ss", + "http://xx/aa?ss", "xx", "/aa", "/aa", "ss", "") + + // args and hash + testURIParse(t, &u, "foobar.com", "/a.b.c?def=gkl#mnop", + "http://foobar.com/a.b.c?def=gkl#mnop", "foobar.com", "/a.b.c", "/a.b.c", "def=gkl", "mnop") + + // '?' 
and '#' in hash + testURIParse(t, &u, "aaa.com", "/foo#bar?baz=aaa#bbb", + "http://aaa.com/foo#bar?baz=aaa#bbb", "aaa.com", "/foo", "/foo", "", "bar?baz=aaa#bbb") + + // encoded path + testURIParse(t, &u, "aa.com", "/Test%20+%20%D0%BF%D1%80%D0%B8?asdf=%20%20&s=12#sdf", + "http://aa.com/Test%20%2B%20%D0%BF%D1%80%D0%B8?asdf=%20%20&s=12#sdf", "aa.com", "/Test + при", "/Test%20+%20%D0%BF%D1%80%D0%B8", "asdf=%20%20&s=12", "sdf") + + // host in uppercase + testURIParse(t, &u, "FOObar.COM", "/bC?De=F#Gh", + "http://foobar.com/bC?De=F#Gh", "foobar.com", "/bC", "/bC", "De=F", "Gh") + + // uri with hostname + testURIParse(t, &u, "xxx.com", "http://aaa.com/foo/bar?baz=aaa#ddd", + "http://aaa.com/foo/bar?baz=aaa#ddd", "aaa.com", "/foo/bar", "/foo/bar", "baz=aaa", "ddd") + testURIParse(t, &u, "xxx.com", "https://ab.com/f/b%20r?baz=aaa#ddd", + "https://ab.com/f/b%20r?baz=aaa#ddd", "ab.com", "/f/b r", "/f/b%20r", "baz=aaa", "ddd") + + // no slash after hostname in uri + testURIParse(t, &u, "aaa.com", "http://google.com", + "http://google.com/", "google.com", "/", "/", "", "") + + // uppercase hostname in uri + testURIParse(t, &u, "abc.com", "http://GoGLE.com/aaa", + "http://gogle.com/aaa", "gogle.com", "/aaa", "/aaa", "", "") + + // http:// in query params + testURIParse(t, &u, "aaa.com", "/foo?bar=http://google.com", + "http://aaa.com/foo?bar=http://google.com", "aaa.com", "/foo", "/foo", "bar=http://google.com", "") +} + +func testURIParse(t *testing.T, u *URI, host, uri, + expectedURI, expectedHost, expectedPath, expectedPathOriginal, expectedArgs, expectedHash string) { + u.Parse([]byte(host), []byte(uri)) + + if !bytes.Equal(u.FullURI(), []byte(expectedURI)) { + t.Fatalf("Unexpected uri %q. Expected %q. host=%q, uri=%q", u.FullURI(), expectedURI, host, uri) + } + if !bytes.Equal(u.Host(), []byte(expectedHost)) { + t.Fatalf("Unexpected host %q. Expected %q. host=%q, uri=%q", u.Host(), expectedHost, host, uri) + } + if !bytes.Equal(u.PathOriginal(), []byte(expectedPathOriginal)) { + t.Fatalf("Unexpected original path %q. Expected %q. host=%q, uri=%q", u.PathOriginal(), expectedPathOriginal, host, uri) + } + if !bytes.Equal(u.Path(), []byte(expectedPath)) { + t.Fatalf("Unexpected path %q. Expected %q. host=%q, uri=%q", u.Path(), expectedPath, host, uri) + } + if !bytes.Equal(u.QueryString(), []byte(expectedArgs)) { + t.Fatalf("Unexpected args %q. Expected %q. host=%q, uri=%q", u.QueryString(), expectedArgs, host, uri) + } + if !bytes.Equal(u.Hash(), []byte(expectedHash)) { + t.Fatalf("Unexpected hash %q. Expected %q. 
host=%q, uri=%q", u.Hash(), expectedHash, host, uri) + } +} diff --git a/vendor/github.com/erikdubbelboer/fasthttp/uri_timing_test.go b/vendor/github.com/erikdubbelboer/fasthttp/uri_timing_test.go new file mode 100644 index 0000000..44a1d29 --- /dev/null +++ b/vendor/github.com/erikdubbelboer/fasthttp/uri_timing_test.go @@ -0,0 +1,49 @@ +package fasthttp + +import ( + "testing" +) + +func BenchmarkURIParsePath(b *testing.B) { + benchmarkURIParse(b, "google.com", "/foo/bar") +} + +func BenchmarkURIParsePathQueryString(b *testing.B) { + benchmarkURIParse(b, "google.com", "/foo/bar?query=string&other=value") +} + +func BenchmarkURIParsePathQueryStringHash(b *testing.B) { + benchmarkURIParse(b, "google.com", "/foo/bar?query=string&other=value#hashstring") +} + +func BenchmarkURIParseHostname(b *testing.B) { + benchmarkURIParse(b, "google.com", "http://foobar.com/foo/bar?query=string&other=value#hashstring") +} + +func BenchmarkURIFullURI(b *testing.B) { + host := []byte("foobar.com") + requestURI := []byte("/foobar/baz?aaa=bbb&ccc=ddd") + uriLen := len(host) + len(requestURI) + 7 + + b.RunParallel(func(pb *testing.PB) { + var u URI + u.Parse(host, requestURI) + for pb.Next() { + uri := u.FullURI() + if len(uri) != uriLen { + b.Fatalf("unexpected uri len %d. Expecting %d", len(uri), uriLen) + } + } + }) +} + +func benchmarkURIParse(b *testing.B, host, uri string) { + strHost, strURI := []byte(host), []byte(uri) + + b.RunParallel(func(pb *testing.PB) { + var u URI + for pb.Next() { + u.Parse(strHost, strURI) + } + }) +} diff --git a/vendor/github.com/erikdubbelboer/fasthttp/uri_unix.go b/vendor/github.com/erikdubbelboer/fasthttp/uri_unix.go new file mode 100644 index 0000000..1e30733 --- /dev/null +++ b/vendor/github.com/erikdubbelboer/fasthttp/uri_unix.go @@ -0,0 +1,12 @@ +// +build !windows + +package fasthttp + +func addLeadingSlash(dst, src []byte) []byte { + // add leading slash for unix paths + if len(src) == 0 || src[0] != '/' { + dst = append(dst, '/') + } + + return dst +} diff --git a/vendor/github.com/erikdubbelboer/fasthttp/uri_windows.go b/vendor/github.com/erikdubbelboer/fasthttp/uri_windows.go new file mode 100644 index 0000000..95917a6 --- /dev/null +++ b/vendor/github.com/erikdubbelboer/fasthttp/uri_windows.go @@ -0,0 +1,12 @@ +// +build windows + +package fasthttp + +func addLeadingSlash(dst, src []byte) []byte { + // zero length and "C:/" case + if len(src) == 0 || (len(src) > 2 && src[1] != ':') { + dst = append(dst, '/') + } + + return dst +} diff --git a/vendor/github.com/erikdubbelboer/fasthttp/uri_windows_test.go b/vendor/github.com/erikdubbelboer/fasthttp/uri_windows_test.go new file mode 100644 index 0000000..61d1a2c --- /dev/null +++ b/vendor/github.com/erikdubbelboer/fasthttp/uri_windows_test.go @@ -0,0 +1,12 @@ +// +build windows + +package fasthttp + +import "testing" + +func TestURIPathNormalizeIssue86(t *testing.T) { + // see https://github.com/valyala/fasthttp/issues/86 + var u URI + + testURIPathNormalize(t, &u, `C:\a\b\c\fs.go`, `C:\a\b\c\fs.go`) +} diff --git a/vendor/github.com/erikdubbelboer/fasthttp/userdata.go b/vendor/github.com/erikdubbelboer/fasthttp/userdata.go new file mode 100644 index 0000000..bd3e28a --- /dev/null +++ b/vendor/github.com/erikdubbelboer/fasthttp/userdata.go @@ -0,0 +1,71 @@ +package fasthttp + +import ( + "io" +) + +type userDataKV struct { + key []byte + value interface{} +} + +type userData []userDataKV + +func (d *userData) Set(key string, value interface{}) { + args := *d + n := len(args) + for i := 0; i < n; i++ { + kv := 
&args[i] + if string(kv.key) == key { + kv.value = value + return + } + } + + c := cap(args) + if c > n { + args = args[:n+1] + kv := &args[n] + kv.key = append(kv.key[:0], key...) + kv.value = value + *d = args + return + } + + kv := userDataKV{} + kv.key = append(kv.key[:0], key...) + kv.value = value + *d = append(args, kv) +} + +func (d *userData) SetBytes(key []byte, value interface{}) { + d.Set(b2s(key), value) +} + +func (d *userData) Get(key string) interface{} { + args := *d + n := len(args) + for i := 0; i < n; i++ { + kv := &args[i] + if string(kv.key) == key { + return kv.value + } + } + return nil +} + +func (d *userData) GetBytes(key []byte) interface{} { + return d.Get(b2s(key)) +} + +func (d *userData) Reset() { + args := *d + n := len(args) + for i := 0; i < n; i++ { + v := args[i].value + if vc, ok := v.(io.Closer); ok { + vc.Close() + } + } + *d = (*d)[:0] +} diff --git a/vendor/github.com/erikdubbelboer/fasthttp/userdata_test.go b/vendor/github.com/erikdubbelboer/fasthttp/userdata_test.go new file mode 100644 index 0000000..ce14204 --- /dev/null +++ b/vendor/github.com/erikdubbelboer/fasthttp/userdata_test.go @@ -0,0 +1,74 @@ +package fasthttp + +import ( + "fmt" + "reflect" + "testing" +) + +func TestUserData(t *testing.T) { + var u userData + + for i := 0; i < 10; i++ { + key := []byte(fmt.Sprintf("key_%d", i)) + u.SetBytes(key, i+5) + testUserDataGet(t, &u, key, i+5) + u.SetBytes(key, i) + testUserDataGet(t, &u, key, i) + } + + for i := 0; i < 10; i++ { + key := []byte(fmt.Sprintf("key_%d", i)) + testUserDataGet(t, &u, key, i) + } + + u.Reset() + + for i := 0; i < 10; i++ { + key := []byte(fmt.Sprintf("key_%d", i)) + testUserDataGet(t, &u, key, nil) + } +} + +func testUserDataGet(t *testing.T, u *userData, key []byte, value interface{}) { + v := u.GetBytes(key) + if v == nil && value != nil { + t.Fatalf("cannot obtain value for key=%q", key) + } + if !reflect.DeepEqual(v, value) { + t.Fatalf("unexpected value for key=%q: %d. Expecting %d", key, v, value) + } +} + +func TestUserDataValueClose(t *testing.T) { + var u userData + + closeCalls := 0 + + // store values implementing io.Closer + for i := 0; i < 5; i++ { + key := fmt.Sprintf("key_%d", i) + u.Set(key, &closerValue{&closeCalls}) + } + + // store values without io.Closer + for i := 0; i < 10; i++ { + key := fmt.Sprintf("key_noclose_%d", i) + u.Set(key, i) + } + + u.Reset() + + if closeCalls != 5 { + t.Fatalf("unexpected number of Close calls: %d. 
Expecting 10", closeCalls) + } +} + +type closerValue struct { + closeCalls *int +} + +func (cv *closerValue) Close() error { + (*cv.closeCalls)++ + return nil +} diff --git a/vendor/github.com/erikdubbelboer/fasthttp/userdata_timing_test.go b/vendor/github.com/erikdubbelboer/fasthttp/userdata_timing_test.go new file mode 100644 index 0000000..3822de3 --- /dev/null +++ b/vendor/github.com/erikdubbelboer/fasthttp/userdata_timing_test.go @@ -0,0 +1,48 @@ +package fasthttp + +import ( + "testing" +) + +func BenchmarkUserDataCustom(b *testing.B) { + keys := []string{"foobar", "baz", "aaa", "bsdfs"} + b.RunParallel(func(pb *testing.PB) { + var u userData + var v interface{} = u + for pb.Next() { + for _, key := range keys { + u.Set(key, v) + } + for _, key := range keys { + vv := u.Get(key) + if _, ok := vv.(userData); !ok { + b.Fatalf("unexpected value %v for key %q", vv, key) + } + } + u.Reset() + } + }) +} + +func BenchmarkUserDataStdMap(b *testing.B) { + keys := []string{"foobar", "baz", "aaa", "bsdfs"} + b.RunParallel(func(pb *testing.PB) { + u := make(map[string]interface{}) + var v interface{} = u + for pb.Next() { + for _, key := range keys { + u[key] = v + } + for _, key := range keys { + vv := u[key] + if _, ok := vv.(map[string]interface{}); !ok { + b.Fatalf("unexpected value %v for key %q", vv, key) + } + } + + for k := range u { + delete(u, k) + } + } + }) +} diff --git a/vendor/github.com/erikdubbelboer/fasthttp/workerpool.go b/vendor/github.com/erikdubbelboer/fasthttp/workerpool.go new file mode 100644 index 0000000..752b0f2 --- /dev/null +++ b/vendor/github.com/erikdubbelboer/fasthttp/workerpool.go @@ -0,0 +1,233 @@ +package fasthttp + +import ( + "net" + "runtime" + "strings" + "sync" + "time" +) + +// workerPool serves incoming connections via a pool of workers +// in FILO order, i.e. the most recently stopped worker will serve the next +// incoming connection. +// +// Such a scheme keeps CPU caches hot (in theory). +type workerPool struct { + // Function for serving server connections. + // It must leave c unclosed. + WorkerFunc func(c net.Conn) error + + MaxWorkersCount int + + LogAllErrors bool + + MaxIdleWorkerDuration time.Duration + + Logger Logger + + lock sync.Mutex + workersCount int + mustStop bool + + ready []*workerChan + + stopCh chan struct{} + + workerChanPool sync.Pool +} + +type workerChan struct { + lastUseTime time.Time + ch chan net.Conn +} + +func (wp *workerPool) Start() { + if wp.stopCh != nil { + panic("BUG: workerPool already started") + } + wp.stopCh = make(chan struct{}) + stopCh := wp.stopCh + go func() { + var scratch []*workerChan + for { + wp.clean(&scratch) + select { + case <-stopCh: + return + default: + time.Sleep(wp.getMaxIdleWorkerDuration()) + } + } + }() +} + +func (wp *workerPool) Stop() { + if wp.stopCh == nil { + panic("BUG: workerPool wasn't started") + } + close(wp.stopCh) + wp.stopCh = nil + + // Stop all the workers waiting for incoming connections. + // Do not wait for busy workers - they will stop after + // serving the connection and noticing wp.mustStop = true. 
+ wp.lock.Lock() + ready := wp.ready + for i, ch := range ready { + ch.ch <- nil + ready[i] = nil + } + wp.ready = ready[:0] + wp.mustStop = true + wp.lock.Unlock() +} + +func (wp *workerPool) getMaxIdleWorkerDuration() time.Duration { + if wp.MaxIdleWorkerDuration <= 0 { + return 10 * time.Second + } + return wp.MaxIdleWorkerDuration +} + +func (wp *workerPool) clean(scratch *[]*workerChan) { + maxIdleWorkerDuration := wp.getMaxIdleWorkerDuration() + + // Clean least recently used workers if they didn't serve connections + // for more than maxIdleWorkerDuration. + currentTime := time.Now() + + wp.lock.Lock() + ready := wp.ready + n := len(ready) + i := 0 + for i < n && currentTime.Sub(ready[i].lastUseTime) > maxIdleWorkerDuration { + i++ + } + *scratch = append((*scratch)[:0], ready[:i]...) + if i > 0 { + m := copy(ready, ready[i:]) + for i = m; i < n; i++ { + ready[i] = nil + } + wp.ready = ready[:m] + } + wp.lock.Unlock() + + // Notify obsolete workers to stop. + // This notification must be outside the wp.lock, since ch.ch + // may be blocking and may consume a lot of time if many workers + // are located on non-local CPUs. + tmp := *scratch + for i, ch := range tmp { + ch.ch <- nil + tmp[i] = nil + } +} + +func (wp *workerPool) Serve(c net.Conn) bool { + ch := wp.getCh() + if ch == nil { + return false + } + ch.ch <- c + return true +} + +var workerChanCap = func() int { + // Use blocking workerChan if GOMAXPROCS=1. + // This immediately switches Serve to WorkerFunc, which results + // in higher performance (under go1.5 at least). + if runtime.GOMAXPROCS(0) == 1 { + return 0 + } + + // Use non-blocking workerChan if GOMAXPROCS>1, + // since otherwise the Serve caller (Acceptor) may lag accepting + // new connections if WorkerFunc is CPU-bound. 
+ return 1 +}() + +func (wp *workerPool) getCh() *workerChan { + var ch *workerChan + createWorker := false + + wp.lock.Lock() + ready := wp.ready + n := len(ready) - 1 + if n < 0 { + if wp.workersCount < wp.MaxWorkersCount { + createWorker = true + wp.workersCount++ + } + } else { + ch = ready[n] + ready[n] = nil + wp.ready = ready[:n] + } + wp.lock.Unlock() + + if ch == nil { + if !createWorker { + return nil + } + vch := wp.workerChanPool.Get() + if vch == nil { + vch = &workerChan{ + ch: make(chan net.Conn, workerChanCap), + } + } + ch = vch.(*workerChan) + go func() { + wp.workerFunc(ch) + wp.workerChanPool.Put(vch) + }() + } + return ch +} + +func (wp *workerPool) release(ch *workerChan) bool { + ch.lastUseTime = time.Now() + wp.lock.Lock() + if wp.mustStop { + wp.lock.Unlock() + return false + } + wp.ready = append(wp.ready, ch) + wp.lock.Unlock() + return true +} + +func (wp *workerPool) workerFunc(ch *workerChan) { + var c net.Conn + + var err error + for c = range ch.ch { + if c == nil { + break + } + + if err = wp.WorkerFunc(c); err != nil && err != errHijacked { + errStr := err.Error() + if wp.LogAllErrors || + !(strings.Contains(errStr, "broken pipe") || + strings.Contains(errStr, "reset by peer") || + strings.Contains(errStr, "i/o timeout") || + strings.Contains(errStr, "request headers: small read buffer")) { + wp.Logger.Printf("error when serving connection %q<->%q: %s", c.LocalAddr(), c.RemoteAddr(), err) + } + } + if err != errHijacked { + c.Close() + } + c = nil + + if !wp.release(ch) { + break + } + } + + wp.lock.Lock() + wp.workersCount-- + wp.lock.Unlock() +} diff --git a/vendor/github.com/erikdubbelboer/fasthttp/workerpool_test.go b/vendor/github.com/erikdubbelboer/fasthttp/workerpool_test.go new file mode 100644 index 0000000..39022c3 --- /dev/null +++ b/vendor/github.com/erikdubbelboer/fasthttp/workerpool_test.go @@ -0,0 +1,168 @@ +package fasthttp + +import ( + "io/ioutil" + "net" + "testing" + "time" + + "github.com/erikdubbelboer/fasthttp/fasthttputil" +) + +func TestWorkerPoolStartStopSerial(t *testing.T) { + testWorkerPoolStartStop(t) +} + +func TestWorkerPoolStartStopConcurrent(t *testing.T) { + concurrency := 10 + ch := make(chan struct{}, concurrency) + for i := 0; i < concurrency; i++ { + go func() { + testWorkerPoolStartStop(t) + ch <- struct{}{} + }() + } + for i := 0; i < concurrency; i++ { + select { + case <-ch: + case <-time.After(time.Second): + t.Fatalf("timeout") + } + } +} + +func testWorkerPoolStartStop(t *testing.T) { + wp := &workerPool{ + WorkerFunc: func(conn net.Conn) error { return nil }, + MaxWorkersCount: 10, + Logger: defaultLogger, + } + for i := 0; i < 10; i++ { + wp.Start() + wp.Stop() + } +} + +func TestWorkerPoolMaxWorkersCountSerial(t *testing.T) { + testWorkerPoolMaxWorkersCountMulti(t) +} + +func TestWorkerPoolMaxWorkersCountConcurrent(t *testing.T) { + concurrency := 4 + ch := make(chan struct{}, concurrency) + for i := 0; i < concurrency; i++ { + go func() { + testWorkerPoolMaxWorkersCountMulti(t) + ch <- struct{}{} + }() + } + for i := 0; i < concurrency; i++ { + select { + case <-ch: + case <-time.After(time.Second): + t.Fatalf("timeout") + } + } +} + +func testWorkerPoolMaxWorkersCountMulti(t *testing.T) { + for i := 0; i < 5; i++ { + testWorkerPoolMaxWorkersCount(t) + } +} + +func testWorkerPoolMaxWorkersCount(t *testing.T) { + ready := make(chan struct{}) + wp := &workerPool{ + WorkerFunc: func(conn net.Conn) error { + buf := make([]byte, 100) + n, err := conn.Read(buf) + if err != nil { + t.Fatalf("unexpected error: %s", 
err) + } + buf = buf[:n] + if string(buf) != "foobar" { + t.Fatalf("unexpected data read: %q. Expecting %q", buf, "foobar") + } + if _, err = conn.Write([]byte("baz")); err != nil { + t.Fatalf("unexpected error: %s", err) + } + + <-ready + + return nil + }, + MaxWorkersCount: 10, + Logger: defaultLogger, + } + wp.Start() + + ln := fasthttputil.NewInmemoryListener() + + clientCh := make(chan struct{}, wp.MaxWorkersCount) + for i := 0; i < wp.MaxWorkersCount; i++ { + go func() { + conn, err := ln.Dial() + if err != nil { + t.Fatalf("unexpected error: %s", err) + } + if _, err = conn.Write([]byte("foobar")); err != nil { + t.Fatalf("unexpected error: %s", err) + } + data, err := ioutil.ReadAll(conn) + if err != nil { + t.Fatalf("unexpected error: %s", err) + } + if string(data) != "baz" { + t.Fatalf("unexpected value read: %q. Expecting %q", data, "baz") + } + if err = conn.Close(); err != nil { + t.Fatalf("unexpected error: %s", err) + } + clientCh <- struct{}{} + }() + } + + for i := 0; i < wp.MaxWorkersCount; i++ { + conn, err := ln.Accept() + if err != nil { + t.Fatalf("unexpected error: %s", err) + } + if !wp.Serve(conn) { + t.Fatalf("worker pool must have enough workers to serve the conn") + } + } + + go func() { + if _, err := ln.Dial(); err != nil { + t.Fatalf("unexpected error: %s", err) + } + }() + conn, err := ln.Accept() + if err != nil { + t.Fatalf("unexpected error: %s", err) + } + for i := 0; i < 5; i++ { + if wp.Serve(conn) { + t.Fatalf("worker pool must be full") + } + } + if err = conn.Close(); err != nil { + t.Fatalf("unexpected error: %s", err) + } + + close(ready) + + for i := 0; i < wp.MaxWorkersCount; i++ { + select { + case <-clientCh: + case <-time.After(time.Second): + t.Fatalf("timeout") + } + } + + if err := ln.Close(); err != nil { + t.Fatalf("unexpected error: %s", err) + } + wp.Stop() +} diff --git a/vendor/github.com/go-sql-driver/mysql/.github/ISSUE_TEMPLATE.md b/vendor/github.com/go-sql-driver/mysql/.github/ISSUE_TEMPLATE.md new file mode 100644 index 0000000..d9771f1 --- /dev/null +++ b/vendor/github.com/go-sql-driver/mysql/.github/ISSUE_TEMPLATE.md @@ -0,0 +1,21 @@ +### Issue description +Tell us what should happen and what happens instead + +### Example code +```go +If possible, please enter some example code here to reproduce the issue. +``` + +### Error log +``` +If you have an error log, please paste it here. +``` + +### Configuration +*Driver version (or git SHA):* + +*Go version:* run `go version` in your console + +*Server version:* E.g. MySQL 5.6, MariaDB 10.0.20 + +*Server OS:* E.g. Debian 8.1 (Jessie), Windows 10 diff --git a/vendor/github.com/go-sql-driver/mysql/.github/PULL_REQUEST_TEMPLATE.md b/vendor/github.com/go-sql-driver/mysql/.github/PULL_REQUEST_TEMPLATE.md new file mode 100644 index 0000000..6f5c7eb --- /dev/null +++ b/vendor/github.com/go-sql-driver/mysql/.github/PULL_REQUEST_TEMPLATE.md @@ -0,0 +1,9 @@ +### Description +Please explain the changes you made here. + +### Checklist +- [ ] Code compiles correctly +- [ ] Created tests which fail without the change (if possible) +- [ ] All tests passing +- [ ] Extended the README / documentation, if necessary +- [ ] Added myself / the copyright holder to the AUTHORS file diff --git a/vendor/github.com/go-sql-driver/mysql/.gitignore b/vendor/github.com/go-sql-driver/mysql/.gitignore new file mode 100644 index 0000000..ba8e0cb --- /dev/null +++ b/vendor/github.com/go-sql-driver/mysql/.gitignore @@ -0,0 +1,8 @@ +.DS_Store +.DS_Store? +._* +.Spotlight-V100 +.Trashes +Icon? 
+ehthumbs.db +Thumbs.db diff --git a/vendor/github.com/go-sql-driver/mysql/.travis.yml b/vendor/github.com/go-sql-driver/mysql/.travis.yml new file mode 100644 index 0000000..c1cc10a --- /dev/null +++ b/vendor/github.com/go-sql-driver/mysql/.travis.yml @@ -0,0 +1,13 @@ +sudo: false +language: go +go: + - 1.2 + - 1.3 + - 1.4 + - 1.5 + - 1.6 + - 1.7 + - tip + +before_script: + - mysql -e 'create database gotest;' diff --git a/vendor/github.com/go-sql-driver/mysql/AUTHORS b/vendor/github.com/go-sql-driver/mysql/AUTHORS new file mode 100644 index 0000000..692c186 --- /dev/null +++ b/vendor/github.com/go-sql-driver/mysql/AUTHORS @@ -0,0 +1,56 @@ +# This is the official list of Go-MySQL-Driver authors for copyright purposes. + +# If you are submitting a patch, please add your name or the name of the +# organization which holds the copyright to this list in alphabetical order. + +# Names should be added to this file as +# Name +# The email address is not required for organizations. +# Please keep the list sorted. + + +# Individual Persons + +Aaron Hopkins +Arne Hormann +Carlos Nieto +Chris Moos +Daniel Nichter +Daniël van Eeden +DisposaBoy +Frederick Mayle +Gustavo Kristic +Hanno Braun +Henri Yandell +Hirotaka Yamamoto +INADA Naoki +James Harr +Jian Zhen +Joshua Prunier +Julien Lefevre +Julien Schmidt +Kamil Dziedzic +Kevin Malachowski +Lennart Rudolph +Leonardo YongUk Kim +Luca Looz +Lucas Liu +Luke Scott +Michael Woolnough +Nicola Peduzzi +Olivier Mengué +Paul Bonser +Runrioter Wung +Soroush Pour +Stan Putrya +Stanley Gunawan +Xiangyu Hu +Xiaobing Jiang +Xiuming Chen +Zhenye Xie + +# Organizations + +Barracuda Networks, Inc. +Google Inc. +Stripe Inc. diff --git a/vendor/github.com/go-sql-driver/mysql/CHANGELOG.md b/vendor/github.com/go-sql-driver/mysql/CHANGELOG.md new file mode 100644 index 0000000..6bcad7e --- /dev/null +++ b/vendor/github.com/go-sql-driver/mysql/CHANGELOG.md @@ -0,0 +1,119 @@ +## Version 1.3 (2016-12-01) + +Changes: + + - Go 1.1 is no longer supported + - Use decimals fields in MySQL to format time types (#249) + - Buffer optimizations (#269) + - TLS ServerName defaults to the host (#283) + - Refactoring (#400, #410, #437) + - Adjusted documentation for second generation CloudSQL (#485) + - Documented DSN system var quoting rules (#502) + - Made statement.Close() calls idempotent to avoid errors in Go 1.6+ (#512) + +New Features: + + - Enable microsecond resolution on TIME, DATETIME and TIMESTAMP (#249) + - Support for returning table alias on Columns() (#289, #359, #382) + - Placeholder interpolation, can be actived with the DSN parameter `interpolateParams=true` (#309, #318, #490) + - Support for uint64 parameters with high bit set (#332, #345) + - Cleartext authentication plugin support (#327) + - Exported ParseDSN function and the Config struct (#403, #419, #429) + - Read / Write timeouts (#401) + - Support for JSON field type (#414) + - Support for multi-statements and multi-results (#411, #431) + - DSN parameter to set the driver-side max_allowed_packet value manually (#489) + - Native password authentication plugin support (#494, #524) + +Bugfixes: + + - Fixed handling of queries without columns and rows (#255) + - Fixed a panic when SetKeepAlive() failed (#298) + - Handle ERR packets while reading rows (#321) + - Fixed reading NULL length-encoded integers in MySQL 5.6+ (#349) + - Fixed absolute paths support in LOAD LOCAL DATA INFILE (#356) + - Actually zero out bytes in handshake response (#378) + - Fixed race condition in registering LOAD DATA INFILE handler (#383) 
+ - Fixed tests with MySQL 5.7.9+ (#380) + - QueryUnescape TLS config names (#397) + - Fixed "broken pipe" error by writing to closed socket (#390) + - Fixed LOAD LOCAL DATA INFILE buffering (#424) + - Fixed parsing of floats into float64 when placeholders are used (#434) + - Fixed DSN tests with Go 1.7+ (#459) + - Handle ERR packets while waiting for EOF (#473) + - Invalidate connection on error while discarding additional results (#513) + - Allow terminating packets of length 0 (#516) + + +## Version 1.2 (2014-06-03) + +Changes: + + - We switched back to a "rolling release". `go get` installs the current master branch again + - Version v1 of the driver will not be maintained anymore. Go 1.0 is no longer supported by this driver + - Exported errors to allow easy checking from application code + - Enabled TCP Keepalives on TCP connections + - Optimized INFILE handling (better buffer size calculation, lazy init, ...) + - The DSN parser also checks for a missing separating slash + - Faster binary date / datetime to string formatting + - Also exported the MySQLWarning type + - mysqlConn.Close returns the first error encountered instead of ignoring all errors + - writePacket() automatically writes the packet size to the header + - readPacket() uses an iterative approach instead of the recursive approach to merge splitted packets + +New Features: + + - `RegisterDial` allows the usage of a custom dial function to establish the network connection + - Setting the connection collation is possible with the `collation` DSN parameter. This parameter should be preferred over the `charset` parameter + - Logging of critical errors is configurable with `SetLogger` + - Google CloudSQL support + +Bugfixes: + + - Allow more than 32 parameters in prepared statements + - Various old_password fixes + - Fixed TestConcurrent test to pass Go's race detection + - Fixed appendLengthEncodedInteger for large numbers + - Renamed readLengthEnodedString to readLengthEncodedString and skipLengthEnodedString to skipLengthEncodedString (fixed typo) + + +## Version 1.1 (2013-11-02) + +Changes: + + - Go-MySQL-Driver now requires Go 1.1 + - Connections now use the collation `utf8_general_ci` by default. Adding `&charset=UTF8` to the DSN should not be necessary anymore + - Made closing rows and connections error tolerant. This allows for example deferring rows.Close() without checking for errors + - `[]byte(nil)` is now treated as a NULL value. Before, it was treated like an empty string / `[]byte("")` + - DSN parameter values must now be url.QueryEscape'ed. This allows text values to contain special characters, such as '&'. + - Use the IO buffer also for writing. This results in zero allocations (by the driver) for most queries + - Optimized the buffer for reading + - stmt.Query now caches column metadata + - New Logo + - Changed the copyright header to include all contributors + - Improved the LOAD INFILE documentation + - The driver struct is now exported to make the driver directly accessible + - Refactored the driver tests + - Added more benchmarks and moved all to a separate file + - Other small refactoring + +New Features: + + - Added *old_passwords* support: Required in some cases, but must be enabled by adding `allowOldPasswords=true` to the DSN since it is insecure + - Added a `clientFoundRows` parameter: Return the number of matching rows instead of the number of rows changed on UPDATEs + - Added TLS/SSL support: Use a TLS/SSL encrypted connection to the server. 
Custom TLS configs can be registered and used + +Bugfixes: + + - Fixed MySQL 4.1 support: MySQL 4.1 sends packets with lengths which differ from the specification + - Convert to DB timezone when inserting `time.Time` + - Splitted packets (more than 16MB) are now merged correctly + - Fixed false positive `io.EOF` errors when the data was fully read + - Avoid panics on reuse of closed connections + - Fixed empty string producing false nil values + - Fixed sign byte for positive TIME fields + + +## Version 1.0 (2013-05-14) + +Initial Release diff --git a/vendor/github.com/go-sql-driver/mysql/CONTRIBUTING.md b/vendor/github.com/go-sql-driver/mysql/CONTRIBUTING.md new file mode 100644 index 0000000..8fe16bc --- /dev/null +++ b/vendor/github.com/go-sql-driver/mysql/CONTRIBUTING.md @@ -0,0 +1,23 @@ +# Contributing Guidelines + +## Reporting Issues + +Before creating a new Issue, please check first if a similar Issue [already exists](https://github.com/go-sql-driver/mysql/issues?state=open) or was [recently closed](https://github.com/go-sql-driver/mysql/issues?direction=desc&page=1&sort=updated&state=closed). + +## Contributing Code + +By contributing to this project, you share your code under the Mozilla Public License 2, as specified in the LICENSE file. +Don't forget to add yourself to the AUTHORS file. + +### Code Review + +Everyone is invited to review and comment on pull requests. +If it looks fine to you, comment with "LGTM" (Looks good to me). + +If changes are required, notice the reviewers with "PTAL" (Please take another look) after committing the fixes. + +Before merging the Pull Request, at least one [team member](https://github.com/go-sql-driver?tab=members) must have commented with "LGTM". + +## Development Ideas + +If you are looking for ideas for code contributions, please check our [Development Ideas](https://github.com/go-sql-driver/mysql/wiki/Development-Ideas) Wiki page. diff --git a/vendor/github.com/go-sql-driver/mysql/LICENSE b/vendor/github.com/go-sql-driver/mysql/LICENSE new file mode 100644 index 0000000..14e2f77 --- /dev/null +++ b/vendor/github.com/go-sql-driver/mysql/LICENSE @@ -0,0 +1,373 @@ +Mozilla Public License Version 2.0 +================================== + +1. Definitions +-------------- + +1.1. "Contributor" + means each individual or legal entity that creates, contributes to + the creation of, or owns Covered Software. + +1.2. "Contributor Version" + means the combination of the Contributions of others (if any) used + by a Contributor and that particular Contributor's Contribution. + +1.3. "Contribution" + means Covered Software of a particular Contributor. + +1.4. "Covered Software" + means Source Code Form to which the initial Contributor has attached + the notice in Exhibit A, the Executable Form of such Source Code + Form, and Modifications of such Source Code Form, in each case + including portions thereof. + +1.5. "Incompatible With Secondary Licenses" + means + + (a) that the initial Contributor has attached the notice described + in Exhibit B to the Covered Software; or + + (b) that the Covered Software was made available under the terms of + version 1.1 or earlier of the License, but not also under the + terms of a Secondary License. + +1.6. "Executable Form" + means any form of the work other than Source Code Form. + +1.7. "Larger Work" + means a work that combines Covered Software with other material, in + a separate file or files, that is not Covered Software. + +1.8. "License" + means this document. + +1.9. 
"Licensable" + means having the right to grant, to the maximum extent possible, + whether at the time of the initial grant or subsequently, any and + all of the rights conveyed by this License. + +1.10. "Modifications" + means any of the following: + + (a) any file in Source Code Form that results from an addition to, + deletion from, or modification of the contents of Covered + Software; or + + (b) any new file in Source Code Form that contains any Covered + Software. + +1.11. "Patent Claims" of a Contributor + means any patent claim(s), including without limitation, method, + process, and apparatus claims, in any patent Licensable by such + Contributor that would be infringed, but for the grant of the + License, by the making, using, selling, offering for sale, having + made, import, or transfer of either its Contributions or its + Contributor Version. + +1.12. "Secondary License" + means either the GNU General Public License, Version 2.0, the GNU + Lesser General Public License, Version 2.1, the GNU Affero General + Public License, Version 3.0, or any later versions of those + licenses. + +1.13. "Source Code Form" + means the form of the work preferred for making modifications. + +1.14. "You" (or "Your") + means an individual or a legal entity exercising rights under this + License. For legal entities, "You" includes any entity that + controls, is controlled by, or is under common control with You. For + purposes of this definition, "control" means (a) the power, direct + or indirect, to cause the direction or management of such entity, + whether by contract or otherwise, or (b) ownership of more than + fifty percent (50%) of the outstanding shares or beneficial + ownership of such entity. + +2. License Grants and Conditions +-------------------------------- + +2.1. Grants + +Each Contributor hereby grants You a world-wide, royalty-free, +non-exclusive license: + +(a) under intellectual property rights (other than patent or trademark) + Licensable by such Contributor to use, reproduce, make available, + modify, display, perform, distribute, and otherwise exploit its + Contributions, either on an unmodified basis, with Modifications, or + as part of a Larger Work; and + +(b) under Patent Claims of such Contributor to make, use, sell, offer + for sale, have made, import, and otherwise transfer either its + Contributions or its Contributor Version. + +2.2. Effective Date + +The licenses granted in Section 2.1 with respect to any Contribution +become effective for each Contribution on the date the Contributor first +distributes such Contribution. + +2.3. Limitations on Grant Scope + +The licenses granted in this Section 2 are the only rights granted under +this License. No additional rights or licenses will be implied from the +distribution or licensing of Covered Software under this License. +Notwithstanding Section 2.1(b) above, no patent license is granted by a +Contributor: + +(a) for any code that a Contributor has removed from Covered Software; + or + +(b) for infringements caused by: (i) Your and any other third party's + modifications of Covered Software, or (ii) the combination of its + Contributions with other software (except as part of its Contributor + Version); or + +(c) under Patent Claims infringed by Covered Software in the absence of + its Contributions. + +This License does not grant any rights in the trademarks, service marks, +or logos of any Contributor (except as may be necessary to comply with +the notice requirements in Section 3.4). + +2.4. 
Subsequent Licenses + +No Contributor makes additional grants as a result of Your choice to +distribute the Covered Software under a subsequent version of this +License (see Section 10.2) or under the terms of a Secondary License (if +permitted under the terms of Section 3.3). + +2.5. Representation + +Each Contributor represents that the Contributor believes its +Contributions are its original creation(s) or it has sufficient rights +to grant the rights to its Contributions conveyed by this License. + +2.6. Fair Use + +This License is not intended to limit any rights You have under +applicable copyright doctrines of fair use, fair dealing, or other +equivalents. + +2.7. Conditions + +Sections 3.1, 3.2, 3.3, and 3.4 are conditions of the licenses granted +in Section 2.1. + +3. Responsibilities +------------------- + +3.1. Distribution of Source Form + +All distribution of Covered Software in Source Code Form, including any +Modifications that You create or to which You contribute, must be under +the terms of this License. You must inform recipients that the Source +Code Form of the Covered Software is governed by the terms of this +License, and how they can obtain a copy of this License. You may not +attempt to alter or restrict the recipients' rights in the Source Code +Form. + +3.2. Distribution of Executable Form + +If You distribute Covered Software in Executable Form then: + +(a) such Covered Software must also be made available in Source Code + Form, as described in Section 3.1, and You must inform recipients of + the Executable Form how they can obtain a copy of such Source Code + Form by reasonable means in a timely manner, at a charge no more + than the cost of distribution to the recipient; and + +(b) You may distribute such Executable Form under the terms of this + License, or sublicense it under different terms, provided that the + license for the Executable Form does not attempt to limit or alter + the recipients' rights in the Source Code Form under this License. + +3.3. Distribution of a Larger Work + +You may create and distribute a Larger Work under terms of Your choice, +provided that You also comply with the requirements of this License for +the Covered Software. If the Larger Work is a combination of Covered +Software with a work governed by one or more Secondary Licenses, and the +Covered Software is not Incompatible With Secondary Licenses, this +License permits You to additionally distribute such Covered Software +under the terms of such Secondary License(s), so that the recipient of +the Larger Work may, at their option, further distribute the Covered +Software under the terms of either this License or such Secondary +License(s). + +3.4. Notices + +You may not remove or alter the substance of any license notices +(including copyright notices, patent notices, disclaimers of warranty, +or limitations of liability) contained within the Source Code Form of +the Covered Software, except that You may alter any license notices to +the extent required to remedy known factual inaccuracies. + +3.5. Application of Additional Terms + +You may choose to offer, and to charge a fee for, warranty, support, +indemnity or liability obligations to one or more recipients of Covered +Software. However, You may do so only on Your own behalf, and not on +behalf of any Contributor. 
You must make it absolutely clear that any +such warranty, support, indemnity, or liability obligation is offered by +You alone, and You hereby agree to indemnify every Contributor for any +liability incurred by such Contributor as a result of warranty, support, +indemnity or liability terms You offer. You may include additional +disclaimers of warranty and limitations of liability specific to any +jurisdiction. + +4. Inability to Comply Due to Statute or Regulation +--------------------------------------------------- + +If it is impossible for You to comply with any of the terms of this +License with respect to some or all of the Covered Software due to +statute, judicial order, or regulation then You must: (a) comply with +the terms of this License to the maximum extent possible; and (b) +describe the limitations and the code they affect. Such description must +be placed in a text file included with all distributions of the Covered +Software under this License. Except to the extent prohibited by statute +or regulation, such description must be sufficiently detailed for a +recipient of ordinary skill to be able to understand it. + +5. Termination +-------------- + +5.1. The rights granted under this License will terminate automatically +if You fail to comply with any of its terms. However, if You become +compliant, then the rights granted under this License from a particular +Contributor are reinstated (a) provisionally, unless and until such +Contributor explicitly and finally terminates Your grants, and (b) on an +ongoing basis, if such Contributor fails to notify You of the +non-compliance by some reasonable means prior to 60 days after You have +come back into compliance. Moreover, Your grants from a particular +Contributor are reinstated on an ongoing basis if such Contributor +notifies You of the non-compliance by some reasonable means, this is the +first time You have received notice of non-compliance with this License +from such Contributor, and You become compliant prior to 30 days after +Your receipt of the notice. + +5.2. If You initiate litigation against any entity by asserting a patent +infringement claim (excluding declaratory judgment actions, +counter-claims, and cross-claims) alleging that a Contributor Version +directly or indirectly infringes any patent, then the rights granted to +You by any and all Contributors for the Covered Software under Section +2.1 of this License shall terminate. + +5.3. In the event of termination under Sections 5.1 or 5.2 above, all +end user license agreements (excluding distributors and resellers) which +have been validly granted by You or Your distributors under this License +prior to termination shall survive termination. + +************************************************************************ +* * +* 6. Disclaimer of Warranty * +* ------------------------- * +* * +* Covered Software is provided under this License on an "as is" * +* basis, without warranty of any kind, either expressed, implied, or * +* statutory, including, without limitation, warranties that the * +* Covered Software is free of defects, merchantable, fit for a * +* particular purpose or non-infringing. The entire risk as to the * +* quality and performance of the Covered Software is with You. * +* Should any Covered Software prove defective in any respect, You * +* (not any Contributor) assume the cost of any necessary servicing, * +* repair, or correction. This disclaimer of warranty constitutes an * +* essential part of this License. 
No use of any Covered Software is * +* authorized under this License except under this disclaimer. * +* * +************************************************************************ + +************************************************************************ +* * +* 7. Limitation of Liability * +* -------------------------- * +* * +* Under no circumstances and under no legal theory, whether tort * +* (including negligence), contract, or otherwise, shall any * +* Contributor, or anyone who distributes Covered Software as * +* permitted above, be liable to You for any direct, indirect, * +* special, incidental, or consequential damages of any character * +* including, without limitation, damages for lost profits, loss of * +* goodwill, work stoppage, computer failure or malfunction, or any * +* and all other commercial damages or losses, even if such party * +* shall have been informed of the possibility of such damages. This * +* limitation of liability shall not apply to liability for death or * +* personal injury resulting from such party's negligence to the * +* extent applicable law prohibits such limitation. Some * +* jurisdictions do not allow the exclusion or limitation of * +* incidental or consequential damages, so this exclusion and * +* limitation may not apply to You. * +* * +************************************************************************ + +8. Litigation +------------- + +Any litigation relating to this License may be brought only in the +courts of a jurisdiction where the defendant maintains its principal +place of business and such litigation shall be governed by laws of that +jurisdiction, without reference to its conflict-of-law provisions. +Nothing in this Section shall prevent a party's ability to bring +cross-claims or counter-claims. + +9. Miscellaneous +---------------- + +This License represents the complete agreement concerning the subject +matter hereof. If any provision of this License is held to be +unenforceable, such provision shall be reformed only to the extent +necessary to make it enforceable. Any law or regulation which provides +that the language of a contract shall be construed against the drafter +shall not be used to construe this License against a Contributor. + +10. Versions of the License +--------------------------- + +10.1. New Versions + +Mozilla Foundation is the license steward. Except as provided in Section +10.3, no one other than the license steward has the right to modify or +publish new versions of this License. Each version will be given a +distinguishing version number. + +10.2. Effect of New Versions + +You may distribute the Covered Software under the terms of the version +of the License under which You originally received the Covered Software, +or under the terms of any subsequent version published by the license +steward. + +10.3. Modified Versions + +If you create software not governed by this License, and you want to +create a new license for such software, you may create and use a +modified version of this License if you rename the license and remove +any references to the name of the license steward (except to note that +such modified license differs from this License). + +10.4. Distributing Source Code Form that is Incompatible With Secondary +Licenses + +If You choose to distribute Source Code Form that is Incompatible With +Secondary Licenses under the terms of this version of the License, the +notice described in Exhibit B of this License must be attached. 
+ +Exhibit A - Source Code Form License Notice +------------------------------------------- + + This Source Code Form is subject to the terms of the Mozilla Public + License, v. 2.0. If a copy of the MPL was not distributed with this + file, You can obtain one at http://mozilla.org/MPL/2.0/. + +If it is not possible or desirable to put the notice in a particular +file, then You may include the notice in a location (such as a LICENSE +file in a relevant directory) where a recipient would be likely to look +for such a notice. + +You may add additional accurate notices of copyright ownership. + +Exhibit B - "Incompatible With Secondary Licenses" Notice +--------------------------------------------------------- + + This Source Code Form is "Incompatible With Secondary Licenses", as + defined by the Mozilla Public License, v. 2.0. diff --git a/vendor/github.com/go-sql-driver/mysql/README.md b/vendor/github.com/go-sql-driver/mysql/README.md new file mode 100644 index 0000000..a16012f --- /dev/null +++ b/vendor/github.com/go-sql-driver/mysql/README.md @@ -0,0 +1,443 @@ +# Go-MySQL-Driver + +A MySQL-Driver for Go's [database/sql](http://golang.org/pkg/database/sql) package + +![Go-MySQL-Driver logo](https://raw.github.com/wiki/go-sql-driver/mysql/gomysql_m.png "Golang Gopher holding the MySQL Dolphin") + +--------------------------------------- + * [Features](#features) + * [Requirements](#requirements) + * [Installation](#installation) + * [Usage](#usage) + * [DSN (Data Source Name)](#dsn-data-source-name) + * [Password](#password) + * [Protocol](#protocol) + * [Address](#address) + * [Parameters](#parameters) + * [Examples](#examples) + * [LOAD DATA LOCAL INFILE support](#load-data-local-infile-support) + * [time.Time support](#timetime-support) + * [Unicode support](#unicode-support) + * [Testing / Development](#testing--development) + * [License](#license) + +--------------------------------------- + +## Features + * Lightweight and [fast](https://github.com/go-sql-driver/sql-benchmark "golang MySQL-Driver performance") + * Native Go implementation. No C-bindings, just pure Go + * Connections over TCP/IPv4, TCP/IPv6, Unix domain sockets or [custom protocols](http://godoc.org/github.com/go-sql-driver/mysql#DialFunc) + * Automatic handling of broken connections + * Automatic Connection Pooling *(by database/sql package)* + * Supports queries larger than 16MB + * Full [`sql.RawBytes`](http://golang.org/pkg/database/sql/#RawBytes) support. + * Intelligent `LONG DATA` handling in prepared statements + * Secure `LOAD DATA LOCAL INFILE` support with file Whitelisting and `io.Reader` support + * Optional `time.Time` parsing + * Optional placeholder interpolation + +## Requirements + * Go 1.2 or higher + * MySQL (4.1+), MariaDB, Percona Server, Google CloudSQL or Sphinx (2.2.3+) + +--------------------------------------- + +## Installation +Simple install the package to your [$GOPATH](http://code.google.com/p/go-wiki/wiki/GOPATH "GOPATH") with the [go tool](http://golang.org/cmd/go/ "go command") from shell: +```bash +$ go get github.com/go-sql-driver/mysql +``` +Make sure [Git is installed](http://git-scm.com/downloads) on your machine and in your system's `PATH`. + +## Usage +_Go MySQL Driver_ is an implementation of Go's `database/sql/driver` interface. You only need to import the driver and can use the full [`database/sql`](http://golang.org/pkg/database/sql) API then. 
+ +Use `mysql` as `driverName` and a valid [DSN](#dsn-data-source-name) as `dataSourceName`: +```go +import "database/sql" +import _ "github.com/go-sql-driver/mysql" + +db, err := sql.Open("mysql", "user:password@/dbname") +``` + +[Examples are available in our Wiki](https://github.com/go-sql-driver/mysql/wiki/Examples "Go-MySQL-Driver Examples"). + + +### DSN (Data Source Name) + +The Data Source Name has a common format, like e.g. [PEAR DB](http://pear.php.net/manual/en/package.database.db.intro-dsn.php) uses it, but without type-prefix (optional parts marked by squared brackets): +``` +[username[:password]@][protocol[(address)]]/dbname[?param1=value1&...¶mN=valueN] +``` + +A DSN in its fullest form: +``` +username:password@protocol(address)/dbname?param=value +``` + +Except for the databasename, all values are optional. So the minimal DSN is: +``` +/dbname +``` + +If you do not want to preselect a database, leave `dbname` empty: +``` +/ +``` +This has the same effect as an empty DSN string: +``` + +``` + +Alternatively, [Config.FormatDSN](https://godoc.org/github.com/go-sql-driver/mysql#Config.FormatDSN) can be used to create a DSN string by filling a struct. + +#### Password +Passwords can consist of any character. Escaping is **not** necessary. + +#### Protocol +See [net.Dial](http://golang.org/pkg/net/#Dial) for more information which networks are available. +In general you should use an Unix domain socket if available and TCP otherwise for best performance. + +#### Address +For TCP and UDP networks, addresses have the form `host:port`. +If `host` is a literal IPv6 address, it must be enclosed in square brackets. +The functions [net.JoinHostPort](http://golang.org/pkg/net/#JoinHostPort) and [net.SplitHostPort](http://golang.org/pkg/net/#SplitHostPort) manipulate addresses in this form. + +For Unix domain sockets the address is the absolute path to the MySQL-Server-socket, e.g. `/var/run/mysqld/mysqld.sock` or `/tmp/mysql.sock`. + +#### Parameters +*Parameters are case-sensitive!* + +Notice that any of `true`, `TRUE`, `True` or `1` is accepted to stand for a true boolean value. Not surprisingly, false can be specified as any of: `false`, `FALSE`, `False` or `0`. + +##### `allowAllFiles` + +``` +Type: bool +Valid Values: true, false +Default: false +``` + +`allowAllFiles=true` disables the file Whitelist for `LOAD DATA LOCAL INFILE` and allows *all* files. +[*Might be insecure!*](http://dev.mysql.com/doc/refman/5.7/en/load-data-local.html) + +##### `allowCleartextPasswords` + +``` +Type: bool +Valid Values: true, false +Default: false +``` + +`allowCleartextPasswords=true` allows using the [cleartext client side plugin](http://dev.mysql.com/doc/en/cleartext-authentication-plugin.html) if required by an account, such as one defined with the [PAM authentication plugin](http://dev.mysql.com/doc/en/pam-authentication-plugin.html). Sending passwords in clear text may be a security problem in some configurations. To avoid problems if there is any possibility that the password would be intercepted, clients should connect to MySQL Server using a method that protects the password. Possibilities include [TLS / SSL](#tls), IPsec, or a private network. + +##### `allowNativePasswords` + +``` +Type: bool +Valid Values: true, false +Default: false +``` +`allowNativePasswords=true` allows the usage of the mysql native password method. 
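As a rough illustration of how the boolean parameters above (for example `allowNativePasswords`) end up in a DSN, the sketch below opens a connection with an explicit parameter string. The address, credentials and database name are placeholders, not values taken from this repository or the driver's documentation:

```go
package main

import (
	"database/sql"
	"log"

	_ "github.com/go-sql-driver/mysql"
)

func main() {
	// Placeholder credentials, address and database name; the parameters are documented above.
	dsn := "user:password@tcp(127.0.0.1:3306)/dbname?allowNativePasswords=true&collation=utf8_general_ci"

	db, err := sql.Open("mysql", dsn)
	if err != nil {
		log.Fatal(err)
	}
	defer db.Close()

	// sql.Open does not connect yet; Ping forces a round trip so DSN and server problems surface early.
	if err := db.Ping(); err != nil {
		log.Fatal(err)
	}
}
```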
+ +##### `allowOldPasswords` + +``` +Type: bool +Valid Values: true, false +Default: false +``` +`allowOldPasswords=true` allows the usage of the insecure old password method. This should be avoided, but is necessary in some cases. See also [the old_passwords wiki page](https://github.com/go-sql-driver/mysql/wiki/old_passwords). + +##### `charset` + +``` +Type: string +Valid Values: +Default: none +``` + +Sets the charset used for client-server interaction (`"SET NAMES "`). If multiple charsets are set (separated by a comma), the following charset is used if setting the charset failes. This enables for example support for `utf8mb4` ([introduced in MySQL 5.5.3](http://dev.mysql.com/doc/refman/5.5/en/charset-unicode-utf8mb4.html)) with fallback to `utf8` for older servers (`charset=utf8mb4,utf8`). + +Usage of the `charset` parameter is discouraged because it issues additional queries to the server. +Unless you need the fallback behavior, please use `collation` instead. + +##### `collation` + +``` +Type: string +Valid Values: +Default: utf8_general_ci +``` + +Sets the collation used for client-server interaction on connection. In contrast to `charset`, `collation` does not issue additional queries. If the specified collation is unavailable on the target server, the connection will fail. + +A list of valid charsets for a server is retrievable with `SHOW COLLATION`. + +##### `clientFoundRows` + +``` +Type: bool +Valid Values: true, false +Default: false +``` + +`clientFoundRows=true` causes an UPDATE to return the number of matching rows instead of the number of rows changed. + +##### `columnsWithAlias` + +``` +Type: bool +Valid Values: true, false +Default: false +``` + +When `columnsWithAlias` is true, calls to `sql.Rows.Columns()` will return the table alias and the column name separated by a dot. For example: + +``` +SELECT u.id FROM users as u +``` + +will return `u.id` instead of just `id` if `columnsWithAlias=true`. + +##### `interpolateParams` + +``` +Type: bool +Valid Values: true, false +Default: false +``` + +If `interpolateParams` is true, placeholders (`?`) in calls to `db.Query()` and `db.Exec()` are interpolated into a single query string with given parameters. This reduces the number of roundtrips, since the driver has to prepare a statement, execute it with given parameters and close the statement again with `interpolateParams=false`. + +*This can not be used together with the multibyte encodings BIG5, CP932, GB2312, GBK or SJIS. These are blacklisted as they may [introduce a SQL injection vulnerability](http://stackoverflow.com/a/12118602/3430118)!* + +##### `loc` + +``` +Type: string +Valid Values: +Default: UTC +``` + +Sets the location for time.Time values (when using `parseTime=true`). *"Local"* sets the system's location. See [time.LoadLocation](http://golang.org/pkg/time/#LoadLocation) for details. + +Note that this sets the location for time.Time values but does not change MySQL's [time_zone setting](https://dev.mysql.com/doc/refman/5.5/en/time-zone-support.html). For that see the [time_zone system variable](#system-variables), which can also be set as a DSN parameter. + +Please keep in mind, that param values must be [url.QueryEscape](http://golang.org/pkg/net/url/#QueryEscape)'ed. Alternatively you can manually replace the `/` with `%2F`. For example `US/Pacific` would be `loc=US%2FPacific`. + +##### `maxAllowedPacket` +``` +Type: decimal number +Default: 0 +``` + +Max packet size allowed in bytes. 
Use `maxAllowedPacket=0` to automatically fetch the `max_allowed_packet` variable from server. + +##### `multiStatements` + +``` +Type: bool +Valid Values: true, false +Default: false +``` + +Allow multiple statements in one query. While this allows batch queries, it also greatly increases the risk of SQL injections. Only the result of the first query is returned, all other results are silently discarded. + +When `multiStatements` is used, `?` parameters must only be used in the first statement. + +##### `parseTime` + +``` +Type: bool +Valid Values: true, false +Default: false +``` + +`parseTime=true` changes the output type of `DATE` and `DATETIME` values to `time.Time` instead of `[]byte` / `string` + + +##### `readTimeout` + +``` +Type: decimal number +Default: 0 +``` + +I/O read timeout. The value must be a decimal number with an unit suffix ( *"ms"*, *"s"*, *"m"*, *"h"* ), such as *"30s"*, *"0.5m"* or *"1m30s"*. + +##### `strict` + +``` +Type: bool +Valid Values: true, false +Default: false +``` + +`strict=true` enables a driver-side strict mode in which MySQL warnings are treated as errors. This mode should not be used in production as it may lead to data corruption in certain situations. + +A server-side strict mode, which is safe for production use, can be set via the [`sql_mode`](https://dev.mysql.com/doc/refman/5.7/en/sql-mode.html) system variable. + +By default MySQL also treats notes as warnings. Use [`sql_notes=false`](http://dev.mysql.com/doc/refman/5.7/en/server-system-variables.html#sysvar_sql_notes) to ignore notes. + +##### `timeout` + +``` +Type: decimal number +Default: OS default +``` + +*Driver* side connection timeout. The value must be a decimal number with an unit suffix ( *"ms"*, *"s"*, *"m"*, *"h"* ), such as *"30s"*, *"0.5m"* or *"1m30s"*. To set a server side timeout, use the parameter [`wait_timeout`](http://dev.mysql.com/doc/refman/5.6/en/server-system-variables.html#sysvar_wait_timeout). + +##### `tls` + +``` +Type: bool / string +Valid Values: true, false, skip-verify, +Default: false +``` + +`tls=true` enables TLS / SSL encrypted connection to the server. Use `skip-verify` if you want to use a self-signed or invalid certificate (server side). Use a custom value registered with [`mysql.RegisterTLSConfig`](http://godoc.org/github.com/go-sql-driver/mysql#RegisterTLSConfig). + +##### `writeTimeout` + +``` +Type: decimal number +Default: 0 +``` + +I/O write timeout. The value must be a decimal number with an unit suffix ( *"ms"*, *"s"*, *"m"*, *"h"* ), such as *"30s"*, *"0.5m"* or *"1m30s"*. + + +##### System Variables + +Any other parameters are interpreted as system variables: + * `=`: `SET =` + * `=`: `SET =` + * `=%27%27`: `SET =''` + +Rules: +* The values for string variables must be quoted with ' +* The values must also be [url.QueryEscape](http://golang.org/pkg/net/url/#QueryEscape)'ed! 
+ (which implies values of string variables must be wrapped with `%27`) + +Examples: + * `autocommit=1`: `SET autocommit=1` + * [`time_zone=%27Europe%2FParis%27`](https://dev.mysql.com/doc/refman/5.5/en/time-zone-support.html): `SET time_zone='Europe/Paris'` + * [`tx_isolation=%27REPEATABLE-READ%27`](https://dev.mysql.com/doc/refman/5.5/en/server-system-variables.html#sysvar_tx_isolation): `SET tx_isolation='REPEATABLE-READ'` + + +#### Examples +``` +user@unix(/path/to/socket)/dbname +``` + +``` +root:pw@unix(/tmp/mysql.sock)/myDatabase?loc=Local +``` + +``` +user:password@tcp(localhost:5555)/dbname?tls=skip-verify&autocommit=true +``` + +Treat warnings as errors by setting the system variable [`sql_mode`](https://dev.mysql.com/doc/refman/5.7/en/sql-mode.html): +``` +user:password@/dbname?sql_mode=TRADITIONAL +``` + +TCP via IPv6: +``` +user:password@tcp([de:ad:be:ef::ca:fe]:80)/dbname?timeout=90s&collation=utf8mb4_unicode_ci +``` + +TCP on a remote host, e.g. Amazon RDS: +``` +id:password@tcp(your-amazonaws-uri.com:3306)/dbname +``` + +Google Cloud SQL on App Engine (First Generation MySQL Server): +``` +user@cloudsql(project-id:instance-name)/dbname +``` + +Google Cloud SQL on App Engine (Second Generation MySQL Server): +``` +user@cloudsql(project-id:regionname:instance-name)/dbname +``` + +TCP using default port (3306) on localhost: +``` +user:password@tcp/dbname?charset=utf8mb4,utf8&sys_var=esc%40ped +``` + +Use the default protocol (tcp) and host (localhost:3306): +``` +user:password@/dbname +``` + +No Database preselected: +``` +user:password@/ +``` + +### `LOAD DATA LOCAL INFILE` support +For this feature you need direct access to the package. Therefore you must change the import path (no `_`): +```go +import "github.com/go-sql-driver/mysql" +``` + +Files must be whitelisted by registering them with `mysql.RegisterLocalFile(filepath)` (recommended) or the Whitelist check must be deactivated by using the DSN parameter `allowAllFiles=true` ([*Might be insecure!*](http://dev.mysql.com/doc/refman/5.7/en/load-data-local.html)). + +To use a `io.Reader` a handler function must be registered with `mysql.RegisterReaderHandler(name, handler)` which returns a `io.Reader` or `io.ReadCloser`. The Reader is available with the filepath `Reader::` then. Choose different names for different handlers and `DeregisterReaderHandler` when you don't need it anymore. + +See the [godoc of Go-MySQL-Driver](http://godoc.org/github.com/go-sql-driver/mysql "golang mysql driver documentation") for details. + + +### `time.Time` support +The default internal output type of MySQL `DATE` and `DATETIME` values is `[]byte` which allows you to scan the value into a `[]byte`, `string` or `sql.RawBytes` variable in your programm. + +However, many want to scan MySQL `DATE` and `DATETIME` values into `time.Time` variables, which is the logical opposite in Go to `DATE` and `DATETIME` in MySQL. You can do that by changing the internal output type from `[]byte` to `time.Time` with the DSN parameter `parseTime=true`. You can set the default [`time.Time` location](http://golang.org/pkg/time/#Location) with the `loc` DSN parameter. + +**Caution:** As of Go 1.1, this makes `time.Time` the only variable type you can scan `DATE` and `DATETIME` values into. This breaks for example [`sql.RawBytes` support](https://github.com/go-sql-driver/mysql/wiki/Examples#rawbytes). 
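To make the `parseTime` behaviour concrete, here is a minimal, hedged sketch that scans a `DATETIME` column into a `time.Time`; the table, column and DSN below are illustrative placeholders only:

```go
package main

import (
	"database/sql"
	"log"
	"time"

	_ "github.com/go-sql-driver/mysql"
)

func main() {
	// parseTime=true makes the driver return DATE/DATETIME values as time.Time.
	db, err := sql.Open("mysql", "user:password@/dbname?parseTime=true&loc=Local")
	if err != nil {
		log.Fatal(err)
	}
	defer db.Close()

	var createdAt time.Time // without parseTime=true this would have to be []byte, string or sql.RawBytes
	if err := db.QueryRow("SELECT created_at FROM events WHERE id = ?", 1).Scan(&createdAt); err != nil {
		log.Fatal(err)
	}
	log.Println("created_at:", createdAt)
}
```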
+ +Alternatively you can use the [`NullTime`](http://godoc.org/github.com/go-sql-driver/mysql#NullTime) type as the scan destination, which works with both `time.Time` and `string` / `[]byte`. + + +### Unicode support +Since version 1.1 Go-MySQL-Driver automatically uses the collation `utf8_general_ci` by default. + +Other collations / charsets can be set using the [`collation`](#collation) DSN parameter. + +Version 1.0 of the driver recommended adding `&charset=utf8` (alias for `SET NAMES utf8`) to the DSN to enable proper UTF-8 support. This is not necessary anymore. The [`collation`](#collation) parameter should be preferred to set another collation / charset than the default. + +See http://dev.mysql.com/doc/refman/5.7/en/charset-unicode.html for more details on MySQL's Unicode support. + + +## Testing / Development +To run the driver tests you may need to adjust the configuration. See the [Testing Wiki-Page](https://github.com/go-sql-driver/mysql/wiki/Testing "Testing") for details. + +Go-MySQL-Driver is not feature-complete yet. Your help is very appreciated. +If you want to contribute, you can work on an [open issue](https://github.com/go-sql-driver/mysql/issues?state=open) or review a [pull request](https://github.com/go-sql-driver/mysql/pulls). + +See the [Contribution Guidelines](https://github.com/go-sql-driver/mysql/blob/master/CONTRIBUTING.md) for details. + +--------------------------------------- + +## License +Go-MySQL-Driver is licensed under the [Mozilla Public License Version 2.0](https://raw.github.com/go-sql-driver/mysql/master/LICENSE) + +Mozilla summarizes the license scope as follows: +> MPL: The copyleft applies to any files containing MPLed code. + + +That means: + * You can **use** the **unchanged** source code both in private and commercially + * When distributing, you **must publish** the source code of any **changed files** licensed under the MPL 2.0 under a) the MPL 2.0 itself or b) a compatible license (e.g. GPL 3.0 or Apache License 2.0) + * You **needn't publish** the source code of your library as long as the files licensed under the MPL 2.0 are **unchanged** + +Please read the [MPL 2.0 FAQ](http://www.mozilla.org/MPL/2.0/FAQ.html) if you have further questions regarding the license. + +You can read the full terms here: [LICENSE](https://raw.github.com/go-sql-driver/mysql/master/LICENSE) + +![Go Gopher and MySQL Dolphin](https://raw.github.com/wiki/go-sql-driver/mysql/go-mysql-driver_m.jpg "Golang Gopher transporting the MySQL Dolphin in a wheelbarrow") + diff --git a/vendor/github.com/go-sql-driver/mysql/appengine.go b/vendor/github.com/go-sql-driver/mysql/appengine.go new file mode 100644 index 0000000..565614e --- /dev/null +++ b/vendor/github.com/go-sql-driver/mysql/appengine.go @@ -0,0 +1,19 @@ +// Go MySQL Driver - A MySQL-Driver for Go's database/sql package +// +// Copyright 2013 The Go-MySQL-Driver Authors. All rights reserved. +// +// This Source Code Form is subject to the terms of the Mozilla Public +// License, v. 2.0. If a copy of the MPL was not distributed with this file, +// You can obtain one at http://mozilla.org/MPL/2.0/. 
+ +// +build appengine + +package mysql + +import ( + "appengine/cloudsql" +) + +func init() { + RegisterDial("cloudsql", cloudsql.Dial) +} diff --git a/vendor/github.com/go-sql-driver/mysql/benchmark_test.go b/vendor/github.com/go-sql-driver/mysql/benchmark_test.go new file mode 100644 index 0000000..7da833a --- /dev/null +++ b/vendor/github.com/go-sql-driver/mysql/benchmark_test.go @@ -0,0 +1,246 @@ +// Go MySQL Driver - A MySQL-Driver for Go's database/sql package +// +// Copyright 2013 The Go-MySQL-Driver Authors. All rights reserved. +// +// This Source Code Form is subject to the terms of the Mozilla Public +// License, v. 2.0. If a copy of the MPL was not distributed with this file, +// You can obtain one at http://mozilla.org/MPL/2.0/. + +package mysql + +import ( + "bytes" + "database/sql" + "database/sql/driver" + "math" + "strings" + "sync" + "sync/atomic" + "testing" + "time" +) + +type TB testing.B + +func (tb *TB) check(err error) { + if err != nil { + tb.Fatal(err) + } +} + +func (tb *TB) checkDB(db *sql.DB, err error) *sql.DB { + tb.check(err) + return db +} + +func (tb *TB) checkRows(rows *sql.Rows, err error) *sql.Rows { + tb.check(err) + return rows +} + +func (tb *TB) checkStmt(stmt *sql.Stmt, err error) *sql.Stmt { + tb.check(err) + return stmt +} + +func initDB(b *testing.B, queries ...string) *sql.DB { + tb := (*TB)(b) + db := tb.checkDB(sql.Open("mysql", dsn)) + for _, query := range queries { + if _, err := db.Exec(query); err != nil { + if w, ok := err.(MySQLWarnings); ok { + b.Logf("warning on %q: %v", query, w) + } else { + b.Fatalf("error on %q: %v", query, err) + } + } + } + return db +} + +const concurrencyLevel = 10 + +func BenchmarkQuery(b *testing.B) { + tb := (*TB)(b) + b.StopTimer() + b.ReportAllocs() + db := initDB(b, + "DROP TABLE IF EXISTS foo", + "CREATE TABLE foo (id INT PRIMARY KEY, val CHAR(50))", + `INSERT INTO foo VALUES (1, "one")`, + `INSERT INTO foo VALUES (2, "two")`, + ) + db.SetMaxIdleConns(concurrencyLevel) + defer db.Close() + + stmt := tb.checkStmt(db.Prepare("SELECT val FROM foo WHERE id=?")) + defer stmt.Close() + + remain := int64(b.N) + var wg sync.WaitGroup + wg.Add(concurrencyLevel) + defer wg.Wait() + b.StartTimer() + + for i := 0; i < concurrencyLevel; i++ { + go func() { + for { + if atomic.AddInt64(&remain, -1) < 0 { + wg.Done() + return + } + + var got string + tb.check(stmt.QueryRow(1).Scan(&got)) + if got != "one" { + b.Errorf("query = %q; want one", got) + wg.Done() + return + } + } + }() + } +} + +func BenchmarkExec(b *testing.B) { + tb := (*TB)(b) + b.StopTimer() + b.ReportAllocs() + db := tb.checkDB(sql.Open("mysql", dsn)) + db.SetMaxIdleConns(concurrencyLevel) + defer db.Close() + + stmt := tb.checkStmt(db.Prepare("DO 1")) + defer stmt.Close() + + remain := int64(b.N) + var wg sync.WaitGroup + wg.Add(concurrencyLevel) + defer wg.Wait() + b.StartTimer() + + for i := 0; i < concurrencyLevel; i++ { + go func() { + for { + if atomic.AddInt64(&remain, -1) < 0 { + wg.Done() + return + } + + if _, err := stmt.Exec(); err != nil { + b.Fatal(err.Error()) + } + } + }() + } +} + +// data, but no db writes +var roundtripSample []byte + +func initRoundtripBenchmarks() ([]byte, int, int) { + if roundtripSample == nil { + roundtripSample = []byte(strings.Repeat("0123456789abcdef", 1024*1024)) + } + return roundtripSample, 16, len(roundtripSample) +} + +func BenchmarkRoundtripTxt(b *testing.B) { + b.StopTimer() + sample, min, max := initRoundtripBenchmarks() + sampleString := string(sample) + b.ReportAllocs() + tb := (*TB)(b) + db := 
tb.checkDB(sql.Open("mysql", dsn)) + defer db.Close() + b.StartTimer() + var result string + for i := 0; i < b.N; i++ { + length := min + i + if length > max { + length = max + } + test := sampleString[0:length] + rows := tb.checkRows(db.Query(`SELECT "` + test + `"`)) + if !rows.Next() { + rows.Close() + b.Fatalf("crashed") + } + err := rows.Scan(&result) + if err != nil { + rows.Close() + b.Fatalf("crashed") + } + if result != test { + rows.Close() + b.Errorf("mismatch") + } + rows.Close() + } +} + +func BenchmarkRoundtripBin(b *testing.B) { + b.StopTimer() + sample, min, max := initRoundtripBenchmarks() + b.ReportAllocs() + tb := (*TB)(b) + db := tb.checkDB(sql.Open("mysql", dsn)) + defer db.Close() + stmt := tb.checkStmt(db.Prepare("SELECT ?")) + defer stmt.Close() + b.StartTimer() + var result sql.RawBytes + for i := 0; i < b.N; i++ { + length := min + i + if length > max { + length = max + } + test := sample[0:length] + rows := tb.checkRows(stmt.Query(test)) + if !rows.Next() { + rows.Close() + b.Fatalf("crashed") + } + err := rows.Scan(&result) + if err != nil { + rows.Close() + b.Fatalf("crashed") + } + if !bytes.Equal(result, test) { + rows.Close() + b.Errorf("mismatch") + } + rows.Close() + } +} + +func BenchmarkInterpolation(b *testing.B) { + mc := &mysqlConn{ + cfg: &Config{ + InterpolateParams: true, + Loc: time.UTC, + }, + maxAllowedPacket: maxPacketSize, + maxWriteSize: maxPacketSize - 1, + buf: newBuffer(nil), + } + + args := []driver.Value{ + int64(42424242), + float64(math.Pi), + false, + time.Unix(1423411542, 807015000), + []byte("bytes containing special chars ' \" \a \x00"), + "string containing special chars ' \" \a \x00", + } + q := "SELECT ?, ?, ?, ?, ?, ?" + + b.ReportAllocs() + b.ResetTimer() + for i := 0; i < b.N; i++ { + _, err := mc.interpolateParams(q, args) + if err != nil { + b.Fatal(err) + } + } +} diff --git a/vendor/github.com/go-sql-driver/mysql/buffer.go b/vendor/github.com/go-sql-driver/mysql/buffer.go new file mode 100644 index 0000000..2001fea --- /dev/null +++ b/vendor/github.com/go-sql-driver/mysql/buffer.go @@ -0,0 +1,147 @@ +// Go MySQL Driver - A MySQL-Driver for Go's database/sql package +// +// Copyright 2013 The Go-MySQL-Driver Authors. All rights reserved. +// +// This Source Code Form is subject to the terms of the Mozilla Public +// License, v. 2.0. If a copy of the MPL was not distributed with this file, +// You can obtain one at http://mozilla.org/MPL/2.0/. + +package mysql + +import ( + "io" + "net" + "time" +) + +const defaultBufSize = 4096 + +// A buffer which is used for both reading and writing. +// This is possible since communication on each connection is synchronous. +// In other words, we can't write and read simultaneously on the same connection. +// The buffer is similar to bufio.Reader / Writer but zero-copy-ish +// Also highly optimized for this particular use case. +type buffer struct { + buf []byte + nc net.Conn + idx int + length int + timeout time.Duration +} + +func newBuffer(nc net.Conn) buffer { + var b [defaultBufSize]byte + return buffer{ + buf: b[:], + nc: nc, + } +} + +// fill reads into the buffer until at least _need_ bytes are in it +func (b *buffer) fill(need int) error { + n := b.length + + // move existing data to the beginning + if n > 0 && b.idx > 0 { + copy(b.buf[0:n], b.buf[b.idx:]) + } + + // grow buffer if necessary + // TODO: let the buffer shrink again at some point + // Maybe keep the org buf slice and swap back? 
+ if need > len(b.buf) { + // Round up to the next multiple of the default size + newBuf := make([]byte, ((need/defaultBufSize)+1)*defaultBufSize) + copy(newBuf, b.buf) + b.buf = newBuf + } + + b.idx = 0 + + for { + if b.timeout > 0 { + if err := b.nc.SetReadDeadline(time.Now().Add(b.timeout)); err != nil { + return err + } + } + + nn, err := b.nc.Read(b.buf[n:]) + n += nn + + switch err { + case nil: + if n < need { + continue + } + b.length = n + return nil + + case io.EOF: + if n >= need { + b.length = n + return nil + } + return io.ErrUnexpectedEOF + + default: + return err + } + } +} + +// returns next N bytes from buffer. +// The returned slice is only guaranteed to be valid until the next read +func (b *buffer) readNext(need int) ([]byte, error) { + if b.length < need { + // refill + if err := b.fill(need); err != nil { + return nil, err + } + } + + offset := b.idx + b.idx += need + b.length -= need + return b.buf[offset:b.idx], nil +} + +// returns a buffer with the requested size. +// If possible, a slice from the existing buffer is returned. +// Otherwise a bigger buffer is made. +// Only one buffer (total) can be used at a time. +func (b *buffer) takeBuffer(length int) []byte { + if b.length > 0 { + return nil + } + + // test (cheap) general case first + if length <= defaultBufSize || length <= cap(b.buf) { + return b.buf[:length] + } + + if length < maxPacketSize { + b.buf = make([]byte, length) + return b.buf + } + return make([]byte, length) +} + +// shortcut which can be used if the requested buffer is guaranteed to be +// smaller than defaultBufSize +// Only one buffer (total) can be used at a time. +func (b *buffer) takeSmallBuffer(length int) []byte { + if b.length == 0 { + return b.buf[:length] + } + return nil +} + +// takeCompleteBuffer returns the complete existing buffer. +// This can be used if the necessary buffer size is unknown. +// Only one buffer (total) can be used at a time. +func (b *buffer) takeCompleteBuffer() []byte { + if b.length == 0 { + return b.buf + } + return nil +} diff --git a/vendor/github.com/go-sql-driver/mysql/collations.go b/vendor/github.com/go-sql-driver/mysql/collations.go new file mode 100644 index 0000000..82079cf --- /dev/null +++ b/vendor/github.com/go-sql-driver/mysql/collations.go @@ -0,0 +1,250 @@ +// Go MySQL Driver - A MySQL-Driver for Go's database/sql package +// +// Copyright 2014 The Go-MySQL-Driver Authors. All rights reserved. +// +// This Source Code Form is subject to the terms of the Mozilla Public +// License, v. 2.0. If a copy of the MPL was not distributed with this file, +// You can obtain one at http://mozilla.org/MPL/2.0/. + +package mysql + +const defaultCollation = "utf8_general_ci" + +// A list of available collations mapped to the internal ID. 
+// To update this map use the following MySQL query: +// SELECT COLLATION_NAME, ID FROM information_schema.COLLATIONS +var collations = map[string]byte{ + "big5_chinese_ci": 1, + "latin2_czech_cs": 2, + "dec8_swedish_ci": 3, + "cp850_general_ci": 4, + "latin1_german1_ci": 5, + "hp8_english_ci": 6, + "koi8r_general_ci": 7, + "latin1_swedish_ci": 8, + "latin2_general_ci": 9, + "swe7_swedish_ci": 10, + "ascii_general_ci": 11, + "ujis_japanese_ci": 12, + "sjis_japanese_ci": 13, + "cp1251_bulgarian_ci": 14, + "latin1_danish_ci": 15, + "hebrew_general_ci": 16, + "tis620_thai_ci": 18, + "euckr_korean_ci": 19, + "latin7_estonian_cs": 20, + "latin2_hungarian_ci": 21, + "koi8u_general_ci": 22, + "cp1251_ukrainian_ci": 23, + "gb2312_chinese_ci": 24, + "greek_general_ci": 25, + "cp1250_general_ci": 26, + "latin2_croatian_ci": 27, + "gbk_chinese_ci": 28, + "cp1257_lithuanian_ci": 29, + "latin5_turkish_ci": 30, + "latin1_german2_ci": 31, + "armscii8_general_ci": 32, + "utf8_general_ci": 33, + "cp1250_czech_cs": 34, + "ucs2_general_ci": 35, + "cp866_general_ci": 36, + "keybcs2_general_ci": 37, + "macce_general_ci": 38, + "macroman_general_ci": 39, + "cp852_general_ci": 40, + "latin7_general_ci": 41, + "latin7_general_cs": 42, + "macce_bin": 43, + "cp1250_croatian_ci": 44, + "utf8mb4_general_ci": 45, + "utf8mb4_bin": 46, + "latin1_bin": 47, + "latin1_general_ci": 48, + "latin1_general_cs": 49, + "cp1251_bin": 50, + "cp1251_general_ci": 51, + "cp1251_general_cs": 52, + "macroman_bin": 53, + "utf16_general_ci": 54, + "utf16_bin": 55, + "utf16le_general_ci": 56, + "cp1256_general_ci": 57, + "cp1257_bin": 58, + "cp1257_general_ci": 59, + "utf32_general_ci": 60, + "utf32_bin": 61, + "utf16le_bin": 62, + "binary": 63, + "armscii8_bin": 64, + "ascii_bin": 65, + "cp1250_bin": 66, + "cp1256_bin": 67, + "cp866_bin": 68, + "dec8_bin": 69, + "greek_bin": 70, + "hebrew_bin": 71, + "hp8_bin": 72, + "keybcs2_bin": 73, + "koi8r_bin": 74, + "koi8u_bin": 75, + "latin2_bin": 77, + "latin5_bin": 78, + "latin7_bin": 79, + "cp850_bin": 80, + "cp852_bin": 81, + "swe7_bin": 82, + "utf8_bin": 83, + "big5_bin": 84, + "euckr_bin": 85, + "gb2312_bin": 86, + "gbk_bin": 87, + "sjis_bin": 88, + "tis620_bin": 89, + "ucs2_bin": 90, + "ujis_bin": 91, + "geostd8_general_ci": 92, + "geostd8_bin": 93, + "latin1_spanish_ci": 94, + "cp932_japanese_ci": 95, + "cp932_bin": 96, + "eucjpms_japanese_ci": 97, + "eucjpms_bin": 98, + "cp1250_polish_ci": 99, + "utf16_unicode_ci": 101, + "utf16_icelandic_ci": 102, + "utf16_latvian_ci": 103, + "utf16_romanian_ci": 104, + "utf16_slovenian_ci": 105, + "utf16_polish_ci": 106, + "utf16_estonian_ci": 107, + "utf16_spanish_ci": 108, + "utf16_swedish_ci": 109, + "utf16_turkish_ci": 110, + "utf16_czech_ci": 111, + "utf16_danish_ci": 112, + "utf16_lithuanian_ci": 113, + "utf16_slovak_ci": 114, + "utf16_spanish2_ci": 115, + "utf16_roman_ci": 116, + "utf16_persian_ci": 117, + "utf16_esperanto_ci": 118, + "utf16_hungarian_ci": 119, + "utf16_sinhala_ci": 120, + "utf16_german2_ci": 121, + "utf16_croatian_ci": 122, + "utf16_unicode_520_ci": 123, + "utf16_vietnamese_ci": 124, + "ucs2_unicode_ci": 128, + "ucs2_icelandic_ci": 129, + "ucs2_latvian_ci": 130, + "ucs2_romanian_ci": 131, + "ucs2_slovenian_ci": 132, + "ucs2_polish_ci": 133, + "ucs2_estonian_ci": 134, + "ucs2_spanish_ci": 135, + "ucs2_swedish_ci": 136, + "ucs2_turkish_ci": 137, + "ucs2_czech_ci": 138, + "ucs2_danish_ci": 139, + "ucs2_lithuanian_ci": 140, + "ucs2_slovak_ci": 141, + "ucs2_spanish2_ci": 142, + "ucs2_roman_ci": 143, + "ucs2_persian_ci": 144, + 
"ucs2_esperanto_ci": 145, + "ucs2_hungarian_ci": 146, + "ucs2_sinhala_ci": 147, + "ucs2_german2_ci": 148, + "ucs2_croatian_ci": 149, + "ucs2_unicode_520_ci": 150, + "ucs2_vietnamese_ci": 151, + "ucs2_general_mysql500_ci": 159, + "utf32_unicode_ci": 160, + "utf32_icelandic_ci": 161, + "utf32_latvian_ci": 162, + "utf32_romanian_ci": 163, + "utf32_slovenian_ci": 164, + "utf32_polish_ci": 165, + "utf32_estonian_ci": 166, + "utf32_spanish_ci": 167, + "utf32_swedish_ci": 168, + "utf32_turkish_ci": 169, + "utf32_czech_ci": 170, + "utf32_danish_ci": 171, + "utf32_lithuanian_ci": 172, + "utf32_slovak_ci": 173, + "utf32_spanish2_ci": 174, + "utf32_roman_ci": 175, + "utf32_persian_ci": 176, + "utf32_esperanto_ci": 177, + "utf32_hungarian_ci": 178, + "utf32_sinhala_ci": 179, + "utf32_german2_ci": 180, + "utf32_croatian_ci": 181, + "utf32_unicode_520_ci": 182, + "utf32_vietnamese_ci": 183, + "utf8_unicode_ci": 192, + "utf8_icelandic_ci": 193, + "utf8_latvian_ci": 194, + "utf8_romanian_ci": 195, + "utf8_slovenian_ci": 196, + "utf8_polish_ci": 197, + "utf8_estonian_ci": 198, + "utf8_spanish_ci": 199, + "utf8_swedish_ci": 200, + "utf8_turkish_ci": 201, + "utf8_czech_ci": 202, + "utf8_danish_ci": 203, + "utf8_lithuanian_ci": 204, + "utf8_slovak_ci": 205, + "utf8_spanish2_ci": 206, + "utf8_roman_ci": 207, + "utf8_persian_ci": 208, + "utf8_esperanto_ci": 209, + "utf8_hungarian_ci": 210, + "utf8_sinhala_ci": 211, + "utf8_german2_ci": 212, + "utf8_croatian_ci": 213, + "utf8_unicode_520_ci": 214, + "utf8_vietnamese_ci": 215, + "utf8_general_mysql500_ci": 223, + "utf8mb4_unicode_ci": 224, + "utf8mb4_icelandic_ci": 225, + "utf8mb4_latvian_ci": 226, + "utf8mb4_romanian_ci": 227, + "utf8mb4_slovenian_ci": 228, + "utf8mb4_polish_ci": 229, + "utf8mb4_estonian_ci": 230, + "utf8mb4_spanish_ci": 231, + "utf8mb4_swedish_ci": 232, + "utf8mb4_turkish_ci": 233, + "utf8mb4_czech_ci": 234, + "utf8mb4_danish_ci": 235, + "utf8mb4_lithuanian_ci": 236, + "utf8mb4_slovak_ci": 237, + "utf8mb4_spanish2_ci": 238, + "utf8mb4_roman_ci": 239, + "utf8mb4_persian_ci": 240, + "utf8mb4_esperanto_ci": 241, + "utf8mb4_hungarian_ci": 242, + "utf8mb4_sinhala_ci": 243, + "utf8mb4_german2_ci": 244, + "utf8mb4_croatian_ci": 245, + "utf8mb4_unicode_520_ci": 246, + "utf8mb4_vietnamese_ci": 247, +} + +// A blacklist of collations which is unsafe to interpolate parameters. +// These multibyte encodings may contains 0x5c (`\`) in their trailing bytes. +var unsafeCollations = map[string]bool{ + "big5_chinese_ci": true, + "sjis_japanese_ci": true, + "gbk_chinese_ci": true, + "big5_bin": true, + "gb2312_bin": true, + "gbk_bin": true, + "sjis_bin": true, + "cp932_japanese_ci": true, + "cp932_bin": true, +} diff --git a/vendor/github.com/go-sql-driver/mysql/connection.go b/vendor/github.com/go-sql-driver/mysql/connection.go new file mode 100644 index 0000000..d82c728 --- /dev/null +++ b/vendor/github.com/go-sql-driver/mysql/connection.go @@ -0,0 +1,377 @@ +// Go MySQL Driver - A MySQL-Driver for Go's database/sql package +// +// Copyright 2012 The Go-MySQL-Driver Authors. All rights reserved. +// +// This Source Code Form is subject to the terms of the Mozilla Public +// License, v. 2.0. If a copy of the MPL was not distributed with this file, +// You can obtain one at http://mozilla.org/MPL/2.0/. 
+ +package mysql + +import ( + "database/sql/driver" + "net" + "strconv" + "strings" + "time" +) + +type mysqlConn struct { + buf buffer + netConn net.Conn + affectedRows uint64 + insertId uint64 + cfg *Config + maxAllowedPacket int + maxWriteSize int + writeTimeout time.Duration + flags clientFlag + status statusFlag + sequence uint8 + parseTime bool + strict bool +} + +// Handles parameters set in DSN after the connection is established +func (mc *mysqlConn) handleParams() (err error) { + for param, val := range mc.cfg.Params { + switch param { + // Charset + case "charset": + charsets := strings.Split(val, ",") + for i := range charsets { + // ignore errors here - a charset may not exist + err = mc.exec("SET NAMES " + charsets[i]) + if err == nil { + break + } + } + if err != nil { + return + } + + // System Vars + default: + err = mc.exec("SET " + param + "=" + val + "") + if err != nil { + return + } + } + } + + return +} + +func (mc *mysqlConn) Begin() (driver.Tx, error) { + if mc.netConn == nil { + errLog.Print(ErrInvalidConn) + return nil, driver.ErrBadConn + } + err := mc.exec("START TRANSACTION") + if err == nil { + return &mysqlTx{mc}, err + } + + return nil, err +} + +func (mc *mysqlConn) Close() (err error) { + // Makes Close idempotent + if mc.netConn != nil { + err = mc.writeCommandPacket(comQuit) + } + + mc.cleanup() + + return +} + +// Closes the network connection and unsets internal variables. Do not call this +// function after successfully authentication, call Close instead. This function +// is called before auth or on auth failure because MySQL will have already +// closed the network connection. +func (mc *mysqlConn) cleanup() { + // Makes cleanup idempotent + if mc.netConn != nil { + if err := mc.netConn.Close(); err != nil { + errLog.Print(err) + } + mc.netConn = nil + } + mc.cfg = nil + mc.buf.nc = nil +} + +func (mc *mysqlConn) Prepare(query string) (driver.Stmt, error) { + if mc.netConn == nil { + errLog.Print(ErrInvalidConn) + return nil, driver.ErrBadConn + } + // Send command + err := mc.writeCommandPacketStr(comStmtPrepare, query) + if err != nil { + return nil, err + } + + stmt := &mysqlStmt{ + mc: mc, + } + + // Read Result + columnCount, err := stmt.readPrepareResultPacket() + if err == nil { + if stmt.paramCount > 0 { + if err = mc.readUntilEOF(); err != nil { + return nil, err + } + } + + if columnCount > 0 { + err = mc.readUntilEOF() + } + } + + return stmt, err +} + +func (mc *mysqlConn) interpolateParams(query string, args []driver.Value) (string, error) { + // Number of ? should be same to len(args) + if strings.Count(query, "?") != len(args) { + return "", driver.ErrSkip + } + + buf := mc.buf.takeCompleteBuffer() + if buf == nil { + // can not take the buffer. Something must be wrong with the connection + errLog.Print(ErrBusyBuffer) + return "", driver.ErrBadConn + } + buf = buf[:0] + argPos := 0 + + for i := 0; i < len(query); i++ { + q := strings.IndexByte(query[i:], '?') + if q == -1 { + buf = append(buf, query[i:]...) + break + } + buf = append(buf, query[i:i+q]...) + i += q + + arg := args[argPos] + argPos++ + + if arg == nil { + buf = append(buf, "NULL"...) + continue + } + + switch v := arg.(type) { + case int64: + buf = strconv.AppendInt(buf, v, 10) + case float64: + buf = strconv.AppendFloat(buf, v, 'g', -1, 64) + case bool: + if v { + buf = append(buf, '1') + } else { + buf = append(buf, '0') + } + case time.Time: + if v.IsZero() { + buf = append(buf, "'0000-00-00'"...) 
+ } else { + v := v.In(mc.cfg.Loc) + v = v.Add(time.Nanosecond * 500) // To round under microsecond + year := v.Year() + year100 := year / 100 + year1 := year % 100 + month := v.Month() + day := v.Day() + hour := v.Hour() + minute := v.Minute() + second := v.Second() + micro := v.Nanosecond() / 1000 + + buf = append(buf, []byte{ + '\'', + digits10[year100], digits01[year100], + digits10[year1], digits01[year1], + '-', + digits10[month], digits01[month], + '-', + digits10[day], digits01[day], + ' ', + digits10[hour], digits01[hour], + ':', + digits10[minute], digits01[minute], + ':', + digits10[second], digits01[second], + }...) + + if micro != 0 { + micro10000 := micro / 10000 + micro100 := micro / 100 % 100 + micro1 := micro % 100 + buf = append(buf, []byte{ + '.', + digits10[micro10000], digits01[micro10000], + digits10[micro100], digits01[micro100], + digits10[micro1], digits01[micro1], + }...) + } + buf = append(buf, '\'') + } + case []byte: + if v == nil { + buf = append(buf, "NULL"...) + } else { + buf = append(buf, "_binary'"...) + if mc.status&statusNoBackslashEscapes == 0 { + buf = escapeBytesBackslash(buf, v) + } else { + buf = escapeBytesQuotes(buf, v) + } + buf = append(buf, '\'') + } + case string: + buf = append(buf, '\'') + if mc.status&statusNoBackslashEscapes == 0 { + buf = escapeStringBackslash(buf, v) + } else { + buf = escapeStringQuotes(buf, v) + } + buf = append(buf, '\'') + default: + return "", driver.ErrSkip + } + + if len(buf)+4 > mc.maxAllowedPacket { + return "", driver.ErrSkip + } + } + if argPos != len(args) { + return "", driver.ErrSkip + } + return string(buf), nil +} + +func (mc *mysqlConn) Exec(query string, args []driver.Value) (driver.Result, error) { + if mc.netConn == nil { + errLog.Print(ErrInvalidConn) + return nil, driver.ErrBadConn + } + if len(args) != 0 { + if !mc.cfg.InterpolateParams { + return nil, driver.ErrSkip + } + // try to interpolate the parameters to save extra roundtrips for preparing and closing a statement + prepared, err := mc.interpolateParams(query, args) + if err != nil { + return nil, err + } + query = prepared + args = nil + } + mc.affectedRows = 0 + mc.insertId = 0 + + err := mc.exec(query) + if err == nil { + return &mysqlResult{ + affectedRows: int64(mc.affectedRows), + insertId: int64(mc.insertId), + }, err + } + return nil, err +} + +// Internal function to execute commands +func (mc *mysqlConn) exec(query string) error { + // Send command + err := mc.writeCommandPacketStr(comQuery, query) + if err != nil { + return err + } + + // Read Result + resLen, err := mc.readResultSetHeaderPacket() + if err == nil && resLen > 0 { + if err = mc.readUntilEOF(); err != nil { + return err + } + + err = mc.readUntilEOF() + } + + return err +} + +func (mc *mysqlConn) Query(query string, args []driver.Value) (driver.Rows, error) { + if mc.netConn == nil { + errLog.Print(ErrInvalidConn) + return nil, driver.ErrBadConn + } + if len(args) != 0 { + if !mc.cfg.InterpolateParams { + return nil, driver.ErrSkip + } + // try client-side prepare to reduce roundtrip + prepared, err := mc.interpolateParams(query, args) + if err != nil { + return nil, err + } + query = prepared + args = nil + } + // Send command + err := mc.writeCommandPacketStr(comQuery, query) + if err == nil { + // Read Result + var resLen int + resLen, err = mc.readResultSetHeaderPacket() + if err == nil { + rows := new(textRows) + rows.mc = mc + + if resLen == 0 { + // no columns, no more data + return emptyRows{}, nil + } + // Columns + rows.columns, err = 
mc.readColumns(resLen) + return rows, err + } + } + return nil, err +} + +// Gets the value of the given MySQL System Variable +// The returned byte slice is only valid until the next read +func (mc *mysqlConn) getSystemVar(name string) ([]byte, error) { + // Send command + if err := mc.writeCommandPacketStr(comQuery, "SELECT @@"+name); err != nil { + return nil, err + } + + // Read Result + resLen, err := mc.readResultSetHeaderPacket() + if err == nil { + rows := new(textRows) + rows.mc = mc + rows.columns = []mysqlField{{fieldType: fieldTypeVarChar}} + + if resLen > 0 { + // Columns + if err := mc.readUntilEOF(); err != nil { + return nil, err + } + } + + dest := make([]driver.Value, resLen) + if err = rows.readRow(dest); err == nil { + return dest[0].([]byte), mc.readUntilEOF() + } + } + return nil, err +} diff --git a/vendor/github.com/go-sql-driver/mysql/connection_test.go b/vendor/github.com/go-sql-driver/mysql/connection_test.go new file mode 100644 index 0000000..65325f1 --- /dev/null +++ b/vendor/github.com/go-sql-driver/mysql/connection_test.go @@ -0,0 +1,67 @@ +// Go MySQL Driver - A MySQL-Driver for Go's database/sql package +// +// Copyright 2016 The Go-MySQL-Driver Authors. All rights reserved. +// +// This Source Code Form is subject to the terms of the Mozilla Public +// License, v. 2.0. If a copy of the MPL was not distributed with this file, +// You can obtain one at http://mozilla.org/MPL/2.0/. + +package mysql + +import ( + "database/sql/driver" + "testing" +) + +func TestInterpolateParams(t *testing.T) { + mc := &mysqlConn{ + buf: newBuffer(nil), + maxAllowedPacket: maxPacketSize, + cfg: &Config{ + InterpolateParams: true, + }, + } + + q, err := mc.interpolateParams("SELECT ?+?", []driver.Value{int64(42), "gopher"}) + if err != nil { + t.Errorf("Expected err=nil, got %#v", err) + return + } + expected := `SELECT 42+'gopher'` + if q != expected { + t.Errorf("Expected: %q\nGot: %q", expected, q) + } +} + +func TestInterpolateParamsTooManyPlaceholders(t *testing.T) { + mc := &mysqlConn{ + buf: newBuffer(nil), + maxAllowedPacket: maxPacketSize, + cfg: &Config{ + InterpolateParams: true, + }, + } + + q, err := mc.interpolateParams("SELECT ?+?", []driver.Value{int64(42)}) + if err != driver.ErrSkip { + t.Errorf("Expected err=driver.ErrSkip, got err=%#v, q=%#v", err, q) + } +} + +// We don't support placeholder in string literal for now. +// https://github.com/go-sql-driver/mysql/pull/490 +func TestInterpolateParamsPlaceholderInString(t *testing.T) { + mc := &mysqlConn{ + buf: newBuffer(nil), + maxAllowedPacket: maxPacketSize, + cfg: &Config{ + InterpolateParams: true, + }, + } + + q, err := mc.interpolateParams("SELECT 'abc?xyz',?", []driver.Value{int64(42)}) + // When InterpolateParams support string literal, this should return `"SELECT 'abc?xyz', 42` + if err != driver.ErrSkip { + t.Errorf("Expected err=driver.ErrSkip, got err=%#v, q=%#v", err, q) + } +} diff --git a/vendor/github.com/go-sql-driver/mysql/const.go b/vendor/github.com/go-sql-driver/mysql/const.go new file mode 100644 index 0000000..88cfff3 --- /dev/null +++ b/vendor/github.com/go-sql-driver/mysql/const.go @@ -0,0 +1,163 @@ +// Go MySQL Driver - A MySQL-Driver for Go's database/sql package +// +// Copyright 2012 The Go-MySQL-Driver Authors. All rights reserved. +// +// This Source Code Form is subject to the terms of the Mozilla Public +// License, v. 2.0. If a copy of the MPL was not distributed with this file, +// You can obtain one at http://mozilla.org/MPL/2.0/. 
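The client-side interpolation implemented in connection.go above (and exercised in connection_test.go) is opt-in. A minimal usage sketch through database/sql, with placeholder DSN, credentials and table: when interpolateParams=true, Exec and Query with arguments expand the `?` placeholders locally and send a single COM_QUERY, skipping the prepare/execute/close round trips.

```go
package main

import (
	"database/sql"
	"log"

	_ "github.com/go-sql-driver/mysql"
)

func main() {
	db, err := sql.Open("mysql", "user:password@tcp(127.0.0.1:3306)/dbname?interpolateParams=true")
	if err != nil {
		log.Fatal(err)
	}
	defer db.Close()

	// Sent as one query after client-side interpolation of the arguments.
	if _, err := db.Exec("INSERT INTO foo VALUES (?, ?)", 1, "one"); err != nil {
		log.Fatal(err)
	}
}
```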
+ +package mysql + +const ( + minProtocolVersion byte = 10 + maxPacketSize = 1<<24 - 1 + timeFormat = "2006-01-02 15:04:05.999999" +) + +// MySQL constants documentation: +// http://dev.mysql.com/doc/internals/en/client-server-protocol.html + +const ( + iOK byte = 0x00 + iLocalInFile byte = 0xfb + iEOF byte = 0xfe + iERR byte = 0xff +) + +// https://dev.mysql.com/doc/internals/en/capability-flags.html#packet-Protocol::CapabilityFlags +type clientFlag uint32 + +const ( + clientLongPassword clientFlag = 1 << iota + clientFoundRows + clientLongFlag + clientConnectWithDB + clientNoSchema + clientCompress + clientODBC + clientLocalFiles + clientIgnoreSpace + clientProtocol41 + clientInteractive + clientSSL + clientIgnoreSIGPIPE + clientTransactions + clientReserved + clientSecureConn + clientMultiStatements + clientMultiResults + clientPSMultiResults + clientPluginAuth + clientConnectAttrs + clientPluginAuthLenEncClientData + clientCanHandleExpiredPasswords + clientSessionTrack + clientDeprecateEOF +) + +const ( + comQuit byte = iota + 1 + comInitDB + comQuery + comFieldList + comCreateDB + comDropDB + comRefresh + comShutdown + comStatistics + comProcessInfo + comConnect + comProcessKill + comDebug + comPing + comTime + comDelayedInsert + comChangeUser + comBinlogDump + comTableDump + comConnectOut + comRegisterSlave + comStmtPrepare + comStmtExecute + comStmtSendLongData + comStmtClose + comStmtReset + comSetOption + comStmtFetch +) + +// https://dev.mysql.com/doc/internals/en/com-query-response.html#packet-Protocol::ColumnType +const ( + fieldTypeDecimal byte = iota + fieldTypeTiny + fieldTypeShort + fieldTypeLong + fieldTypeFloat + fieldTypeDouble + fieldTypeNULL + fieldTypeTimestamp + fieldTypeLongLong + fieldTypeInt24 + fieldTypeDate + fieldTypeTime + fieldTypeDateTime + fieldTypeYear + fieldTypeNewDate + fieldTypeVarChar + fieldTypeBit +) +const ( + fieldTypeJSON byte = iota + 0xf5 + fieldTypeNewDecimal + fieldTypeEnum + fieldTypeSet + fieldTypeTinyBLOB + fieldTypeMediumBLOB + fieldTypeLongBLOB + fieldTypeBLOB + fieldTypeVarString + fieldTypeString + fieldTypeGeometry +) + +type fieldFlag uint16 + +const ( + flagNotNULL fieldFlag = 1 << iota + flagPriKey + flagUniqueKey + flagMultipleKey + flagBLOB + flagUnsigned + flagZeroFill + flagBinary + flagEnum + flagAutoIncrement + flagTimestamp + flagSet + flagUnknown1 + flagUnknown2 + flagUnknown3 + flagUnknown4 +) + +// http://dev.mysql.com/doc/internals/en/status-flags.html +type statusFlag uint16 + +const ( + statusInTrans statusFlag = 1 << iota + statusInAutocommit + statusReserved // Not in documentation + statusMoreResultsExists + statusNoGoodIndexUsed + statusNoIndexUsed + statusCursorExists + statusLastRowSent + statusDbDropped + statusNoBackslashEscapes + statusMetadataChanged + statusQueryWasSlow + statusPsOutParams + statusInTransReadonly + statusSessionStateChanged +) diff --git a/vendor/github.com/go-sql-driver/mysql/driver.go b/vendor/github.com/go-sql-driver/mysql/driver.go new file mode 100644 index 0000000..0022d1f --- /dev/null +++ b/vendor/github.com/go-sql-driver/mysql/driver.go @@ -0,0 +1,183 @@ +// Copyright 2012 The Go-MySQL-Driver Authors. All rights reserved. +// +// This Source Code Form is subject to the terms of the Mozilla Public +// License, v. 2.0. If a copy of the MPL was not distributed with this file, +// You can obtain one at http://mozilla.org/MPL/2.0/. 
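The capability, command, field-type and status constants above are plain enumerations and bitmasks. A small standalone illustration of the `1 << iota` flag pattern (the type and constants are redeclared locally for the example), mirroring checks such as `mc.status&statusNoBackslashEscapes == 0` in connection.go:

```go
package main

import "fmt"

type statusFlag uint16

const (
	statusInTrans statusFlag = 1 << iota
	statusInAutocommit
)

func main() {
	var status statusFlag
	status |= statusInAutocommit // set one flag in the bitmask

	fmt.Println(status&statusInTrans != 0)      // false: flag not set
	fmt.Println(status&statusInAutocommit != 0) // true: flag set
}
```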
+ +// Package mysql provides a MySQL driver for Go's database/sql package +// +// The driver should be used via the database/sql package: +// +// import "database/sql" +// import _ "github.com/go-sql-driver/mysql" +// +// db, err := sql.Open("mysql", "user:password@/dbname") +// +// See https://github.com/go-sql-driver/mysql#usage for details +package mysql + +import ( + "database/sql" + "database/sql/driver" + "net" +) + +// MySQLDriver is exported to make the driver directly accessible. +// In general the driver is used via the database/sql package. +type MySQLDriver struct{} + +// DialFunc is a function which can be used to establish the network connection. +// Custom dial functions must be registered with RegisterDial +type DialFunc func(addr string) (net.Conn, error) + +var dials map[string]DialFunc + +// RegisterDial registers a custom dial function. It can then be used by the +// network address mynet(addr), where mynet is the registered new network. +// addr is passed as a parameter to the dial function. +func RegisterDial(net string, dial DialFunc) { + if dials == nil { + dials = make(map[string]DialFunc) + } + dials[net] = dial +} + +// Open new Connection. +// See https://github.com/go-sql-driver/mysql#dsn-data-source-name for how +// the DSN string is formated +func (d MySQLDriver) Open(dsn string) (driver.Conn, error) { + var err error + + // New mysqlConn + mc := &mysqlConn{ + maxAllowedPacket: maxPacketSize, + maxWriteSize: maxPacketSize - 1, + } + mc.cfg, err = ParseDSN(dsn) + if err != nil { + return nil, err + } + mc.parseTime = mc.cfg.ParseTime + mc.strict = mc.cfg.Strict + + // Connect to Server + if dial, ok := dials[mc.cfg.Net]; ok { + mc.netConn, err = dial(mc.cfg.Addr) + } else { + nd := net.Dialer{Timeout: mc.cfg.Timeout} + mc.netConn, err = nd.Dial(mc.cfg.Net, mc.cfg.Addr) + } + if err != nil { + return nil, err + } + + // Enable TCP Keepalives on TCP connections + if tc, ok := mc.netConn.(*net.TCPConn); ok { + if err := tc.SetKeepAlive(true); err != nil { + // Don't send COM_QUIT before handshake. + mc.netConn.Close() + mc.netConn = nil + return nil, err + } + } + + mc.buf = newBuffer(mc.netConn) + + // Set I/O timeouts + mc.buf.timeout = mc.cfg.ReadTimeout + mc.writeTimeout = mc.cfg.WriteTimeout + + // Reading Handshake Initialization Packet + cipher, err := mc.readInitPacket() + if err != nil { + mc.cleanup() + return nil, err + } + + // Send Client Authentication Packet + if err = mc.writeAuthPacket(cipher); err != nil { + mc.cleanup() + return nil, err + } + + // Handle response to auth packet, switch methods if possible + if err = handleAuthResult(mc, cipher); err != nil { + // Authentication failed and MySQL has already closed the connection + // (https://dev.mysql.com/doc/internals/en/authentication-fails.html). + // Do not send COM_QUIT, just cleanup and return the error. 
+ mc.cleanup() + return nil, err + } + + if mc.cfg.MaxAllowedPacket > 0 { + mc.maxAllowedPacket = mc.cfg.MaxAllowedPacket + } else { + // Get max allowed packet size + maxap, err := mc.getSystemVar("max_allowed_packet") + if err != nil { + mc.Close() + return nil, err + } + mc.maxAllowedPacket = stringToInt(maxap) - 1 + } + if mc.maxAllowedPacket < maxPacketSize { + mc.maxWriteSize = mc.maxAllowedPacket + } + + // Handle DSN Params + err = mc.handleParams() + if err != nil { + mc.Close() + return nil, err + } + + return mc, nil +} + +func handleAuthResult(mc *mysqlConn, oldCipher []byte) error { + // Read Result Packet + cipher, err := mc.readResultOK() + if err == nil { + return nil // auth successful + } + + if mc.cfg == nil { + return err // auth failed and retry not possible + } + + // Retry auth if configured to do so. + if mc.cfg.AllowOldPasswords && err == ErrOldPassword { + // Retry with old authentication method. Note: there are edge cases + // where this should work but doesn't; this is currently "wontfix": + // https://github.com/go-sql-driver/mysql/issues/184 + + // If CLIENT_PLUGIN_AUTH capability is not supported, no new cipher is + // sent and we have to keep using the cipher sent in the init packet. + if cipher == nil { + cipher = oldCipher + } + + if err = mc.writeOldAuthPacket(cipher); err != nil { + return err + } + _, err = mc.readResultOK() + } else if mc.cfg.AllowCleartextPasswords && err == ErrCleartextPassword { + // Retry with clear text password for + // http://dev.mysql.com/doc/refman/5.7/en/cleartext-authentication-plugin.html + // http://dev.mysql.com/doc/refman/5.7/en/pam-authentication-plugin.html + if err = mc.writeClearAuthPacket(); err != nil { + return err + } + _, err = mc.readResultOK() + } else if mc.cfg.AllowNativePasswords && err == ErrNativePassword { + if err = mc.writeNativeAuthPacket(cipher); err != nil { + return err + } + _, err = mc.readResultOK() + } + return err +} + +func init() { + sql.Register("mysql", &MySQLDriver{}) +} diff --git a/vendor/github.com/go-sql-driver/mysql/driver_test.go b/vendor/github.com/go-sql-driver/mysql/driver_test.go new file mode 100644 index 0000000..78e68f5 --- /dev/null +++ b/vendor/github.com/go-sql-driver/mysql/driver_test.go @@ -0,0 +1,1904 @@ +// Go MySQL Driver - A MySQL-Driver for Go's database/sql package +// +// Copyright 2013 The Go-MySQL-Driver Authors. All rights reserved. +// +// This Source Code Form is subject to the terms of the Mozilla Public +// License, v. 2.0. If a copy of the MPL was not distributed with this file, +// You can obtain one at http://mozilla.org/MPL/2.0/. 
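driver.go above exposes RegisterDial for custom transports; the registered function is looked up by the network part of the DSN address, exactly like the cloudsql registration in the App Engine wrapper earlier in this diff. A sketch with an assumed network name "mynet" and an illustrative timeout:

```go
package main

import (
	"database/sql"
	"log"
	"net"
	"time"

	"github.com/go-sql-driver/mysql"
)

func main() {
	// Register a custom dial function; "mynet" becomes usable as the network
	// in the DSN address below.
	mysql.RegisterDial("mynet", func(addr string) (net.Conn, error) {
		return net.DialTimeout("tcp", addr, 5*time.Second)
	})

	db, err := sql.Open("mysql", "user:password@mynet(db.example.com:3306)/dbname")
	if err != nil {
		log.Fatal(err)
	}
	defer db.Close()
}
```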
+ +package mysql + +import ( + "bytes" + "crypto/tls" + "database/sql" + "database/sql/driver" + "fmt" + "io" + "io/ioutil" + "log" + "net" + "net/url" + "os" + "strings" + "sync" + "sync/atomic" + "testing" + "time" +) + +var ( + user string + pass string + prot string + addr string + dbname string + dsn string + netAddr string + available bool +) + +var ( + tDate = time.Date(2012, 6, 14, 0, 0, 0, 0, time.UTC) + sDate = "2012-06-14" + tDateTime = time.Date(2011, 11, 20, 21, 27, 37, 0, time.UTC) + sDateTime = "2011-11-20 21:27:37" + tDate0 = time.Time{} + sDate0 = "0000-00-00" + sDateTime0 = "0000-00-00 00:00:00" +) + +// See https://github.com/go-sql-driver/mysql/wiki/Testing +func init() { + // get environment variables + env := func(key, defaultValue string) string { + if value := os.Getenv(key); value != "" { + return value + } + return defaultValue + } + user = env("MYSQL_TEST_USER", "root") + pass = env("MYSQL_TEST_PASS", "") + prot = env("MYSQL_TEST_PROT", "tcp") + addr = env("MYSQL_TEST_ADDR", "localhost:3306") + dbname = env("MYSQL_TEST_DBNAME", "gotest") + netAddr = fmt.Sprintf("%s(%s)", prot, addr) + dsn = fmt.Sprintf("%s:%s@%s/%s?timeout=30s&strict=true", user, pass, netAddr, dbname) + c, err := net.Dial(prot, addr) + if err == nil { + available = true + c.Close() + } +} + +type DBTest struct { + *testing.T + db *sql.DB +} + +func runTestsWithMultiStatement(t *testing.T, dsn string, tests ...func(dbt *DBTest)) { + if !available { + t.Skipf("MySQL server not running on %s", netAddr) + } + + dsn += "&multiStatements=true" + var db *sql.DB + if _, err := ParseDSN(dsn); err != errInvalidDSNUnsafeCollation { + db, err = sql.Open("mysql", dsn) + if err != nil { + t.Fatalf("error connecting: %s", err.Error()) + } + defer db.Close() + } + + dbt := &DBTest{t, db} + for _, test := range tests { + test(dbt) + dbt.db.Exec("DROP TABLE IF EXISTS test") + } +} + +func runTests(t *testing.T, dsn string, tests ...func(dbt *DBTest)) { + if !available { + t.Skipf("MySQL server not running on %s", netAddr) + } + + db, err := sql.Open("mysql", dsn) + if err != nil { + t.Fatalf("error connecting: %s", err.Error()) + } + defer db.Close() + + db.Exec("DROP TABLE IF EXISTS test") + + dsn2 := dsn + "&interpolateParams=true" + var db2 *sql.DB + if _, err := ParseDSN(dsn2); err != errInvalidDSNUnsafeCollation { + db2, err = sql.Open("mysql", dsn2) + if err != nil { + t.Fatalf("error connecting: %s", err.Error()) + } + defer db2.Close() + } + + dsn3 := dsn + "&multiStatements=true" + var db3 *sql.DB + if _, err := ParseDSN(dsn3); err != errInvalidDSNUnsafeCollation { + db3, err = sql.Open("mysql", dsn3) + if err != nil { + t.Fatalf("error connecting: %s", err.Error()) + } + defer db3.Close() + } + + dbt := &DBTest{t, db} + dbt2 := &DBTest{t, db2} + dbt3 := &DBTest{t, db3} + for _, test := range tests { + test(dbt) + dbt.db.Exec("DROP TABLE IF EXISTS test") + if db2 != nil { + test(dbt2) + dbt2.db.Exec("DROP TABLE IF EXISTS test") + } + if db3 != nil { + test(dbt3) + dbt3.db.Exec("DROP TABLE IF EXISTS test") + } + } +} + +func (dbt *DBTest) fail(method, query string, err error) { + if len(query) > 300 { + query = "[query too large to print]" + } + dbt.Fatalf("error on %s %s: %s", method, query, err.Error()) +} + +func (dbt *DBTest) mustExec(query string, args ...interface{}) (res sql.Result) { + res, err := dbt.db.Exec(query, args...) 
+ if err != nil { + dbt.fail("exec", query, err) + } + return res +} + +func (dbt *DBTest) mustQuery(query string, args ...interface{}) (rows *sql.Rows) { + rows, err := dbt.db.Query(query, args...) + if err != nil { + dbt.fail("query", query, err) + } + return rows +} + +func TestEmptyQuery(t *testing.T) { + runTests(t, dsn, func(dbt *DBTest) { + // just a comment, no query + rows := dbt.mustQuery("--") + // will hang before #255 + if rows.Next() { + dbt.Errorf("next on rows must be false") + } + }) +} + +func TestCRUD(t *testing.T) { + runTests(t, dsn, func(dbt *DBTest) { + // Create Table + dbt.mustExec("CREATE TABLE test (value BOOL)") + + // Test for unexpected data + var out bool + rows := dbt.mustQuery("SELECT * FROM test") + if rows.Next() { + dbt.Error("unexpected data in empty table") + } + + // Create Data + res := dbt.mustExec("INSERT INTO test VALUES (1)") + count, err := res.RowsAffected() + if err != nil { + dbt.Fatalf("res.RowsAffected() returned error: %s", err.Error()) + } + if count != 1 { + dbt.Fatalf("expected 1 affected row, got %d", count) + } + + id, err := res.LastInsertId() + if err != nil { + dbt.Fatalf("res.LastInsertId() returned error: %s", err.Error()) + } + if id != 0 { + dbt.Fatalf("expected InsertId 0, got %d", id) + } + + // Read + rows = dbt.mustQuery("SELECT value FROM test") + if rows.Next() { + rows.Scan(&out) + if true != out { + dbt.Errorf("true != %t", out) + } + + if rows.Next() { + dbt.Error("unexpected data") + } + } else { + dbt.Error("no data") + } + + // Update + res = dbt.mustExec("UPDATE test SET value = ? WHERE value = ?", false, true) + count, err = res.RowsAffected() + if err != nil { + dbt.Fatalf("res.RowsAffected() returned error: %s", err.Error()) + } + if count != 1 { + dbt.Fatalf("expected 1 affected row, got %d", count) + } + + // Check Update + rows = dbt.mustQuery("SELECT value FROM test") + if rows.Next() { + rows.Scan(&out) + if false != out { + dbt.Errorf("false != %t", out) + } + + if rows.Next() { + dbt.Error("unexpected data") + } + } else { + dbt.Error("no data") + } + + // Delete + res = dbt.mustExec("DELETE FROM test WHERE value = ?", false) + count, err = res.RowsAffected() + if err != nil { + dbt.Fatalf("res.RowsAffected() returned error: %s", err.Error()) + } + if count != 1 { + dbt.Fatalf("expected 1 affected row, got %d", count) + } + + // Check for unexpected rows + res = dbt.mustExec("DELETE FROM test") + count, err = res.RowsAffected() + if err != nil { + dbt.Fatalf("res.RowsAffected() returned error: %s", err.Error()) + } + if count != 0 { + dbt.Fatalf("expected 0 affected row, got %d", count) + } + }) +} + +func TestMultiQuery(t *testing.T) { + runTestsWithMultiStatement(t, dsn, func(dbt *DBTest) { + // Create Table + dbt.mustExec("CREATE TABLE `test` (`id` int(11) NOT NULL, `value` int(11) NOT NULL) ") + + // Create Data + res := dbt.mustExec("INSERT INTO test VALUES (1, 1)") + count, err := res.RowsAffected() + if err != nil { + dbt.Fatalf("res.RowsAffected() returned error: %s", err.Error()) + } + if count != 1 { + dbt.Fatalf("expected 1 affected row, got %d", count) + } + + // Update + res = dbt.mustExec("UPDATE test SET value = 3 WHERE id = 1; UPDATE test SET value = 4 WHERE id = 1; UPDATE test SET value = 5 WHERE id = 1;") + count, err = res.RowsAffected() + if err != nil { + dbt.Fatalf("res.RowsAffected() returned error: %s", err.Error()) + } + if count != 1 { + dbt.Fatalf("expected 1 affected row, got %d", count) + } + + // Read + var out int + rows := dbt.mustQuery("SELECT value FROM test WHERE id=1;") 
+ if rows.Next() { + rows.Scan(&out) + if 5 != out { + dbt.Errorf("5 != %d", out) + } + + if rows.Next() { + dbt.Error("unexpected data") + } + } else { + dbt.Error("no data") + } + + }) +} + +func TestInt(t *testing.T) { + runTests(t, dsn, func(dbt *DBTest) { + types := [5]string{"TINYINT", "SMALLINT", "MEDIUMINT", "INT", "BIGINT"} + in := int64(42) + var out int64 + var rows *sql.Rows + + // SIGNED + for _, v := range types { + dbt.mustExec("CREATE TABLE test (value " + v + ")") + + dbt.mustExec("INSERT INTO test VALUES (?)", in) + + rows = dbt.mustQuery("SELECT value FROM test") + if rows.Next() { + rows.Scan(&out) + if in != out { + dbt.Errorf("%s: %d != %d", v, in, out) + } + } else { + dbt.Errorf("%s: no data", v) + } + + dbt.mustExec("DROP TABLE IF EXISTS test") + } + + // UNSIGNED ZEROFILL + for _, v := range types { + dbt.mustExec("CREATE TABLE test (value " + v + " ZEROFILL)") + + dbt.mustExec("INSERT INTO test VALUES (?)", in) + + rows = dbt.mustQuery("SELECT value FROM test") + if rows.Next() { + rows.Scan(&out) + if in != out { + dbt.Errorf("%s ZEROFILL: %d != %d", v, in, out) + } + } else { + dbt.Errorf("%s ZEROFILL: no data", v) + } + + dbt.mustExec("DROP TABLE IF EXISTS test") + } + }) +} + +func TestFloat32(t *testing.T) { + runTests(t, dsn, func(dbt *DBTest) { + types := [2]string{"FLOAT", "DOUBLE"} + in := float32(42.23) + var out float32 + var rows *sql.Rows + for _, v := range types { + dbt.mustExec("CREATE TABLE test (value " + v + ")") + dbt.mustExec("INSERT INTO test VALUES (?)", in) + rows = dbt.mustQuery("SELECT value FROM test") + if rows.Next() { + rows.Scan(&out) + if in != out { + dbt.Errorf("%s: %g != %g", v, in, out) + } + } else { + dbt.Errorf("%s: no data", v) + } + dbt.mustExec("DROP TABLE IF EXISTS test") + } + }) +} + +func TestFloat64(t *testing.T) { + runTests(t, dsn, func(dbt *DBTest) { + types := [2]string{"FLOAT", "DOUBLE"} + var expected float64 = 42.23 + var out float64 + var rows *sql.Rows + for _, v := range types { + dbt.mustExec("CREATE TABLE test (value " + v + ")") + dbt.mustExec("INSERT INTO test VALUES (42.23)") + rows = dbt.mustQuery("SELECT value FROM test") + if rows.Next() { + rows.Scan(&out) + if expected != out { + dbt.Errorf("%s: %g != %g", v, expected, out) + } + } else { + dbt.Errorf("%s: no data", v) + } + dbt.mustExec("DROP TABLE IF EXISTS test") + } + }) +} + +func TestFloat64Placeholder(t *testing.T) { + runTests(t, dsn, func(dbt *DBTest) { + types := [2]string{"FLOAT", "DOUBLE"} + var expected float64 = 42.23 + var out float64 + var rows *sql.Rows + for _, v := range types { + dbt.mustExec("CREATE TABLE test (id int, value " + v + ")") + dbt.mustExec("INSERT INTO test VALUES (1, 42.23)") + rows = dbt.mustQuery("SELECT value FROM test WHERE id = ?", 1) + if rows.Next() { + rows.Scan(&out) + if expected != out { + dbt.Errorf("%s: %g != %g", v, expected, out) + } + } else { + dbt.Errorf("%s: no data", v) + } + dbt.mustExec("DROP TABLE IF EXISTS test") + } + }) +} + +func TestString(t *testing.T) { + runTests(t, dsn, func(dbt *DBTest) { + types := [6]string{"CHAR(255)", "VARCHAR(255)", "TINYTEXT", "TEXT", "MEDIUMTEXT", "LONGTEXT"} + in := "κόσμε üöäßñóùéàâÿœ'îë Árvíztűrő いろはにほへとちりぬるを イロハニホヘト דג סקרן чащах น่าฟังเอย" + var out string + var rows *sql.Rows + + for _, v := range types { + dbt.mustExec("CREATE TABLE test (value " + v + ") CHARACTER SET utf8") + + dbt.mustExec("INSERT INTO test VALUES (?)", in) + + rows = dbt.mustQuery("SELECT value FROM test") + if rows.Next() { + rows.Scan(&out) + if in != out { + dbt.Errorf("%s: %s 
!= %s", v, in, out) + } + } else { + dbt.Errorf("%s: no data", v) + } + + dbt.mustExec("DROP TABLE IF EXISTS test") + } + + // BLOB + dbt.mustExec("CREATE TABLE test (id int, value BLOB) CHARACTER SET utf8") + + id := 2 + in = "Lorem ipsum dolor sit amet, consetetur sadipscing elitr, " + + "sed diam nonumy eirmod tempor invidunt ut labore et dolore magna aliquyam erat, " + + "sed diam voluptua. At vero eos et accusam et justo duo dolores et ea rebum. " + + "Stet clita kasd gubergren, no sea takimata sanctus est Lorem ipsum dolor sit amet. " + + "Lorem ipsum dolor sit amet, consetetur sadipscing elitr, " + + "sed diam nonumy eirmod tempor invidunt ut labore et dolore magna aliquyam erat, " + + "sed diam voluptua. At vero eos et accusam et justo duo dolores et ea rebum. " + + "Stet clita kasd gubergren, no sea takimata sanctus est Lorem ipsum dolor sit amet." + dbt.mustExec("INSERT INTO test VALUES (?, ?)", id, in) + + err := dbt.db.QueryRow("SELECT value FROM test WHERE id = ?", id).Scan(&out) + if err != nil { + dbt.Fatalf("Error on BLOB-Query: %s", err.Error()) + } else if out != in { + dbt.Errorf("BLOB: %s != %s", in, out) + } + }) +} + +type timeTests struct { + dbtype string + tlayout string + tests []timeTest +} + +type timeTest struct { + s string // leading "!": do not use t as value in queries + t time.Time +} + +type timeMode byte + +func (t timeMode) String() string { + switch t { + case binaryString: + return "binary:string" + case binaryTime: + return "binary:time.Time" + case textString: + return "text:string" + } + panic("unsupported timeMode") +} + +func (t timeMode) Binary() bool { + switch t { + case binaryString, binaryTime: + return true + } + return false +} + +const ( + binaryString timeMode = iota + binaryTime + textString +) + +func (t timeTest) genQuery(dbtype string, mode timeMode) string { + var inner string + if mode.Binary() { + inner = "?" 
+ } else { + inner = `"%s"` + } + return `SELECT cast(` + inner + ` as ` + dbtype + `)` +} + +func (t timeTest) run(dbt *DBTest, dbtype, tlayout string, mode timeMode) { + var rows *sql.Rows + query := t.genQuery(dbtype, mode) + switch mode { + case binaryString: + rows = dbt.mustQuery(query, t.s) + case binaryTime: + rows = dbt.mustQuery(query, t.t) + case textString: + query = fmt.Sprintf(query, t.s) + rows = dbt.mustQuery(query) + default: + panic("unsupported mode") + } + defer rows.Close() + var err error + if !rows.Next() { + err = rows.Err() + if err == nil { + err = fmt.Errorf("no data") + } + dbt.Errorf("%s [%s]: %s", dbtype, mode, err) + return + } + var dst interface{} + err = rows.Scan(&dst) + if err != nil { + dbt.Errorf("%s [%s]: %s", dbtype, mode, err) + return + } + switch val := dst.(type) { + case []uint8: + str := string(val) + if str == t.s { + return + } + if mode.Binary() && dbtype == "DATETIME" && len(str) == 26 && str[:19] == t.s { + // a fix mainly for TravisCI: + // accept full microsecond resolution in result for DATETIME columns + // where the binary protocol was used + return + } + dbt.Errorf("%s [%s] to string: expected %q, got %q", + dbtype, mode, + t.s, str, + ) + case time.Time: + if val == t.t { + return + } + dbt.Errorf("%s [%s] to string: expected %q, got %q", + dbtype, mode, + t.s, val.Format(tlayout), + ) + default: + fmt.Printf("%#v\n", []interface{}{dbtype, tlayout, mode, t.s, t.t}) + dbt.Errorf("%s [%s]: unhandled type %T (is '%v')", + dbtype, mode, + val, val, + ) + } +} + +func TestDateTime(t *testing.T) { + afterTime := func(t time.Time, d string) time.Time { + dur, err := time.ParseDuration(d) + if err != nil { + panic(err) + } + return t.Add(dur) + } + // NOTE: MySQL rounds DATETIME(x) up - but that's not included in the tests + format := "2006-01-02 15:04:05.999999" + t0 := time.Time{} + tstr0 := "0000-00-00 00:00:00.000000" + testcases := []timeTests{ + {"DATE", format[:10], []timeTest{ + {t: time.Date(2011, 11, 20, 0, 0, 0, 0, time.UTC)}, + {t: t0, s: tstr0[:10]}, + }}, + {"DATETIME", format[:19], []timeTest{ + {t: time.Date(2011, 11, 20, 21, 27, 37, 0, time.UTC)}, + {t: t0, s: tstr0[:19]}, + }}, + {"DATETIME(0)", format[:21], []timeTest{ + {t: time.Date(2011, 11, 20, 21, 27, 37, 0, time.UTC)}, + {t: t0, s: tstr0[:19]}, + }}, + {"DATETIME(1)", format[:21], []timeTest{ + {t: time.Date(2011, 11, 20, 21, 27, 37, 100000000, time.UTC)}, + {t: t0, s: tstr0[:21]}, + }}, + {"DATETIME(6)", format, []timeTest{ + {t: time.Date(2011, 11, 20, 21, 27, 37, 123456000, time.UTC)}, + {t: t0, s: tstr0}, + }}, + {"TIME", format[11:19], []timeTest{ + {t: afterTime(t0, "12345s")}, + {s: "!-12:34:56"}, + {s: "!-838:59:59"}, + {s: "!838:59:59"}, + {t: t0, s: tstr0[11:19]}, + }}, + {"TIME(0)", format[11:19], []timeTest{ + {t: afterTime(t0, "12345s")}, + {s: "!-12:34:56"}, + {s: "!-838:59:59"}, + {s: "!838:59:59"}, + {t: t0, s: tstr0[11:19]}, + }}, + {"TIME(1)", format[11:21], []timeTest{ + {t: afterTime(t0, "12345600ms")}, + {s: "!-12:34:56.7"}, + {s: "!-838:59:58.9"}, + {s: "!838:59:58.9"}, + {t: t0, s: tstr0[11:21]}, + }}, + {"TIME(6)", format[11:], []timeTest{ + {t: afterTime(t0, "1234567890123000ns")}, + {s: "!-12:34:56.789012"}, + {s: "!-838:59:58.999999"}, + {s: "!838:59:58.999999"}, + {t: t0, s: tstr0[11:]}, + }}, + } + dsns := []string{ + dsn + "&parseTime=true", + dsn + "&parseTime=false", + } + for _, testdsn := range dsns { + runTests(t, testdsn, func(dbt *DBTest) { + microsecsSupported := false + zeroDateSupported := false + var rows *sql.Rows + var err 
error + rows, err = dbt.db.Query(`SELECT cast("00:00:00.1" as TIME(1)) = "00:00:00.1"`) + if err == nil { + rows.Scan(µsecsSupported) + rows.Close() + } + rows, err = dbt.db.Query(`SELECT cast("0000-00-00" as DATE) = "0000-00-00"`) + if err == nil { + rows.Scan(&zeroDateSupported) + rows.Close() + } + for _, setups := range testcases { + if t := setups.dbtype; !microsecsSupported && t[len(t)-1:] == ")" { + // skip fractional second tests if unsupported by server + continue + } + for _, setup := range setups.tests { + allowBinTime := true + if setup.s == "" { + // fill time string whereever Go can reliable produce it + setup.s = setup.t.Format(setups.tlayout) + } else if setup.s[0] == '!' { + // skip tests using setup.t as source in queries + allowBinTime = false + // fix setup.s - remove the "!" + setup.s = setup.s[1:] + } + if !zeroDateSupported && setup.s == tstr0[:len(setup.s)] { + // skip disallowed 0000-00-00 date + continue + } + setup.run(dbt, setups.dbtype, setups.tlayout, textString) + setup.run(dbt, setups.dbtype, setups.tlayout, binaryString) + if allowBinTime { + setup.run(dbt, setups.dbtype, setups.tlayout, binaryTime) + } + } + } + }) + } +} + +func TestTimestampMicros(t *testing.T) { + format := "2006-01-02 15:04:05.999999" + f0 := format[:19] + f1 := format[:21] + f6 := format[:26] + runTests(t, dsn, func(dbt *DBTest) { + // check if microseconds are supported. + // Do not use timestamp(x) for that check - before 5.5.6, x would mean display width + // and not precision. + // Se last paragraph at http://dev.mysql.com/doc/refman/5.6/en/fractional-seconds.html + microsecsSupported := false + if rows, err := dbt.db.Query(`SELECT cast("00:00:00.1" as TIME(1)) = "00:00:00.1"`); err == nil { + rows.Scan(µsecsSupported) + rows.Close() + } + if !microsecsSupported { + // skip test + return + } + _, err := dbt.db.Exec(` + CREATE TABLE test ( + value0 TIMESTAMP NOT NULL DEFAULT '` + f0 + `', + value1 TIMESTAMP(1) NOT NULL DEFAULT '` + f1 + `', + value6 TIMESTAMP(6) NOT NULL DEFAULT '` + f6 + `' + )`, + ) + if err != nil { + dbt.Error(err) + } + defer dbt.mustExec("DROP TABLE IF EXISTS test") + dbt.mustExec("INSERT INTO test SET value0=?, value1=?, value6=?", f0, f1, f6) + var res0, res1, res6 string + rows := dbt.mustQuery("SELECT * FROM test") + if !rows.Next() { + dbt.Errorf("test contained no selectable values") + } + err = rows.Scan(&res0, &res1, &res6) + if err != nil { + dbt.Error(err) + } + if res0 != f0 { + dbt.Errorf("expected %q, got %q", f0, res0) + } + if res1 != f1 { + dbt.Errorf("expected %q, got %q", f1, res1) + } + if res6 != f6 { + dbt.Errorf("expected %q, got %q", f6, res6) + } + }) +} + +func TestNULL(t *testing.T) { + runTests(t, dsn, func(dbt *DBTest) { + nullStmt, err := dbt.db.Prepare("SELECT NULL") + if err != nil { + dbt.Fatal(err) + } + defer nullStmt.Close() + + nonNullStmt, err := dbt.db.Prepare("SELECT 1") + if err != nil { + dbt.Fatal(err) + } + defer nonNullStmt.Close() + + // NullBool + var nb sql.NullBool + // Invalid + if err = nullStmt.QueryRow().Scan(&nb); err != nil { + dbt.Fatal(err) + } + if nb.Valid { + dbt.Error("valid NullBool which should be invalid") + } + // Valid + if err = nonNullStmt.QueryRow().Scan(&nb); err != nil { + dbt.Fatal(err) + } + if !nb.Valid { + dbt.Error("invalid NullBool which should be valid") + } else if nb.Bool != true { + dbt.Errorf("Unexpected NullBool value: %t (should be true)", nb.Bool) + } + + // NullFloat64 + var nf sql.NullFloat64 + // Invalid + if err = nullStmt.QueryRow().Scan(&nf); err != nil { + 
dbt.Fatal(err) + } + if nf.Valid { + dbt.Error("valid NullFloat64 which should be invalid") + } + // Valid + if err = nonNullStmt.QueryRow().Scan(&nf); err != nil { + dbt.Fatal(err) + } + if !nf.Valid { + dbt.Error("invalid NullFloat64 which should be valid") + } else if nf.Float64 != float64(1) { + dbt.Errorf("unexpected NullFloat64 value: %f (should be 1.0)", nf.Float64) + } + + // NullInt64 + var ni sql.NullInt64 + // Invalid + if err = nullStmt.QueryRow().Scan(&ni); err != nil { + dbt.Fatal(err) + } + if ni.Valid { + dbt.Error("valid NullInt64 which should be invalid") + } + // Valid + if err = nonNullStmt.QueryRow().Scan(&ni); err != nil { + dbt.Fatal(err) + } + if !ni.Valid { + dbt.Error("invalid NullInt64 which should be valid") + } else if ni.Int64 != int64(1) { + dbt.Errorf("unexpected NullInt64 value: %d (should be 1)", ni.Int64) + } + + // NullString + var ns sql.NullString + // Invalid + if err = nullStmt.QueryRow().Scan(&ns); err != nil { + dbt.Fatal(err) + } + if ns.Valid { + dbt.Error("valid NullString which should be invalid") + } + // Valid + if err = nonNullStmt.QueryRow().Scan(&ns); err != nil { + dbt.Fatal(err) + } + if !ns.Valid { + dbt.Error("invalid NullString which should be valid") + } else if ns.String != `1` { + dbt.Error("unexpected NullString value:" + ns.String + " (should be `1`)") + } + + // nil-bytes + var b []byte + // Read nil + if err = nullStmt.QueryRow().Scan(&b); err != nil { + dbt.Fatal(err) + } + if b != nil { + dbt.Error("non-nil []byte wich should be nil") + } + // Read non-nil + if err = nonNullStmt.QueryRow().Scan(&b); err != nil { + dbt.Fatal(err) + } + if b == nil { + dbt.Error("nil []byte wich should be non-nil") + } + // Insert nil + b = nil + success := false + if err = dbt.db.QueryRow("SELECT ? IS NULL", b).Scan(&success); err != nil { + dbt.Fatal(err) + } + if !success { + dbt.Error("inserting []byte(nil) as NULL failed") + } + // Check input==output with input==nil + b = nil + if err = dbt.db.QueryRow("SELECT ?", b).Scan(&b); err != nil { + dbt.Fatal(err) + } + if b != nil { + dbt.Error("non-nil echo from nil input") + } + // Check input==output with input!=nil + b = []byte("") + if err = dbt.db.QueryRow("SELECT ?", b).Scan(&b); err != nil { + dbt.Fatal(err) + } + if b == nil { + dbt.Error("nil echo from non-nil input") + } + + // Insert NULL + dbt.mustExec("CREATE TABLE test (dummmy1 int, value int, dummy2 int)") + + dbt.mustExec("INSERT INTO test VALUES (?, ?, ?)", 1, nil, 2) + + var out interface{} + rows := dbt.mustQuery("SELECT * FROM test") + if rows.Next() { + rows.Scan(&out) + if out != nil { + dbt.Errorf("%v != nil", out) + } + } else { + dbt.Error("no data") + } + }) +} + +func TestUint64(t *testing.T) { + const ( + u0 = uint64(0) + uall = ^u0 + uhigh = uall >> 1 + utop = ^uhigh + s0 = int64(0) + sall = ^s0 + shigh = int64(uhigh) + stop = ^shigh + ) + runTests(t, dsn, func(dbt *DBTest) { + stmt, err := dbt.db.Prepare(`SELECT ?, ?, ? 
,?, ?, ?, ?, ?`) + if err != nil { + dbt.Fatal(err) + } + defer stmt.Close() + row := stmt.QueryRow( + u0, uhigh, utop, uall, + s0, shigh, stop, sall, + ) + + var ua, ub, uc, ud uint64 + var sa, sb, sc, sd int64 + + err = row.Scan(&ua, &ub, &uc, &ud, &sa, &sb, &sc, &sd) + if err != nil { + dbt.Fatal(err) + } + switch { + case ua != u0, + ub != uhigh, + uc != utop, + ud != uall, + sa != s0, + sb != shigh, + sc != stop, + sd != sall: + dbt.Fatal("unexpected result value") + } + }) +} + +func TestLongData(t *testing.T) { + runTests(t, dsn, func(dbt *DBTest) { + var maxAllowedPacketSize int + err := dbt.db.QueryRow("select @@max_allowed_packet").Scan(&maxAllowedPacketSize) + if err != nil { + dbt.Fatal(err) + } + maxAllowedPacketSize-- + + // don't get too ambitious + if maxAllowedPacketSize > 1<<25 { + maxAllowedPacketSize = 1 << 25 + } + + dbt.mustExec("CREATE TABLE test (value LONGBLOB)") + + in := strings.Repeat(`a`, maxAllowedPacketSize+1) + var out string + var rows *sql.Rows + + // Long text data + const nonDataQueryLen = 28 // length query w/o value + inS := in[:maxAllowedPacketSize-nonDataQueryLen] + dbt.mustExec("INSERT INTO test VALUES('" + inS + "')") + rows = dbt.mustQuery("SELECT value FROM test") + if rows.Next() { + rows.Scan(&out) + if inS != out { + dbt.Fatalf("LONGBLOB: length in: %d, length out: %d", len(inS), len(out)) + } + if rows.Next() { + dbt.Error("LONGBLOB: unexpexted row") + } + } else { + dbt.Fatalf("LONGBLOB: no data") + } + + // Empty table + dbt.mustExec("TRUNCATE TABLE test") + + // Long binary data + dbt.mustExec("INSERT INTO test VALUES(?)", in) + rows = dbt.mustQuery("SELECT value FROM test WHERE 1=?", 1) + if rows.Next() { + rows.Scan(&out) + if in != out { + dbt.Fatalf("LONGBLOB: length in: %d, length out: %d", len(in), len(out)) + } + if rows.Next() { + dbt.Error("LONGBLOB: unexpexted row") + } + } else { + if err = rows.Err(); err != nil { + dbt.Fatalf("LONGBLOB: no data (err: %s)", err.Error()) + } else { + dbt.Fatal("LONGBLOB: no data (err: )") + } + } + }) +} + +func TestLoadData(t *testing.T) { + runTests(t, dsn, func(dbt *DBTest) { + verifyLoadDataResult := func() { + rows, err := dbt.db.Query("SELECT * FROM test") + if err != nil { + dbt.Fatal(err.Error()) + } + + i := 0 + values := [4]string{ + "a string", + "a string containing a \t", + "a string containing a \n", + "a string containing both \t\n", + } + + var id int + var value string + + for rows.Next() { + i++ + err = rows.Scan(&id, &value) + if err != nil { + dbt.Fatal(err.Error()) + } + if i != id { + dbt.Fatalf("%d != %d", i, id) + } + if values[i-1] != value { + dbt.Fatalf("%q != %q", values[i-1], value) + } + } + err = rows.Err() + if err != nil { + dbt.Fatal(err.Error()) + } + + if i != 4 { + dbt.Fatalf("rows count mismatch. 
Got %d, want 4", i) + } + } + file, err := ioutil.TempFile("", "gotest") + defer os.Remove(file.Name()) + if err != nil { + dbt.Fatal(err) + } + file.WriteString("1\ta string\n2\ta string containing a \\t\n3\ta string containing a \\n\n4\ta string containing both \\t\\n\n") + file.Close() + + dbt.db.Exec("DROP TABLE IF EXISTS test") + dbt.mustExec("CREATE TABLE test (id INT NOT NULL PRIMARY KEY, value TEXT NOT NULL) CHARACTER SET utf8") + + // Local File + RegisterLocalFile(file.Name()) + dbt.mustExec(fmt.Sprintf("LOAD DATA LOCAL INFILE %q INTO TABLE test", file.Name())) + verifyLoadDataResult() + // negative test + _, err = dbt.db.Exec("LOAD DATA LOCAL INFILE 'doesnotexist' INTO TABLE test") + if err == nil { + dbt.Fatal("load non-existent file didn't fail") + } else if err.Error() != "local file 'doesnotexist' is not registered" { + dbt.Fatal(err.Error()) + } + + // Empty table + dbt.mustExec("TRUNCATE TABLE test") + + // Reader + RegisterReaderHandler("test", func() io.Reader { + file, err = os.Open(file.Name()) + if err != nil { + dbt.Fatal(err) + } + return file + }) + dbt.mustExec("LOAD DATA LOCAL INFILE 'Reader::test' INTO TABLE test") + verifyLoadDataResult() + // negative test + _, err = dbt.db.Exec("LOAD DATA LOCAL INFILE 'Reader::doesnotexist' INTO TABLE test") + if err == nil { + dbt.Fatal("load non-existent Reader didn't fail") + } else if err.Error() != "Reader 'doesnotexist' is not registered" { + dbt.Fatal(err.Error()) + } + }) +} + +func TestFoundRows(t *testing.T) { + runTests(t, dsn, func(dbt *DBTest) { + dbt.mustExec("CREATE TABLE test (id INT NOT NULL ,data INT NOT NULL)") + dbt.mustExec("INSERT INTO test (id, data) VALUES (0, 0),(0, 0),(1, 0),(1, 0),(1, 1)") + + res := dbt.mustExec("UPDATE test SET data = 1 WHERE id = 0") + count, err := res.RowsAffected() + if err != nil { + dbt.Fatalf("res.RowsAffected() returned error: %s", err.Error()) + } + if count != 2 { + dbt.Fatalf("Expected 2 affected rows, got %d", count) + } + res = dbt.mustExec("UPDATE test SET data = 1 WHERE id = 1") + count, err = res.RowsAffected() + if err != nil { + dbt.Fatalf("res.RowsAffected() returned error: %s", err.Error()) + } + if count != 2 { + dbt.Fatalf("Expected 2 affected rows, got %d", count) + } + }) + runTests(t, dsn+"&clientFoundRows=true", func(dbt *DBTest) { + dbt.mustExec("CREATE TABLE test (id INT NOT NULL ,data INT NOT NULL)") + dbt.mustExec("INSERT INTO test (id, data) VALUES (0, 0),(0, 0),(1, 0),(1, 0),(1, 1)") + + res := dbt.mustExec("UPDATE test SET data = 1 WHERE id = 0") + count, err := res.RowsAffected() + if err != nil { + dbt.Fatalf("res.RowsAffected() returned error: %s", err.Error()) + } + if count != 2 { + dbt.Fatalf("Expected 2 matched rows, got %d", count) + } + res = dbt.mustExec("UPDATE test SET data = 1 WHERE id = 1") + count, err = res.RowsAffected() + if err != nil { + dbt.Fatalf("res.RowsAffected() returned error: %s", err.Error()) + } + if count != 3 { + dbt.Fatalf("Expected 3 matched rows, got %d", count) + } + }) +} + +func TestStrict(t *testing.T) { + // ALLOW_INVALID_DATES to get rid of stricter modes - we want to test for warnings, not errors + relaxedDsn := dsn + "&sql_mode='ALLOW_INVALID_DATES,NO_AUTO_CREATE_USER'" + // make sure the MySQL version is recent enough with a separate connection + // before running the test + conn, err := MySQLDriver{}.Open(relaxedDsn) + if conn != nil { + conn.Close() + } + if me, ok := err.(*MySQLError); ok && me.Number == 1231 { + // Error 1231: Variable 'sql_mode' can't be set to the value of 'ALLOW_INVALID_DATES' + // 
=> skip test, MySQL server version is too old + return + } + runTests(t, relaxedDsn, func(dbt *DBTest) { + dbt.mustExec("CREATE TABLE test (a TINYINT NOT NULL, b CHAR(4))") + + var queries = [...]struct { + in string + codes []string + }{ + {"DROP TABLE IF EXISTS no_such_table", []string{"1051"}}, + {"INSERT INTO test VALUES(10,'mysql'),(NULL,'test'),(300,'Open Source')", []string{"1265", "1048", "1264", "1265"}}, + } + var err error + + var checkWarnings = func(err error, mode string, idx int) { + if err == nil { + dbt.Errorf("expected STRICT error on query [%s] %s", mode, queries[idx].in) + } + + if warnings, ok := err.(MySQLWarnings); ok { + var codes = make([]string, len(warnings)) + for i := range warnings { + codes[i] = warnings[i].Code + } + if len(codes) != len(queries[idx].codes) { + dbt.Errorf("unexpected STRICT error count on query [%s] %s: Wanted %v, Got %v", mode, queries[idx].in, queries[idx].codes, codes) + } + + for i := range warnings { + if codes[i] != queries[idx].codes[i] { + dbt.Errorf("unexpected STRICT error codes on query [%s] %s: Wanted %v, Got %v", mode, queries[idx].in, queries[idx].codes, codes) + return + } + } + + } else { + dbt.Errorf("unexpected error on query [%s] %s: %s", mode, queries[idx].in, err.Error()) + } + } + + // text protocol + for i := range queries { + _, err = dbt.db.Exec(queries[i].in) + checkWarnings(err, "text", i) + } + + var stmt *sql.Stmt + + // binary protocol + for i := range queries { + stmt, err = dbt.db.Prepare(queries[i].in) + if err != nil { + dbt.Errorf("error on preparing query %s: %s", queries[i].in, err.Error()) + } + + _, err = stmt.Exec() + checkWarnings(err, "binary", i) + + err = stmt.Close() + if err != nil { + dbt.Errorf("error on closing stmt for query %s: %s", queries[i].in, err.Error()) + } + } + }) +} + +func TestTLS(t *testing.T) { + tlsTest := func(dbt *DBTest) { + if err := dbt.db.Ping(); err != nil { + if err == ErrNoTLS { + dbt.Skip("server does not support TLS") + } else { + dbt.Fatalf("error on Ping: %s", err.Error()) + } + } + + rows := dbt.mustQuery("SHOW STATUS LIKE 'Ssl_cipher'") + + var variable, value *sql.RawBytes + for rows.Next() { + if err := rows.Scan(&variable, &value); err != nil { + dbt.Fatal(err.Error()) + } + + if value == nil { + dbt.Fatal("no Cipher") + } + } + } + + runTests(t, dsn+"&tls=skip-verify", tlsTest) + + // Verify that registering / using a custom cfg works + RegisterTLSConfig("custom-skip-verify", &tls.Config{ + InsecureSkipVerify: true, + }) + runTests(t, dsn+"&tls=custom-skip-verify", tlsTest) +} + +func TestReuseClosedConnection(t *testing.T) { + // this test does not use sql.database, it uses the driver directly + if !available { + t.Skipf("MySQL server not running on %s", netAddr) + } + + md := &MySQLDriver{} + conn, err := md.Open(dsn) + if err != nil { + t.Fatalf("error connecting: %s", err.Error()) + } + stmt, err := conn.Prepare("DO 1") + if err != nil { + t.Fatalf("error preparing statement: %s", err.Error()) + } + _, err = stmt.Exec(nil) + if err != nil { + t.Fatalf("error executing statement: %s", err.Error()) + } + err = conn.Close() + if err != nil { + t.Fatalf("error closing connection: %s", err.Error()) + } + + defer func() { + if err := recover(); err != nil { + t.Errorf("panic after reusing a closed connection: %v", err) + } + }() + _, err = stmt.Exec(nil) + if err != nil && err != driver.ErrBadConn { + t.Errorf("unexpected error '%s', expected '%s'", + err.Error(), driver.ErrBadConn.Error()) + } +} + +func TestCharset(t *testing.T) { + if !available { + 
t.Skipf("MySQL server not running on %s", netAddr) + } + + mustSetCharset := func(charsetParam, expected string) { + runTests(t, dsn+"&"+charsetParam, func(dbt *DBTest) { + rows := dbt.mustQuery("SELECT @@character_set_connection") + defer rows.Close() + + if !rows.Next() { + dbt.Fatalf("error getting connection charset: %s", rows.Err()) + } + + var got string + rows.Scan(&got) + + if got != expected { + dbt.Fatalf("expected connection charset %s but got %s", expected, got) + } + }) + } + + // non utf8 test + mustSetCharset("charset=ascii", "ascii") + + // when the first charset is invalid, use the second + mustSetCharset("charset=none,utf8", "utf8") + + // when the first charset is valid, use it + mustSetCharset("charset=ascii,utf8", "ascii") + mustSetCharset("charset=utf8,ascii", "utf8") +} + +func TestFailingCharset(t *testing.T) { + runTests(t, dsn+"&charset=none", func(dbt *DBTest) { + // run query to really establish connection... + _, err := dbt.db.Exec("SELECT 1") + if err == nil { + dbt.db.Close() + t.Fatalf("connection must not succeed without a valid charset") + } + }) +} + +func TestCollation(t *testing.T) { + if !available { + t.Skipf("MySQL server not running on %s", netAddr) + } + + defaultCollation := "utf8_general_ci" + testCollations := []string{ + "", // do not set + defaultCollation, // driver default + "latin1_general_ci", + "binary", + "utf8_unicode_ci", + "cp1257_bin", + } + + for _, collation := range testCollations { + var expected, tdsn string + if collation != "" { + tdsn = dsn + "&collation=" + collation + expected = collation + } else { + tdsn = dsn + expected = defaultCollation + } + + runTests(t, tdsn, func(dbt *DBTest) { + var got string + if err := dbt.db.QueryRow("SELECT @@collation_connection").Scan(&got); err != nil { + dbt.Fatal(err) + } + + if got != expected { + dbt.Fatalf("expected connection collation %s but got %s", expected, got) + } + }) + } +} + +func TestColumnsWithAlias(t *testing.T) { + runTests(t, dsn+"&columnsWithAlias=true", func(dbt *DBTest) { + rows := dbt.mustQuery("SELECT 1 AS A") + defer rows.Close() + cols, _ := rows.Columns() + if len(cols) != 1 { + t.Fatalf("expected 1 column, got %d", len(cols)) + } + if cols[0] != "A" { + t.Fatalf("expected column name \"A\", got \"%s\"", cols[0]) + } + rows.Close() + + rows = dbt.mustQuery("SELECT * FROM (SELECT 1 AS one) AS A") + cols, _ = rows.Columns() + if len(cols) != 1 { + t.Fatalf("expected 1 column, got %d", len(cols)) + } + if cols[0] != "A.one" { + t.Fatalf("expected column name \"A.one\", got \"%s\"", cols[0]) + } + }) +} + +func TestRawBytesResultExceedsBuffer(t *testing.T) { + runTests(t, dsn, func(dbt *DBTest) { + // defaultBufSize from buffer.go + expected := strings.Repeat("abc", defaultBufSize) + + rows := dbt.mustQuery("SELECT '" + expected + "'") + defer rows.Close() + if !rows.Next() { + dbt.Error("expected result, got none") + } + var result sql.RawBytes + rows.Scan(&result) + if expected != string(result) { + dbt.Error("result did not match expected value") + } + }) +} + +func TestTimezoneConversion(t *testing.T) { + zones := []string{"UTC", "US/Central", "US/Pacific", "Local"} + + // Regression test for timezone handling + tzTest := func(dbt *DBTest) { + + // Create table + dbt.mustExec("CREATE TABLE test (ts TIMESTAMP)") + + // Insert local time into database (should be converted) + usCentral, _ := time.LoadLocation("US/Central") + reftime := time.Date(2014, 05, 30, 18, 03, 17, 0, time.UTC).In(usCentral) + dbt.mustExec("INSERT INTO test VALUE (?)", reftime) + + // 
Retrieve time from DB + rows := dbt.mustQuery("SELECT ts FROM test") + if !rows.Next() { + dbt.Fatal("did not get any rows out") + } + + var dbTime time.Time + err := rows.Scan(&dbTime) + if err != nil { + dbt.Fatal("Err", err) + } + + // Check that dates match + if reftime.Unix() != dbTime.Unix() { + dbt.Errorf("times do not match.\n") + dbt.Errorf(" Now(%v)=%v\n", usCentral, reftime) + dbt.Errorf(" Now(UTC)=%v\n", dbTime) + } + } + + for _, tz := range zones { + runTests(t, dsn+"&parseTime=true&loc="+url.QueryEscape(tz), tzTest) + } +} + +// Special cases + +func TestRowsClose(t *testing.T) { + runTests(t, dsn, func(dbt *DBTest) { + rows, err := dbt.db.Query("SELECT 1") + if err != nil { + dbt.Fatal(err) + } + + err = rows.Close() + if err != nil { + dbt.Fatal(err) + } + + if rows.Next() { + dbt.Fatal("unexpected row after rows.Close()") + } + + err = rows.Err() + if err != nil { + dbt.Fatal(err) + } + }) +} + +// dangling statements +// http://code.google.com/p/go/issues/detail?id=3865 +func TestCloseStmtBeforeRows(t *testing.T) { + runTests(t, dsn, func(dbt *DBTest) { + stmt, err := dbt.db.Prepare("SELECT 1") + if err != nil { + dbt.Fatal(err) + } + + rows, err := stmt.Query() + if err != nil { + stmt.Close() + dbt.Fatal(err) + } + defer rows.Close() + + err = stmt.Close() + if err != nil { + dbt.Fatal(err) + } + + if !rows.Next() { + dbt.Fatal("getting row failed") + } else { + err = rows.Err() + if err != nil { + dbt.Fatal(err) + } + + var out bool + err = rows.Scan(&out) + if err != nil { + dbt.Fatalf("error on rows.Scan(): %s", err.Error()) + } + if out != true { + dbt.Errorf("true != %t", out) + } + } + }) +} + +// It is valid to have multiple Rows for the same Stmt +// http://code.google.com/p/go/issues/detail?id=3734 +func TestStmtMultiRows(t *testing.T) { + runTests(t, dsn, func(dbt *DBTest) { + stmt, err := dbt.db.Prepare("SELECT 1 UNION SELECT 0") + if err != nil { + dbt.Fatal(err) + } + + rows1, err := stmt.Query() + if err != nil { + stmt.Close() + dbt.Fatal(err) + } + defer rows1.Close() + + rows2, err := stmt.Query() + if err != nil { + stmt.Close() + dbt.Fatal(err) + } + defer rows2.Close() + + var out bool + + // 1 + if !rows1.Next() { + dbt.Fatal("first rows1.Next failed") + } else { + err = rows1.Err() + if err != nil { + dbt.Fatal(err) + } + + err = rows1.Scan(&out) + if err != nil { + dbt.Fatalf("error on rows.Scan(): %s", err.Error()) + } + if out != true { + dbt.Errorf("true != %t", out) + } + } + + if !rows2.Next() { + dbt.Fatal("first rows2.Next failed") + } else { + err = rows2.Err() + if err != nil { + dbt.Fatal(err) + } + + err = rows2.Scan(&out) + if err != nil { + dbt.Fatalf("error on rows.Scan(): %s", err.Error()) + } + if out != true { + dbt.Errorf("true != %t", out) + } + } + + // 2 + if !rows1.Next() { + dbt.Fatal("second rows1.Next failed") + } else { + err = rows1.Err() + if err != nil { + dbt.Fatal(err) + } + + err = rows1.Scan(&out) + if err != nil { + dbt.Fatalf("error on rows.Scan(): %s", err.Error()) + } + if out != false { + dbt.Errorf("false != %t", out) + } + + if rows1.Next() { + dbt.Fatal("unexpected row on rows1") + } + err = rows1.Close() + if err != nil { + dbt.Fatal(err) + } + } + + if !rows2.Next() { + dbt.Fatal("second rows2.Next failed") + } else { + err = rows2.Err() + if err != nil { + dbt.Fatal(err) + } + + err = rows2.Scan(&out) + if err != nil { + dbt.Fatalf("error on rows.Scan(): %s", err.Error()) + } + if out != false { + dbt.Errorf("false != %t", out) + } + + if rows2.Next() { + dbt.Fatal("unexpected row on rows2") + } + err = 
rows2.Close() + if err != nil { + dbt.Fatal(err) + } + } + }) +} + +// Regression test for +// * more than 32 NULL parameters (issue 209) +// * more parameters than fit into the buffer (issue 201) +func TestPreparedManyCols(t *testing.T) { + const numParams = defaultBufSize + runTests(t, dsn, func(dbt *DBTest) { + query := "SELECT ?" + strings.Repeat(",?", numParams-1) + stmt, err := dbt.db.Prepare(query) + if err != nil { + dbt.Fatal(err) + } + defer stmt.Close() + // create more parameters than fit into the buffer + // which will take nil-values + params := make([]interface{}, numParams) + rows, err := stmt.Query(params...) + if err != nil { + stmt.Close() + dbt.Fatal(err) + } + defer rows.Close() + }) +} + +func TestConcurrent(t *testing.T) { + if enabled, _ := readBool(os.Getenv("MYSQL_TEST_CONCURRENT")); !enabled { + t.Skip("MYSQL_TEST_CONCURRENT env var not set") + } + + runTests(t, dsn, func(dbt *DBTest) { + var max int + err := dbt.db.QueryRow("SELECT @@max_connections").Scan(&max) + if err != nil { + dbt.Fatalf("%s", err.Error()) + } + dbt.Logf("testing up to %d concurrent connections \r\n", max) + + var remaining, succeeded int32 = int32(max), 0 + + var wg sync.WaitGroup + wg.Add(max) + + var fatalError string + var once sync.Once + fatalf := func(s string, vals ...interface{}) { + once.Do(func() { + fatalError = fmt.Sprintf(s, vals...) + }) + } + + for i := 0; i < max; i++ { + go func(id int) { + defer wg.Done() + + tx, err := dbt.db.Begin() + atomic.AddInt32(&remaining, -1) + + if err != nil { + if err.Error() != "Error 1040: Too many connections" { + fatalf("error on conn %d: %s", id, err.Error()) + } + return + } + + // keep the connection busy until all connections are open + for remaining > 0 { + if _, err = tx.Exec("DO 1"); err != nil { + fatalf("error on conn %d: %s", id, err.Error()) + return + } + } + + if err = tx.Commit(); err != nil { + fatalf("error on conn %d: %s", id, err.Error()) + return + } + + // everything went fine with this connection + atomic.AddInt32(&succeeded, 1) + }(i) + } + + // wait until all conections are open + wg.Wait() + + if fatalError != "" { + dbt.Fatal(fatalError) + } + + dbt.Logf("reached %d concurrent connections\r\n", succeeded) + }) +} + +// Tests custom dial functions +func TestCustomDial(t *testing.T) { + if !available { + t.Skipf("MySQL server not running on %s", netAddr) + } + + // our custom dial function which justs wraps net.Dial here + RegisterDial("mydial", func(addr string) (net.Conn, error) { + return net.Dial(prot, addr) + }) + + db, err := sql.Open("mysql", fmt.Sprintf("%s:%s@mydial(%s)/%s?timeout=30s&strict=true", user, pass, addr, dbname)) + if err != nil { + t.Fatalf("error connecting: %s", err.Error()) + } + defer db.Close() + + if _, err = db.Exec("DO 1"); err != nil { + t.Fatalf("connection failed: %s", err.Error()) + } +} + +func TestSQLInjection(t *testing.T) { + createTest := func(arg string) func(dbt *DBTest) { + return func(dbt *DBTest) { + dbt.mustExec("CREATE TABLE test (v INTEGER)") + dbt.mustExec("INSERT INTO test VALUES (?)", 1) + + var v int + // NULL can't be equal to anything, the idea here is to inject query so it returns row + // This test verifies that escapeQuotes and escapeBackslash are working properly + err := dbt.db.QueryRow("SELECT v FROM test WHERE NULL = ?", arg).Scan(&v) + if err == sql.ErrNoRows { + return // success, sql injection failed + } else if err == nil { + dbt.Errorf("sql injection successful with arg: %s", arg) + } else { + dbt.Errorf("error running query with arg: %s; err: %s", arg, 
err.Error()) + } + } + } + + dsns := []string{ + dsn, + dsn + "&sql_mode='NO_BACKSLASH_ESCAPES,NO_AUTO_CREATE_USER'", + } + for _, testdsn := range dsns { + runTests(t, testdsn, createTest("1 OR 1=1")) + runTests(t, testdsn, createTest("' OR '1'='1")) + } +} + +// Test if inserted data is correctly retrieved after being escaped +func TestInsertRetrieveEscapedData(t *testing.T) { + testData := func(dbt *DBTest) { + dbt.mustExec("CREATE TABLE test (v VARCHAR(255))") + + // All sequences that are escaped by escapeQuotes and escapeBackslash + v := "foo \x00\n\r\x1a\"'\\" + dbt.mustExec("INSERT INTO test VALUES (?)", v) + + var out string + err := dbt.db.QueryRow("SELECT v FROM test").Scan(&out) + if err != nil { + dbt.Fatalf("%s", err.Error()) + } + + if out != v { + dbt.Errorf("%q != %q", out, v) + } + } + + dsns := []string{ + dsn, + dsn + "&sql_mode='NO_BACKSLASH_ESCAPES,NO_AUTO_CREATE_USER'", + } + for _, testdsn := range dsns { + runTests(t, testdsn, testData) + } +} + +func TestUnixSocketAuthFail(t *testing.T) { + runTests(t, dsn, func(dbt *DBTest) { + // Save the current logger so we can restore it. + oldLogger := errLog + + // Set a new logger so we can capture its output. + buffer := bytes.NewBuffer(make([]byte, 0, 64)) + newLogger := log.New(buffer, "prefix: ", 0) + SetLogger(newLogger) + + // Restore the logger. + defer SetLogger(oldLogger) + + // Make a new DSN that uses the MySQL socket file and a bad password, which + // we can make by simply appending any character to the real password. + badPass := pass + "x" + socket := "" + if prot == "unix" { + socket = addr + } else { + // Get socket file from MySQL. + err := dbt.db.QueryRow("SELECT @@socket").Scan(&socket) + if err != nil { + t.Fatalf("error on SELECT @@socket: %s", err.Error()) + } + } + t.Logf("socket: %s", socket) + badDSN := fmt.Sprintf("%s:%s@unix(%s)/%s?timeout=30s&strict=true", user, badPass, socket, dbname) + db, err := sql.Open("mysql", badDSN) + if err != nil { + t.Fatalf("error connecting: %s", err.Error()) + } + defer db.Close() + + // Connect to MySQL for real. This will cause an auth failure. + err = db.Ping() + if err == nil { + t.Error("expected Ping() to return an error") + } + + // The driver should not log anything. 
+ if actual := buffer.String(); actual != "" { + t.Errorf("expected no output, got %q", actual) + } + }) +} + +// See Issue #422 +func TestInterruptBySignal(t *testing.T) { + runTestsWithMultiStatement(t, dsn, func(dbt *DBTest) { + dbt.mustExec(` + DROP PROCEDURE IF EXISTS test_signal; + CREATE PROCEDURE test_signal(ret INT) + BEGIN + SELECT ret; + SIGNAL SQLSTATE + '45001' + SET + MESSAGE_TEXT = "an error", + MYSQL_ERRNO = 45001; + END + `) + defer dbt.mustExec("DROP PROCEDURE test_signal") + + var val int + + // text protocol + rows, err := dbt.db.Query("CALL test_signal(42)") + if err != nil { + dbt.Fatalf("error on text query: %s", err.Error()) + } + for rows.Next() { + if err := rows.Scan(&val); err != nil { + dbt.Error(err) + } else if val != 42 { + dbt.Errorf("expected val to be 42") + } + } + + // binary protocol + rows, err = dbt.db.Query("CALL test_signal(?)", 42) + if err != nil { + dbt.Fatalf("error on binary query: %s", err.Error()) + } + for rows.Next() { + if err := rows.Scan(&val); err != nil { + dbt.Error(err) + } else if val != 42 { + dbt.Errorf("expected val to be 42") + } + } + }) +} diff --git a/vendor/github.com/go-sql-driver/mysql/dsn.go b/vendor/github.com/go-sql-driver/mysql/dsn.go new file mode 100644 index 0000000..ac00dce --- /dev/null +++ b/vendor/github.com/go-sql-driver/mysql/dsn.go @@ -0,0 +1,548 @@ +// Go MySQL Driver - A MySQL-Driver for Go's database/sql package +// +// Copyright 2016 The Go-MySQL-Driver Authors. All rights reserved. +// +// This Source Code Form is subject to the terms of the Mozilla Public +// License, v. 2.0. If a copy of the MPL was not distributed with this file, +// You can obtain one at http://mozilla.org/MPL/2.0/. + +package mysql + +import ( + "bytes" + "crypto/tls" + "errors" + "fmt" + "net" + "net/url" + "strconv" + "strings" + "time" +) + +var ( + errInvalidDSNUnescaped = errors.New("invalid DSN: did you forget to escape a param value?") + errInvalidDSNAddr = errors.New("invalid DSN: network address not terminated (missing closing brace)") + errInvalidDSNNoSlash = errors.New("invalid DSN: missing the slash separating the database name") + errInvalidDSNUnsafeCollation = errors.New("invalid DSN: interpolateParams can not be used with unsafe collations") +) + +// Config is a configuration parsed from a DSN string +type Config struct { + User string // Username + Passwd string // Password (requires User) + Net string // Network type + Addr string // Network address (requires Net) + DBName string // Database name + Params map[string]string // Connection parameters + Collation string // Connection collation + Loc *time.Location // Location for time.Time values + MaxAllowedPacket int // Max packet size allowed + TLSConfig string // TLS configuration name + tls *tls.Config // TLS configuration + Timeout time.Duration // Dial timeout + ReadTimeout time.Duration // I/O read timeout + WriteTimeout time.Duration // I/O write timeout + + AllowAllFiles bool // Allow all files to be used with LOAD DATA LOCAL INFILE + AllowCleartextPasswords bool // Allows the cleartext client side plugin + AllowNativePasswords bool // Allows the native password authentication method + AllowOldPasswords bool // Allows the old insecure password method + ClientFoundRows bool // Return number of matching rows instead of rows changed + ColumnsWithAlias bool // Prepend table alias to column names + InterpolateParams bool // Interpolate placeholders into query string + MultiStatements bool // Allow multiple statements in one query + ParseTime bool // Parse time 
values to time.Time + Strict bool // Return warnings as errors +} + +// FormatDSN formats the given Config into a DSN string which can be passed to +// the driver. +func (cfg *Config) FormatDSN() string { + var buf bytes.Buffer + + // [username[:password]@] + if len(cfg.User) > 0 { + buf.WriteString(cfg.User) + if len(cfg.Passwd) > 0 { + buf.WriteByte(':') + buf.WriteString(cfg.Passwd) + } + buf.WriteByte('@') + } + + // [protocol[(address)]] + if len(cfg.Net) > 0 { + buf.WriteString(cfg.Net) + if len(cfg.Addr) > 0 { + buf.WriteByte('(') + buf.WriteString(cfg.Addr) + buf.WriteByte(')') + } + } + + // /dbname + buf.WriteByte('/') + buf.WriteString(cfg.DBName) + + // [?param1=value1&...¶mN=valueN] + hasParam := false + + if cfg.AllowAllFiles { + hasParam = true + buf.WriteString("?allowAllFiles=true") + } + + if cfg.AllowCleartextPasswords { + if hasParam { + buf.WriteString("&allowCleartextPasswords=true") + } else { + hasParam = true + buf.WriteString("?allowCleartextPasswords=true") + } + } + + if cfg.AllowNativePasswords { + if hasParam { + buf.WriteString("&allowNativePasswords=true") + } else { + hasParam = true + buf.WriteString("?allowNativePasswords=true") + } + } + + if cfg.AllowOldPasswords { + if hasParam { + buf.WriteString("&allowOldPasswords=true") + } else { + hasParam = true + buf.WriteString("?allowOldPasswords=true") + } + } + + if cfg.ClientFoundRows { + if hasParam { + buf.WriteString("&clientFoundRows=true") + } else { + hasParam = true + buf.WriteString("?clientFoundRows=true") + } + } + + if col := cfg.Collation; col != defaultCollation && len(col) > 0 { + if hasParam { + buf.WriteString("&collation=") + } else { + hasParam = true + buf.WriteString("?collation=") + } + buf.WriteString(col) + } + + if cfg.ColumnsWithAlias { + if hasParam { + buf.WriteString("&columnsWithAlias=true") + } else { + hasParam = true + buf.WriteString("?columnsWithAlias=true") + } + } + + if cfg.InterpolateParams { + if hasParam { + buf.WriteString("&interpolateParams=true") + } else { + hasParam = true + buf.WriteString("?interpolateParams=true") + } + } + + if cfg.Loc != time.UTC && cfg.Loc != nil { + if hasParam { + buf.WriteString("&loc=") + } else { + hasParam = true + buf.WriteString("?loc=") + } + buf.WriteString(url.QueryEscape(cfg.Loc.String())) + } + + if cfg.MultiStatements { + if hasParam { + buf.WriteString("&multiStatements=true") + } else { + hasParam = true + buf.WriteString("?multiStatements=true") + } + } + + if cfg.ParseTime { + if hasParam { + buf.WriteString("&parseTime=true") + } else { + hasParam = true + buf.WriteString("?parseTime=true") + } + } + + if cfg.ReadTimeout > 0 { + if hasParam { + buf.WriteString("&readTimeout=") + } else { + hasParam = true + buf.WriteString("?readTimeout=") + } + buf.WriteString(cfg.ReadTimeout.String()) + } + + if cfg.Strict { + if hasParam { + buf.WriteString("&strict=true") + } else { + hasParam = true + buf.WriteString("?strict=true") + } + } + + if cfg.Timeout > 0 { + if hasParam { + buf.WriteString("&timeout=") + } else { + hasParam = true + buf.WriteString("?timeout=") + } + buf.WriteString(cfg.Timeout.String()) + } + + if len(cfg.TLSConfig) > 0 { + if hasParam { + buf.WriteString("&tls=") + } else { + hasParam = true + buf.WriteString("?tls=") + } + buf.WriteString(url.QueryEscape(cfg.TLSConfig)) + } + + if cfg.WriteTimeout > 0 { + if hasParam { + buf.WriteString("&writeTimeout=") + } else { + hasParam = true + buf.WriteString("?writeTimeout=") + } + buf.WriteString(cfg.WriteTimeout.String()) + } + + if cfg.MaxAllowedPacket > 0 { 
+ if hasParam { + buf.WriteString("&maxAllowedPacket=") + } else { + hasParam = true + buf.WriteString("?maxAllowedPacket=") + } + buf.WriteString(strconv.Itoa(cfg.MaxAllowedPacket)) + + } + + // other params + if cfg.Params != nil { + for param, value := range cfg.Params { + if hasParam { + buf.WriteByte('&') + } else { + hasParam = true + buf.WriteByte('?') + } + + buf.WriteString(param) + buf.WriteByte('=') + buf.WriteString(url.QueryEscape(value)) + } + } + + return buf.String() +} + +// ParseDSN parses the DSN string to a Config +func ParseDSN(dsn string) (cfg *Config, err error) { + // New config with some default values + cfg = &Config{ + Loc: time.UTC, + Collation: defaultCollation, + } + + // [user[:password]@][net[(addr)]]/dbname[?param1=value1&paramN=valueN] + // Find the last '/' (since the password or the net addr might contain a '/') + foundSlash := false + for i := len(dsn) - 1; i >= 0; i-- { + if dsn[i] == '/' { + foundSlash = true + var j, k int + + // left part is empty if i <= 0 + if i > 0 { + // [username[:password]@][protocol[(address)]] + // Find the last '@' in dsn[:i] + for j = i; j >= 0; j-- { + if dsn[j] == '@' { + // username[:password] + // Find the first ':' in dsn[:j] + for k = 0; k < j; k++ { + if dsn[k] == ':' { + cfg.Passwd = dsn[k+1 : j] + break + } + } + cfg.User = dsn[:k] + + break + } + } + + // [protocol[(address)]] + // Find the first '(' in dsn[j+1:i] + for k = j + 1; k < i; k++ { + if dsn[k] == '(' { + // dsn[i-1] must be == ')' if an address is specified + if dsn[i-1] != ')' { + if strings.ContainsRune(dsn[k+1:i], ')') { + return nil, errInvalidDSNUnescaped + } + return nil, errInvalidDSNAddr + } + cfg.Addr = dsn[k+1 : i-1] + break + } + } + cfg.Net = dsn[j+1 : k] + } + + // dbname[?param1=value1&...&paramN=valueN] + // Find the first '?' in dsn[i+1:] + for j = i + 1; j < len(dsn); j++ { + if dsn[j] == '?' 
{ + if err = parseDSNParams(cfg, dsn[j+1:]); err != nil { + return + } + break + } + } + cfg.DBName = dsn[i+1 : j] + + break + } + } + + if !foundSlash && len(dsn) > 0 { + return nil, errInvalidDSNNoSlash + } + + if cfg.InterpolateParams && unsafeCollations[cfg.Collation] { + return nil, errInvalidDSNUnsafeCollation + } + + // Set default network if empty + if cfg.Net == "" { + cfg.Net = "tcp" + } + + // Set default address if empty + if cfg.Addr == "" { + switch cfg.Net { + case "tcp": + cfg.Addr = "127.0.0.1:3306" + case "unix": + cfg.Addr = "/tmp/mysql.sock" + default: + return nil, errors.New("default addr for network '" + cfg.Net + "' unknown") + } + + } + + return +} + +// parseDSNParams parses the DSN "query string" +// Values must be url.QueryEscape'ed +func parseDSNParams(cfg *Config, params string) (err error) { + for _, v := range strings.Split(params, "&") { + param := strings.SplitN(v, "=", 2) + if len(param) != 2 { + continue + } + + // cfg params + switch value := param[1]; param[0] { + + // Disable INFILE whitelist / enable all files + case "allowAllFiles": + var isBool bool + cfg.AllowAllFiles, isBool = readBool(value) + if !isBool { + return errors.New("invalid bool value: " + value) + } + + // Use cleartext authentication mode (MySQL 5.5.10+) + case "allowCleartextPasswords": + var isBool bool + cfg.AllowCleartextPasswords, isBool = readBool(value) + if !isBool { + return errors.New("invalid bool value: " + value) + } + + // Use native password authentication + case "allowNativePasswords": + var isBool bool + cfg.AllowNativePasswords, isBool = readBool(value) + if !isBool { + return errors.New("invalid bool value: " + value) + } + + // Use old authentication mode (pre MySQL 4.1) + case "allowOldPasswords": + var isBool bool + cfg.AllowOldPasswords, isBool = readBool(value) + if !isBool { + return errors.New("invalid bool value: " + value) + } + + // Switch "rowsAffected" mode + case "clientFoundRows": + var isBool bool + cfg.ClientFoundRows, isBool = readBool(value) + if !isBool { + return errors.New("invalid bool value: " + value) + } + + // Collation + case "collation": + cfg.Collation = value + break + + case "columnsWithAlias": + var isBool bool + cfg.ColumnsWithAlias, isBool = readBool(value) + if !isBool { + return errors.New("invalid bool value: " + value) + } + + // Compression + case "compress": + return errors.New("compression not implemented yet") + + // Enable client side placeholder substitution + case "interpolateParams": + var isBool bool + cfg.InterpolateParams, isBool = readBool(value) + if !isBool { + return errors.New("invalid bool value: " + value) + } + + // Time Location + case "loc": + if value, err = url.QueryUnescape(value); err != nil { + return + } + cfg.Loc, err = time.LoadLocation(value) + if err != nil { + return + } + + // multiple statements in one query + case "multiStatements": + var isBool bool + cfg.MultiStatements, isBool = readBool(value) + if !isBool { + return errors.New("invalid bool value: " + value) + } + + // time.Time parsing + case "parseTime": + var isBool bool + cfg.ParseTime, isBool = readBool(value) + if !isBool { + return errors.New("invalid bool value: " + value) + } + + // I/O read Timeout + case "readTimeout": + cfg.ReadTimeout, err = time.ParseDuration(value) + if err != nil { + return + } + + // Strict mode + case "strict": + var isBool bool + cfg.Strict, isBool = readBool(value) + if !isBool { + return errors.New("invalid bool value: " + value) + } + + // Dial Timeout + case "timeout": + cfg.Timeout, err = 
time.ParseDuration(value) + if err != nil { + return + } + + // TLS-Encryption + case "tls": + boolValue, isBool := readBool(value) + if isBool { + if boolValue { + cfg.TLSConfig = "true" + cfg.tls = &tls.Config{} + } else { + cfg.TLSConfig = "false" + } + } else if vl := strings.ToLower(value); vl == "skip-verify" { + cfg.TLSConfig = vl + cfg.tls = &tls.Config{InsecureSkipVerify: true} + } else { + name, err := url.QueryUnescape(value) + if err != nil { + return fmt.Errorf("invalid value for TLS config name: %v", err) + } + + if tlsConfig, ok := tlsConfigRegister[name]; ok { + if len(tlsConfig.ServerName) == 0 && !tlsConfig.InsecureSkipVerify { + host, _, err := net.SplitHostPort(cfg.Addr) + if err == nil { + tlsConfig.ServerName = host + } + } + + cfg.TLSConfig = name + cfg.tls = tlsConfig + } else { + return errors.New("invalid value / unknown config name: " + name) + } + } + + // I/O write Timeout + case "writeTimeout": + cfg.WriteTimeout, err = time.ParseDuration(value) + if err != nil { + return + } + case "maxAllowedPacket": + cfg.MaxAllowedPacket, err = strconv.Atoi(value) + if err != nil { + return + } + default: + // lazy init + if cfg.Params == nil { + cfg.Params = make(map[string]string) + } + + if cfg.Params[param[0]], err = url.QueryUnescape(value); err != nil { + return + } + } + } + + return +} diff --git a/vendor/github.com/go-sql-driver/mysql/dsn_test.go b/vendor/github.com/go-sql-driver/mysql/dsn_test.go new file mode 100644 index 0000000..0693192 --- /dev/null +++ b/vendor/github.com/go-sql-driver/mysql/dsn_test.go @@ -0,0 +1,231 @@ +// Go MySQL Driver - A MySQL-Driver for Go's database/sql package +// +// Copyright 2016 The Go-MySQL-Driver Authors. All rights reserved. +// +// This Source Code Form is subject to the terms of the Mozilla Public +// License, v. 2.0. If a copy of the MPL was not distributed with this file, +// You can obtain one at http://mozilla.org/MPL/2.0/. 
+ +package mysql + +import ( + "crypto/tls" + "fmt" + "net/url" + "reflect" + "testing" + "time" +) + +var testDSNs = []struct { + in string + out *Config +}{{ + "username:password@protocol(address)/dbname?param=value", + &Config{User: "username", Passwd: "password", Net: "protocol", Addr: "address", DBName: "dbname", Params: map[string]string{"param": "value"}, Collation: "utf8_general_ci", Loc: time.UTC}, +}, { + "username:password@protocol(address)/dbname?param=value&columnsWithAlias=true", + &Config{User: "username", Passwd: "password", Net: "protocol", Addr: "address", DBName: "dbname", Params: map[string]string{"param": "value"}, Collation: "utf8_general_ci", Loc: time.UTC, ColumnsWithAlias: true}, +}, { + "username:password@protocol(address)/dbname?param=value&columnsWithAlias=true&multiStatements=true", + &Config{User: "username", Passwd: "password", Net: "protocol", Addr: "address", DBName: "dbname", Params: map[string]string{"param": "value"}, Collation: "utf8_general_ci", Loc: time.UTC, ColumnsWithAlias: true, MultiStatements: true}, +}, { + "user@unix(/path/to/socket)/dbname?charset=utf8", + &Config{User: "user", Net: "unix", Addr: "/path/to/socket", DBName: "dbname", Params: map[string]string{"charset": "utf8"}, Collation: "utf8_general_ci", Loc: time.UTC}, +}, { + "user:password@tcp(localhost:5555)/dbname?charset=utf8&tls=true", + &Config{User: "user", Passwd: "password", Net: "tcp", Addr: "localhost:5555", DBName: "dbname", Params: map[string]string{"charset": "utf8"}, Collation: "utf8_general_ci", Loc: time.UTC, TLSConfig: "true"}, +}, { + "user:password@tcp(localhost:5555)/dbname?charset=utf8mb4,utf8&tls=skip-verify", + &Config{User: "user", Passwd: "password", Net: "tcp", Addr: "localhost:5555", DBName: "dbname", Params: map[string]string{"charset": "utf8mb4,utf8"}, Collation: "utf8_general_ci", Loc: time.UTC, TLSConfig: "skip-verify"}, +}, { + "user:password@/dbname?loc=UTC&timeout=30s&readTimeout=1s&writeTimeout=1s&allowAllFiles=1&clientFoundRows=true&allowOldPasswords=TRUE&collation=utf8mb4_unicode_ci&maxAllowedPacket=16777216", + &Config{User: "user", Passwd: "password", Net: "tcp", Addr: "127.0.0.1:3306", DBName: "dbname", Collation: "utf8mb4_unicode_ci", Loc: time.UTC, Timeout: 30 * time.Second, ReadTimeout: time.Second, WriteTimeout: time.Second, AllowAllFiles: true, AllowOldPasswords: true, ClientFoundRows: true, MaxAllowedPacket: 16777216}, +}, { + "user:p@ss(word)@tcp([de:ad:be:ef::ca:fe]:80)/dbname?loc=Local", + &Config{User: "user", Passwd: "p@ss(word)", Net: "tcp", Addr: "[de:ad:be:ef::ca:fe]:80", DBName: "dbname", Collation: "utf8_general_ci", Loc: time.Local}, +}, { + "/dbname", + &Config{Net: "tcp", Addr: "127.0.0.1:3306", DBName: "dbname", Collation: "utf8_general_ci", Loc: time.UTC}, +}, { + "@/", + &Config{Net: "tcp", Addr: "127.0.0.1:3306", Collation: "utf8_general_ci", Loc: time.UTC}, +}, { + "/", + &Config{Net: "tcp", Addr: "127.0.0.1:3306", Collation: "utf8_general_ci", Loc: time.UTC}, +}, { + "", + &Config{Net: "tcp", Addr: "127.0.0.1:3306", Collation: "utf8_general_ci", Loc: time.UTC}, +}, { + "user:p@/ssword@/", + &Config{User: "user", Passwd: "p@/ssword", Net: "tcp", Addr: "127.0.0.1:3306", Collation: "utf8_general_ci", Loc: time.UTC}, +}, { + "unix/?arg=%2Fsome%2Fpath.ext", + &Config{Net: "unix", Addr: "/tmp/mysql.sock", Params: map[string]string{"arg": "/some/path.ext"}, Collation: "utf8_general_ci", Loc: time.UTC}, +}} + +func TestDSNParser(t *testing.T) { + for i, tst := range testDSNs { + cfg, err := ParseDSN(tst.in) + if err != nil { + 
t.Error(err.Error()) + } + + // pointer not static + cfg.tls = nil + + if !reflect.DeepEqual(cfg, tst.out) { + t.Errorf("%d. ParseDSN(%q) mismatch:\ngot %+v\nwant %+v", i, tst.in, cfg, tst.out) + } + } +} + +func TestDSNParserInvalid(t *testing.T) { + var invalidDSNs = []string{ + "@net(addr/", // no closing brace + "@tcp(/", // no closing brace + "tcp(/", // no closing brace + "(/", // no closing brace + "net(addr)//", // unescaped + "User:pass@tcp(1.2.3.4:3306)", // no trailing slash + //"/dbname?arg=/some/unescaped/path", + } + + for i, tst := range invalidDSNs { + if _, err := ParseDSN(tst); err == nil { + t.Errorf("invalid DSN #%d. (%s) didn't error!", i, tst) + } + } +} + +func TestDSNReformat(t *testing.T) { + for i, tst := range testDSNs { + dsn1 := tst.in + cfg1, err := ParseDSN(dsn1) + if err != nil { + t.Error(err.Error()) + continue + } + cfg1.tls = nil // pointer not static + res1 := fmt.Sprintf("%+v", cfg1) + + dsn2 := cfg1.FormatDSN() + cfg2, err := ParseDSN(dsn2) + if err != nil { + t.Error(err.Error()) + continue + } + cfg2.tls = nil // pointer not static + res2 := fmt.Sprintf("%+v", cfg2) + + if res1 != res2 { + t.Errorf("%d. %q does not match %q", i, res2, res1) + } + } +} + +func TestDSNWithCustomTLS(t *testing.T) { + baseDSN := "User:password@tcp(localhost:5555)/dbname?tls=" + tlsCfg := tls.Config{} + + RegisterTLSConfig("utils_test", &tlsCfg) + + // Custom TLS is missing + tst := baseDSN + "invalid_tls" + cfg, err := ParseDSN(tst) + if err == nil { + t.Errorf("invalid custom TLS in DSN (%s) but did not error. Got config: %#v", tst, cfg) + } + + tst = baseDSN + "utils_test" + + // Custom TLS with a server name + name := "foohost" + tlsCfg.ServerName = name + cfg, err = ParseDSN(tst) + + if err != nil { + t.Error(err.Error()) + } else if cfg.tls.ServerName != name { + t.Errorf("did not get the correct TLS ServerName (%s) parsing DSN (%s).", name, tst) + } + + // Custom TLS without a server name + name = "localhost" + tlsCfg.ServerName = "" + cfg, err = ParseDSN(tst) + + if err != nil { + t.Error(err.Error()) + } else if cfg.tls.ServerName != name { + t.Errorf("did not get the correct ServerName (%s) parsing DSN (%s).", name, tst) + } + + DeregisterTLSConfig("utils_test") +} + +func TestDSNWithCustomTLSQueryEscape(t *testing.T) { + const configKey = "&%!:" + dsn := "User:password@tcp(localhost:5555)/dbname?tls=" + url.QueryEscape(configKey) + name := "foohost" + tlsCfg := tls.Config{ServerName: name} + + RegisterTLSConfig(configKey, &tlsCfg) + + cfg, err := ParseDSN(dsn) + + if err != nil { + t.Error(err.Error()) + } else if cfg.tls.ServerName != name { + t.Errorf("did not get the correct TLS ServerName (%s) parsing DSN (%s).", name, dsn) + } +} + +func TestDSNUnsafeCollation(t *testing.T) { + _, err := ParseDSN("/dbname?collation=gbk_chinese_ci&interpolateParams=true") + if err != errInvalidDSNUnsafeCollation { + t.Errorf("expected %v, got %v", errInvalidDSNUnsafeCollation, err) + } + + _, err = ParseDSN("/dbname?collation=gbk_chinese_ci&interpolateParams=false") + if err != nil { + t.Errorf("expected %v, got %v", nil, err) + } + + _, err = ParseDSN("/dbname?collation=gbk_chinese_ci") + if err != nil { + t.Errorf("expected %v, got %v", nil, err) + } + + _, err = ParseDSN("/dbname?collation=ascii_bin&interpolateParams=true") + if err != nil { + t.Errorf("expected %v, got %v", nil, err) + } + + _, err = ParseDSN("/dbname?collation=latin1_german1_ci&interpolateParams=true") + if err != nil { + t.Errorf("expected %v, got %v", nil, err) + } + + _, err = 
ParseDSN("/dbname?collation=utf8_general_ci&interpolateParams=true") + if err != nil { + t.Errorf("expected %v, got %v", nil, err) + } + + _, err = ParseDSN("/dbname?collation=utf8mb4_general_ci&interpolateParams=true") + if err != nil { + t.Errorf("expected %v, got %v", nil, err) + } +} + +func BenchmarkParseDSN(b *testing.B) { + b.ReportAllocs() + + for i := 0; i < b.N; i++ { + for _, tst := range testDSNs { + if _, err := ParseDSN(tst.in); err != nil { + b.Error(err.Error()) + } + } + } +} diff --git a/vendor/github.com/go-sql-driver/mysql/errors.go b/vendor/github.com/go-sql-driver/mysql/errors.go new file mode 100644 index 0000000..857854e --- /dev/null +++ b/vendor/github.com/go-sql-driver/mysql/errors.go @@ -0,0 +1,132 @@ +// Go MySQL Driver - A MySQL-Driver for Go's database/sql package +// +// Copyright 2013 The Go-MySQL-Driver Authors. All rights reserved. +// +// This Source Code Form is subject to the terms of the Mozilla Public +// License, v. 2.0. If a copy of the MPL was not distributed with this file, +// You can obtain one at http://mozilla.org/MPL/2.0/. + +package mysql + +import ( + "database/sql/driver" + "errors" + "fmt" + "io" + "log" + "os" +) + +// Various errors the driver might return. Can change between driver versions. +var ( + ErrInvalidConn = errors.New("invalid connection") + ErrMalformPkt = errors.New("malformed packet") + ErrNoTLS = errors.New("TLS requested but server does not support TLS") + ErrCleartextPassword = errors.New("this user requires clear text authentication. If you still want to use it, please add 'allowCleartextPasswords=1' to your DSN") + ErrNativePassword = errors.New("this user requires mysql native password authentication.") + ErrOldPassword = errors.New("this user requires old password authentication. If you still want to use it, please add 'allowOldPasswords=1' to your DSN. See also https://github.com/go-sql-driver/mysql/wiki/old_passwords") + ErrUnknownPlugin = errors.New("this authentication plugin is not supported") + ErrOldProtocol = errors.New("MySQL server does not support required protocol 41+") + ErrPktSync = errors.New("commands out of sync. You can't run this command now") + ErrPktSyncMul = errors.New("commands out of sync. Did you run multiple statements at once?") + ErrPktTooLarge = errors.New("packet for query is too large. Try adjusting the 'max_allowed_packet' variable on the server") + ErrBusyBuffer = errors.New("busy buffer") +) + +var errLog = Logger(log.New(os.Stderr, "[mysql] ", log.Ldate|log.Ltime|log.Lshortfile)) + +// Logger is used to log critical error messages. +type Logger interface { + Print(v ...interface{}) +} + +// SetLogger is used to set the logger for critical errors. +// The initial logger is os.Stderr. 
+func SetLogger(logger Logger) error { + if logger == nil { + return errors.New("logger is nil") + } + errLog = logger + return nil +} + +// MySQLError is an error type which represents a single MySQL error +type MySQLError struct { + Number uint16 + Message string +} + +func (me *MySQLError) Error() string { + return fmt.Sprintf("Error %d: %s", me.Number, me.Message) +} + +// MySQLWarnings is an error type which represents a group of one or more MySQL +// warnings +type MySQLWarnings []MySQLWarning + +func (mws MySQLWarnings) Error() string { + var msg string + for i, warning := range mws { + if i > 0 { + msg += "\r\n" + } + msg += fmt.Sprintf( + "%s %s: %s", + warning.Level, + warning.Code, + warning.Message, + ) + } + return msg +} + +// MySQLWarning is an error type which represents a single MySQL warning. +// Warnings are returned in groups only. See MySQLWarnings +type MySQLWarning struct { + Level string + Code string + Message string +} + +func (mc *mysqlConn) getWarnings() (err error) { + rows, err := mc.Query("SHOW WARNINGS", nil) + if err != nil { + return + } + + var warnings = MySQLWarnings{} + var values = make([]driver.Value, 3) + + for { + err = rows.Next(values) + switch err { + case nil: + warning := MySQLWarning{} + + if raw, ok := values[0].([]byte); ok { + warning.Level = string(raw) + } else { + warning.Level = fmt.Sprintf("%s", values[0]) + } + if raw, ok := values[1].([]byte); ok { + warning.Code = string(raw) + } else { + warning.Code = fmt.Sprintf("%s", values[1]) + } + if raw, ok := values[2].([]byte); ok { + warning.Message = string(raw) + } else { + warning.Message = fmt.Sprintf("%s", values[0]) + } + + warnings = append(warnings, warning) + + case io.EOF: + return warnings + + default: + rows.Close() + return + } + } +} diff --git a/vendor/github.com/go-sql-driver/mysql/errors_test.go b/vendor/github.com/go-sql-driver/mysql/errors_test.go new file mode 100644 index 0000000..96f9126 --- /dev/null +++ b/vendor/github.com/go-sql-driver/mysql/errors_test.go @@ -0,0 +1,42 @@ +// Go MySQL Driver - A MySQL-Driver for Go's database/sql package +// +// Copyright 2013 The Go-MySQL-Driver Authors. All rights reserved. +// +// This Source Code Form is subject to the terms of the Mozilla Public +// License, v. 2.0. If a copy of the MPL was not distributed with this file, +// You can obtain one at http://mozilla.org/MPL/2.0/. + +package mysql + +import ( + "bytes" + "log" + "testing" +) + +func TestErrorsSetLogger(t *testing.T) { + previous := errLog + defer func() { + errLog = previous + }() + + // set up logger + const expected = "prefix: test\n" + buffer := bytes.NewBuffer(make([]byte, 0, 64)) + logger := log.New(buffer, "prefix: ", 0) + + // print + SetLogger(logger) + errLog.Print("test") + + // check result + if actual := buffer.String(); actual != expected { + t.Errorf("expected %q, got %q", expected, actual) + } +} + +func TestErrorsStrictIgnoreNotes(t *testing.T) { + runTests(t, dsn+"&sql_notes=false", func(dbt *DBTest) { + dbt.mustExec("DROP TABLE IF EXISTS does_not_exist") + }) +} diff --git a/vendor/github.com/go-sql-driver/mysql/infile.go b/vendor/github.com/go-sql-driver/mysql/infile.go new file mode 100644 index 0000000..547357c --- /dev/null +++ b/vendor/github.com/go-sql-driver/mysql/infile.go @@ -0,0 +1,182 @@ +// Go MySQL Driver - A MySQL-Driver for Go's database/sql package +// +// Copyright 2013 The Go-MySQL-Driver Authors. All rights reserved. +// +// This Source Code Form is subject to the terms of the Mozilla Public +// License, v. 2.0. 
If a copy of the MPL was not distributed with this file, +// You can obtain one at http://mozilla.org/MPL/2.0/. + +package mysql + +import ( + "fmt" + "io" + "os" + "strings" + "sync" +) + +var ( + fileRegister map[string]bool + fileRegisterLock sync.RWMutex + readerRegister map[string]func() io.Reader + readerRegisterLock sync.RWMutex +) + +// RegisterLocalFile adds the given file to the file whitelist, +// so that it can be used by "LOAD DATA LOCAL INFILE <filepath>". +// Alternatively you can allow the use of all local files with +// the DSN parameter 'allowAllFiles=true' +// +// filePath := "/home/gopher/data.csv" +// mysql.RegisterLocalFile(filePath) +// err := db.Exec("LOAD DATA LOCAL INFILE '" + filePath + "' INTO TABLE foo") +// if err != nil { +// ... +// +func RegisterLocalFile(filePath string) { + fileRegisterLock.Lock() + // lazy map init + if fileRegister == nil { + fileRegister = make(map[string]bool) + } + + fileRegister[strings.Trim(filePath, `"`)] = true + fileRegisterLock.Unlock() +} + +// DeregisterLocalFile removes the given filepath from the whitelist. +func DeregisterLocalFile(filePath string) { + fileRegisterLock.Lock() + delete(fileRegister, strings.Trim(filePath, `"`)) + fileRegisterLock.Unlock() +} + +// RegisterReaderHandler registers a handler function which is used +// to receive an io.Reader. +// The Reader can be used by "LOAD DATA LOCAL INFILE Reader::<name>". +// If the handler returns an io.ReadCloser, Close() is called when the +// request is finished. +// +// mysql.RegisterReaderHandler("data", func() io.Reader { +// var csvReader io.Reader // Some Reader that returns CSV data +// ... // Open Reader here +// return csvReader +// }) +// err := db.Exec("LOAD DATA LOCAL INFILE 'Reader::data' INTO TABLE foo") +// if err != nil { +// ... +// +func RegisterReaderHandler(name string, handler func() io.Reader) { + readerRegisterLock.Lock() + // lazy map init + if readerRegister == nil { + readerRegister = make(map[string]func() io.Reader) + } + + readerRegister[name] = handler + readerRegisterLock.Unlock() +} + +// DeregisterReaderHandler removes the ReaderHandler function with +// the given name from the registry. +func DeregisterReaderHandler(name string) { + readerRegisterLock.Lock() + delete(readerRegister, name) + readerRegisterLock.Unlock() +} + +func deferredClose(err *error, closer io.Closer) { + closeErr := closer.Close() + if *err == nil { + *err = closeErr + } +} + +func (mc *mysqlConn) handleInFileRequest(name string) (err error) { + var rdr io.Reader + var data []byte + packetSize := 16 * 1024 // 16KB is small enough for disk readahead and large enough for TCP + if mc.maxWriteSize < packetSize { + packetSize = mc.maxWriteSize + } + + if idx := strings.Index(name, "Reader::"); idx == 0 || (idx > 0 && name[idx-1] == '/') { // io.Reader + // The server might return an absolute path. See issue #355. 
+ name = name[idx+8:] + + readerRegisterLock.RLock() + handler, inMap := readerRegister[name] + readerRegisterLock.RUnlock() + + if inMap { + rdr = handler() + if rdr != nil { + if cl, ok := rdr.(io.Closer); ok { + defer deferredClose(&err, cl) + } + } else { + err = fmt.Errorf("Reader '%s' is <nil>", name) + } + } else { + err = fmt.Errorf("Reader '%s' is not registered", name) + } + } else { // File + name = strings.Trim(name, `"`) + fileRegisterLock.RLock() + fr := fileRegister[name] + fileRegisterLock.RUnlock() + if mc.cfg.AllowAllFiles || fr { + var file *os.File + var fi os.FileInfo + + if file, err = os.Open(name); err == nil { + defer deferredClose(&err, file) + + // get file size + if fi, err = file.Stat(); err == nil { + rdr = file + if fileSize := int(fi.Size()); fileSize < packetSize { + packetSize = fileSize + } + } + } + } else { + err = fmt.Errorf("local file '%s' is not registered", name) + } + } + + // send content packets + if err == nil { + data := make([]byte, 4+packetSize) + var n int + for err == nil { + n, err = rdr.Read(data[4:]) + if n > 0 { + if ioErr := mc.writePacket(data[:4+n]); ioErr != nil { + return ioErr + } + } + } + if err == io.EOF { + err = nil + } + } + + // send empty packet (termination) + if data == nil { + data = make([]byte, 4) + } + if ioErr := mc.writePacket(data[:4]); ioErr != nil { + return ioErr + } + + // read OK packet + if err == nil { + _, err = mc.readResultOK() + return err + } + + mc.readPacket() + return err +} diff --git a/vendor/github.com/go-sql-driver/mysql/packets.go b/vendor/github.com/go-sql-driver/mysql/packets.go new file mode 100644 index 0000000..aafe979 --- /dev/null +++ b/vendor/github.com/go-sql-driver/mysql/packets.go @@ -0,0 +1,1287 @@ +// Go MySQL Driver - A MySQL-Driver for Go's database/sql package +// +// Copyright 2012 The Go-MySQL-Driver Authors. All rights reserved. +// +// This Source Code Form is subject to the terms of the Mozilla Public +// License, v. 2.0. If a copy of the MPL was not distributed with this file, +// You can obtain one at http://mozilla.org/MPL/2.0/. 
+ +package mysql + +import ( + "bytes" + "crypto/tls" + "database/sql/driver" + "encoding/binary" + "errors" + "fmt" + "io" + "math" + "time" +) + +// Packets documentation: +// http://dev.mysql.com/doc/internals/en/client-server-protocol.html + +// Read packet to buffer 'data' +func (mc *mysqlConn) readPacket() ([]byte, error) { + var prevData []byte + for { + // read packet header + data, err := mc.buf.readNext(4) + if err != nil { + errLog.Print(err) + mc.Close() + return nil, driver.ErrBadConn + } + + // packet length [24 bit] + pktLen := int(uint32(data[0]) | uint32(data[1])<<8 | uint32(data[2])<<16) + + // check packet sync [8 bit] + if data[3] != mc.sequence { + if data[3] > mc.sequence { + return nil, ErrPktSyncMul + } + return nil, ErrPktSync + } + mc.sequence++ + + // packets with length 0 terminate a previous packet which is a + // multiple of (2^24)−1 bytes long + if pktLen == 0 { + // there was no previous packet + if prevData == nil { + errLog.Print(ErrMalformPkt) + mc.Close() + return nil, driver.ErrBadConn + } + + return prevData, nil + } + + // read packet body [pktLen bytes] + data, err = mc.buf.readNext(pktLen) + if err != nil { + errLog.Print(err) + mc.Close() + return nil, driver.ErrBadConn + } + + // return data if this was the last packet + if pktLen < maxPacketSize { + // zero allocations for non-split packets + if prevData == nil { + return data, nil + } + + return append(prevData, data...), nil + } + + prevData = append(prevData, data...) + } +} + +// Write packet buffer 'data' +func (mc *mysqlConn) writePacket(data []byte) error { + pktLen := len(data) - 4 + + if pktLen > mc.maxAllowedPacket { + return ErrPktTooLarge + } + + for { + var size int + if pktLen >= maxPacketSize { + data[0] = 0xff + data[1] = 0xff + data[2] = 0xff + size = maxPacketSize + } else { + data[0] = byte(pktLen) + data[1] = byte(pktLen >> 8) + data[2] = byte(pktLen >> 16) + size = pktLen + } + data[3] = mc.sequence + + // Write packet + if mc.writeTimeout > 0 { + if err := mc.netConn.SetWriteDeadline(time.Now().Add(mc.writeTimeout)); err != nil { + return err + } + } + + n, err := mc.netConn.Write(data[:4+size]) + if err == nil && n == 4+size { + mc.sequence++ + if size != maxPacketSize { + return nil + } + pktLen -= size + data = data[size:] + continue + } + + // Handle error + if err == nil { // n != len(data) + errLog.Print(ErrMalformPkt) + } else { + errLog.Print(err) + } + return driver.ErrBadConn + } +} + +/****************************************************************************** +* Initialisation Process * +******************************************************************************/ + +// Handshake Initialization Packet +// http://dev.mysql.com/doc/internals/en/connection-phase-packets.html#packet-Protocol::Handshake +func (mc *mysqlConn) readInitPacket() ([]byte, error) { + data, err := mc.readPacket() + if err != nil { + return nil, err + } + + if data[0] == iERR { + return nil, mc.handleErrorPacket(data) + } + + // protocol version [1 byte] + if data[0] < minProtocolVersion { + return nil, fmt.Errorf( + "unsupported protocol version %d. 
Version %d or higher is required", + data[0], + minProtocolVersion, + ) + } + + // server version [null terminated string] + // connection id [4 bytes] + pos := 1 + bytes.IndexByte(data[1:], 0x00) + 1 + 4 + + // first part of the password cipher [8 bytes] + cipher := data[pos : pos+8] + + // (filler) always 0x00 [1 byte] + pos += 8 + 1 + + // capability flags (lower 2 bytes) [2 bytes] + mc.flags = clientFlag(binary.LittleEndian.Uint16(data[pos : pos+2])) + if mc.flags&clientProtocol41 == 0 { + return nil, ErrOldProtocol + } + if mc.flags&clientSSL == 0 && mc.cfg.tls != nil { + return nil, ErrNoTLS + } + pos += 2 + + if len(data) > pos { + // character set [1 byte] + // status flags [2 bytes] + // capability flags (upper 2 bytes) [2 bytes] + // length of auth-plugin-data [1 byte] + // reserved (all [00]) [10 bytes] + pos += 1 + 2 + 2 + 1 + 10 + + // second part of the password cipher [mininum 13 bytes], + // where len=MAX(13, length of auth-plugin-data - 8) + // + // The web documentation is ambiguous about the length. However, + // according to mysql-5.7/sql/auth/sql_authentication.cc line 538, + // the 13th byte is "\0 byte, terminating the second part of + // a scramble". So the second part of the password cipher is + // a NULL terminated string that's at least 13 bytes with the + // last byte being NULL. + // + // The official Python library uses the fixed length 12 + // which seems to work but technically could have a hidden bug. + cipher = append(cipher, data[pos:pos+12]...) + + // TODO: Verify string termination + // EOF if version (>= 5.5.7 and < 5.5.10) or (>= 5.6.0 and < 5.6.2) + // \NUL otherwise + // + //if data[len(data)-1] == 0 { + // return + //} + //return ErrMalformPkt + + // make a memory safe copy of the cipher slice + var b [20]byte + copy(b[:], cipher) + return b[:], nil + } + + // make a memory safe copy of the cipher slice + var b [8]byte + copy(b[:], cipher) + return b[:], nil +} + +// Client Authentication Packet +// http://dev.mysql.com/doc/internals/en/connection-phase-packets.html#packet-Protocol::HandshakeResponse +func (mc *mysqlConn) writeAuthPacket(cipher []byte) error { + // Adjust client flags based on server support + clientFlags := clientProtocol41 | + clientSecureConn | + clientLongPassword | + clientTransactions | + clientLocalFiles | + clientPluginAuth | + clientMultiResults | + mc.flags&clientLongFlag + + if mc.cfg.ClientFoundRows { + clientFlags |= clientFoundRows + } + + // To enable TLS / SSL + if mc.cfg.tls != nil { + clientFlags |= clientSSL + } + + if mc.cfg.MultiStatements { + clientFlags |= clientMultiStatements + } + + // User Password + scrambleBuff := scramblePassword(cipher, []byte(mc.cfg.Passwd)) + + pktLen := 4 + 4 + 1 + 23 + len(mc.cfg.User) + 1 + 1 + len(scrambleBuff) + 21 + 1 + + // To specify a db name + if n := len(mc.cfg.DBName); n > 0 { + clientFlags |= clientConnectWithDB + pktLen += n + 1 + } + + // Calculate packet length and get buffer with that size + data := mc.buf.takeSmallBuffer(pktLen + 4) + if data == nil { + // can not take the buffer. 
Something must be wrong with the connection + errLog.Print(ErrBusyBuffer) + return driver.ErrBadConn + } + + // ClientFlags [32 bit] + data[4] = byte(clientFlags) + data[5] = byte(clientFlags >> 8) + data[6] = byte(clientFlags >> 16) + data[7] = byte(clientFlags >> 24) + + // MaxPacketSize [32 bit] (none) + data[8] = 0x00 + data[9] = 0x00 + data[10] = 0x00 + data[11] = 0x00 + + // Charset [1 byte] + var found bool + data[12], found = collations[mc.cfg.Collation] + if !found { + // Note possibility for false negatives: + // could be triggered although the collation is valid if the + // collations map does not contain entries the server supports. + return errors.New("unknown collation") + } + + // SSL Connection Request Packet + // http://dev.mysql.com/doc/internals/en/connection-phase-packets.html#packet-Protocol::SSLRequest + if mc.cfg.tls != nil { + // Send TLS / SSL request packet + if err := mc.writePacket(data[:(4+4+1+23)+4]); err != nil { + return err + } + + // Switch to TLS + tlsConn := tls.Client(mc.netConn, mc.cfg.tls) + if err := tlsConn.Handshake(); err != nil { + return err + } + mc.netConn = tlsConn + mc.buf.nc = tlsConn + } + + // Filler [23 bytes] (all 0x00) + pos := 13 + for ; pos < 13+23; pos++ { + data[pos] = 0 + } + + // User [null terminated string] + if len(mc.cfg.User) > 0 { + pos += copy(data[pos:], mc.cfg.User) + } + data[pos] = 0x00 + pos++ + + // ScrambleBuffer [length encoded integer] + data[pos] = byte(len(scrambleBuff)) + pos += 1 + copy(data[pos+1:], scrambleBuff) + + // Databasename [null terminated string] + if len(mc.cfg.DBName) > 0 { + pos += copy(data[pos:], mc.cfg.DBName) + data[pos] = 0x00 + pos++ + } + + // Assume native client during response + pos += copy(data[pos:], "mysql_native_password") + data[pos] = 0x00 + + // Send Auth packet + return mc.writePacket(data) +} + +// Client old authentication packet +// http://dev.mysql.com/doc/internals/en/connection-phase-packets.html#packet-Protocol::AuthSwitchResponse +func (mc *mysqlConn) writeOldAuthPacket(cipher []byte) error { + // User password + scrambleBuff := scrambleOldPassword(cipher, []byte(mc.cfg.Passwd)) + + // Calculate the packet length and add a tailing 0 + pktLen := len(scrambleBuff) + 1 + data := mc.buf.takeSmallBuffer(4 + pktLen) + if data == nil { + // can not take the buffer. Something must be wrong with the connection + errLog.Print(ErrBusyBuffer) + return driver.ErrBadConn + } + + // Add the scrambled password [null terminated string] + copy(data[4:], scrambleBuff) + data[4+pktLen-1] = 0x00 + + return mc.writePacket(data) +} + +// Client clear text authentication packet +// http://dev.mysql.com/doc/internals/en/connection-phase-packets.html#packet-Protocol::AuthSwitchResponse +func (mc *mysqlConn) writeClearAuthPacket() error { + // Calculate the packet length and add a tailing 0 + pktLen := len(mc.cfg.Passwd) + 1 + data := mc.buf.takeSmallBuffer(4 + pktLen) + if data == nil { + // can not take the buffer. 
Something must be wrong with the connection + errLog.Print(ErrBusyBuffer) + return driver.ErrBadConn + } + + // Add the clear password [null terminated string] + copy(data[4:], mc.cfg.Passwd) + data[4+pktLen-1] = 0x00 + + return mc.writePacket(data) +} + +// Native password authentication method +// http://dev.mysql.com/doc/internals/en/connection-phase-packets.html#packet-Protocol::AuthSwitchResponse +func (mc *mysqlConn) writeNativeAuthPacket(cipher []byte) error { + scrambleBuff := scramblePassword(cipher, []byte(mc.cfg.Passwd)) + + // Calculate the packet length and add a tailing 0 + pktLen := len(scrambleBuff) + data := mc.buf.takeSmallBuffer(4 + pktLen) + if data == nil { + // can not take the buffer. Something must be wrong with the connection + errLog.Print(ErrBusyBuffer) + return driver.ErrBadConn + } + + // Add the scramble + copy(data[4:], scrambleBuff) + + return mc.writePacket(data) +} + +/****************************************************************************** +* Command Packets * +******************************************************************************/ + +func (mc *mysqlConn) writeCommandPacket(command byte) error { + // Reset Packet Sequence + mc.sequence = 0 + + data := mc.buf.takeSmallBuffer(4 + 1) + if data == nil { + // can not take the buffer. Something must be wrong with the connection + errLog.Print(ErrBusyBuffer) + return driver.ErrBadConn + } + + // Add command byte + data[4] = command + + // Send CMD packet + return mc.writePacket(data) +} + +func (mc *mysqlConn) writeCommandPacketStr(command byte, arg string) error { + // Reset Packet Sequence + mc.sequence = 0 + + pktLen := 1 + len(arg) + data := mc.buf.takeBuffer(pktLen + 4) + if data == nil { + // can not take the buffer. Something must be wrong with the connection + errLog.Print(ErrBusyBuffer) + return driver.ErrBadConn + } + + // Add command byte + data[4] = command + + // Add arg + copy(data[5:], arg) + + // Send CMD packet + return mc.writePacket(data) +} + +func (mc *mysqlConn) writeCommandPacketUint32(command byte, arg uint32) error { + // Reset Packet Sequence + mc.sequence = 0 + + data := mc.buf.takeSmallBuffer(4 + 1 + 4) + if data == nil { + // can not take the buffer. 
Something must be wrong with the connection + errLog.Print(ErrBusyBuffer) + return driver.ErrBadConn + } + + // Add command byte + data[4] = command + + // Add arg [32 bit] + data[5] = byte(arg) + data[6] = byte(arg >> 8) + data[7] = byte(arg >> 16) + data[8] = byte(arg >> 24) + + // Send CMD packet + return mc.writePacket(data) +} + +/****************************************************************************** +* Result Packets * +******************************************************************************/ + +// Returns error if Packet is not an 'Result OK'-Packet +func (mc *mysqlConn) readResultOK() ([]byte, error) { + data, err := mc.readPacket() + if err == nil { + // packet indicator + switch data[0] { + + case iOK: + return nil, mc.handleOkPacket(data) + + case iEOF: + if len(data) > 1 { + pluginEndIndex := bytes.IndexByte(data, 0x00) + plugin := string(data[1:pluginEndIndex]) + cipher := data[pluginEndIndex+1 : len(data)-1] + + if plugin == "mysql_old_password" { + // using old_passwords + return cipher, ErrOldPassword + } else if plugin == "mysql_clear_password" { + // using clear text password + return cipher, ErrCleartextPassword + } else if plugin == "mysql_native_password" { + // using mysql default authentication method + return cipher, ErrNativePassword + } else { + return cipher, ErrUnknownPlugin + } + } else { + // https://dev.mysql.com/doc/internals/en/connection-phase-packets.html#packet-Protocol::OldAuthSwitchRequest + return nil, ErrOldPassword + } + + default: // Error otherwise + return nil, mc.handleErrorPacket(data) + } + } + return nil, err +} + +// Result Set Header Packet +// http://dev.mysql.com/doc/internals/en/com-query-response.html#packet-ProtocolText::Resultset +func (mc *mysqlConn) readResultSetHeaderPacket() (int, error) { + data, err := mc.readPacket() + if err == nil { + switch data[0] { + + case iOK: + return 0, mc.handleOkPacket(data) + + case iERR: + return 0, mc.handleErrorPacket(data) + + case iLocalInFile: + return 0, mc.handleInFileRequest(string(data[1:])) + } + + // column count + num, _, n := readLengthEncodedInteger(data) + if n-len(data) == 0 { + return int(num), nil + } + + return 0, ErrMalformPkt + } + return 0, err +} + +// Error Packet +// http://dev.mysql.com/doc/internals/en/generic-response-packets.html#packet-ERR_Packet +func (mc *mysqlConn) handleErrorPacket(data []byte) error { + if data[0] != iERR { + return ErrMalformPkt + } + + // 0xff [1 byte] + + // Error Number [16 bit uint] + errno := binary.LittleEndian.Uint16(data[1:3]) + + pos := 3 + + // SQL State [optional: # + 5bytes string] + if data[3] == 0x23 { + //sqlstate := string(data[4 : 4+5]) + pos = 9 + } + + // Error Message [string] + return &MySQLError{ + Number: errno, + Message: string(data[pos:]), + } +} + +func readStatus(b []byte) statusFlag { + return statusFlag(b[0]) | statusFlag(b[1])<<8 +} + +// Ok Packet +// http://dev.mysql.com/doc/internals/en/generic-response-packets.html#packet-OK_Packet +func (mc *mysqlConn) handleOkPacket(data []byte) error { + var n, m int + + // 0x00 [1 byte] + + // Affected rows [Length Coded Binary] + mc.affectedRows, _, n = readLengthEncodedInteger(data[1:]) + + // Insert id [Length Coded Binary] + mc.insertId, _, m = readLengthEncodedInteger(data[1+n:]) + + // server_status [2 bytes] + mc.status = readStatus(data[1+n+m : 1+n+m+2]) + if err := mc.discardResults(); err != nil { + return err + } + + // warning count [2 bytes] + if !mc.strict { + return nil + } + + pos := 1 + n + m + 2 + if binary.LittleEndian.Uint16(data[pos:pos+2]) 
> 0 { + return mc.getWarnings() + } + return nil +} + +// Read Packets as Field Packets until EOF-Packet or an Error appears +// http://dev.mysql.com/doc/internals/en/com-query-response.html#packet-Protocol::ColumnDefinition41 +func (mc *mysqlConn) readColumns(count int) ([]mysqlField, error) { + columns := make([]mysqlField, count) + + for i := 0; ; i++ { + data, err := mc.readPacket() + if err != nil { + return nil, err + } + + // EOF Packet + if data[0] == iEOF && (len(data) == 5 || len(data) == 1) { + if i == count { + return columns, nil + } + return nil, fmt.Errorf("column count mismatch n:%d len:%d", count, len(columns)) + } + + // Catalog + pos, err := skipLengthEncodedString(data) + if err != nil { + return nil, err + } + + // Database [len coded string] + n, err := skipLengthEncodedString(data[pos:]) + if err != nil { + return nil, err + } + pos += n + + // Table [len coded string] + if mc.cfg.ColumnsWithAlias { + tableName, _, n, err := readLengthEncodedString(data[pos:]) + if err != nil { + return nil, err + } + pos += n + columns[i].tableName = string(tableName) + } else { + n, err = skipLengthEncodedString(data[pos:]) + if err != nil { + return nil, err + } + pos += n + } + + // Original table [len coded string] + n, err = skipLengthEncodedString(data[pos:]) + if err != nil { + return nil, err + } + pos += n + + // Name [len coded string] + name, _, n, err := readLengthEncodedString(data[pos:]) + if err != nil { + return nil, err + } + columns[i].name = string(name) + pos += n + + // Original name [len coded string] + n, err = skipLengthEncodedString(data[pos:]) + if err != nil { + return nil, err + } + + // Filler [uint8] + // Charset [charset, collation uint8] + // Length [uint32] + pos += n + 1 + 2 + 4 + + // Field type [uint8] + columns[i].fieldType = data[pos] + pos++ + + // Flags [uint16] + columns[i].flags = fieldFlag(binary.LittleEndian.Uint16(data[pos : pos+2])) + pos += 2 + + // Decimals [uint8] + columns[i].decimals = data[pos] + //pos++ + + // Default value [len coded binary] + //if pos < len(data) { + // defaultVal, _, err = bytesToLengthCodedBinary(data[pos:]) + //} + } +} + +// Read Packets as Field Packets until EOF-Packet or an Error appears +// http://dev.mysql.com/doc/internals/en/com-query-response.html#packet-ProtocolText::ResultsetRow +func (rows *textRows) readRow(dest []driver.Value) error { + mc := rows.mc + + data, err := mc.readPacket() + if err != nil { + return err + } + + // EOF Packet + if data[0] == iEOF && len(data) == 5 { + // server_status [2 bytes] + rows.mc.status = readStatus(data[3:]) + err = rows.mc.discardResults() + if err == nil { + err = io.EOF + } else { + // connection unusable + rows.mc.Close() + } + rows.mc = nil + return err + } + if data[0] == iERR { + rows.mc = nil + return mc.handleErrorPacket(data) + } + + // RowSet Packet + var n int + var isNull bool + pos := 0 + + for i := range dest { + // Read bytes and convert to string + dest[i], isNull, n, err = readLengthEncodedString(data[pos:]) + pos += n + if err == nil { + if !isNull { + if !mc.parseTime { + continue + } else { + switch rows.columns[i].fieldType { + case fieldTypeTimestamp, fieldTypeDateTime, + fieldTypeDate, fieldTypeNewDate: + dest[i], err = parseDateTime( + string(dest[i].([]byte)), + mc.cfg.Loc, + ) + if err == nil { + continue + } + default: + continue + } + } + + } else { + dest[i] = nil + continue + } + } + return err // err != nil + } + + return nil +} + +// Reads Packets until EOF-Packet or an Error appears. 
Returns count of Packets read +func (mc *mysqlConn) readUntilEOF() error { + for { + data, err := mc.readPacket() + if err != nil { + return err + } + + switch data[0] { + case iERR: + return mc.handleErrorPacket(data) + case iEOF: + if len(data) == 5 { + mc.status = readStatus(data[3:]) + } + return nil + } + } +} + +/****************************************************************************** +* Prepared Statements * +******************************************************************************/ + +// Prepare Result Packets +// http://dev.mysql.com/doc/internals/en/com-stmt-prepare-response.html +func (stmt *mysqlStmt) readPrepareResultPacket() (uint16, error) { + data, err := stmt.mc.readPacket() + if err == nil { + // packet indicator [1 byte] + if data[0] != iOK { + return 0, stmt.mc.handleErrorPacket(data) + } + + // statement id [4 bytes] + stmt.id = binary.LittleEndian.Uint32(data[1:5]) + + // Column count [16 bit uint] + columnCount := binary.LittleEndian.Uint16(data[5:7]) + + // Param count [16 bit uint] + stmt.paramCount = int(binary.LittleEndian.Uint16(data[7:9])) + + // Reserved [8 bit] + + // Warning count [16 bit uint] + if !stmt.mc.strict { + return columnCount, nil + } + + // Check for warnings count > 0, only available in MySQL > 4.1 + if len(data) >= 12 && binary.LittleEndian.Uint16(data[10:12]) > 0 { + return columnCount, stmt.mc.getWarnings() + } + return columnCount, nil + } + return 0, err +} + +// http://dev.mysql.com/doc/internals/en/com-stmt-send-long-data.html +func (stmt *mysqlStmt) writeCommandLongData(paramID int, arg []byte) error { + maxLen := stmt.mc.maxAllowedPacket - 1 + pktLen := maxLen + + // After the header (bytes 0-3) follows before the data: + // 1 byte command + // 4 bytes stmtID + // 2 bytes paramID + const dataOffset = 1 + 4 + 2 + + // Can not use the write buffer since + // a) the buffer is too small + // b) it is in use + data := make([]byte, 4+1+4+2+len(arg)) + + copy(data[4+dataOffset:], arg) + + for argLen := len(arg); argLen > 0; argLen -= pktLen - dataOffset { + if dataOffset+argLen < maxLen { + pktLen = dataOffset + argLen + } + + stmt.mc.sequence = 0 + // Add command byte [1 byte] + data[4] = comStmtSendLongData + + // Add stmtID [32 bit] + data[5] = byte(stmt.id) + data[6] = byte(stmt.id >> 8) + data[7] = byte(stmt.id >> 16) + data[8] = byte(stmt.id >> 24) + + // Add paramID [16 bit] + data[9] = byte(paramID) + data[10] = byte(paramID >> 8) + + // Send CMD packet + err := stmt.mc.writePacket(data[:4+pktLen]) + if err == nil { + data = data[pktLen-dataOffset:] + continue + } + return err + + } + + // Reset Packet Sequence + stmt.mc.sequence = 0 + return nil +} + +// Execute Prepared Statement +// http://dev.mysql.com/doc/internals/en/com-stmt-execute.html +func (stmt *mysqlStmt) writeExecutePacket(args []driver.Value) error { + if len(args) != stmt.paramCount { + return fmt.Errorf( + "argument count mismatch (got: %d; has: %d)", + len(args), + stmt.paramCount, + ) + } + + const minPktLen = 4 + 1 + 4 + 1 + 4 + mc := stmt.mc + + // Reset packet-sequence + mc.sequence = 0 + + var data []byte + + if len(args) == 0 { + data = mc.buf.takeBuffer(minPktLen) + } else { + data = mc.buf.takeCompleteBuffer() + } + if data == nil { + // can not take the buffer. 
Something must be wrong with the connection + errLog.Print(ErrBusyBuffer) + return driver.ErrBadConn + } + + // command [1 byte] + data[4] = comStmtExecute + + // statement_id [4 bytes] + data[5] = byte(stmt.id) + data[6] = byte(stmt.id >> 8) + data[7] = byte(stmt.id >> 16) + data[8] = byte(stmt.id >> 24) + + // flags (0: CURSOR_TYPE_NO_CURSOR) [1 byte] + data[9] = 0x00 + + // iteration_count (uint32(1)) [4 bytes] + data[10] = 0x01 + data[11] = 0x00 + data[12] = 0x00 + data[13] = 0x00 + + if len(args) > 0 { + pos := minPktLen + + var nullMask []byte + if maskLen, typesLen := (len(args)+7)/8, 1+2*len(args); pos+maskLen+typesLen >= len(data) { + // buffer has to be extended but we don't know by how much so + // we depend on append after all data with known sizes fit. + // We stop at that because we deal with a lot of columns here + // which makes the required allocation size hard to guess. + tmp := make([]byte, pos+maskLen+typesLen) + copy(tmp[:pos], data[:pos]) + data = tmp + nullMask = data[pos : pos+maskLen] + pos += maskLen + } else { + nullMask = data[pos : pos+maskLen] + for i := 0; i < maskLen; i++ { + nullMask[i] = 0 + } + pos += maskLen + } + + // newParameterBoundFlag 1 [1 byte] + data[pos] = 0x01 + pos++ + + // type of each parameter [len(args)*2 bytes] + paramTypes := data[pos:] + pos += len(args) * 2 + + // value of each parameter [n bytes] + paramValues := data[pos:pos] + valuesCap := cap(paramValues) + + for i, arg := range args { + // build NULL-bitmap + if arg == nil { + nullMask[i/8] |= 1 << (uint(i) & 7) + paramTypes[i+i] = fieldTypeNULL + paramTypes[i+i+1] = 0x00 + continue + } + + // cache types and values + switch v := arg.(type) { + case int64: + paramTypes[i+i] = fieldTypeLongLong + paramTypes[i+i+1] = 0x00 + + if cap(paramValues)-len(paramValues)-8 >= 0 { + paramValues = paramValues[:len(paramValues)+8] + binary.LittleEndian.PutUint64( + paramValues[len(paramValues)-8:], + uint64(v), + ) + } else { + paramValues = append(paramValues, + uint64ToBytes(uint64(v))..., + ) + } + + case float64: + paramTypes[i+i] = fieldTypeDouble + paramTypes[i+i+1] = 0x00 + + if cap(paramValues)-len(paramValues)-8 >= 0 { + paramValues = paramValues[:len(paramValues)+8] + binary.LittleEndian.PutUint64( + paramValues[len(paramValues)-8:], + math.Float64bits(v), + ) + } else { + paramValues = append(paramValues, + uint64ToBytes(math.Float64bits(v))..., + ) + } + + case bool: + paramTypes[i+i] = fieldTypeTiny + paramTypes[i+i+1] = 0x00 + + if v { + paramValues = append(paramValues, 0x01) + } else { + paramValues = append(paramValues, 0x00) + } + + case []byte: + // Common case (non-nil value) first + if v != nil { + paramTypes[i+i] = fieldTypeString + paramTypes[i+i+1] = 0x00 + + if len(v) < mc.maxAllowedPacket-pos-len(paramValues)-(len(args)-(i+1))*64 { + paramValues = appendLengthEncodedInteger(paramValues, + uint64(len(v)), + ) + paramValues = append(paramValues, v...) + } else { + if err := stmt.writeCommandLongData(i, v); err != nil { + return err + } + } + continue + } + + // Handle []byte(nil) as a NULL value + nullMask[i/8] |= 1 << (uint(i) & 7) + paramTypes[i+i] = fieldTypeNULL + paramTypes[i+i+1] = 0x00 + + case string: + paramTypes[i+i] = fieldTypeString + paramTypes[i+i+1] = 0x00 + + if len(v) < mc.maxAllowedPacket-pos-len(paramValues)-(len(args)-(i+1))*64 { + paramValues = appendLengthEncodedInteger(paramValues, + uint64(len(v)), + ) + paramValues = append(paramValues, v...) 
+ } else { + if err := stmt.writeCommandLongData(i, []byte(v)); err != nil { + return err + } + } + + case time.Time: + paramTypes[i+i] = fieldTypeString + paramTypes[i+i+1] = 0x00 + + var val []byte + if v.IsZero() { + val = []byte("0000-00-00") + } else { + val = []byte(v.In(mc.cfg.Loc).Format(timeFormat)) + } + + paramValues = appendLengthEncodedInteger(paramValues, + uint64(len(val)), + ) + paramValues = append(paramValues, val...) + + default: + return fmt.Errorf("can not convert type: %T", arg) + } + } + + // Check if param values exceeded the available buffer + // In that case we must build the data packet with the new values buffer + if valuesCap != cap(paramValues) { + data = append(data[:pos], paramValues...) + mc.buf.buf = data + } + + pos += len(paramValues) + data = data[:pos] + } + + return mc.writePacket(data) +} + +func (mc *mysqlConn) discardResults() error { + for mc.status&statusMoreResultsExists != 0 { + resLen, err := mc.readResultSetHeaderPacket() + if err != nil { + return err + } + if resLen > 0 { + // columns + if err := mc.readUntilEOF(); err != nil { + return err + } + // rows + if err := mc.readUntilEOF(); err != nil { + return err + } + } else { + mc.status &^= statusMoreResultsExists + } + } + return nil +} + +// http://dev.mysql.com/doc/internals/en/binary-protocol-resultset-row.html +func (rows *binaryRows) readRow(dest []driver.Value) error { + data, err := rows.mc.readPacket() + if err != nil { + return err + } + + // packet indicator [1 byte] + if data[0] != iOK { + // EOF Packet + if data[0] == iEOF && len(data) == 5 { + rows.mc.status = readStatus(data[3:]) + err = rows.mc.discardResults() + if err == nil { + err = io.EOF + } else { + // connection unusable + rows.mc.Close() + } + rows.mc = nil + return err + } + rows.mc = nil + + // Error otherwise + return rows.mc.handleErrorPacket(data) + } + + // NULL-bitmap, [(column-count + 7 + 2) / 8 bytes] + pos := 1 + (len(dest)+7+2)>>3 + nullMask := data[1:pos] + + for i := range dest { + // Field is NULL + // (byte >> bit-pos) % 2 == 1 + if ((nullMask[(i+2)>>3] >> uint((i+2)&7)) & 1) == 1 { + dest[i] = nil + continue + } + + // Convert to byte-coded string + switch rows.columns[i].fieldType { + case fieldTypeNULL: + dest[i] = nil + continue + + // Numeric Types + case fieldTypeTiny: + if rows.columns[i].flags&flagUnsigned != 0 { + dest[i] = int64(data[pos]) + } else { + dest[i] = int64(int8(data[pos])) + } + pos++ + continue + + case fieldTypeShort, fieldTypeYear: + if rows.columns[i].flags&flagUnsigned != 0 { + dest[i] = int64(binary.LittleEndian.Uint16(data[pos : pos+2])) + } else { + dest[i] = int64(int16(binary.LittleEndian.Uint16(data[pos : pos+2]))) + } + pos += 2 + continue + + case fieldTypeInt24, fieldTypeLong: + if rows.columns[i].flags&flagUnsigned != 0 { + dest[i] = int64(binary.LittleEndian.Uint32(data[pos : pos+4])) + } else { + dest[i] = int64(int32(binary.LittleEndian.Uint32(data[pos : pos+4]))) + } + pos += 4 + continue + + case fieldTypeLongLong: + if rows.columns[i].flags&flagUnsigned != 0 { + val := binary.LittleEndian.Uint64(data[pos : pos+8]) + if val > math.MaxInt64 { + dest[i] = uint64ToString(val) + } else { + dest[i] = int64(val) + } + } else { + dest[i] = int64(binary.LittleEndian.Uint64(data[pos : pos+8])) + } + pos += 8 + continue + + case fieldTypeFloat: + dest[i] = float32(math.Float32frombits(binary.LittleEndian.Uint32(data[pos : pos+4]))) + pos += 4 + continue + + case fieldTypeDouble: + dest[i] = math.Float64frombits(binary.LittleEndian.Uint64(data[pos : pos+8])) + pos += 8 + 
continue + + // Length coded Binary Strings + case fieldTypeDecimal, fieldTypeNewDecimal, fieldTypeVarChar, + fieldTypeBit, fieldTypeEnum, fieldTypeSet, fieldTypeTinyBLOB, + fieldTypeMediumBLOB, fieldTypeLongBLOB, fieldTypeBLOB, + fieldTypeVarString, fieldTypeString, fieldTypeGeometry, fieldTypeJSON: + var isNull bool + var n int + dest[i], isNull, n, err = readLengthEncodedString(data[pos:]) + pos += n + if err == nil { + if !isNull { + continue + } else { + dest[i] = nil + continue + } + } + return err + + case + fieldTypeDate, fieldTypeNewDate, // Date YYYY-MM-DD + fieldTypeTime, // Time [-][H]HH:MM:SS[.fractal] + fieldTypeTimestamp, fieldTypeDateTime: // Timestamp YYYY-MM-DD HH:MM:SS[.fractal] + + num, isNull, n := readLengthEncodedInteger(data[pos:]) + pos += n + + switch { + case isNull: + dest[i] = nil + continue + case rows.columns[i].fieldType == fieldTypeTime: + // database/sql does not support an equivalent to TIME, return a string + var dstlen uint8 + switch decimals := rows.columns[i].decimals; decimals { + case 0x00, 0x1f: + dstlen = 8 + case 1, 2, 3, 4, 5, 6: + dstlen = 8 + 1 + decimals + default: + return fmt.Errorf( + "protocol error, illegal decimals value %d", + rows.columns[i].decimals, + ) + } + dest[i], err = formatBinaryDateTime(data[pos:pos+int(num)], dstlen, true) + case rows.mc.parseTime: + dest[i], err = parseBinaryDateTime(num, data[pos:], rows.mc.cfg.Loc) + default: + var dstlen uint8 + if rows.columns[i].fieldType == fieldTypeDate { + dstlen = 10 + } else { + switch decimals := rows.columns[i].decimals; decimals { + case 0x00, 0x1f: + dstlen = 19 + case 1, 2, 3, 4, 5, 6: + dstlen = 19 + 1 + decimals + default: + return fmt.Errorf( + "protocol error, illegal decimals value %d", + rows.columns[i].decimals, + ) + } + } + dest[i], err = formatBinaryDateTime(data[pos:pos+int(num)], dstlen, false) + } + + if err == nil { + pos += int(num) + continue + } else { + return err + } + + // Please report if this happens! + default: + return fmt.Errorf("unknown field type %d", rows.columns[i].fieldType) + } + } + + return nil +} diff --git a/vendor/github.com/go-sql-driver/mysql/packets_test.go b/vendor/github.com/go-sql-driver/mysql/packets_test.go new file mode 100644 index 0000000..9840458 --- /dev/null +++ b/vendor/github.com/go-sql-driver/mysql/packets_test.go @@ -0,0 +1,282 @@ +// Go MySQL Driver - A MySQL-Driver for Go's database/sql package +// +// Copyright 2016 The Go-MySQL-Driver Authors. All rights reserved. +// +// This Source Code Form is subject to the terms of the Mozilla Public +// License, v. 2.0. If a copy of the MPL was not distributed with this file, +// You can obtain one at http://mozilla.org/MPL/2.0/. 
+ +package mysql + +import ( + "database/sql/driver" + "errors" + "net" + "testing" + "time" +) + +var ( + errConnClosed = errors.New("connection is closed") + errConnTooManyReads = errors.New("too many reads") + errConnTooManyWrites = errors.New("too many writes") +) + +// struct to mock a net.Conn for testing purposes +type mockConn struct { + laddr net.Addr + raddr net.Addr + data []byte + closed bool + read int + written int + reads int + writes int + maxReads int + maxWrites int +} + +func (m *mockConn) Read(b []byte) (n int, err error) { + if m.closed { + return 0, errConnClosed + } + + m.reads++ + if m.maxReads > 0 && m.reads > m.maxReads { + return 0, errConnTooManyReads + } + + n = copy(b, m.data) + m.read += n + m.data = m.data[n:] + return +} +func (m *mockConn) Write(b []byte) (n int, err error) { + if m.closed { + return 0, errConnClosed + } + + m.writes++ + if m.maxWrites > 0 && m.writes > m.maxWrites { + return 0, errConnTooManyWrites + } + + n = len(b) + m.written += n + return +} +func (m *mockConn) Close() error { + m.closed = true + return nil +} +func (m *mockConn) LocalAddr() net.Addr { + return m.laddr +} +func (m *mockConn) RemoteAddr() net.Addr { + return m.raddr +} +func (m *mockConn) SetDeadline(t time.Time) error { + return nil +} +func (m *mockConn) SetReadDeadline(t time.Time) error { + return nil +} +func (m *mockConn) SetWriteDeadline(t time.Time) error { + return nil +} + +// make sure mockConn implements the net.Conn interface +var _ net.Conn = new(mockConn) + +func TestReadPacketSingleByte(t *testing.T) { + conn := new(mockConn) + mc := &mysqlConn{ + buf: newBuffer(conn), + } + + conn.data = []byte{0x01, 0x00, 0x00, 0x00, 0xff} + conn.maxReads = 1 + packet, err := mc.readPacket() + if err != nil { + t.Fatal(err) + } + if len(packet) != 1 { + t.Fatalf("unexpected packet lenght: expected %d, got %d", 1, len(packet)) + } + if packet[0] != 0xff { + t.Fatalf("unexpected packet content: expected %x, got %x", 0xff, packet[0]) + } +} + +func TestReadPacketWrongSequenceID(t *testing.T) { + conn := new(mockConn) + mc := &mysqlConn{ + buf: newBuffer(conn), + } + + // too low sequence id + conn.data = []byte{0x01, 0x00, 0x00, 0x00, 0xff} + conn.maxReads = 1 + mc.sequence = 1 + _, err := mc.readPacket() + if err != ErrPktSync { + t.Errorf("expected ErrPktSync, got %v", err) + } + + // reset + conn.reads = 0 + mc.sequence = 0 + mc.buf = newBuffer(conn) + + // too high sequence id + conn.data = []byte{0x01, 0x00, 0x00, 0x42, 0xff} + _, err = mc.readPacket() + if err != ErrPktSyncMul { + t.Errorf("expected ErrPktSyncMul, got %v", err) + } +} + +func TestReadPacketSplit(t *testing.T) { + conn := new(mockConn) + mc := &mysqlConn{ + buf: newBuffer(conn), + } + + data := make([]byte, maxPacketSize*2+4*3) + const pkt2ofs = maxPacketSize + 4 + const pkt3ofs = 2 * (maxPacketSize + 4) + + // case 1: payload has length maxPacketSize + data = data[:pkt2ofs+4] + + // 1st packet has maxPacketSize length and sequence id 0 + // ff ff ff 00 ... 
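+ // note: the three 0xff bytes are the 24-bit little-endian payload length (maxPacketSize); the fourth header byte is the sequence id, which is 0 for this first packet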
+ data[0] = 0xff + data[1] = 0xff + data[2] = 0xff + + // mark the payload start and end of 1st packet so that we can check if the + // content was correctly appended + data[4] = 0x11 + data[maxPacketSize+3] = 0x22 + + // 2nd packet has payload length 0 and squence id 1 + // 00 00 00 01 + data[pkt2ofs+3] = 0x01 + + conn.data = data + conn.maxReads = 3 + packet, err := mc.readPacket() + if err != nil { + t.Fatal(err) + } + if len(packet) != maxPacketSize { + t.Fatalf("unexpected packet lenght: expected %d, got %d", maxPacketSize, len(packet)) + } + if packet[0] != 0x11 { + t.Fatalf("unexpected payload start: expected %x, got %x", 0x11, packet[0]) + } + if packet[maxPacketSize-1] != 0x22 { + t.Fatalf("unexpected payload end: expected %x, got %x", 0x22, packet[maxPacketSize-1]) + } + + // case 2: payload has length which is a multiple of maxPacketSize + data = data[:cap(data)] + + // 2nd packet now has maxPacketSize length + data[pkt2ofs] = 0xff + data[pkt2ofs+1] = 0xff + data[pkt2ofs+2] = 0xff + + // mark the payload start and end of the 2nd packet + data[pkt2ofs+4] = 0x33 + data[pkt2ofs+maxPacketSize+3] = 0x44 + + // 3rd packet has payload length 0 and squence id 2 + // 00 00 00 02 + data[pkt3ofs+3] = 0x02 + + conn.data = data + conn.reads = 0 + conn.maxReads = 5 + mc.sequence = 0 + packet, err = mc.readPacket() + if err != nil { + t.Fatal(err) + } + if len(packet) != 2*maxPacketSize { + t.Fatalf("unexpected packet lenght: expected %d, got %d", 2*maxPacketSize, len(packet)) + } + if packet[0] != 0x11 { + t.Fatalf("unexpected payload start: expected %x, got %x", 0x11, packet[0]) + } + if packet[2*maxPacketSize-1] != 0x44 { + t.Fatalf("unexpected payload end: expected %x, got %x", 0x44, packet[2*maxPacketSize-1]) + } + + // case 3: payload has a length larger maxPacketSize, which is not an exact + // multiple of it + data = data[:pkt2ofs+4+42] + data[pkt2ofs] = 0x2a + data[pkt2ofs+1] = 0x00 + data[pkt2ofs+2] = 0x00 + data[pkt2ofs+4+41] = 0x44 + + conn.data = data + conn.reads = 0 + conn.maxReads = 4 + mc.sequence = 0 + packet, err = mc.readPacket() + if err != nil { + t.Fatal(err) + } + if len(packet) != maxPacketSize+42 { + t.Fatalf("unexpected packet lenght: expected %d, got %d", maxPacketSize+42, len(packet)) + } + if packet[0] != 0x11 { + t.Fatalf("unexpected payload start: expected %x, got %x", 0x11, packet[0]) + } + if packet[maxPacketSize+41] != 0x44 { + t.Fatalf("unexpected payload end: expected %x, got %x", 0x44, packet[maxPacketSize+41]) + } +} + +func TestReadPacketFail(t *testing.T) { + conn := new(mockConn) + mc := &mysqlConn{ + buf: newBuffer(conn), + } + + // illegal empty (stand-alone) packet + conn.data = []byte{0x00, 0x00, 0x00, 0x00} + conn.maxReads = 1 + _, err := mc.readPacket() + if err != driver.ErrBadConn { + t.Errorf("expected ErrBadConn, got %v", err) + } + + // reset + conn.reads = 0 + mc.sequence = 0 + mc.buf = newBuffer(conn) + + // fail to read header + conn.closed = true + _, err = mc.readPacket() + if err != driver.ErrBadConn { + t.Errorf("expected ErrBadConn, got %v", err) + } + + // reset + conn.closed = false + conn.reads = 0 + mc.sequence = 0 + mc.buf = newBuffer(conn) + + // fail to read body + conn.maxReads = 1 + _, err = mc.readPacket() + if err != driver.ErrBadConn { + t.Errorf("expected ErrBadConn, got %v", err) + } +} diff --git a/vendor/github.com/go-sql-driver/mysql/result.go b/vendor/github.com/go-sql-driver/mysql/result.go new file mode 100644 index 0000000..c6438d0 --- /dev/null +++ b/vendor/github.com/go-sql-driver/mysql/result.go @@ -0,0 +1,22 
@@ +// Go MySQL Driver - A MySQL-Driver for Go's database/sql package +// +// Copyright 2012 The Go-MySQL-Driver Authors. All rights reserved. +// +// This Source Code Form is subject to the terms of the Mozilla Public +// License, v. 2.0. If a copy of the MPL was not distributed with this file, +// You can obtain one at http://mozilla.org/MPL/2.0/. + +package mysql + +type mysqlResult struct { + affectedRows int64 + insertId int64 +} + +func (res *mysqlResult) LastInsertId() (int64, error) { + return res.insertId, nil +} + +func (res *mysqlResult) RowsAffected() (int64, error) { + return res.affectedRows, nil +} diff --git a/vendor/github.com/go-sql-driver/mysql/rows.go b/vendor/github.com/go-sql-driver/mysql/rows.go new file mode 100644 index 0000000..c08255e --- /dev/null +++ b/vendor/github.com/go-sql-driver/mysql/rows.go @@ -0,0 +1,112 @@ +// Go MySQL Driver - A MySQL-Driver for Go's database/sql package +// +// Copyright 2012 The Go-MySQL-Driver Authors. All rights reserved. +// +// This Source Code Form is subject to the terms of the Mozilla Public +// License, v. 2.0. If a copy of the MPL was not distributed with this file, +// You can obtain one at http://mozilla.org/MPL/2.0/. + +package mysql + +import ( + "database/sql/driver" + "io" +) + +type mysqlField struct { + tableName string + name string + flags fieldFlag + fieldType byte + decimals byte +} + +type mysqlRows struct { + mc *mysqlConn + columns []mysqlField +} + +type binaryRows struct { + mysqlRows +} + +type textRows struct { + mysqlRows +} + +type emptyRows struct{} + +func (rows *mysqlRows) Columns() []string { + columns := make([]string, len(rows.columns)) + if rows.mc != nil && rows.mc.cfg.ColumnsWithAlias { + for i := range columns { + if tableName := rows.columns[i].tableName; len(tableName) > 0 { + columns[i] = tableName + "." + rows.columns[i].name + } else { + columns[i] = rows.columns[i].name + } + } + } else { + for i := range columns { + columns[i] = rows.columns[i].name + } + } + return columns +} + +func (rows *mysqlRows) Close() error { + mc := rows.mc + if mc == nil { + return nil + } + if mc.netConn == nil { + return ErrInvalidConn + } + + // Remove unread packets from stream + err := mc.readUntilEOF() + if err == nil { + if err = mc.discardResults(); err != nil { + return err + } + } + + rows.mc = nil + return err +} + +func (rows *binaryRows) Next(dest []driver.Value) error { + if mc := rows.mc; mc != nil { + if mc.netConn == nil { + return ErrInvalidConn + } + + // Fetch next row from stream + return rows.readRow(dest) + } + return io.EOF +} + +func (rows *textRows) Next(dest []driver.Value) error { + if mc := rows.mc; mc != nil { + if mc.netConn == nil { + return ErrInvalidConn + } + + // Fetch next row from stream + return rows.readRow(dest) + } + return io.EOF +} + +func (rows emptyRows) Columns() []string { + return nil +} + +func (rows emptyRows) Close() error { + return nil +} + +func (rows emptyRows) Next(dest []driver.Value) error { + return io.EOF +} diff --git a/vendor/github.com/go-sql-driver/mysql/statement.go b/vendor/github.com/go-sql-driver/mysql/statement.go new file mode 100644 index 0000000..7f9b045 --- /dev/null +++ b/vendor/github.com/go-sql-driver/mysql/statement.go @@ -0,0 +1,153 @@ +// Go MySQL Driver - A MySQL-Driver for Go's database/sql package +// +// Copyright 2012 The Go-MySQL-Driver Authors. All rights reserved. +// +// This Source Code Form is subject to the terms of the Mozilla Public +// License, v. 2.0. 
If a copy of the MPL was not distributed with this file, +// You can obtain one at http://mozilla.org/MPL/2.0/. + +package mysql + +import ( + "database/sql/driver" + "fmt" + "reflect" + "strconv" +) + +type mysqlStmt struct { + mc *mysqlConn + id uint32 + paramCount int + columns []mysqlField // cached from the first query +} + +func (stmt *mysqlStmt) Close() error { + if stmt.mc == nil || stmt.mc.netConn == nil { + // driver.Stmt.Close can be called more than once, thus this function + // has to be idempotent. + // See also Issue #450 and golang/go#16019. + //errLog.Print(ErrInvalidConn) + return driver.ErrBadConn + } + + err := stmt.mc.writeCommandPacketUint32(comStmtClose, stmt.id) + stmt.mc = nil + return err +} + +func (stmt *mysqlStmt) NumInput() int { + return stmt.paramCount +} + +func (stmt *mysqlStmt) ColumnConverter(idx int) driver.ValueConverter { + return converter{} +} + +func (stmt *mysqlStmt) Exec(args []driver.Value) (driver.Result, error) { + if stmt.mc.netConn == nil { + errLog.Print(ErrInvalidConn) + return nil, driver.ErrBadConn + } + // Send command + err := stmt.writeExecutePacket(args) + if err != nil { + return nil, err + } + + mc := stmt.mc + + mc.affectedRows = 0 + mc.insertId = 0 + + // Read Result + resLen, err := mc.readResultSetHeaderPacket() + if err == nil { + if resLen > 0 { + // Columns + err = mc.readUntilEOF() + if err != nil { + return nil, err + } + + // Rows + err = mc.readUntilEOF() + } + if err == nil { + return &mysqlResult{ + affectedRows: int64(mc.affectedRows), + insertId: int64(mc.insertId), + }, nil + } + } + + return nil, err +} + +func (stmt *mysqlStmt) Query(args []driver.Value) (driver.Rows, error) { + if stmt.mc.netConn == nil { + errLog.Print(ErrInvalidConn) + return nil, driver.ErrBadConn + } + // Send command + err := stmt.writeExecutePacket(args) + if err != nil { + return nil, err + } + + mc := stmt.mc + + // Read Result + resLen, err := mc.readResultSetHeaderPacket() + if err != nil { + return nil, err + } + + rows := new(binaryRows) + + if resLen > 0 { + rows.mc = mc + // Columns + // If not cached, read them and cache them + if stmt.columns == nil { + rows.columns, err = mc.readColumns(resLen) + stmt.columns = rows.columns + } else { + rows.columns = stmt.columns + err = mc.readUntilEOF() + } + } + + return rows, err +} + +type converter struct{} + +func (c converter) ConvertValue(v interface{}) (driver.Value, error) { + if driver.IsValue(v) { + return v, nil + } + + rv := reflect.ValueOf(v) + switch rv.Kind() { + case reflect.Ptr: + // indirect pointers + if rv.IsNil() { + return nil, nil + } + return c.ConvertValue(rv.Elem().Interface()) + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + return rv.Int(), nil + case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32: + return int64(rv.Uint()), nil + case reflect.Uint64: + u64 := rv.Uint() + if u64 >= 1<<63 { + return strconv.FormatUint(u64, 10), nil + } + return int64(u64), nil + case reflect.Float32, reflect.Float64: + return rv.Float(), nil + } + return nil, fmt.Errorf("unsupported type %T, a %s", v, rv.Kind()) +} diff --git a/vendor/github.com/go-sql-driver/mysql/transaction.go b/vendor/github.com/go-sql-driver/mysql/transaction.go new file mode 100644 index 0000000..33c749b --- /dev/null +++ b/vendor/github.com/go-sql-driver/mysql/transaction.go @@ -0,0 +1,31 @@ +// Go MySQL Driver - A MySQL-Driver for Go's database/sql package +// +// Copyright 2012 The Go-MySQL-Driver Authors. All rights reserved. 
+// +// This Source Code Form is subject to the terms of the Mozilla Public +// License, v. 2.0. If a copy of the MPL was not distributed with this file, +// You can obtain one at http://mozilla.org/MPL/2.0/. + +package mysql + +type mysqlTx struct { + mc *mysqlConn +} + +func (tx *mysqlTx) Commit() (err error) { + if tx.mc == nil || tx.mc.netConn == nil { + return ErrInvalidConn + } + err = tx.mc.exec("COMMIT") + tx.mc = nil + return +} + +func (tx *mysqlTx) Rollback() (err error) { + if tx.mc == nil || tx.mc.netConn == nil { + return ErrInvalidConn + } + err = tx.mc.exec("ROLLBACK") + tx.mc = nil + return +} diff --git a/vendor/github.com/go-sql-driver/mysql/utils.go b/vendor/github.com/go-sql-driver/mysql/utils.go new file mode 100644 index 0000000..d523b7f --- /dev/null +++ b/vendor/github.com/go-sql-driver/mysql/utils.go @@ -0,0 +1,740 @@ +// Go MySQL Driver - A MySQL-Driver for Go's database/sql package +// +// Copyright 2012 The Go-MySQL-Driver Authors. All rights reserved. +// +// This Source Code Form is subject to the terms of the Mozilla Public +// License, v. 2.0. If a copy of the MPL was not distributed with this file, +// You can obtain one at http://mozilla.org/MPL/2.0/. + +package mysql + +import ( + "crypto/sha1" + "crypto/tls" + "database/sql/driver" + "encoding/binary" + "fmt" + "io" + "strings" + "time" +) + +var ( + tlsConfigRegister map[string]*tls.Config // Register for custom tls.Configs +) + +// RegisterTLSConfig registers a custom tls.Config to be used with sql.Open. +// Use the key as a value in the DSN where tls=value. +// +// rootCertPool := x509.NewCertPool() +// pem, err := ioutil.ReadFile("/path/ca-cert.pem") +// if err != nil { +// log.Fatal(err) +// } +// if ok := rootCertPool.AppendCertsFromPEM(pem); !ok { +// log.Fatal("Failed to append PEM.") +// } +// clientCert := make([]tls.Certificate, 0, 1) +// certs, err := tls.LoadX509KeyPair("/path/client-cert.pem", "/path/client-key.pem") +// if err != nil { +// log.Fatal(err) +// } +// clientCert = append(clientCert, certs) +// mysql.RegisterTLSConfig("custom", &tls.Config{ +// RootCAs: rootCertPool, +// Certificates: clientCert, +// }) +// db, err := sql.Open("mysql", "user@tcp(localhost:3306)/test?tls=custom") +// +func RegisterTLSConfig(key string, config *tls.Config) error { + if _, isBool := readBool(key); isBool || strings.ToLower(key) == "skip-verify" { + return fmt.Errorf("key '%s' is reserved", key) + } + + if tlsConfigRegister == nil { + tlsConfigRegister = make(map[string]*tls.Config) + } + + tlsConfigRegister[key] = config + return nil +} + +// DeregisterTLSConfig removes the tls.Config associated with key. +func DeregisterTLSConfig(key string) { + if tlsConfigRegister != nil { + delete(tlsConfigRegister, key) + } +} + +// Returns the bool value of the input. 
+// The 2nd return value indicates if the input was a valid bool value +func readBool(input string) (value bool, valid bool) { + switch input { + case "1", "true", "TRUE", "True": + return true, true + case "0", "false", "FALSE", "False": + return false, true + } + + // Not a valid bool value + return +} + +/****************************************************************************** +* Authentication * +******************************************************************************/ + +// Encrypt password using 4.1+ method +func scramblePassword(scramble, password []byte) []byte { + if len(password) == 0 { + return nil + } + + // stage1Hash = SHA1(password) + crypt := sha1.New() + crypt.Write(password) + stage1 := crypt.Sum(nil) + + // scrambleHash = SHA1(scramble + SHA1(stage1Hash)) + // inner Hash + crypt.Reset() + crypt.Write(stage1) + hash := crypt.Sum(nil) + + // outer Hash + crypt.Reset() + crypt.Write(scramble) + crypt.Write(hash) + scramble = crypt.Sum(nil) + + // token = scrambleHash XOR stage1Hash + for i := range scramble { + scramble[i] ^= stage1[i] + } + return scramble +} + +// Encrypt password using pre 4.1 (old password) method +// https://github.com/atcurtis/mariadb/blob/master/mysys/my_rnd.c +type myRnd struct { + seed1, seed2 uint32 +} + +const myRndMaxVal = 0x3FFFFFFF + +// Pseudo random number generator +func newMyRnd(seed1, seed2 uint32) *myRnd { + return &myRnd{ + seed1: seed1 % myRndMaxVal, + seed2: seed2 % myRndMaxVal, + } +} + +// Tested to be equivalent to MariaDB's floating point variant +// http://play.golang.org/p/QHvhd4qved +// http://play.golang.org/p/RG0q4ElWDx +func (r *myRnd) NextByte() byte { + r.seed1 = (r.seed1*3 + r.seed2) % myRndMaxVal + r.seed2 = (r.seed1 + r.seed2 + 33) % myRndMaxVal + + return byte(uint64(r.seed1) * 31 / myRndMaxVal) +} + +// Generate binary hash from byte string using insecure pre 4.1 method +func pwHash(password []byte) (result [2]uint32) { + var add uint32 = 7 + var tmp uint32 + + result[0] = 1345345333 + result[1] = 0x12345671 + + for _, c := range password { + // skip spaces and tabs in password + if c == ' ' || c == '\t' { + continue + } + + tmp = uint32(c) + result[0] ^= (((result[0] & 63) + add) * tmp) + (result[0] << 8) + result[1] += (result[1] << 8) ^ result[0] + add += tmp + } + + // Remove sign bit (1<<31)-1) + result[0] &= 0x7FFFFFFF + result[1] &= 0x7FFFFFFF + + return +} + +// Encrypt password using insecure pre 4.1 method +func scrambleOldPassword(scramble, password []byte) []byte { + if len(password) == 0 { + return nil + } + + scramble = scramble[:8] + + hashPw := pwHash(password) + hashSc := pwHash(scramble) + + r := newMyRnd(hashPw[0]^hashSc[0], hashPw[1]^hashSc[1]) + + var out [8]byte + for i := range out { + out[i] = r.NextByte() + 64 + } + + mask := r.NextByte() + for i := range out { + out[i] ^= mask + } + + return out[:] +} + +/****************************************************************************** +* Time related utils * +******************************************************************************/ + +// NullTime represents a time.Time that may be NULL. +// NullTime implements the Scanner interface so +// it can be used as a scan destination: +// +// var nt NullTime +// err := db.QueryRow("SELECT time FROM foo WHERE id=?", id).Scan(&nt) +// ... 
+// if nt.Valid { +// // use nt.Time +// } else { +// // NULL value +// } +// +// This NullTime implementation is not driver-specific +type NullTime struct { + Time time.Time + Valid bool // Valid is true if Time is not NULL +} + +// Scan implements the Scanner interface. +// The value type must be time.Time or string / []byte (formatted time-string), +// otherwise Scan fails. +func (nt *NullTime) Scan(value interface{}) (err error) { + if value == nil { + nt.Time, nt.Valid = time.Time{}, false + return + } + + switch v := value.(type) { + case time.Time: + nt.Time, nt.Valid = v, true + return + case []byte: + nt.Time, err = parseDateTime(string(v), time.UTC) + nt.Valid = (err == nil) + return + case string: + nt.Time, err = parseDateTime(v, time.UTC) + nt.Valid = (err == nil) + return + } + + nt.Valid = false + return fmt.Errorf("Can't convert %T to time.Time", value) +} + +// Value implements the driver Valuer interface. +func (nt NullTime) Value() (driver.Value, error) { + if !nt.Valid { + return nil, nil + } + return nt.Time, nil +} + +func parseDateTime(str string, loc *time.Location) (t time.Time, err error) { + base := "0000-00-00 00:00:00.0000000" + switch len(str) { + case 10, 19, 21, 22, 23, 24, 25, 26: // up to "YYYY-MM-DD HH:MM:SS.MMMMMM" + if str == base[:len(str)] { + return + } + t, err = time.Parse(timeFormat[:len(str)], str) + default: + err = fmt.Errorf("invalid time string: %s", str) + return + } + + // Adjust location + if err == nil && loc != time.UTC { + y, mo, d := t.Date() + h, mi, s := t.Clock() + t, err = time.Date(y, mo, d, h, mi, s, t.Nanosecond(), loc), nil + } + + return +} + +func parseBinaryDateTime(num uint64, data []byte, loc *time.Location) (driver.Value, error) { + switch num { + case 0: + return time.Time{}, nil + case 4: + return time.Date( + int(binary.LittleEndian.Uint16(data[:2])), // year + time.Month(data[2]), // month + int(data[3]), // day + 0, 0, 0, 0, + loc, + ), nil + case 7: + return time.Date( + int(binary.LittleEndian.Uint16(data[:2])), // year + time.Month(data[2]), // month + int(data[3]), // day + int(data[4]), // hour + int(data[5]), // minutes + int(data[6]), // seconds + 0, + loc, + ), nil + case 11: + return time.Date( + int(binary.LittleEndian.Uint16(data[:2])), // year + time.Month(data[2]), // month + int(data[3]), // day + int(data[4]), // hour + int(data[5]), // minutes + int(data[6]), // seconds + int(binary.LittleEndian.Uint32(data[7:11]))*1000, // nanoseconds + loc, + ), nil + } + return nil, fmt.Errorf("invalid DATETIME packet length %d", num) +} + +// zeroDateTime is used in formatBinaryDateTime to avoid an allocation +// if the DATE or DATETIME has the zero value. +// It must never be changed. +// The current behavior depends on database/sql copying the result. 
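+// (formatBinaryDateTime returns sub-slices of this value for zero dates, so mutating it would corrupt results handed to callers.)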
+var zeroDateTime = []byte("0000-00-00 00:00:00.000000") + +const digits01 = "0123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789" +const digits10 = "0000000000111111111122222222223333333333444444444455555555556666666666777777777788888888889999999999" + +func formatBinaryDateTime(src []byte, length uint8, justTime bool) (driver.Value, error) { + // length expects the deterministic length of the zero value, + // negative time and 100+ hours are automatically added if needed + if len(src) == 0 { + if justTime { + return zeroDateTime[11 : 11+length], nil + } + return zeroDateTime[:length], nil + } + var dst []byte // return value + var pt, p1, p2, p3 byte // current digit pair + var zOffs byte // offset of value in zeroDateTime + if justTime { + switch length { + case + 8, // time (can be up to 10 when negative and 100+ hours) + 10, 11, 12, 13, 14, 15: // time with fractional seconds + default: + return nil, fmt.Errorf("illegal TIME length %d", length) + } + switch len(src) { + case 8, 12: + default: + return nil, fmt.Errorf("invalid TIME packet length %d", len(src)) + } + // +2 to enable negative time and 100+ hours + dst = make([]byte, 0, length+2) + if src[0] == 1 { + dst = append(dst, '-') + } + if src[1] != 0 { + hour := uint16(src[1])*24 + uint16(src[5]) + pt = byte(hour / 100) + p1 = byte(hour - 100*uint16(pt)) + dst = append(dst, digits01[pt]) + } else { + p1 = src[5] + } + zOffs = 11 + src = src[6:] + } else { + switch length { + case 10, 19, 21, 22, 23, 24, 25, 26: + default: + t := "DATE" + if length > 10 { + t += "TIME" + } + return nil, fmt.Errorf("illegal %s length %d", t, length) + } + switch len(src) { + case 4, 7, 11: + default: + t := "DATE" + if length > 10 { + t += "TIME" + } + return nil, fmt.Errorf("illegal %s packet length %d", t, len(src)) + } + dst = make([]byte, 0, length) + // start with the date + year := binary.LittleEndian.Uint16(src[:2]) + pt = byte(year / 100) + p1 = byte(year - 100*uint16(pt)) + p2, p3 = src[2], src[3] + dst = append(dst, + digits10[pt], digits01[pt], + digits10[p1], digits01[p1], '-', + digits10[p2], digits01[p2], '-', + digits10[p3], digits01[p3], + ) + if length == 10 { + return dst, nil + } + if len(src) == 4 { + return append(dst, zeroDateTime[10:length]...), nil + } + dst = append(dst, ' ') + p1 = src[4] // hour + src = src[5:] + } + // p1 is 2-digit hour, src is after hour + p2, p3 = src[0], src[1] + dst = append(dst, + digits10[p1], digits01[p1], ':', + digits10[p2], digits01[p2], ':', + digits10[p3], digits01[p3], + ) + if length <= byte(len(dst)) { + return dst, nil + } + src = src[2:] + if len(src) == 0 { + return append(dst, zeroDateTime[19:zOffs+length]...), nil + } + microsecs := binary.LittleEndian.Uint32(src[:4]) + p1 = byte(microsecs / 10000) + microsecs -= 10000 * uint32(p1) + p2 = byte(microsecs / 100) + microsecs -= 100 * uint32(p2) + p3 = byte(microsecs) + switch decimals := zOffs + length - 20; decimals { + default: + return append(dst, '.', + digits10[p1], digits01[p1], + digits10[p2], digits01[p2], + digits10[p3], digits01[p3], + ), nil + case 1: + return append(dst, '.', + digits10[p1], + ), nil + case 2: + return append(dst, '.', + digits10[p1], digits01[p1], + ), nil + case 3: + return append(dst, '.', + digits10[p1], digits01[p1], + digits10[p2], + ), nil + case 4: + return append(dst, '.', + digits10[p1], digits01[p1], + digits10[p2], digits01[p2], + ), nil + case 5: + return append(dst, '.', + digits10[p1], digits01[p1], + digits10[p2], digits01[p2], + digits10[p3], 
+ ), nil + } +} + +/****************************************************************************** +* Convert from and to bytes * +******************************************************************************/ + +func uint64ToBytes(n uint64) []byte { + return []byte{ + byte(n), + byte(n >> 8), + byte(n >> 16), + byte(n >> 24), + byte(n >> 32), + byte(n >> 40), + byte(n >> 48), + byte(n >> 56), + } +} + +func uint64ToString(n uint64) []byte { + var a [20]byte + i := 20 + + // U+0030 = 0 + // ... + // U+0039 = 9 + + var q uint64 + for n >= 10 { + i-- + q = n / 10 + a[i] = uint8(n-q*10) + 0x30 + n = q + } + + i-- + a[i] = uint8(n) + 0x30 + + return a[i:] +} + +// treats string value as unsigned integer representation +func stringToInt(b []byte) int { + val := 0 + for i := range b { + val *= 10 + val += int(b[i] - 0x30) + } + return val +} + +// returns the string read as a bytes slice, wheter the value is NULL, +// the number of bytes read and an error, in case the string is longer than +// the input slice +func readLengthEncodedString(b []byte) ([]byte, bool, int, error) { + // Get length + num, isNull, n := readLengthEncodedInteger(b) + if num < 1 { + return b[n:n], isNull, n, nil + } + + n += int(num) + + // Check data length + if len(b) >= n { + return b[n-int(num) : n], false, n, nil + } + return nil, false, n, io.EOF +} + +// returns the number of bytes skipped and an error, in case the string is +// longer than the input slice +func skipLengthEncodedString(b []byte) (int, error) { + // Get length + num, _, n := readLengthEncodedInteger(b) + if num < 1 { + return n, nil + } + + n += int(num) + + // Check data length + if len(b) >= n { + return n, nil + } + return n, io.EOF +} + +// returns the number read, whether the value is NULL and the number of bytes read +func readLengthEncodedInteger(b []byte) (uint64, bool, int) { + // See issue #349 + if len(b) == 0 { + return 0, true, 1 + } + switch b[0] { + + // 251: NULL + case 0xfb: + return 0, true, 1 + + // 252: value of following 2 + case 0xfc: + return uint64(b[1]) | uint64(b[2])<<8, false, 3 + + // 253: value of following 3 + case 0xfd: + return uint64(b[1]) | uint64(b[2])<<8 | uint64(b[3])<<16, false, 4 + + // 254: value of following 8 + case 0xfe: + return uint64(b[1]) | uint64(b[2])<<8 | uint64(b[3])<<16 | + uint64(b[4])<<24 | uint64(b[5])<<32 | uint64(b[6])<<40 | + uint64(b[7])<<48 | uint64(b[8])<<56, + false, 9 + } + + // 0-250: value of first byte + return uint64(b[0]), false, 1 +} + +// encodes a uint64 value and appends it to the given bytes slice +func appendLengthEncodedInteger(b []byte, n uint64) []byte { + switch { + case n <= 250: + return append(b, byte(n)) + + case n <= 0xffff: + return append(b, 0xfc, byte(n), byte(n>>8)) + + case n <= 0xffffff: + return append(b, 0xfd, byte(n), byte(n>>8), byte(n>>16)) + } + return append(b, 0xfe, byte(n), byte(n>>8), byte(n>>16), byte(n>>24), + byte(n>>32), byte(n>>40), byte(n>>48), byte(n>>56)) +} + +// reserveBuffer checks cap(buf) and expand buffer to len(buf) + appendSize. +// If cap(buf) is not enough, reallocate new buffer. 
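+// The replacement buffer is sized len(buf)*2+appendSize, so repeated appends grow the buffer geometrically.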
+func reserveBuffer(buf []byte, appendSize int) []byte { + newSize := len(buf) + appendSize + if cap(buf) < newSize { + // Grow buffer exponentially + newBuf := make([]byte, len(buf)*2+appendSize) + copy(newBuf, buf) + buf = newBuf + } + return buf[:newSize] +} + +// escapeBytesBackslash escapes []byte with backslashes (\) +// This escapes the contents of a string (provided as []byte) by adding backslashes before special +// characters, and turning others into specific escape sequences, such as +// turning newlines into \n and null bytes into \0. +// https://github.com/mysql/mysql-server/blob/mysql-5.7.5/mysys/charset.c#L823-L932 +func escapeBytesBackslash(buf, v []byte) []byte { + pos := len(buf) + buf = reserveBuffer(buf, len(v)*2) + + for _, c := range v { + switch c { + case '\x00': + buf[pos] = '\\' + buf[pos+1] = '0' + pos += 2 + case '\n': + buf[pos] = '\\' + buf[pos+1] = 'n' + pos += 2 + case '\r': + buf[pos] = '\\' + buf[pos+1] = 'r' + pos += 2 + case '\x1a': + buf[pos] = '\\' + buf[pos+1] = 'Z' + pos += 2 + case '\'': + buf[pos] = '\\' + buf[pos+1] = '\'' + pos += 2 + case '"': + buf[pos] = '\\' + buf[pos+1] = '"' + pos += 2 + case '\\': + buf[pos] = '\\' + buf[pos+1] = '\\' + pos += 2 + default: + buf[pos] = c + pos++ + } + } + + return buf[:pos] +} + +// escapeStringBackslash is similar to escapeBytesBackslash but for string. +func escapeStringBackslash(buf []byte, v string) []byte { + pos := len(buf) + buf = reserveBuffer(buf, len(v)*2) + + for i := 0; i < len(v); i++ { + c := v[i] + switch c { + case '\x00': + buf[pos] = '\\' + buf[pos+1] = '0' + pos += 2 + case '\n': + buf[pos] = '\\' + buf[pos+1] = 'n' + pos += 2 + case '\r': + buf[pos] = '\\' + buf[pos+1] = 'r' + pos += 2 + case '\x1a': + buf[pos] = '\\' + buf[pos+1] = 'Z' + pos += 2 + case '\'': + buf[pos] = '\\' + buf[pos+1] = '\'' + pos += 2 + case '"': + buf[pos] = '\\' + buf[pos+1] = '"' + pos += 2 + case '\\': + buf[pos] = '\\' + buf[pos+1] = '\\' + pos += 2 + default: + buf[pos] = c + pos++ + } + } + + return buf[:pos] +} + +// escapeBytesQuotes escapes apostrophes in []byte by doubling them up. +// This escapes the contents of a string by doubling up any apostrophes that +// it contains. This is used when the NO_BACKSLASH_ESCAPES SQL_MODE is in +// effect on the server. +// https://github.com/mysql/mysql-server/blob/mysql-5.7.5/mysys/charset.c#L963-L1038 +func escapeBytesQuotes(buf, v []byte) []byte { + pos := len(buf) + buf = reserveBuffer(buf, len(v)*2) + + for _, c := range v { + if c == '\'' { + buf[pos] = '\'' + buf[pos+1] = '\'' + pos += 2 + } else { + buf[pos] = c + pos++ + } + } + + return buf[:pos] +} + +// escapeStringQuotes is similar to escapeBytesQuotes but for string. +func escapeStringQuotes(buf []byte, v string) []byte { + pos := len(buf) + buf = reserveBuffer(buf, len(v)*2) + + for i := 0; i < len(v); i++ { + c := v[i] + if c == '\'' { + buf[pos] = '\'' + buf[pos+1] = '\'' + pos += 2 + } else { + buf[pos] = c + pos++ + } + } + + return buf[:pos] +} diff --git a/vendor/github.com/go-sql-driver/mysql/utils_test.go b/vendor/github.com/go-sql-driver/mysql/utils_test.go new file mode 100644 index 0000000..0d6c668 --- /dev/null +++ b/vendor/github.com/go-sql-driver/mysql/utils_test.go @@ -0,0 +1,197 @@ +// Go MySQL Driver - A MySQL-Driver for Go's database/sql package +// +// Copyright 2013 The Go-MySQL-Driver Authors. All rights reserved. +// +// This Source Code Form is subject to the terms of the Mozilla Public +// License, v. 2.0. 
If a copy of the MPL was not distributed with this file, +// You can obtain one at http://mozilla.org/MPL/2.0/. + +package mysql + +import ( + "bytes" + "encoding/binary" + "fmt" + "testing" + "time" +) + +func TestScanNullTime(t *testing.T) { + var scanTests = []struct { + in interface{} + error bool + valid bool + time time.Time + }{ + {tDate, false, true, tDate}, + {sDate, false, true, tDate}, + {[]byte(sDate), false, true, tDate}, + {tDateTime, false, true, tDateTime}, + {sDateTime, false, true, tDateTime}, + {[]byte(sDateTime), false, true, tDateTime}, + {tDate0, false, true, tDate0}, + {sDate0, false, true, tDate0}, + {[]byte(sDate0), false, true, tDate0}, + {sDateTime0, false, true, tDate0}, + {[]byte(sDateTime0), false, true, tDate0}, + {"", true, false, tDate0}, + {"1234", true, false, tDate0}, + {0, true, false, tDate0}, + } + + var nt = NullTime{} + var err error + + for _, tst := range scanTests { + err = nt.Scan(tst.in) + if (err != nil) != tst.error { + t.Errorf("%v: expected error status %t, got %t", tst.in, tst.error, (err != nil)) + } + if nt.Valid != tst.valid { + t.Errorf("%v: expected valid status %t, got %t", tst.in, tst.valid, nt.Valid) + } + if nt.Time != tst.time { + t.Errorf("%v: expected time %v, got %v", tst.in, tst.time, nt.Time) + } + } +} + +func TestLengthEncodedInteger(t *testing.T) { + var integerTests = []struct { + num uint64 + encoded []byte + }{ + {0x0000000000000000, []byte{0x00}}, + {0x0000000000000012, []byte{0x12}}, + {0x00000000000000fa, []byte{0xfa}}, + {0x0000000000000100, []byte{0xfc, 0x00, 0x01}}, + {0x0000000000001234, []byte{0xfc, 0x34, 0x12}}, + {0x000000000000ffff, []byte{0xfc, 0xff, 0xff}}, + {0x0000000000010000, []byte{0xfd, 0x00, 0x00, 0x01}}, + {0x0000000000123456, []byte{0xfd, 0x56, 0x34, 0x12}}, + {0x0000000000ffffff, []byte{0xfd, 0xff, 0xff, 0xff}}, + {0x0000000001000000, []byte{0xfe, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x00}}, + {0x123456789abcdef0, []byte{0xfe, 0xf0, 0xde, 0xbc, 0x9a, 0x78, 0x56, 0x34, 0x12}}, + {0xffffffffffffffff, []byte{0xfe, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff}}, + } + + for _, tst := range integerTests { + num, isNull, numLen := readLengthEncodedInteger(tst.encoded) + if isNull { + t.Errorf("%x: expected %d, got NULL", tst.encoded, tst.num) + } + if num != tst.num { + t.Errorf("%x: expected %d, got %d", tst.encoded, tst.num, num) + } + if numLen != len(tst.encoded) { + t.Errorf("%x: expected size %d, got %d", tst.encoded, len(tst.encoded), numLen) + } + encoded := appendLengthEncodedInteger(nil, num) + if !bytes.Equal(encoded, tst.encoded) { + t.Errorf("%v: expected %x, got %x", num, tst.encoded, encoded) + } + } +} + +func TestOldPass(t *testing.T) { + scramble := []byte{9, 8, 7, 6, 5, 4, 3, 2} + vectors := []struct { + pass string + out string + }{ + {" pass", "47575c5a435b4251"}, + {"pass ", "47575c5a435b4251"}, + {"123\t456", "575c47505b5b5559"}, + {"C0mpl!ca ted#PASS123", "5d5d554849584a45"}, + } + for _, tuple := range vectors { + ours := scrambleOldPassword(scramble, []byte(tuple.pass)) + if tuple.out != fmt.Sprintf("%x", ours) { + t.Errorf("Failed old password %q", tuple.pass) + } + } +} + +func TestFormatBinaryDateTime(t *testing.T) { + rawDate := [11]byte{} + binary.LittleEndian.PutUint16(rawDate[:2], 1978) // years + rawDate[2] = 12 // months + rawDate[3] = 30 // days + rawDate[4] = 15 // hours + rawDate[5] = 46 // minutes + rawDate[6] = 23 // seconds + binary.LittleEndian.PutUint32(rawDate[7:], 987654) // microseconds + expect := func(expected string, inlen, outlen uint8) { + 
actual, _ := formatBinaryDateTime(rawDate[:inlen], outlen, false) + bytes, ok := actual.([]byte) + if !ok { + t.Errorf("formatBinaryDateTime must return []byte, was %T", actual) + } + if string(bytes) != expected { + t.Errorf( + "expected %q, got %q for length in %d, out %d", + bytes, actual, inlen, outlen, + ) + } + } + expect("0000-00-00", 0, 10) + expect("0000-00-00 00:00:00", 0, 19) + expect("1978-12-30", 4, 10) + expect("1978-12-30 15:46:23", 7, 19) + expect("1978-12-30 15:46:23.987654", 11, 26) +} + +func TestEscapeBackslash(t *testing.T) { + expect := func(expected, value string) { + actual := string(escapeBytesBackslash([]byte{}, []byte(value))) + if actual != expected { + t.Errorf( + "expected %s, got %s", + expected, actual, + ) + } + + actual = string(escapeStringBackslash([]byte{}, value)) + if actual != expected { + t.Errorf( + "expected %s, got %s", + expected, actual, + ) + } + } + + expect("foo\\0bar", "foo\x00bar") + expect("foo\\nbar", "foo\nbar") + expect("foo\\rbar", "foo\rbar") + expect("foo\\Zbar", "foo\x1abar") + expect("foo\\\"bar", "foo\"bar") + expect("foo\\\\bar", "foo\\bar") + expect("foo\\'bar", "foo'bar") +} + +func TestEscapeQuotes(t *testing.T) { + expect := func(expected, value string) { + actual := string(escapeBytesQuotes([]byte{}, []byte(value))) + if actual != expected { + t.Errorf( + "expected %s, got %s", + expected, actual, + ) + } + + actual = string(escapeStringQuotes([]byte{}, value)) + if actual != expected { + t.Errorf( + "expected %s, got %s", + expected, actual, + ) + } + } + + expect("foo\x00bar", "foo\x00bar") // not affected + expect("foo\nbar", "foo\nbar") // not affected + expect("foo\rbar", "foo\rbar") // not affected + expect("foo\x1abar", "foo\x1abar") // not affected + expect("foo''bar", "foo'bar") // affected + expect("foo\"bar", "foo\"bar") // not affected +} diff --git a/vendor/github.com/jinzhu/gorm/.codeclimate.yml b/vendor/github.com/jinzhu/gorm/.codeclimate.yml new file mode 100644 index 0000000..51aba50 --- /dev/null +++ b/vendor/github.com/jinzhu/gorm/.codeclimate.yml @@ -0,0 +1,11 @@ +--- +engines: + gofmt: + enabled: true + govet: + enabled: true + golint: + enabled: true +ratings: + paths: + - "**.go" diff --git a/vendor/github.com/jinzhu/gorm/.github/ISSUE_TEMPLATE.md b/vendor/github.com/jinzhu/gorm/.github/ISSUE_TEMPLATE.md new file mode 100644 index 0000000..a0b64bf --- /dev/null +++ b/vendor/github.com/jinzhu/gorm/.github/ISSUE_TEMPLATE.md @@ -0,0 +1,45 @@ +Your issue may already be reported! Please search on the [issue track](https://github.com/jinzhu/gorm/issues) before creating one. + +### What version of Go are you using (`go version`)? + + +### Which database and its version are you using? + + +### Please provide a complete runnable program to reproduce your issue. **IMPORTANT** + +Need to runnable with [GORM's docker compose config](https://github.com/jinzhu/gorm/blob/master/docker-compose.yml) or please provides your config. 
+ +```go +package main + +import ( + "github.com/jinzhu/gorm" + _ "github.com/jinzhu/gorm/dialects/mssql" + _ "github.com/jinzhu/gorm/dialects/mysql" + _ "github.com/jinzhu/gorm/dialects/postgres" + _ "github.com/jinzhu/gorm/dialects/sqlite" +) + +var db *gorm.DB + +func init() { + var err error + db, err = gorm.Open("sqlite3", "test.db") + // db, err = gorm.Open("postgres", "user=gorm password=gorm DB.name=gorm port=9920 sslmode=disable") + // db, err = gorm.Open("mysql", "gorm:gorm@tcp(localhost:9910)/gorm?charset=utf8&parseTime=True") + // db, err = gorm.Open("mssql", "sqlserver://gorm:LoremIpsum86@localhost:9930?database=gorm") + if err != nil { + panic(err) + } + db.LogMode(true) +} + +func main() { + if /* failure condition */ { + fmt.Println("failed") + } else { + fmt.Println("success") + } +} +``` diff --git a/vendor/github.com/jinzhu/gorm/.github/PULL_REQUEST_TEMPLATE.md b/vendor/github.com/jinzhu/gorm/.github/PULL_REQUEST_TEMPLATE.md new file mode 100644 index 0000000..b467b6c --- /dev/null +++ b/vendor/github.com/jinzhu/gorm/.github/PULL_REQUEST_TEMPLATE.md @@ -0,0 +1,9 @@ +Make sure these boxes checked before submitting your pull request. + +- [] Do only one thing +- [] No API-breaking changes +- [] New code/logic commented & tested + +For significant changes like big bug fixes, new features, please open an issue to make an agreement on an implementation design/plan first before starting it. + +### What did this pull request do? diff --git a/vendor/github.com/jinzhu/gorm/.gitignore b/vendor/github.com/jinzhu/gorm/.gitignore new file mode 100644 index 0000000..01dc5ce --- /dev/null +++ b/vendor/github.com/jinzhu/gorm/.gitignore @@ -0,0 +1,2 @@ +documents +_book diff --git a/vendor/github.com/jinzhu/gorm/License b/vendor/github.com/jinzhu/gorm/License new file mode 100644 index 0000000..037e165 --- /dev/null +++ b/vendor/github.com/jinzhu/gorm/License @@ -0,0 +1,21 @@ +The MIT License (MIT) + +Copyright (c) 2013-NOW Jinzhu + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +THE SOFTWARE. diff --git a/vendor/github.com/jinzhu/gorm/README.md b/vendor/github.com/jinzhu/gorm/README.md new file mode 100644 index 0000000..0c5c7ea --- /dev/null +++ b/vendor/github.com/jinzhu/gorm/README.md @@ -0,0 +1,40 @@ +# GORM + +The fantastic ORM library for Golang, aims to be developer friendly. 
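Editorial note: for context on the API this vendored library exposes, here is a minimal sketch of typical GORM v1 usage (model definition, auto-migration, basic CRUD) against the sqlite dialect; the `Product` model and the DSN are illustrative, not taken from this repository:

```go
package main

import (
	"github.com/jinzhu/gorm"
	_ "github.com/jinzhu/gorm/dialects/sqlite"
)

// Product is an illustrative model; gorm.Model adds ID, CreatedAt, UpdatedAt, DeletedAt.
type Product struct {
	gorm.Model
	Code  string
	Price uint
}

func main() {
	db, err := gorm.Open("sqlite3", "test.db")
	if err != nil {
		panic("failed to connect database")
	}
	defer db.Close()

	db.AutoMigrate(&Product{})                      // create/alter the products table
	db.Create(&Product{Code: "L1212", Price: 1000}) // INSERT

	var product Product
	db.First(&product, 1)                    // SELECT by primary key
	db.Model(&product).Update("Price", 2000) // UPDATE a single column
	db.Delete(&product)                      // soft delete (sets deleted_at)
}
```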
+ +[![go report card](https://goreportcard.com/badge/github.com/jinzhu/gorm "go report card")](https://goreportcard.com/report/github.com/jinzhu/gorm) +[![wercker status](https://app.wercker.com/status/8596cace912c9947dd9c8542ecc8cb8b/s/master "wercker status")](https://app.wercker.com/project/byKey/8596cace912c9947dd9c8542ecc8cb8b) +[![Join the chat at https://gitter.im/jinzhu/gorm](https://img.shields.io/gitter/room/jinzhu/gorm.svg)](https://gitter.im/jinzhu/gorm?utm_source=badge&utm_medium=badge&utm_campaign=pr-badge&utm_content=badge) +[![Open Collective Backer](https://opencollective.com/gorm/tiers/backer/badge.svg?label=backer&color=brightgreen "Open Collective Backer")](https://opencollective.com/gorm) +[![Open Collective Sponsor](https://opencollective.com/gorm/tiers/sponsor/badge.svg?label=sponsor&color=brightgreen "Open Collective Sponsor")](https://opencollective.com/gorm) +[![MIT license](http://img.shields.io/badge/license-MIT-brightgreen.svg)](http://opensource.org/licenses/MIT) +[![GoDoc](https://godoc.org/github.com/jinzhu/gorm?status.svg)](https://godoc.org/github.com/jinzhu/gorm) + +## Overview + +* Full-Featured ORM (almost) +* Associations (Has One, Has Many, Belongs To, Many To Many, Polymorphism) +* Hooks (Before/After Create/Save/Update/Delete/Find) +* Preloading (eager loading) +* Transactions +* Composite Primary Key +* SQL Builder +* Auto Migrations +* Logger +* Extendable, write Plugins based on GORM callbacks +* Every feature comes with tests +* Developer Friendly + +## Getting Started + +* GORM Guides [http://gorm.io](http://gorm.io) + +## Contributing + +[You can help to deliver a better GORM, check out things you can do](http://gorm.io/contribute.html) + +## License + +© Jinzhu, 2013~time.Now + +Released under the [MIT License](https://github.com/jinzhu/gorm/blob/master/License) diff --git a/vendor/github.com/jinzhu/gorm/association.go b/vendor/github.com/jinzhu/gorm/association.go new file mode 100644 index 0000000..8c6d986 --- /dev/null +++ b/vendor/github.com/jinzhu/gorm/association.go @@ -0,0 +1,375 @@ +package gorm + +import ( + "errors" + "fmt" + "reflect" +) + +// Association Mode contains some helper methods to handle relationship things easily. +type Association struct { + Error error + scope *Scope + column string + field *Field +} + +// Find find out all related associations +func (association *Association) Find(value interface{}) *Association { + association.scope.related(value, association.column) + return association.setErr(association.scope.db.Error) +} + +// Append append new associations for many2many, has_many, replace current association for has_one, belongs_to +func (association *Association) Append(values ...interface{}) *Association { + if association.Error != nil { + return association + } + + if relationship := association.field.Relationship; relationship.Kind == "has_one" { + return association.Replace(values...) + } + return association.saveAssociations(values...) +} + +// Replace replace current associations with new one +func (association *Association) Replace(values ...interface{}) *Association { + if association.Error != nil { + return association + } + + var ( + relationship = association.field.Relationship + scope = association.scope + field = association.field.Field + newDB = scope.NewDB() + ) + + // Append new values + association.field.Set(reflect.Zero(association.field.Field.Type())) + association.saveAssociations(values...) 
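Editorial note: `Replace` zeroes the field and saves the new values first; as the code below shows, for has_one/has_many it does not delete the previously associated rows but only sets their foreign keys to NULL via `UpdateColumn`, while for many2many it removes the stale join-table rows. From the caller's side, the Association mode defined in this file is used roughly as in the following hypothetical sketch (`User`, `Language` and the sqlite DSN are illustrative only):

```go
// Caller-side sketch of Association mode; models and DSN are illustrative.
package main

import (
	"fmt"

	"github.com/jinzhu/gorm"
	_ "github.com/jinzhu/gorm/dialects/sqlite"
)

type Language struct {
	gorm.Model
	Name string
}

type User struct {
	gorm.Model
	Name      string
	Languages []Language `gorm:"many2many:user_languages;"`
}

func main() {
	db, err := gorm.Open("sqlite3", "assoc.db")
	if err != nil {
		panic(err)
	}
	defer db.Close()
	db.AutoMigrate(&User{}, &Language{})

	user := User{Name: "jinzhu", Languages: []Language{{Name: "ZH"}}}
	db.Save(&user)

	db.Model(&user).Association("Languages").Append(&Language{Name: "EN"}) // add a relation, saving the new record
	fmt.Println(db.Model(&user).Association("Languages").Count())         // 2

	de := Language{Name: "DE"}
	db.Model(&user).Association("Languages").Replace(&de) // join table now references only DE
	db.Model(&user).Association("Languages").Delete(&de)  // detach DE; the languages row itself stays
	db.Model(&user).Association("Languages").Clear()      // detach whatever is left
}
```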
+ + // Belongs To + if relationship.Kind == "belongs_to" { + // Set foreign key to be null when clearing value (length equals 0) + if len(values) == 0 { + // Set foreign key to be nil + var foreignKeyMap = map[string]interface{}{} + for _, foreignKey := range relationship.ForeignDBNames { + foreignKeyMap[foreignKey] = nil + } + association.setErr(newDB.Model(scope.Value).UpdateColumn(foreignKeyMap).Error) + } + } else { + // Polymorphic Relations + if relationship.PolymorphicDBName != "" { + newDB = newDB.Where(fmt.Sprintf("%v = ?", scope.Quote(relationship.PolymorphicDBName)), relationship.PolymorphicValue) + } + + // Delete Relations except new created + if len(values) > 0 { + var associationForeignFieldNames, associationForeignDBNames []string + if relationship.Kind == "many_to_many" { + // if many to many relations, get association fields name from association foreign keys + associationScope := scope.New(reflect.New(field.Type()).Interface()) + for idx, dbName := range relationship.AssociationForeignFieldNames { + if field, ok := associationScope.FieldByName(dbName); ok { + associationForeignFieldNames = append(associationForeignFieldNames, field.Name) + associationForeignDBNames = append(associationForeignDBNames, relationship.AssociationForeignDBNames[idx]) + } + } + } else { + // If has one/many relations, use primary keys + for _, field := range scope.New(reflect.New(field.Type()).Interface()).PrimaryFields() { + associationForeignFieldNames = append(associationForeignFieldNames, field.Name) + associationForeignDBNames = append(associationForeignDBNames, field.DBName) + } + } + + newPrimaryKeys := scope.getColumnAsArray(associationForeignFieldNames, field.Interface()) + + if len(newPrimaryKeys) > 0 { + sql := fmt.Sprintf("%v NOT IN (%v)", toQueryCondition(scope, associationForeignDBNames), toQueryMarks(newPrimaryKeys)) + newDB = newDB.Where(sql, toQueryValues(newPrimaryKeys)...) + } + } + + if relationship.Kind == "many_to_many" { + // if many to many relations, delete related relations from join table + var sourceForeignFieldNames []string + + for _, dbName := range relationship.ForeignFieldNames { + if field, ok := scope.FieldByName(dbName); ok { + sourceForeignFieldNames = append(sourceForeignFieldNames, field.Name) + } + } + + if sourcePrimaryKeys := scope.getColumnAsArray(sourceForeignFieldNames, scope.Value); len(sourcePrimaryKeys) > 0 { + newDB = newDB.Where(fmt.Sprintf("%v IN (%v)", toQueryCondition(scope, relationship.ForeignDBNames), toQueryMarks(sourcePrimaryKeys)), toQueryValues(sourcePrimaryKeys)...) + + association.setErr(relationship.JoinTableHandler.Delete(relationship.JoinTableHandler, newDB)) + } + } else if relationship.Kind == "has_one" || relationship.Kind == "has_many" { + // has_one or has_many relations, set foreign key to be nil (TODO or delete them?) 
+ var foreignKeyMap = map[string]interface{}{} + for idx, foreignKey := range relationship.ForeignDBNames { + foreignKeyMap[foreignKey] = nil + if field, ok := scope.FieldByName(relationship.AssociationForeignFieldNames[idx]); ok { + newDB = newDB.Where(fmt.Sprintf("%v = ?", scope.Quote(foreignKey)), field.Field.Interface()) + } + } + + fieldValue := reflect.New(association.field.Field.Type()).Interface() + association.setErr(newDB.Model(fieldValue).UpdateColumn(foreignKeyMap).Error) + } + } + return association +} + +// Delete remove relationship between source & passed arguments, but won't delete those arguments +func (association *Association) Delete(values ...interface{}) *Association { + if association.Error != nil { + return association + } + + var ( + relationship = association.field.Relationship + scope = association.scope + field = association.field.Field + newDB = scope.NewDB() + ) + + if len(values) == 0 { + return association + } + + var deletingResourcePrimaryFieldNames, deletingResourcePrimaryDBNames []string + for _, field := range scope.New(reflect.New(field.Type()).Interface()).PrimaryFields() { + deletingResourcePrimaryFieldNames = append(deletingResourcePrimaryFieldNames, field.Name) + deletingResourcePrimaryDBNames = append(deletingResourcePrimaryDBNames, field.DBName) + } + + deletingPrimaryKeys := scope.getColumnAsArray(deletingResourcePrimaryFieldNames, values...) + + if relationship.Kind == "many_to_many" { + // source value's foreign keys + for idx, foreignKey := range relationship.ForeignDBNames { + if field, ok := scope.FieldByName(relationship.ForeignFieldNames[idx]); ok { + newDB = newDB.Where(fmt.Sprintf("%v = ?", scope.Quote(foreignKey)), field.Field.Interface()) + } + } + + // get association's foreign fields name + var associationScope = scope.New(reflect.New(field.Type()).Interface()) + var associationForeignFieldNames []string + for _, associationDBName := range relationship.AssociationForeignFieldNames { + if field, ok := associationScope.FieldByName(associationDBName); ok { + associationForeignFieldNames = append(associationForeignFieldNames, field.Name) + } + } + + // association value's foreign keys + deletingPrimaryKeys := scope.getColumnAsArray(associationForeignFieldNames, values...) + sql := fmt.Sprintf("%v IN (%v)", toQueryCondition(scope, relationship.AssociationForeignDBNames), toQueryMarks(deletingPrimaryKeys)) + newDB = newDB.Where(sql, toQueryValues(deletingPrimaryKeys)...) + + association.setErr(relationship.JoinTableHandler.Delete(relationship.JoinTableHandler, newDB)) + } else { + var foreignKeyMap = map[string]interface{}{} + for _, foreignKey := range relationship.ForeignDBNames { + foreignKeyMap[foreignKey] = nil + } + + if relationship.Kind == "belongs_to" { + // find with deleting relation's foreign keys + primaryKeys := scope.getColumnAsArray(relationship.AssociationForeignFieldNames, values...) 
+ newDB = newDB.Where( + fmt.Sprintf("%v IN (%v)", toQueryCondition(scope, relationship.ForeignDBNames), toQueryMarks(primaryKeys)), + toQueryValues(primaryKeys)..., + ) + + // set foreign key to be null if there are some records affected + modelValue := reflect.New(scope.GetModelStruct().ModelType).Interface() + if results := newDB.Model(modelValue).UpdateColumn(foreignKeyMap); results.Error == nil { + if results.RowsAffected > 0 { + scope.updatedAttrsWithValues(foreignKeyMap) + } + } else { + association.setErr(results.Error) + } + } else if relationship.Kind == "has_one" || relationship.Kind == "has_many" { + // find all relations + primaryKeys := scope.getColumnAsArray(relationship.AssociationForeignFieldNames, scope.Value) + newDB = newDB.Where( + fmt.Sprintf("%v IN (%v)", toQueryCondition(scope, relationship.ForeignDBNames), toQueryMarks(primaryKeys)), + toQueryValues(primaryKeys)..., + ) + + // only include those deleting relations + newDB = newDB.Where( + fmt.Sprintf("%v IN (%v)", toQueryCondition(scope, deletingResourcePrimaryDBNames), toQueryMarks(deletingPrimaryKeys)), + toQueryValues(deletingPrimaryKeys)..., + ) + + // set matched relation's foreign key to be null + fieldValue := reflect.New(association.field.Field.Type()).Interface() + association.setErr(newDB.Model(fieldValue).UpdateColumn(foreignKeyMap).Error) + } + } + + // Remove deleted records from source's field + if association.Error == nil { + if field.Kind() == reflect.Slice { + leftValues := reflect.Zero(field.Type()) + + for i := 0; i < field.Len(); i++ { + reflectValue := field.Index(i) + primaryKey := scope.getColumnAsArray(deletingResourcePrimaryFieldNames, reflectValue.Interface())[0] + var isDeleted = false + for _, pk := range deletingPrimaryKeys { + if equalAsString(primaryKey, pk) { + isDeleted = true + break + } + } + if !isDeleted { + leftValues = reflect.Append(leftValues, reflectValue) + } + } + + association.field.Set(leftValues) + } else if field.Kind() == reflect.Struct { + primaryKey := scope.getColumnAsArray(deletingResourcePrimaryFieldNames, field.Interface())[0] + for _, pk := range deletingPrimaryKeys { + if equalAsString(primaryKey, pk) { + association.field.Set(reflect.Zero(field.Type())) + break + } + } + } + } + + return association +} + +// Clear remove relationship between source & current associations, won't delete those associations +func (association *Association) Clear() *Association { + return association.Replace() +} + +// Count return the count of current associations +func (association *Association) Count() int { + var ( + count = 0 + relationship = association.field.Relationship + scope = association.scope + fieldValue = association.field.Field.Interface() + query = scope.DB() + ) + + if relationship.Kind == "many_to_many" { + query = relationship.JoinTableHandler.JoinWith(relationship.JoinTableHandler, query, scope.Value) + } else if relationship.Kind == "has_many" || relationship.Kind == "has_one" { + primaryKeys := scope.getColumnAsArray(relationship.AssociationForeignFieldNames, scope.Value) + query = query.Where( + fmt.Sprintf("%v IN (%v)", toQueryCondition(scope, relationship.ForeignDBNames), toQueryMarks(primaryKeys)), + toQueryValues(primaryKeys)..., + ) + } else if relationship.Kind == "belongs_to" { + primaryKeys := scope.getColumnAsArray(relationship.ForeignFieldNames, scope.Value) + query = query.Where( + fmt.Sprintf("%v IN (%v)", toQueryCondition(scope, relationship.AssociationForeignDBNames), toQueryMarks(primaryKeys)), + toQueryValues(primaryKeys)..., + ) + } + + if 
relationship.PolymorphicType != "" { + query = query.Where( + fmt.Sprintf("%v.%v = ?", scope.New(fieldValue).QuotedTableName(), scope.Quote(relationship.PolymorphicDBName)), + relationship.PolymorphicValue, + ) + } + + if err := query.Model(fieldValue).Count(&count).Error; err != nil { + association.Error = err + } + return count +} + +// saveAssociations save passed values as associations +func (association *Association) saveAssociations(values ...interface{}) *Association { + var ( + scope = association.scope + field = association.field + relationship = field.Relationship + ) + + saveAssociation := func(reflectValue reflect.Value) { + // value has to been pointer + if reflectValue.Kind() != reflect.Ptr { + reflectPtr := reflect.New(reflectValue.Type()) + reflectPtr.Elem().Set(reflectValue) + reflectValue = reflectPtr + } + + // value has to been saved for many2many + if relationship.Kind == "many_to_many" { + if scope.New(reflectValue.Interface()).PrimaryKeyZero() { + association.setErr(scope.NewDB().Save(reflectValue.Interface()).Error) + } + } + + // Assign Fields + var fieldType = field.Field.Type() + var setFieldBackToValue, setSliceFieldBackToValue bool + if reflectValue.Type().AssignableTo(fieldType) { + field.Set(reflectValue) + } else if reflectValue.Type().Elem().AssignableTo(fieldType) { + // if field's type is struct, then need to set value back to argument after save + setFieldBackToValue = true + field.Set(reflectValue.Elem()) + } else if fieldType.Kind() == reflect.Slice { + if reflectValue.Type().AssignableTo(fieldType.Elem()) { + field.Set(reflect.Append(field.Field, reflectValue)) + } else if reflectValue.Type().Elem().AssignableTo(fieldType.Elem()) { + // if field's type is slice of struct, then need to set value back to argument after save + setSliceFieldBackToValue = true + field.Set(reflect.Append(field.Field, reflectValue.Elem())) + } + } + + if relationship.Kind == "many_to_many" { + association.setErr(relationship.JoinTableHandler.Add(relationship.JoinTableHandler, scope.NewDB(), scope.Value, reflectValue.Interface())) + } else { + association.setErr(scope.NewDB().Select(field.Name).Save(scope.Value).Error) + + if setFieldBackToValue { + reflectValue.Elem().Set(field.Field) + } else if setSliceFieldBackToValue { + reflectValue.Elem().Set(field.Field.Index(field.Field.Len() - 1)) + } + } + } + + for _, value := range values { + reflectValue := reflect.ValueOf(value) + indirectReflectValue := reflect.Indirect(reflectValue) + if indirectReflectValue.Kind() == reflect.Struct { + saveAssociation(reflectValue) + } else if indirectReflectValue.Kind() == reflect.Slice { + for i := 0; i < indirectReflectValue.Len(); i++ { + saveAssociation(indirectReflectValue.Index(i)) + } + } else { + association.setErr(errors.New("invalid value type")) + } + } + return association +} + +func (association *Association) setErr(err error) *Association { + if err != nil { + association.Error = err + } + return association +} diff --git a/vendor/github.com/jinzhu/gorm/association_test.go b/vendor/github.com/jinzhu/gorm/association_test.go new file mode 100644 index 0000000..60d0cf4 --- /dev/null +++ b/vendor/github.com/jinzhu/gorm/association_test.go @@ -0,0 +1,1050 @@ +package gorm_test + +import ( + "fmt" + "os" + "reflect" + "sort" + "testing" + + "github.com/jinzhu/gorm" +) + +func TestBelongsTo(t *testing.T) { + post := Post{ + Title: "post belongs to", + Body: "body belongs to", + Category: Category{Name: "Category 1"}, + MainCategory: Category{Name: "Main Category 1"}, + } + + if err 
:= DB.Save(&post).Error; err != nil { + t.Error("Got errors when save post", err) + } + + if post.Category.ID == 0 || post.MainCategory.ID == 0 { + t.Errorf("Category's primary key should be updated") + } + + if post.CategoryId.Int64 == 0 || post.MainCategoryId == 0 { + t.Errorf("post's foreign key should be updated") + } + + // Query + var category1 Category + DB.Model(&post).Association("Category").Find(&category1) + if category1.Name != "Category 1" { + t.Errorf("Query belongs to relations with Association") + } + + var mainCategory1 Category + DB.Model(&post).Association("MainCategory").Find(&mainCategory1) + if mainCategory1.Name != "Main Category 1" { + t.Errorf("Query belongs to relations with Association") + } + + var category11 Category + DB.Model(&post).Related(&category11) + if category11.Name != "Category 1" { + t.Errorf("Query belongs to relations with Related") + } + + if DB.Model(&post).Association("Category").Count() != 1 { + t.Errorf("Post's category count should be 1") + } + + if DB.Model(&post).Association("MainCategory").Count() != 1 { + t.Errorf("Post's main category count should be 1") + } + + // Append + var category2 = Category{ + Name: "Category 2", + } + DB.Model(&post).Association("Category").Append(&category2) + + if category2.ID == 0 { + t.Errorf("Category should has ID when created with Append") + } + + var category21 Category + DB.Model(&post).Related(&category21) + + if category21.Name != "Category 2" { + t.Errorf("Category should be updated with Append") + } + + if DB.Model(&post).Association("Category").Count() != 1 { + t.Errorf("Post's category count should be 1") + } + + // Replace + var category3 = Category{ + Name: "Category 3", + } + DB.Model(&post).Association("Category").Replace(&category3) + + if category3.ID == 0 { + t.Errorf("Category should has ID when created with Replace") + } + + var category31 Category + DB.Model(&post).Related(&category31) + if category31.Name != "Category 3" { + t.Errorf("Category should be updated with Replace") + } + + if DB.Model(&post).Association("Category").Count() != 1 { + t.Errorf("Post's category count should be 1") + } + + // Delete + DB.Model(&post).Association("Category").Delete(&category2) + if DB.Model(&post).Related(&Category{}).RecordNotFound() { + t.Errorf("Should not delete any category when Delete a unrelated Category") + } + + if post.Category.Name == "" { + t.Errorf("Post's category should not be reseted when Delete a unrelated Category") + } + + DB.Model(&post).Association("Category").Delete(&category3) + + if post.Category.Name != "" { + t.Errorf("Post's category should be reseted after Delete") + } + + var category41 Category + DB.Model(&post).Related(&category41) + if category41.Name != "" { + t.Errorf("Category should be deleted with Delete") + } + + if count := DB.Model(&post).Association("Category").Count(); count != 0 { + t.Errorf("Post's category count should be 0 after Delete, but got %v", count) + } + + // Clear + DB.Model(&post).Association("Category").Append(&Category{ + Name: "Category 2", + }) + + if DB.Model(&post).Related(&Category{}).RecordNotFound() { + t.Errorf("Should find category after append") + } + + if post.Category.Name == "" { + t.Errorf("Post's category should has value after Append") + } + + DB.Model(&post).Association("Category").Clear() + + if post.Category.Name != "" { + t.Errorf("Post's category should be cleared after Clear") + } + + if !DB.Model(&post).Related(&Category{}).RecordNotFound() { + t.Errorf("Should not find any category after Clear") + } + + if count := 
DB.Model(&post).Association("Category").Count(); count != 0 { + t.Errorf("Post's category count should be 0 after Clear, but got %v", count) + } + + // Check Association mode with soft delete + category6 := Category{ + Name: "Category 6", + } + DB.Model(&post).Association("Category").Append(&category6) + + if count := DB.Model(&post).Association("Category").Count(); count != 1 { + t.Errorf("Post's category count should be 1 after Append, but got %v", count) + } + + DB.Delete(&category6) + + if count := DB.Model(&post).Association("Category").Count(); count != 0 { + t.Errorf("Post's category count should be 0 after the category has been deleted, but got %v", count) + } + + if err := DB.Model(&post).Association("Category").Find(&Category{}).Error; err == nil { + t.Errorf("Post's category is not findable after Delete") + } + + if count := DB.Unscoped().Model(&post).Association("Category").Count(); count != 1 { + t.Errorf("Post's category count should be 1 when query with Unscoped, but got %v", count) + } + + if err := DB.Unscoped().Model(&post).Association("Category").Find(&Category{}).Error; err != nil { + t.Errorf("Post's category should be findable when query with Unscoped, got %v", err) + } +} + +func TestBelongsToOverrideForeignKey1(t *testing.T) { + type Profile struct { + gorm.Model + Name string + } + + type User struct { + gorm.Model + Profile Profile `gorm:"ForeignKey:ProfileRefer"` + ProfileRefer int + } + + if relation, ok := DB.NewScope(&User{}).FieldByName("Profile"); ok { + if relation.Relationship.Kind != "belongs_to" || + !reflect.DeepEqual(relation.Relationship.ForeignFieldNames, []string{"ProfileRefer"}) || + !reflect.DeepEqual(relation.Relationship.AssociationForeignFieldNames, []string{"ID"}) { + t.Errorf("Override belongs to foreign key with tag") + } + } +} + +func TestBelongsToOverrideForeignKey2(t *testing.T) { + type Profile struct { + gorm.Model + Refer string + Name string + } + + type User struct { + gorm.Model + Profile Profile `gorm:"ForeignKey:ProfileID;AssociationForeignKey:Refer"` + ProfileID int + } + + if relation, ok := DB.NewScope(&User{}).FieldByName("Profile"); ok { + if relation.Relationship.Kind != "belongs_to" || + !reflect.DeepEqual(relation.Relationship.ForeignFieldNames, []string{"ProfileID"}) || + !reflect.DeepEqual(relation.Relationship.AssociationForeignFieldNames, []string{"Refer"}) { + t.Errorf("Override belongs to foreign key with tag") + } + } +} + +func TestHasOne(t *testing.T) { + user := User{ + Name: "has one", + CreditCard: CreditCard{Number: "411111111111"}, + } + + if err := DB.Save(&user).Error; err != nil { + t.Error("Got errors when save user", err.Error()) + } + + if user.CreditCard.UserId.Int64 == 0 { + t.Errorf("CreditCard's foreign key should be updated") + } + + // Query + var creditCard1 CreditCard + DB.Model(&user).Related(&creditCard1) + + if creditCard1.Number != "411111111111" { + t.Errorf("Query has one relations with Related") + } + + var creditCard11 CreditCard + DB.Model(&user).Association("CreditCard").Find(&creditCard11) + + if creditCard11.Number != "411111111111" { + t.Errorf("Query has one relations with Related") + } + + if DB.Model(&user).Association("CreditCard").Count() != 1 { + t.Errorf("User's credit card count should be 1") + } + + // Append + var creditcard2 = CreditCard{ + Number: "411111111112", + } + DB.Model(&user).Association("CreditCard").Append(&creditcard2) + + if creditcard2.ID == 0 { + t.Errorf("Creditcard should has ID when created with Append") + } + + var creditcard21 CreditCard + 
DB.Model(&user).Related(&creditcard21) + if creditcard21.Number != "411111111112" { + t.Errorf("CreditCard should be updated with Append") + } + + if DB.Model(&user).Association("CreditCard").Count() != 1 { + t.Errorf("User's credit card count should be 1") + } + + // Replace + var creditcard3 = CreditCard{ + Number: "411111111113", + } + DB.Model(&user).Association("CreditCard").Replace(&creditcard3) + + if creditcard3.ID == 0 { + t.Errorf("Creditcard should has ID when created with Replace") + } + + var creditcard31 CreditCard + DB.Model(&user).Related(&creditcard31) + if creditcard31.Number != "411111111113" { + t.Errorf("CreditCard should be updated with Replace") + } + + if DB.Model(&user).Association("CreditCard").Count() != 1 { + t.Errorf("User's credit card count should be 1") + } + + // Delete + DB.Model(&user).Association("CreditCard").Delete(&creditcard2) + var creditcard4 CreditCard + DB.Model(&user).Related(&creditcard4) + if creditcard4.Number != "411111111113" { + t.Errorf("Should not delete credit card when Delete a unrelated CreditCard") + } + + if DB.Model(&user).Association("CreditCard").Count() != 1 { + t.Errorf("User's credit card count should be 1") + } + + DB.Model(&user).Association("CreditCard").Delete(&creditcard3) + if !DB.Model(&user).Related(&CreditCard{}).RecordNotFound() { + t.Errorf("Should delete credit card with Delete") + } + + if DB.Model(&user).Association("CreditCard").Count() != 0 { + t.Errorf("User's credit card count should be 0 after Delete") + } + + // Clear + var creditcard5 = CreditCard{ + Number: "411111111115", + } + DB.Model(&user).Association("CreditCard").Append(&creditcard5) + + if DB.Model(&user).Related(&CreditCard{}).RecordNotFound() { + t.Errorf("Should added credit card with Append") + } + + if DB.Model(&user).Association("CreditCard").Count() != 1 { + t.Errorf("User's credit card count should be 1") + } + + DB.Model(&user).Association("CreditCard").Clear() + if !DB.Model(&user).Related(&CreditCard{}).RecordNotFound() { + t.Errorf("Credit card should be deleted with Clear") + } + + if DB.Model(&user).Association("CreditCard").Count() != 0 { + t.Errorf("User's credit card count should be 0 after Clear") + } + + // Check Association mode with soft delete + var creditcard6 = CreditCard{ + Number: "411111111116", + } + DB.Model(&user).Association("CreditCard").Append(&creditcard6) + + if count := DB.Model(&user).Association("CreditCard").Count(); count != 1 { + t.Errorf("User's credit card count should be 1 after Append, but got %v", count) + } + + DB.Delete(&creditcard6) + + if count := DB.Model(&user).Association("CreditCard").Count(); count != 0 { + t.Errorf("User's credit card count should be 0 after credit card deleted, but got %v", count) + } + + if err := DB.Model(&user).Association("CreditCard").Find(&CreditCard{}).Error; err == nil { + t.Errorf("User's creditcard is not findable after Delete") + } + + if count := DB.Unscoped().Model(&user).Association("CreditCard").Count(); count != 1 { + t.Errorf("User's credit card count should be 1 when query with Unscoped, but got %v", count) + } + + if err := DB.Unscoped().Model(&user).Association("CreditCard").Find(&CreditCard{}).Error; err != nil { + t.Errorf("User's creditcard should be findable when query with Unscoped, got %v", err) + } +} + +func TestHasOneOverrideForeignKey1(t *testing.T) { + type Profile struct { + gorm.Model + Name string + UserRefer uint + } + + type User struct { + gorm.Model + Profile Profile `gorm:"ForeignKey:UserRefer"` + } + + if relation, ok := 
DB.NewScope(&User{}).FieldByName("Profile"); ok { + if relation.Relationship.Kind != "has_one" || + !reflect.DeepEqual(relation.Relationship.ForeignFieldNames, []string{"UserRefer"}) || + !reflect.DeepEqual(relation.Relationship.AssociationForeignFieldNames, []string{"ID"}) { + t.Errorf("Override belongs to foreign key with tag") + } + } +} + +func TestHasOneOverrideForeignKey2(t *testing.T) { + type Profile struct { + gorm.Model + Name string + UserID uint + } + + type User struct { + gorm.Model + Refer string + Profile Profile `gorm:"ForeignKey:UserID;AssociationForeignKey:Refer"` + } + + if relation, ok := DB.NewScope(&User{}).FieldByName("Profile"); ok { + if relation.Relationship.Kind != "has_one" || + !reflect.DeepEqual(relation.Relationship.ForeignFieldNames, []string{"UserID"}) || + !reflect.DeepEqual(relation.Relationship.AssociationForeignFieldNames, []string{"Refer"}) { + t.Errorf("Override belongs to foreign key with tag") + } + } +} + +func TestHasMany(t *testing.T) { + post := Post{ + Title: "post has many", + Body: "body has many", + Comments: []*Comment{{Content: "Comment 1"}, {Content: "Comment 2"}}, + } + + if err := DB.Save(&post).Error; err != nil { + t.Error("Got errors when save post", err) + } + + for _, comment := range post.Comments { + if comment.PostId == 0 { + t.Errorf("comment's PostID should be updated") + } + } + + var compareComments = func(comments []Comment, contents []string) bool { + var commentContents []string + for _, comment := range comments { + commentContents = append(commentContents, comment.Content) + } + sort.Strings(commentContents) + sort.Strings(contents) + return reflect.DeepEqual(commentContents, contents) + } + + // Query + if DB.First(&Comment{}, "content = ?", "Comment 1").Error != nil { + t.Errorf("Comment 1 should be saved") + } + + var comments1 []Comment + DB.Model(&post).Association("Comments").Find(&comments1) + if !compareComments(comments1, []string{"Comment 1", "Comment 2"}) { + t.Errorf("Query has many relations with Association") + } + + var comments11 []Comment + DB.Model(&post).Related(&comments11) + if !compareComments(comments11, []string{"Comment 1", "Comment 2"}) { + t.Errorf("Query has many relations with Related") + } + + if DB.Model(&post).Association("Comments").Count() != 2 { + t.Errorf("Post's comments count should be 2") + } + + // Append + DB.Model(&post).Association("Comments").Append(&Comment{Content: "Comment 3"}) + + var comments2 []Comment + DB.Model(&post).Related(&comments2) + if !compareComments(comments2, []string{"Comment 1", "Comment 2", "Comment 3"}) { + t.Errorf("Append new record to has many relations") + } + + if DB.Model(&post).Association("Comments").Count() != 3 { + t.Errorf("Post's comments count should be 3 after Append") + } + + // Delete + DB.Model(&post).Association("Comments").Delete(comments11) + + var comments3 []Comment + DB.Model(&post).Related(&comments3) + if !compareComments(comments3, []string{"Comment 3"}) { + t.Errorf("Delete an existing resource for has many relations") + } + + if DB.Model(&post).Association("Comments").Count() != 1 { + t.Errorf("Post's comments count should be 1 after Delete 2") + } + + // Replace + DB.Model(&Post{Id: 999}).Association("Comments").Replace() + + var comments4 []Comment + DB.Model(&post).Related(&comments4) + if len(comments4) == 0 { + t.Errorf("Replace for other resource should not clear all comments") + } + + DB.Model(&post).Association("Comments").Replace(&Comment{Content: "Comment 4"}, &Comment{Content: "Comment 5"}) + + var comments41 
[]Comment + DB.Model(&post).Related(&comments41) + if !compareComments(comments41, []string{"Comment 4", "Comment 5"}) { + t.Errorf("Replace has many relations") + } + + // Clear + DB.Model(&Post{Id: 999}).Association("Comments").Clear() + + var comments5 []Comment + DB.Model(&post).Related(&comments5) + if len(comments5) == 0 { + t.Errorf("Clear should not clear all comments") + } + + DB.Model(&post).Association("Comments").Clear() + + var comments51 []Comment + DB.Model(&post).Related(&comments51) + if len(comments51) != 0 { + t.Errorf("Clear has many relations") + } + + // Check Association mode with soft delete + var comment6 = Comment{ + Content: "comment 6", + } + DB.Model(&post).Association("Comments").Append(&comment6) + + if count := DB.Model(&post).Association("Comments").Count(); count != 1 { + t.Errorf("post's comments count should be 1 after Append, but got %v", count) + } + + DB.Delete(&comment6) + + if count := DB.Model(&post).Association("Comments").Count(); count != 0 { + t.Errorf("post's comments count should be 0 after comment been deleted, but got %v", count) + } + + var comments6 []Comment + if DB.Model(&post).Association("Comments").Find(&comments6); len(comments6) != 0 { + t.Errorf("post's comments count should be 0 when find with Find, but got %v", len(comments6)) + } + + if count := DB.Unscoped().Model(&post).Association("Comments").Count(); count != 1 { + t.Errorf("post's comments count should be 1 when query with Unscoped, but got %v", count) + } + + var comments61 []Comment + if DB.Unscoped().Model(&post).Association("Comments").Find(&comments61); len(comments61) != 1 { + t.Errorf("post's comments count should be 1 when query with Unscoped, but got %v", len(comments61)) + } +} + +func TestHasManyOverrideForeignKey1(t *testing.T) { + type Profile struct { + gorm.Model + Name string + UserRefer uint + } + + type User struct { + gorm.Model + Profile []Profile `gorm:"ForeignKey:UserRefer"` + } + + if relation, ok := DB.NewScope(&User{}).FieldByName("Profile"); ok { + if relation.Relationship.Kind != "has_many" || + !reflect.DeepEqual(relation.Relationship.ForeignFieldNames, []string{"UserRefer"}) || + !reflect.DeepEqual(relation.Relationship.AssociationForeignFieldNames, []string{"ID"}) { + t.Errorf("Override belongs to foreign key with tag") + } + } +} + +func TestHasManyOverrideForeignKey2(t *testing.T) { + type Profile struct { + gorm.Model + Name string + UserID uint + } + + type User struct { + gorm.Model + Refer string + Profile []Profile `gorm:"ForeignKey:UserID;AssociationForeignKey:Refer"` + } + + if relation, ok := DB.NewScope(&User{}).FieldByName("Profile"); ok { + if relation.Relationship.Kind != "has_many" || + !reflect.DeepEqual(relation.Relationship.ForeignFieldNames, []string{"UserID"}) || + !reflect.DeepEqual(relation.Relationship.AssociationForeignFieldNames, []string{"Refer"}) { + t.Errorf("Override belongs to foreign key with tag") + } + } +} + +func TestManyToMany(t *testing.T) { + DB.Raw("delete from languages") + var languages = []Language{{Name: "ZH"}, {Name: "EN"}} + user := User{Name: "Many2Many", Languages: languages} + DB.Save(&user) + + // Query + var newLanguages []Language + DB.Model(&user).Related(&newLanguages, "Languages") + if len(newLanguages) != len([]string{"ZH", "EN"}) { + t.Errorf("Query many to many relations") + } + + DB.Model(&user).Association("Languages").Find(&newLanguages) + if len(newLanguages) != len([]string{"ZH", "EN"}) { + t.Errorf("Should be able to find many to many relations") + } + + if 
DB.Model(&user).Association("Languages").Count() != len([]string{"ZH", "EN"}) { + t.Errorf("Count should return correct result") + } + + // Append + DB.Model(&user).Association("Languages").Append(&Language{Name: "DE"}) + if DB.Where("name = ?", "DE").First(&Language{}).RecordNotFound() { + t.Errorf("New record should be saved when append") + } + + languageA := Language{Name: "AA"} + DB.Save(&languageA) + DB.Model(&User{Id: user.Id}).Association("Languages").Append(&languageA) + + languageC := Language{Name: "CC"} + DB.Save(&languageC) + DB.Model(&user).Association("Languages").Append(&[]Language{{Name: "BB"}, languageC}) + + DB.Model(&User{Id: user.Id}).Association("Languages").Append(&[]Language{{Name: "DD"}, {Name: "EE"}}) + + totalLanguages := []string{"ZH", "EN", "DE", "AA", "BB", "CC", "DD", "EE"} + + if DB.Model(&user).Association("Languages").Count() != len(totalLanguages) { + t.Errorf("All appended languages should be saved") + } + + // Delete + user.Languages = []Language{} + DB.Model(&user).Association("Languages").Find(&user.Languages) + + var language Language + DB.Where("name = ?", "EE").First(&language) + DB.Model(&user).Association("Languages").Delete(language, &language) + + if DB.Model(&user).Association("Languages").Count() != len(totalLanguages)-1 || len(user.Languages) != len(totalLanguages)-1 { + t.Errorf("Relations should be deleted with Delete") + } + if DB.Where("name = ?", "EE").First(&Language{}).RecordNotFound() { + t.Errorf("Language EE should not be deleted") + } + + DB.Where("name IN (?)", []string{"CC", "DD"}).Find(&languages) + + user2 := User{Name: "Many2Many_User2", Languages: languages} + DB.Save(&user2) + + DB.Model(&user).Association("Languages").Delete(languages, &languages) + if DB.Model(&user).Association("Languages").Count() != len(totalLanguages)-3 || len(user.Languages) != len(totalLanguages)-3 { + t.Errorf("Relations should be deleted with Delete") + } + + if DB.Model(&user2).Association("Languages").Count() == 0 { + t.Errorf("Other user's relations should not be deleted") + } + + // Replace + var languageB Language + DB.Where("name = ?", "BB").First(&languageB) + DB.Model(&user).Association("Languages").Replace(languageB) + if len(user.Languages) != 1 || DB.Model(&user).Association("Languages").Count() != 1 { + t.Errorf("Relations should be replaced") + } + + DB.Model(&user).Association("Languages").Replace() + if len(user.Languages) != 0 || DB.Model(&user).Association("Languages").Count() != 0 { + t.Errorf("Relations should be replaced with empty") + } + + DB.Model(&user).Association("Languages").Replace(&[]Language{{Name: "FF"}, {Name: "JJ"}}) + if len(user.Languages) != 2 || DB.Model(&user).Association("Languages").Count() != len([]string{"FF", "JJ"}) { + t.Errorf("Relations should be replaced") + } + + // Clear + DB.Model(&user).Association("Languages").Clear() + if len(user.Languages) != 0 || DB.Model(&user).Association("Languages").Count() != 0 { + t.Errorf("Relations should be cleared") + } + + // Check Association mode with soft delete + var language6 = Language{ + Name: "language 6", + } + DB.Model(&user).Association("Languages").Append(&language6) + + if count := DB.Model(&user).Association("Languages").Count(); count != 1 { + t.Errorf("user's languages count should be 1 after Append, but got %v", count) + } + + DB.Delete(&language6) + + if count := DB.Model(&user).Association("Languages").Count(); count != 0 { + t.Errorf("user's languages count should be 0 after language been deleted, but got %v", count) + } + + var languages6 
[]Language + if DB.Model(&user).Association("Languages").Find(&languages6); len(languages6) != 0 { + t.Errorf("user's languages count should be 0 when find with Find, but got %v", len(languages6)) + } + + if count := DB.Unscoped().Model(&user).Association("Languages").Count(); count != 1 { + t.Errorf("user's languages count should be 1 when query with Unscoped, but got %v", count) + } + + var languages61 []Language + if DB.Unscoped().Model(&user).Association("Languages").Find(&languages61); len(languages61) != 1 { + t.Errorf("user's languages count should be 1 when query with Unscoped, but got %v", len(languages61)) + } +} + +func TestRelated(t *testing.T) { + user := User{ + Name: "jinzhu", + BillingAddress: Address{Address1: "Billing Address - Address 1"}, + ShippingAddress: Address{Address1: "Shipping Address - Address 1"}, + Emails: []Email{{Email: "jinzhu@example.com"}, {Email: "jinzhu-2@example@example.com"}}, + CreditCard: CreditCard{Number: "1234567890"}, + Company: Company{Name: "company1"}, + } + + if err := DB.Save(&user).Error; err != nil { + t.Errorf("No error should happen when saving user") + } + + if user.CreditCard.ID == 0 { + t.Errorf("After user save, credit card should have id") + } + + if user.BillingAddress.ID == 0 { + t.Errorf("After user save, billing address should have id") + } + + if user.Emails[0].Id == 0 { + t.Errorf("After user save, billing address should have id") + } + + var emails []Email + DB.Model(&user).Related(&emails) + if len(emails) != 2 { + t.Errorf("Should have two emails") + } + + var emails2 []Email + DB.Model(&user).Where("email = ?", "jinzhu@example.com").Related(&emails2) + if len(emails2) != 1 { + t.Errorf("Should have two emails") + } + + var emails3 []*Email + DB.Model(&user).Related(&emails3) + if len(emails3) != 2 { + t.Errorf("Should have two emails") + } + + var user1 User + DB.Model(&user).Related(&user1.Emails) + if len(user1.Emails) != 2 { + t.Errorf("Should have only one email match related condition") + } + + var address1 Address + DB.Model(&user).Related(&address1, "BillingAddressId") + if address1.Address1 != "Billing Address - Address 1" { + t.Errorf("Should get billing address from user correctly") + } + + user1 = User{} + DB.Model(&address1).Related(&user1, "BillingAddressId") + if DB.NewRecord(user1) { + t.Errorf("Should get user from address correctly") + } + + var user2 User + DB.Model(&emails[0]).Related(&user2) + if user2.Id != user.Id || user2.Name != user.Name { + t.Errorf("Should get user from email correctly") + } + + var creditcard CreditCard + var user3 User + DB.First(&creditcard, "number = ?", "1234567890") + DB.Model(&creditcard).Related(&user3) + if user3.Id != user.Id || user3.Name != user.Name { + t.Errorf("Should get user from credit card correctly") + } + + if !DB.Model(&CreditCard{}).Related(&User{}).RecordNotFound() { + t.Errorf("RecordNotFound for Related") + } + + var company Company + if DB.Model(&user).Related(&company, "Company").RecordNotFound() || company.Name != "company1" { + t.Errorf("RecordNotFound for Related") + } +} + +func TestForeignKey(t *testing.T) { + for _, structField := range DB.NewScope(&User{}).GetStructFields() { + for _, foreignKey := range []string{"BillingAddressID", "ShippingAddressId", "CompanyID"} { + if structField.Name == foreignKey && !structField.IsForeignKey { + t.Errorf(fmt.Sprintf("%v should be foreign key", foreignKey)) + } + } + } + + for _, structField := range DB.NewScope(&Email{}).GetStructFields() { + for _, foreignKey := range []string{"UserId"} { + if 
structField.Name == foreignKey && !structField.IsForeignKey { + t.Errorf(fmt.Sprintf("%v should be foreign key", foreignKey)) + } + } + } + + for _, structField := range DB.NewScope(&Post{}).GetStructFields() { + for _, foreignKey := range []string{"CategoryId", "MainCategoryId"} { + if structField.Name == foreignKey && !structField.IsForeignKey { + t.Errorf(fmt.Sprintf("%v should be foreign key", foreignKey)) + } + } + } + + for _, structField := range DB.NewScope(&Comment{}).GetStructFields() { + for _, foreignKey := range []string{"PostId"} { + if structField.Name == foreignKey && !structField.IsForeignKey { + t.Errorf(fmt.Sprintf("%v should be foreign key", foreignKey)) + } + } + } +} + +func testForeignKey(t *testing.T, source interface{}, sourceFieldName string, target interface{}, targetFieldName string) { + if dialect := os.Getenv("GORM_DIALECT"); dialect == "" || dialect == "sqlite" { + // sqlite does not support ADD CONSTRAINT in ALTER TABLE + return + } + targetScope := DB.NewScope(target) + targetTableName := targetScope.TableName() + modelScope := DB.NewScope(source) + modelField, ok := modelScope.FieldByName(sourceFieldName) + if !ok { + t.Fatalf(fmt.Sprintf("Failed to get field by name: %v", sourceFieldName)) + } + targetField, ok := targetScope.FieldByName(targetFieldName) + if !ok { + t.Fatalf(fmt.Sprintf("Failed to get field by name: %v", targetFieldName)) + } + dest := fmt.Sprintf("%v(%v)", targetTableName, targetField.DBName) + err := DB.Model(source).AddForeignKey(modelField.DBName, dest, "CASCADE", "CASCADE").Error + if err != nil { + t.Fatalf(fmt.Sprintf("Failed to create foreign key: %v", err)) + } +} + +func TestLongForeignKey(t *testing.T) { + testForeignKey(t, &NotSoLongTableName{}, "ReallyLongThingID", &ReallyLongTableNameToTestMySQLNameLengthLimit{}, "ID") +} + +func TestLongForeignKeyWithShortDest(t *testing.T) { + testForeignKey(t, &ReallyLongThingThatReferencesShort{}, "ShortID", &Short{}, "ID") +} + +func TestHasManyChildrenWithOneStruct(t *testing.T) { + category := Category{ + Name: "main", + Categories: []Category{ + {Name: "sub1"}, + {Name: "sub2"}, + }, + } + + DB.Save(&category) +} + +func TestAutoSaveBelongsToAssociation(t *testing.T) { + type Company struct { + gorm.Model + Name string + } + + type User struct { + gorm.Model + Name string + CompanyID uint + Company Company `gorm:"association_autoupdate:false;association_autocreate:false;"` + } + + DB.Where("name = ?", "auto_save_association").Delete(&Company{}) + DB.AutoMigrate(&Company{}, &User{}) + + DB.Save(&User{Name: "jinzhu", Company: Company{Name: "auto_save_association"}}) + + if !DB.Where("name = ?", "auto_save_association").First(&Company{}).RecordNotFound() { + t.Errorf("Company auto_save_association should not have been saved when autosave is false") + } + + // if foreign key is set, this should be saved even if association isn't + company := Company{Name: "auto_save_association"} + DB.Save(&company) + + company.Name = "auto_save_association_new_name" + user := User{Name: "jinzhu", Company: company} + + DB.Save(&user) + + if !DB.Where("name = ?", "auto_save_association_new_name").First(&Company{}).RecordNotFound() { + t.Errorf("Company should not have been updated") + } + + if DB.Where("id = ? 
AND company_id = ?", user.ID, company.ID).First(&User{}).RecordNotFound() { + t.Errorf("User's foreign key should have been saved") + } + + user2 := User{Name: "jinzhu_2", Company: Company{Name: "auto_save_association_2"}} + DB.Set("gorm:association_autocreate", true).Save(&user2) + if DB.Where("name = ?", "auto_save_association_2").First(&Company{}).RecordNotFound() { + t.Errorf("Company auto_save_association_2 should been created when autocreate is true") + } + + user2.Company.Name = "auto_save_association_2_newname" + DB.Set("gorm:association_autoupdate", true).Save(&user2) + + if DB.Where("name = ?", "auto_save_association_2_newname").First(&Company{}).RecordNotFound() { + t.Errorf("Company should been updated") + } +} + +func TestAutoSaveHasOneAssociation(t *testing.T) { + type Company struct { + gorm.Model + UserID uint + Name string + } + + type User struct { + gorm.Model + Name string + Company Company `gorm:"association_autoupdate:false;association_autocreate:false;"` + } + + DB.Where("name = ?", "auto_save_has_one_association").Delete(&Company{}) + DB.AutoMigrate(&Company{}, &User{}) + + DB.Save(&User{Name: "jinzhu", Company: Company{Name: "auto_save_has_one_association"}}) + + if !DB.Where("name = ?", "auto_save_has_one_association").First(&Company{}).RecordNotFound() { + t.Errorf("Company auto_save_has_one_association should not have been saved when autosave is false") + } + + company := Company{Name: "auto_save_has_one_association"} + DB.Save(&company) + + company.Name = "auto_save_has_one_association_new_name" + user := User{Name: "jinzhu", Company: company} + + DB.Save(&user) + + if !DB.Where("name = ?", "auto_save_has_one_association_new_name").First(&Company{}).RecordNotFound() { + t.Errorf("Company should not have been updated") + } + + if !DB.Where("name = ? AND user_id = ?", "auto_save_has_one_association", user.ID).First(&Company{}).RecordNotFound() { + t.Errorf("Company should not have been updated") + } + + if user.Company.UserID == 0 { + t.Errorf("UserID should be assigned") + } + + company.Name = "auto_save_has_one_association_2_new_name" + DB.Set("gorm:association_autoupdate", true).Save(&user) + + if DB.Where("name = ? 
AND user_id = ?", "auto_save_has_one_association_new_name", user.ID).First(&Company{}).RecordNotFound() { + t.Errorf("Company should been updated") + } + + user2 := User{Name: "jinzhu_2", Company: Company{Name: "auto_save_has_one_association_2"}} + DB.Set("gorm:association_autocreate", true).Save(&user2) + if DB.Where("name = ?", "auto_save_has_one_association_2").First(&Company{}).RecordNotFound() { + t.Errorf("Company auto_save_has_one_association_2 should been created when autocreate is true") + } +} + +func TestAutoSaveMany2ManyAssociation(t *testing.T) { + type Company struct { + gorm.Model + Name string + } + + type User struct { + gorm.Model + Name string + Companies []Company `gorm:"many2many:user_companies;association_autoupdate:false;association_autocreate:false;"` + } + + DB.AutoMigrate(&Company{}, &User{}) + + DB.Save(&User{Name: "jinzhu", Companies: []Company{{Name: "auto_save_m2m_association"}}}) + + if !DB.Where("name = ?", "auto_save_m2m_association").First(&Company{}).RecordNotFound() { + t.Errorf("Company auto_save_m2m_association should not have been saved when autosave is false") + } + + company := Company{Name: "auto_save_m2m_association"} + DB.Save(&company) + + company.Name = "auto_save_m2m_association_new_name" + user := User{Name: "jinzhu", Companies: []Company{company, {Name: "auto_save_m2m_association_new_name_2"}}} + + DB.Save(&user) + + if !DB.Where("name = ?", "auto_save_m2m_association_new_name").First(&Company{}).RecordNotFound() { + t.Errorf("Company should not have been updated") + } + + if !DB.Where("name = ?", "auto_save_m2m_association_new_name_2").First(&Company{}).RecordNotFound() { + t.Errorf("Company should not been created") + } + + if DB.Model(&user).Association("Companies").Count() != 1 { + t.Errorf("Relationship should been saved") + } + + DB.Set("gorm:association_autoupdate", true).Set("gorm:association_autocreate", true).Save(&user) + + if DB.Where("name = ?", "auto_save_m2m_association_new_name").First(&Company{}).RecordNotFound() { + t.Errorf("Company should been updated") + } + + if DB.Where("name = ?", "auto_save_m2m_association_new_name_2").First(&Company{}).RecordNotFound() { + t.Errorf("Company should been created") + } + + if DB.Model(&user).Association("Companies").Count() != 2 { + t.Errorf("Relationship should been updated") + } +} diff --git a/vendor/github.com/jinzhu/gorm/callback.go b/vendor/github.com/jinzhu/gorm/callback.go new file mode 100644 index 0000000..a438214 --- /dev/null +++ b/vendor/github.com/jinzhu/gorm/callback.go @@ -0,0 +1,242 @@ +package gorm + +import "log" + +// DefaultCallback default callbacks defined by gorm +var DefaultCallback = &Callback{} + +// Callback is a struct that contains all CRUD callbacks +// Field `creates` contains callbacks will be call when creating object +// Field `updates` contains callbacks will be call when updating object +// Field `deletes` contains callbacks will be call when deleting object +// Field `queries` contains callbacks will be call when querying object with query methods like Find, First, Related, Association... +// Field `rowQueries` contains callbacks will be call when querying object with Row, Rows... 
+// Field `processors` contains all callback processors, will be used to generate above callbacks in order +type Callback struct { + creates []*func(scope *Scope) + updates []*func(scope *Scope) + deletes []*func(scope *Scope) + queries []*func(scope *Scope) + rowQueries []*func(scope *Scope) + processors []*CallbackProcessor +} + +// CallbackProcessor contains callback informations +type CallbackProcessor struct { + name string // current callback's name + before string // register current callback before a callback + after string // register current callback after a callback + replace bool // replace callbacks with same name + remove bool // delete callbacks with same name + kind string // callback type: create, update, delete, query, row_query + processor *func(scope *Scope) // callback handler + parent *Callback +} + +func (c *Callback) clone() *Callback { + return &Callback{ + creates: c.creates, + updates: c.updates, + deletes: c.deletes, + queries: c.queries, + rowQueries: c.rowQueries, + processors: c.processors, + } +} + +// Create could be used to register callbacks for creating object +// db.Callback().Create().After("gorm:create").Register("plugin:run_after_create", func(*Scope) { +// // business logic +// ... +// +// // set error if some thing wrong happened, will rollback the creating +// scope.Err(errors.New("error")) +// }) +func (c *Callback) Create() *CallbackProcessor { + return &CallbackProcessor{kind: "create", parent: c} +} + +// Update could be used to register callbacks for updating object, refer `Create` for usage +func (c *Callback) Update() *CallbackProcessor { + return &CallbackProcessor{kind: "update", parent: c} +} + +// Delete could be used to register callbacks for deleting object, refer `Create` for usage +func (c *Callback) Delete() *CallbackProcessor { + return &CallbackProcessor{kind: "delete", parent: c} +} + +// Query could be used to register callbacks for querying objects with query methods like `Find`, `First`, `Related`, `Association`... 
+// Refer `Create` for usage +func (c *Callback) Query() *CallbackProcessor { + return &CallbackProcessor{kind: "query", parent: c} +} + +// RowQuery could be used to register callbacks for querying objects with `Row`, `Rows`, refer `Create` for usage +func (c *Callback) RowQuery() *CallbackProcessor { + return &CallbackProcessor{kind: "row_query", parent: c} +} + +// After insert a new callback after callback `callbackName`, refer `Callbacks.Create` +func (cp *CallbackProcessor) After(callbackName string) *CallbackProcessor { + cp.after = callbackName + return cp +} + +// Before insert a new callback before callback `callbackName`, refer `Callbacks.Create` +func (cp *CallbackProcessor) Before(callbackName string) *CallbackProcessor { + cp.before = callbackName + return cp +} + +// Register a new callback, refer `Callbacks.Create` +func (cp *CallbackProcessor) Register(callbackName string, callback func(scope *Scope)) { + if cp.kind == "row_query" { + if cp.before == "" && cp.after == "" && callbackName != "gorm:row_query" { + log.Printf("Registing RowQuery callback %v without specify order with Before(), After(), applying Before('gorm:row_query') by default for compatibility...\n", callbackName) + cp.before = "gorm:row_query" + } + } + + cp.name = callbackName + cp.processor = &callback + cp.parent.processors = append(cp.parent.processors, cp) + cp.parent.reorder() +} + +// Remove a registered callback +// db.Callback().Create().Remove("gorm:update_time_stamp_when_create") +func (cp *CallbackProcessor) Remove(callbackName string) { + log.Printf("[info] removing callback `%v` from %v\n", callbackName, fileWithLineNum()) + cp.name = callbackName + cp.remove = true + cp.parent.processors = append(cp.parent.processors, cp) + cp.parent.reorder() +} + +// Replace a registered callback with new callback +// db.Callback().Create().Replace("gorm:update_time_stamp_when_create", func(*Scope) { +// scope.SetColumn("Created", now) +// scope.SetColumn("Updated", now) +// }) +func (cp *CallbackProcessor) Replace(callbackName string, callback func(scope *Scope)) { + log.Printf("[info] replacing callback `%v` from %v\n", callbackName, fileWithLineNum()) + cp.name = callbackName + cp.processor = &callback + cp.replace = true + cp.parent.processors = append(cp.parent.processors, cp) + cp.parent.reorder() +} + +// Get registered callback +// db.Callback().Create().Get("gorm:create") +func (cp *CallbackProcessor) Get(callbackName string) (callback func(scope *Scope)) { + for _, p := range cp.parent.processors { + if p.name == callbackName && p.kind == cp.kind && !cp.remove { + return *p.processor + } + } + return nil +} + +// getRIndex get right index from string slice +func getRIndex(strs []string, str string) int { + for i := len(strs) - 1; i >= 0; i-- { + if strs[i] == str { + return i + } + } + return -1 +} + +// sortProcessors sort callback processors based on its before, after, remove, replace +func sortProcessors(cps []*CallbackProcessor) []*func(scope *Scope) { + var ( + allNames, sortedNames []string + sortCallbackProcessor func(c *CallbackProcessor) + ) + + for _, cp := range cps { + // show warning message the callback name already exists + if index := getRIndex(allNames, cp.name); index > -1 && !cp.replace && !cp.remove { + log.Printf("[warning] duplicated callback `%v` from %v\n", cp.name, fileWithLineNum()) + } + allNames = append(allNames, cp.name) + } + + sortCallbackProcessor = func(c *CallbackProcessor) { + if getRIndex(sortedNames, c.name) == -1 { // if not sorted + if c.before != "" { // 
if defined before callback + if index := getRIndex(sortedNames, c.before); index != -1 { + // if before callback already sorted, append current callback just after it + sortedNames = append(sortedNames[:index], append([]string{c.name}, sortedNames[index:]...)...) + } else if index := getRIndex(allNames, c.before); index != -1 { + // if before callback exists but haven't sorted, append current callback to last + sortedNames = append(sortedNames, c.name) + sortCallbackProcessor(cps[index]) + } + } + + if c.after != "" { // if defined after callback + if index := getRIndex(sortedNames, c.after); index != -1 { + // if after callback already sorted, append current callback just before it + sortedNames = append(sortedNames[:index+1], append([]string{c.name}, sortedNames[index+1:]...)...) + } else if index := getRIndex(allNames, c.after); index != -1 { + // if after callback exists but haven't sorted + cp := cps[index] + // set after callback's before callback to current callback + if cp.before == "" { + cp.before = c.name + } + sortCallbackProcessor(cp) + } + } + + // if current callback haven't been sorted, append it to last + if getRIndex(sortedNames, c.name) == -1 { + sortedNames = append(sortedNames, c.name) + } + } + } + + for _, cp := range cps { + sortCallbackProcessor(cp) + } + + var sortedFuncs []*func(scope *Scope) + for _, name := range sortedNames { + if index := getRIndex(allNames, name); !cps[index].remove { + sortedFuncs = append(sortedFuncs, cps[index].processor) + } + } + + return sortedFuncs +} + +// reorder all registered processors, and reset CRUD callbacks +func (c *Callback) reorder() { + var creates, updates, deletes, queries, rowQueries []*CallbackProcessor + + for _, processor := range c.processors { + if processor.name != "" { + switch processor.kind { + case "create": + creates = append(creates, processor) + case "update": + updates = append(updates, processor) + case "delete": + deletes = append(deletes, processor) + case "query": + queries = append(queries, processor) + case "row_query": + rowQueries = append(rowQueries, processor) + } + } + } + + c.creates = sortProcessors(creates) + c.updates = sortProcessors(updates) + c.deletes = sortProcessors(deletes) + c.queries = sortProcessors(queries) + c.rowQueries = sortProcessors(rowQueries) +} diff --git a/vendor/github.com/jinzhu/gorm/callback_create.go b/vendor/github.com/jinzhu/gorm/callback_create.go new file mode 100644 index 0000000..e7fe6f8 --- /dev/null +++ b/vendor/github.com/jinzhu/gorm/callback_create.go @@ -0,0 +1,164 @@ +package gorm + +import ( + "fmt" + "strings" +) + +// Define callbacks for creating +func init() { + DefaultCallback.Create().Register("gorm:begin_transaction", beginTransactionCallback) + DefaultCallback.Create().Register("gorm:before_create", beforeCreateCallback) + DefaultCallback.Create().Register("gorm:save_before_associations", saveBeforeAssociationsCallback) + DefaultCallback.Create().Register("gorm:update_time_stamp", updateTimeStampForCreateCallback) + DefaultCallback.Create().Register("gorm:create", createCallback) + DefaultCallback.Create().Register("gorm:force_reload_after_create", forceReloadAfterCreateCallback) + DefaultCallback.Create().Register("gorm:save_after_associations", saveAfterAssociationsCallback) + DefaultCallback.Create().Register("gorm:after_create", afterCreateCallback) + DefaultCallback.Create().Register("gorm:commit_or_rollback_transaction", commitOrRollbackTransactionCallback) +} + +// beforeCreateCallback will invoke `BeforeSave`, `BeforeCreate` method 
before creating +func beforeCreateCallback(scope *Scope) { + if !scope.HasError() { + scope.CallMethod("BeforeSave") + } + if !scope.HasError() { + scope.CallMethod("BeforeCreate") + } +} + +// updateTimeStampForCreateCallback will set `CreatedAt`, `UpdatedAt` when creating +func updateTimeStampForCreateCallback(scope *Scope) { + if !scope.HasError() { + now := NowFunc() + + if createdAtField, ok := scope.FieldByName("CreatedAt"); ok { + if createdAtField.IsBlank { + createdAtField.Set(now) + } + } + + if updatedAtField, ok := scope.FieldByName("UpdatedAt"); ok { + if updatedAtField.IsBlank { + updatedAtField.Set(now) + } + } + } +} + +// createCallback the callback used to insert data into database +func createCallback(scope *Scope) { + if !scope.HasError() { + defer scope.trace(NowFunc()) + + var ( + columns, placeholders []string + blankColumnsWithDefaultValue []string + ) + + for _, field := range scope.Fields() { + if scope.changeableField(field) { + if field.IsNormal { + if field.IsBlank && field.HasDefaultValue { + blankColumnsWithDefaultValue = append(blankColumnsWithDefaultValue, scope.Quote(field.DBName)) + scope.InstanceSet("gorm:blank_columns_with_default_value", blankColumnsWithDefaultValue) + } else if !field.IsPrimaryKey || !field.IsBlank { + columns = append(columns, scope.Quote(field.DBName)) + placeholders = append(placeholders, scope.AddToVars(field.Field.Interface())) + } + } else if field.Relationship != nil && field.Relationship.Kind == "belongs_to" { + for _, foreignKey := range field.Relationship.ForeignDBNames { + if foreignField, ok := scope.FieldByName(foreignKey); ok && !scope.changeableField(foreignField) { + columns = append(columns, scope.Quote(foreignField.DBName)) + placeholders = append(placeholders, scope.AddToVars(foreignField.Field.Interface())) + } + } + } + } + } + + var ( + returningColumn = "*" + quotedTableName = scope.QuotedTableName() + primaryField = scope.PrimaryField() + extraOption string + ) + + if str, ok := scope.Get("gorm:insert_option"); ok { + extraOption = fmt.Sprint(str) + } + + if primaryField != nil { + returningColumn = scope.Quote(primaryField.DBName) + } + + lastInsertIDReturningSuffix := scope.Dialect().LastInsertIDReturningSuffix(quotedTableName, returningColumn) + + if len(columns) == 0 { + scope.Raw(fmt.Sprintf( + "INSERT INTO %v %v%v%v", + quotedTableName, + scope.Dialect().DefaultValueStr(), + addExtraSpaceIfExist(extraOption), + addExtraSpaceIfExist(lastInsertIDReturningSuffix), + )) + } else { + scope.Raw(fmt.Sprintf( + "INSERT INTO %v (%v) VALUES (%v)%v%v", + scope.QuotedTableName(), + strings.Join(columns, ","), + strings.Join(placeholders, ","), + addExtraSpaceIfExist(extraOption), + addExtraSpaceIfExist(lastInsertIDReturningSuffix), + )) + } + + // execute create sql + if lastInsertIDReturningSuffix == "" || primaryField == nil { + if result, err := scope.SQLDB().Exec(scope.SQL, scope.SQLVars...); scope.Err(err) == nil { + // set rows affected count + scope.db.RowsAffected, _ = result.RowsAffected() + + // set primary value to primary field + if primaryField != nil && primaryField.IsBlank { + if primaryValue, err := result.LastInsertId(); scope.Err(err) == nil { + scope.Err(primaryField.Set(primaryValue)) + } + } + } + } else { + if primaryField.Field.CanAddr() { + if err := scope.SQLDB().QueryRow(scope.SQL, scope.SQLVars...).Scan(primaryField.Field.Addr().Interface()); scope.Err(err) == nil { + primaryField.IsBlank = false + scope.db.RowsAffected = 1 + } + } else { + scope.Err(ErrUnaddressable) + } + } + } +} + +// 
forceReloadAfterCreateCallback will reload columns that having default value, and set it back to current object +func forceReloadAfterCreateCallback(scope *Scope) { + if blankColumnsWithDefaultValue, ok := scope.InstanceGet("gorm:blank_columns_with_default_value"); ok { + db := scope.DB().New().Table(scope.TableName()).Select(blankColumnsWithDefaultValue.([]string)) + for _, field := range scope.Fields() { + if field.IsPrimaryKey && !field.IsBlank { + db = db.Where(fmt.Sprintf("%v = ?", field.DBName), field.Field.Interface()) + } + } + db.Scan(scope.Value) + } +} + +// afterCreateCallback will invoke `AfterCreate`, `AfterSave` method after creating +func afterCreateCallback(scope *Scope) { + if !scope.HasError() { + scope.CallMethod("AfterCreate") + } + if !scope.HasError() { + scope.CallMethod("AfterSave") + } +} diff --git a/vendor/github.com/jinzhu/gorm/callback_delete.go b/vendor/github.com/jinzhu/gorm/callback_delete.go new file mode 100644 index 0000000..73d9088 --- /dev/null +++ b/vendor/github.com/jinzhu/gorm/callback_delete.go @@ -0,0 +1,63 @@ +package gorm + +import ( + "errors" + "fmt" +) + +// Define callbacks for deleting +func init() { + DefaultCallback.Delete().Register("gorm:begin_transaction", beginTransactionCallback) + DefaultCallback.Delete().Register("gorm:before_delete", beforeDeleteCallback) + DefaultCallback.Delete().Register("gorm:delete", deleteCallback) + DefaultCallback.Delete().Register("gorm:after_delete", afterDeleteCallback) + DefaultCallback.Delete().Register("gorm:commit_or_rollback_transaction", commitOrRollbackTransactionCallback) +} + +// beforeDeleteCallback will invoke `BeforeDelete` method before deleting +func beforeDeleteCallback(scope *Scope) { + if scope.DB().HasBlockGlobalUpdate() && !scope.hasConditions() { + scope.Err(errors.New("Missing WHERE clause while deleting")) + return + } + if !scope.HasError() { + scope.CallMethod("BeforeDelete") + } +} + +// deleteCallback used to delete data from database or set deleted_at to current time (when using with soft delete) +func deleteCallback(scope *Scope) { + if !scope.HasError() { + var extraOption string + if str, ok := scope.Get("gorm:delete_option"); ok { + extraOption = fmt.Sprint(str) + } + + deletedAtField, hasDeletedAtField := scope.FieldByName("DeletedAt") + + if !scope.Search.Unscoped && hasDeletedAtField { + scope.Raw(fmt.Sprintf( + "UPDATE %v SET %v=%v%v%v", + scope.QuotedTableName(), + scope.Quote(deletedAtField.DBName), + scope.AddToVars(NowFunc()), + addExtraSpaceIfExist(scope.CombinedConditionSql()), + addExtraSpaceIfExist(extraOption), + )).Exec() + } else { + scope.Raw(fmt.Sprintf( + "DELETE FROM %v%v%v", + scope.QuotedTableName(), + addExtraSpaceIfExist(scope.CombinedConditionSql()), + addExtraSpaceIfExist(extraOption), + )).Exec() + } + } +} + +// afterDeleteCallback will invoke `AfterDelete` method after deleting +func afterDeleteCallback(scope *Scope) { + if !scope.HasError() { + scope.CallMethod("AfterDelete") + } +} diff --git a/vendor/github.com/jinzhu/gorm/callback_query.go b/vendor/github.com/jinzhu/gorm/callback_query.go new file mode 100644 index 0000000..ba10cc7 --- /dev/null +++ b/vendor/github.com/jinzhu/gorm/callback_query.go @@ -0,0 +1,99 @@ +package gorm + +import ( + "errors" + "fmt" + "reflect" +) + +// Define callbacks for querying +func init() { + DefaultCallback.Query().Register("gorm:query", queryCallback) + DefaultCallback.Query().Register("gorm:preload", preloadCallback) + DefaultCallback.Query().Register("gorm:after_query", afterQueryCallback) +} + +// 
queryCallback used to query data from database +func queryCallback(scope *Scope) { + if _, skip := scope.InstanceGet("gorm:skip_query_callback"); skip { + return + } + + defer scope.trace(NowFunc()) + + var ( + isSlice, isPtr bool + resultType reflect.Type + results = scope.IndirectValue() + ) + + if orderBy, ok := scope.Get("gorm:order_by_primary_key"); ok { + if primaryField := scope.PrimaryField(); primaryField != nil { + scope.Search.Order(fmt.Sprintf("%v.%v %v", scope.QuotedTableName(), scope.Quote(primaryField.DBName), orderBy)) + } + } + + if value, ok := scope.Get("gorm:query_destination"); ok { + results = indirect(reflect.ValueOf(value)) + } + + if kind := results.Kind(); kind == reflect.Slice { + isSlice = true + resultType = results.Type().Elem() + results.Set(reflect.MakeSlice(results.Type(), 0, 0)) + + if resultType.Kind() == reflect.Ptr { + isPtr = true + resultType = resultType.Elem() + } + } else if kind != reflect.Struct { + scope.Err(errors.New("unsupported destination, should be slice or struct")) + return + } + + scope.prepareQuerySQL() + + if !scope.HasError() { + scope.db.RowsAffected = 0 + if str, ok := scope.Get("gorm:query_option"); ok { + scope.SQL += addExtraSpaceIfExist(fmt.Sprint(str)) + } + + if rows, err := scope.SQLDB().Query(scope.SQL, scope.SQLVars...); scope.Err(err) == nil { + defer rows.Close() + + columns, _ := rows.Columns() + for rows.Next() { + scope.db.RowsAffected++ + + elem := results + if isSlice { + elem = reflect.New(resultType).Elem() + } + + scope.scan(rows, columns, scope.New(elem.Addr().Interface()).Fields()) + + if isSlice { + if isPtr { + results.Set(reflect.Append(results, elem.Addr())) + } else { + results.Set(reflect.Append(results, elem)) + } + } + } + + if err := rows.Err(); err != nil { + scope.Err(err) + } else if scope.db.RowsAffected == 0 && !isSlice { + scope.Err(ErrRecordNotFound) + } + } + } +} + +// afterQueryCallback will invoke `AfterFind` method after querying +func afterQueryCallback(scope *Scope) { + if !scope.HasError() { + scope.CallMethod("AfterFind") + } +} diff --git a/vendor/github.com/jinzhu/gorm/callback_query_preload.go b/vendor/github.com/jinzhu/gorm/callback_query_preload.go new file mode 100644 index 0000000..30f6b58 --- /dev/null +++ b/vendor/github.com/jinzhu/gorm/callback_query_preload.go @@ -0,0 +1,387 @@ +package gorm + +import ( + "errors" + "fmt" + "reflect" + "strconv" + "strings" +) + +// preloadCallback used to preload associations +func preloadCallback(scope *Scope) { + if _, skip := scope.InstanceGet("gorm:skip_query_callback"); skip { + return + } + + if _, ok := scope.Get("gorm:auto_preload"); ok { + autoPreload(scope) + } + + if scope.Search.preload == nil || scope.HasError() { + return + } + + var ( + preloadedMap = map[string]bool{} + fields = scope.Fields() + ) + + for _, preload := range scope.Search.preload { + var ( + preloadFields = strings.Split(preload.schema, ".") + currentScope = scope + currentFields = fields + ) + + for idx, preloadField := range preloadFields { + var currentPreloadConditions []interface{} + + if currentScope == nil { + continue + } + + // if not preloaded + if preloadKey := strings.Join(preloadFields[:idx+1], "."); !preloadedMap[preloadKey] { + + // assign search conditions to last preload + if idx == len(preloadFields)-1 { + currentPreloadConditions = preload.conditions + } + + for _, field := range currentFields { + if field.Name != preloadField || field.Relationship == nil { + continue + } + + switch field.Relationship.Kind { + case "has_one": + 
currentScope.handleHasOnePreload(field, currentPreloadConditions) + case "has_many": + currentScope.handleHasManyPreload(field, currentPreloadConditions) + case "belongs_to": + currentScope.handleBelongsToPreload(field, currentPreloadConditions) + case "many_to_many": + currentScope.handleManyToManyPreload(field, currentPreloadConditions) + default: + scope.Err(errors.New("unsupported relation")) + } + + preloadedMap[preloadKey] = true + break + } + + if !preloadedMap[preloadKey] { + scope.Err(fmt.Errorf("can't preload field %s for %s", preloadField, currentScope.GetModelStruct().ModelType)) + return + } + } + + // preload next level + if idx < len(preloadFields)-1 { + currentScope = currentScope.getColumnAsScope(preloadField) + if currentScope != nil { + currentFields = currentScope.Fields() + } + } + } + } +} + +func autoPreload(scope *Scope) { + for _, field := range scope.Fields() { + if field.Relationship == nil { + continue + } + + if val, ok := field.TagSettings["PRELOAD"]; ok { + if preload, err := strconv.ParseBool(val); err != nil { + scope.Err(errors.New("invalid preload option")) + return + } else if !preload { + continue + } + } + + scope.Search.Preload(field.Name) + } +} + +func (scope *Scope) generatePreloadDBWithConditions(conditions []interface{}) (*DB, []interface{}) { + var ( + preloadDB = scope.NewDB() + preloadConditions []interface{} + ) + + for _, condition := range conditions { + if scopes, ok := condition.(func(*DB) *DB); ok { + preloadDB = scopes(preloadDB) + } else { + preloadConditions = append(preloadConditions, condition) + } + } + + return preloadDB, preloadConditions +} + +// handleHasOnePreload used to preload has one associations +func (scope *Scope) handleHasOnePreload(field *Field, conditions []interface{}) { + relation := field.Relationship + + // get relations's primary keys + primaryKeys := scope.getColumnAsArray(relation.AssociationForeignFieldNames, scope.Value) + if len(primaryKeys) == 0 { + return + } + + // preload conditions + preloadDB, preloadConditions := scope.generatePreloadDBWithConditions(conditions) + + // find relations + query := fmt.Sprintf("%v IN (%v)", toQueryCondition(scope, relation.ForeignDBNames), toQueryMarks(primaryKeys)) + values := toQueryValues(primaryKeys) + if relation.PolymorphicType != "" { + query += fmt.Sprintf(" AND %v = ?", scope.Quote(relation.PolymorphicDBName)) + values = append(values, relation.PolymorphicValue) + } + + results := makeSlice(field.Struct.Type) + scope.Err(preloadDB.Where(query, values...).Find(results, preloadConditions...).Error) + + // assign find results + var ( + resultsValue = indirect(reflect.ValueOf(results)) + indirectScopeValue = scope.IndirectValue() + ) + + if indirectScopeValue.Kind() == reflect.Slice { + for j := 0; j < indirectScopeValue.Len(); j++ { + for i := 0; i < resultsValue.Len(); i++ { + result := resultsValue.Index(i) + foreignValues := getValueFromFields(result, relation.ForeignFieldNames) + if indirectValue := indirect(indirectScopeValue.Index(j)); equalAsString(getValueFromFields(indirectValue, relation.AssociationForeignFieldNames), foreignValues) { + indirectValue.FieldByName(field.Name).Set(result) + break + } + } + } + } else { + for i := 0; i < resultsValue.Len(); i++ { + result := resultsValue.Index(i) + scope.Err(field.Set(result)) + } + } +} + +// handleHasManyPreload used to preload has many associations +func (scope *Scope) handleHasManyPreload(field *Field, conditions []interface{}) { + relation := field.Relationship + + // get relations's primary keys + 
primaryKeys := scope.getColumnAsArray(relation.AssociationForeignFieldNames, scope.Value) + if len(primaryKeys) == 0 { + return + } + + // preload conditions + preloadDB, preloadConditions := scope.generatePreloadDBWithConditions(conditions) + + // find relations + query := fmt.Sprintf("%v IN (%v)", toQueryCondition(scope, relation.ForeignDBNames), toQueryMarks(primaryKeys)) + values := toQueryValues(primaryKeys) + if relation.PolymorphicType != "" { + query += fmt.Sprintf(" AND %v = ?", scope.Quote(relation.PolymorphicDBName)) + values = append(values, relation.PolymorphicValue) + } + + results := makeSlice(field.Struct.Type) + scope.Err(preloadDB.Where(query, values...).Find(results, preloadConditions...).Error) + + // assign find results + var ( + resultsValue = indirect(reflect.ValueOf(results)) + indirectScopeValue = scope.IndirectValue() + ) + + if indirectScopeValue.Kind() == reflect.Slice { + preloadMap := make(map[string][]reflect.Value) + for i := 0; i < resultsValue.Len(); i++ { + result := resultsValue.Index(i) + foreignValues := getValueFromFields(result, relation.ForeignFieldNames) + preloadMap[toString(foreignValues)] = append(preloadMap[toString(foreignValues)], result) + } + + for j := 0; j < indirectScopeValue.Len(); j++ { + object := indirect(indirectScopeValue.Index(j)) + objectRealValue := getValueFromFields(object, relation.AssociationForeignFieldNames) + f := object.FieldByName(field.Name) + if results, ok := preloadMap[toString(objectRealValue)]; ok { + f.Set(reflect.Append(f, results...)) + } else { + f.Set(reflect.MakeSlice(f.Type(), 0, 0)) + } + } + } else { + scope.Err(field.Set(resultsValue)) + } +} + +// handleBelongsToPreload used to preload belongs to associations +func (scope *Scope) handleBelongsToPreload(field *Field, conditions []interface{}) { + relation := field.Relationship + + // preload conditions + preloadDB, preloadConditions := scope.generatePreloadDBWithConditions(conditions) + + // get relations's primary keys + primaryKeys := scope.getColumnAsArray(relation.ForeignFieldNames, scope.Value) + if len(primaryKeys) == 0 { + return + } + + // find relations + results := makeSlice(field.Struct.Type) + scope.Err(preloadDB.Where(fmt.Sprintf("%v IN (%v)", toQueryCondition(scope, relation.AssociationForeignDBNames), toQueryMarks(primaryKeys)), toQueryValues(primaryKeys)...).Find(results, preloadConditions...).Error) + + // assign find results + var ( + resultsValue = indirect(reflect.ValueOf(results)) + indirectScopeValue = scope.IndirectValue() + ) + + for i := 0; i < resultsValue.Len(); i++ { + result := resultsValue.Index(i) + if indirectScopeValue.Kind() == reflect.Slice { + value := getValueFromFields(result, relation.AssociationForeignFieldNames) + for j := 0; j < indirectScopeValue.Len(); j++ { + object := indirect(indirectScopeValue.Index(j)) + if equalAsString(getValueFromFields(object, relation.ForeignFieldNames), value) { + object.FieldByName(field.Name).Set(result) + } + } + } else { + scope.Err(field.Set(result)) + } + } +} + +// handleManyToManyPreload used to preload many to many associations +func (scope *Scope) handleManyToManyPreload(field *Field, conditions []interface{}) { + var ( + relation = field.Relationship + joinTableHandler = relation.JoinTableHandler + fieldType = field.Struct.Type.Elem() + foreignKeyValue interface{} + foreignKeyType = reflect.ValueOf(&foreignKeyValue).Type() + linkHash = map[string][]reflect.Value{} + isPtr bool + ) + + if fieldType.Kind() == reflect.Ptr { + isPtr = true + fieldType = fieldType.Elem() + } + 
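+	// Collect the join table's source-side foreign key column names; these
+	// columns are scanned alongside each preloaded row below so the results can
+	// be matched back to their owning records.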
+ var sourceKeys = []string{} + for _, key := range joinTableHandler.SourceForeignKeys() { + sourceKeys = append(sourceKeys, key.DBName) + } + + // preload conditions + preloadDB, preloadConditions := scope.generatePreloadDBWithConditions(conditions) + + // generate query with join table + newScope := scope.New(reflect.New(fieldType).Interface()) + preloadDB = preloadDB.Table(newScope.TableName()).Model(newScope.Value) + + if len(preloadDB.search.selects) == 0 { + preloadDB = preloadDB.Select("*") + } + + preloadDB = joinTableHandler.JoinWith(joinTableHandler, preloadDB, scope.Value) + + // preload inline conditions + if len(preloadConditions) > 0 { + preloadDB = preloadDB.Where(preloadConditions[0], preloadConditions[1:]...) + } + + rows, err := preloadDB.Rows() + + if scope.Err(err) != nil { + return + } + defer rows.Close() + + columns, _ := rows.Columns() + for rows.Next() { + var ( + elem = reflect.New(fieldType).Elem() + fields = scope.New(elem.Addr().Interface()).Fields() + ) + + // register foreign keys in join tables + var joinTableFields []*Field + for _, sourceKey := range sourceKeys { + joinTableFields = append(joinTableFields, &Field{StructField: &StructField{DBName: sourceKey, IsNormal: true}, Field: reflect.New(foreignKeyType).Elem()}) + } + + scope.scan(rows, columns, append(fields, joinTableFields...)) + + scope.New(elem.Addr().Interface()). + InstanceSet("gorm:skip_query_callback", true). + callCallbacks(scope.db.parent.callbacks.queries) + + var foreignKeys = make([]interface{}, len(sourceKeys)) + // generate hashed forkey keys in join table + for idx, joinTableField := range joinTableFields { + if !joinTableField.Field.IsNil() { + foreignKeys[idx] = joinTableField.Field.Elem().Interface() + } + } + hashedSourceKeys := toString(foreignKeys) + + if isPtr { + linkHash[hashedSourceKeys] = append(linkHash[hashedSourceKeys], elem.Addr()) + } else { + linkHash[hashedSourceKeys] = append(linkHash[hashedSourceKeys], elem) + } + } + + if err := rows.Err(); err != nil { + scope.Err(err) + } + + // assign find results + var ( + indirectScopeValue = scope.IndirectValue() + fieldsSourceMap = map[string][]reflect.Value{} + foreignFieldNames = []string{} + ) + + for _, dbName := range relation.ForeignFieldNames { + if field, ok := scope.FieldByName(dbName); ok { + foreignFieldNames = append(foreignFieldNames, field.Name) + } + } + + if indirectScopeValue.Kind() == reflect.Slice { + for j := 0; j < indirectScopeValue.Len(); j++ { + object := indirect(indirectScopeValue.Index(j)) + key := toString(getValueFromFields(object, foreignFieldNames)) + fieldsSourceMap[key] = append(fieldsSourceMap[key], object.FieldByName(field.Name)) + } + } else if indirectScopeValue.IsValid() { + key := toString(getValueFromFields(indirectScopeValue, foreignFieldNames)) + fieldsSourceMap[key] = append(fieldsSourceMap[key], indirectScopeValue.FieldByName(field.Name)) + } + for source, link := range linkHash { + for i, field := range fieldsSourceMap[source] { + //If not 0 this means Value is a pointer and we already added preloaded models to it + if fieldsSourceMap[source][i].Len() != 0 { + continue + } + field.Set(reflect.Append(fieldsSourceMap[source][i], link...)) + } + + } +} diff --git a/vendor/github.com/jinzhu/gorm/callback_row_query.go b/vendor/github.com/jinzhu/gorm/callback_row_query.go new file mode 100644 index 0000000..c2ff4a0 --- /dev/null +++ b/vendor/github.com/jinzhu/gorm/callback_row_query.go @@ -0,0 +1,30 @@ +package gorm + +import "database/sql" + +// Define callbacks for row query +func 
init() { + DefaultCallback.RowQuery().Register("gorm:row_query", rowQueryCallback) +} + +type RowQueryResult struct { + Row *sql.Row +} + +type RowsQueryResult struct { + Rows *sql.Rows + Error error +} + +// queryCallback used to query data from database +func rowQueryCallback(scope *Scope) { + if result, ok := scope.InstanceGet("row_query_result"); ok { + scope.prepareQuerySQL() + + if rowResult, ok := result.(*RowQueryResult); ok { + rowResult.Row = scope.SQLDB().QueryRow(scope.SQL, scope.SQLVars...) + } else if rowsResult, ok := result.(*RowsQueryResult); ok { + rowsResult.Rows, rowsResult.Error = scope.SQLDB().Query(scope.SQL, scope.SQLVars...) + } + } +} diff --git a/vendor/github.com/jinzhu/gorm/callback_save.go b/vendor/github.com/jinzhu/gorm/callback_save.go new file mode 100644 index 0000000..ef26714 --- /dev/null +++ b/vendor/github.com/jinzhu/gorm/callback_save.go @@ -0,0 +1,170 @@ +package gorm + +import ( + "reflect" + "strings" +) + +func beginTransactionCallback(scope *Scope) { + scope.Begin() +} + +func commitOrRollbackTransactionCallback(scope *Scope) { + scope.CommitOrRollback() +} + +func saveAssociationCheck(scope *Scope, field *Field) (autoUpdate bool, autoCreate bool, saveReference bool, r *Relationship) { + checkTruth := func(value interface{}) bool { + if v, ok := value.(bool); ok && !v { + return false + } + + if v, ok := value.(string); ok { + v = strings.ToLower(v) + if v == "false" || v != "skip" { + return false + } + } + + return true + } + + if scope.changeableField(field) && !field.IsBlank && !field.IsIgnored { + if r = field.Relationship; r != nil { + autoUpdate, autoCreate, saveReference = true, true, true + + if value, ok := scope.Get("gorm:save_associations"); ok { + autoUpdate = checkTruth(value) + autoCreate = autoUpdate + } else if value, ok := field.TagSettings["SAVE_ASSOCIATIONS"]; ok { + autoUpdate = checkTruth(value) + autoCreate = autoUpdate + } + + if value, ok := scope.Get("gorm:association_autoupdate"); ok { + autoUpdate = checkTruth(value) + } else if value, ok := field.TagSettings["ASSOCIATION_AUTOUPDATE"]; ok { + autoUpdate = checkTruth(value) + } + + if value, ok := scope.Get("gorm:association_autocreate"); ok { + autoCreate = checkTruth(value) + } else if value, ok := field.TagSettings["ASSOCIATION_AUTOCREATE"]; ok { + autoCreate = checkTruth(value) + } + + if value, ok := scope.Get("gorm:association_save_reference"); ok { + saveReference = checkTruth(value) + } else if value, ok := field.TagSettings["ASSOCIATION_SAVE_REFERENCE"]; ok { + saveReference = checkTruth(value) + } + } + } + + return +} + +func saveBeforeAssociationsCallback(scope *Scope) { + for _, field := range scope.Fields() { + autoUpdate, autoCreate, saveReference, relationship := saveAssociationCheck(scope, field) + + if relationship != nil && relationship.Kind == "belongs_to" { + fieldValue := field.Field.Addr().Interface() + newScope := scope.New(fieldValue) + + if newScope.PrimaryKeyZero() { + if autoCreate { + scope.Err(scope.NewDB().Save(fieldValue).Error) + } + } else if autoUpdate { + scope.Err(scope.NewDB().Save(fieldValue).Error) + } + + if saveReference { + if len(relationship.ForeignFieldNames) != 0 { + // set value's foreign key + for idx, fieldName := range relationship.ForeignFieldNames { + associationForeignName := relationship.AssociationForeignDBNames[idx] + if foreignField, ok := scope.New(fieldValue).FieldByName(associationForeignName); ok { + scope.Err(scope.SetColumn(fieldName, foreignField.Field.Interface())) + } + } + } + } + } + } +} + +func 
saveAfterAssociationsCallback(scope *Scope) { + for _, field := range scope.Fields() { + autoUpdate, autoCreate, saveReference, relationship := saveAssociationCheck(scope, field) + + if relationship != nil && (relationship.Kind == "has_one" || relationship.Kind == "has_many" || relationship.Kind == "many_to_many") { + value := field.Field + + switch value.Kind() { + case reflect.Slice: + for i := 0; i < value.Len(); i++ { + newDB := scope.NewDB() + elem := value.Index(i).Addr().Interface() + newScope := newDB.NewScope(elem) + + if saveReference { + if relationship.JoinTableHandler == nil && len(relationship.ForeignFieldNames) != 0 { + for idx, fieldName := range relationship.ForeignFieldNames { + associationForeignName := relationship.AssociationForeignDBNames[idx] + if f, ok := scope.FieldByName(associationForeignName); ok { + scope.Err(newScope.SetColumn(fieldName, f.Field.Interface())) + } + } + } + + if relationship.PolymorphicType != "" { + scope.Err(newScope.SetColumn(relationship.PolymorphicType, relationship.PolymorphicValue)) + } + } + + if newScope.PrimaryKeyZero() { + if autoCreate { + scope.Err(newDB.Save(elem).Error) + } + } else if autoUpdate { + scope.Err(newDB.Save(elem).Error) + } + + if !scope.New(newScope.Value).PrimaryKeyZero() && saveReference { + if joinTableHandler := relationship.JoinTableHandler; joinTableHandler != nil { + scope.Err(joinTableHandler.Add(joinTableHandler, newDB, scope.Value, newScope.Value)) + } + } + } + default: + elem := value.Addr().Interface() + newScope := scope.New(elem) + + if saveReference { + if len(relationship.ForeignFieldNames) != 0 { + for idx, fieldName := range relationship.ForeignFieldNames { + associationForeignName := relationship.AssociationForeignDBNames[idx] + if f, ok := scope.FieldByName(associationForeignName); ok { + scope.Err(newScope.SetColumn(fieldName, f.Field.Interface())) + } + } + } + + if relationship.PolymorphicType != "" { + scope.Err(newScope.SetColumn(relationship.PolymorphicType, relationship.PolymorphicValue)) + } + } + + if newScope.PrimaryKeyZero() { + if autoCreate { + scope.Err(scope.NewDB().Save(elem).Error) + } + } else if autoUpdate { + scope.Err(scope.NewDB().Save(elem).Error) + } + } + } + } +} diff --git a/vendor/github.com/jinzhu/gorm/callback_system_test.go b/vendor/github.com/jinzhu/gorm/callback_system_test.go new file mode 100644 index 0000000..13ca3f4 --- /dev/null +++ b/vendor/github.com/jinzhu/gorm/callback_system_test.go @@ -0,0 +1,112 @@ +package gorm + +import ( + "reflect" + "runtime" + "strings" + "testing" +) + +func equalFuncs(funcs []*func(s *Scope), fnames []string) bool { + var names []string + for _, f := range funcs { + fnames := strings.Split(runtime.FuncForPC(reflect.ValueOf(*f).Pointer()).Name(), ".") + names = append(names, fnames[len(fnames)-1]) + } + return reflect.DeepEqual(names, fnames) +} + +func create(s *Scope) {} +func beforeCreate1(s *Scope) {} +func beforeCreate2(s *Scope) {} +func afterCreate1(s *Scope) {} +func afterCreate2(s *Scope) {} + +func TestRegisterCallback(t *testing.T) { + var callback = &Callback{} + + callback.Create().Register("before_create1", beforeCreate1) + callback.Create().Register("before_create2", beforeCreate2) + callback.Create().Register("create", create) + callback.Create().Register("after_create1", afterCreate1) + callback.Create().Register("after_create2", afterCreate2) + + if !equalFuncs(callback.creates, []string{"beforeCreate1", "beforeCreate2", "create", "afterCreate1", "afterCreate2"}) { + t.Errorf("register callback") + } +} + 
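+// For context, a minimal sketch of how a plugin might use the ordering API these
+// tests exercise (the `db` handle and the `auditCreate` callback are hypothetical;
+// the call chain mirrors the example documented on Callback.Create in callback.go):
+//
+//	// auditCreate runs after gorm's own insert; calling scope.Err(...) here
+//	// would roll the create back.
+//	func auditCreate(scope *Scope) {
+//		// plugin business logic
+//	}
+//
+//	db.Callback().Create().After("gorm:create").Register("plugin:audit_create", auditCreate)
+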
+func TestRegisterCallbackWithOrder(t *testing.T) { + var callback1 = &Callback{} + callback1.Create().Register("before_create1", beforeCreate1) + callback1.Create().Register("create", create) + callback1.Create().Register("after_create1", afterCreate1) + callback1.Create().Before("after_create1").Register("after_create2", afterCreate2) + if !equalFuncs(callback1.creates, []string{"beforeCreate1", "create", "afterCreate2", "afterCreate1"}) { + t.Errorf("register callback with order") + } + + var callback2 = &Callback{} + + callback2.Update().Register("create", create) + callback2.Update().Before("create").Register("before_create1", beforeCreate1) + callback2.Update().After("after_create2").Register("after_create1", afterCreate1) + callback2.Update().Before("before_create1").Register("before_create2", beforeCreate2) + callback2.Update().Register("after_create2", afterCreate2) + + if !equalFuncs(callback2.updates, []string{"beforeCreate2", "beforeCreate1", "create", "afterCreate2", "afterCreate1"}) { + t.Errorf("register callback with order") + } +} + +func TestRegisterCallbackWithComplexOrder(t *testing.T) { + var callback1 = &Callback{} + + callback1.Query().Before("after_create1").After("before_create1").Register("create", create) + callback1.Query().Register("before_create1", beforeCreate1) + callback1.Query().Register("after_create1", afterCreate1) + + if !equalFuncs(callback1.queries, []string{"beforeCreate1", "create", "afterCreate1"}) { + t.Errorf("register callback with order") + } + + var callback2 = &Callback{} + + callback2.Delete().Before("after_create1").After("before_create1").Register("create", create) + callback2.Delete().Before("create").Register("before_create1", beforeCreate1) + callback2.Delete().After("before_create1").Register("before_create2", beforeCreate2) + callback2.Delete().Register("after_create1", afterCreate1) + callback2.Delete().After("after_create1").Register("after_create2", afterCreate2) + + if !equalFuncs(callback2.deletes, []string{"beforeCreate1", "beforeCreate2", "create", "afterCreate1", "afterCreate2"}) { + t.Errorf("register callback with order") + } +} + +func replaceCreate(s *Scope) {} + +func TestReplaceCallback(t *testing.T) { + var callback = &Callback{} + + callback.Create().Before("after_create1").After("before_create1").Register("create", create) + callback.Create().Register("before_create1", beforeCreate1) + callback.Create().Register("after_create1", afterCreate1) + callback.Create().Replace("create", replaceCreate) + + if !equalFuncs(callback.creates, []string{"beforeCreate1", "replaceCreate", "afterCreate1"}) { + t.Errorf("replace callback") + } +} + +func TestRemoveCallback(t *testing.T) { + var callback = &Callback{} + + callback.Create().Before("after_create1").After("before_create1").Register("create", create) + callback.Create().Register("before_create1", beforeCreate1) + callback.Create().Register("after_create1", afterCreate1) + callback.Create().Remove("create") + + if !equalFuncs(callback.creates, []string{"beforeCreate1", "afterCreate1"}) { + t.Errorf("remove callback") + } +} diff --git a/vendor/github.com/jinzhu/gorm/callback_update.go b/vendor/github.com/jinzhu/gorm/callback_update.go new file mode 100644 index 0000000..373bd72 --- /dev/null +++ b/vendor/github.com/jinzhu/gorm/callback_update.go @@ -0,0 +1,119 @@ +package gorm + +import ( + "errors" + "fmt" + "sort" + "strings" +) + +// Define callbacks for updating +func init() { + DefaultCallback.Update().Register("gorm:assign_updating_attributes", 
assignUpdatingAttributesCallback) + DefaultCallback.Update().Register("gorm:begin_transaction", beginTransactionCallback) + DefaultCallback.Update().Register("gorm:before_update", beforeUpdateCallback) + DefaultCallback.Update().Register("gorm:save_before_associations", saveBeforeAssociationsCallback) + DefaultCallback.Update().Register("gorm:update_time_stamp", updateTimeStampForUpdateCallback) + DefaultCallback.Update().Register("gorm:update", updateCallback) + DefaultCallback.Update().Register("gorm:save_after_associations", saveAfterAssociationsCallback) + DefaultCallback.Update().Register("gorm:after_update", afterUpdateCallback) + DefaultCallback.Update().Register("gorm:commit_or_rollback_transaction", commitOrRollbackTransactionCallback) +} + +// assignUpdatingAttributesCallback assign updating attributes to model +func assignUpdatingAttributesCallback(scope *Scope) { + if attrs, ok := scope.InstanceGet("gorm:update_interface"); ok { + if updateMaps, hasUpdate := scope.updatedAttrsWithValues(attrs); hasUpdate { + scope.InstanceSet("gorm:update_attrs", updateMaps) + } else { + scope.SkipLeft() + } + } +} + +// beforeUpdateCallback will invoke `BeforeSave`, `BeforeUpdate` method before updating +func beforeUpdateCallback(scope *Scope) { + if scope.DB().HasBlockGlobalUpdate() && !scope.hasConditions() { + scope.Err(errors.New("Missing WHERE clause while updating")) + return + } + if _, ok := scope.Get("gorm:update_column"); !ok { + if !scope.HasError() { + scope.CallMethod("BeforeSave") + } + if !scope.HasError() { + scope.CallMethod("BeforeUpdate") + } + } +} + +// updateTimeStampForUpdateCallback will set `UpdatedAt` when updating +func updateTimeStampForUpdateCallback(scope *Scope) { + if _, ok := scope.Get("gorm:update_column"); !ok { + scope.SetColumn("UpdatedAt", NowFunc()) + } +} + +// updateCallback the callback used to update data to database +func updateCallback(scope *Scope) { + if !scope.HasError() { + var sqls []string + + if updateAttrs, ok := scope.InstanceGet("gorm:update_attrs"); ok { + // Sort the column names so that the generated SQL is the same every time. 
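+		// (Go randomizes map iteration order, so without this sort the column
+		// order in the generated SET clause would vary between runs.)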
+ updateMap := updateAttrs.(map[string]interface{}) + var columns []string + for c := range updateMap { + columns = append(columns, c) + } + sort.Strings(columns) + + for _, column := range columns { + value := updateMap[column] + sqls = append(sqls, fmt.Sprintf("%v = %v", scope.Quote(column), scope.AddToVars(value))) + } + } else { + for _, field := range scope.Fields() { + if scope.changeableField(field) { + if !field.IsPrimaryKey && field.IsNormal { + sqls = append(sqls, fmt.Sprintf("%v = %v", scope.Quote(field.DBName), scope.AddToVars(field.Field.Interface()))) + } else if relationship := field.Relationship; relationship != nil && relationship.Kind == "belongs_to" { + for _, foreignKey := range relationship.ForeignDBNames { + if foreignField, ok := scope.FieldByName(foreignKey); ok && !scope.changeableField(foreignField) { + sqls = append(sqls, + fmt.Sprintf("%v = %v", scope.Quote(foreignField.DBName), scope.AddToVars(foreignField.Field.Interface()))) + } + } + } + } + } + } + + var extraOption string + if str, ok := scope.Get("gorm:update_option"); ok { + extraOption = fmt.Sprint(str) + } + + if len(sqls) > 0 { + scope.Raw(fmt.Sprintf( + "UPDATE %v SET %v%v%v", + scope.QuotedTableName(), + strings.Join(sqls, ", "), + addExtraSpaceIfExist(scope.CombinedConditionSql()), + addExtraSpaceIfExist(extraOption), + )).Exec() + } + } +} + +// afterUpdateCallback will invoke `AfterUpdate`, `AfterSave` method after updating +func afterUpdateCallback(scope *Scope) { + if _, ok := scope.Get("gorm:update_column"); !ok { + if !scope.HasError() { + scope.CallMethod("AfterUpdate") + } + if !scope.HasError() { + scope.CallMethod("AfterSave") + } + } +} diff --git a/vendor/github.com/jinzhu/gorm/callbacks_test.go b/vendor/github.com/jinzhu/gorm/callbacks_test.go new file mode 100644 index 0000000..a58913d --- /dev/null +++ b/vendor/github.com/jinzhu/gorm/callbacks_test.go @@ -0,0 +1,177 @@ +package gorm_test + +import ( + "errors" + + "github.com/jinzhu/gorm" + + "reflect" + "testing" +) + +func (s *Product) BeforeCreate() (err error) { + if s.Code == "Invalid" { + err = errors.New("invalid product") + } + s.BeforeCreateCallTimes = s.BeforeCreateCallTimes + 1 + return +} + +func (s *Product) BeforeUpdate() (err error) { + if s.Code == "dont_update" { + err = errors.New("can't update") + } + s.BeforeUpdateCallTimes = s.BeforeUpdateCallTimes + 1 + return +} + +func (s *Product) BeforeSave() (err error) { + if s.Code == "dont_save" { + err = errors.New("can't save") + } + s.BeforeSaveCallTimes = s.BeforeSaveCallTimes + 1 + return +} + +func (s *Product) AfterFind() { + s.AfterFindCallTimes = s.AfterFindCallTimes + 1 +} + +func (s *Product) AfterCreate(tx *gorm.DB) { + tx.Model(s).UpdateColumn(Product{AfterCreateCallTimes: s.AfterCreateCallTimes + 1}) +} + +func (s *Product) AfterUpdate() { + s.AfterUpdateCallTimes = s.AfterUpdateCallTimes + 1 +} + +func (s *Product) AfterSave() (err error) { + if s.Code == "after_save_error" { + err = errors.New("can't save") + } + s.AfterSaveCallTimes = s.AfterSaveCallTimes + 1 + return +} + +func (s *Product) BeforeDelete() (err error) { + if s.Code == "dont_delete" { + err = errors.New("can't delete") + } + s.BeforeDeleteCallTimes = s.BeforeDeleteCallTimes + 1 + return +} + +func (s *Product) AfterDelete() (err error) { + if s.Code == "after_delete_error" { + err = errors.New("can't delete") + } + s.AfterDeleteCallTimes = s.AfterDeleteCallTimes + 1 + return +} + +func (s *Product) GetCallTimes() []int64 { + return []int64{s.BeforeCreateCallTimes, s.BeforeSaveCallTimes, 
s.BeforeUpdateCallTimes, s.AfterCreateCallTimes, s.AfterSaveCallTimes, s.AfterUpdateCallTimes, s.BeforeDeleteCallTimes, s.AfterDeleteCallTimes, s.AfterFindCallTimes} +} + +func TestRunCallbacks(t *testing.T) { + p := Product{Code: "unique_code", Price: 100} + DB.Save(&p) + + if !reflect.DeepEqual(p.GetCallTimes(), []int64{1, 1, 0, 1, 1, 0, 0, 0, 0}) { + t.Errorf("Callbacks should be invoked successfully, %v", p.GetCallTimes()) + } + + DB.Where("Code = ?", "unique_code").First(&p) + if !reflect.DeepEqual(p.GetCallTimes(), []int64{1, 1, 0, 1, 0, 0, 0, 0, 1}) { + t.Errorf("After callbacks values are not saved, %v", p.GetCallTimes()) + } + + p.Price = 200 + DB.Save(&p) + if !reflect.DeepEqual(p.GetCallTimes(), []int64{1, 2, 1, 1, 1, 1, 0, 0, 1}) { + t.Errorf("After update callbacks should be invoked successfully, %v", p.GetCallTimes()) + } + + var products []Product + DB.Find(&products, "code = ?", "unique_code") + if products[0].AfterFindCallTimes != 2 { + t.Errorf("AfterFind callbacks should work with slice") + } + + DB.Where("Code = ?", "unique_code").First(&p) + if !reflect.DeepEqual(p.GetCallTimes(), []int64{1, 2, 1, 1, 0, 0, 0, 0, 2}) { + t.Errorf("After update callbacks values are not saved, %v", p.GetCallTimes()) + } + + DB.Delete(&p) + if !reflect.DeepEqual(p.GetCallTimes(), []int64{1, 2, 1, 1, 0, 0, 1, 1, 2}) { + t.Errorf("After delete callbacks should be invoked successfully, %v", p.GetCallTimes()) + } + + if DB.Where("Code = ?", "unique_code").First(&p).Error == nil { + t.Errorf("Can't find a deleted record") + } +} + +func TestCallbacksWithErrors(t *testing.T) { + p := Product{Code: "Invalid", Price: 100} + if DB.Save(&p).Error == nil { + t.Errorf("An error from before create callbacks happened when create with invalid value") + } + + if DB.Where("code = ?", "Invalid").First(&Product{}).Error == nil { + t.Errorf("Should not save record that have errors") + } + + if DB.Save(&Product{Code: "dont_save", Price: 100}).Error == nil { + t.Errorf("An error from after create callbacks happened when create with invalid value") + } + + p2 := Product{Code: "update_callback", Price: 100} + DB.Save(&p2) + + p2.Code = "dont_update" + if DB.Save(&p2).Error == nil { + t.Errorf("An error from before update callbacks happened when update with invalid value") + } + + if DB.Where("code = ?", "update_callback").First(&Product{}).Error != nil { + t.Errorf("Record Should not be updated due to errors happened in before update callback") + } + + if DB.Where("code = ?", "dont_update").First(&Product{}).Error == nil { + t.Errorf("Record Should not be updated due to errors happened in before update callback") + } + + p2.Code = "dont_save" + if DB.Save(&p2).Error == nil { + t.Errorf("An error from before save callbacks happened when update with invalid value") + } + + p3 := Product{Code: "dont_delete", Price: 100} + DB.Save(&p3) + if DB.Delete(&p3).Error == nil { + t.Errorf("An error from before delete callbacks happened when delete") + } + + if DB.Where("Code = ?", "dont_delete").First(&p3).Error != nil { + t.Errorf("An error from before delete callbacks happened") + } + + p4 := Product{Code: "after_save_error", Price: 100} + DB.Save(&p4) + if err := DB.First(&Product{}, "code = ?", "after_save_error").Error; err == nil { + t.Errorf("Record should be reverted if get an error in after save callback") + } + + p5 := Product{Code: "after_delete_error", Price: 100} + DB.Save(&p5) + if err := DB.First(&Product{}, "code = ?", "after_delete_error").Error; err != nil { + t.Errorf("Record should be found") + } + + 
DB.Delete(&p5) + if err := DB.First(&Product{}, "code = ?", "after_delete_error").Error; err != nil { + t.Errorf("Record shouldn't be deleted because of an error happened in after delete callback") + } +} diff --git a/vendor/github.com/jinzhu/gorm/create_test.go b/vendor/github.com/jinzhu/gorm/create_test.go new file mode 100644 index 0000000..9256064 --- /dev/null +++ b/vendor/github.com/jinzhu/gorm/create_test.go @@ -0,0 +1,231 @@ +package gorm_test + +import ( + "os" + "reflect" + "testing" + "time" + + "github.com/jinzhu/now" +) + +func TestCreate(t *testing.T) { + float := 35.03554004971999 + now := time.Now() + user := User{Name: "CreateUser", Age: 18, Birthday: &now, UserNum: Num(111), PasswordHash: []byte{'f', 'a', 'k', '4'}, Latitude: float} + + if !DB.NewRecord(user) || !DB.NewRecord(&user) { + t.Error("User should be new record before create") + } + + if count := DB.Save(&user).RowsAffected; count != 1 { + t.Error("There should be one record be affected when create record") + } + + if DB.NewRecord(user) || DB.NewRecord(&user) { + t.Error("User should not new record after save") + } + + var newUser User + if err := DB.First(&newUser, user.Id).Error; err != nil { + t.Errorf("No error should happen, but got %v", err) + } + + if !reflect.DeepEqual(newUser.PasswordHash, []byte{'f', 'a', 'k', '4'}) { + t.Errorf("User's PasswordHash should be saved ([]byte)") + } + + if newUser.Age != 18 { + t.Errorf("User's Age should be saved (int)") + } + + if newUser.UserNum != Num(111) { + t.Errorf("User's UserNum should be saved (custom type), but got %v", newUser.UserNum) + } + + if newUser.Latitude != float { + t.Errorf("Float64 should not be changed after save") + } + + if user.CreatedAt.IsZero() { + t.Errorf("Should have created_at after create") + } + + if newUser.CreatedAt.IsZero() { + t.Errorf("Should have created_at after create") + } + + DB.Model(user).Update("name", "create_user_new_name") + DB.First(&user, user.Id) + if user.CreatedAt.Format(time.RFC3339Nano) != newUser.CreatedAt.Format(time.RFC3339Nano) { + t.Errorf("CreatedAt should not be changed after update") + } +} + +func TestCreateEmptyStrut(t *testing.T) { + type EmptyStruct struct { + ID uint + } + DB.AutoMigrate(&EmptyStruct{}) + + if err := DB.Create(&EmptyStruct{}).Error; err != nil { + t.Errorf("No error should happen when creating user, but got %v", err) + } +} + +func TestCreateWithExistingTimestamp(t *testing.T) { + user := User{Name: "CreateUserExistingTimestamp"} + + timeA := now.MustParse("2016-01-01") + user.CreatedAt = timeA + user.UpdatedAt = timeA + DB.Save(&user) + + if user.CreatedAt.UTC().Format(time.RFC3339) != timeA.UTC().Format(time.RFC3339) { + t.Errorf("CreatedAt should not be changed") + } + + if user.UpdatedAt.UTC().Format(time.RFC3339) != timeA.UTC().Format(time.RFC3339) { + t.Errorf("UpdatedAt should not be changed") + } + + var newUser User + DB.First(&newUser, user.Id) + + if newUser.CreatedAt.UTC().Format(time.RFC3339) != timeA.UTC().Format(time.RFC3339) { + t.Errorf("CreatedAt should not be changed") + } + + if newUser.UpdatedAt.UTC().Format(time.RFC3339) != timeA.UTC().Format(time.RFC3339) { + t.Errorf("UpdatedAt should not be changed") + } +} + +type AutoIncrementUser struct { + User + Sequence uint `gorm:"AUTO_INCREMENT"` +} + +func TestCreateWithAutoIncrement(t *testing.T) { + if dialect := os.Getenv("GORM_DIALECT"); dialect != "postgres" { + t.Skip("Skipping this because only postgres properly support auto_increment on a non-primary_key column") + } + + DB.AutoMigrate(&AutoIncrementUser{}) + 
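+	// Create two records back to back; the database should hand out consecutive
+	// values for the AUTO_INCREMENT Sequence column, which the assertion below verifies.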
+ user1 := AutoIncrementUser{} + user2 := AutoIncrementUser{} + + DB.Create(&user1) + DB.Create(&user2) + + if user2.Sequence-user1.Sequence != 1 { + t.Errorf("Auto increment should apply on Sequence") + } +} + +func TestCreateWithNoGORMPrimayKey(t *testing.T) { + if dialect := os.Getenv("GORM_DIALECT"); dialect == "mssql" { + t.Skip("Skipping this because MSSQL will return identity only if the table has an Id column") + } + + jt := JoinTable{From: 1, To: 2} + err := DB.Create(&jt).Error + if err != nil { + t.Errorf("No error should happen when create a record without a GORM primary key. But in the database this primary key exists and is the union of 2 or more fields\n But got: %s", err) + } +} + +func TestCreateWithNoStdPrimaryKeyAndDefaultValues(t *testing.T) { + animal := Animal{Name: "Ferdinand"} + if DB.Save(&animal).Error != nil { + t.Errorf("No error should happen when create a record without std primary key") + } + + if animal.Counter == 0 { + t.Errorf("No std primary key should be filled value after create") + } + + if animal.Name != "Ferdinand" { + t.Errorf("Default value should be overrided") + } + + // Test create with default value not overrided + an := Animal{From: "nerdz"} + + if DB.Save(&an).Error != nil { + t.Errorf("No error should happen when create an record without std primary key") + } + + // We must fetch the value again, to have the default fields updated + // (We can't do this in the update statements, since sql default can be expressions + // And be different from the fields' type (eg. a time.Time fields has a default value of "now()" + DB.Model(Animal{}).Where(&Animal{Counter: an.Counter}).First(&an) + + if an.Name != "galeone" { + t.Errorf("Default value should fill the field. But got %v", an.Name) + } +} + +func TestAnonymousScanner(t *testing.T) { + user := User{Name: "anonymous_scanner", Role: Role{Name: "admin"}} + DB.Save(&user) + + var user2 User + DB.First(&user2, "name = ?", "anonymous_scanner") + if user2.Role.Name != "admin" { + t.Errorf("Should be able to get anonymous scanner") + } + + if !user2.Role.IsAdmin() { + t.Errorf("Should be able to get anonymous scanner") + } +} + +func TestAnonymousField(t *testing.T) { + user := User{Name: "anonymous_field", Company: Company{Name: "company"}} + DB.Save(&user) + + var user2 User + DB.First(&user2, "name = ?", "anonymous_field") + DB.Model(&user2).Related(&user2.Company) + if user2.Company.Name != "company" { + t.Errorf("Should be able to get anonymous field") + } +} + +func TestSelectWithCreate(t *testing.T) { + user := getPreparedUser("select_user", "select_with_create") + DB.Select("Name", "BillingAddress", "CreditCard", "Company", "Emails").Create(user) + + var queryuser User + DB.Preload("BillingAddress").Preload("ShippingAddress"). + Preload("CreditCard").Preload("Emails").Preload("Company").First(&queryuser, user.Id) + + if queryuser.Name != user.Name || queryuser.Age == user.Age { + t.Errorf("Should only create users with name column") + } + + if queryuser.BillingAddressID.Int64 == 0 || queryuser.ShippingAddressId != 0 || + queryuser.CreditCard.ID == 0 || len(queryuser.Emails) == 0 { + t.Errorf("Should only create selected relationships") + } +} + +func TestOmitWithCreate(t *testing.T) { + user := getPreparedUser("omit_user", "omit_with_create") + DB.Omit("Name", "BillingAddress", "CreditCard", "Company", "Emails").Create(user) + + var queryuser User + DB.Preload("BillingAddress").Preload("ShippingAddress"). 
+ Preload("CreditCard").Preload("Emails").Preload("Company").First(&queryuser, user.Id) + + if queryuser.Name == user.Name || queryuser.Age != user.Age { + t.Errorf("Should only create users with age column") + } + + if queryuser.BillingAddressID.Int64 != 0 || queryuser.ShippingAddressId == 0 || + queryuser.CreditCard.ID != 0 || len(queryuser.Emails) != 0 { + t.Errorf("Should not create omitted relationships") + } +} diff --git a/vendor/github.com/jinzhu/gorm/customize_column_test.go b/vendor/github.com/jinzhu/gorm/customize_column_test.go new file mode 100644 index 0000000..5e19d6f --- /dev/null +++ b/vendor/github.com/jinzhu/gorm/customize_column_test.go @@ -0,0 +1,346 @@ +package gorm_test + +import ( + "testing" + "time" + + "github.com/jinzhu/gorm" +) + +type CustomizeColumn struct { + ID int64 `gorm:"column:mapped_id; primary_key:yes"` + Name string `gorm:"column:mapped_name"` + Date *time.Time `gorm:"column:mapped_time"` +} + +// Make sure an ignored field does not interfere with another field's custom +// column name that matches the ignored field. +type CustomColumnAndIgnoredFieldClash struct { + Body string `sql:"-"` + RawBody string `gorm:"column:body"` +} + +func TestCustomizeColumn(t *testing.T) { + col := "mapped_name" + DB.DropTable(&CustomizeColumn{}) + DB.AutoMigrate(&CustomizeColumn{}) + + scope := DB.NewScope(&CustomizeColumn{}) + if !scope.Dialect().HasColumn(scope.TableName(), col) { + t.Errorf("CustomizeColumn should have column %s", col) + } + + col = "mapped_id" + if scope.PrimaryKey() != col { + t.Errorf("CustomizeColumn should have primary key %s, but got %q", col, scope.PrimaryKey()) + } + + expected := "foo" + now := time.Now() + cc := CustomizeColumn{ID: 666, Name: expected, Date: &now} + + if count := DB.Create(&cc).RowsAffected; count != 1 { + t.Error("There should be one record be affected when create record") + } + + var cc1 CustomizeColumn + DB.First(&cc1, 666) + + if cc1.Name != expected { + t.Errorf("Failed to query CustomizeColumn") + } + + cc.Name = "bar" + DB.Save(&cc) + + var cc2 CustomizeColumn + DB.First(&cc2, 666) + if cc2.Name != "bar" { + t.Errorf("Failed to query CustomizeColumn") + } +} + +func TestCustomColumnAndIgnoredFieldClash(t *testing.T) { + DB.DropTable(&CustomColumnAndIgnoredFieldClash{}) + if err := DB.AutoMigrate(&CustomColumnAndIgnoredFieldClash{}).Error; err != nil { + t.Errorf("Should not raise error: %s", err) + } +} + +type CustomizePerson struct { + IdPerson string `gorm:"column:idPerson;primary_key:true"` + Accounts []CustomizeAccount `gorm:"many2many:PersonAccount;associationforeignkey:idAccount;foreignkey:idPerson"` +} + +type CustomizeAccount struct { + IdAccount string `gorm:"column:idAccount;primary_key:true"` + Name string +} + +func TestManyToManyWithCustomizedColumn(t *testing.T) { + DB.DropTable(&CustomizePerson{}, &CustomizeAccount{}, "PersonAccount") + DB.AutoMigrate(&CustomizePerson{}, &CustomizeAccount{}) + + account := CustomizeAccount{IdAccount: "account", Name: "id1"} + person := CustomizePerson{ + IdPerson: "person", + Accounts: []CustomizeAccount{account}, + } + + if err := DB.Create(&account).Error; err != nil { + t.Errorf("no error should happen, but got %v", err) + } + + if err := DB.Create(&person).Error; err != nil { + t.Errorf("no error should happen, but got %v", err) + } + + var person1 CustomizePerson + scope := DB.NewScope(nil) + if err := DB.Preload("Accounts").First(&person1, scope.Quote("idPerson")+" = ?", person.IdPerson).Error; err != nil { + t.Errorf("no error should happen when preloading 
customized column many2many relations, but got %v", err) + } + + if len(person1.Accounts) != 1 || person1.Accounts[0].IdAccount != "account" { + t.Errorf("should preload correct accounts") + } +} + +type CustomizeUser struct { + gorm.Model + Email string `sql:"column:email_address"` +} + +type CustomizeInvitation struct { + gorm.Model + Address string `sql:"column:invitation"` + Person *CustomizeUser `gorm:"foreignkey:Email;associationforeignkey:invitation"` +} + +func TestOneToOneWithCustomizedColumn(t *testing.T) { + DB.DropTable(&CustomizeUser{}, &CustomizeInvitation{}) + DB.AutoMigrate(&CustomizeUser{}, &CustomizeInvitation{}) + + user := CustomizeUser{ + Email: "hello@example.com", + } + invitation := CustomizeInvitation{ + Address: "hello@example.com", + } + + DB.Create(&user) + DB.Create(&invitation) + + var invitation2 CustomizeInvitation + if err := DB.Preload("Person").Find(&invitation2, invitation.ID).Error; err != nil { + t.Errorf("no error should happen, but got %v", err) + } + + if invitation2.Person.Email != user.Email { + t.Errorf("Should preload one to one relation with customize foreign keys") + } +} + +type PromotionDiscount struct { + gorm.Model + Name string + Coupons []*PromotionCoupon `gorm:"ForeignKey:discount_id"` + Rule *PromotionRule `gorm:"ForeignKey:discount_id"` + Benefits []PromotionBenefit `gorm:"ForeignKey:promotion_id"` +} + +type PromotionBenefit struct { + gorm.Model + Name string + PromotionID uint + Discount PromotionDiscount `gorm:"ForeignKey:promotion_id"` +} + +type PromotionCoupon struct { + gorm.Model + Code string + DiscountID uint + Discount PromotionDiscount +} + +type PromotionRule struct { + gorm.Model + Name string + Begin *time.Time + End *time.Time + DiscountID uint + Discount *PromotionDiscount +} + +func TestOneToManyWithCustomizedColumn(t *testing.T) { + DB.DropTable(&PromotionDiscount{}, &PromotionCoupon{}) + DB.AutoMigrate(&PromotionDiscount{}, &PromotionCoupon{}) + + discount := PromotionDiscount{ + Name: "Happy New Year", + Coupons: []*PromotionCoupon{ + {Code: "newyear1"}, + {Code: "newyear2"}, + }, + } + + if err := DB.Create(&discount).Error; err != nil { + t.Errorf("no error should happen but got %v", err) + } + + var discount1 PromotionDiscount + if err := DB.Preload("Coupons").First(&discount1, "id = ?", discount.ID).Error; err != nil { + t.Errorf("no error should happen but got %v", err) + } + + if len(discount.Coupons) != 2 { + t.Errorf("should find two coupons") + } + + var coupon PromotionCoupon + if err := DB.Preload("Discount").First(&coupon, "code = ?", "newyear1").Error; err != nil { + t.Errorf("no error should happen but got %v", err) + } + + if coupon.Discount.Name != "Happy New Year" { + t.Errorf("should preload discount from coupon") + } +} + +func TestHasOneWithPartialCustomizedColumn(t *testing.T) { + DB.DropTable(&PromotionDiscount{}, &PromotionRule{}) + DB.AutoMigrate(&PromotionDiscount{}, &PromotionRule{}) + + var begin = time.Now() + var end = time.Now().Add(24 * time.Hour) + discount := PromotionDiscount{ + Name: "Happy New Year 2", + Rule: &PromotionRule{ + Name: "time_limited", + Begin: &begin, + End: &end, + }, + } + + if err := DB.Create(&discount).Error; err != nil { + t.Errorf("no error should happen but got %v", err) + } + + var discount1 PromotionDiscount + if err := DB.Preload("Rule").First(&discount1, "id = ?", discount.ID).Error; err != nil { + t.Errorf("no error should happen but got %v", err) + } + + if discount.Rule.Begin.Format(time.RFC3339Nano) != begin.Format(time.RFC3339Nano) { + 
t.Errorf("Should be able to preload Rule") + } + + var rule PromotionRule + if err := DB.Preload("Discount").First(&rule, "name = ?", "time_limited").Error; err != nil { + t.Errorf("no error should happen but got %v", err) + } + + if rule.Discount.Name != "Happy New Year 2" { + t.Errorf("should preload discount from rule") + } +} + +func TestBelongsToWithPartialCustomizedColumn(t *testing.T) { + DB.DropTable(&PromotionDiscount{}, &PromotionBenefit{}) + DB.AutoMigrate(&PromotionDiscount{}, &PromotionBenefit{}) + + discount := PromotionDiscount{ + Name: "Happy New Year 3", + Benefits: []PromotionBenefit{ + {Name: "free cod"}, + {Name: "free shipping"}, + }, + } + + if err := DB.Create(&discount).Error; err != nil { + t.Errorf("no error should happen but got %v", err) + } + + var discount1 PromotionDiscount + if err := DB.Preload("Benefits").First(&discount1, "id = ?", discount.ID).Error; err != nil { + t.Errorf("no error should happen but got %v", err) + } + + if len(discount.Benefits) != 2 { + t.Errorf("should find two benefits") + } + + var benefit PromotionBenefit + if err := DB.Preload("Discount").First(&benefit, "name = ?", "free cod").Error; err != nil { + t.Errorf("no error should happen but got %v", err) + } + + if benefit.Discount.Name != "Happy New Year 3" { + t.Errorf("should preload discount from coupon") + } +} + +type SelfReferencingUser struct { + gorm.Model + Name string + Friends []*SelfReferencingUser `gorm:"many2many:UserFriends;association_jointable_foreignkey:friend_id"` +} + +func TestSelfReferencingMany2ManyColumn(t *testing.T) { + DB.DropTable(&SelfReferencingUser{}, "UserFriends") + DB.AutoMigrate(&SelfReferencingUser{}) + + friend1 := SelfReferencingUser{Name: "friend1_m2m"} + if err := DB.Create(&friend1).Error; err != nil { + t.Errorf("no error should happen, but got %v", err) + } + + friend2 := SelfReferencingUser{Name: "friend2_m2m"} + if err := DB.Create(&friend2).Error; err != nil { + t.Errorf("no error should happen, but got %v", err) + } + + user := SelfReferencingUser{ + Name: "self_m2m", + Friends: []*SelfReferencingUser{&friend1, &friend2}, + } + + if err := DB.Create(&user).Error; err != nil { + t.Errorf("no error should happen, but got %v", err) + } + + if DB.Model(&user).Association("Friends").Count() != 2 { + t.Errorf("Should find created friends correctly") + } + + var newUser = SelfReferencingUser{} + + if err := DB.Preload("Friends").First(&newUser, "id = ?", user.ID).Error; err != nil { + t.Errorf("no error should happen, but got %v", err) + } + + if len(newUser.Friends) != 2 { + t.Errorf("Should preload created frineds for self reference m2m") + } + + DB.Model(&newUser).Association("Friends").Append(&SelfReferencingUser{Name: "friend3_m2m"}) + if DB.Model(&user).Association("Friends").Count() != 3 { + t.Errorf("Should find created friends correctly") + } + + DB.Model(&newUser).Association("Friends").Replace(&SelfReferencingUser{Name: "friend4_m2m"}) + if DB.Model(&user).Association("Friends").Count() != 1 { + t.Errorf("Should find created friends correctly") + } + + friend := SelfReferencingUser{} + DB.Model(&newUser).Association("Friends").Find(&friend) + if friend.Name != "friend4_m2m" { + t.Errorf("Should find created friends correctly") + } + + DB.Model(&newUser).Association("Friends").Delete(friend) + if DB.Model(&user).Association("Friends").Count() != 0 { + t.Errorf("All friends should be deleted") + } +} diff --git a/vendor/github.com/jinzhu/gorm/delete_test.go b/vendor/github.com/jinzhu/gorm/delete_test.go new file mode 100644 index 
0000000..043641f --- /dev/null +++ b/vendor/github.com/jinzhu/gorm/delete_test.go @@ -0,0 +1,91 @@ +package gorm_test + +import ( + "testing" + "time" +) + +func TestDelete(t *testing.T) { + user1, user2 := User{Name: "delete1"}, User{Name: "delete2"} + DB.Save(&user1) + DB.Save(&user2) + + if err := DB.Delete(&user1).Error; err != nil { + t.Errorf("No error should happen when delete a record, err=%s", err) + } + + if !DB.Where("name = ?", user1.Name).First(&User{}).RecordNotFound() { + t.Errorf("User can't be found after delete") + } + + if DB.Where("name = ?", user2.Name).First(&User{}).RecordNotFound() { + t.Errorf("Other users that not deleted should be found-able") + } +} + +func TestInlineDelete(t *testing.T) { + user1, user2 := User{Name: "inline_delete1"}, User{Name: "inline_delete2"} + DB.Save(&user1) + DB.Save(&user2) + + if DB.Delete(&User{}, user1.Id).Error != nil { + t.Errorf("No error should happen when delete a record") + } else if !DB.Where("name = ?", user1.Name).First(&User{}).RecordNotFound() { + t.Errorf("User can't be found after delete") + } + + if err := DB.Delete(&User{}, "name = ?", user2.Name).Error; err != nil { + t.Errorf("No error should happen when delete a record, err=%s", err) + } else if !DB.Where("name = ?", user2.Name).First(&User{}).RecordNotFound() { + t.Errorf("User can't be found after delete") + } +} + +func TestSoftDelete(t *testing.T) { + type User struct { + Id int64 + Name string + DeletedAt *time.Time + } + DB.AutoMigrate(&User{}) + + user := User{Name: "soft_delete"} + DB.Save(&user) + DB.Delete(&user) + + if DB.First(&User{}, "name = ?", user.Name).Error == nil { + t.Errorf("Can't find a soft deleted record") + } + + if err := DB.Unscoped().First(&User{}, "name = ?", user.Name).Error; err != nil { + t.Errorf("Should be able to find soft deleted record with Unscoped, but err=%s", err) + } + + DB.Unscoped().Delete(&user) + if !DB.Unscoped().First(&User{}, "name = ?", user.Name).RecordNotFound() { + t.Errorf("Can't find permanently deleted record") + } +} + +func TestSoftDeleteWithCustomizedDeletedAtColumnName(t *testing.T) { + creditCard := CreditCard{Number: "411111111234567"} + DB.Save(&creditCard) + DB.Delete(&creditCard) + + if deletedAtField, ok := DB.NewScope(&CreditCard{}).FieldByName("DeletedAt"); !ok || deletedAtField.DBName != "deleted_time" { + t.Errorf("CreditCard's DeletedAt's column name should be `deleted_time`") + } + + if DB.First(&CreditCard{}, "number = ?", creditCard.Number).Error == nil { + t.Errorf("Can't find a soft deleted record") + } + + if err := DB.Unscoped().First(&CreditCard{}, "number = ?", creditCard.Number).Error; err != nil { + t.Errorf("Should be able to find soft deleted record with Unscoped, but err=%s", err) + } + + DB.Unscoped().Delete(&creditCard) + if !DB.Unscoped().First(&CreditCard{}, "number = ?", creditCard.Number).RecordNotFound() { + t.Errorf("Can't find permanently deleted record") + } +} diff --git a/vendor/github.com/jinzhu/gorm/dialect.go b/vendor/github.com/jinzhu/gorm/dialect.go new file mode 100644 index 0000000..5f6439c --- /dev/null +++ b/vendor/github.com/jinzhu/gorm/dialect.go @@ -0,0 +1,130 @@ +package gorm + +import ( + "database/sql" + "fmt" + "reflect" + "strconv" + "strings" +) + +// Dialect interface contains behaviors that differ across SQL database +type Dialect interface { + // GetName get dialect's name + GetName() string + + // SetDB set db for dialect + SetDB(db SQLCommon) + + // BindVar return the placeholder for actual values in SQL statements, in many dbs it is "?", 
Postgres using $1 + BindVar(i int) string + // Quote quotes field name to avoid SQL parsing exceptions by using a reserved word as a field name + Quote(key string) string + // DataTypeOf return data's sql type + DataTypeOf(field *StructField) string + + // HasIndex check has index or not + HasIndex(tableName string, indexName string) bool + // HasForeignKey check has foreign key or not + HasForeignKey(tableName string, foreignKeyName string) bool + // RemoveIndex remove index + RemoveIndex(tableName string, indexName string) error + // HasTable check has table or not + HasTable(tableName string) bool + // HasColumn check has column or not + HasColumn(tableName string, columnName string) bool + // ModifyColumn modify column's type + ModifyColumn(tableName string, columnName string, typ string) error + + // LimitAndOffsetSQL return generated SQL with Limit and Offset, as mssql has special case + LimitAndOffsetSQL(limit, offset interface{}) string + // SelectFromDummyTable return select values, for most dbs, `SELECT values` just works, mysql needs `SELECT value FROM DUAL` + SelectFromDummyTable() string + // LastInsertIdReturningSuffix most dbs support LastInsertId, but postgres needs to use `RETURNING` + LastInsertIDReturningSuffix(tableName, columnName string) string + // DefaultValueStr + DefaultValueStr() string + + // BuildKeyName returns a valid key name (foreign key, index key) for the given table, field and reference + BuildKeyName(kind, tableName string, fields ...string) string + + // CurrentDatabase return current database name + CurrentDatabase() string +} + +var dialectsMap = map[string]Dialect{} + +func newDialect(name string, db SQLCommon) Dialect { + if value, ok := dialectsMap[name]; ok { + dialect := reflect.New(reflect.TypeOf(value).Elem()).Interface().(Dialect) + dialect.SetDB(db) + return dialect + } + + fmt.Printf("`%v` is not officially supported, running under compatibility mode.\n", name) + commontDialect := &commonDialect{} + commontDialect.SetDB(db) + return commontDialect +} + +// RegisterDialect register new dialect +func RegisterDialect(name string, dialect Dialect) { + dialectsMap[name] = dialect +} + +// ParseFieldStructForDialect get field's sql data type +var ParseFieldStructForDialect = func(field *StructField, dialect Dialect) (fieldValue reflect.Value, sqlType string, size int, additionalType string) { + // Get redirected field type + var ( + reflectType = field.Struct.Type + dataType = field.TagSettings["TYPE"] + ) + + for reflectType.Kind() == reflect.Ptr { + reflectType = reflectType.Elem() + } + + // Get redirected field value + fieldValue = reflect.Indirect(reflect.New(reflectType)) + + if gormDataType, ok := fieldValue.Interface().(interface { + GormDataType(Dialect) string + }); ok { + dataType = gormDataType.GormDataType(dialect) + } + + // Get scanner's real value + if dataType == "" { + var getScannerValue func(reflect.Value) + getScannerValue = func(value reflect.Value) { + fieldValue = value + if _, isScanner := reflect.New(fieldValue.Type()).Interface().(sql.Scanner); isScanner && fieldValue.Kind() == reflect.Struct { + getScannerValue(fieldValue.Field(0)) + } + } + getScannerValue(fieldValue) + } + + // Default Size + if num, ok := field.TagSettings["SIZE"]; ok { + size, _ = strconv.Atoi(num) + } else { + size = 255 + } + + // Default type from tag setting + additionalType = field.TagSettings["NOT NULL"] + " " + field.TagSettings["UNIQUE"] + if value, ok := field.TagSettings["DEFAULT"]; ok { + additionalType = additionalType + " DEFAULT " + 
value + } + + return fieldValue, dataType, size, strings.TrimSpace(additionalType) +} + +func currentDatabaseAndTable(dialect Dialect, tableName string) (string, string) { + if strings.Contains(tableName, ".") { + splitStrings := strings.SplitN(tableName, ".", 2) + return splitStrings[0], splitStrings[1] + } + return dialect.CurrentDatabase(), tableName +} diff --git a/vendor/github.com/jinzhu/gorm/dialect_common.go b/vendor/github.com/jinzhu/gorm/dialect_common.go new file mode 100644 index 0000000..b9f0c7d --- /dev/null +++ b/vendor/github.com/jinzhu/gorm/dialect_common.go @@ -0,0 +1,176 @@ +package gorm + +import ( + "fmt" + "reflect" + "regexp" + "strconv" + "strings" + "time" +) + +// DefaultForeignKeyNamer contains the default foreign key name generator method +type DefaultForeignKeyNamer struct { +} + +type commonDialect struct { + db SQLCommon + DefaultForeignKeyNamer +} + +func init() { + RegisterDialect("common", &commonDialect{}) +} + +func (commonDialect) GetName() string { + return "common" +} + +func (s *commonDialect) SetDB(db SQLCommon) { + s.db = db +} + +func (commonDialect) BindVar(i int) string { + return "$$$" // ? +} + +func (commonDialect) Quote(key string) string { + return fmt.Sprintf(`"%s"`, key) +} + +func (s *commonDialect) fieldCanAutoIncrement(field *StructField) bool { + if value, ok := field.TagSettings["AUTO_INCREMENT"]; ok { + return strings.ToLower(value) != "false" + } + return field.IsPrimaryKey +} + +func (s *commonDialect) DataTypeOf(field *StructField) string { + var dataValue, sqlType, size, additionalType = ParseFieldStructForDialect(field, s) + + if sqlType == "" { + switch dataValue.Kind() { + case reflect.Bool: + sqlType = "BOOLEAN" + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uintptr: + if s.fieldCanAutoIncrement(field) { + sqlType = "INTEGER AUTO_INCREMENT" + } else { + sqlType = "INTEGER" + } + case reflect.Int64, reflect.Uint64: + if s.fieldCanAutoIncrement(field) { + sqlType = "BIGINT AUTO_INCREMENT" + } else { + sqlType = "BIGINT" + } + case reflect.Float32, reflect.Float64: + sqlType = "FLOAT" + case reflect.String: + if size > 0 && size < 65532 { + sqlType = fmt.Sprintf("VARCHAR(%d)", size) + } else { + sqlType = "VARCHAR(65532)" + } + case reflect.Struct: + if _, ok := dataValue.Interface().(time.Time); ok { + sqlType = "TIMESTAMP" + } + default: + if _, ok := dataValue.Interface().([]byte); ok { + if size > 0 && size < 65532 { + sqlType = fmt.Sprintf("BINARY(%d)", size) + } else { + sqlType = "BINARY(65532)" + } + } + } + } + + if sqlType == "" { + panic(fmt.Sprintf("invalid sql type %s (%s) for commonDialect", dataValue.Type().Name(), dataValue.Kind().String())) + } + + if strings.TrimSpace(additionalType) == "" { + return sqlType + } + return fmt.Sprintf("%v %v", sqlType, additionalType) +} + +func (s commonDialect) HasIndex(tableName string, indexName string) bool { + var count int + currentDatabase, tableName := currentDatabaseAndTable(&s, tableName) + s.db.QueryRow("SELECT count(*) FROM INFORMATION_SCHEMA.STATISTICS WHERE table_schema = ? AND table_name = ? 
AND index_name = ?", currentDatabase, tableName, indexName).Scan(&count) + return count > 0 +} + +func (s commonDialect) RemoveIndex(tableName string, indexName string) error { + _, err := s.db.Exec(fmt.Sprintf("DROP INDEX %v", indexName)) + return err +} + +func (s commonDialect) HasForeignKey(tableName string, foreignKeyName string) bool { + return false +} + +func (s commonDialect) HasTable(tableName string) bool { + var count int + currentDatabase, tableName := currentDatabaseAndTable(&s, tableName) + s.db.QueryRow("SELECT count(*) FROM INFORMATION_SCHEMA.TABLES WHERE table_schema = ? AND table_name = ?", currentDatabase, tableName).Scan(&count) + return count > 0 +} + +func (s commonDialect) HasColumn(tableName string, columnName string) bool { + var count int + currentDatabase, tableName := currentDatabaseAndTable(&s, tableName) + s.db.QueryRow("SELECT count(*) FROM INFORMATION_SCHEMA.COLUMNS WHERE table_schema = ? AND table_name = ? AND column_name = ?", currentDatabase, tableName, columnName).Scan(&count) + return count > 0 +} + +func (s commonDialect) ModifyColumn(tableName string, columnName string, typ string) error { + _, err := s.db.Exec(fmt.Sprintf("ALTER TABLE %v ALTER COLUMN %v TYPE %v", tableName, columnName, typ)) + return err +} + +func (s commonDialect) CurrentDatabase() (name string) { + s.db.QueryRow("SELECT DATABASE()").Scan(&name) + return +} + +func (commonDialect) LimitAndOffsetSQL(limit, offset interface{}) (sql string) { + if limit != nil { + if parsedLimit, err := strconv.ParseInt(fmt.Sprint(limit), 0, 0); err == nil && parsedLimit >= 0 { + sql += fmt.Sprintf(" LIMIT %d", parsedLimit) + } + } + if offset != nil { + if parsedOffset, err := strconv.ParseInt(fmt.Sprint(offset), 0, 0); err == nil && parsedOffset >= 0 { + sql += fmt.Sprintf(" OFFSET %d", parsedOffset) + } + } + return +} + +func (commonDialect) SelectFromDummyTable() string { + return "" +} + +func (commonDialect) LastInsertIDReturningSuffix(tableName, columnName string) string { + return "" +} + +func (commonDialect) DefaultValueStr() string { + return "DEFAULT VALUES" +} + +// BuildKeyName returns a valid key name (foreign key, index key) for the given table, field and reference +func (DefaultForeignKeyNamer) BuildKeyName(kind, tableName string, fields ...string) string { + keyName := fmt.Sprintf("%s_%s_%s", kind, tableName, strings.Join(fields, "_")) + keyName = regexp.MustCompile("[^a-zA-Z0-9]+").ReplaceAllString(keyName, "_") + return keyName +} + +// IsByteArrayOrSlice returns true of the reflected value is an array or slice +func IsByteArrayOrSlice(value reflect.Value) bool { + return (value.Kind() == reflect.Array || value.Kind() == reflect.Slice) && value.Type().Elem() == reflect.TypeOf(uint8(0)) +} diff --git a/vendor/github.com/jinzhu/gorm/dialect_mysql.go b/vendor/github.com/jinzhu/gorm/dialect_mysql.go new file mode 100644 index 0000000..b162bad --- /dev/null +++ b/vendor/github.com/jinzhu/gorm/dialect_mysql.go @@ -0,0 +1,191 @@ +package gorm + +import ( + "crypto/sha1" + "fmt" + "reflect" + "regexp" + "strconv" + "strings" + "time" + "unicode/utf8" +) + +type mysql struct { + commonDialect +} + +func init() { + RegisterDialect("mysql", &mysql{}) +} + +func (mysql) GetName() string { + return "mysql" +} + +func (mysql) Quote(key string) string { + return fmt.Sprintf("`%s`", key) +} + +// Get Data Type for MySQL Dialect +func (s *mysql) DataTypeOf(field *StructField) string { + var dataValue, sqlType, size, additionalType = ParseFieldStructForDialect(field, s) + + // MySQL allows only 
one auto increment column per table, and it must + // be a KEY column. + if _, ok := field.TagSettings["AUTO_INCREMENT"]; ok { + if _, ok = field.TagSettings["INDEX"]; !ok && !field.IsPrimaryKey { + delete(field.TagSettings, "AUTO_INCREMENT") + } + } + + if sqlType == "" { + switch dataValue.Kind() { + case reflect.Bool: + sqlType = "boolean" + case reflect.Int8: + if s.fieldCanAutoIncrement(field) { + field.TagSettings["AUTO_INCREMENT"] = "AUTO_INCREMENT" + sqlType = "tinyint AUTO_INCREMENT" + } else { + sqlType = "tinyint" + } + case reflect.Int, reflect.Int16, reflect.Int32: + if s.fieldCanAutoIncrement(field) { + field.TagSettings["AUTO_INCREMENT"] = "AUTO_INCREMENT" + sqlType = "int AUTO_INCREMENT" + } else { + sqlType = "int" + } + case reflect.Uint8: + if s.fieldCanAutoIncrement(field) { + field.TagSettings["AUTO_INCREMENT"] = "AUTO_INCREMENT" + sqlType = "tinyint unsigned AUTO_INCREMENT" + } else { + sqlType = "tinyint unsigned" + } + case reflect.Uint, reflect.Uint16, reflect.Uint32, reflect.Uintptr: + if s.fieldCanAutoIncrement(field) { + field.TagSettings["AUTO_INCREMENT"] = "AUTO_INCREMENT" + sqlType = "int unsigned AUTO_INCREMENT" + } else { + sqlType = "int unsigned" + } + case reflect.Int64: + if s.fieldCanAutoIncrement(field) { + field.TagSettings["AUTO_INCREMENT"] = "AUTO_INCREMENT" + sqlType = "bigint AUTO_INCREMENT" + } else { + sqlType = "bigint" + } + case reflect.Uint64: + if s.fieldCanAutoIncrement(field) { + field.TagSettings["AUTO_INCREMENT"] = "AUTO_INCREMENT" + sqlType = "bigint unsigned AUTO_INCREMENT" + } else { + sqlType = "bigint unsigned" + } + case reflect.Float32, reflect.Float64: + sqlType = "double" + case reflect.String: + if size > 0 && size < 65532 { + sqlType = fmt.Sprintf("varchar(%d)", size) + } else { + sqlType = "longtext" + } + case reflect.Struct: + if _, ok := dataValue.Interface().(time.Time); ok { + precision := "" + if p, ok := field.TagSettings["PRECISION"]; ok { + precision = fmt.Sprintf("(%s)", p) + } + + if _, ok := field.TagSettings["NOT NULL"]; ok { + sqlType = fmt.Sprintf("timestamp%v", precision) + } else { + sqlType = fmt.Sprintf("timestamp%v NULL", precision) + } + } + default: + if IsByteArrayOrSlice(dataValue) { + if size > 0 && size < 65532 { + sqlType = fmt.Sprintf("varbinary(%d)", size) + } else { + sqlType = "longblob" + } + } + } + } + + if sqlType == "" { + panic(fmt.Sprintf("invalid sql type %s (%s) for mysql", dataValue.Type().Name(), dataValue.Kind().String())) + } + + if strings.TrimSpace(additionalType) == "" { + return sqlType + } + return fmt.Sprintf("%v %v", sqlType, additionalType) +} + +func (s mysql) RemoveIndex(tableName string, indexName string) error { + _, err := s.db.Exec(fmt.Sprintf("DROP INDEX %v ON %v", indexName, s.Quote(tableName))) + return err +} + +func (s mysql) ModifyColumn(tableName string, columnName string, typ string) error { + _, err := s.db.Exec(fmt.Sprintf("ALTER TABLE %v MODIFY COLUMN %v %v", tableName, columnName, typ)) + return err +} + +func (s mysql) LimitAndOffsetSQL(limit, offset interface{}) (sql string) { + if limit != nil { + if parsedLimit, err := strconv.ParseInt(fmt.Sprint(limit), 0, 0); err == nil && parsedLimit >= 0 { + sql += fmt.Sprintf(" LIMIT %d", parsedLimit) + + if offset != nil { + if parsedOffset, err := strconv.ParseInt(fmt.Sprint(offset), 0, 0); err == nil && parsedOffset >= 0 { + sql += fmt.Sprintf(" OFFSET %d", parsedOffset) + } + } + } + } + return +} + +func (s mysql) HasForeignKey(tableName string, foreignKeyName string) bool { + var count int + 
currentDatabase, tableName := currentDatabaseAndTable(&s, tableName) + s.db.QueryRow("SELECT count(*) FROM INFORMATION_SCHEMA.TABLE_CONSTRAINTS WHERE CONSTRAINT_SCHEMA=? AND TABLE_NAME=? AND CONSTRAINT_NAME=? AND CONSTRAINT_TYPE='FOREIGN KEY'", currentDatabase, tableName, foreignKeyName).Scan(&count) + return count > 0 +} + +func (s mysql) CurrentDatabase() (name string) { + s.db.QueryRow("SELECT DATABASE()").Scan(&name) + return +} + +func (mysql) SelectFromDummyTable() string { + return "FROM DUAL" +} + +func (s mysql) BuildKeyName(kind, tableName string, fields ...string) string { + keyName := s.commonDialect.BuildKeyName(kind, tableName, fields...) + if utf8.RuneCountInString(keyName) <= 64 { + return keyName + } + h := sha1.New() + h.Write([]byte(keyName)) + bs := h.Sum(nil) + + // sha1 is 40 characters, keep first 24 characters of destination + destRunes := []rune(regexp.MustCompile("[^a-zA-Z0-9]+").ReplaceAllString(fields[0], "_")) + if len(destRunes) > 24 { + destRunes = destRunes[:24] + } + + return fmt.Sprintf("%s%x", string(destRunes), bs) +} + +func (mysql) DefaultValueStr() string { + return "VALUES()" +} diff --git a/vendor/github.com/jinzhu/gorm/dialect_postgres.go b/vendor/github.com/jinzhu/gorm/dialect_postgres.go new file mode 100644 index 0000000..c44c6a5 --- /dev/null +++ b/vendor/github.com/jinzhu/gorm/dialect_postgres.go @@ -0,0 +1,143 @@ +package gorm + +import ( + "encoding/json" + "fmt" + "reflect" + "strings" + "time" +) + +type postgres struct { + commonDialect +} + +func init() { + RegisterDialect("postgres", &postgres{}) + RegisterDialect("cloudsqlpostgres", &postgres{}) +} + +func (postgres) GetName() string { + return "postgres" +} + +func (postgres) BindVar(i int) string { + return fmt.Sprintf("$%v", i) +} + +func (s *postgres) DataTypeOf(field *StructField) string { + var dataValue, sqlType, size, additionalType = ParseFieldStructForDialect(field, s) + + if sqlType == "" { + switch dataValue.Kind() { + case reflect.Bool: + sqlType = "boolean" + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uintptr: + if s.fieldCanAutoIncrement(field) { + field.TagSettings["AUTO_INCREMENT"] = "AUTO_INCREMENT" + sqlType = "serial" + } else { + sqlType = "integer" + } + case reflect.Int64, reflect.Uint32, reflect.Uint64: + if s.fieldCanAutoIncrement(field) { + field.TagSettings["AUTO_INCREMENT"] = "AUTO_INCREMENT" + sqlType = "bigserial" + } else { + sqlType = "bigint" + } + case reflect.Float32, reflect.Float64: + sqlType = "numeric" + case reflect.String: + if _, ok := field.TagSettings["SIZE"]; !ok { + size = 0 // if SIZE haven't been set, use `text` as the default type, as there are no performance different + } + + if size > 0 && size < 65532 { + sqlType = fmt.Sprintf("varchar(%d)", size) + } else { + sqlType = "text" + } + case reflect.Struct: + if _, ok := dataValue.Interface().(time.Time); ok { + sqlType = "timestamp with time zone" + } + case reflect.Map: + if dataValue.Type().Name() == "Hstore" { + sqlType = "hstore" + } + default: + if IsByteArrayOrSlice(dataValue) { + sqlType = "bytea" + + if isUUID(dataValue) { + sqlType = "uuid" + } + + if isJSON(dataValue) { + sqlType = "jsonb" + } + } + } + } + + if sqlType == "" { + panic(fmt.Sprintf("invalid sql type %s (%s) for postgres", dataValue.Type().Name(), dataValue.Kind().String())) + } + + if strings.TrimSpace(additionalType) == "" { + return sqlType + } + return fmt.Sprintf("%v %v", sqlType, additionalType) +} + +func (s postgres) 
HasIndex(tableName string, indexName string) bool { + var count int + s.db.QueryRow("SELECT count(*) FROM pg_indexes WHERE tablename = $1 AND indexname = $2 AND schemaname = CURRENT_SCHEMA()", tableName, indexName).Scan(&count) + return count > 0 +} + +func (s postgres) HasForeignKey(tableName string, foreignKeyName string) bool { + var count int + s.db.QueryRow("SELECT count(con.conname) FROM pg_constraint con WHERE $1::regclass::oid = con.conrelid AND con.conname = $2 AND con.contype='f'", tableName, foreignKeyName).Scan(&count) + return count > 0 +} + +func (s postgres) HasTable(tableName string) bool { + var count int + s.db.QueryRow("SELECT count(*) FROM INFORMATION_SCHEMA.tables WHERE table_name = $1 AND table_type = 'BASE TABLE' AND table_schema = CURRENT_SCHEMA()", tableName).Scan(&count) + return count > 0 +} + +func (s postgres) HasColumn(tableName string, columnName string) bool { + var count int + s.db.QueryRow("SELECT count(*) FROM INFORMATION_SCHEMA.columns WHERE table_name = $1 AND column_name = $2 AND table_schema = CURRENT_SCHEMA()", tableName, columnName).Scan(&count) + return count > 0 +} + +func (s postgres) CurrentDatabase() (name string) { + s.db.QueryRow("SELECT CURRENT_DATABASE()").Scan(&name) + return +} + +func (s postgres) LastInsertIDReturningSuffix(tableName, key string) string { + return fmt.Sprintf("RETURNING %v.%v", tableName, key) +} + +func (postgres) SupportLastInsertID() bool { + return false +} + +func isUUID(value reflect.Value) bool { + if value.Kind() != reflect.Array || value.Type().Len() != 16 { + return false + } + typename := value.Type().Name() + lower := strings.ToLower(typename) + return "uuid" == lower || "guid" == lower +} + +func isJSON(value reflect.Value) bool { + _, ok := value.Interface().(json.RawMessage) + return ok +} diff --git a/vendor/github.com/jinzhu/gorm/dialect_sqlite3.go b/vendor/github.com/jinzhu/gorm/dialect_sqlite3.go new file mode 100644 index 0000000..f26f6be --- /dev/null +++ b/vendor/github.com/jinzhu/gorm/dialect_sqlite3.go @@ -0,0 +1,107 @@ +package gorm + +import ( + "fmt" + "reflect" + "strings" + "time" +) + +type sqlite3 struct { + commonDialect +} + +func init() { + RegisterDialect("sqlite3", &sqlite3{}) +} + +func (sqlite3) GetName() string { + return "sqlite3" +} + +// Get Data Type for Sqlite Dialect +func (s *sqlite3) DataTypeOf(field *StructField) string { + var dataValue, sqlType, size, additionalType = ParseFieldStructForDialect(field, s) + + if sqlType == "" { + switch dataValue.Kind() { + case reflect.Bool: + sqlType = "bool" + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uintptr: + if s.fieldCanAutoIncrement(field) { + field.TagSettings["AUTO_INCREMENT"] = "AUTO_INCREMENT" + sqlType = "integer primary key autoincrement" + } else { + sqlType = "integer" + } + case reflect.Int64, reflect.Uint64: + if s.fieldCanAutoIncrement(field) { + field.TagSettings["AUTO_INCREMENT"] = "AUTO_INCREMENT" + sqlType = "integer primary key autoincrement" + } else { + sqlType = "bigint" + } + case reflect.Float32, reflect.Float64: + sqlType = "real" + case reflect.String: + if size > 0 && size < 65532 { + sqlType = fmt.Sprintf("varchar(%d)", size) + } else { + sqlType = "text" + } + case reflect.Struct: + if _, ok := dataValue.Interface().(time.Time); ok { + sqlType = "datetime" + } + default: + if IsByteArrayOrSlice(dataValue) { + sqlType = "blob" + } + } + } + + if sqlType == "" { + panic(fmt.Sprintf("invalid sql type %s (%s) for 
sqlite3", dataValue.Type().Name(), dataValue.Kind().String())) + } + + if strings.TrimSpace(additionalType) == "" { + return sqlType + } + return fmt.Sprintf("%v %v", sqlType, additionalType) +} + +func (s sqlite3) HasIndex(tableName string, indexName string) bool { + var count int + s.db.QueryRow(fmt.Sprintf("SELECT count(*) FROM sqlite_master WHERE tbl_name = ? AND sql LIKE '%%INDEX %v ON%%'", indexName), tableName).Scan(&count) + return count > 0 +} + +func (s sqlite3) HasTable(tableName string) bool { + var count int + s.db.QueryRow("SELECT count(*) FROM sqlite_master WHERE type='table' AND name=?", tableName).Scan(&count) + return count > 0 +} + +func (s sqlite3) HasColumn(tableName string, columnName string) bool { + var count int + s.db.QueryRow(fmt.Sprintf("SELECT count(*) FROM sqlite_master WHERE tbl_name = ? AND (sql LIKE '%%\"%v\" %%' OR sql LIKE '%%%v %%');\n", columnName, columnName), tableName).Scan(&count) + return count > 0 +} + +func (s sqlite3) CurrentDatabase() (name string) { + var ( + ifaces = make([]interface{}, 3) + pointers = make([]*string, 3) + i int + ) + for i = 0; i < 3; i++ { + ifaces[i] = &pointers[i] + } + if err := s.db.QueryRow("PRAGMA database_list").Scan(ifaces...); err != nil { + return + } + if pointers[1] != nil { + name = *pointers[1] + } + return +} diff --git a/vendor/github.com/jinzhu/gorm/dialects/mssql/mssql.go b/vendor/github.com/jinzhu/gorm/dialects/mssql/mssql.go new file mode 100644 index 0000000..e060646 --- /dev/null +++ b/vendor/github.com/jinzhu/gorm/dialects/mssql/mssql.go @@ -0,0 +1,196 @@ +package mssql + +import ( + "fmt" + "reflect" + "strconv" + "strings" + "time" + + _ "github.com/denisenkom/go-mssqldb" + "github.com/jinzhu/gorm" +) + +func setIdentityInsert(scope *gorm.Scope) { + if scope.Dialect().GetName() == "mssql" { + for _, field := range scope.PrimaryFields() { + if _, ok := field.TagSettings["AUTO_INCREMENT"]; ok && !field.IsBlank { + scope.NewDB().Exec(fmt.Sprintf("SET IDENTITY_INSERT %v ON", scope.TableName())) + scope.InstanceSet("mssql:identity_insert_on", true) + } + } + } +} + +func turnOffIdentityInsert(scope *gorm.Scope) { + if scope.Dialect().GetName() == "mssql" { + if _, ok := scope.InstanceGet("mssql:identity_insert_on"); ok { + scope.NewDB().Exec(fmt.Sprintf("SET IDENTITY_INSERT %v OFF", scope.TableName())) + } + } +} + +func init() { + gorm.DefaultCallback.Create().After("gorm:begin_transaction").Register("mssql:set_identity_insert", setIdentityInsert) + gorm.DefaultCallback.Create().Before("gorm:commit_or_rollback_transaction").Register("mssql:turn_off_identity_insert", turnOffIdentityInsert) + gorm.RegisterDialect("mssql", &mssql{}) +} + +type mssql struct { + db gorm.SQLCommon + gorm.DefaultForeignKeyNamer +} + +func (mssql) GetName() string { + return "mssql" +} + +func (s *mssql) SetDB(db gorm.SQLCommon) { + s.db = db +} + +func (mssql) BindVar(i int) string { + return "$$$" // ? 
+} + +func (mssql) Quote(key string) string { + return fmt.Sprintf(`[%s]`, key) +} + +func (s *mssql) DataTypeOf(field *gorm.StructField) string { + var dataValue, sqlType, size, additionalType = gorm.ParseFieldStructForDialect(field, s) + + if sqlType == "" { + switch dataValue.Kind() { + case reflect.Bool: + sqlType = "bit" + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uintptr: + if s.fieldCanAutoIncrement(field) { + field.TagSettings["AUTO_INCREMENT"] = "AUTO_INCREMENT" + sqlType = "int IDENTITY(1,1)" + } else { + sqlType = "int" + } + case reflect.Int64, reflect.Uint64: + if s.fieldCanAutoIncrement(field) { + field.TagSettings["AUTO_INCREMENT"] = "AUTO_INCREMENT" + sqlType = "bigint IDENTITY(1,1)" + } else { + sqlType = "bigint" + } + case reflect.Float32, reflect.Float64: + sqlType = "float" + case reflect.String: + if size > 0 && size < 8000 { + sqlType = fmt.Sprintf("nvarchar(%d)", size) + } else { + sqlType = "nvarchar(max)" + } + case reflect.Struct: + if _, ok := dataValue.Interface().(time.Time); ok { + sqlType = "datetimeoffset" + } + default: + if gorm.IsByteArrayOrSlice(dataValue) { + if size > 0 && size < 8000 { + sqlType = fmt.Sprintf("varbinary(%d)", size) + } else { + sqlType = "varbinary(max)" + } + } + } + } + + if sqlType == "" { + panic(fmt.Sprintf("invalid sql type %s (%s) for mssql", dataValue.Type().Name(), dataValue.Kind().String())) + } + + if strings.TrimSpace(additionalType) == "" { + return sqlType + } + return fmt.Sprintf("%v %v", sqlType, additionalType) +} + +func (s mssql) fieldCanAutoIncrement(field *gorm.StructField) bool { + if value, ok := field.TagSettings["AUTO_INCREMENT"]; ok { + return value != "FALSE" + } + return field.IsPrimaryKey +} + +func (s mssql) HasIndex(tableName string, indexName string) bool { + var count int + s.db.QueryRow("SELECT count(*) FROM sys.indexes WHERE name=? AND object_id=OBJECT_ID(?)", indexName, tableName).Scan(&count) + return count > 0 +} + +func (s mssql) RemoveIndex(tableName string, indexName string) error { + _, err := s.db.Exec(fmt.Sprintf("DROP INDEX %v ON %v", indexName, s.Quote(tableName))) + return err +} + +func (s mssql) HasForeignKey(tableName string, foreignKeyName string) bool { + return false +} + +func (s mssql) HasTable(tableName string) bool { + var count int + currentDatabase, tableName := currentDatabaseAndTable(&s, tableName) + s.db.QueryRow("SELECT count(*) FROM INFORMATION_SCHEMA.tables WHERE table_name = ? AND table_catalog = ?", tableName, currentDatabase).Scan(&count) + return count > 0 +} + +func (s mssql) HasColumn(tableName string, columnName string) bool { + var count int + currentDatabase, tableName := currentDatabaseAndTable(&s, tableName) + s.db.QueryRow("SELECT count(*) FROM information_schema.columns WHERE table_catalog = ? AND table_name = ? 
AND column_name = ?", currentDatabase, tableName, columnName).Scan(&count) + return count > 0 +} + +func (s mssql) ModifyColumn(tableName string, columnName string, typ string) error { + _, err := s.db.Exec(fmt.Sprintf("ALTER TABLE %v ALTER COLUMN %v %v", tableName, columnName, typ)) + return err +} + +func (s mssql) CurrentDatabase() (name string) { + s.db.QueryRow("SELECT DB_NAME() AS [Current Database]").Scan(&name) + return +} + +func (mssql) LimitAndOffsetSQL(limit, offset interface{}) (sql string) { + if offset != nil { + if parsedOffset, err := strconv.ParseInt(fmt.Sprint(offset), 0, 0); err == nil && parsedOffset >= 0 { + sql += fmt.Sprintf(" OFFSET %d ROWS", parsedOffset) + } + } + if limit != nil { + if parsedLimit, err := strconv.ParseInt(fmt.Sprint(limit), 0, 0); err == nil && parsedLimit >= 0 { + if sql == "" { + // add default zero offset + sql += " OFFSET 0 ROWS" + } + sql += fmt.Sprintf(" FETCH NEXT %d ROWS ONLY", parsedLimit) + } + } + return +} + +func (mssql) SelectFromDummyTable() string { + return "" +} + +func (mssql) LastInsertIDReturningSuffix(tableName, columnName string) string { + return "" +} + +func (mssql) DefaultValueStr() string { + return "DEFAULT VALUES" +} + +func currentDatabaseAndTable(dialect gorm.Dialect, tableName string) (string, string) { + if strings.Contains(tableName, ".") { + splitStrings := strings.SplitN(tableName, ".", 2) + return splitStrings[0], splitStrings[1] + } + return dialect.CurrentDatabase(), tableName +} diff --git a/vendor/github.com/jinzhu/gorm/dialects/mysql/mysql.go b/vendor/github.com/jinzhu/gorm/dialects/mysql/mysql.go new file mode 100644 index 0000000..9deba48 --- /dev/null +++ b/vendor/github.com/jinzhu/gorm/dialects/mysql/mysql.go @@ -0,0 +1,3 @@ +package mysql + +import _ "github.com/go-sql-driver/mysql" diff --git a/vendor/github.com/jinzhu/gorm/dialects/postgres/postgres.go b/vendor/github.com/jinzhu/gorm/dialects/postgres/postgres.go new file mode 100644 index 0000000..1d0dcb6 --- /dev/null +++ b/vendor/github.com/jinzhu/gorm/dialects/postgres/postgres.go @@ -0,0 +1,80 @@ +package postgres + +import ( + "database/sql" + "database/sql/driver" + + _ "github.com/lib/pq" + "github.com/lib/pq/hstore" + "encoding/json" + "errors" + "fmt" +) + +type Hstore map[string]*string + +// Value get value of Hstore +func (h Hstore) Value() (driver.Value, error) { + hstore := hstore.Hstore{Map: map[string]sql.NullString{}} + if len(h) == 0 { + return nil, nil + } + + for key, value := range h { + var s sql.NullString + if value != nil { + s.String = *value + s.Valid = true + } + hstore.Map[key] = s + } + return hstore.Value() +} + +// Scan scan value into Hstore +func (h *Hstore) Scan(value interface{}) error { + hstore := hstore.Hstore{} + + if err := hstore.Scan(value); err != nil { + return err + } + + if len(hstore.Map) == 0 { + return nil + } + + *h = Hstore{} + for k := range hstore.Map { + if hstore.Map[k].Valid { + s := hstore.Map[k].String + (*h)[k] = &s + } else { + (*h)[k] = nil + } + } + + return nil +} + +// Jsonb Postgresql's JSONB data type +type Jsonb struct { + json.RawMessage +} + +// Value get value of Jsonb +func (j Jsonb) Value() (driver.Value, error) { + if len(j.RawMessage) == 0 { + return nil, nil + } + return j.MarshalJSON() +} + +// Scan scan value into Jsonb +func (j *Jsonb) Scan(value interface{}) error { + bytes, ok := value.([]byte) + if !ok { + return errors.New(fmt.Sprint("Failed to unmarshal JSONB value:", value)) + } + + return json.Unmarshal(bytes, j) +} diff --git 
a/vendor/github.com/jinzhu/gorm/dialects/sqlite/sqlite.go b/vendor/github.com/jinzhu/gorm/dialects/sqlite/sqlite.go new file mode 100644 index 0000000..069ad3a --- /dev/null +++ b/vendor/github.com/jinzhu/gorm/dialects/sqlite/sqlite.go @@ -0,0 +1,3 @@ +package sqlite + +import _ "github.com/mattn/go-sqlite3" diff --git a/vendor/github.com/jinzhu/gorm/docker-compose.yml b/vendor/github.com/jinzhu/gorm/docker-compose.yml new file mode 100644 index 0000000..79bf5fc --- /dev/null +++ b/vendor/github.com/jinzhu/gorm/docker-compose.yml @@ -0,0 +1,30 @@ +version: '3' + +services: + mysql: + image: 'mysql:latest' + ports: + - 9910:3306 + environment: + - MYSQL_DATABASE=gorm + - MYSQL_USER=gorm + - MYSQL_PASSWORD=gorm + - MYSQL_RANDOM_ROOT_PASSWORD="yes" + postgres: + image: 'postgres:latest' + ports: + - 9920:5432 + environment: + - POSTGRES_USER=gorm + - POSTGRES_DB=gorm + - POSTGRES_PASSWORD=gorm + mssql: + image: 'mcmoe/mssqldocker:latest' + ports: + - 9930:1433 + environment: + - ACCEPT_EULA=Y + - SA_PASSWORD=LoremIpsum86 + - MSSQL_DB=gorm + - MSSQL_USER=gorm + - MSSQL_PASSWORD=LoremIpsum86 diff --git a/vendor/github.com/jinzhu/gorm/embedded_struct_test.go b/vendor/github.com/jinzhu/gorm/embedded_struct_test.go new file mode 100644 index 0000000..5f8ece5 --- /dev/null +++ b/vendor/github.com/jinzhu/gorm/embedded_struct_test.go @@ -0,0 +1,91 @@ +package gorm_test + +import "testing" + +type BasePost struct { + Id int64 + Title string + URL string +} + +type Author struct { + ID string + Name string + Email string +} + +type HNPost struct { + BasePost + Author `gorm:"embedded_prefix:user_"` // Embedded struct + Upvotes int32 +} + +type EngadgetPost struct { + BasePost BasePost `gorm:"embedded"` + Author Author `gorm:"embedded;embedded_prefix:author_"` // Embedded struct + ImageUrl string +} + +func TestPrefixColumnNameForEmbeddedStruct(t *testing.T) { + dialect := DB.NewScope(&EngadgetPost{}).Dialect() + engadgetPostScope := DB.NewScope(&EngadgetPost{}) + if !dialect.HasColumn(engadgetPostScope.TableName(), "author_id") || !dialect.HasColumn(engadgetPostScope.TableName(), "author_name") || !dialect.HasColumn(engadgetPostScope.TableName(), "author_email") { + t.Errorf("should has prefix for embedded columns") + } + + if len(engadgetPostScope.PrimaryFields()) != 1 { + t.Errorf("should have only one primary field with embedded struct, but got %v", len(engadgetPostScope.PrimaryFields())) + } + + hnScope := DB.NewScope(&HNPost{}) + if !dialect.HasColumn(hnScope.TableName(), "user_id") || !dialect.HasColumn(hnScope.TableName(), "user_name") || !dialect.HasColumn(hnScope.TableName(), "user_email") { + t.Errorf("should has prefix for embedded columns") + } +} + +func TestSaveAndQueryEmbeddedStruct(t *testing.T) { + DB.Save(&HNPost{BasePost: BasePost{Title: "news"}}) + DB.Save(&HNPost{BasePost: BasePost{Title: "hn_news"}}) + var news HNPost + if err := DB.First(&news, "title = ?", "hn_news").Error; err != nil { + t.Errorf("no error should happen when query with embedded struct, but got %v", err) + } else if news.Title != "hn_news" { + t.Errorf("embedded struct's value should be scanned correctly") + } + + DB.Save(&EngadgetPost{BasePost: BasePost{Title: "engadget_news"}}) + var egNews EngadgetPost + if err := DB.First(&egNews, "title = ?", "engadget_news").Error; err != nil { + t.Errorf("no error should happen when query with embedded struct, but got %v", err) + } else if egNews.BasePost.Title != "engadget_news" { + t.Errorf("embedded struct's value should be scanned correctly") + } + + if 
DB.NewScope(&HNPost{}).PrimaryField() == nil { + t.Errorf("primary key with embedded struct should works") + } + + for _, field := range DB.NewScope(&HNPost{}).Fields() { + if field.Name == "BasePost" { + t.Errorf("scope Fields should not contain embedded struct") + } + } +} + +func TestEmbeddedPointerTypeStruct(t *testing.T) { + type HNPost struct { + *BasePost + Upvotes int32 + } + + DB.Create(&HNPost{BasePost: &BasePost{Title: "embedded_pointer_type"}}) + + var hnPost HNPost + if err := DB.First(&hnPost, "title = ?", "embedded_pointer_type").Error; err != nil { + t.Errorf("No error should happen when find embedded pointer type, but got %v", err) + } + + if hnPost.Title != "embedded_pointer_type" { + t.Errorf("Should find correct value for embedded pointer type") + } +} diff --git a/vendor/github.com/jinzhu/gorm/errors.go b/vendor/github.com/jinzhu/gorm/errors.go new file mode 100644 index 0000000..da2cf13 --- /dev/null +++ b/vendor/github.com/jinzhu/gorm/errors.go @@ -0,0 +1,72 @@ +package gorm + +import ( + "errors" + "strings" +) + +var ( + // ErrRecordNotFound record not found error, happens when haven't find any matched data when looking up with a struct + ErrRecordNotFound = errors.New("record not found") + // ErrInvalidSQL invalid SQL error, happens when you passed invalid SQL + ErrInvalidSQL = errors.New("invalid SQL") + // ErrInvalidTransaction invalid transaction when you are trying to `Commit` or `Rollback` + ErrInvalidTransaction = errors.New("no valid transaction") + // ErrCantStartTransaction can't start transaction when you are trying to start one with `Begin` + ErrCantStartTransaction = errors.New("can't start transaction") + // ErrUnaddressable unaddressable value + ErrUnaddressable = errors.New("using unaddressable value") +) + +// Errors contains all happened errors +type Errors []error + +// IsRecordNotFoundError returns current error has record not found error or not +func IsRecordNotFoundError(err error) bool { + if errs, ok := err.(Errors); ok { + for _, err := range errs { + if err == ErrRecordNotFound { + return true + } + } + } + return err == ErrRecordNotFound +} + +// GetErrors gets all happened errors +func (errs Errors) GetErrors() []error { + return errs +} + +// Add adds an error +func (errs Errors) Add(newErrors ...error) Errors { + for _, err := range newErrors { + if err == nil { + continue + } + + if errors, ok := err.(Errors); ok { + errs = errs.Add(errors...) 
+ } else { + ok = true + for _, e := range errs { + if err == e { + ok = false + } + } + if ok { + errs = append(errs, err) + } + } + } + return errs +} + +// Error format happened errors +func (errs Errors) Error() string { + var errors = []string{} + for _, e := range errs { + errors = append(errors, e.Error()) + } + return strings.Join(errors, "; ") +} diff --git a/vendor/github.com/jinzhu/gorm/errors_test.go b/vendor/github.com/jinzhu/gorm/errors_test.go new file mode 100644 index 0000000..9a428de --- /dev/null +++ b/vendor/github.com/jinzhu/gorm/errors_test.go @@ -0,0 +1,20 @@ +package gorm_test + +import ( + "errors" + "testing" + + "github.com/jinzhu/gorm" +) + +func TestErrorsCanBeUsedOutsideGorm(t *testing.T) { + errs := []error{errors.New("First"), errors.New("Second")} + + gErrs := gorm.Errors(errs) + gErrs = gErrs.Add(errors.New("Third")) + gErrs = gErrs.Add(gErrs) + + if gErrs.Error() != "First; Second; Third" { + t.Fatalf("Gave wrong error, got %s", gErrs.Error()) + } +} diff --git a/vendor/github.com/jinzhu/gorm/field.go b/vendor/github.com/jinzhu/gorm/field.go new file mode 100644 index 0000000..11c410b --- /dev/null +++ b/vendor/github.com/jinzhu/gorm/field.go @@ -0,0 +1,58 @@ +package gorm + +import ( + "database/sql" + "errors" + "fmt" + "reflect" +) + +// Field model field definition +type Field struct { + *StructField + IsBlank bool + Field reflect.Value +} + +// Set set a value to the field +func (field *Field) Set(value interface{}) (err error) { + if !field.Field.IsValid() { + return errors.New("field value not valid") + } + + if !field.Field.CanAddr() { + return ErrUnaddressable + } + + reflectValue, ok := value.(reflect.Value) + if !ok { + reflectValue = reflect.ValueOf(value) + } + + fieldValue := field.Field + if reflectValue.IsValid() { + if reflectValue.Type().ConvertibleTo(fieldValue.Type()) { + fieldValue.Set(reflectValue.Convert(fieldValue.Type())) + } else { + if fieldValue.Kind() == reflect.Ptr { + if fieldValue.IsNil() { + fieldValue.Set(reflect.New(field.Struct.Type.Elem())) + } + fieldValue = fieldValue.Elem() + } + + if reflectValue.Type().ConvertibleTo(fieldValue.Type()) { + fieldValue.Set(reflectValue.Convert(fieldValue.Type())) + } else if scanner, ok := fieldValue.Addr().Interface().(sql.Scanner); ok { + err = scanner.Scan(reflectValue.Interface()) + } else { + err = fmt.Errorf("could not convert argument of field %s from %s to %s", field.Name, reflectValue.Type(), fieldValue.Type()) + } + } + } else { + field.Field.Set(reflect.Zero(field.Field.Type())) + } + + field.IsBlank = isBlank(field.Field) + return err +} diff --git a/vendor/github.com/jinzhu/gorm/field_test.go b/vendor/github.com/jinzhu/gorm/field_test.go new file mode 100644 index 0000000..30e9a77 --- /dev/null +++ b/vendor/github.com/jinzhu/gorm/field_test.go @@ -0,0 +1,49 @@ +package gorm_test + +import ( + "testing" + + "github.com/jinzhu/gorm" +) + +type CalculateField struct { + gorm.Model + Name string + Children []CalculateFieldChild + Category CalculateFieldCategory + EmbeddedField +} + +type EmbeddedField struct { + EmbeddedName string `sql:"NOT NULL;DEFAULT:'hello'"` +} + +type CalculateFieldChild struct { + gorm.Model + CalculateFieldID uint + Name string +} + +type CalculateFieldCategory struct { + gorm.Model + CalculateFieldID uint + Name string +} + +func TestCalculateField(t *testing.T) { + var field CalculateField + var scope = DB.NewScope(&field) + if field, ok := scope.FieldByName("Children"); !ok || field.Relationship == nil { + t.Errorf("Should calculate fields 
correctly for the first time") + } + + if field, ok := scope.FieldByName("Category"); !ok || field.Relationship == nil { + t.Errorf("Should calculate fields correctly for the first time") + } + + if field, ok := scope.FieldByName("embedded_name"); !ok { + t.Errorf("should find embedded field") + } else if _, ok := field.TagSettings["NOT NULL"]; !ok { + t.Errorf("should find embedded field's tag settings") + } +} diff --git a/vendor/github.com/jinzhu/gorm/interface.go b/vendor/github.com/jinzhu/gorm/interface.go new file mode 100644 index 0000000..55128f7 --- /dev/null +++ b/vendor/github.com/jinzhu/gorm/interface.go @@ -0,0 +1,20 @@ +package gorm + +import "database/sql" + +// SQLCommon is the minimal database connection functionality gorm requires. Implemented by *sql.DB. +type SQLCommon interface { + Exec(query string, args ...interface{}) (sql.Result, error) + Prepare(query string) (*sql.Stmt, error) + Query(query string, args ...interface{}) (*sql.Rows, error) + QueryRow(query string, args ...interface{}) *sql.Row +} + +type sqlDb interface { + Begin() (*sql.Tx, error) +} + +type sqlTx interface { + Commit() error + Rollback() error +} diff --git a/vendor/github.com/jinzhu/gorm/join_table_handler.go b/vendor/github.com/jinzhu/gorm/join_table_handler.go new file mode 100644 index 0000000..a036d46 --- /dev/null +++ b/vendor/github.com/jinzhu/gorm/join_table_handler.go @@ -0,0 +1,211 @@ +package gorm + +import ( + "errors" + "fmt" + "reflect" + "strings" +) + +// JoinTableHandlerInterface is an interface for how to handle many2many relations +type JoinTableHandlerInterface interface { + // initialize join table handler + Setup(relationship *Relationship, tableName string, source reflect.Type, destination reflect.Type) + // Table return join table's table name + Table(db *DB) string + // Add create relationship in join table for source and destination + Add(handler JoinTableHandlerInterface, db *DB, source interface{}, destination interface{}) error + // Delete delete relationship in join table for sources + Delete(handler JoinTableHandlerInterface, db *DB, sources ...interface{}) error + // JoinWith query with `Join` conditions + JoinWith(handler JoinTableHandlerInterface, db *DB, source interface{}) *DB + // SourceForeignKeys return source foreign keys + SourceForeignKeys() []JoinTableForeignKey + // DestinationForeignKeys return destination foreign keys + DestinationForeignKeys() []JoinTableForeignKey +} + +// JoinTableForeignKey join table foreign key struct +type JoinTableForeignKey struct { + DBName string + AssociationDBName string +} + +// JoinTableSource is a struct that contains model type and foreign keys +type JoinTableSource struct { + ModelType reflect.Type + ForeignKeys []JoinTableForeignKey +} + +// JoinTableHandler default join table handler +type JoinTableHandler struct { + TableName string `sql:"-"` + Source JoinTableSource `sql:"-"` + Destination JoinTableSource `sql:"-"` +} + +// SourceForeignKeys return source foreign keys +func (s *JoinTableHandler) SourceForeignKeys() []JoinTableForeignKey { + return s.Source.ForeignKeys +} + +// DestinationForeignKeys return destination foreign keys +func (s *JoinTableHandler) DestinationForeignKeys() []JoinTableForeignKey { + return s.Destination.ForeignKeys +} + +// Setup initialize a default join table handler +func (s *JoinTableHandler) Setup(relationship *Relationship, tableName string, source reflect.Type, destination reflect.Type) { + s.TableName = tableName + + s.Source = JoinTableSource{ModelType: source} + 
s.Source.ForeignKeys = []JoinTableForeignKey{} + for idx, dbName := range relationship.ForeignFieldNames { + s.Source.ForeignKeys = append(s.Source.ForeignKeys, JoinTableForeignKey{ + DBName: relationship.ForeignDBNames[idx], + AssociationDBName: dbName, + }) + } + + s.Destination = JoinTableSource{ModelType: destination} + s.Destination.ForeignKeys = []JoinTableForeignKey{} + for idx, dbName := range relationship.AssociationForeignFieldNames { + s.Destination.ForeignKeys = append(s.Destination.ForeignKeys, JoinTableForeignKey{ + DBName: relationship.AssociationForeignDBNames[idx], + AssociationDBName: dbName, + }) + } +} + +// Table return join table's table name +func (s JoinTableHandler) Table(db *DB) string { + return DefaultTableNameHandler(db, s.TableName) +} + +func (s JoinTableHandler) updateConditionMap(conditionMap map[string]interface{}, db *DB, joinTableSources []JoinTableSource, sources ...interface{}) { + for _, source := range sources { + scope := db.NewScope(source) + modelType := scope.GetModelStruct().ModelType + + for _, joinTableSource := range joinTableSources { + if joinTableSource.ModelType == modelType { + for _, foreignKey := range joinTableSource.ForeignKeys { + if field, ok := scope.FieldByName(foreignKey.AssociationDBName); ok { + conditionMap[foreignKey.DBName] = field.Field.Interface() + } + } + break + } + } + } +} + +// Add create relationship in join table for source and destination +func (s JoinTableHandler) Add(handler JoinTableHandlerInterface, db *DB, source interface{}, destination interface{}) error { + var ( + scope = db.NewScope("") + conditionMap = map[string]interface{}{} + ) + + // Update condition map for source + s.updateConditionMap(conditionMap, db, []JoinTableSource{s.Source}, source) + + // Update condition map for destination + s.updateConditionMap(conditionMap, db, []JoinTableSource{s.Destination}, destination) + + var assignColumns, binVars, conditions []string + var values []interface{} + for key, value := range conditionMap { + assignColumns = append(assignColumns, scope.Quote(key)) + binVars = append(binVars, `?`) + conditions = append(conditions, fmt.Sprintf("%v = ?", scope.Quote(key))) + values = append(values, value) + } + + for _, value := range values { + values = append(values, value) + } + + quotedTable := scope.Quote(handler.Table(db)) + sql := fmt.Sprintf( + "INSERT INTO %v (%v) SELECT %v %v WHERE NOT EXISTS (SELECT * FROM %v WHERE %v)", + quotedTable, + strings.Join(assignColumns, ","), + strings.Join(binVars, ","), + scope.Dialect().SelectFromDummyTable(), + quotedTable, + strings.Join(conditions, " AND "), + ) + + return db.Exec(sql, values...).Error +} + +// Delete delete relationship in join table for sources +func (s JoinTableHandler) Delete(handler JoinTableHandlerInterface, db *DB, sources ...interface{}) error { + var ( + scope = db.NewScope(nil) + conditions []string + values []interface{} + conditionMap = map[string]interface{}{} + ) + + s.updateConditionMap(conditionMap, db, []JoinTableSource{s.Source, s.Destination}, sources...) 
+ + for key, value := range conditionMap { + conditions = append(conditions, fmt.Sprintf("%v = ?", scope.Quote(key))) + values = append(values, value) + } + + return db.Table(handler.Table(db)).Where(strings.Join(conditions, " AND "), values...).Delete("").Error +} + +// JoinWith query with `Join` conditions +func (s JoinTableHandler) JoinWith(handler JoinTableHandlerInterface, db *DB, source interface{}) *DB { + var ( + scope = db.NewScope(source) + tableName = handler.Table(db) + quotedTableName = scope.Quote(tableName) + joinConditions []string + values []interface{} + ) + + if s.Source.ModelType == scope.GetModelStruct().ModelType { + destinationTableName := db.NewScope(reflect.New(s.Destination.ModelType).Interface()).QuotedTableName() + for _, foreignKey := range s.Destination.ForeignKeys { + joinConditions = append(joinConditions, fmt.Sprintf("%v.%v = %v.%v", quotedTableName, scope.Quote(foreignKey.DBName), destinationTableName, scope.Quote(foreignKey.AssociationDBName))) + } + + var foreignDBNames []string + var foreignFieldNames []string + + for _, foreignKey := range s.Source.ForeignKeys { + foreignDBNames = append(foreignDBNames, foreignKey.DBName) + if field, ok := scope.FieldByName(foreignKey.AssociationDBName); ok { + foreignFieldNames = append(foreignFieldNames, field.Name) + } + } + + foreignFieldValues := scope.getColumnAsArray(foreignFieldNames, scope.Value) + + var condString string + if len(foreignFieldValues) > 0 { + var quotedForeignDBNames []string + for _, dbName := range foreignDBNames { + quotedForeignDBNames = append(quotedForeignDBNames, tableName+"."+dbName) + } + + condString = fmt.Sprintf("%v IN (%v)", toQueryCondition(scope, quotedForeignDBNames), toQueryMarks(foreignFieldValues)) + + keys := scope.getColumnAsArray(foreignFieldNames, scope.Value) + values = append(values, toQueryValues(keys)) + } else { + condString = fmt.Sprintf("1 <> 1") + } + + return db.Joins(fmt.Sprintf("INNER JOIN %v ON %v", quotedTableName, strings.Join(joinConditions, " AND "))). + Where(condString, toQueryValues(foreignFieldValues)...) 
+ } + + db.Error = errors.New("wrong source type for join table handler") + return db +} diff --git a/vendor/github.com/jinzhu/gorm/join_table_test.go b/vendor/github.com/jinzhu/gorm/join_table_test.go new file mode 100644 index 0000000..6d5f427 --- /dev/null +++ b/vendor/github.com/jinzhu/gorm/join_table_test.go @@ -0,0 +1,117 @@ +package gorm_test + +import ( + "fmt" + "strconv" + "testing" + "time" + + "github.com/jinzhu/gorm" +) + +type Person struct { + Id int + Name string + Addresses []*Address `gorm:"many2many:person_addresses;"` +} + +type PersonAddress struct { + gorm.JoinTableHandler + PersonID int + AddressID int + DeletedAt *time.Time + CreatedAt time.Time +} + +func (*PersonAddress) Add(handler gorm.JoinTableHandlerInterface, db *gorm.DB, foreignValue interface{}, associationValue interface{}) error { + foreignPrimaryKey, _ := strconv.Atoi(fmt.Sprint(db.NewScope(foreignValue).PrimaryKeyValue())) + associationPrimaryKey, _ := strconv.Atoi(fmt.Sprint(db.NewScope(associationValue).PrimaryKeyValue())) + if result := db.Unscoped().Model(&PersonAddress{}).Where(map[string]interface{}{ + "person_id": foreignPrimaryKey, + "address_id": associationPrimaryKey, + }).Update(map[string]interface{}{ + "person_id": foreignPrimaryKey, + "address_id": associationPrimaryKey, + "deleted_at": gorm.Expr("NULL"), + }).RowsAffected; result == 0 { + return db.Create(&PersonAddress{ + PersonID: foreignPrimaryKey, + AddressID: associationPrimaryKey, + }).Error + } + + return nil +} + +func (*PersonAddress) Delete(handler gorm.JoinTableHandlerInterface, db *gorm.DB, sources ...interface{}) error { + return db.Delete(&PersonAddress{}).Error +} + +func (pa *PersonAddress) JoinWith(handler gorm.JoinTableHandlerInterface, db *gorm.DB, source interface{}) *gorm.DB { + table := pa.Table(db) + return db.Joins("INNER JOIN person_addresses ON person_addresses.address_id = addresses.id").Where(fmt.Sprintf("%v.deleted_at IS NULL OR %v.deleted_at <= '0001-01-02'", table, table)) +} + +func TestJoinTable(t *testing.T) { + DB.Exec("drop table person_addresses;") + DB.AutoMigrate(&Person{}) + DB.SetJoinTableHandler(&Person{}, "Addresses", &PersonAddress{}) + + address1 := &Address{Address1: "address 1"} + address2 := &Address{Address1: "address 2"} + person := &Person{Name: "person", Addresses: []*Address{address1, address2}} + DB.Save(person) + + DB.Model(person).Association("Addresses").Delete(address1) + + if DB.Find(&[]PersonAddress{}, "person_id = ?", person.Id).RowsAffected != 1 { + t.Errorf("Should found one address") + } + + if DB.Model(person).Association("Addresses").Count() != 1 { + t.Errorf("Should found one address") + } + + if DB.Unscoped().Find(&[]PersonAddress{}, "person_id = ?", person.Id).RowsAffected != 2 { + t.Errorf("Found two addresses with Unscoped") + } + + if DB.Model(person).Association("Addresses").Clear(); DB.Model(person).Association("Addresses").Count() != 0 { + t.Errorf("Should deleted all addresses") + } +} + +func TestEmbeddedMany2ManyRelationship(t *testing.T) { + type EmbeddedPerson struct { + ID int + Name string + Addresses []*Address `gorm:"many2many:person_addresses;"` + } + + type NewPerson struct { + EmbeddedPerson + ExternalID uint + } + DB.Exec("drop table person_addresses;") + DB.AutoMigrate(&NewPerson{}) + + address1 := &Address{Address1: "address 1"} + address2 := &Address{Address1: "address 2"} + person := &NewPerson{ExternalID: 100, EmbeddedPerson: EmbeddedPerson{Name: "person", Addresses: []*Address{address1, address2}}} + if err := DB.Save(person).Error; err != nil { 
+ t.Errorf("no error should return when save embedded many2many relationship, but got %v", err) + } + + if err := DB.Model(person).Association("Addresses").Delete(address1).Error; err != nil { + t.Errorf("no error should return when delete embedded many2many relationship, but got %v", err) + } + + association := DB.Model(person).Association("Addresses") + if count := association.Count(); count != 1 || association.Error != nil { + t.Errorf("Should found one address, but got %v, error is %v", count, association.Error) + } + + if association.Clear(); association.Count() != 0 { + t.Errorf("Should deleted all addresses") + } +} diff --git a/vendor/github.com/jinzhu/gorm/logger.go b/vendor/github.com/jinzhu/gorm/logger.go new file mode 100644 index 0000000..4324a2e --- /dev/null +++ b/vendor/github.com/jinzhu/gorm/logger.go @@ -0,0 +1,119 @@ +package gorm + +import ( + "database/sql/driver" + "fmt" + "log" + "os" + "reflect" + "regexp" + "strconv" + "time" + "unicode" +) + +var ( + defaultLogger = Logger{log.New(os.Stdout, "\r\n", 0)} + sqlRegexp = regexp.MustCompile(`\?`) + numericPlaceHolderRegexp = regexp.MustCompile(`\$\d+`) +) + +func isPrintable(s string) bool { + for _, r := range s { + if !unicode.IsPrint(r) { + return false + } + } + return true +} + +var LogFormatter = func(values ...interface{}) (messages []interface{}) { + if len(values) > 1 { + var ( + sql string + formattedValues []string + level = values[0] + currentTime = "\n\033[33m[" + NowFunc().Format("2006-01-02 15:04:05") + "]\033[0m" + source = fmt.Sprintf("\033[35m(%v)\033[0m", values[1]) + ) + + messages = []interface{}{source, currentTime} + + if level == "sql" { + // duration + messages = append(messages, fmt.Sprintf(" \033[36;1m[%.2fms]\033[0m ", float64(values[2].(time.Duration).Nanoseconds()/1e4)/100.0)) + // sql + + for _, value := range values[4].([]interface{}) { + indirectValue := reflect.Indirect(reflect.ValueOf(value)) + if indirectValue.IsValid() { + value = indirectValue.Interface() + if t, ok := value.(time.Time); ok { + formattedValues = append(formattedValues, fmt.Sprintf("'%v'", t.Format("2006-01-02 15:04:05"))) + } else if b, ok := value.([]byte); ok { + if str := string(b); isPrintable(str) { + formattedValues = append(formattedValues, fmt.Sprintf("'%v'", str)) + } else { + formattedValues = append(formattedValues, "''") + } + } else if r, ok := value.(driver.Valuer); ok { + if value, err := r.Value(); err == nil && value != nil { + formattedValues = append(formattedValues, fmt.Sprintf("'%v'", value)) + } else { + formattedValues = append(formattedValues, "NULL") + } + } else { + formattedValues = append(formattedValues, fmt.Sprintf("'%v'", value)) + } + } else { + formattedValues = append(formattedValues, "NULL") + } + } + + // differentiate between $n placeholders or else treat like ? 
+ if numericPlaceHolderRegexp.MatchString(values[3].(string)) { + sql = values[3].(string) + for index, value := range formattedValues { + placeholder := fmt.Sprintf(`\$%d([^\d]|$)`, index+1) + sql = regexp.MustCompile(placeholder).ReplaceAllString(sql, value+"$1") + } + } else { + formattedValuesLength := len(formattedValues) + for index, value := range sqlRegexp.Split(values[3].(string), -1) { + sql += value + if index < formattedValuesLength { + sql += formattedValues[index] + } + } + } + + messages = append(messages, sql) + messages = append(messages, fmt.Sprintf(" \n\033[36;31m[%v]\033[0m ", strconv.FormatInt(values[5].(int64), 10)+" rows affected or returned ")) + } else { + messages = append(messages, "\033[31;1m") + messages = append(messages, values[2:]...) + messages = append(messages, "\033[0m") + } + } + + return +} + +type logger interface { + Print(v ...interface{}) +} + +// LogWriter log writer interface +type LogWriter interface { + Println(v ...interface{}) +} + +// Logger default logger +type Logger struct { + LogWriter +} + +// Print format & print log +func (logger Logger) Print(values ...interface{}) { + logger.Println(LogFormatter(values...)...) +} diff --git a/vendor/github.com/jinzhu/gorm/main.go b/vendor/github.com/jinzhu/gorm/main.go new file mode 100644 index 0000000..c26e05c --- /dev/null +++ b/vendor/github.com/jinzhu/gorm/main.go @@ -0,0 +1,777 @@ +package gorm + +import ( + "database/sql" + "errors" + "fmt" + "reflect" + "strings" + "time" +) + +// DB contains information for current db connection +type DB struct { + Value interface{} + Error error + RowsAffected int64 + + // single db + db SQLCommon + blockGlobalUpdate bool + logMode int + logger logger + search *search + values map[string]interface{} + + // global db + parent *DB + callbacks *Callback + dialect Dialect + singularTable bool +} + +// Open initialize a new db connection, need to import driver first, e.g: +// +// import _ "github.com/go-sql-driver/mysql" +// func main() { +// db, err := gorm.Open("mysql", "user:password@/dbname?charset=utf8&parseTime=True&loc=Local") +// } +// GORM has wrapped some drivers, for easier to remember driver's import path, so you could import the mysql driver with +// import _ "github.com/jinzhu/gorm/dialects/mysql" +// // import _ "github.com/jinzhu/gorm/dialects/postgres" +// // import _ "github.com/jinzhu/gorm/dialects/sqlite" +// // import _ "github.com/jinzhu/gorm/dialects/mssql" +func Open(dialect string, args ...interface{}) (db *DB, err error) { + if len(args) == 0 { + err = errors.New("invalid database source") + return nil, err + } + var source string + var dbSQL SQLCommon + + switch value := args[0].(type) { + case string: + var driver = dialect + if len(args) == 1 { + source = value + } else if len(args) >= 2 { + driver = value + source = args[1].(string) + } + dbSQL, err = sql.Open(driver, source) + case SQLCommon: + dbSQL = value + } + + db = &DB{ + db: dbSQL, + logger: defaultLogger, + values: map[string]interface{}{}, + callbacks: DefaultCallback, + dialect: newDialect(dialect, dbSQL), + } + db.parent = db + if err != nil { + return + } + // Send a ping to make sure the database connection is alive. + if d, ok := dbSQL.(*sql.DB); ok { + if err = d.Ping(); err != nil { + d.Close() + } + } + return +} + +// New clone a new db connection without search conditions +func (s *DB) New() *DB { + clone := s.clone() + clone.search = nil + clone.Value = nil + return clone +} + +type closer interface { + Close() error +} + +// Close close current db connection. 
If database connection is not an io.Closer, returns an error. +func (s *DB) Close() error { + if db, ok := s.parent.db.(closer); ok { + return db.Close() + } + return errors.New("can't close current db") +} + +// DB get `*sql.DB` from current connection +// If the underlying database connection is not a *sql.DB, returns nil +func (s *DB) DB() *sql.DB { + db, _ := s.db.(*sql.DB) + return db +} + +// CommonDB return the underlying `*sql.DB` or `*sql.Tx` instance, mainly intended to allow coexistence with legacy non-GORM code. +func (s *DB) CommonDB() SQLCommon { + return s.db +} + +// Dialect get dialect +func (s *DB) Dialect() Dialect { + return s.parent.dialect +} + +// Callback return `Callbacks` container, you could add/change/delete callbacks with it +// db.Callback().Create().Register("update_created_at", updateCreated) +// Refer https://jinzhu.github.io/gorm/development.html#callbacks +func (s *DB) Callback() *Callback { + s.parent.callbacks = s.parent.callbacks.clone() + return s.parent.callbacks +} + +// SetLogger replace default logger +func (s *DB) SetLogger(log logger) { + s.logger = log +} + +// LogMode set log mode, `true` for detailed logs, `false` for no log, default, will only print error logs +func (s *DB) LogMode(enable bool) *DB { + if enable { + s.logMode = 2 + } else { + s.logMode = 1 + } + return s +} + +// BlockGlobalUpdate if true, generates an error on update/delete without where clause. +// This is to prevent eventual error with empty objects updates/deletions +func (s *DB) BlockGlobalUpdate(enable bool) *DB { + s.blockGlobalUpdate = enable + return s +} + +// HasBlockGlobalUpdate return state of block +func (s *DB) HasBlockGlobalUpdate() bool { + return s.blockGlobalUpdate +} + +// SingularTable use singular table by default +func (s *DB) SingularTable(enable bool) { + modelStructsMap = newModelStructsMap() + s.parent.singularTable = enable +} + +// NewScope create a scope for current operation +func (s *DB) NewScope(value interface{}) *Scope { + dbClone := s.clone() + dbClone.Value = value + return &Scope{db: dbClone, Search: dbClone.search.clone(), Value: value} +} + +// QueryExpr returns the query as expr object +func (s *DB) QueryExpr() *expr { + scope := s.NewScope(s.Value) + scope.InstanceSet("skip_bindvar", true) + scope.prepareQuerySQL() + + return Expr(scope.SQL, scope.SQLVars...) +} + +// SubQuery returns the query as sub query +func (s *DB) SubQuery() *expr { + scope := s.NewScope(s.Value) + scope.InstanceSet("skip_bindvar", true) + scope.prepareQuerySQL() + + return Expr(fmt.Sprintf("(%v)", scope.SQL), scope.SQLVars...) 
+} + +// Where return a new relation, filter records with given conditions, accepts `map`, `struct` or `string` as conditions, refer http://jinzhu.github.io/gorm/crud.html#query +func (s *DB) Where(query interface{}, args ...interface{}) *DB { + return s.clone().search.Where(query, args...).db +} + +// Or filter records that match before conditions or this one, similar to `Where` +func (s *DB) Or(query interface{}, args ...interface{}) *DB { + return s.clone().search.Or(query, args...).db +} + +// Not filter records that don't match current conditions, similar to `Where` +func (s *DB) Not(query interface{}, args ...interface{}) *DB { + return s.clone().search.Not(query, args...).db +} + +// Limit specify the number of records to be retrieved +func (s *DB) Limit(limit interface{}) *DB { + return s.clone().search.Limit(limit).db +} + +// Offset specify the number of records to skip before starting to return the records +func (s *DB) Offset(offset interface{}) *DB { + return s.clone().search.Offset(offset).db +} + +// Order specify order when retrieve records from database, set reorder to `true` to overwrite defined conditions +// db.Order("name DESC") +// db.Order("name DESC", true) // reorder +// db.Order(gorm.Expr("name = ? DESC", "first")) // sql expression +func (s *DB) Order(value interface{}, reorder ...bool) *DB { + return s.clone().search.Order(value, reorder...).db +} + +// Select specify fields that you want to retrieve from database when querying, by default, will select all fields; +// When creating/updating, specify fields that you want to save to database +func (s *DB) Select(query interface{}, args ...interface{}) *DB { + return s.clone().search.Select(query, args...).db +} + +// Omit specify fields that you want to ignore when saving to database for creating, updating +func (s *DB) Omit(columns ...string) *DB { + return s.clone().search.Omit(columns...).db +} + +// Group specify the group method on the find +func (s *DB) Group(query string) *DB { + return s.clone().search.Group(query).db +} + +// Having specify HAVING conditions for GROUP BY +func (s *DB) Having(query interface{}, values ...interface{}) *DB { + return s.clone().search.Having(query, values...).db +} + +// Joins specify Joins conditions +// db.Joins("JOIN emails ON emails.user_id = users.id AND emails.email = ?", "jinzhu@example.org").Find(&user) +func (s *DB) Joins(query string, args ...interface{}) *DB { + return s.clone().search.Joins(query, args...).db +} + +// Scopes pass current database connection to arguments `func(*DB) *DB`, which could be used to add conditions dynamically +// func AmountGreaterThan1000(db *gorm.DB) *gorm.DB { +// return db.Where("amount > ?", 1000) +// } +// +// func OrderStatus(status []string) func (db *gorm.DB) *gorm.DB { +// return func (db *gorm.DB) *gorm.DB { +// return db.Scopes(AmountGreaterThan1000).Where("status in (?)", status) +// } +// } +// +// db.Scopes(AmountGreaterThan1000, OrderStatus([]string{"paid", "shipped"})).Find(&orders) +// Refer https://jinzhu.github.io/gorm/crud.html#scopes +func (s *DB) Scopes(funcs ...func(*DB) *DB) *DB { + for _, f := range funcs { + s = f(s) + } + return s +} + +// Unscoped return all record including deleted record, refer Soft Delete https://jinzhu.github.io/gorm/crud.html#soft-delete +func (s *DB) Unscoped() *DB { + return s.clone().search.unscoped().db +} + +// Attrs initialize struct with argument if record not found with `FirstOrInit` https://jinzhu.github.io/gorm/crud.html#firstorinit or `FirstOrCreate` 
https://jinzhu.github.io/gorm/crud.html#firstorcreate +func (s *DB) Attrs(attrs ...interface{}) *DB { + return s.clone().search.Attrs(attrs...).db +} + +// Assign assign result with argument regardless it is found or not with `FirstOrInit` https://jinzhu.github.io/gorm/crud.html#firstorinit or `FirstOrCreate` https://jinzhu.github.io/gorm/crud.html#firstorcreate +func (s *DB) Assign(attrs ...interface{}) *DB { + return s.clone().search.Assign(attrs...).db +} + +// First find first record that match given conditions, order by primary key +func (s *DB) First(out interface{}, where ...interface{}) *DB { + newScope := s.NewScope(out) + newScope.Search.Limit(1) + return newScope.Set("gorm:order_by_primary_key", "ASC"). + inlineCondition(where...).callCallbacks(s.parent.callbacks.queries).db +} + +// Take return a record that match given conditions, the order will depend on the database implementation +func (s *DB) Take(out interface{}, where ...interface{}) *DB { + newScope := s.NewScope(out) + newScope.Search.Limit(1) + return newScope.inlineCondition(where...).callCallbacks(s.parent.callbacks.queries).db +} + +// Last find last record that match given conditions, order by primary key +func (s *DB) Last(out interface{}, where ...interface{}) *DB { + newScope := s.NewScope(out) + newScope.Search.Limit(1) + return newScope.Set("gorm:order_by_primary_key", "DESC"). + inlineCondition(where...).callCallbacks(s.parent.callbacks.queries).db +} + +// Find find records that match given conditions +func (s *DB) Find(out interface{}, where ...interface{}) *DB { + return s.NewScope(out).inlineCondition(where...).callCallbacks(s.parent.callbacks.queries).db +} + +// Scan scan value to a struct +func (s *DB) Scan(dest interface{}) *DB { + return s.NewScope(s.Value).Set("gorm:query_destination", dest).callCallbacks(s.parent.callbacks.queries).db +} + +// Row return `*sql.Row` with given conditions +func (s *DB) Row() *sql.Row { + return s.NewScope(s.Value).row() +} + +// Rows return `*sql.Rows` with given conditions +func (s *DB) Rows() (*sql.Rows, error) { + return s.NewScope(s.Value).rows() +} + +// ScanRows scan `*sql.Rows` to give struct +func (s *DB) ScanRows(rows *sql.Rows, result interface{}) error { + var ( + scope = s.NewScope(result) + clone = scope.db + columns, err = rows.Columns() + ) + + if clone.AddError(err) == nil { + scope.scan(rows, columns, scope.Fields()) + } + + return clone.Error +} + +// Pluck used to query single column from a model as a map +// var ages []int64 +// db.Find(&users).Pluck("age", &ages) +func (s *DB) Pluck(column string, value interface{}) *DB { + return s.NewScope(s.Value).pluck(column, value).db +} + +// Count get how many records for a model +func (s *DB) Count(value interface{}) *DB { + return s.NewScope(s.Value).count(value).db +} + +// Related get related associations +func (s *DB) Related(value interface{}, foreignKeys ...string) *DB { + return s.NewScope(s.Value).related(value, foreignKeys...).db +} + +// FirstOrInit find first matched record or initialize a new one with given conditions (only works with struct, map conditions) +// https://jinzhu.github.io/gorm/crud.html#firstorinit +func (s *DB) FirstOrInit(out interface{}, where ...interface{}) *DB { + c := s.clone() + if result := c.First(out, where...); result.Error != nil { + if !result.RecordNotFound() { + return result + } + c.NewScope(out).inlineCondition(where...).initialize() + } else { + c.NewScope(out).updatedAttrsWithValues(c.search.assignAttrs) + } + return c +} + +// FirstOrCreate find first 
matched record or create a new one with given conditions (only works with struct, map conditions) +// https://jinzhu.github.io/gorm/crud.html#firstorcreate +func (s *DB) FirstOrCreate(out interface{}, where ...interface{}) *DB { + c := s.clone() + if result := s.First(out, where...); result.Error != nil { + if !result.RecordNotFound() { + return result + } + return c.NewScope(out).inlineCondition(where...).initialize().callCallbacks(c.parent.callbacks.creates).db + } else if len(c.search.assignAttrs) > 0 { + return c.NewScope(out).InstanceSet("gorm:update_interface", c.search.assignAttrs).callCallbacks(c.parent.callbacks.updates).db + } + return c +} + +// Update update attributes with callbacks, refer: https://jinzhu.github.io/gorm/crud.html#update +func (s *DB) Update(attrs ...interface{}) *DB { + return s.Updates(toSearchableMap(attrs...), true) +} + +// Updates update attributes with callbacks, refer: https://jinzhu.github.io/gorm/crud.html#update +func (s *DB) Updates(values interface{}, ignoreProtectedAttrs ...bool) *DB { + return s.NewScope(s.Value). + Set("gorm:ignore_protected_attrs", len(ignoreProtectedAttrs) > 0). + InstanceSet("gorm:update_interface", values). + callCallbacks(s.parent.callbacks.updates).db +} + +// UpdateColumn update attributes without callbacks, refer: https://jinzhu.github.io/gorm/crud.html#update +func (s *DB) UpdateColumn(attrs ...interface{}) *DB { + return s.UpdateColumns(toSearchableMap(attrs...)) +} + +// UpdateColumns update attributes without callbacks, refer: https://jinzhu.github.io/gorm/crud.html#update +func (s *DB) UpdateColumns(values interface{}) *DB { + return s.NewScope(s.Value). + Set("gorm:update_column", true). + Set("gorm:save_associations", false). + InstanceSet("gorm:update_interface", values). 
+ callCallbacks(s.parent.callbacks.updates).db +} + +// Save update value in database, if the value doesn't have primary key, will insert it +func (s *DB) Save(value interface{}) *DB { + scope := s.NewScope(value) + if !scope.PrimaryKeyZero() { + newDB := scope.callCallbacks(s.parent.callbacks.updates).db + if newDB.Error == nil && newDB.RowsAffected == 0 { + return s.New().FirstOrCreate(value) + } + return newDB + } + return scope.callCallbacks(s.parent.callbacks.creates).db +} + +// Create insert the value into database +func (s *DB) Create(value interface{}) *DB { + scope := s.NewScope(value) + return scope.callCallbacks(s.parent.callbacks.creates).db +} + +// Delete delete value match given conditions, if the value has primary key, then will including the primary key as condition +func (s *DB) Delete(value interface{}, where ...interface{}) *DB { + return s.NewScope(value).inlineCondition(where...).callCallbacks(s.parent.callbacks.deletes).db +} + +// Raw use raw sql as conditions, won't run it unless invoked by other methods +// db.Raw("SELECT name, age FROM users WHERE name = ?", 3).Scan(&result) +func (s *DB) Raw(sql string, values ...interface{}) *DB { + return s.clone().search.Raw(true).Where(sql, values...).db +} + +// Exec execute raw sql +func (s *DB) Exec(sql string, values ...interface{}) *DB { + scope := s.NewScope(nil) + generatedSQL := scope.buildCondition(map[string]interface{}{"query": sql, "args": values}, true) + generatedSQL = strings.TrimSuffix(strings.TrimPrefix(generatedSQL, "("), ")") + scope.Raw(generatedSQL) + return scope.Exec().db +} + +// Model specify the model you would like to run db operations +// // update all users's name to `hello` +// db.Model(&User{}).Update("name", "hello") +// // if user's primary key is non-blank, will use it as condition, then will only update the user's name to `hello` +// db.Model(&user).Update("name", "hello") +func (s *DB) Model(value interface{}) *DB { + c := s.clone() + c.Value = value + return c +} + +// Table specify the table you would like to run db operations +func (s *DB) Table(name string) *DB { + clone := s.clone() + clone.search.Table(name) + clone.Value = nil + return clone +} + +// Debug start debug mode +func (s *DB) Debug() *DB { + return s.clone().LogMode(true) +} + +// Begin begin a transaction +func (s *DB) Begin() *DB { + c := s.clone() + if db, ok := c.db.(sqlDb); ok && db != nil { + tx, err := db.Begin() + c.db = interface{}(tx).(SQLCommon) + c.AddError(err) + } else { + c.AddError(ErrCantStartTransaction) + } + return c +} + +// Commit commit a transaction +func (s *DB) Commit() *DB { + if db, ok := s.db.(sqlTx); ok && db != nil { + s.AddError(db.Commit()) + } else { + s.AddError(ErrInvalidTransaction) + } + return s +} + +// Rollback rollback a transaction +func (s *DB) Rollback() *DB { + if db, ok := s.db.(sqlTx); ok && db != nil { + s.AddError(db.Rollback()) + } else { + s.AddError(ErrInvalidTransaction) + } + return s +} + +// NewRecord check if value's primary key is blank +func (s *DB) NewRecord(value interface{}) bool { + return s.NewScope(value).PrimaryKeyZero() +} + +// RecordNotFound check if returning ErrRecordNotFound error +func (s *DB) RecordNotFound() bool { + for _, err := range s.GetErrors() { + if err == ErrRecordNotFound { + return true + } + } + return false +} + +// CreateTable create table for models +func (s *DB) CreateTable(models ...interface{}) *DB { + db := s.Unscoped() + for _, model := range models { + db = db.NewScope(model).createTable().db + } + return db +} + +// 
DropTable drop table for models +func (s *DB) DropTable(values ...interface{}) *DB { + db := s.clone() + for _, value := range values { + if tableName, ok := value.(string); ok { + db = db.Table(tableName) + } + + db = db.NewScope(value).dropTable().db + } + return db +} + +// DropTableIfExists drop table if it is exist +func (s *DB) DropTableIfExists(values ...interface{}) *DB { + db := s.clone() + for _, value := range values { + if s.HasTable(value) { + db.AddError(s.DropTable(value).Error) + } + } + return db +} + +// HasTable check has table or not +func (s *DB) HasTable(value interface{}) bool { + var ( + scope = s.NewScope(value) + tableName string + ) + + if name, ok := value.(string); ok { + tableName = name + } else { + tableName = scope.TableName() + } + + has := scope.Dialect().HasTable(tableName) + s.AddError(scope.db.Error) + return has +} + +// AutoMigrate run auto migration for given models, will only add missing fields, won't delete/change current data +func (s *DB) AutoMigrate(values ...interface{}) *DB { + db := s.Unscoped() + for _, value := range values { + db = db.NewScope(value).autoMigrate().db + } + return db +} + +// ModifyColumn modify column to type +func (s *DB) ModifyColumn(column string, typ string) *DB { + scope := s.NewScope(s.Value) + scope.modifyColumn(column, typ) + return scope.db +} + +// DropColumn drop a column +func (s *DB) DropColumn(column string) *DB { + scope := s.NewScope(s.Value) + scope.dropColumn(column) + return scope.db +} + +// AddIndex add index for columns with given name +func (s *DB) AddIndex(indexName string, columns ...string) *DB { + scope := s.Unscoped().NewScope(s.Value) + scope.addIndex(false, indexName, columns...) + return scope.db +} + +// AddUniqueIndex add unique index for columns with given name +func (s *DB) AddUniqueIndex(indexName string, columns ...string) *DB { + scope := s.Unscoped().NewScope(s.Value) + scope.addIndex(true, indexName, columns...) 
+ return scope.db +} + +// RemoveIndex remove index with name +func (s *DB) RemoveIndex(indexName string) *DB { + scope := s.NewScope(s.Value) + scope.removeIndex(indexName) + return scope.db +} + +// AddForeignKey Add foreign key to the given scope, e.g: +// db.Model(&User{}).AddForeignKey("city_id", "cities(id)", "RESTRICT", "RESTRICT") +func (s *DB) AddForeignKey(field string, dest string, onDelete string, onUpdate string) *DB { + scope := s.NewScope(s.Value) + scope.addForeignKey(field, dest, onDelete, onUpdate) + return scope.db +} + +// RemoveForeignKey Remove foreign key from the given scope, e.g: +// db.Model(&User{}).RemoveForeignKey("city_id", "cities(id)") +func (s *DB) RemoveForeignKey(field string, dest string) *DB { + scope := s.clone().NewScope(s.Value) + scope.removeForeignKey(field, dest) + return scope.db +} + +// Association start `Association Mode` to handler relations things easir in that mode, refer: https://jinzhu.github.io/gorm/associations.html#association-mode +func (s *DB) Association(column string) *Association { + var err error + var scope = s.Set("gorm:association:source", s.Value).NewScope(s.Value) + + if primaryField := scope.PrimaryField(); primaryField.IsBlank { + err = errors.New("primary key can't be nil") + } else { + if field, ok := scope.FieldByName(column); ok { + if field.Relationship == nil || len(field.Relationship.ForeignFieldNames) == 0 { + err = fmt.Errorf("invalid association %v for %v", column, scope.IndirectValue().Type()) + } else { + return &Association{scope: scope, column: column, field: field} + } + } else { + err = fmt.Errorf("%v doesn't have column %v", scope.IndirectValue().Type(), column) + } + } + + return &Association{Error: err} +} + +// Preload preload associations with given conditions +// db.Preload("Orders", "state NOT IN (?)", "cancelled").Find(&users) +func (s *DB) Preload(column string, conditions ...interface{}) *DB { + return s.clone().search.Preload(column, conditions...).db +} + +// Set set setting by name, which could be used in callbacks, will clone a new db, and update its setting +func (s *DB) Set(name string, value interface{}) *DB { + return s.clone().InstantSet(name, value) +} + +// InstantSet instant set setting, will affect current db +func (s *DB) InstantSet(name string, value interface{}) *DB { + s.values[name] = value + return s +} + +// Get get setting by name +func (s *DB) Get(name string) (value interface{}, ok bool) { + value, ok = s.values[name] + return +} + +// SetJoinTableHandler set a model's join table handler for a relation +func (s *DB) SetJoinTableHandler(source interface{}, column string, handler JoinTableHandlerInterface) { + scope := s.NewScope(source) + for _, field := range scope.GetModelStruct().StructFields { + if field.Name == column || field.DBName == column { + if many2many := field.TagSettings["MANY2MANY"]; many2many != "" { + source := (&Scope{Value: source}).GetModelStruct().ModelType + destination := (&Scope{Value: reflect.New(field.Struct.Type).Interface()}).GetModelStruct().ModelType + handler.Setup(field.Relationship, many2many, source, destination) + field.Relationship.JoinTableHandler = handler + if table := handler.Table(s); scope.Dialect().HasTable(table) { + s.Table(table).AutoMigrate(handler) + } + } + } + } +} + +// AddError add error to the db +func (s *DB) AddError(err error) error { + if err != nil { + if err != ErrRecordNotFound { + if s.logMode == 0 { + go s.print(fileWithLineNum(), err) + } else { + s.log(err) + } + + errors := Errors(s.GetErrors()) + errors = 
errors.Add(err) + if len(errors) > 1 { + err = errors + } + } + + s.Error = err + } + return err +} + +// GetErrors get happened errors from the db +func (s *DB) GetErrors() []error { + if errs, ok := s.Error.(Errors); ok { + return errs + } else if s.Error != nil { + return []error{s.Error} + } + return []error{} +} + +//////////////////////////////////////////////////////////////////////////////// +// Private Methods For DB +//////////////////////////////////////////////////////////////////////////////// + +func (s *DB) clone() *DB { + db := &DB{ + db: s.db, + parent: s.parent, + logger: s.logger, + logMode: s.logMode, + values: map[string]interface{}{}, + Value: s.Value, + Error: s.Error, + blockGlobalUpdate: s.blockGlobalUpdate, + } + + for key, value := range s.values { + db.values[key] = value + } + + if s.search == nil { + db.search = &search{limit: -1, offset: -1} + } else { + db.search = s.search.clone() + } + + db.search.db = db + return db +} + +func (s *DB) print(v ...interface{}) { + s.logger.Print(v...) +} + +func (s *DB) log(v ...interface{}) { + if s != nil && s.logMode == 2 { + s.print(append([]interface{}{"log", fileWithLineNum()}, v...)...) + } +} + +func (s *DB) slog(sql string, t time.Time, vars ...interface{}) { + if s.logMode == 2 { + s.print("sql", fileWithLineNum(), NowFunc().Sub(t), sql, vars, s.RowsAffected) + } +} diff --git a/vendor/github.com/jinzhu/gorm/main_test.go b/vendor/github.com/jinzhu/gorm/main_test.go new file mode 100644 index 0000000..66c46af --- /dev/null +++ b/vendor/github.com/jinzhu/gorm/main_test.go @@ -0,0 +1,949 @@ +package gorm_test + +import ( + "database/sql" + "database/sql/driver" + "fmt" + "os" + "path/filepath" + "reflect" + "strconv" + "testing" + "time" + + "github.com/erikstmartin/go-testdb" + "github.com/jinzhu/gorm" + _ "github.com/jinzhu/gorm/dialects/mssql" + _ "github.com/jinzhu/gorm/dialects/mysql" + "github.com/jinzhu/gorm/dialects/postgres" + _ "github.com/jinzhu/gorm/dialects/sqlite" + "github.com/jinzhu/now" +) + +var ( + DB *gorm.DB + t1, t2, t3, t4, t5 time.Time +) + +func init() { + var err error + + if DB, err = OpenTestConnection(); err != nil { + panic(fmt.Sprintf("No error should happen when connecting to test database, but got err=%+v", err)) + } + + runMigration() +} + +func OpenTestConnection() (db *gorm.DB, err error) { + dbDSN := os.Getenv("GORM_DSN") + switch os.Getenv("GORM_DIALECT") { + case "mysql": + fmt.Println("testing mysql...") + if dbDSN == "" { + dbDSN = "gorm:gorm@tcp(localhost:9910)/gorm?charset=utf8&parseTime=True" + } + db, err = gorm.Open("mysql", dbDSN) + case "postgres": + fmt.Println("testing postgres...") + if dbDSN == "" { + dbDSN = "user=gorm password=gorm DB.name=gorm port=9920 sslmode=disable" + } + db, err = gorm.Open("postgres", dbDSN) + case "mssql": + // CREATE LOGIN gorm WITH PASSWORD = 'LoremIpsum86'; + // CREATE DATABASE gorm; + // USE gorm; + // CREATE USER gorm FROM LOGIN gorm; + // sp_changedbowner 'gorm'; + fmt.Println("testing mssql...") + if dbDSN == "" { + dbDSN = "sqlserver://gorm:LoremIpsum86@localhost:9930?database=gorm" + } + db, err = gorm.Open("mssql", dbDSN) + default: + fmt.Println("testing sqlite3...") + db, err = gorm.Open("sqlite3", filepath.Join(os.TempDir(), "gorm.db")) + } + + // db.SetLogger(Logger{log.New(os.Stdout, "\r\n", 0)}) + // db.SetLogger(log.New(os.Stdout, "\r\n", 0)) + if debug := os.Getenv("DEBUG"); debug == "true" { + db.LogMode(true) + } else if debug == "false" { + db.LogMode(false) + } + + db.DB().SetMaxIdleConns(10) + + return +} + +func 
TestStringPrimaryKey(t *testing.T) { + type UUIDStruct struct { + ID string `gorm:"primary_key"` + Name string + } + DB.DropTable(&UUIDStruct{}) + DB.AutoMigrate(&UUIDStruct{}) + + data := UUIDStruct{ID: "uuid", Name: "hello"} + if err := DB.Save(&data).Error; err != nil || data.ID != "uuid" || data.Name != "hello" { + t.Errorf("string primary key should not be populated") + } + + data = UUIDStruct{ID: "uuid", Name: "hello world"} + if err := DB.Save(&data).Error; err != nil || data.ID != "uuid" || data.Name != "hello world" { + t.Errorf("string primary key should not be populated") + } +} + +func TestExceptionsWithInvalidSql(t *testing.T) { + var columns []string + if DB.Where("sdsd.zaaa = ?", "sd;;;aa").Pluck("aaa", &columns).Error == nil { + t.Errorf("Should got error with invalid SQL") + } + + if DB.Model(&User{}).Where("sdsd.zaaa = ?", "sd;;;aa").Pluck("aaa", &columns).Error == nil { + t.Errorf("Should got error with invalid SQL") + } + + if DB.Where("sdsd.zaaa = ?", "sd;;;aa").Find(&User{}).Error == nil { + t.Errorf("Should got error with invalid SQL") + } + + var count1, count2 int64 + DB.Model(&User{}).Count(&count1) + if count1 <= 0 { + t.Errorf("Should find some users") + } + + if DB.Where("name = ?", "jinzhu; delete * from users").First(&User{}).Error == nil { + t.Errorf("Should got error with invalid SQL") + } + + DB.Model(&User{}).Count(&count2) + if count1 != count2 { + t.Errorf("No user should not be deleted by invalid SQL") + } +} + +func TestSetTable(t *testing.T) { + DB.Create(getPreparedUser("pluck_user1", "pluck_user")) + DB.Create(getPreparedUser("pluck_user2", "pluck_user")) + DB.Create(getPreparedUser("pluck_user3", "pluck_user")) + + if err := DB.Table("users").Where("role = ?", "pluck_user").Pluck("age", &[]int{}).Error; err != nil { + t.Error("No errors should happen if set table for pluck", err) + } + + var users []User + if DB.Table("users").Find(&[]User{}).Error != nil { + t.Errorf("No errors should happen if set table for find") + } + + if DB.Table("invalid_table").Find(&users).Error == nil { + t.Errorf("Should got error when table is set to an invalid table") + } + + DB.Exec("drop table deleted_users;") + if DB.Table("deleted_users").CreateTable(&User{}).Error != nil { + t.Errorf("Create table with specified table") + } + + DB.Table("deleted_users").Save(&User{Name: "DeletedUser"}) + + var deletedUsers []User + DB.Table("deleted_users").Find(&deletedUsers) + if len(deletedUsers) != 1 { + t.Errorf("Query from specified table") + } + + DB.Save(getPreparedUser("normal_user", "reset_table")) + DB.Table("deleted_users").Save(getPreparedUser("deleted_user", "reset_table")) + var user1, user2, user3 User + DB.Where("role = ?", "reset_table").First(&user1).Table("deleted_users").First(&user2).Table("").First(&user3) + if (user1.Name != "normal_user") || (user2.Name != "deleted_user") || (user3.Name != "normal_user") { + t.Errorf("unset specified table with blank string") + } +} + +type Order struct { +} + +type Cart struct { +} + +func (c Cart) TableName() string { + return "shopping_cart" +} + +func TestHasTable(t *testing.T) { + type Foo struct { + Id int + Stuff string + } + DB.DropTable(&Foo{}) + + // Table should not exist at this point, HasTable should return false + if ok := DB.HasTable("foos"); ok { + t.Errorf("Table should not exist, but does") + } + if ok := DB.HasTable(&Foo{}); ok { + t.Errorf("Table should not exist, but does") + } + + // We create the table + if err := DB.CreateTable(&Foo{}).Error; err != nil { + t.Errorf("Table should be created") + } + 
+ // And now it should exits, and HasTable should return true + if ok := DB.HasTable("foos"); !ok { + t.Errorf("Table should exist, but HasTable informs it does not") + } + if ok := DB.HasTable(&Foo{}); !ok { + t.Errorf("Table should exist, but HasTable informs it does not") + } +} + +func TestTableName(t *testing.T) { + DB := DB.Model("") + if DB.NewScope(Order{}).TableName() != "orders" { + t.Errorf("Order's table name should be orders") + } + + if DB.NewScope(&Order{}).TableName() != "orders" { + t.Errorf("&Order's table name should be orders") + } + + if DB.NewScope([]Order{}).TableName() != "orders" { + t.Errorf("[]Order's table name should be orders") + } + + if DB.NewScope(&[]Order{}).TableName() != "orders" { + t.Errorf("&[]Order's table name should be orders") + } + + DB.SingularTable(true) + if DB.NewScope(Order{}).TableName() != "order" { + t.Errorf("Order's singular table name should be order") + } + + if DB.NewScope(&Order{}).TableName() != "order" { + t.Errorf("&Order's singular table name should be order") + } + + if DB.NewScope([]Order{}).TableName() != "order" { + t.Errorf("[]Order's singular table name should be order") + } + + if DB.NewScope(&[]Order{}).TableName() != "order" { + t.Errorf("&[]Order's singular table name should be order") + } + + if DB.NewScope(&Cart{}).TableName() != "shopping_cart" { + t.Errorf("&Cart's singular table name should be shopping_cart") + } + + if DB.NewScope(Cart{}).TableName() != "shopping_cart" { + t.Errorf("Cart's singular table name should be shopping_cart") + } + + if DB.NewScope(&[]Cart{}).TableName() != "shopping_cart" { + t.Errorf("&[]Cart's singular table name should be shopping_cart") + } + + if DB.NewScope([]Cart{}).TableName() != "shopping_cart" { + t.Errorf("[]Cart's singular table name should be shopping_cart") + } + DB.SingularTable(false) +} + +func TestNullValues(t *testing.T) { + DB.DropTable(&NullValue{}) + DB.AutoMigrate(&NullValue{}) + + if err := DB.Save(&NullValue{ + Name: sql.NullString{String: "hello", Valid: true}, + Gender: &sql.NullString{String: "M", Valid: true}, + Age: sql.NullInt64{Int64: 18, Valid: true}, + Male: sql.NullBool{Bool: true, Valid: true}, + Height: sql.NullFloat64{Float64: 100.11, Valid: true}, + AddedAt: NullTime{Time: time.Now(), Valid: true}, + }).Error; err != nil { + t.Errorf("Not error should raise when test null value") + } + + var nv NullValue + DB.First(&nv, "name = ?", "hello") + + if nv.Name.String != "hello" || nv.Gender.String != "M" || nv.Age.Int64 != 18 || nv.Male.Bool != true || nv.Height.Float64 != 100.11 || nv.AddedAt.Valid != true { + t.Errorf("Should be able to fetch null value") + } + + if err := DB.Save(&NullValue{ + Name: sql.NullString{String: "hello-2", Valid: true}, + Gender: &sql.NullString{String: "F", Valid: true}, + Age: sql.NullInt64{Int64: 18, Valid: false}, + Male: sql.NullBool{Bool: true, Valid: true}, + Height: sql.NullFloat64{Float64: 100.11, Valid: true}, + AddedAt: NullTime{Time: time.Now(), Valid: false}, + }).Error; err != nil { + t.Errorf("Not error should raise when test null value") + } + + var nv2 NullValue + DB.First(&nv2, "name = ?", "hello-2") + if nv2.Name.String != "hello-2" || nv2.Gender.String != "F" || nv2.Age.Int64 != 0 || nv2.Male.Bool != true || nv2.Height.Float64 != 100.11 || nv2.AddedAt.Valid != false { + t.Errorf("Should be able to fetch null value") + } + + if err := DB.Save(&NullValue{ + Name: sql.NullString{String: "hello-3", Valid: false}, + Gender: &sql.NullString{String: "M", Valid: true}, + Age: sql.NullInt64{Int64: 18, Valid: 
false}, + Male: sql.NullBool{Bool: true, Valid: true}, + Height: sql.NullFloat64{Float64: 100.11, Valid: true}, + AddedAt: NullTime{Time: time.Now(), Valid: false}, + }).Error; err == nil { + t.Errorf("Can't save because of name can't be null") + } +} + +func TestNullValuesWithFirstOrCreate(t *testing.T) { + var nv1 = NullValue{ + Name: sql.NullString{String: "first_or_create", Valid: true}, + Gender: &sql.NullString{String: "M", Valid: true}, + } + + var nv2 NullValue + result := DB.Where(nv1).FirstOrCreate(&nv2) + + if result.RowsAffected != 1 { + t.Errorf("RowsAffected should be 1 after create some record") + } + + if result.Error != nil { + t.Errorf("Should not raise any error, but got %v", result.Error) + } + + if nv2.Name.String != "first_or_create" || nv2.Gender.String != "M" { + t.Errorf("first or create with nullvalues") + } + + if err := DB.Where(nv1).Assign(NullValue{Age: sql.NullInt64{Int64: 18, Valid: true}}).FirstOrCreate(&nv2).Error; err != nil { + t.Errorf("Should not raise any error, but got %v", err) + } + + if nv2.Age.Int64 != 18 { + t.Errorf("should update age to 18") + } +} + +func TestTransaction(t *testing.T) { + tx := DB.Begin() + u := User{Name: "transcation"} + if err := tx.Save(&u).Error; err != nil { + t.Errorf("No error should raise") + } + + if err := tx.First(&User{}, "name = ?", "transcation").Error; err != nil { + t.Errorf("Should find saved record") + } + + if sqlTx, ok := tx.CommonDB().(*sql.Tx); !ok || sqlTx == nil { + t.Errorf("Should return the underlying sql.Tx") + } + + tx.Rollback() + + if err := tx.First(&User{}, "name = ?", "transcation").Error; err == nil { + t.Errorf("Should not find record after rollback") + } + + tx2 := DB.Begin() + u2 := User{Name: "transcation-2"} + if err := tx2.Save(&u2).Error; err != nil { + t.Errorf("No error should raise") + } + + if err := tx2.First(&User{}, "name = ?", "transcation-2").Error; err != nil { + t.Errorf("Should find saved record") + } + + tx2.Commit() + + if err := DB.First(&User{}, "name = ?", "transcation-2").Error; err != nil { + t.Errorf("Should be able to find committed record") + } +} + +func TestRow(t *testing.T) { + user1 := User{Name: "RowUser1", Age: 1, Birthday: parseTime("2000-1-1")} + user2 := User{Name: "RowUser2", Age: 10, Birthday: parseTime("2010-1-1")} + user3 := User{Name: "RowUser3", Age: 20, Birthday: parseTime("2020-1-1")} + DB.Save(&user1).Save(&user2).Save(&user3) + + row := DB.Table("users").Where("name = ?", user2.Name).Select("age").Row() + var age int64 + row.Scan(&age) + if age != 10 { + t.Errorf("Scan with Row") + } +} + +func TestRows(t *testing.T) { + user1 := User{Name: "RowsUser1", Age: 1, Birthday: parseTime("2000-1-1")} + user2 := User{Name: "RowsUser2", Age: 10, Birthday: parseTime("2010-1-1")} + user3 := User{Name: "RowsUser3", Age: 20, Birthday: parseTime("2020-1-1")} + DB.Save(&user1).Save(&user2).Save(&user3) + + rows, err := DB.Table("users").Where("name = ? 
or name = ?", user2.Name, user3.Name).Select("name, age").Rows() + if err != nil { + t.Errorf("Not error should happen, got %v", err) + } + + count := 0 + for rows.Next() { + var name string + var age int64 + rows.Scan(&name, &age) + count++ + } + + if count != 2 { + t.Errorf("Should found two records") + } +} + +func TestScanRows(t *testing.T) { + user1 := User{Name: "ScanRowsUser1", Age: 1, Birthday: parseTime("2000-1-1")} + user2 := User{Name: "ScanRowsUser2", Age: 10, Birthday: parseTime("2010-1-1")} + user3 := User{Name: "ScanRowsUser3", Age: 20, Birthday: parseTime("2020-1-1")} + DB.Save(&user1).Save(&user2).Save(&user3) + + rows, err := DB.Table("users").Where("name = ? or name = ?", user2.Name, user3.Name).Select("name, age").Rows() + if err != nil { + t.Errorf("Not error should happen, got %v", err) + } + + type Result struct { + Name string + Age int + } + + var results []Result + for rows.Next() { + var result Result + if err := DB.ScanRows(rows, &result); err != nil { + t.Errorf("should get no error, but got %v", err) + } + results = append(results, result) + } + + if !reflect.DeepEqual(results, []Result{{Name: "ScanRowsUser2", Age: 10}, {Name: "ScanRowsUser3", Age: 20}}) { + t.Errorf("Should find expected results") + } +} + +func TestScan(t *testing.T) { + user1 := User{Name: "ScanUser1", Age: 1, Birthday: parseTime("2000-1-1")} + user2 := User{Name: "ScanUser2", Age: 10, Birthday: parseTime("2010-1-1")} + user3 := User{Name: "ScanUser3", Age: 20, Birthday: parseTime("2020-1-1")} + DB.Save(&user1).Save(&user2).Save(&user3) + + type result struct { + Name string + Age int + } + + var res result + DB.Table("users").Select("name, age").Where("name = ?", user3.Name).Scan(&res) + if res.Name != user3.Name { + t.Errorf("Scan into struct should work") + } + + var doubleAgeRes = &result{} + if err := DB.Table("users").Select("age + age as age").Where("name = ?", user3.Name).Scan(&doubleAgeRes).Error; err != nil { + t.Errorf("Scan to pointer of pointer") + } + if doubleAgeRes.Age != res.Age*2 { + t.Errorf("Scan double age as age") + } + + var ress []result + DB.Table("users").Select("name, age").Where("name in (?)", []string{user2.Name, user3.Name}).Scan(&ress) + if len(ress) != 2 || ress[0].Name != user2.Name || ress[1].Name != user3.Name { + t.Errorf("Scan into struct map") + } +} + +func TestRaw(t *testing.T) { + user1 := User{Name: "ExecRawSqlUser1", Age: 1, Birthday: parseTime("2000-1-1")} + user2 := User{Name: "ExecRawSqlUser2", Age: 10, Birthday: parseTime("2010-1-1")} + user3 := User{Name: "ExecRawSqlUser3", Age: 20, Birthday: parseTime("2020-1-1")} + DB.Save(&user1).Save(&user2).Save(&user3) + + type result struct { + Name string + Email string + } + + var ress []result + DB.Raw("SELECT name, age FROM users WHERE name = ? or name = ?", user2.Name, user3.Name).Scan(&ress) + if len(ress) != 2 || ress[0].Name != user2.Name || ress[1].Name != user3.Name { + t.Errorf("Raw with scan") + } + + rows, _ := DB.Raw("select name, age from users where name = ?", user3.Name).Rows() + count := 0 + for rows.Next() { + count++ + } + if count != 1 { + t.Errorf("Raw with Rows should find one record with name 3") + } + + DB.Exec("update users set name=? 
where name in (?)", "jinzhu", []string{user1.Name, user2.Name, user3.Name}) + if DB.Where("name in (?)", []string{user1.Name, user2.Name, user3.Name}).First(&User{}).Error != gorm.ErrRecordNotFound { + t.Error("Raw sql to update records") + } +} + +func TestGroup(t *testing.T) { + rows, err := DB.Select("name").Table("users").Group("name").Rows() + + if err == nil { + defer rows.Close() + for rows.Next() { + var name string + rows.Scan(&name) + } + } else { + t.Errorf("Should not raise any error") + } +} + +func TestJoins(t *testing.T) { + var user = User{ + Name: "joins", + CreditCard: CreditCard{Number: "411111111111"}, + Emails: []Email{{Email: "join1@example.com"}, {Email: "join2@example.com"}}, + } + DB.Save(&user) + + var users1 []User + DB.Joins("left join emails on emails.user_id = users.id").Where("name = ?", "joins").Find(&users1) + if len(users1) != 2 { + t.Errorf("should find two users using left join") + } + + var users2 []User + DB.Joins("left join emails on emails.user_id = users.id AND emails.email = ?", "join1@example.com").Where("name = ?", "joins").First(&users2) + if len(users2) != 1 { + t.Errorf("should find one users using left join with conditions") + } + + var users3 []User + DB.Joins("join emails on emails.user_id = users.id AND emails.email = ?", "join1@example.com").Joins("join credit_cards on credit_cards.user_id = users.id AND credit_cards.number = ?", "411111111111").Where("name = ?", "joins").First(&users3) + if len(users3) != 1 { + t.Errorf("should find one users using multiple left join conditions") + } + + var users4 []User + DB.Joins("join emails on emails.user_id = users.id AND emails.email = ?", "join1@example.com").Joins("join credit_cards on credit_cards.user_id = users.id AND credit_cards.number = ?", "422222222222").Where("name = ?", "joins").First(&users4) + if len(users4) != 0 { + t.Errorf("should find no user when searching with unexisting credit card") + } + + var users5 []User + db5 := DB.Joins("join emails on emails.user_id = users.id AND emails.email = ?", "join1@example.com").Joins("join credit_cards on credit_cards.user_id = users.id AND credit_cards.number = ?", "411111111111").Where(User{Id: 1}).Where(Email{Id: 1}).Not(Email{Id: 10}).First(&users5) + if db5.Error != nil { + t.Errorf("Should not raise error for join where identical fields in different tables. 
Error: %s", db5.Error.Error()) + } +} + +func TestJoinsWithSelect(t *testing.T) { + type result struct { + Name string + Email string + } + + user := User{ + Name: "joins_with_select", + Emails: []Email{{Email: "join1@example.com"}, {Email: "join2@example.com"}}, + } + DB.Save(&user) + + var results []result + DB.Table("users").Select("name, emails.email").Joins("left join emails on emails.user_id = users.id").Where("name = ?", "joins_with_select").Scan(&results) + if len(results) != 2 || results[0].Email != "join1@example.com" || results[1].Email != "join2@example.com" { + t.Errorf("Should find all two emails with Join select") + } +} + +func TestHaving(t *testing.T) { + rows, err := DB.Select("name, count(*) as total").Table("users").Group("name").Having("name IN (?)", []string{"2", "3"}).Rows() + + if err == nil { + defer rows.Close() + for rows.Next() { + var name string + var total int64 + rows.Scan(&name, &total) + + if name == "2" && total != 1 { + t.Errorf("Should have one user having name 2") + } + if name == "3" && total != 2 { + t.Errorf("Should have two users having name 3") + } + } + } else { + t.Errorf("Should not raise any error") + } +} + +func TestQueryBuilderSubselectInWhere(t *testing.T) { + user := User{Name: "query_expr_select_ruser1", Email: "root@user1.com", Age: 32} + DB.Save(&user) + user = User{Name: "query_expr_select_ruser2", Email: "nobody@user2.com", Age: 16} + DB.Save(&user) + user = User{Name: "query_expr_select_ruser3", Email: "root@user3.com", Age: 64} + DB.Save(&user) + user = User{Name: "query_expr_select_ruser4", Email: "somebody@user3.com", Age: 128} + DB.Save(&user) + + var users []User + DB.Select("*").Where("name IN (?)", DB. + Select("name").Table("users").Where("name LIKE ?", "query_expr_select%").QueryExpr()).Find(&users) + + if len(users) != 4 { + t.Errorf("Four users should be found, instead found %d", len(users)) + } + + DB.Select("*").Where("name LIKE ?", "query_expr_select%").Where("age >= (?)", DB. + Select("AVG(age)").Table("users").Where("name LIKE ?", "query_expr_select%").QueryExpr()).Find(&users) + + if len(users) != 2 { + t.Errorf("Two users should be found, instead found %d", len(users)) + } +} + +func TestQueryBuilderRawQueryWithSubquery(t *testing.T) { + user := User{Name: "subquery_test_user1", Age: 10} + DB.Save(&user) + user = User{Name: "subquery_test_user2", Age: 11} + DB.Save(&user) + user = User{Name: "subquery_test_user3", Age: 12} + DB.Save(&user) + + var count int + err := DB.Raw("select count(*) from (?) tmp", + DB.Table("users"). + Select("name"). + Where("age >= ? and name in (?)", 10, []string{"subquery_test_user1", "subquery_test_user2"}). + Group("name"). + QueryExpr(), + ).Count(&count).Error + + if err != nil { + t.Errorf("Expected to get no errors, but got %v", err) + } + if count != 2 { + t.Errorf("Row count must be 2, instead got %d", count) + } + + err = DB.Raw("select count(*) from (?) tmp", + DB.Table("users"). + Select("name"). + Where("name LIKE ?", "subquery_test%"). + Not("age <= ?", 10).Not("name in (?)", []string{"subquery_test_user1", "subquery_test_user2"}). + Group("name"). 
+ QueryExpr(), + ).Count(&count).Error + + if err != nil { + t.Errorf("Expected to get no errors, but got %v", err) + } + if count != 1 { + t.Errorf("Row count must be 1, instead got %d", count) + } +} + +func TestQueryBuilderSubselectInHaving(t *testing.T) { + user := User{Name: "query_expr_having_ruser1", Email: "root@user1.com", Age: 64} + DB.Save(&user) + user = User{Name: "query_expr_having_ruser2", Email: "root@user2.com", Age: 128} + DB.Save(&user) + user = User{Name: "query_expr_having_ruser3", Email: "root@user1.com", Age: 64} + DB.Save(&user) + user = User{Name: "query_expr_having_ruser4", Email: "root@user2.com", Age: 128} + DB.Save(&user) + + var users []User + DB.Select("AVG(age) as avgage").Where("name LIKE ?", "query_expr_having_%").Group("email").Having("AVG(age) > (?)", DB. + Select("AVG(age)").Where("name LIKE ?", "query_expr_having_%").Table("users").QueryExpr()).Find(&users) + + if len(users) != 1 { + t.Errorf("Two user group should be found, instead found %d", len(users)) + } +} + +func DialectHasTzSupport() bool { + // NB: mssql and FoundationDB do not support time zones. + if dialect := os.Getenv("GORM_DIALECT"); dialect == "foundation" { + return false + } + return true +} + +func TestTimeWithZone(t *testing.T) { + var format = "2006-01-02 15:04:05 -0700" + var times []time.Time + GMT8, _ := time.LoadLocation("Asia/Shanghai") + times = append(times, time.Date(2013, 02, 19, 1, 51, 49, 123456789, GMT8)) + times = append(times, time.Date(2013, 02, 18, 17, 51, 49, 123456789, time.UTC)) + + for index, vtime := range times { + name := "time_with_zone_" + strconv.Itoa(index) + user := User{Name: name, Birthday: &vtime} + + if !DialectHasTzSupport() { + // If our driver dialect doesn't support TZ's, just use UTC for everything here. + utcBirthday := user.Birthday.UTC() + user.Birthday = &utcBirthday + } + + DB.Save(&user) + expectedBirthday := "2013-02-18 17:51:49 +0000" + foundBirthday := user.Birthday.UTC().Format(format) + if foundBirthday != expectedBirthday { + t.Errorf("User's birthday should not be changed after save for name=%s, expected bday=%+v but actual value=%+v", name, expectedBirthday, foundBirthday) + } + + var findUser, findUser2, findUser3 User + DB.First(&findUser, "name = ?", name) + foundBirthday = findUser.Birthday.UTC().Format(format) + if foundBirthday != expectedBirthday { + t.Errorf("User's birthday should not be changed after find for name=%s, expected bday=%+v but actual value=%+v", name, expectedBirthday, foundBirthday) + } + + if DB.Where("id = ? AND birthday >= ?", findUser.Id, user.Birthday.Add(-time.Minute)).First(&findUser2).RecordNotFound() { + t.Errorf("User should be found") + } + + if !DB.Where("id = ? 
AND birthday >= ?", findUser.Id, user.Birthday.Add(time.Minute)).First(&findUser3).RecordNotFound() { + t.Errorf("User should not be found") + } + } +} + +func TestHstore(t *testing.T) { + type Details struct { + Id int64 + Bulk postgres.Hstore + } + + if dialect := os.Getenv("GORM_DIALECT"); dialect != "postgres" { + t.Skip() + } + + if err := DB.Exec("CREATE EXTENSION IF NOT EXISTS hstore").Error; err != nil { + fmt.Println("\033[31mHINT: Must be superuser to create hstore extension (ALTER USER gorm WITH SUPERUSER;)\033[0m") + panic(fmt.Sprintf("No error should happen when create hstore extension, but got %+v", err)) + } + + DB.Exec("drop table details") + + if err := DB.CreateTable(&Details{}).Error; err != nil { + panic(fmt.Sprintf("No error should happen when create table, but got %+v", err)) + } + + bankAccountId, phoneNumber, opinion := "123456", "14151321232", "sharkbait" + bulk := map[string]*string{ + "bankAccountId": &bankAccountId, + "phoneNumber": &phoneNumber, + "opinion": &opinion, + } + d := Details{Bulk: bulk} + DB.Save(&d) + + var d2 Details + if err := DB.First(&d2).Error; err != nil { + t.Errorf("Got error when tried to fetch details: %+v", err) + } + + for k := range bulk { + if r, ok := d2.Bulk[k]; ok { + if res, _ := bulk[k]; *res != *r { + t.Errorf("Details should be equal") + } + } else { + t.Errorf("Details should be existed") + } + } +} + +func TestSetAndGet(t *testing.T) { + if value, ok := DB.Set("hello", "world").Get("hello"); !ok { + t.Errorf("Should be able to get setting after set") + } else { + if value.(string) != "world" { + t.Errorf("Setted value should not be changed") + } + } + + if _, ok := DB.Get("non_existing"); ok { + t.Errorf("Get non existing key should return error") + } +} + +func TestCompatibilityMode(t *testing.T) { + DB, _ := gorm.Open("testdb", "") + testdb.SetQueryFunc(func(query string) (driver.Rows, error) { + columns := []string{"id", "name", "age"} + result := ` + 1,Tim,20 + 2,Joe,25 + 3,Bob,30 + ` + return testdb.RowsFromCSVString(columns, result), nil + }) + + var users []User + DB.Find(&users) + if (users[0].Name != "Tim") || len(users) != 3 { + t.Errorf("Unexcepted result returned") + } +} + +func TestOpenExistingDB(t *testing.T) { + DB.Save(&User{Name: "jnfeinstein"}) + dialect := os.Getenv("GORM_DIALECT") + + db, err := gorm.Open(dialect, DB.DB()) + if err != nil { + t.Errorf("Should have wrapped the existing DB connection") + } + + var user User + if db.Where("name = ?", "jnfeinstein").First(&user).Error == gorm.ErrRecordNotFound { + t.Errorf("Should have found existing record") + } +} + +func TestDdlErrors(t *testing.T) { + var err error + + if err = DB.Close(); err != nil { + t.Errorf("Closing DDL test db connection err=%s", err) + } + defer func() { + // Reopen DB connection. 
+ if DB, err = OpenTestConnection(); err != nil { + t.Fatalf("Failed re-opening db connection: %s", err) + } + }() + + if err := DB.Find(&User{}).Error; err == nil { + t.Errorf("Expected operation on closed db to produce an error, but err was nil") + } +} + +func TestOpenWithOneParameter(t *testing.T) { + db, err := gorm.Open("dialect") + if db != nil { + t.Error("Open with one parameter returned non nil for db") + } + if err == nil { + t.Error("Open with one parameter returned err as nil") + } +} + +func TestBlockGlobalUpdate(t *testing.T) { + db := DB.New() + db.Create(&Toy{Name: "Stuffed Animal", OwnerType: "Nobody"}) + + err := db.Model(&Toy{}).Update("OwnerType", "Human").Error + if err != nil { + t.Error("Unexpected error on global update") + } + + err = db.Delete(&Toy{}).Error + if err != nil { + t.Error("Unexpected error on global delete") + } + + db.BlockGlobalUpdate(true) + + db.Create(&Toy{Name: "Stuffed Animal", OwnerType: "Nobody"}) + + err = db.Model(&Toy{}).Update("OwnerType", "Human").Error + if err == nil { + t.Error("Expected error on global update") + } + + err = db.Model(&Toy{}).Where(&Toy{OwnerType: "Martian"}).Update("OwnerType", "Astronaut").Error + if err != nil { + t.Error("Unxpected error on conditional update") + } + + err = db.Delete(&Toy{}).Error + if err == nil { + t.Error("Expected error on global delete") + } + err = db.Where(&Toy{OwnerType: "Martian"}).Delete(&Toy{}).Error + if err != nil { + t.Error("Unexpected error on conditional delete") + } +} + +func BenchmarkGorm(b *testing.B) { + b.N = 2000 + for x := 0; x < b.N; x++ { + e := strconv.Itoa(x) + "benchmark@example.org" + now := time.Now() + email := EmailWithIdx{Email: e, UserAgent: "pc", RegisteredAt: &now} + // Insert + DB.Save(&email) + // Query + DB.First(&EmailWithIdx{}, "email = ?", e) + // Update + DB.Model(&email).UpdateColumn("email", "new-"+e) + // Delete + DB.Delete(&email) + } +} + +func BenchmarkRawSql(b *testing.B) { + DB, _ := sql.Open("postgres", "user=gorm DB.ame=gorm sslmode=disable") + DB.SetMaxIdleConns(10) + insertSql := "INSERT INTO emails (user_id,email,user_agent,registered_at,created_at,updated_at) VALUES ($1,$2,$3,$4,$5,$6) RETURNING id" + querySql := "SELECT * FROM emails WHERE email = $1 ORDER BY id LIMIT 1" + updateSql := "UPDATE emails SET email = $1, updated_at = $2 WHERE id = $3" + deleteSql := "DELETE FROM orders WHERE id = $1" + + b.N = 2000 + for x := 0; x < b.N; x++ { + var id int64 + e := strconv.Itoa(x) + "benchmark@example.org" + now := time.Now() + email := EmailWithIdx{Email: e, UserAgent: "pc", RegisteredAt: &now} + // Insert + DB.QueryRow(insertSql, email.UserId, email.Email, email.UserAgent, email.RegisteredAt, time.Now(), time.Now()).Scan(&id) + // Query + rows, _ := DB.Query(querySql, email.Email) + rows.Close() + // Update + DB.Exec(updateSql, "new-"+e, time.Now(), id) + // Delete + DB.Exec(deleteSql, id) + } +} + +func parseTime(str string) *time.Time { + t := now.New(time.Now().UTC()).MustParse(str) + return &t +} diff --git a/vendor/github.com/jinzhu/gorm/migration_test.go b/vendor/github.com/jinzhu/gorm/migration_test.go new file mode 100644 index 0000000..7c69448 --- /dev/null +++ b/vendor/github.com/jinzhu/gorm/migration_test.go @@ -0,0 +1,485 @@ +package gorm_test + +import ( + "database/sql" + "database/sql/driver" + "errors" + "fmt" + "os" + "reflect" + "strconv" + "testing" + "time" + + "github.com/jinzhu/gorm" +) + +type User struct { + Id int64 + Age int64 + UserNum Num + Name string `sql:"size:255"` + Email string + Birthday *time.Time // 
Time + CreatedAt time.Time // CreatedAt: Time of record is created, will be insert automatically + UpdatedAt time.Time // UpdatedAt: Time of record is updated, will be updated automatically + Emails []Email // Embedded structs + BillingAddress Address // Embedded struct + BillingAddressID sql.NullInt64 // Embedded struct's foreign key + ShippingAddress Address // Embedded struct + ShippingAddressId int64 // Embedded struct's foreign key + CreditCard CreditCard + Latitude float64 + Languages []Language `gorm:"many2many:user_languages;"` + CompanyID *int + Company Company + Role Role + Password EncryptedData + PasswordHash []byte + IgnoreMe int64 `sql:"-"` + IgnoreStringSlice []string `sql:"-"` + Ignored struct{ Name string } `sql:"-"` + IgnoredPointer *User `sql:"-"` +} + +type NotSoLongTableName struct { + Id int64 + ReallyLongThingID int64 + ReallyLongThing ReallyLongTableNameToTestMySQLNameLengthLimit +} + +type ReallyLongTableNameToTestMySQLNameLengthLimit struct { + Id int64 +} + +type ReallyLongThingThatReferencesShort struct { + Id int64 + ShortID int64 + Short Short +} + +type Short struct { + Id int64 +} + +type CreditCard struct { + ID int8 + Number string + UserId sql.NullInt64 + CreatedAt time.Time `sql:"not null"` + UpdatedAt time.Time + DeletedAt *time.Time `sql:"column:deleted_time"` +} + +type Email struct { + Id int16 + UserId int + Email string `sql:"type:varchar(100);"` + CreatedAt time.Time + UpdatedAt time.Time +} + +type Address struct { + ID int + Address1 string + Address2 string + Post string + CreatedAt time.Time + UpdatedAt time.Time + DeletedAt *time.Time +} + +type Language struct { + gorm.Model + Name string + Users []User `gorm:"many2many:user_languages;"` +} + +type Product struct { + Id int64 + Code string + Price int64 + CreatedAt time.Time + UpdatedAt time.Time + AfterFindCallTimes int64 + BeforeCreateCallTimes int64 + AfterCreateCallTimes int64 + BeforeUpdateCallTimes int64 + AfterUpdateCallTimes int64 + BeforeSaveCallTimes int64 + AfterSaveCallTimes int64 + BeforeDeleteCallTimes int64 + AfterDeleteCallTimes int64 +} + +type Company struct { + Id int64 + Name string + Owner *User `sql:"-"` +} + +type EncryptedData []byte + +func (data *EncryptedData) Scan(value interface{}) error { + if b, ok := value.([]byte); ok { + if len(b) < 3 || b[0] != '*' || b[1] != '*' || b[2] != '*' { + return errors.New("Too short") + } + + *data = b[3:] + return nil + } + + return errors.New("Bytes expected") +} + +func (data EncryptedData) Value() (driver.Value, error) { + if len(data) > 0 && data[0] == 'x' { + //needed to test failures + return nil, errors.New("Should not start with 'x'") + } + + //prepend asterisks + return append([]byte("***"), data...), nil +} + +type Role struct { + Name string `gorm:"size:256"` +} + +func (role *Role) Scan(value interface{}) error { + if b, ok := value.([]uint8); ok { + role.Name = string(b) + } else { + role.Name = value.(string) + } + return nil +} + +func (role Role) Value() (driver.Value, error) { + return role.Name, nil +} + +func (role Role) IsAdmin() bool { + return role.Name == "admin" +} + +type Num int64 + +func (i *Num) Scan(src interface{}) error { + switch s := src.(type) { + case []byte: + n, _ := strconv.Atoi(string(s)) + *i = Num(n) + case int64: + *i = Num(s) + default: + return errors.New("Cannot scan NamedInt from " + reflect.ValueOf(src).String()) + } + return nil +} + +type Animal struct { + Counter uint64 `gorm:"primary_key:yes"` + Name string `sql:"DEFAULT:'galeone'"` + From string //test reserved sql keyword as 
field name + Age time.Time `sql:"DEFAULT:current_timestamp"` + unexported string // unexported value + CreatedAt time.Time + UpdatedAt time.Time +} + +type JoinTable struct { + From uint64 + To uint64 + Time time.Time `sql:"default: null"` +} + +type Post struct { + Id int64 + CategoryId sql.NullInt64 + MainCategoryId int64 + Title string + Body string + Comments []*Comment + Category Category + MainCategory Category +} + +type Category struct { + gorm.Model + Name string + + Categories []Category + CategoryID *uint +} + +type Comment struct { + gorm.Model + PostId int64 + Content string + Post Post +} + +// Scanner +type NullValue struct { + Id int64 + Name sql.NullString `sql:"not null"` + Gender *sql.NullString `sql:"not null"` + Age sql.NullInt64 + Male sql.NullBool + Height sql.NullFloat64 + AddedAt NullTime +} + +type NullTime struct { + Time time.Time + Valid bool +} + +func (nt *NullTime) Scan(value interface{}) error { + if value == nil { + nt.Valid = false + return nil + } + nt.Time, nt.Valid = value.(time.Time), true + return nil +} + +func (nt NullTime) Value() (driver.Value, error) { + if !nt.Valid { + return nil, nil + } + return nt.Time, nil +} + +func getPreparedUser(name string, role string) *User { + var company Company + DB.Where(Company{Name: role}).FirstOrCreate(&company) + + return &User{ + Name: name, + Age: 20, + Role: Role{role}, + BillingAddress: Address{Address1: fmt.Sprintf("Billing Address %v", name)}, + ShippingAddress: Address{Address1: fmt.Sprintf("Shipping Address %v", name)}, + CreditCard: CreditCard{Number: fmt.Sprintf("123456%v", name)}, + Emails: []Email{ + {Email: fmt.Sprintf("user_%v@example1.com", name)}, {Email: fmt.Sprintf("user_%v@example2.com", name)}, + }, + Company: company, + Languages: []Language{ + {Name: fmt.Sprintf("lang_1_%v", name)}, + {Name: fmt.Sprintf("lang_2_%v", name)}, + }, + } +} + +func runMigration() { + if err := DB.DropTableIfExists(&User{}).Error; err != nil { + fmt.Printf("Got error when try to delete table users, %+v\n", err) + } + + for _, table := range []string{"animals", "user_languages"} { + DB.Exec(fmt.Sprintf("drop table %v;", table)) + } + + values := []interface{}{&Short{}, &ReallyLongThingThatReferencesShort{}, &ReallyLongTableNameToTestMySQLNameLengthLimit{}, &NotSoLongTableName{}, &Product{}, &Email{}, &Address{}, &CreditCard{}, &Company{}, &Role{}, &Language{}, &HNPost{}, &EngadgetPost{}, &Animal{}, &User{}, &JoinTable{}, &Post{}, &Category{}, &Comment{}, &Cat{}, &Dog{}, &Hamster{}, &Toy{}, &ElementWithIgnoredField{}} + for _, value := range values { + DB.DropTable(value) + } + if err := DB.AutoMigrate(values...).Error; err != nil { + panic(fmt.Sprintf("No error should happen when create table, but got %+v", err)) + } +} + +func TestIndexes(t *testing.T) { + if err := DB.Model(&Email{}).AddIndex("idx_email_email", "email").Error; err != nil { + t.Errorf("Got error when tried to create index: %+v", err) + } + + scope := DB.NewScope(&Email{}) + if !scope.Dialect().HasIndex(scope.TableName(), "idx_email_email") { + t.Errorf("Email should have index idx_email_email") + } + + if err := DB.Model(&Email{}).RemoveIndex("idx_email_email").Error; err != nil { + t.Errorf("Got error when tried to remove index: %+v", err) + } + + if scope.Dialect().HasIndex(scope.TableName(), "idx_email_email") { + t.Errorf("Email's index idx_email_email should be deleted") + } + + if err := DB.Model(&Email{}).AddIndex("idx_email_email_and_user_id", "user_id", "email").Error; err != nil { + t.Errorf("Got error when tried to create index: 
%+v", err) + } + + if !scope.Dialect().HasIndex(scope.TableName(), "idx_email_email_and_user_id") { + t.Errorf("Email should have index idx_email_email_and_user_id") + } + + if err := DB.Model(&Email{}).RemoveIndex("idx_email_email_and_user_id").Error; err != nil { + t.Errorf("Got error when tried to remove index: %+v", err) + } + + if scope.Dialect().HasIndex(scope.TableName(), "idx_email_email_and_user_id") { + t.Errorf("Email's index idx_email_email_and_user_id should be deleted") + } + + if err := DB.Model(&Email{}).AddUniqueIndex("idx_email_email_and_user_id", "user_id", "email").Error; err != nil { + t.Errorf("Got error when tried to create index: %+v", err) + } + + if !scope.Dialect().HasIndex(scope.TableName(), "idx_email_email_and_user_id") { + t.Errorf("Email should have index idx_email_email_and_user_id") + } + + if DB.Save(&User{Name: "unique_indexes", Emails: []Email{{Email: "user1@example.comiii"}, {Email: "user1@example.com"}, {Email: "user1@example.com"}}}).Error == nil { + t.Errorf("Should get to create duplicate record when having unique index") + } + + var user = User{Name: "sample_user"} + DB.Save(&user) + if DB.Model(&user).Association("Emails").Append(Email{Email: "not-1duplicated@gmail.com"}, Email{Email: "not-duplicated2@gmail.com"}).Error != nil { + t.Errorf("Should get no error when append two emails for user") + } + + if DB.Model(&user).Association("Emails").Append(Email{Email: "duplicated@gmail.com"}, Email{Email: "duplicated@gmail.com"}).Error == nil { + t.Errorf("Should get no duplicated email error when insert duplicated emails for a user") + } + + if err := DB.Model(&Email{}).RemoveIndex("idx_email_email_and_user_id").Error; err != nil { + t.Errorf("Got error when tried to remove index: %+v", err) + } + + if scope.Dialect().HasIndex(scope.TableName(), "idx_email_email_and_user_id") { + t.Errorf("Email's index idx_email_email_and_user_id should be deleted") + } + + if DB.Save(&User{Name: "unique_indexes", Emails: []Email{{Email: "user1@example.com"}, {Email: "user1@example.com"}}}).Error != nil { + t.Errorf("Should be able to create duplicated emails after remove unique index") + } +} + +type EmailWithIdx struct { + Id int64 + UserId int64 + Email string `sql:"index:idx_email_agent"` + UserAgent string `sql:"index:idx_email_agent"` + RegisteredAt *time.Time `sql:"unique_index"` + CreatedAt time.Time + UpdatedAt time.Time +} + +func TestAutoMigration(t *testing.T) { + DB.AutoMigrate(&Address{}) + DB.DropTable(&EmailWithIdx{}) + if err := DB.AutoMigrate(&EmailWithIdx{}).Error; err != nil { + t.Errorf("Auto Migrate should not raise any error") + } + + now := time.Now() + DB.Save(&EmailWithIdx{Email: "jinzhu@example.org", UserAgent: "pc", RegisteredAt: &now}) + + scope := DB.NewScope(&EmailWithIdx{}) + if !scope.Dialect().HasIndex(scope.TableName(), "idx_email_agent") { + t.Errorf("Failed to create index") + } + + if !scope.Dialect().HasIndex(scope.TableName(), "uix_email_with_idxes_registered_at") { + t.Errorf("Failed to create index") + } + + var bigemail EmailWithIdx + DB.First(&bigemail, "user_agent = ?", "pc") + if bigemail.Email != "jinzhu@example.org" || bigemail.UserAgent != "pc" || bigemail.RegisteredAt.IsZero() { + t.Error("Big Emails should be saved and fetched correctly") + } +} + +type MultipleIndexes struct { + ID int64 + UserID int64 `sql:"unique_index:uix_multipleindexes_user_name,uix_multipleindexes_user_email;index:idx_multipleindexes_user_other"` + Name string `sql:"unique_index:uix_multipleindexes_user_name"` + Email string 
`sql:"unique_index:,uix_multipleindexes_user_email"` + Other string `sql:"index:,idx_multipleindexes_user_other"` +} + +func TestMultipleIndexes(t *testing.T) { + if err := DB.DropTableIfExists(&MultipleIndexes{}).Error; err != nil { + fmt.Printf("Got error when try to delete table multiple_indexes, %+v\n", err) + } + + DB.AutoMigrate(&MultipleIndexes{}) + if err := DB.AutoMigrate(&EmailWithIdx{}).Error; err != nil { + t.Errorf("Auto Migrate should not raise any error") + } + + DB.Save(&MultipleIndexes{UserID: 1, Name: "jinzhu", Email: "jinzhu@example.org", Other: "foo"}) + + scope := DB.NewScope(&MultipleIndexes{}) + if !scope.Dialect().HasIndex(scope.TableName(), "uix_multipleindexes_user_name") { + t.Errorf("Failed to create index") + } + + if !scope.Dialect().HasIndex(scope.TableName(), "uix_multipleindexes_user_email") { + t.Errorf("Failed to create index") + } + + if !scope.Dialect().HasIndex(scope.TableName(), "uix_multiple_indexes_email") { + t.Errorf("Failed to create index") + } + + if !scope.Dialect().HasIndex(scope.TableName(), "idx_multipleindexes_user_other") { + t.Errorf("Failed to create index") + } + + if !scope.Dialect().HasIndex(scope.TableName(), "idx_multiple_indexes_other") { + t.Errorf("Failed to create index") + } + + var mutipleIndexes MultipleIndexes + DB.First(&mutipleIndexes, "name = ?", "jinzhu") + if mutipleIndexes.Email != "jinzhu@example.org" || mutipleIndexes.Name != "jinzhu" { + t.Error("MutipleIndexes should be saved and fetched correctly") + } + + // Check unique constraints + if err := DB.Save(&MultipleIndexes{UserID: 1, Name: "name1", Email: "jinzhu@example.org", Other: "foo"}).Error; err == nil { + t.Error("MultipleIndexes unique index failed") + } + + if err := DB.Save(&MultipleIndexes{UserID: 1, Name: "name1", Email: "foo@example.org", Other: "foo"}).Error; err != nil { + t.Error("MultipleIndexes unique index failed") + } + + if err := DB.Save(&MultipleIndexes{UserID: 2, Name: "name1", Email: "jinzhu@example.org", Other: "foo"}).Error; err == nil { + t.Error("MultipleIndexes unique index failed") + } + + if err := DB.Save(&MultipleIndexes{UserID: 2, Name: "name1", Email: "foo2@example.org", Other: "foo"}).Error; err != nil { + t.Error("MultipleIndexes unique index failed") + } +} + +func TestModifyColumnType(t *testing.T) { + if dialect := os.Getenv("GORM_DIALECT"); dialect != "postgres" && dialect != "mysql" && dialect != "mssql" { + t.Skip("Skipping this because only postgres, mysql and mssql support altering a column type") + } + + type ModifyColumnType struct { + gorm.Model + Name1 string `gorm:"length:100"` + Name2 string `gorm:"length:200"` + } + DB.DropTable(&ModifyColumnType{}) + DB.CreateTable(&ModifyColumnType{}) + + name2Field, _ := DB.NewScope(&ModifyColumnType{}).FieldByName("Name2") + name2Type := DB.Dialect().DataTypeOf(name2Field.StructField) + + if err := DB.Model(&ModifyColumnType{}).ModifyColumn("name1", name2Type).Error; err != nil { + t.Errorf("No error should happen when ModifyColumn, but got %v", err) + } +} diff --git a/vendor/github.com/jinzhu/gorm/model.go b/vendor/github.com/jinzhu/gorm/model.go new file mode 100644 index 0000000..f37ff7e --- /dev/null +++ b/vendor/github.com/jinzhu/gorm/model.go @@ -0,0 +1,14 @@ +package gorm + +import "time" + +// Model base model definition, including fields `ID`, `CreatedAt`, `UpdatedAt`, `DeletedAt`, which could be embedded in your models +// type User struct { +// gorm.Model +// } +type Model struct { + ID uint `gorm:"primary_key"` + CreatedAt time.Time + UpdatedAt time.Time + 
DeletedAt *time.Time `sql:"index"` +} diff --git a/vendor/github.com/jinzhu/gorm/model_struct.go b/vendor/github.com/jinzhu/gorm/model_struct.go new file mode 100644 index 0000000..f571e2e --- /dev/null +++ b/vendor/github.com/jinzhu/gorm/model_struct.go @@ -0,0 +1,629 @@ +package gorm + +import ( + "database/sql" + "errors" + "go/ast" + "reflect" + "strings" + "sync" + "time" + + "github.com/jinzhu/inflection" +) + +// DefaultTableNameHandler default table name handler +var DefaultTableNameHandler = func(db *DB, defaultTableName string) string { + return defaultTableName +} + +type safeModelStructsMap struct { + m map[reflect.Type]*ModelStruct + l *sync.RWMutex +} + +func (s *safeModelStructsMap) Set(key reflect.Type, value *ModelStruct) { + s.l.Lock() + defer s.l.Unlock() + s.m[key] = value +} + +func (s *safeModelStructsMap) Get(key reflect.Type) *ModelStruct { + s.l.RLock() + defer s.l.RUnlock() + return s.m[key] +} + +func newModelStructsMap() *safeModelStructsMap { + return &safeModelStructsMap{l: new(sync.RWMutex), m: make(map[reflect.Type]*ModelStruct)} +} + +var modelStructsMap = newModelStructsMap() + +// ModelStruct model definition +type ModelStruct struct { + PrimaryFields []*StructField + StructFields []*StructField + ModelType reflect.Type + defaultTableName string +} + +// TableName get model's table name +func (s *ModelStruct) TableName(db *DB) string { + if s.defaultTableName == "" && db != nil && s.ModelType != nil { + // Set default table name + if tabler, ok := reflect.New(s.ModelType).Interface().(tabler); ok { + s.defaultTableName = tabler.TableName() + } else { + tableName := ToDBName(s.ModelType.Name()) + if db == nil || !db.parent.singularTable { + tableName = inflection.Plural(tableName) + } + s.defaultTableName = tableName + } + } + + return DefaultTableNameHandler(db, s.defaultTableName) +} + +// StructField model field's struct definition +type StructField struct { + DBName string + Name string + Names []string + IsPrimaryKey bool + IsNormal bool + IsIgnored bool + IsScanner bool + HasDefaultValue bool + Tag reflect.StructTag + TagSettings map[string]string + Struct reflect.StructField + IsForeignKey bool + Relationship *Relationship +} + +func (structField *StructField) clone() *StructField { + clone := &StructField{ + DBName: structField.DBName, + Name: structField.Name, + Names: structField.Names, + IsPrimaryKey: structField.IsPrimaryKey, + IsNormal: structField.IsNormal, + IsIgnored: structField.IsIgnored, + IsScanner: structField.IsScanner, + HasDefaultValue: structField.HasDefaultValue, + Tag: structField.Tag, + TagSettings: map[string]string{}, + Struct: structField.Struct, + IsForeignKey: structField.IsForeignKey, + } + + if structField.Relationship != nil { + relationship := *structField.Relationship + clone.Relationship = &relationship + } + + for key, value := range structField.TagSettings { + clone.TagSettings[key] = value + } + + return clone +} + +// Relationship described the relationship between models +type Relationship struct { + Kind string + PolymorphicType string + PolymorphicDBName string + PolymorphicValue string + ForeignFieldNames []string + ForeignDBNames []string + AssociationForeignFieldNames []string + AssociationForeignDBNames []string + JoinTableHandler JoinTableHandlerInterface +} + +func getForeignField(column string, fields []*StructField) *StructField { + for _, field := range fields { + if field.Name == column || field.DBName == column || field.DBName == ToDBName(column) { + return field + } + } + return nil +} + +// 
GetModelStruct get value's model struct, relationships based on struct and tag definition +func (scope *Scope) GetModelStruct() *ModelStruct { + var modelStruct ModelStruct + // Scope value can't be nil + if scope.Value == nil { + return &modelStruct + } + + reflectType := reflect.ValueOf(scope.Value).Type() + for reflectType.Kind() == reflect.Slice || reflectType.Kind() == reflect.Ptr { + reflectType = reflectType.Elem() + } + + // Scope value need to be a struct + if reflectType.Kind() != reflect.Struct { + return &modelStruct + } + + // Get Cached model struct + if value := modelStructsMap.Get(reflectType); value != nil { + return value + } + + modelStruct.ModelType = reflectType + + // Get all fields + for i := 0; i < reflectType.NumField(); i++ { + if fieldStruct := reflectType.Field(i); ast.IsExported(fieldStruct.Name) { + field := &StructField{ + Struct: fieldStruct, + Name: fieldStruct.Name, + Names: []string{fieldStruct.Name}, + Tag: fieldStruct.Tag, + TagSettings: parseTagSetting(fieldStruct.Tag), + } + + // is ignored field + if _, ok := field.TagSettings["-"]; ok { + field.IsIgnored = true + } else { + if _, ok := field.TagSettings["PRIMARY_KEY"]; ok { + field.IsPrimaryKey = true + modelStruct.PrimaryFields = append(modelStruct.PrimaryFields, field) + } + + if _, ok := field.TagSettings["DEFAULT"]; ok { + field.HasDefaultValue = true + } + + if _, ok := field.TagSettings["AUTO_INCREMENT"]; ok && !field.IsPrimaryKey { + field.HasDefaultValue = true + } + + indirectType := fieldStruct.Type + for indirectType.Kind() == reflect.Ptr { + indirectType = indirectType.Elem() + } + + fieldValue := reflect.New(indirectType).Interface() + if _, isScanner := fieldValue.(sql.Scanner); isScanner { + // is scanner + field.IsScanner, field.IsNormal = true, true + if indirectType.Kind() == reflect.Struct { + for i := 0; i < indirectType.NumField(); i++ { + for key, value := range parseTagSetting(indirectType.Field(i).Tag) { + if _, ok := field.TagSettings[key]; !ok { + field.TagSettings[key] = value + } + } + } + } + } else if _, isTime := fieldValue.(*time.Time); isTime { + // is time + field.IsNormal = true + } else if _, ok := field.TagSettings["EMBEDDED"]; ok || fieldStruct.Anonymous { + // is embedded struct + for _, subField := range scope.New(fieldValue).GetModelStruct().StructFields { + subField = subField.clone() + subField.Names = append([]string{fieldStruct.Name}, subField.Names...) 
+ if prefix, ok := field.TagSettings["EMBEDDED_PREFIX"]; ok { + subField.DBName = prefix + subField.DBName + } + + if subField.IsPrimaryKey { + if _, ok := subField.TagSettings["PRIMARY_KEY"]; ok { + modelStruct.PrimaryFields = append(modelStruct.PrimaryFields, subField) + } else { + subField.IsPrimaryKey = false + } + } + + if subField.Relationship != nil && subField.Relationship.JoinTableHandler != nil { + if joinTableHandler, ok := subField.Relationship.JoinTableHandler.(*JoinTableHandler); ok { + newJoinTableHandler := &JoinTableHandler{} + newJoinTableHandler.Setup(subField.Relationship, joinTableHandler.TableName, reflectType, joinTableHandler.Destination.ModelType) + subField.Relationship.JoinTableHandler = newJoinTableHandler + } + } + + modelStruct.StructFields = append(modelStruct.StructFields, subField) + } + continue + } else { + // build relationships + switch indirectType.Kind() { + case reflect.Slice: + defer func(field *StructField) { + var ( + relationship = &Relationship{} + toScope = scope.New(reflect.New(field.Struct.Type).Interface()) + foreignKeys []string + associationForeignKeys []string + elemType = field.Struct.Type + ) + + if foreignKey := field.TagSettings["FOREIGNKEY"]; foreignKey != "" { + foreignKeys = strings.Split(foreignKey, ",") + } + + if foreignKey := field.TagSettings["ASSOCIATION_FOREIGNKEY"]; foreignKey != "" { + associationForeignKeys = strings.Split(foreignKey, ",") + } else if foreignKey := field.TagSettings["ASSOCIATIONFOREIGNKEY"]; foreignKey != "" { + associationForeignKeys = strings.Split(foreignKey, ",") + } + + for elemType.Kind() == reflect.Slice || elemType.Kind() == reflect.Ptr { + elemType = elemType.Elem() + } + + if elemType.Kind() == reflect.Struct { + if many2many := field.TagSettings["MANY2MANY"]; many2many != "" { + relationship.Kind = "many_to_many" + + { // Foreign Keys for Source + joinTableDBNames := []string{} + + if foreignKey := field.TagSettings["JOINTABLE_FOREIGNKEY"]; foreignKey != "" { + joinTableDBNames = strings.Split(foreignKey, ",") + } + + // if no foreign keys defined with tag + if len(foreignKeys) == 0 { + for _, field := range modelStruct.PrimaryFields { + foreignKeys = append(foreignKeys, field.DBName) + } + } + + for idx, foreignKey := range foreignKeys { + if foreignField := getForeignField(foreignKey, modelStruct.StructFields); foreignField != nil { + // source foreign keys (db names) + relationship.ForeignFieldNames = append(relationship.ForeignFieldNames, foreignField.DBName) + + // setup join table foreign keys for source + if len(joinTableDBNames) > idx { + // if defined join table's foreign key + relationship.ForeignDBNames = append(relationship.ForeignDBNames, joinTableDBNames[idx]) + } else { + defaultJointableForeignKey := ToDBName(reflectType.Name()) + "_" + foreignField.DBName + relationship.ForeignDBNames = append(relationship.ForeignDBNames, defaultJointableForeignKey) + } + } + } + } + + { // Foreign Keys for Association (Destination) + associationJoinTableDBNames := []string{} + + if foreignKey := field.TagSettings["ASSOCIATION_JOINTABLE_FOREIGNKEY"]; foreignKey != "" { + associationJoinTableDBNames = strings.Split(foreignKey, ",") + } + + // if no association foreign keys defined with tag + if len(associationForeignKeys) == 0 { + for _, field := range toScope.PrimaryFields() { + associationForeignKeys = append(associationForeignKeys, field.DBName) + } + } + + for idx, name := range associationForeignKeys { + if field, ok := toScope.FieldByName(name); ok { + // association foreign keys (db 
names) + relationship.AssociationForeignFieldNames = append(relationship.AssociationForeignFieldNames, field.DBName) + + // setup join table foreign keys for association + if len(associationJoinTableDBNames) > idx { + relationship.AssociationForeignDBNames = append(relationship.AssociationForeignDBNames, associationJoinTableDBNames[idx]) + } else { + // join table foreign keys for association + joinTableDBName := ToDBName(elemType.Name()) + "_" + field.DBName + relationship.AssociationForeignDBNames = append(relationship.AssociationForeignDBNames, joinTableDBName) + } + } + } + } + + joinTableHandler := JoinTableHandler{} + joinTableHandler.Setup(relationship, many2many, reflectType, elemType) + relationship.JoinTableHandler = &joinTableHandler + field.Relationship = relationship + } else { + // User has many comments, associationType is User, comment use UserID as foreign key + var associationType = reflectType.Name() + var toFields = toScope.GetStructFields() + relationship.Kind = "has_many" + + if polymorphic := field.TagSettings["POLYMORPHIC"]; polymorphic != "" { + // Dog has many toys, tag polymorphic is Owner, then associationType is Owner + // Toy use OwnerID, OwnerType ('dogs') as foreign key + if polymorphicType := getForeignField(polymorphic+"Type", toFields); polymorphicType != nil { + associationType = polymorphic + relationship.PolymorphicType = polymorphicType.Name + relationship.PolymorphicDBName = polymorphicType.DBName + // if Dog has multiple set of toys set name of the set (instead of default 'dogs') + if value, ok := field.TagSettings["POLYMORPHIC_VALUE"]; ok { + relationship.PolymorphicValue = value + } else { + relationship.PolymorphicValue = scope.TableName() + } + polymorphicType.IsForeignKey = true + } + } + + // if no foreign keys defined with tag + if len(foreignKeys) == 0 { + // if no association foreign keys defined with tag + if len(associationForeignKeys) == 0 { + for _, field := range modelStruct.PrimaryFields { + foreignKeys = append(foreignKeys, associationType+field.Name) + associationForeignKeys = append(associationForeignKeys, field.Name) + } + } else { + // generate foreign keys from defined association foreign keys + for _, scopeFieldName := range associationForeignKeys { + if foreignField := getForeignField(scopeFieldName, modelStruct.StructFields); foreignField != nil { + foreignKeys = append(foreignKeys, associationType+foreignField.Name) + associationForeignKeys = append(associationForeignKeys, foreignField.Name) + } + } + } + } else { + // generate association foreign keys from foreign keys + if len(associationForeignKeys) == 0 { + for _, foreignKey := range foreignKeys { + if strings.HasPrefix(foreignKey, associationType) { + associationForeignKey := strings.TrimPrefix(foreignKey, associationType) + if foreignField := getForeignField(associationForeignKey, modelStruct.StructFields); foreignField != nil { + associationForeignKeys = append(associationForeignKeys, associationForeignKey) + } + } + } + if len(associationForeignKeys) == 0 && len(foreignKeys) == 1 { + associationForeignKeys = []string{scope.PrimaryKey()} + } + } else if len(foreignKeys) != len(associationForeignKeys) { + scope.Err(errors.New("invalid foreign keys, should have same length")) + return + } + } + + for idx, foreignKey := range foreignKeys { + if foreignField := getForeignField(foreignKey, toFields); foreignField != nil { + if associationField := getForeignField(associationForeignKeys[idx], modelStruct.StructFields); associationField != nil { + // source foreign keys + 
foreignField.IsForeignKey = true + relationship.AssociationForeignFieldNames = append(relationship.AssociationForeignFieldNames, associationField.Name) + relationship.AssociationForeignDBNames = append(relationship.AssociationForeignDBNames, associationField.DBName) + + // association foreign keys + relationship.ForeignFieldNames = append(relationship.ForeignFieldNames, foreignField.Name) + relationship.ForeignDBNames = append(relationship.ForeignDBNames, foreignField.DBName) + } + } + } + + if len(relationship.ForeignFieldNames) != 0 { + field.Relationship = relationship + } + } + } else { + field.IsNormal = true + } + }(field) + case reflect.Struct: + defer func(field *StructField) { + var ( + // user has one profile, associationType is User, profile use UserID as foreign key + // user belongs to profile, associationType is Profile, user use ProfileID as foreign key + associationType = reflectType.Name() + relationship = &Relationship{} + toScope = scope.New(reflect.New(field.Struct.Type).Interface()) + toFields = toScope.GetStructFields() + tagForeignKeys []string + tagAssociationForeignKeys []string + ) + + if foreignKey := field.TagSettings["FOREIGNKEY"]; foreignKey != "" { + tagForeignKeys = strings.Split(foreignKey, ",") + } + + if foreignKey := field.TagSettings["ASSOCIATION_FOREIGNKEY"]; foreignKey != "" { + tagAssociationForeignKeys = strings.Split(foreignKey, ",") + } else if foreignKey := field.TagSettings["ASSOCIATIONFOREIGNKEY"]; foreignKey != "" { + tagAssociationForeignKeys = strings.Split(foreignKey, ",") + } + + if polymorphic := field.TagSettings["POLYMORPHIC"]; polymorphic != "" { + // Cat has one toy, tag polymorphic is Owner, then associationType is Owner + // Toy use OwnerID, OwnerType ('cats') as foreign key + if polymorphicType := getForeignField(polymorphic+"Type", toFields); polymorphicType != nil { + associationType = polymorphic + relationship.PolymorphicType = polymorphicType.Name + relationship.PolymorphicDBName = polymorphicType.DBName + // if Cat has several different types of toys set name for each (instead of default 'cats') + if value, ok := field.TagSettings["POLYMORPHIC_VALUE"]; ok { + relationship.PolymorphicValue = value + } else { + relationship.PolymorphicValue = scope.TableName() + } + polymorphicType.IsForeignKey = true + } + } + + // Has One + { + var foreignKeys = tagForeignKeys + var associationForeignKeys = tagAssociationForeignKeys + // if no foreign keys defined with tag + if len(foreignKeys) == 0 { + // if no association foreign keys defined with tag + if len(associationForeignKeys) == 0 { + for _, primaryField := range modelStruct.PrimaryFields { + foreignKeys = append(foreignKeys, associationType+primaryField.Name) + associationForeignKeys = append(associationForeignKeys, primaryField.Name) + } + } else { + // generate foreign keys form association foreign keys + for _, associationForeignKey := range tagAssociationForeignKeys { + if foreignField := getForeignField(associationForeignKey, modelStruct.StructFields); foreignField != nil { + foreignKeys = append(foreignKeys, associationType+foreignField.Name) + associationForeignKeys = append(associationForeignKeys, foreignField.Name) + } + } + } + } else { + // generate association foreign keys from foreign keys + if len(associationForeignKeys) == 0 { + for _, foreignKey := range foreignKeys { + if strings.HasPrefix(foreignKey, associationType) { + associationForeignKey := strings.TrimPrefix(foreignKey, associationType) + if foreignField := getForeignField(associationForeignKey, 
modelStruct.StructFields); foreignField != nil { + associationForeignKeys = append(associationForeignKeys, associationForeignKey) + } + } + } + if len(associationForeignKeys) == 0 && len(foreignKeys) == 1 { + associationForeignKeys = []string{scope.PrimaryKey()} + } + } else if len(foreignKeys) != len(associationForeignKeys) { + scope.Err(errors.New("invalid foreign keys, should have same length")) + return + } + } + + for idx, foreignKey := range foreignKeys { + if foreignField := getForeignField(foreignKey, toFields); foreignField != nil { + if scopeField := getForeignField(associationForeignKeys[idx], modelStruct.StructFields); scopeField != nil { + foreignField.IsForeignKey = true + // source foreign keys + relationship.AssociationForeignFieldNames = append(relationship.AssociationForeignFieldNames, scopeField.Name) + relationship.AssociationForeignDBNames = append(relationship.AssociationForeignDBNames, scopeField.DBName) + + // association foreign keys + relationship.ForeignFieldNames = append(relationship.ForeignFieldNames, foreignField.Name) + relationship.ForeignDBNames = append(relationship.ForeignDBNames, foreignField.DBName) + } + } + } + } + + if len(relationship.ForeignFieldNames) != 0 { + relationship.Kind = "has_one" + field.Relationship = relationship + } else { + var foreignKeys = tagForeignKeys + var associationForeignKeys = tagAssociationForeignKeys + + if len(foreignKeys) == 0 { + // generate foreign keys & association foreign keys + if len(associationForeignKeys) == 0 { + for _, primaryField := range toScope.PrimaryFields() { + foreignKeys = append(foreignKeys, field.Name+primaryField.Name) + associationForeignKeys = append(associationForeignKeys, primaryField.Name) + } + } else { + // generate foreign keys with association foreign keys + for _, associationForeignKey := range associationForeignKeys { + if foreignField := getForeignField(associationForeignKey, toFields); foreignField != nil { + foreignKeys = append(foreignKeys, field.Name+foreignField.Name) + associationForeignKeys = append(associationForeignKeys, foreignField.Name) + } + } + } + } else { + // generate foreign keys & association foreign keys + if len(associationForeignKeys) == 0 { + for _, foreignKey := range foreignKeys { + if strings.HasPrefix(foreignKey, field.Name) { + associationForeignKey := strings.TrimPrefix(foreignKey, field.Name) + if foreignField := getForeignField(associationForeignKey, toFields); foreignField != nil { + associationForeignKeys = append(associationForeignKeys, associationForeignKey) + } + } + } + if len(associationForeignKeys) == 0 && len(foreignKeys) == 1 { + associationForeignKeys = []string{toScope.PrimaryKey()} + } + } else if len(foreignKeys) != len(associationForeignKeys) { + scope.Err(errors.New("invalid foreign keys, should have same length")) + return + } + } + + for idx, foreignKey := range foreignKeys { + if foreignField := getForeignField(foreignKey, modelStruct.StructFields); foreignField != nil { + if associationField := getForeignField(associationForeignKeys[idx], toFields); associationField != nil { + foreignField.IsForeignKey = true + + // association foreign keys + relationship.AssociationForeignFieldNames = append(relationship.AssociationForeignFieldNames, associationField.Name) + relationship.AssociationForeignDBNames = append(relationship.AssociationForeignDBNames, associationField.DBName) + + // source foreign keys + relationship.ForeignFieldNames = append(relationship.ForeignFieldNames, foreignField.Name) + relationship.ForeignDBNames = 
append(relationship.ForeignDBNames, foreignField.DBName) + } + } + } + + if len(relationship.ForeignFieldNames) != 0 { + relationship.Kind = "belongs_to" + field.Relationship = relationship + } + } + }(field) + default: + field.IsNormal = true + } + } + } + + // Even it is ignored, also possible to decode db value into the field + if value, ok := field.TagSettings["COLUMN"]; ok { + field.DBName = value + } else { + field.DBName = ToDBName(fieldStruct.Name) + } + + modelStruct.StructFields = append(modelStruct.StructFields, field) + } + } + + if len(modelStruct.PrimaryFields) == 0 { + if field := getForeignField("id", modelStruct.StructFields); field != nil { + field.IsPrimaryKey = true + modelStruct.PrimaryFields = append(modelStruct.PrimaryFields, field) + } + } + + modelStructsMap.Set(reflectType, &modelStruct) + + return &modelStruct +} + +// GetStructFields get model's field structs +func (scope *Scope) GetStructFields() (fields []*StructField) { + return scope.GetModelStruct().StructFields +} + +func parseTagSetting(tags reflect.StructTag) map[string]string { + setting := map[string]string{} + for _, str := range []string{tags.Get("sql"), tags.Get("gorm")} { + tags := strings.Split(str, ";") + for _, value := range tags { + v := strings.Split(value, ":") + k := strings.TrimSpace(strings.ToUpper(v[0])) + if len(v) >= 2 { + setting[k] = strings.Join(v[1:], ":") + } else { + setting[k] = k + } + } + } + return setting +} diff --git a/vendor/github.com/jinzhu/gorm/multi_primary_keys_test.go b/vendor/github.com/jinzhu/gorm/multi_primary_keys_test.go new file mode 100644 index 0000000..32a1477 --- /dev/null +++ b/vendor/github.com/jinzhu/gorm/multi_primary_keys_test.go @@ -0,0 +1,381 @@ +package gorm_test + +import ( + "os" + "reflect" + "sort" + "testing" +) + +type Blog struct { + ID uint `gorm:"primary_key"` + Locale string `gorm:"primary_key"` + Subject string + Body string + Tags []Tag `gorm:"many2many:blog_tags;"` + SharedTags []Tag `gorm:"many2many:shared_blog_tags;ForeignKey:id;AssociationForeignKey:id"` + LocaleTags []Tag `gorm:"many2many:locale_blog_tags;ForeignKey:id,locale;AssociationForeignKey:id"` +} + +type Tag struct { + ID uint `gorm:"primary_key"` + Locale string `gorm:"primary_key"` + Value string + Blogs []*Blog `gorm:"many2many:blogs_tags"` +} + +func compareTags(tags []Tag, contents []string) bool { + var tagContents []string + for _, tag := range tags { + tagContents = append(tagContents, tag.Value) + } + sort.Strings(tagContents) + sort.Strings(contents) + return reflect.DeepEqual(tagContents, contents) +} + +func TestManyToManyWithMultiPrimaryKeys(t *testing.T) { + if dialect := os.Getenv("GORM_DIALECT"); dialect != "" && dialect != "sqlite" && dialect != "mssql" { + DB.DropTable(&Blog{}, &Tag{}) + DB.DropTable("blog_tags") + DB.CreateTable(&Blog{}, &Tag{}) + blog := Blog{ + Locale: "ZH", + Subject: "subject", + Body: "body", + Tags: []Tag{ + {Locale: "ZH", Value: "tag1"}, + {Locale: "ZH", Value: "tag2"}, + }, + } + + DB.Save(&blog) + if !compareTags(blog.Tags, []string{"tag1", "tag2"}) { + t.Errorf("Blog should has two tags") + } + + // Append + var tag3 = &Tag{Locale: "ZH", Value: "tag3"} + DB.Model(&blog).Association("Tags").Append([]*Tag{tag3}) + if !compareTags(blog.Tags, []string{"tag1", "tag2", "tag3"}) { + t.Errorf("Blog should has three tags after Append") + } + + if DB.Model(&blog).Association("Tags").Count() != 3 { + t.Errorf("Blog should has three tags after Append") + } + + var tags []Tag + DB.Model(&blog).Related(&tags, "Tags") + if !compareTags(tags, 
[]string{"tag1", "tag2", "tag3"}) { + t.Errorf("Should find 3 tags with Related") + } + + var blog1 Blog + DB.Preload("Tags").Find(&blog1) + if !compareTags(blog1.Tags, []string{"tag1", "tag2", "tag3"}) { + t.Errorf("Preload many2many relations") + } + + // Replace + var tag5 = &Tag{Locale: "ZH", Value: "tag5"} + var tag6 = &Tag{Locale: "ZH", Value: "tag6"} + DB.Model(&blog).Association("Tags").Replace(tag5, tag6) + var tags2 []Tag + DB.Model(&blog).Related(&tags2, "Tags") + if !compareTags(tags2, []string{"tag5", "tag6"}) { + t.Errorf("Should find 2 tags after Replace") + } + + if DB.Model(&blog).Association("Tags").Count() != 2 { + t.Errorf("Blog should has three tags after Replace") + } + + // Delete + DB.Model(&blog).Association("Tags").Delete(tag5) + var tags3 []Tag + DB.Model(&blog).Related(&tags3, "Tags") + if !compareTags(tags3, []string{"tag6"}) { + t.Errorf("Should find 1 tags after Delete") + } + + if DB.Model(&blog).Association("Tags").Count() != 1 { + t.Errorf("Blog should has three tags after Delete") + } + + DB.Model(&blog).Association("Tags").Delete(tag3) + var tags4 []Tag + DB.Model(&blog).Related(&tags4, "Tags") + if !compareTags(tags4, []string{"tag6"}) { + t.Errorf("Tag should not be deleted when Delete with a unrelated tag") + } + + // Clear + DB.Model(&blog).Association("Tags").Clear() + if DB.Model(&blog).Association("Tags").Count() != 0 { + t.Errorf("All tags should be cleared") + } + } +} + +func TestManyToManyWithCustomizedForeignKeys(t *testing.T) { + if dialect := os.Getenv("GORM_DIALECT"); dialect != "" && dialect != "sqlite" && dialect != "mssql" { + DB.DropTable(&Blog{}, &Tag{}) + DB.DropTable("shared_blog_tags") + DB.CreateTable(&Blog{}, &Tag{}) + blog := Blog{ + Locale: "ZH", + Subject: "subject", + Body: "body", + SharedTags: []Tag{ + {Locale: "ZH", Value: "tag1"}, + {Locale: "ZH", Value: "tag2"}, + }, + } + DB.Save(&blog) + + blog2 := Blog{ + ID: blog.ID, + Locale: "EN", + } + DB.Create(&blog2) + + if !compareTags(blog.SharedTags, []string{"tag1", "tag2"}) { + t.Errorf("Blog should has two tags") + } + + // Append + var tag3 = &Tag{Locale: "ZH", Value: "tag3"} + DB.Model(&blog).Association("SharedTags").Append([]*Tag{tag3}) + if !compareTags(blog.SharedTags, []string{"tag1", "tag2", "tag3"}) { + t.Errorf("Blog should has three tags after Append") + } + + if DB.Model(&blog).Association("SharedTags").Count() != 3 { + t.Errorf("Blog should has three tags after Append") + } + + if DB.Model(&blog2).Association("SharedTags").Count() != 3 { + t.Errorf("Blog should has three tags after Append") + } + + var tags []Tag + DB.Model(&blog).Related(&tags, "SharedTags") + if !compareTags(tags, []string{"tag1", "tag2", "tag3"}) { + t.Errorf("Should find 3 tags with Related") + } + + DB.Model(&blog2).Related(&tags, "SharedTags") + if !compareTags(tags, []string{"tag1", "tag2", "tag3"}) { + t.Errorf("Should find 3 tags with Related") + } + + var blog1 Blog + DB.Preload("SharedTags").Find(&blog1) + if !compareTags(blog1.SharedTags, []string{"tag1", "tag2", "tag3"}) { + t.Errorf("Preload many2many relations") + } + + var tag4 = &Tag{Locale: "ZH", Value: "tag4"} + DB.Model(&blog2).Association("SharedTags").Append(tag4) + + DB.Model(&blog).Related(&tags, "SharedTags") + if !compareTags(tags, []string{"tag1", "tag2", "tag3", "tag4"}) { + t.Errorf("Should find 3 tags with Related") + } + + DB.Model(&blog2).Related(&tags, "SharedTags") + if !compareTags(tags, []string{"tag1", "tag2", "tag3", "tag4"}) { + t.Errorf("Should find 3 tags with Related") + } + + // Replace + var tag5 = 
&Tag{Locale: "ZH", Value: "tag5"} + var tag6 = &Tag{Locale: "ZH", Value: "tag6"} + DB.Model(&blog2).Association("SharedTags").Replace(tag5, tag6) + var tags2 []Tag + DB.Model(&blog).Related(&tags2, "SharedTags") + if !compareTags(tags2, []string{"tag5", "tag6"}) { + t.Errorf("Should find 2 tags after Replace") + } + + DB.Model(&blog2).Related(&tags2, "SharedTags") + if !compareTags(tags2, []string{"tag5", "tag6"}) { + t.Errorf("Should find 2 tags after Replace") + } + + if DB.Model(&blog).Association("SharedTags").Count() != 2 { + t.Errorf("Blog should has three tags after Replace") + } + + // Delete + DB.Model(&blog).Association("SharedTags").Delete(tag5) + var tags3 []Tag + DB.Model(&blog).Related(&tags3, "SharedTags") + if !compareTags(tags3, []string{"tag6"}) { + t.Errorf("Should find 1 tags after Delete") + } + + if DB.Model(&blog).Association("SharedTags").Count() != 1 { + t.Errorf("Blog should has three tags after Delete") + } + + DB.Model(&blog2).Association("SharedTags").Delete(tag3) + var tags4 []Tag + DB.Model(&blog).Related(&tags4, "SharedTags") + if !compareTags(tags4, []string{"tag6"}) { + t.Errorf("Tag should not be deleted when Delete with a unrelated tag") + } + + // Clear + DB.Model(&blog2).Association("SharedTags").Clear() + if DB.Model(&blog).Association("SharedTags").Count() != 0 { + t.Errorf("All tags should be cleared") + } + } +} + +func TestManyToManyWithCustomizedForeignKeys2(t *testing.T) { + if dialect := os.Getenv("GORM_DIALECT"); dialect != "" && dialect != "sqlite" && dialect != "mssql" { + DB.DropTable(&Blog{}, &Tag{}) + DB.DropTable("locale_blog_tags") + DB.CreateTable(&Blog{}, &Tag{}) + blog := Blog{ + Locale: "ZH", + Subject: "subject", + Body: "body", + LocaleTags: []Tag{ + {Locale: "ZH", Value: "tag1"}, + {Locale: "ZH", Value: "tag2"}, + }, + } + DB.Save(&blog) + + blog2 := Blog{ + ID: blog.ID, + Locale: "EN", + } + DB.Create(&blog2) + + // Append + var tag3 = &Tag{Locale: "ZH", Value: "tag3"} + DB.Model(&blog).Association("LocaleTags").Append([]*Tag{tag3}) + if !compareTags(blog.LocaleTags, []string{"tag1", "tag2", "tag3"}) { + t.Errorf("Blog should has three tags after Append") + } + + if DB.Model(&blog).Association("LocaleTags").Count() != 3 { + t.Errorf("Blog should has three tags after Append") + } + + if DB.Model(&blog2).Association("LocaleTags").Count() != 0 { + t.Errorf("EN Blog should has 0 tags after ZH Blog Append") + } + + var tags []Tag + DB.Model(&blog).Related(&tags, "LocaleTags") + if !compareTags(tags, []string{"tag1", "tag2", "tag3"}) { + t.Errorf("Should find 3 tags with Related") + } + + DB.Model(&blog2).Related(&tags, "LocaleTags") + if len(tags) != 0 { + t.Errorf("Should find 0 tags with Related for EN Blog") + } + + var blog1 Blog + DB.Preload("LocaleTags").Find(&blog1, "locale = ? 
AND id = ?", "ZH", blog.ID) + if !compareTags(blog1.LocaleTags, []string{"tag1", "tag2", "tag3"}) { + t.Errorf("Preload many2many relations") + } + + var tag4 = &Tag{Locale: "ZH", Value: "tag4"} + DB.Model(&blog2).Association("LocaleTags").Append(tag4) + + DB.Model(&blog).Related(&tags, "LocaleTags") + if !compareTags(tags, []string{"tag1", "tag2", "tag3"}) { + t.Errorf("Should find 3 tags with Related for EN Blog") + } + + DB.Model(&blog2).Related(&tags, "LocaleTags") + if !compareTags(tags, []string{"tag4"}) { + t.Errorf("Should find 1 tags with Related for EN Blog") + } + + // Replace + var tag5 = &Tag{Locale: "ZH", Value: "tag5"} + var tag6 = &Tag{Locale: "ZH", Value: "tag6"} + DB.Model(&blog2).Association("LocaleTags").Replace(tag5, tag6) + + var tags2 []Tag + DB.Model(&blog).Related(&tags2, "LocaleTags") + if !compareTags(tags2, []string{"tag1", "tag2", "tag3"}) { + t.Errorf("CN Blog's tags should not be changed after EN Blog Replace") + } + + var blog11 Blog + DB.Preload("LocaleTags").First(&blog11, "id = ? AND locale = ?", blog.ID, blog.Locale) + if !compareTags(blog11.LocaleTags, []string{"tag1", "tag2", "tag3"}) { + t.Errorf("CN Blog's tags should not be changed after EN Blog Replace") + } + + DB.Model(&blog2).Related(&tags2, "LocaleTags") + if !compareTags(tags2, []string{"tag5", "tag6"}) { + t.Errorf("Should find 2 tags after Replace") + } + + var blog21 Blog + DB.Preload("LocaleTags").First(&blog21, "id = ? AND locale = ?", blog2.ID, blog2.Locale) + if !compareTags(blog21.LocaleTags, []string{"tag5", "tag6"}) { + t.Errorf("EN Blog's tags should be changed after Replace") + } + + if DB.Model(&blog).Association("LocaleTags").Count() != 3 { + t.Errorf("ZH Blog should has three tags after Replace") + } + + if DB.Model(&blog2).Association("LocaleTags").Count() != 2 { + t.Errorf("EN Blog should has two tags after Replace") + } + + // Delete + DB.Model(&blog).Association("LocaleTags").Delete(tag5) + + if DB.Model(&blog).Association("LocaleTags").Count() != 3 { + t.Errorf("ZH Blog should has three tags after Delete with EN's tag") + } + + if DB.Model(&blog2).Association("LocaleTags").Count() != 2 { + t.Errorf("EN Blog should has two tags after ZH Blog Delete with EN's tag") + } + + DB.Model(&blog2).Association("LocaleTags").Delete(tag5) + + if DB.Model(&blog).Association("LocaleTags").Count() != 3 { + t.Errorf("ZH Blog should has three tags after EN Blog Delete with EN's tag") + } + + if DB.Model(&blog2).Association("LocaleTags").Count() != 1 { + t.Errorf("EN Blog should has 1 tags after EN Blog Delete with EN's tag") + } + + // Clear + DB.Model(&blog2).Association("LocaleTags").Clear() + if DB.Model(&blog).Association("LocaleTags").Count() != 3 { + t.Errorf("ZH Blog's tags should not be cleared when clear EN Blog's tags") + } + + if DB.Model(&blog2).Association("LocaleTags").Count() != 0 { + t.Errorf("EN Blog's tags should be cleared when clear EN Blog's tags") + } + + DB.Model(&blog).Association("LocaleTags").Clear() + if DB.Model(&blog).Association("LocaleTags").Count() != 0 { + t.Errorf("ZH Blog's tags should be cleared when clear ZH Blog's tags") + } + + if DB.Model(&blog2).Association("LocaleTags").Count() != 0 { + t.Errorf("EN Blog's tags should be cleared") + } + } +} diff --git a/vendor/github.com/jinzhu/gorm/pointer_test.go b/vendor/github.com/jinzhu/gorm/pointer_test.go new file mode 100644 index 0000000..2a68a5a --- /dev/null +++ b/vendor/github.com/jinzhu/gorm/pointer_test.go @@ -0,0 +1,84 @@ +package gorm_test + +import "testing" + +type PointerStruct struct { + ID int64 
+ Name *string + Num *int +} + +type NormalStruct struct { + ID int64 + Name string + Num int +} + +func TestPointerFields(t *testing.T) { + DB.DropTable(&PointerStruct{}) + DB.AutoMigrate(&PointerStruct{}) + var name = "pointer struct 1" + var num = 100 + pointerStruct := PointerStruct{Name: &name, Num: &num} + if DB.Create(&pointerStruct).Error != nil { + t.Errorf("Failed to save pointer struct") + } + + var pointerStructResult PointerStruct + if err := DB.First(&pointerStructResult, "id = ?", pointerStruct.ID).Error; err != nil || *pointerStructResult.Name != name || *pointerStructResult.Num != num { + t.Errorf("Failed to query saved pointer struct") + } + + var tableName = DB.NewScope(&PointerStruct{}).TableName() + + var normalStruct NormalStruct + DB.Table(tableName).First(&normalStruct) + if normalStruct.Name != name || normalStruct.Num != num { + t.Errorf("Failed to query saved Normal struct") + } + + var nilPointerStruct = PointerStruct{} + if err := DB.Create(&nilPointerStruct).Error; err != nil { + t.Error("Failed to save nil pointer struct", err) + } + + var pointerStruct2 PointerStruct + if err := DB.First(&pointerStruct2, "id = ?", nilPointerStruct.ID).Error; err != nil { + t.Error("Failed to query saved nil pointer struct", err) + } + + var normalStruct2 NormalStruct + if err := DB.Table(tableName).First(&normalStruct2, "id = ?", nilPointerStruct.ID).Error; err != nil { + t.Error("Failed to query saved nil pointer struct", err) + } + + var partialNilPointerStruct1 = PointerStruct{Num: &num} + if err := DB.Create(&partialNilPointerStruct1).Error; err != nil { + t.Error("Failed to save partial nil pointer struct", err) + } + + var pointerStruct3 PointerStruct + if err := DB.First(&pointerStruct3, "id = ?", partialNilPointerStruct1.ID).Error; err != nil || *pointerStruct3.Num != num { + t.Error("Failed to query saved partial nil pointer struct", err) + } + + var normalStruct3 NormalStruct + if err := DB.Table(tableName).First(&normalStruct3, "id = ?", partialNilPointerStruct1.ID).Error; err != nil || normalStruct3.Num != num { + t.Error("Failed to query saved partial pointer struct", err) + } + + var partialNilPointerStruct2 = PointerStruct{Name: &name} + if err := DB.Create(&partialNilPointerStruct2).Error; err != nil { + t.Error("Failed to save partial nil pointer struct", err) + } + + var pointerStruct4 PointerStruct + if err := DB.First(&pointerStruct4, "id = ?", partialNilPointerStruct2.ID).Error; err != nil || *pointerStruct4.Name != name { + t.Error("Failed to query saved partial nil pointer struct", err) + } + + var normalStruct4 NormalStruct + if err := DB.Table(tableName).First(&normalStruct4, "id = ?", partialNilPointerStruct2.ID).Error; err != nil || normalStruct4.Name != name { + t.Error("Failed to query saved partial pointer struct", err) + } +} diff --git a/vendor/github.com/jinzhu/gorm/polymorphic_test.go b/vendor/github.com/jinzhu/gorm/polymorphic_test.go new file mode 100644 index 0000000..d1ecfbb --- /dev/null +++ b/vendor/github.com/jinzhu/gorm/polymorphic_test.go @@ -0,0 +1,366 @@ +package gorm_test + +import ( + "reflect" + "sort" + "testing" +) + +type Cat struct { + Id int + Name string + Toy Toy `gorm:"polymorphic:Owner;"` +} + +type Dog struct { + Id int + Name string + Toys []Toy `gorm:"polymorphic:Owner;"` +} + +type Hamster struct { + Id int + Name string + PreferredToy Toy `gorm:"polymorphic:Owner;polymorphic_value:hamster_preferred"` + OtherToy Toy `gorm:"polymorphic:Owner;polymorphic_value:hamster_other"` +} + +type Toy struct { + Id int + Name 
string + OwnerId int + OwnerType string +} + +var compareToys = func(toys []Toy, contents []string) bool { + var toyContents []string + for _, toy := range toys { + toyContents = append(toyContents, toy.Name) + } + sort.Strings(toyContents) + sort.Strings(contents) + return reflect.DeepEqual(toyContents, contents) +} + +func TestPolymorphic(t *testing.T) { + cat := Cat{Name: "Mr. Bigglesworth", Toy: Toy{Name: "cat toy"}} + dog := Dog{Name: "Pluto", Toys: []Toy{{Name: "dog toy 1"}, {Name: "dog toy 2"}}} + DB.Save(&cat).Save(&dog) + + if DB.Model(&cat).Association("Toy").Count() != 1 { + t.Errorf("Cat's toys count should be 1") + } + + if DB.Model(&dog).Association("Toys").Count() != 2 { + t.Errorf("Dog's toys count should be 2") + } + + // Query + var catToys []Toy + if DB.Model(&cat).Related(&catToys, "Toy").RecordNotFound() { + t.Errorf("Did not find any has one polymorphic association") + } else if len(catToys) != 1 { + t.Errorf("Should have found only one polymorphic has one association") + } else if catToys[0].Name != cat.Toy.Name { + t.Errorf("Should have found the proper has one polymorphic association") + } + + var dogToys []Toy + if DB.Model(&dog).Related(&dogToys, "Toys").RecordNotFound() { + t.Errorf("Did not find any polymorphic has many associations") + } else if len(dogToys) != len(dog.Toys) { + t.Errorf("Should have found all polymorphic has many associations") + } + + var catToy Toy + DB.Model(&cat).Association("Toy").Find(&catToy) + if catToy.Name != cat.Toy.Name { + t.Errorf("Should find has one polymorphic association") + } + + var dogToys1 []Toy + DB.Model(&dog).Association("Toys").Find(&dogToys1) + if !compareToys(dogToys1, []string{"dog toy 1", "dog toy 2"}) { + t.Errorf("Should find has many polymorphic association") + } + + // Append + DB.Model(&cat).Association("Toy").Append(&Toy{ + Name: "cat toy 2", + }) + + var catToy2 Toy + DB.Model(&cat).Association("Toy").Find(&catToy2) + if catToy2.Name != "cat toy 2" { + t.Errorf("Should update has one polymorphic association with Append") + } + + if DB.Model(&cat).Association("Toy").Count() != 1 { + t.Errorf("Cat's toys count should be 1 after Append") + } + + if DB.Model(&dog).Association("Toys").Count() != 2 { + t.Errorf("Should return two polymorphic has many associations") + } + + DB.Model(&dog).Association("Toys").Append(&Toy{ + Name: "dog toy 3", + }) + + var dogToys2 []Toy + DB.Model(&dog).Association("Toys").Find(&dogToys2) + if !compareToys(dogToys2, []string{"dog toy 1", "dog toy 2", "dog toy 3"}) { + t.Errorf("Dog's toys should be updated with Append") + } + + if DB.Model(&dog).Association("Toys").Count() != 3 { + t.Errorf("Should return three polymorphic has many associations") + } + + // Replace + DB.Model(&cat).Association("Toy").Replace(&Toy{ + Name: "cat toy 3", + }) + + var catToy3 Toy + DB.Model(&cat).Association("Toy").Find(&catToy3) + if catToy3.Name != "cat toy 3" { + t.Errorf("Should update has one polymorphic association with Replace") + } + + if DB.Model(&cat).Association("Toy").Count() != 1 { + t.Errorf("Cat's toys count should be 1 after Replace") + } + + if DB.Model(&dog).Association("Toys").Count() != 3 { + t.Errorf("Should return three polymorphic has many associations") + } + + DB.Model(&dog).Association("Toys").Replace(&Toy{ + Name: "dog toy 4", + }, []Toy{ + {Name: "dog toy 5"}, {Name: "dog toy 6"}, {Name: "dog toy 7"}, + }) + + var dogToys3 []Toy + DB.Model(&dog).Association("Toys").Find(&dogToys3) + if !compareToys(dogToys3, []string{"dog toy 4", "dog toy 5", "dog toy 6", "dog toy 7"}) { + 
t.Errorf("Dog's toys should be updated with Replace") + } + + if DB.Model(&dog).Association("Toys").Count() != 4 { + t.Errorf("Should return three polymorphic has many associations") + } + + // Delete + DB.Model(&cat).Association("Toy").Delete(&catToy2) + + var catToy4 Toy + DB.Model(&cat).Association("Toy").Find(&catToy4) + if catToy4.Name != "cat toy 3" { + t.Errorf("Should not update has one polymorphic association when Delete a unrelated Toy") + } + + if DB.Model(&cat).Association("Toy").Count() != 1 { + t.Errorf("Cat's toys count should be 1") + } + + if DB.Model(&dog).Association("Toys").Count() != 4 { + t.Errorf("Dog's toys count should be 4") + } + + DB.Model(&cat).Association("Toy").Delete(&catToy3) + + if !DB.Model(&cat).Related(&Toy{}, "Toy").RecordNotFound() { + t.Errorf("Toy should be deleted with Delete") + } + + if DB.Model(&cat).Association("Toy").Count() != 0 { + t.Errorf("Cat's toys count should be 0 after Delete") + } + + if DB.Model(&dog).Association("Toys").Count() != 4 { + t.Errorf("Dog's toys count should not be changed when delete cat's toy") + } + + DB.Model(&dog).Association("Toys").Delete(&dogToys2) + + if DB.Model(&dog).Association("Toys").Count() != 4 { + t.Errorf("Dog's toys count should not be changed when delete unrelated toys") + } + + DB.Model(&dog).Association("Toys").Delete(&dogToys3) + + if DB.Model(&dog).Association("Toys").Count() != 0 { + t.Errorf("Dog's toys count should be deleted with Delete") + } + + // Clear + DB.Model(&cat).Association("Toy").Append(&Toy{ + Name: "cat toy 2", + }) + + if DB.Model(&cat).Association("Toy").Count() != 1 { + t.Errorf("Cat's toys should be added with Append") + } + + DB.Model(&cat).Association("Toy").Clear() + + if DB.Model(&cat).Association("Toy").Count() != 0 { + t.Errorf("Cat's toys should be cleared with Clear") + } + + DB.Model(&dog).Association("Toys").Append(&Toy{ + Name: "dog toy 8", + }) + + if DB.Model(&dog).Association("Toys").Count() != 1 { + t.Errorf("Dog's toys should be added with Append") + } + + DB.Model(&dog).Association("Toys").Clear() + + if DB.Model(&dog).Association("Toys").Count() != 0 { + t.Errorf("Dog's toys should be cleared with Clear") + } +} + +func TestNamedPolymorphic(t *testing.T) { + hamster := Hamster{Name: "Mr. 
Hammond", PreferredToy: Toy{Name: "bike"}, OtherToy: Toy{Name: "treadmill"}} + DB.Save(&hamster) + + hamster2 := Hamster{} + DB.Preload("PreferredToy").Preload("OtherToy").Find(&hamster2, hamster.Id) + if hamster2.PreferredToy.Id != hamster.PreferredToy.Id || hamster2.PreferredToy.Name != hamster.PreferredToy.Name { + t.Errorf("Hamster's preferred toy couldn't be preloaded") + } + if hamster2.OtherToy.Id != hamster.OtherToy.Id || hamster2.OtherToy.Name != hamster.OtherToy.Name { + t.Errorf("Hamster's other toy couldn't be preloaded") + } + + // clear to omit Toy.Id in count + hamster2.PreferredToy = Toy{} + hamster2.OtherToy = Toy{} + + if DB.Model(&hamster2).Association("PreferredToy").Count() != 1 { + t.Errorf("Hamster's preferred toy count should be 1") + } + + if DB.Model(&hamster2).Association("OtherToy").Count() != 1 { + t.Errorf("Hamster's other toy count should be 1") + } + + // Query + var hamsterToys []Toy + if DB.Model(&hamster).Related(&hamsterToys, "PreferredToy").RecordNotFound() { + t.Errorf("Did not find any has one polymorphic association") + } else if len(hamsterToys) != 1 { + t.Errorf("Should have found only one polymorphic has one association") + } else if hamsterToys[0].Name != hamster.PreferredToy.Name { + t.Errorf("Should have found the proper has one polymorphic association") + } + + if DB.Model(&hamster).Related(&hamsterToys, "OtherToy").RecordNotFound() { + t.Errorf("Did not find any has one polymorphic association") + } else if len(hamsterToys) != 1 { + t.Errorf("Should have found only one polymorphic has one association") + } else if hamsterToys[0].Name != hamster.OtherToy.Name { + t.Errorf("Should have found the proper has one polymorphic association") + } + + hamsterToy := Toy{} + DB.Model(&hamster).Association("PreferredToy").Find(&hamsterToy) + if hamsterToy.Name != hamster.PreferredToy.Name { + t.Errorf("Should find has one polymorphic association") + } + hamsterToy = Toy{} + DB.Model(&hamster).Association("OtherToy").Find(&hamsterToy) + if hamsterToy.Name != hamster.OtherToy.Name { + t.Errorf("Should find has one polymorphic association") + } + + // Append + DB.Model(&hamster).Association("PreferredToy").Append(&Toy{ + Name: "bike 2", + }) + DB.Model(&hamster).Association("OtherToy").Append(&Toy{ + Name: "treadmill 2", + }) + + hamsterToy = Toy{} + DB.Model(&hamster).Association("PreferredToy").Find(&hamsterToy) + if hamsterToy.Name != "bike 2" { + t.Errorf("Should update has one polymorphic association with Append") + } + + hamsterToy = Toy{} + DB.Model(&hamster).Association("OtherToy").Find(&hamsterToy) + if hamsterToy.Name != "treadmill 2" { + t.Errorf("Should update has one polymorphic association with Append") + } + + if DB.Model(&hamster2).Association("PreferredToy").Count() != 1 { + t.Errorf("Hamster's toys count should be 1 after Append") + } + + if DB.Model(&hamster2).Association("OtherToy").Count() != 1 { + t.Errorf("Hamster's toys count should be 1 after Append") + } + + // Replace + DB.Model(&hamster).Association("PreferredToy").Replace(&Toy{ + Name: "bike 3", + }) + DB.Model(&hamster).Association("OtherToy").Replace(&Toy{ + Name: "treadmill 3", + }) + + hamsterToy = Toy{} + DB.Model(&hamster).Association("PreferredToy").Find(&hamsterToy) + if hamsterToy.Name != "bike 3" { + t.Errorf("Should update has one polymorphic association with Replace") + } + + hamsterToy = Toy{} + DB.Model(&hamster).Association("OtherToy").Find(&hamsterToy) + if hamsterToy.Name != "treadmill 3" { + t.Errorf("Should update has one polymorphic association with Replace") 
+ } + + if DB.Model(&hamster2).Association("PreferredToy").Count() != 1 { + t.Errorf("hamster's toys count should be 1 after Replace") + } + + if DB.Model(&hamster2).Association("OtherToy").Count() != 1 { + t.Errorf("hamster's toys count should be 1 after Replace") + } + + // Clear + DB.Model(&hamster).Association("PreferredToy").Append(&Toy{ + Name: "bike 2", + }) + DB.Model(&hamster).Association("OtherToy").Append(&Toy{ + Name: "treadmill 2", + }) + + if DB.Model(&hamster).Association("PreferredToy").Count() != 1 { + t.Errorf("Hamster's toys should be added with Append") + } + if DB.Model(&hamster).Association("OtherToy").Count() != 1 { + t.Errorf("Hamster's toys should be added with Append") + } + + DB.Model(&hamster).Association("PreferredToy").Clear() + + if DB.Model(&hamster2).Association("PreferredToy").Count() != 0 { + t.Errorf("Hamster's preferred toy should be cleared with Clear") + } + if DB.Model(&hamster2).Association("OtherToy").Count() != 1 { + t.Errorf("Hamster's other toy should be still available") + } + + DB.Model(&hamster).Association("OtherToy").Clear() + if DB.Model(&hamster).Association("OtherToy").Count() != 0 { + t.Errorf("Hamster's other toy should be cleared with Clear") + } +} diff --git a/vendor/github.com/jinzhu/gorm/preload_test.go b/vendor/github.com/jinzhu/gorm/preload_test.go new file mode 100644 index 0000000..311ad0b --- /dev/null +++ b/vendor/github.com/jinzhu/gorm/preload_test.go @@ -0,0 +1,1675 @@ +package gorm_test + +import ( + "database/sql" + "encoding/json" + "os" + "reflect" + "testing" + + "github.com/jinzhu/gorm" +) + +func getPreloadUser(name string) *User { + return getPreparedUser(name, "Preload") +} + +func checkUserHasPreloadData(user User, t *testing.T) { + u := getPreloadUser(user.Name) + if user.BillingAddress.Address1 != u.BillingAddress.Address1 { + t.Error("Failed to preload user's BillingAddress") + } + + if user.ShippingAddress.Address1 != u.ShippingAddress.Address1 { + t.Error("Failed to preload user's ShippingAddress") + } + + if user.CreditCard.Number != u.CreditCard.Number { + t.Error("Failed to preload user's CreditCard") + } + + if user.Company.Name != u.Company.Name { + t.Error("Failed to preload user's Company") + } + + if len(user.Emails) != len(u.Emails) { + t.Error("Failed to preload user's Emails") + } else { + var found int + for _, e1 := range u.Emails { + for _, e2 := range user.Emails { + if e1.Email == e2.Email { + found++ + break + } + } + } + if found != len(u.Emails) { + t.Error("Failed to preload user's email details") + } + } +} + +func TestPreload(t *testing.T) { + user1 := getPreloadUser("user1") + DB.Save(user1) + + preloadDB := DB.Where("role = ?", "Preload").Preload("BillingAddress").Preload("ShippingAddress"). 
+ Preload("CreditCard").Preload("Emails").Preload("Company") + var user User + preloadDB.Find(&user) + checkUserHasPreloadData(user, t) + + user2 := getPreloadUser("user2") + DB.Save(user2) + + user3 := getPreloadUser("user3") + DB.Save(user3) + + var users []User + preloadDB.Find(&users) + + for _, user := range users { + checkUserHasPreloadData(user, t) + } + + var users2 []*User + preloadDB.Find(&users2) + + for _, user := range users2 { + checkUserHasPreloadData(*user, t) + } + + var users3 []*User + preloadDB.Preload("Emails", "email = ?", user3.Emails[0].Email).Find(&users3) + + for _, user := range users3 { + if user.Name == user3.Name { + if len(user.Emails) != 1 { + t.Errorf("should only preload one emails for user3 when with condition") + } + } else if len(user.Emails) != 0 { + t.Errorf("should not preload any emails for other users when with condition") + } else if user.Emails == nil { + t.Errorf("should return an empty slice to indicate zero results") + } + } +} + +func TestAutoPreload(t *testing.T) { + user1 := getPreloadUser("auto_user1") + DB.Save(user1) + + preloadDB := DB.Set("gorm:auto_preload", true).Where("role = ?", "Preload") + var user User + preloadDB.Find(&user) + checkUserHasPreloadData(user, t) + + user2 := getPreloadUser("auto_user2") + DB.Save(user2) + + var users []User + preloadDB.Find(&users) + + for _, user := range users { + checkUserHasPreloadData(user, t) + } + + var users2 []*User + preloadDB.Find(&users2) + + for _, user := range users2 { + checkUserHasPreloadData(*user, t) + } +} + +func TestNestedPreload1(t *testing.T) { + type ( + Level1 struct { + ID uint + Value string + Level2ID uint + } + Level2 struct { + ID uint + Level1 Level1 + Level3ID uint + } + Level3 struct { + ID uint + Name string + Level2 Level2 + } + ) + DB.DropTableIfExists(&Level3{}) + DB.DropTableIfExists(&Level2{}) + DB.DropTableIfExists(&Level1{}) + if err := DB.AutoMigrate(&Level3{}, &Level2{}, &Level1{}).Error; err != nil { + t.Error(err) + } + + want := Level3{Level2: Level2{Level1: Level1{Value: "value"}}} + if err := DB.Create(&want).Error; err != nil { + t.Error(err) + } + + var got Level3 + if err := DB.Preload("Level2").Preload("Level2.Level1").Find(&got).Error; err != nil { + t.Error(err) + } + + if !reflect.DeepEqual(got, want) { + t.Errorf("got %s; want %s", toJSONString(got), toJSONString(want)) + } + + if err := DB.Preload("Level2").Preload("Level2.Level1").Find(&got, "name = ?", "not_found").Error; err != gorm.ErrRecordNotFound { + t.Error(err) + } +} + +func TestNestedPreload2(t *testing.T) { + type ( + Level1 struct { + ID uint + Value string + Level2ID uint + } + Level2 struct { + ID uint + Level1s []*Level1 + Level3ID uint + } + Level3 struct { + ID uint + Name string + Level2s []Level2 + } + ) + DB.DropTableIfExists(&Level3{}) + DB.DropTableIfExists(&Level2{}) + DB.DropTableIfExists(&Level1{}) + if err := DB.AutoMigrate(&Level3{}, &Level2{}, &Level1{}).Error; err != nil { + t.Error(err) + } + + want := Level3{ + Level2s: []Level2{ + { + Level1s: []*Level1{ + {Value: "value1"}, + {Value: "value2"}, + }, + }, + { + Level1s: []*Level1{ + {Value: "value3"}, + }, + }, + }, + } + if err := DB.Create(&want).Error; err != nil { + t.Error(err) + } + + var got Level3 + if err := DB.Preload("Level2s.Level1s").Find(&got).Error; err != nil { + t.Error(err) + } + + if !reflect.DeepEqual(got, want) { + t.Errorf("got %s; want %s", toJSONString(got), toJSONString(want)) + } +} + +func TestNestedPreload3(t *testing.T) { + type ( + Level1 struct { + ID uint + Value string + 
Level2ID uint + } + Level2 struct { + ID uint + Level1 Level1 + Level3ID uint + } + Level3 struct { + Name string + ID uint + Level2s []Level2 + } + ) + DB.DropTableIfExists(&Level3{}) + DB.DropTableIfExists(&Level2{}) + DB.DropTableIfExists(&Level1{}) + if err := DB.AutoMigrate(&Level3{}, &Level2{}, &Level1{}).Error; err != nil { + t.Error(err) + } + + want := Level3{ + Level2s: []Level2{ + {Level1: Level1{Value: "value1"}}, + {Level1: Level1{Value: "value2"}}, + }, + } + if err := DB.Create(&want).Error; err != nil { + t.Error(err) + } + + var got Level3 + if err := DB.Preload("Level2s.Level1").Find(&got).Error; err != nil { + t.Error(err) + } + + if !reflect.DeepEqual(got, want) { + t.Errorf("got %s; want %s", toJSONString(got), toJSONString(want)) + } +} + +func TestNestedPreload4(t *testing.T) { + type ( + Level1 struct { + ID uint + Value string + Level2ID uint + } + Level2 struct { + ID uint + Level1s []Level1 + Level3ID uint + } + Level3 struct { + ID uint + Name string + Level2 Level2 + } + ) + DB.DropTableIfExists(&Level3{}) + DB.DropTableIfExists(&Level2{}) + DB.DropTableIfExists(&Level1{}) + if err := DB.AutoMigrate(&Level3{}, &Level2{}, &Level1{}).Error; err != nil { + t.Error(err) + } + + want := Level3{ + Level2: Level2{ + Level1s: []Level1{ + {Value: "value1"}, + {Value: "value2"}, + }, + }, + } + if err := DB.Create(&want).Error; err != nil { + t.Error(err) + } + + var got Level3 + if err := DB.Preload("Level2.Level1s").Find(&got).Error; err != nil { + t.Error(err) + } + + if !reflect.DeepEqual(got, want) { + t.Errorf("got %s; want %s", toJSONString(got), toJSONString(want)) + } +} + +// Slice: []Level3 +func TestNestedPreload5(t *testing.T) { + type ( + Level1 struct { + ID uint + Value string + Level2ID uint + } + Level2 struct { + ID uint + Level1 Level1 + Level3ID uint + } + Level3 struct { + ID uint + Name string + Level2 Level2 + } + ) + DB.DropTableIfExists(&Level3{}) + DB.DropTableIfExists(&Level2{}) + DB.DropTableIfExists(&Level1{}) + if err := DB.AutoMigrate(&Level3{}, &Level2{}, &Level1{}).Error; err != nil { + t.Error(err) + } + + want := make([]Level3, 2) + want[0] = Level3{Level2: Level2{Level1: Level1{Value: "value"}}} + if err := DB.Create(&want[0]).Error; err != nil { + t.Error(err) + } + want[1] = Level3{Level2: Level2{Level1: Level1{Value: "value2"}}} + if err := DB.Create(&want[1]).Error; err != nil { + t.Error(err) + } + + var got []Level3 + if err := DB.Preload("Level2").Preload("Level2.Level1").Find(&got).Error; err != nil { + t.Error(err) + } + + if !reflect.DeepEqual(got, want) { + t.Errorf("got %s; want %s", toJSONString(got), toJSONString(want)) + } +} + +func TestNestedPreload6(t *testing.T) { + type ( + Level1 struct { + ID uint + Value string + Level2ID uint + } + Level2 struct { + ID uint + Level1s []Level1 + Level3ID uint + } + Level3 struct { + ID uint + Name string + Level2s []Level2 + } + ) + DB.DropTableIfExists(&Level3{}) + DB.DropTableIfExists(&Level2{}) + DB.DropTableIfExists(&Level1{}) + if err := DB.AutoMigrate(&Level3{}, &Level2{}, &Level1{}).Error; err != nil { + t.Error(err) + } + + want := make([]Level3, 2) + want[0] = Level3{ + Level2s: []Level2{ + { + Level1s: []Level1{ + {Value: "value1"}, + {Value: "value2"}, + }, + }, + { + Level1s: []Level1{ + {Value: "value3"}, + }, + }, + }, + } + if err := DB.Create(&want[0]).Error; err != nil { + t.Error(err) + } + + want[1] = Level3{ + Level2s: []Level2{ + { + Level1s: []Level1{ + {Value: "value3"}, + {Value: "value4"}, + }, + }, + { + Level1s: []Level1{ + {Value: "value5"}, + }, + }, 
+ }, + } + if err := DB.Create(&want[1]).Error; err != nil { + t.Error(err) + } + + var got []Level3 + if err := DB.Preload("Level2s.Level1s").Find(&got).Error; err != nil { + t.Error(err) + } + + if !reflect.DeepEqual(got, want) { + t.Errorf("got %s; want %s", toJSONString(got), toJSONString(want)) + } +} + +func TestNestedPreload7(t *testing.T) { + type ( + Level1 struct { + ID uint + Value string + Level2ID uint + } + Level2 struct { + ID uint + Level1 Level1 + Level3ID uint + } + Level3 struct { + ID uint + Name string + Level2s []Level2 + } + ) + DB.DropTableIfExists(&Level3{}) + DB.DropTableIfExists(&Level2{}) + DB.DropTableIfExists(&Level1{}) + if err := DB.AutoMigrate(&Level3{}, &Level2{}, &Level1{}).Error; err != nil { + t.Error(err) + } + + want := make([]Level3, 2) + want[0] = Level3{ + Level2s: []Level2{ + {Level1: Level1{Value: "value1"}}, + {Level1: Level1{Value: "value2"}}, + }, + } + if err := DB.Create(&want[0]).Error; err != nil { + t.Error(err) + } + + want[1] = Level3{ + Level2s: []Level2{ + {Level1: Level1{Value: "value3"}}, + {Level1: Level1{Value: "value4"}}, + }, + } + if err := DB.Create(&want[1]).Error; err != nil { + t.Error(err) + } + + var got []Level3 + if err := DB.Preload("Level2s.Level1").Find(&got).Error; err != nil { + t.Error(err) + } + + if !reflect.DeepEqual(got, want) { + t.Errorf("got %s; want %s", toJSONString(got), toJSONString(want)) + } +} + +func TestNestedPreload8(t *testing.T) { + type ( + Level1 struct { + ID uint + Value string + Level2ID uint + } + Level2 struct { + ID uint + Level1s []Level1 + Level3ID uint + } + Level3 struct { + ID uint + Name string + Level2 Level2 + } + ) + DB.DropTableIfExists(&Level3{}) + DB.DropTableIfExists(&Level2{}) + DB.DropTableIfExists(&Level1{}) + if err := DB.AutoMigrate(&Level3{}, &Level2{}, &Level1{}).Error; err != nil { + t.Error(err) + } + + want := make([]Level3, 2) + want[0] = Level3{ + Level2: Level2{ + Level1s: []Level1{ + {Value: "value1"}, + {Value: "value2"}, + }, + }, + } + if err := DB.Create(&want[0]).Error; err != nil { + t.Error(err) + } + want[1] = Level3{ + Level2: Level2{ + Level1s: []Level1{ + {Value: "value3"}, + {Value: "value4"}, + }, + }, + } + if err := DB.Create(&want[1]).Error; err != nil { + t.Error(err) + } + + var got []Level3 + if err := DB.Preload("Level2.Level1s").Find(&got).Error; err != nil { + t.Error(err) + } + + if !reflect.DeepEqual(got, want) { + t.Errorf("got %s; want %s", toJSONString(got), toJSONString(want)) + } +} + +func TestNestedPreload9(t *testing.T) { + type ( + Level0 struct { + ID uint + Value string + Level1ID uint + } + Level1 struct { + ID uint + Value string + Level2ID uint + Level2_1ID uint + Level0s []Level0 + } + Level2 struct { + ID uint + Level1s []Level1 + Level3ID uint + } + Level2_1 struct { + ID uint + Level1s []Level1 + Level3ID uint + } + Level3 struct { + ID uint + Name string + Level2 Level2 + Level2_1 Level2_1 + } + ) + DB.DropTableIfExists(&Level3{}) + DB.DropTableIfExists(&Level2{}) + DB.DropTableIfExists(&Level2_1{}) + DB.DropTableIfExists(&Level1{}) + DB.DropTableIfExists(&Level0{}) + if err := DB.AutoMigrate(&Level3{}, &Level2{}, &Level1{}, &Level2_1{}, &Level0{}).Error; err != nil { + t.Error(err) + } + + want := make([]Level3, 2) + want[0] = Level3{ + Level2: Level2{ + Level1s: []Level1{ + {Value: "value1"}, + {Value: "value2"}, + }, + }, + Level2_1: Level2_1{ + Level1s: []Level1{ + { + Value: "value1-1", + Level0s: []Level0{{Value: "Level0-1"}}, + }, + { + Value: "value2-2", + Level0s: []Level0{{Value: "Level0-2"}}, + }, + }, + }, 
+ } + if err := DB.Create(&want[0]).Error; err != nil { + t.Error(err) + } + want[1] = Level3{ + Level2: Level2{ + Level1s: []Level1{ + {Value: "value3"}, + {Value: "value4"}, + }, + }, + Level2_1: Level2_1{ + Level1s: []Level1{ + { + Value: "value3-3", + Level0s: []Level0{}, + }, + { + Value: "value4-4", + Level0s: []Level0{}, + }, + }, + }, + } + if err := DB.Create(&want[1]).Error; err != nil { + t.Error(err) + } + + var got []Level3 + if err := DB.Preload("Level2").Preload("Level2.Level1s").Preload("Level2_1").Preload("Level2_1.Level1s").Preload("Level2_1.Level1s.Level0s").Find(&got).Error; err != nil { + t.Error(err) + } + + if !reflect.DeepEqual(got, want) { + t.Errorf("got %s; want %s", toJSONString(got), toJSONString(want)) + } +} + +type LevelA1 struct { + ID uint + Value string +} + +type LevelA2 struct { + ID uint + Value string + LevelA3s []*LevelA3 +} + +type LevelA3 struct { + ID uint + Value string + LevelA1ID sql.NullInt64 + LevelA1 *LevelA1 + LevelA2ID sql.NullInt64 + LevelA2 *LevelA2 +} + +func TestNestedPreload10(t *testing.T) { + DB.DropTableIfExists(&LevelA3{}) + DB.DropTableIfExists(&LevelA2{}) + DB.DropTableIfExists(&LevelA1{}) + + if err := DB.AutoMigrate(&LevelA1{}, &LevelA2{}, &LevelA3{}).Error; err != nil { + t.Error(err) + } + + levelA1 := &LevelA1{Value: "foo"} + if err := DB.Save(levelA1).Error; err != nil { + t.Error(err) + } + + want := []*LevelA2{ + { + Value: "bar", + LevelA3s: []*LevelA3{ + { + Value: "qux", + LevelA1: levelA1, + }, + }, + }, + { + Value: "bar 2", + LevelA3s: []*LevelA3{}, + }, + } + for _, levelA2 := range want { + if err := DB.Save(levelA2).Error; err != nil { + t.Error(err) + } + } + + var got []*LevelA2 + if err := DB.Preload("LevelA3s.LevelA1").Find(&got).Error; err != nil { + t.Error(err) + } + + if !reflect.DeepEqual(got, want) { + t.Errorf("got %s; want %s", toJSONString(got), toJSONString(want)) + } +} + +type LevelB1 struct { + ID uint + Value string + LevelB3s []*LevelB3 +} + +type LevelB2 struct { + ID uint + Value string +} + +type LevelB3 struct { + ID uint + Value string + LevelB1ID sql.NullInt64 + LevelB1 *LevelB1 + LevelB2s []*LevelB2 `gorm:"many2many:levelb1_levelb3_levelb2s"` +} + +func TestNestedPreload11(t *testing.T) { + DB.DropTableIfExists(&LevelB2{}) + DB.DropTableIfExists(&LevelB3{}) + DB.DropTableIfExists(&LevelB1{}) + if err := DB.AutoMigrate(&LevelB1{}, &LevelB2{}, &LevelB3{}).Error; err != nil { + t.Error(err) + } + + levelB1 := &LevelB1{Value: "foo"} + if err := DB.Create(levelB1).Error; err != nil { + t.Error(err) + } + + levelB3 := &LevelB3{ + Value: "bar", + LevelB1ID: sql.NullInt64{Valid: true, Int64: int64(levelB1.ID)}, + } + if err := DB.Create(levelB3).Error; err != nil { + t.Error(err) + } + levelB1.LevelB3s = []*LevelB3{levelB3} + + want := []*LevelB1{levelB1} + var got []*LevelB1 + if err := DB.Preload("LevelB3s.LevelB2s").Find(&got).Error; err != nil { + t.Error(err) + } + + if !reflect.DeepEqual(got, want) { + t.Errorf("got %s; want %s", toJSONString(got), toJSONString(want)) + } +} + +type LevelC1 struct { + ID uint + Value string + LevelC2ID uint +} + +type LevelC2 struct { + ID uint + Value string + LevelC1 LevelC1 +} + +type LevelC3 struct { + ID uint + Value string + LevelC2ID uint + LevelC2 LevelC2 +} + +func TestNestedPreload12(t *testing.T) { + DB.DropTableIfExists(&LevelC2{}) + DB.DropTableIfExists(&LevelC3{}) + DB.DropTableIfExists(&LevelC1{}) + if err := DB.AutoMigrate(&LevelC1{}, &LevelC2{}, &LevelC3{}).Error; err != nil { + t.Error(err) + } + + level2 := LevelC2{ + Value: "c2", + 
LevelC1: LevelC1{ + Value: "c1", + }, + } + DB.Create(&level2) + + want := []LevelC3{ + { + Value: "c3-1", + LevelC2: level2, + }, { + Value: "c3-2", + LevelC2: level2, + }, + } + + for i := range want { + if err := DB.Create(&want[i]).Error; err != nil { + t.Error(err) + } + } + + var got []LevelC3 + if err := DB.Preload("LevelC2").Preload("LevelC2.LevelC1").Find(&got).Error; err != nil { + t.Error(err) + } + + if !reflect.DeepEqual(got, want) { + t.Errorf("got %s; want %s", toJSONString(got), toJSONString(want)) + } +} + +func TestManyToManyPreloadWithMultiPrimaryKeys(t *testing.T) { + if dialect := os.Getenv("GORM_DIALECT"); dialect == "" || dialect == "sqlite" || dialect == "mssql" { + return + } + + type ( + Level1 struct { + ID uint `gorm:"primary_key;"` + LanguageCode string `gorm:"primary_key"` + Value string + } + Level2 struct { + ID uint `gorm:"primary_key;"` + LanguageCode string `gorm:"primary_key"` + Value string + Level1s []Level1 `gorm:"many2many:levels;"` + } + ) + + DB.DropTableIfExists(&Level2{}) + DB.DropTableIfExists(&Level1{}) + DB.DropTableIfExists("levels") + + if err := DB.AutoMigrate(&Level2{}, &Level1{}).Error; err != nil { + t.Error(err) + } + + want := Level2{Value: "Bob", LanguageCode: "ru", Level1s: []Level1{ + {Value: "ru", LanguageCode: "ru"}, + {Value: "en", LanguageCode: "en"}, + }} + if err := DB.Save(&want).Error; err != nil { + t.Error(err) + } + + want2 := Level2{Value: "Tom", LanguageCode: "zh", Level1s: []Level1{ + {Value: "zh", LanguageCode: "zh"}, + {Value: "de", LanguageCode: "de"}, + }} + if err := DB.Save(&want2).Error; err != nil { + t.Error(err) + } + + var got Level2 + if err := DB.Preload("Level1s").Find(&got, "value = ?", "Bob").Error; err != nil { + t.Error(err) + } + + if !reflect.DeepEqual(got, want) { + t.Errorf("got %s; want %s", toJSONString(got), toJSONString(want)) + } + + var got2 Level2 + if err := DB.Preload("Level1s").Find(&got2, "value = ?", "Tom").Error; err != nil { + t.Error(err) + } + + if !reflect.DeepEqual(got2, want2) { + t.Errorf("got %s; want %s", toJSONString(got2), toJSONString(want2)) + } + + var got3 []Level2 + if err := DB.Preload("Level1s").Find(&got3, "value IN (?)", []string{"Bob", "Tom"}).Error; err != nil { + t.Error(err) + } + + if !reflect.DeepEqual(got3, []Level2{got, got2}) { + t.Errorf("got %s; want %s", toJSONString(got3), toJSONString([]Level2{got, got2})) + } + + var got4 []Level2 + if err := DB.Preload("Level1s", "value IN (?)", []string{"zh", "ru"}).Find(&got4, "value IN (?)", []string{"Bob", "Tom"}).Error; err != nil { + t.Error(err) + } + + var ruLevel1 Level1 + var zhLevel1 Level1 + DB.First(&ruLevel1, "value = ?", "ru") + DB.First(&zhLevel1, "value = ?", "zh") + + got.Level1s = []Level1{ruLevel1} + got2.Level1s = []Level1{zhLevel1} + if !reflect.DeepEqual(got4, []Level2{got, got2}) { + t.Errorf("got %s; want %s", toJSONString(got4), toJSONString([]Level2{got, got2})) + } + + if err := DB.Preload("Level1s").Find(&got4, "value IN (?)", []string{"non-existing"}).Error; err != nil { + t.Error(err) + } +} + +func TestManyToManyPreloadForNestedPointer(t *testing.T) { + type ( + Level1 struct { + ID uint + Value string + } + Level2 struct { + ID uint + Value string + Level1s []*Level1 `gorm:"many2many:levels;"` + } + Level3 struct { + ID uint + Value string + Level2ID sql.NullInt64 + Level2 *Level2 + } + ) + + DB.DropTableIfExists(&Level3{}) + DB.DropTableIfExists(&Level2{}) + DB.DropTableIfExists(&Level1{}) + DB.DropTableIfExists("levels") + + if err := DB.AutoMigrate(&Level3{}, &Level2{}, 
&Level1{}).Error; err != nil { + t.Error(err) + } + + want := Level3{ + Value: "Bob", + Level2: &Level2{ + Value: "Foo", + Level1s: []*Level1{ + {Value: "ru"}, + {Value: "en"}, + }, + }, + } + if err := DB.Save(&want).Error; err != nil { + t.Error(err) + } + + want2 := Level3{ + Value: "Tom", + Level2: &Level2{ + Value: "Bar", + Level1s: []*Level1{ + {Value: "zh"}, + {Value: "de"}, + }, + }, + } + if err := DB.Save(&want2).Error; err != nil { + t.Error(err) + } + + var got Level3 + if err := DB.Preload("Level2.Level1s").Find(&got, "value = ?", "Bob").Error; err != nil { + t.Error(err) + } + + if !reflect.DeepEqual(got, want) { + t.Errorf("got %s; want %s", toJSONString(got), toJSONString(want)) + } + + var got2 Level3 + if err := DB.Preload("Level2.Level1s").Find(&got2, "value = ?", "Tom").Error; err != nil { + t.Error(err) + } + + if !reflect.DeepEqual(got2, want2) { + t.Errorf("got %s; want %s", toJSONString(got2), toJSONString(want2)) + } + + var got3 []Level3 + if err := DB.Preload("Level2.Level1s").Find(&got3, "value IN (?)", []string{"Bob", "Tom"}).Error; err != nil { + t.Error(err) + } + + if !reflect.DeepEqual(got3, []Level3{got, got2}) { + t.Errorf("got %s; want %s", toJSONString(got3), toJSONString([]Level3{got, got2})) + } + + var got4 []Level3 + if err := DB.Preload("Level2.Level1s", "value IN (?)", []string{"zh", "ru"}).Find(&got4, "value IN (?)", []string{"Bob", "Tom"}).Error; err != nil { + t.Error(err) + } + + var got5 Level3 + DB.Preload("Level2.Level1s").Find(&got5, "value = ?", "bogus") + + var ruLevel1 Level1 + var zhLevel1 Level1 + DB.First(&ruLevel1, "value = ?", "ru") + DB.First(&zhLevel1, "value = ?", "zh") + + got.Level2.Level1s = []*Level1{&ruLevel1} + got2.Level2.Level1s = []*Level1{&zhLevel1} + if !reflect.DeepEqual(got4, []Level3{got, got2}) { + t.Errorf("got %s; want %s", toJSONString(got4), toJSONString([]Level3{got, got2})) + } +} + +func TestNestedManyToManyPreload(t *testing.T) { + type ( + Level1 struct { + ID uint + Value string + } + Level2 struct { + ID uint + Value string + Level1s []*Level1 `gorm:"many2many:level1_level2;"` + } + Level3 struct { + ID uint + Value string + Level2s []Level2 `gorm:"many2many:level2_level3;"` + } + ) + + DB.DropTableIfExists(&Level1{}) + DB.DropTableIfExists(&Level2{}) + DB.DropTableIfExists(&Level3{}) + DB.DropTableIfExists("level1_level2") + DB.DropTableIfExists("level2_level3") + + if err := DB.AutoMigrate(&Level3{}, &Level2{}, &Level1{}).Error; err != nil { + t.Error(err) + } + + want := Level3{ + Value: "Level3", + Level2s: []Level2{ + { + Value: "Bob", + Level1s: []*Level1{ + {Value: "ru"}, + {Value: "en"}, + }, + }, { + Value: "Tom", + Level1s: []*Level1{ + {Value: "zh"}, + {Value: "de"}, + }, + }, + }, + } + + if err := DB.Save(&want).Error; err != nil { + t.Error(err) + } + + var got Level3 + if err := DB.Preload("Level2s").Preload("Level2s.Level1s").Find(&got, "value = ?", "Level3").Error; err != nil { + t.Error(err) + } + + if !reflect.DeepEqual(got, want) { + t.Errorf("got %s; want %s", toJSONString(got), toJSONString(want)) + } + + if err := DB.Preload("Level2s.Level1s").Find(&got, "value = ?", "not_found").Error; err != gorm.ErrRecordNotFound { + t.Error(err) + } +} + +func TestNestedManyToManyPreload2(t *testing.T) { + type ( + Level1 struct { + ID uint + Value string + } + Level2 struct { + ID uint + Value string + Level1s []*Level1 `gorm:"many2many:level1_level2;"` + } + Level3 struct { + ID uint + Value string + Level2ID sql.NullInt64 + Level2 *Level2 + } + ) + + DB.DropTableIfExists(&Level1{}) + 
DB.DropTableIfExists(&Level2{}) + DB.DropTableIfExists(&Level3{}) + DB.DropTableIfExists("level1_level2") + + if err := DB.AutoMigrate(&Level3{}, &Level2{}, &Level1{}).Error; err != nil { + t.Error(err) + } + + want := Level3{ + Value: "Level3", + Level2: &Level2{ + Value: "Bob", + Level1s: []*Level1{ + {Value: "ru"}, + {Value: "en"}, + }, + }, + } + + if err := DB.Save(&want).Error; err != nil { + t.Error(err) + } + + var got Level3 + if err := DB.Preload("Level2.Level1s").Find(&got, "value = ?", "Level3").Error; err != nil { + t.Error(err) + } + + if !reflect.DeepEqual(got, want) { + t.Errorf("got %s; want %s", toJSONString(got), toJSONString(want)) + } + + if err := DB.Preload("Level2.Level1s").Find(&got, "value = ?", "not_found").Error; err != gorm.ErrRecordNotFound { + t.Error(err) + } +} + +func TestNestedManyToManyPreload3(t *testing.T) { + type ( + Level1 struct { + ID uint + Value string + } + Level2 struct { + ID uint + Value string + Level1s []*Level1 `gorm:"many2many:level1_level2;"` + } + Level3 struct { + ID uint + Value string + Level2ID sql.NullInt64 + Level2 *Level2 + } + ) + + DB.DropTableIfExists(&Level1{}) + DB.DropTableIfExists(&Level2{}) + DB.DropTableIfExists(&Level3{}) + DB.DropTableIfExists("level1_level2") + + if err := DB.AutoMigrate(&Level3{}, &Level2{}, &Level1{}).Error; err != nil { + t.Error(err) + } + + level1Zh := &Level1{Value: "zh"} + level1Ru := &Level1{Value: "ru"} + level1En := &Level1{Value: "en"} + + level21 := &Level2{ + Value: "Level2-1", + Level1s: []*Level1{level1Zh, level1Ru}, + } + + level22 := &Level2{ + Value: "Level2-2", + Level1s: []*Level1{level1Zh, level1En}, + } + + wants := []*Level3{ + { + Value: "Level3-1", + Level2: level21, + }, + { + Value: "Level3-2", + Level2: level22, + }, + { + Value: "Level3-3", + Level2: level21, + }, + } + + for _, want := range wants { + if err := DB.Save(&want).Error; err != nil { + t.Error(err) + } + } + + var gots []*Level3 + if err := DB.Preload("Level2.Level1s", func(db *gorm.DB) *gorm.DB { + return db.Order("level1.id ASC") + }).Find(&gots).Error; err != nil { + t.Error(err) + } + + if !reflect.DeepEqual(gots, wants) { + t.Errorf("got %s; want %s", toJSONString(gots), toJSONString(wants)) + } +} + +func TestNestedManyToManyPreload3ForStruct(t *testing.T) { + type ( + Level1 struct { + ID uint + Value string + } + Level2 struct { + ID uint + Value string + Level1s []Level1 `gorm:"many2many:level1_level2;"` + } + Level3 struct { + ID uint + Value string + Level2ID sql.NullInt64 + Level2 Level2 + } + ) + + DB.DropTableIfExists(&Level1{}) + DB.DropTableIfExists(&Level2{}) + DB.DropTableIfExists(&Level3{}) + DB.DropTableIfExists("level1_level2") + + if err := DB.AutoMigrate(&Level3{}, &Level2{}, &Level1{}).Error; err != nil { + t.Error(err) + } + + level1Zh := Level1{Value: "zh"} + level1Ru := Level1{Value: "ru"} + level1En := Level1{Value: "en"} + + level21 := Level2{ + Value: "Level2-1", + Level1s: []Level1{level1Zh, level1Ru}, + } + + level22 := Level2{ + Value: "Level2-2", + Level1s: []Level1{level1Zh, level1En}, + } + + wants := []*Level3{ + { + Value: "Level3-1", + Level2: level21, + }, + { + Value: "Level3-2", + Level2: level22, + }, + { + Value: "Level3-3", + Level2: level21, + }, + } + + for _, want := range wants { + if err := DB.Save(&want).Error; err != nil { + t.Error(err) + } + } + + var gots []*Level3 + if err := DB.Preload("Level2.Level1s", func(db *gorm.DB) *gorm.DB { + return db.Order("level1.id ASC") + }).Find(&gots).Error; err != nil { + t.Error(err) + } + + if !reflect.DeepEqual(gots, 
wants) { + t.Errorf("got %s; want %s", toJSONString(gots), toJSONString(wants)) + } +} + +func TestNestedManyToManyPreload4(t *testing.T) { + type ( + Level4 struct { + ID uint + Value string + Level3ID uint + } + Level3 struct { + ID uint + Value string + Level4s []*Level4 + } + Level2 struct { + ID uint + Value string + Level3s []*Level3 `gorm:"many2many:level2_level3;"` + } + Level1 struct { + ID uint + Value string + Level2s []*Level2 `gorm:"many2many:level1_level2;"` + } + ) + + DB.DropTableIfExists(&Level1{}) + DB.DropTableIfExists(&Level2{}) + DB.DropTableIfExists(&Level3{}) + DB.DropTableIfExists(&Level4{}) + DB.DropTableIfExists("level1_level2") + DB.DropTableIfExists("level2_level3") + + dummy := Level1{ + Value: "Level1", + Level2s: []*Level2{{ + Value: "Level2", + Level3s: []*Level3{{ + Value: "Level3", + Level4s: []*Level4{{ + Value: "Level4", + }}, + }}, + }}, + } + + if err := DB.AutoMigrate(&Level4{}, &Level3{}, &Level2{}, &Level1{}).Error; err != nil { + t.Error(err) + } + + if err := DB.Save(&dummy).Error; err != nil { + t.Error(err) + } + + var level1 Level1 + if err := DB.Preload("Level2s").Preload("Level2s.Level3s").Preload("Level2s.Level3s.Level4s").First(&level1).Error; err != nil { + t.Error(err) + } +} + +func TestManyToManyPreloadForPointer(t *testing.T) { + type ( + Level1 struct { + ID uint + Value string + } + Level2 struct { + ID uint + Value string + Level1s []*Level1 `gorm:"many2many:levels;"` + } + ) + + DB.DropTableIfExists(&Level2{}) + DB.DropTableIfExists(&Level1{}) + DB.DropTableIfExists("levels") + + if err := DB.AutoMigrate(&Level2{}, &Level1{}).Error; err != nil { + t.Error(err) + } + + want := Level2{Value: "Bob", Level1s: []*Level1{ + {Value: "ru"}, + {Value: "en"}, + }} + if err := DB.Save(&want).Error; err != nil { + t.Error(err) + } + + want2 := Level2{Value: "Tom", Level1s: []*Level1{ + {Value: "zh"}, + {Value: "de"}, + }} + if err := DB.Save(&want2).Error; err != nil { + t.Error(err) + } + + var got Level2 + if err := DB.Preload("Level1s").Find(&got, "value = ?", "Bob").Error; err != nil { + t.Error(err) + } + + if !reflect.DeepEqual(got, want) { + t.Errorf("got %s; want %s", toJSONString(got), toJSONString(want)) + } + + var got2 Level2 + if err := DB.Preload("Level1s").Find(&got2, "value = ?", "Tom").Error; err != nil { + t.Error(err) + } + + if !reflect.DeepEqual(got2, want2) { + t.Errorf("got %s; want %s", toJSONString(got2), toJSONString(want2)) + } + + var got3 []Level2 + if err := DB.Preload("Level1s").Find(&got3, "value IN (?)", []string{"Bob", "Tom"}).Error; err != nil { + t.Error(err) + } + + if !reflect.DeepEqual(got3, []Level2{got, got2}) { + t.Errorf("got %s; want %s", toJSONString(got3), toJSONString([]Level2{got, got2})) + } + + var got4 []Level2 + if err := DB.Preload("Level1s", "value IN (?)", []string{"zh", "ru"}).Find(&got4, "value IN (?)", []string{"Bob", "Tom"}).Error; err != nil { + t.Error(err) + } + + var got5 Level2 + DB.Preload("Level1s").First(&got5, "value = ?", "bogus") + + var ruLevel1 Level1 + var zhLevel1 Level1 + DB.First(&ruLevel1, "value = ?", "ru") + DB.First(&zhLevel1, "value = ?", "zh") + + got.Level1s = []*Level1{&ruLevel1} + got2.Level1s = []*Level1{&zhLevel1} + if !reflect.DeepEqual(got4, []Level2{got, got2}) { + t.Errorf("got %s; want %s", toJSONString(got4), toJSONString([]Level2{got, got2})) + } +} + +func TestNilPointerSlice(t *testing.T) { + type ( + Level3 struct { + ID uint + Value string + } + Level2 struct { + ID uint + Value string + Level3ID uint + Level3 *Level3 + } + Level1 struct { + ID 
uint + Value string + Level2ID uint + Level2 *Level2 + } + ) + + DB.DropTableIfExists(&Level3{}) + DB.DropTableIfExists(&Level2{}) + DB.DropTableIfExists(&Level1{}) + + if err := DB.AutoMigrate(&Level3{}, &Level2{}, &Level1{}).Error; err != nil { + t.Error(err) + } + + want := Level1{ + Value: "Bob", + Level2: &Level2{ + Value: "en", + Level3: &Level3{ + Value: "native", + }, + }, + } + if err := DB.Save(&want).Error; err != nil { + t.Error(err) + } + + want2 := Level1{ + Value: "Tom", + Level2: nil, + } + if err := DB.Save(&want2).Error; err != nil { + t.Error(err) + } + + var got []Level1 + if err := DB.Preload("Level2").Preload("Level2.Level3").Find(&got).Error; err != nil { + t.Error(err) + } + + if len(got) != 2 { + t.Errorf("got %v items, expected 2", len(got)) + } + + if !reflect.DeepEqual(got[0], want) && !reflect.DeepEqual(got[1], want) { + t.Errorf("got %s; want array containing %s", toJSONString(got), toJSONString(want)) + } + + if !reflect.DeepEqual(got[0], want2) && !reflect.DeepEqual(got[1], want2) { + t.Errorf("got %s; want array containing %s", toJSONString(got), toJSONString(want2)) + } +} + +func TestNilPointerSlice2(t *testing.T) { + type ( + Level4 struct { + ID uint + } + Level3 struct { + ID uint + Level4ID sql.NullInt64 `sql:"index"` + Level4 *Level4 + } + Level2 struct { + ID uint + Level3s []*Level3 `gorm:"many2many:level2_level3s"` + } + Level1 struct { + ID uint + Level2ID sql.NullInt64 `sql:"index"` + Level2 *Level2 + } + ) + + DB.DropTableIfExists(new(Level4)) + DB.DropTableIfExists(new(Level3)) + DB.DropTableIfExists(new(Level2)) + DB.DropTableIfExists(new(Level1)) + + if err := DB.AutoMigrate(new(Level4), new(Level3), new(Level2), new(Level1)).Error; err != nil { + t.Error(err) + } + + want := new(Level1) + if err := DB.Save(want).Error; err != nil { + t.Error(err) + } + + got := new(Level1) + err := DB.Preload("Level2.Level3s.Level4").Last(&got).Error + if err != nil { + t.Error(err) + } + + if !reflect.DeepEqual(got, want) { + t.Errorf("got %s; want %s", toJSONString(got), toJSONString(want)) + } +} + +func TestPrefixedPreloadDuplication(t *testing.T) { + type ( + Level4 struct { + ID uint + Name string + Level3ID uint + } + Level3 struct { + ID uint + Name string + Level4s []*Level4 + } + Level2 struct { + ID uint + Name string + Level3ID sql.NullInt64 `sql:"index"` + Level3 *Level3 + } + Level1 struct { + ID uint + Name string + Level2ID sql.NullInt64 `sql:"index"` + Level2 *Level2 + } + ) + + DB.DropTableIfExists(new(Level3)) + DB.DropTableIfExists(new(Level4)) + DB.DropTableIfExists(new(Level2)) + DB.DropTableIfExists(new(Level1)) + + if err := DB.AutoMigrate(new(Level3), new(Level4), new(Level2), new(Level1)).Error; err != nil { + t.Error(err) + } + + lvl := &Level3{} + if err := DB.Save(lvl).Error; err != nil { + t.Error(err) + } + + sublvl1 := &Level4{Level3ID: lvl.ID} + if err := DB.Save(sublvl1).Error; err != nil { + t.Error(err) + } + sublvl2 := &Level4{Level3ID: lvl.ID} + if err := DB.Save(sublvl2).Error; err != nil { + t.Error(err) + } + + lvl.Level4s = []*Level4{sublvl1, sublvl2} + + want1 := Level1{ + Level2: &Level2{ + Level3: lvl, + }, + } + if err := DB.Save(&want1).Error; err != nil { + t.Error(err) + } + + want2 := Level1{ + Level2: &Level2{ + Level3: lvl, + }, + } + if err := DB.Save(&want2).Error; err != nil { + t.Error(err) + } + + want := []Level1{want1, want2} + + var got []Level1 + err := DB.Preload("Level2.Level3.Level4s").Find(&got).Error + if err != nil { + t.Error(err) + } + + if !reflect.DeepEqual(got, want) { + t.Errorf("got 
%s; want %s", toJSONString(got), toJSONString(want)) + } +} + +func TestPreloadManyToManyCallbacks(t *testing.T) { + type ( + Level2 struct { + ID uint + Name string + } + Level1 struct { + ID uint + Name string + Level2s []Level2 `gorm:"many2many:level1_level2s;AssociationForeignKey:ID;ForeignKey:ID"` + } + ) + + DB.DropTableIfExists("level1_level2s") + DB.DropTableIfExists(new(Level1)) + DB.DropTableIfExists(new(Level2)) + + if err := DB.AutoMigrate(new(Level1), new(Level2)).Error; err != nil { + t.Error(err) + } + + lvl := Level1{ + Name: "l1", + Level2s: []Level2{ + Level2{Name: "l2-1"}, Level2{Name: "l2-2"}, + }, + } + DB.Save(&lvl) + + called := 0 + + DB.Callback().Query().After("gorm:query").Register("TestPreloadManyToManyCallbacks", func(scope *gorm.Scope) { + called = called + 1 + }) + + DB.Preload("Level2s").First(&Level1{}, "id = ?", lvl.ID) + + if called != 3 { + t.Errorf("Wanted callback to be called 3 times but got %d", called) + } +} + +func toJSONString(v interface{}) []byte { + r, _ := json.MarshalIndent(v, "", " ") + return r +} diff --git a/vendor/github.com/jinzhu/gorm/query_test.go b/vendor/github.com/jinzhu/gorm/query_test.go new file mode 100644 index 0000000..fac7d4d --- /dev/null +++ b/vendor/github.com/jinzhu/gorm/query_test.go @@ -0,0 +1,773 @@ +package gorm_test + +import ( + "fmt" + "reflect" + + "github.com/jinzhu/gorm" + + "testing" + "time" +) + +func TestFirstAndLast(t *testing.T) { + DB.Save(&User{Name: "user1", Emails: []Email{{Email: "user1@example.com"}}}) + DB.Save(&User{Name: "user2", Emails: []Email{{Email: "user2@example.com"}}}) + + var user1, user2, user3, user4 User + DB.First(&user1) + DB.Order("id").Limit(1).Find(&user2) + + ptrOfUser3 := &user3 + DB.Last(&ptrOfUser3) + DB.Order("id desc").Limit(1).Find(&user4) + if user1.Id != user2.Id || user3.Id != user4.Id { + t.Errorf("First and Last should by order by primary key") + } + + var users []User + DB.First(&users) + if len(users) != 1 { + t.Errorf("Find first record as slice") + } + + var user User + if DB.Joins("left join emails on emails.user_id = users.id").First(&user).Error != nil { + t.Errorf("Should not raise any error when order with Join table") + } + + if user.Email != "" { + t.Errorf("User's Email should be blank as no one set it") + } +} + +func TestFirstAndLastWithNoStdPrimaryKey(t *testing.T) { + DB.Save(&Animal{Name: "animal1"}) + DB.Save(&Animal{Name: "animal2"}) + + var animal1, animal2, animal3, animal4 Animal + DB.First(&animal1) + DB.Order("counter").Limit(1).Find(&animal2) + + DB.Last(&animal3) + DB.Order("counter desc").Limit(1).Find(&animal4) + if animal1.Counter != animal2.Counter || animal3.Counter != animal4.Counter { + t.Errorf("First and Last should work correctly") + } +} + +func TestFirstAndLastWithRaw(t *testing.T) { + user1 := User{Name: "user", Emails: []Email{{Email: "user1@example.com"}}} + user2 := User{Name: "user", Emails: []Email{{Email: "user2@example.com"}}} + DB.Save(&user1) + DB.Save(&user2) + + var user3, user4 User + DB.Raw("select * from users WHERE name = ?", "user").First(&user3) + if user3.Id != user1.Id { + t.Errorf("Find first record with raw") + } + + DB.Raw("select * from users WHERE name = ?", "user").Last(&user4) + if user4.Id != user2.Id { + t.Errorf("Find last record with raw") + } +} + +func TestUIntPrimaryKey(t *testing.T) { + var animal Animal + DB.First(&animal, uint64(1)) + if animal.Counter != 1 { + t.Errorf("Fetch a record from with a non-int primary key should work, but failed") + } + + DB.Model(Animal{}).Where(Animal{Counter: 
uint64(2)}).Scan(&animal) + if animal.Counter != 2 { + t.Errorf("Fetch a record from with a non-int primary key should work, but failed") + } +} + +func TestCustomizedTypePrimaryKey(t *testing.T) { + type ID uint + type CustomizedTypePrimaryKey struct { + ID ID + Name string + } + + DB.AutoMigrate(&CustomizedTypePrimaryKey{}) + + p1 := CustomizedTypePrimaryKey{Name: "p1"} + p2 := CustomizedTypePrimaryKey{Name: "p2"} + p3 := CustomizedTypePrimaryKey{Name: "p3"} + DB.Create(&p1) + DB.Create(&p2) + DB.Create(&p3) + + var p CustomizedTypePrimaryKey + + if err := DB.First(&p, p2.ID).Error; err == nil { + t.Errorf("Should return error for invalid query condition") + } + + if err := DB.First(&p, "id = ?", p2.ID).Error; err != nil { + t.Errorf("No error should happen when querying with customized type for primary key, got err %v", err) + } + + if p.Name != "p2" { + t.Errorf("Should find correct value when querying with customized type for primary key") + } +} + +func TestStringPrimaryKeyForNumericValueStartingWithZero(t *testing.T) { + type AddressByZipCode struct { + ZipCode string `gorm:"primary_key"` + Address string + } + + DB.AutoMigrate(&AddressByZipCode{}) + DB.Create(&AddressByZipCode{ZipCode: "00501", Address: "Holtsville"}) + + var address AddressByZipCode + DB.First(&address, "00501") + if address.ZipCode != "00501" { + t.Errorf("Fetch a record from with a string primary key for a numeric value starting with zero should work, but failed, zip code is %v", address.ZipCode) + } +} + +func TestFindAsSliceOfPointers(t *testing.T) { + DB.Save(&User{Name: "user"}) + + var users []User + DB.Find(&users) + + var userPointers []*User + DB.Find(&userPointers) + + if len(users) == 0 || len(users) != len(userPointers) { + t.Errorf("Find slice of pointers") + } +} + +func TestSearchWithPlainSQL(t *testing.T) { + user1 := User{Name: "PlainSqlUser1", Age: 1, Birthday: parseTime("2000-1-1")} + user2 := User{Name: "PlainSqlUser2", Age: 10, Birthday: parseTime("2010-1-1")} + user3 := User{Name: "PlainSqlUser3", Age: 20, Birthday: parseTime("2020-1-1")} + DB.Save(&user1).Save(&user2).Save(&user3) + scopedb := DB.Where("name LIKE ?", "%PlainSqlUser%") + + if DB.Where("name = ?", user1.Name).First(&User{}).RecordNotFound() { + t.Errorf("Search with plain SQL") + } + + if DB.Where("name LIKE ?", "%"+user1.Name+"%").First(&User{}).RecordNotFound() { + t.Errorf("Search with plan SQL (regexp)") + } + + var users []User + DB.Find(&users, "name LIKE ? 
and age > ?", "%PlainSqlUser%", 1) + if len(users) != 2 { + t.Errorf("Should found 2 users that age > 1, but got %v", len(users)) + } + + DB.Where("name LIKE ?", "%PlainSqlUser%").Where("age >= ?", 1).Find(&users) + if len(users) != 3 { + t.Errorf("Should found 3 users that age >= 1, but got %v", len(users)) + } + + scopedb.Where("age <> ?", 20).Find(&users) + if len(users) != 2 { + t.Errorf("Should found 2 users age != 20, but got %v", len(users)) + } + + scopedb.Where("birthday > ?", parseTime("2000-1-1")).Find(&users) + if len(users) != 2 { + t.Errorf("Should found 2 users's birthday > 2000-1-1, but got %v", len(users)) + } + + scopedb.Where("birthday > ?", "2002-10-10").Find(&users) + if len(users) != 2 { + t.Errorf("Should found 2 users's birthday >= 2002-10-10, but got %v", len(users)) + } + + scopedb.Where("birthday >= ?", "2010-1-1").Where("birthday < ?", "2020-1-1").Find(&users) + if len(users) != 1 { + t.Errorf("Should found 1 users's birthday < 2020-1-1 and >= 2010-1-1, but got %v", len(users)) + } + + DB.Where("name in (?)", []string{user1.Name, user2.Name}).Find(&users) + if len(users) != 2 { + t.Errorf("Should found 2 users, but got %v", len(users)) + } + + DB.Where("id in (?)", []int64{user1.Id, user2.Id, user3.Id}).Find(&users) + if len(users) != 3 { + t.Errorf("Should found 3 users, but got %v", len(users)) + } + + DB.Where("id in (?)", user1.Id).Find(&users) + if len(users) != 1 { + t.Errorf("Should found 1 users, but got %v", len(users)) + } + + if err := DB.Where("id IN (?)", []string{}).Find(&users).Error; err != nil { + t.Error("no error should happen when query with empty slice, but got: ", err) + } + + if err := DB.Not("id IN (?)", []string{}).Find(&users).Error; err != nil { + t.Error("no error should happen when query with empty slice, but got: ", err) + } + + if DB.Where("name = ?", "none existing").Find(&[]User{}).RecordNotFound() { + t.Errorf("Should not get RecordNotFound error when looking for none existing records") + } +} + +func TestSearchWithTwoDimensionalArray(t *testing.T) { + var users []User + user1 := User{Name: "2DSearchUser1", Age: 1, Birthday: parseTime("2000-1-1")} + user2 := User{Name: "2DSearchUser2", Age: 10, Birthday: parseTime("2010-1-1")} + user3 := User{Name: "2DSearchUser3", Age: 20, Birthday: parseTime("2020-1-1")} + DB.Create(&user1) + DB.Create(&user2) + DB.Create(&user3) + + if dialect := DB.Dialect().GetName(); dialect == "mysql" || dialect == "postgres" { + if err := DB.Where("(name, age) IN (?)", [][]interface{}{{"2DSearchUser1", 1}, {"2DSearchUser2", 10}}).Find(&users).Error; err != nil { + t.Errorf("No error should happen when query with 2D array, but got %v", err) + + if len(users) != 2 { + t.Errorf("Should find 2 users with 2D array, but got %v", len(users)) + } + } + } + + if dialect := DB.Dialect().GetName(); dialect == "mssql" { + if err := DB.Joins("JOIN (VALUES ?) 
AS x (col1, col2) ON x.col1 = name AND x.col2 = age", [][]interface{}{{"2DSearchUser1", 1}, {"2DSearchUser2", 10}}).Find(&users).Error; err != nil { + t.Errorf("No error should happen when query with 2D array, but got %v", err) + + if len(users) != 2 { + t.Errorf("Should find 2 users with 2D array, but got %v", len(users)) + } + } + } +} + +func TestSearchWithStruct(t *testing.T) { + user1 := User{Name: "StructSearchUser1", Age: 1, Birthday: parseTime("2000-1-1")} + user2 := User{Name: "StructSearchUser2", Age: 10, Birthday: parseTime("2010-1-1")} + user3 := User{Name: "StructSearchUser3", Age: 20, Birthday: parseTime("2020-1-1")} + DB.Save(&user1).Save(&user2).Save(&user3) + + if DB.Where(user1.Id).First(&User{}).RecordNotFound() { + t.Errorf("Search with primary key") + } + + if DB.First(&User{}, user1.Id).RecordNotFound() { + t.Errorf("Search with primary key as inline condition") + } + + if DB.First(&User{}, fmt.Sprintf("%v", user1.Id)).RecordNotFound() { + t.Errorf("Search with primary key as inline condition") + } + + var users []User + DB.Where([]int64{user1.Id, user2.Id, user3.Id}).Find(&users) + if len(users) != 3 { + t.Errorf("Should found 3 users when search with primary keys, but got %v", len(users)) + } + + var user User + DB.First(&user, &User{Name: user1.Name}) + if user.Id == 0 || user.Name != user1.Name { + t.Errorf("Search first record with inline pointer of struct") + } + + DB.First(&user, User{Name: user1.Name}) + if user.Id == 0 || user.Name != user1.Name { + t.Errorf("Search first record with inline struct") + } + + DB.Where(&User{Name: user1.Name}).First(&user) + if user.Id == 0 || user.Name != user1.Name { + t.Errorf("Search first record with where struct") + } + + DB.Find(&users, &User{Name: user2.Name}) + if len(users) != 1 { + t.Errorf("Search all records with inline struct") + } +} + +func TestSearchWithMap(t *testing.T) { + companyID := 1 + user1 := User{Name: "MapSearchUser1", Age: 1, Birthday: parseTime("2000-1-1")} + user2 := User{Name: "MapSearchUser2", Age: 10, Birthday: parseTime("2010-1-1")} + user3 := User{Name: "MapSearchUser3", Age: 20, Birthday: parseTime("2020-1-1")} + user4 := User{Name: "MapSearchUser4", Age: 30, Birthday: parseTime("2020-1-1"), CompanyID: &companyID} + DB.Save(&user1).Save(&user2).Save(&user3).Save(&user4) + + var user User + DB.First(&user, map[string]interface{}{"name": user1.Name}) + if user.Id == 0 || user.Name != user1.Name { + t.Errorf("Search first record with inline map") + } + + user = User{} + DB.Where(map[string]interface{}{"name": user2.Name}).First(&user) + if user.Id == 0 || user.Name != user2.Name { + t.Errorf("Search first record with where map") + } + + var users []User + DB.Where(map[string]interface{}{"name": user3.Name}).Find(&users) + if len(users) != 1 { + t.Errorf("Search all records with inline map") + } + + DB.Find(&users, map[string]interface{}{"name": user3.Name}) + if len(users) != 1 { + t.Errorf("Search all records with inline map") + } + + DB.Find(&users, map[string]interface{}{"name": user4.Name, "company_id": nil}) + if len(users) != 0 { + t.Errorf("Search all records with inline map containing null value finding 0 records") + } + + DB.Find(&users, map[string]interface{}{"name": user1.Name, "company_id": nil}) + if len(users) != 1 { + t.Errorf("Search all records with inline map containing null value finding 1 record") + } + + DB.Find(&users, map[string]interface{}{"name": user4.Name, "company_id": companyID}) + if len(users) != 1 { + t.Errorf("Search all records with inline multiple value map") + 
} +} + +func TestSearchWithEmptyChain(t *testing.T) { + user1 := User{Name: "ChainSearchUser1", Age: 1, Birthday: parseTime("2000-1-1")} + user2 := User{Name: "ChainearchUser2", Age: 10, Birthday: parseTime("2010-1-1")} + user3 := User{Name: "ChainearchUser3", Age: 20, Birthday: parseTime("2020-1-1")} + DB.Save(&user1).Save(&user2).Save(&user3) + + if DB.Where("").Where("").First(&User{}).Error != nil { + t.Errorf("Should not raise any error if searching with empty strings") + } + + if DB.Where(&User{}).Where("name = ?", user1.Name).First(&User{}).Error != nil { + t.Errorf("Should not raise any error if searching with empty struct") + } + + if DB.Where(map[string]interface{}{}).Where("name = ?", user1.Name).First(&User{}).Error != nil { + t.Errorf("Should not raise any error if searching with empty map") + } +} + +func TestSelect(t *testing.T) { + user1 := User{Name: "SelectUser1"} + DB.Save(&user1) + + var user User + DB.Where("name = ?", user1.Name).Select("name").Find(&user) + if user.Id != 0 { + t.Errorf("Should not have ID because only selected name, %+v", user.Id) + } + + if user.Name != user1.Name { + t.Errorf("Should have user Name when selected it") + } +} + +func TestOrderAndPluck(t *testing.T) { + user1 := User{Name: "OrderPluckUser1", Age: 1} + user2 := User{Name: "OrderPluckUser2", Age: 10} + user3 := User{Name: "OrderPluckUser3", Age: 20} + DB.Save(&user1).Save(&user2).Save(&user3) + scopedb := DB.Model(&User{}).Where("name like ?", "%OrderPluckUser%") + + var user User + scopedb.Order(gorm.Expr("case when name = ? then 0 else 1 end", "OrderPluckUser2")).First(&user) + if user.Name != "OrderPluckUser2" { + t.Errorf("Order with sql expression") + } + + var ages []int64 + scopedb.Order("age desc").Pluck("age", &ages) + if ages[0] != 20 { + t.Errorf("The first age should be 20 when order with age desc") + } + + var ages1, ages2 []int64 + scopedb.Order("age desc").Pluck("age", &ages1).Pluck("age", &ages2) + if !reflect.DeepEqual(ages1, ages2) { + t.Errorf("The first order is the primary order") + } + + var ages3, ages4 []int64 + scopedb.Model(&User{}).Order("age desc").Pluck("age", &ages3).Order("age", true).Pluck("age", &ages4) + if reflect.DeepEqual(ages3, ages4) { + t.Errorf("Reorder should work") + } + + var names []string + var ages5 []int64 + scopedb.Model(User{}).Order("name").Order("age desc").Pluck("age", &ages5).Pluck("name", &names) + if names != nil && ages5 != nil { + if !(names[0] == user1.Name && names[1] == user2.Name && names[2] == user3.Name && ages5[2] == 20) { + t.Errorf("Order with multiple orders") + } + } else { + t.Errorf("Order with multiple orders") + } + + var ages6 []int64 + if err := scopedb.Order("").Pluck("age", &ages6).Error; err != nil { + t.Errorf("An empty string as order clause produces invalid queries") + } + + DB.Model(User{}).Select("name, age").Find(&[]User{}) +} + +func TestLimit(t *testing.T) { + user1 := User{Name: "LimitUser1", Age: 1} + user2 := User{Name: "LimitUser2", Age: 10} + user3 := User{Name: "LimitUser3", Age: 20} + user4 := User{Name: "LimitUser4", Age: 10} + user5 := User{Name: "LimitUser5", Age: 20} + DB.Save(&user1).Save(&user2).Save(&user3).Save(&user4).Save(&user5) + + var users1, users2, users3 []User + DB.Order("age desc").Limit(3).Find(&users1).Limit(5).Find(&users2).Limit(-1).Find(&users3) + + if len(users1) != 3 || len(users2) != 5 || len(users3) <= 5 { + t.Errorf("Limit should works") + } +} + +func TestOffset(t *testing.T) { + for i := 0; i < 20; i++ { + DB.Save(&User{Name: fmt.Sprintf("OffsetUser%v", i)}) + } + 
var users1, users2, users3, users4 []User + DB.Limit(100).Where("name like ?", "OffsetUser%").Order("age desc").Find(&users1).Offset(3).Find(&users2).Offset(5).Find(&users3).Offset(-1).Find(&users4) + + if (len(users1) != len(users4)) || (len(users1)-len(users2) != 3) || (len(users1)-len(users3) != 5) { + t.Errorf("Offset should work") + } +} + +func TestOr(t *testing.T) { + user1 := User{Name: "OrUser1", Age: 1} + user2 := User{Name: "OrUser2", Age: 10} + user3 := User{Name: "OrUser3", Age: 20} + DB.Save(&user1).Save(&user2).Save(&user3) + + var users []User + DB.Where("name = ?", user1.Name).Or("name = ?", user2.Name).Find(&users) + if len(users) != 2 { + t.Errorf("Find users with or") + } +} + +func TestCount(t *testing.T) { + user1 := User{Name: "CountUser1", Age: 1} + user2 := User{Name: "CountUser2", Age: 10} + user3 := User{Name: "CountUser3", Age: 20} + + DB.Save(&user1).Save(&user2).Save(&user3) + var count, count1, count2 int64 + var users []User + + if err := DB.Where("name = ?", user1.Name).Or("name = ?", user3.Name).Find(&users).Count(&count).Error; err != nil { + t.Errorf(fmt.Sprintf("Count should work, but got err %v", err)) + } + + if count != int64(len(users)) { + t.Errorf("Count() method should get correct value") + } + + DB.Model(&User{}).Where("name = ?", user1.Name).Count(&count1).Or("name in (?)", []string{user2.Name, user3.Name}).Count(&count2) + if count1 != 1 || count2 != 3 { + t.Errorf("Multiple count in chain") + } + + var count3 int + if err := DB.Model(&User{}).Where("name in (?)", []string{user2.Name, user2.Name, user3.Name}).Group("id").Count(&count3).Error; err != nil { + t.Errorf("Not error should happen, but got %v", err) + } + + if count3 != 2 { + t.Errorf("Should get correct count, but got %v", count3) + } +} + +func TestNot(t *testing.T) { + DB.Create(getPreparedUser("user1", "not")) + DB.Create(getPreparedUser("user2", "not")) + DB.Create(getPreparedUser("user3", "not")) + + user4 := getPreparedUser("user4", "not") + user4.Company = Company{} + DB.Create(user4) + + DB := DB.Where("role = ?", "not") + + var users1, users2, users3, users4, users5, users6, users7, users8, users9 []User + if DB.Find(&users1).RowsAffected != 4 { + t.Errorf("should find 4 not users") + } + DB.Not(users1[0].Id).Find(&users2) + + if len(users1)-len(users2) != 1 { + t.Errorf("Should ignore the first users with Not") + } + + DB.Not([]int{}).Find(&users3) + if len(users1)-len(users3) != 0 { + t.Errorf("Should find all users with a blank condition") + } + + var name3Count int64 + DB.Table("users").Where("name = ?", "user3").Count(&name3Count) + DB.Not("name", "user3").Find(&users4) + if len(users1)-len(users4) != int(name3Count) { + t.Errorf("Should find all users's name not equal 3") + } + + DB.Not("name = ?", "user3").Find(&users4) + if len(users1)-len(users4) != int(name3Count) { + t.Errorf("Should find all users's name not equal 3") + } + + DB.Not("name <> ?", "user3").Find(&users4) + if len(users4) != int(name3Count) { + t.Errorf("Should find all users's name not equal 3") + } + + DB.Not(User{Name: "user3"}).Find(&users5) + + if len(users1)-len(users5) != int(name3Count) { + t.Errorf("Should find all users's name not equal 3") + } + + DB.Not(map[string]interface{}{"name": "user3"}).Find(&users6) + if len(users1)-len(users6) != int(name3Count) { + t.Errorf("Should find all users's name not equal 3") + } + + DB.Not(map[string]interface{}{"name": "user3", "company_id": nil}).Find(&users7) + if len(users1)-len(users7) != 2 { // not user3 or user4 + t.Errorf("Should find all 
user's name not equal to 3 who do not have company id") + } + + DB.Not("name", []string{"user3"}).Find(&users8) + if len(users1)-len(users8) != int(name3Count) { + t.Errorf("Should find all users's name not equal 3") + } + + var name2Count int64 + DB.Table("users").Where("name = ?", "user2").Count(&name2Count) + DB.Not("name", []string{"user3", "user2"}).Find(&users9) + if len(users1)-len(users9) != (int(name3Count) + int(name2Count)) { + t.Errorf("Should find all users's name not equal 3") + } +} + +func TestFillSmallerStruct(t *testing.T) { + user1 := User{Name: "SmallerUser", Age: 100} + DB.Save(&user1) + type SimpleUser struct { + Name string + Id int64 + UpdatedAt time.Time + CreatedAt time.Time + } + + var simpleUser SimpleUser + DB.Table("users").Where("name = ?", user1.Name).First(&simpleUser) + + if simpleUser.Id == 0 || simpleUser.Name == "" { + t.Errorf("Should fill data correctly into smaller struct") + } +} + +func TestFindOrInitialize(t *testing.T) { + var user1, user2, user3, user4, user5, user6 User + DB.Where(&User{Name: "find or init", Age: 33}).FirstOrInit(&user1) + if user1.Name != "find or init" || user1.Id != 0 || user1.Age != 33 { + t.Errorf("user should be initialized with search value") + } + + DB.Where(User{Name: "find or init", Age: 33}).FirstOrInit(&user2) + if user2.Name != "find or init" || user2.Id != 0 || user2.Age != 33 { + t.Errorf("user should be initialized with search value") + } + + DB.FirstOrInit(&user3, map[string]interface{}{"name": "find or init 2"}) + if user3.Name != "find or init 2" || user3.Id != 0 { + t.Errorf("user should be initialized with inline search value") + } + + DB.Where(&User{Name: "find or init"}).Attrs(User{Age: 44}).FirstOrInit(&user4) + if user4.Name != "find or init" || user4.Id != 0 || user4.Age != 44 { + t.Errorf("user should be initialized with search value and attrs") + } + + DB.Where(&User{Name: "find or init"}).Assign("age", 44).FirstOrInit(&user4) + if user4.Name != "find or init" || user4.Id != 0 || user4.Age != 44 { + t.Errorf("user should be initialized with search value and assign attrs") + } + + DB.Save(&User{Name: "find or init", Age: 33}) + DB.Where(&User{Name: "find or init"}).Attrs("age", 44).FirstOrInit(&user5) + if user5.Name != "find or init" || user5.Id == 0 || user5.Age != 33 { + t.Errorf("user should be found and not initialized by Attrs") + } + + DB.Where(&User{Name: "find or init", Age: 33}).FirstOrInit(&user6) + if user6.Name != "find or init" || user6.Id == 0 || user6.Age != 33 { + t.Errorf("user should be found with FirstOrInit") + } + + DB.Where(&User{Name: "find or init"}).Assign(User{Age: 44}).FirstOrInit(&user6) + if user6.Name != "find or init" || user6.Id == 0 || user6.Age != 44 { + t.Errorf("user should be found and updated with assigned attrs") + } +} + +func TestFindOrCreate(t *testing.T) { + var user1, user2, user3, user4, user5, user6, user7, user8 User + DB.Where(&User{Name: "find or create", Age: 33}).FirstOrCreate(&user1) + if user1.Name != "find or create" || user1.Id == 0 || user1.Age != 33 { + t.Errorf("user should be created with search value") + } + + DB.Where(&User{Name: "find or create", Age: 33}).FirstOrCreate(&user2) + if user1.Id != user2.Id || user2.Name != "find or create" || user2.Id == 0 || user2.Age != 33 { + t.Errorf("user should be created with search value") + } + + DB.FirstOrCreate(&user3, map[string]interface{}{"name": "find or create 2"}) + if user3.Name != "find or create 2" || user3.Id == 0 { + t.Errorf("user should be created with inline search value") + } + + 
DB.Where(&User{Name: "find or create 3"}).Attrs("age", 44).FirstOrCreate(&user4) + if user4.Name != "find or create 3" || user4.Id == 0 || user4.Age != 44 { + t.Errorf("user should be created with search value and attrs") + } + + updatedAt1 := user4.UpdatedAt + DB.Where(&User{Name: "find or create 3"}).Assign("age", 55).FirstOrCreate(&user4) + if updatedAt1.Format(time.RFC3339Nano) == user4.UpdatedAt.Format(time.RFC3339Nano) { + t.Errorf("UpdateAt should be changed when update values with assign") + } + + DB.Where(&User{Name: "find or create 4"}).Assign(User{Age: 44}).FirstOrCreate(&user4) + if user4.Name != "find or create 4" || user4.Id == 0 || user4.Age != 44 { + t.Errorf("user should be created with search value and assigned attrs") + } + + DB.Where(&User{Name: "find or create"}).Attrs("age", 44).FirstOrInit(&user5) + if user5.Name != "find or create" || user5.Id == 0 || user5.Age != 33 { + t.Errorf("user should be found and not initialized by Attrs") + } + + DB.Where(&User{Name: "find or create"}).Assign(User{Age: 44}).FirstOrCreate(&user6) + if user6.Name != "find or create" || user6.Id == 0 || user6.Age != 44 { + t.Errorf("user should be found and updated with assigned attrs") + } + + DB.Where(&User{Name: "find or create"}).Find(&user7) + if user7.Name != "find or create" || user7.Id == 0 || user7.Age != 44 { + t.Errorf("user should be found and updated with assigned attrs") + } + + DB.Where(&User{Name: "find or create embedded struct"}).Assign(User{Age: 44, CreditCard: CreditCard{Number: "1231231231"}, Emails: []Email{{Email: "jinzhu@assign_embedded_struct.com"}, {Email: "jinzhu-2@assign_embedded_struct.com"}}}).FirstOrCreate(&user8) + if DB.Where("email = ?", "jinzhu-2@assign_embedded_struct.com").First(&Email{}).RecordNotFound() { + t.Errorf("embedded struct email should be saved") + } + + if DB.Where("email = ?", "1231231231").First(&CreditCard{}).RecordNotFound() { + t.Errorf("embedded struct credit card should be saved") + } +} + +func TestSelectWithEscapedFieldName(t *testing.T) { + user1 := User{Name: "EscapedFieldNameUser", Age: 1} + user2 := User{Name: "EscapedFieldNameUser", Age: 10} + user3 := User{Name: "EscapedFieldNameUser", Age: 20} + DB.Save(&user1).Save(&user2).Save(&user3) + + var names []string + DB.Model(User{}).Where(&User{Name: "EscapedFieldNameUser"}).Pluck("\"name\"", &names) + + if len(names) != 3 { + t.Errorf("Expected 3 name, but got: %d", len(names)) + } +} + +func TestSelectWithVariables(t *testing.T) { + DB.Save(&User{Name: "jinzhu"}) + + rows, _ := DB.Table("users").Select("? 
as fake", gorm.Expr("name")).Rows() + + if !rows.Next() { + t.Errorf("Should have returned at least one row") + } else { + columns, _ := rows.Columns() + if !reflect.DeepEqual(columns, []string{"fake"}) { + t.Errorf("Should only contains one column") + } + } + + rows.Close() +} + +func TestSelectWithArrayInput(t *testing.T) { + DB.Save(&User{Name: "jinzhu", Age: 42}) + + var user User + DB.Select([]string{"name", "age"}).Where("age = 42 AND name = 'jinzhu'").First(&user) + + if user.Name != "jinzhu" || user.Age != 42 { + t.Errorf("Should have selected both age and name") + } +} + +func TestPluckWithSelect(t *testing.T) { + var ( + user = User{Name: "matematik7_pluck_with_select", Age: 25} + combinedName = fmt.Sprintf("%v%v", user.Name, user.Age) + combineUserAgeSQL = fmt.Sprintf("concat(%v, %v)", DB.Dialect().Quote("name"), DB.Dialect().Quote("age")) + ) + + if dialect := DB.Dialect().GetName(); dialect == "sqlite3" { + combineUserAgeSQL = fmt.Sprintf("(%v || %v)", DB.Dialect().Quote("name"), DB.Dialect().Quote("age")) + } + + DB.Save(&user) + + selectStr := combineUserAgeSQL + " as user_age" + var userAges []string + err := DB.Model(&User{}).Where("age = ?", 25).Select(selectStr).Pluck("user_age", &userAges).Error + if err != nil { + t.Error(err) + } + + if len(userAges) != 1 || userAges[0] != combinedName { + t.Errorf("Should correctly pluck with select, got: %s", userAges) + } + + selectStr = combineUserAgeSQL + fmt.Sprintf(" as %v", DB.Dialect().Quote("user_age")) + userAges = userAges[:0] + err = DB.Model(&User{}).Where("age = ?", 25).Select(selectStr).Pluck("user_age", &userAges).Error + if err != nil { + t.Error(err) + } + + if len(userAges) != 1 || userAges[0] != combinedName { + t.Errorf("Should correctly pluck with select, got: %s", userAges) + } +} diff --git a/vendor/github.com/jinzhu/gorm/scaner_test.go b/vendor/github.com/jinzhu/gorm/scaner_test.go new file mode 100644 index 0000000..9e251dd --- /dev/null +++ b/vendor/github.com/jinzhu/gorm/scaner_test.go @@ -0,0 +1,139 @@ +package gorm_test + +import ( + "database/sql/driver" + "encoding/json" + "errors" + "testing" + + "github.com/jinzhu/gorm" +) + +func TestScannableSlices(t *testing.T) { + if err := DB.AutoMigrate(&RecordWithSlice{}).Error; err != nil { + t.Errorf("Should create table with slice values correctly: %s", err) + } + + r1 := RecordWithSlice{ + Strings: ExampleStringSlice{"a", "b", "c"}, + Structs: ExampleStructSlice{ + {"name1", "value1"}, + {"name2", "value2"}, + }, + } + + if err := DB.Save(&r1).Error; err != nil { + t.Errorf("Should save record with slice values") + } + + var r2 RecordWithSlice + + if err := DB.Find(&r2).Error; err != nil { + t.Errorf("Should fetch record with slice values") + } + + if len(r2.Strings) != 3 || r2.Strings[0] != "a" || r2.Strings[1] != "b" || r2.Strings[2] != "c" { + t.Errorf("Should have serialised and deserialised a string array") + } + + if len(r2.Structs) != 2 || r2.Structs[0].Name != "name1" || r2.Structs[0].Value != "value1" || r2.Structs[1].Name != "name2" || r2.Structs[1].Value != "value2" { + t.Errorf("Should have serialised and deserialised a struct array") + } +} + +type RecordWithSlice struct { + ID uint64 + Strings ExampleStringSlice `sql:"type:text"` + Structs ExampleStructSlice `sql:"type:text"` +} + +type ExampleStringSlice []string + +func (l ExampleStringSlice) Value() (driver.Value, error) { + bytes, err := json.Marshal(l) + return string(bytes), err +} + +func (l *ExampleStringSlice) Scan(input interface{}) error { + switch value := input.(type) { + case 
string: + return json.Unmarshal([]byte(value), l) + case []byte: + return json.Unmarshal(value, l) + default: + return errors.New("not supported") + } +} + +type ExampleStruct struct { + Name string + Value string +} + +type ExampleStructSlice []ExampleStruct + +func (l ExampleStructSlice) Value() (driver.Value, error) { + bytes, err := json.Marshal(l) + return string(bytes), err +} + +func (l *ExampleStructSlice) Scan(input interface{}) error { + switch value := input.(type) { + case string: + return json.Unmarshal([]byte(value), l) + case []byte: + return json.Unmarshal(value, l) + default: + return errors.New("not supported") + } +} + +type ScannerDataType struct { + Street string `sql:"TYPE:varchar(24)"` +} + +func (ScannerDataType) Value() (driver.Value, error) { + return nil, nil +} + +func (*ScannerDataType) Scan(input interface{}) error { + return nil +} + +type ScannerDataTypeTestStruct struct { + Field1 int + ScannerDataType *ScannerDataType `sql:"TYPE:json"` +} + +type ScannerDataType2 struct { + Street string `sql:"TYPE:varchar(24)"` +} + +func (ScannerDataType2) Value() (driver.Value, error) { + return nil, nil +} + +func (*ScannerDataType2) Scan(input interface{}) error { + return nil +} + +type ScannerDataTypeTestStruct2 struct { + Field1 int + ScannerDataType *ScannerDataType2 +} + +func TestScannerDataType(t *testing.T) { + scope := gorm.Scope{Value: &ScannerDataTypeTestStruct{}} + if field, ok := scope.FieldByName("ScannerDataType"); ok { + if DB.Dialect().DataTypeOf(field.StructField) != "json" { + t.Errorf("data type for scanner is wrong") + } + } + + scope = gorm.Scope{Value: &ScannerDataTypeTestStruct2{}} + if field, ok := scope.FieldByName("ScannerDataType"); ok { + if DB.Dialect().DataTypeOf(field.StructField) != "varchar(24)" { + t.Errorf("data type for scanner is wrong") + } + } +} diff --git a/vendor/github.com/jinzhu/gorm/scope.go b/vendor/github.com/jinzhu/gorm/scope.go new file mode 100644 index 0000000..150ac71 --- /dev/null +++ b/vendor/github.com/jinzhu/gorm/scope.go @@ -0,0 +1,1381 @@ +package gorm + +import ( + "bytes" + "database/sql" + "database/sql/driver" + "errors" + "fmt" + "reflect" + "regexp" + "strings" + "time" +) + +// Scope contain current operation's information when you perform any operation on the database +type Scope struct { + Search *search + Value interface{} + SQL string + SQLVars []interface{} + db *DB + instanceID string + primaryKeyField *Field + skipLeft bool + fields *[]*Field + selectAttrs *[]string +} + +// IndirectValue return scope's reflect value's indirect value +func (scope *Scope) IndirectValue() reflect.Value { + return indirect(reflect.ValueOf(scope.Value)) +} + +// New create a new Scope without search information +func (scope *Scope) New(value interface{}) *Scope { + return &Scope{db: scope.NewDB(), Search: &search{}, Value: value} +} + +//////////////////////////////////////////////////////////////////////////////// +// Scope DB +//////////////////////////////////////////////////////////////////////////////// + +// DB return scope's DB connection +func (scope *Scope) DB() *DB { + return scope.db +} + +// NewDB create a new DB without search information +func (scope *Scope) NewDB() *DB { + if scope.db != nil { + db := scope.db.clone() + db.search = nil + db.Value = nil + return db + } + return nil +} + +// SQLDB return *sql.DB +func (scope *Scope) SQLDB() SQLCommon { + return scope.db.db +} + +// Dialect get dialect +func (scope *Scope) Dialect() Dialect { + return scope.db.parent.dialect +} + +// Quote used to quote 
string to escape them for database +func (scope *Scope) Quote(str string) string { + if strings.Index(str, ".") != -1 { + newStrs := []string{} + for _, str := range strings.Split(str, ".") { + newStrs = append(newStrs, scope.Dialect().Quote(str)) + } + return strings.Join(newStrs, ".") + } + + return scope.Dialect().Quote(str) +} + +// Err add error to Scope +func (scope *Scope) Err(err error) error { + if err != nil { + scope.db.AddError(err) + } + return err +} + +// HasError check if there are any error +func (scope *Scope) HasError() bool { + return scope.db.Error != nil +} + +// Log print log message +func (scope *Scope) Log(v ...interface{}) { + scope.db.log(v...) +} + +// SkipLeft skip remaining callbacks +func (scope *Scope) SkipLeft() { + scope.skipLeft = true +} + +// Fields get value's fields +func (scope *Scope) Fields() []*Field { + if scope.fields == nil { + var ( + fields []*Field + indirectScopeValue = scope.IndirectValue() + isStruct = indirectScopeValue.Kind() == reflect.Struct + ) + + for _, structField := range scope.GetModelStruct().StructFields { + if isStruct { + fieldValue := indirectScopeValue + for _, name := range structField.Names { + if fieldValue.Kind() == reflect.Ptr && fieldValue.IsNil() { + fieldValue.Set(reflect.New(fieldValue.Type().Elem())) + } + fieldValue = reflect.Indirect(fieldValue).FieldByName(name) + } + fields = append(fields, &Field{StructField: structField, Field: fieldValue, IsBlank: isBlank(fieldValue)}) + } else { + fields = append(fields, &Field{StructField: structField, IsBlank: true}) + } + } + scope.fields = &fields + } + + return *scope.fields +} + +// FieldByName find `gorm.Field` with field name or db name +func (scope *Scope) FieldByName(name string) (field *Field, ok bool) { + var ( + dbName = ToDBName(name) + mostMatchedField *Field + ) + + for _, field := range scope.Fields() { + if field.Name == name || field.DBName == name { + return field, true + } + if field.DBName == dbName { + mostMatchedField = field + } + } + return mostMatchedField, mostMatchedField != nil +} + +// PrimaryFields return scope's primary fields +func (scope *Scope) PrimaryFields() (fields []*Field) { + for _, field := range scope.Fields() { + if field.IsPrimaryKey { + fields = append(fields, field) + } + } + return fields +} + +// PrimaryField return scope's main primary field, if defined more that one primary fields, will return the one having column name `id` or the first one +func (scope *Scope) PrimaryField() *Field { + if primaryFields := scope.GetModelStruct().PrimaryFields; len(primaryFields) > 0 { + if len(primaryFields) > 1 { + if field, ok := scope.FieldByName("id"); ok { + return field + } + } + return scope.PrimaryFields()[0] + } + return nil +} + +// PrimaryKey get main primary field's db name +func (scope *Scope) PrimaryKey() string { + if field := scope.PrimaryField(); field != nil { + return field.DBName + } + return "" +} + +// PrimaryKeyZero check main primary field's value is blank or not +func (scope *Scope) PrimaryKeyZero() bool { + field := scope.PrimaryField() + return field == nil || field.IsBlank +} + +// PrimaryKeyValue get the primary key's value +func (scope *Scope) PrimaryKeyValue() interface{} { + if field := scope.PrimaryField(); field != nil && field.Field.IsValid() { + return field.Field.Interface() + } + return 0 +} + +// HasColumn to check if has column +func (scope *Scope) HasColumn(column string) bool { + for _, field := range scope.GetStructFields() { + if field.IsNormal && (field.Name == column || field.DBName == 
column) { + return true + } + } + return false +} + +// SetColumn to set the column's value, column could be field or field's name/dbname +func (scope *Scope) SetColumn(column interface{}, value interface{}) error { + var updateAttrs = map[string]interface{}{} + if attrs, ok := scope.InstanceGet("gorm:update_attrs"); ok { + updateAttrs = attrs.(map[string]interface{}) + defer scope.InstanceSet("gorm:update_attrs", updateAttrs) + } + + if field, ok := column.(*Field); ok { + updateAttrs[field.DBName] = value + return field.Set(value) + } else if name, ok := column.(string); ok { + var ( + dbName = ToDBName(name) + mostMatchedField *Field + ) + for _, field := range scope.Fields() { + if field.DBName == value { + updateAttrs[field.DBName] = value + return field.Set(value) + } + if (field.DBName == dbName) || (field.Name == name && mostMatchedField == nil) { + mostMatchedField = field + } + } + + if mostMatchedField != nil { + updateAttrs[mostMatchedField.DBName] = value + return mostMatchedField.Set(value) + } + } + return errors.New("could not convert column to field") +} + +// CallMethod call scope value's method, if it is a slice, will call its element's method one by one +func (scope *Scope) CallMethod(methodName string) { + if scope.Value == nil { + return + } + + if indirectScopeValue := scope.IndirectValue(); indirectScopeValue.Kind() == reflect.Slice { + for i := 0; i < indirectScopeValue.Len(); i++ { + scope.callMethod(methodName, indirectScopeValue.Index(i)) + } + } else { + scope.callMethod(methodName, indirectScopeValue) + } +} + +// AddToVars add value as sql's vars, used to prevent SQL injection +func (scope *Scope) AddToVars(value interface{}) string { + _, skipBindVar := scope.InstanceGet("skip_bindvar") + + if expr, ok := value.(*expr); ok { + exp := expr.expr + for _, arg := range expr.args { + if skipBindVar { + scope.AddToVars(arg) + } else { + exp = strings.Replace(exp, "?", scope.AddToVars(arg), 1) + } + } + return exp + } + + scope.SQLVars = append(scope.SQLVars, value) + + if skipBindVar { + return "?" + } + return scope.Dialect().BindVar(len(scope.SQLVars)) +} + +// SelectAttrs return selected attributes +func (scope *Scope) SelectAttrs() []string { + if scope.selectAttrs == nil { + attrs := []string{} + for _, value := range scope.Search.selects { + if str, ok := value.(string); ok { + attrs = append(attrs, str) + } else if strs, ok := value.([]string); ok { + attrs = append(attrs, strs...) 
+ } else if strs, ok := value.([]interface{}); ok { + for _, str := range strs { + attrs = append(attrs, fmt.Sprintf("%v", str)) + } + } + } + scope.selectAttrs = &attrs + } + return *scope.selectAttrs +} + +// OmitAttrs return omitted attributes +func (scope *Scope) OmitAttrs() []string { + return scope.Search.omits +} + +type tabler interface { + TableName() string +} + +type dbTabler interface { + TableName(*DB) string +} + +// TableName return table name +func (scope *Scope) TableName() string { + if scope.Search != nil && len(scope.Search.tableName) > 0 { + return scope.Search.tableName + } + + if tabler, ok := scope.Value.(tabler); ok { + return tabler.TableName() + } + + if tabler, ok := scope.Value.(dbTabler); ok { + return tabler.TableName(scope.db) + } + + return scope.GetModelStruct().TableName(scope.db.Model(scope.Value)) +} + +// QuotedTableName return quoted table name +func (scope *Scope) QuotedTableName() (name string) { + if scope.Search != nil && len(scope.Search.tableName) > 0 { + if strings.Index(scope.Search.tableName, " ") != -1 { + return scope.Search.tableName + } + return scope.Quote(scope.Search.tableName) + } + + return scope.Quote(scope.TableName()) +} + +// CombinedConditionSql return combined condition sql +func (scope *Scope) CombinedConditionSql() string { + joinSQL := scope.joinsSQL() + whereSQL := scope.whereSQL() + if scope.Search.raw { + whereSQL = strings.TrimSuffix(strings.TrimPrefix(whereSQL, "WHERE ("), ")") + } + return joinSQL + whereSQL + scope.groupSQL() + + scope.havingSQL() + scope.orderSQL() + scope.limitAndOffsetSQL() +} + +// Raw set raw sql +func (scope *Scope) Raw(sql string) *Scope { + scope.SQL = strings.Replace(sql, "$$$", "?", -1) + return scope +} + +// Exec perform generated SQL +func (scope *Scope) Exec() *Scope { + defer scope.trace(NowFunc()) + + if !scope.HasError() { + if result, err := scope.SQLDB().Exec(scope.SQL, scope.SQLVars...); scope.Err(err) == nil { + if count, err := result.RowsAffected(); scope.Err(err) == nil { + scope.db.RowsAffected = count + } + } + } + return scope +} + +// Set set value by name +func (scope *Scope) Set(name string, value interface{}) *Scope { + scope.db.InstantSet(name, value) + return scope +} + +// Get get setting by name +func (scope *Scope) Get(name string) (interface{}, bool) { + return scope.db.Get(name) +} + +// InstanceID get InstanceID for scope +func (scope *Scope) InstanceID() string { + if scope.instanceID == "" { + scope.instanceID = fmt.Sprintf("%v%v", &scope, &scope.db) + } + return scope.instanceID +} + +// InstanceSet set instance setting for current operation, but not for operations in callbacks, like saving associations callback +func (scope *Scope) InstanceSet(name string, value interface{}) *Scope { + return scope.Set(name+scope.InstanceID(), value) +} + +// InstanceGet get instance setting from current operation +func (scope *Scope) InstanceGet(name string) (interface{}, bool) { + return scope.Get(name + scope.InstanceID()) +} + +// Begin start a transaction +func (scope *Scope) Begin() *Scope { + if db, ok := scope.SQLDB().(sqlDb); ok { + if tx, err := db.Begin(); err == nil { + scope.db.db = interface{}(tx).(SQLCommon) + scope.InstanceSet("gorm:started_transaction", true) + } + } + return scope +} + +// CommitOrRollback commit current transaction if no error happened, otherwise will rollback it +func (scope *Scope) CommitOrRollback() *Scope { + if _, ok := scope.InstanceGet("gorm:started_transaction"); ok { + if db, ok := scope.db.db.(sqlTx); ok { + if scope.HasError() { 
+ db.Rollback() + } else { + scope.Err(db.Commit()) + } + scope.db.db = scope.db.parent.db + } + } + return scope +} + +//////////////////////////////////////////////////////////////////////////////// +// Private Methods For *gorm.Scope +//////////////////////////////////////////////////////////////////////////////// + +func (scope *Scope) callMethod(methodName string, reflectValue reflect.Value) { + // Only get address from non-pointer + if reflectValue.CanAddr() && reflectValue.Kind() != reflect.Ptr { + reflectValue = reflectValue.Addr() + } + + if methodValue := reflectValue.MethodByName(methodName); methodValue.IsValid() { + switch method := methodValue.Interface().(type) { + case func(): + method() + case func(*Scope): + method(scope) + case func(*DB): + newDB := scope.NewDB() + method(newDB) + scope.Err(newDB.Error) + case func() error: + scope.Err(method()) + case func(*Scope) error: + scope.Err(method(scope)) + case func(*DB) error: + newDB := scope.NewDB() + scope.Err(method(newDB)) + scope.Err(newDB.Error) + default: + scope.Err(fmt.Errorf("unsupported function %v", methodName)) + } + } +} + +var ( + columnRegexp = regexp.MustCompile("^[a-zA-Z\\d]+(\\.[a-zA-Z\\d]+)*$") // only match string like `name`, `users.name` + isNumberRegexp = regexp.MustCompile("^\\s*\\d+\\s*$") // match if string is number + comparisonRegexp = regexp.MustCompile("(?i) (=|<>|(>|<)(=?)|LIKE|IS|IN) ") + countingQueryRegexp = regexp.MustCompile("(?i)^count(.+)$") +) + +func (scope *Scope) quoteIfPossible(str string) string { + if columnRegexp.MatchString(str) { + return scope.Quote(str) + } + return str +} + +func (scope *Scope) scan(rows *sql.Rows, columns []string, fields []*Field) { + var ( + ignored interface{} + values = make([]interface{}, len(columns)) + selectFields []*Field + selectedColumnsMap = map[string]int{} + resetFields = map[int]*Field{} + ) + + for index, column := range columns { + values[index] = &ignored + + selectFields = fields + if idx, ok := selectedColumnsMap[column]; ok { + selectFields = selectFields[idx+1:] + } + + for fieldIndex, field := range selectFields { + if field.DBName == column { + if field.Field.Kind() == reflect.Ptr { + values[index] = field.Field.Addr().Interface() + } else { + reflectValue := reflect.New(reflect.PtrTo(field.Struct.Type)) + reflectValue.Elem().Set(field.Field.Addr()) + values[index] = reflectValue.Interface() + resetFields[index] = field + } + + selectedColumnsMap[column] = fieldIndex + + if field.IsNormal { + break + } + } + } + } + + scope.Err(rows.Scan(values...)) + + for index, field := range resetFields { + if v := reflect.ValueOf(values[index]).Elem().Elem(); v.IsValid() { + field.Field.Set(v) + } + } +} + +func (scope *Scope) primaryCondition(value interface{}) string { + return fmt.Sprintf("(%v.%v = %v)", scope.QuotedTableName(), scope.Quote(scope.PrimaryKey()), value) +} + +func (scope *Scope) buildCondition(clause map[string]interface{}, include bool) (str string) { + var ( + quotedTableName = scope.QuotedTableName() + quotedPrimaryKey = scope.Quote(scope.PrimaryKey()) + equalSQL = "=" + inSQL = "IN" + ) + + // If building not conditions + if !include { + equalSQL = "<>" + inSQL = "NOT IN" + } + + switch value := clause["query"].(type) { + case sql.NullInt64: + return fmt.Sprintf("(%v.%v %s %v)", quotedTableName, quotedPrimaryKey, equalSQL, value.Int64) + case int, int8, int16, int32, int64, uint, uint8, uint16, uint32, uint64: + return fmt.Sprintf("(%v.%v %s %v)", quotedTableName, quotedPrimaryKey, equalSQL, value) + case []int, []int8, 
[]int16, []int32, []int64, []uint, []uint8, []uint16, []uint32, []uint64, []string, []interface{}: + if !include && reflect.ValueOf(value).Len() == 0 { + return + } + str = fmt.Sprintf("(%v.%v %s (?))", quotedTableName, quotedPrimaryKey, inSQL) + clause["args"] = []interface{}{value} + case string: + if isNumberRegexp.MatchString(value) { + return fmt.Sprintf("(%v.%v %s %v)", quotedTableName, quotedPrimaryKey, equalSQL, scope.AddToVars(value)) + } + + if value != "" { + if !include { + if comparisonRegexp.MatchString(value) { + str = fmt.Sprintf("NOT (%v)", value) + } else { + str = fmt.Sprintf("(%v.%v NOT IN (?))", quotedTableName, scope.Quote(value)) + } + } else { + str = fmt.Sprintf("(%v)", value) + } + } + case map[string]interface{}: + var sqls []string + for key, value := range value { + if value != nil { + sqls = append(sqls, fmt.Sprintf("(%v.%v %s %v)", quotedTableName, scope.Quote(key), equalSQL, scope.AddToVars(value))) + } else { + if !include { + sqls = append(sqls, fmt.Sprintf("(%v.%v IS NOT NULL)", quotedTableName, scope.Quote(key))) + } else { + sqls = append(sqls, fmt.Sprintf("(%v.%v IS NULL)", quotedTableName, scope.Quote(key))) + } + } + } + return strings.Join(sqls, " AND ") + case interface{}: + var sqls []string + newScope := scope.New(value) + + if len(newScope.Fields()) == 0 { + scope.Err(fmt.Errorf("invalid query condition: %v", value)) + return + } + + for _, field := range newScope.Fields() { + if !field.IsIgnored && !field.IsBlank { + sqls = append(sqls, fmt.Sprintf("(%v.%v %s %v)", quotedTableName, scope.Quote(field.DBName), equalSQL, scope.AddToVars(field.Field.Interface()))) + } + } + return strings.Join(sqls, " AND ") + default: + scope.Err(fmt.Errorf("invalid query condition: %v", value)) + return + } + + replacements := []string{} + args := clause["args"].([]interface{}) + for _, arg := range args { + var err error + switch reflect.ValueOf(arg).Kind() { + case reflect.Slice: // For where("id in (?)", []int64{1,2}) + if scanner, ok := interface{}(arg).(driver.Valuer); ok { + arg, err = scanner.Value() + replacements = append(replacements, scope.AddToVars(arg)) + } else if b, ok := arg.([]byte); ok { + replacements = append(replacements, scope.AddToVars(b)) + } else if as, ok := arg.([][]interface{}); ok { + var tempMarks []string + for _, a := range as { + var arrayMarks []string + for _, v := range a { + arrayMarks = append(arrayMarks, scope.AddToVars(v)) + } + + if len(arrayMarks) > 0 { + tempMarks = append(tempMarks, fmt.Sprintf("(%v)", strings.Join(arrayMarks, ","))) + } + } + + if len(tempMarks) > 0 { + replacements = append(replacements, strings.Join(tempMarks, ",")) + } + } else if values := reflect.ValueOf(arg); values.Len() > 0 { + var tempMarks []string + for i := 0; i < values.Len(); i++ { + tempMarks = append(tempMarks, scope.AddToVars(values.Index(i).Interface())) + } + replacements = append(replacements, strings.Join(tempMarks, ",")) + } else { + replacements = append(replacements, scope.AddToVars(Expr("NULL"))) + } + default: + if valuer, ok := interface{}(arg).(driver.Valuer); ok { + arg, err = valuer.Value() + } + + replacements = append(replacements, scope.AddToVars(arg)) + } + + if err != nil { + scope.Err(err) + } + } + + buff := bytes.NewBuffer([]byte{}) + i := 0 + for _, s := range str { + if s == '?' 
&& len(replacements) > i { + buff.WriteString(replacements[i]) + i++ + } else { + buff.WriteRune(s) + } + } + + str = buff.String() + + return +} + +func (scope *Scope) buildSelectQuery(clause map[string]interface{}) (str string) { + switch value := clause["query"].(type) { + case string: + str = value + case []string: + str = strings.Join(value, ", ") + } + + args := clause["args"].([]interface{}) + replacements := []string{} + for _, arg := range args { + switch reflect.ValueOf(arg).Kind() { + case reflect.Slice: + values := reflect.ValueOf(arg) + var tempMarks []string + for i := 0; i < values.Len(); i++ { + tempMarks = append(tempMarks, scope.AddToVars(values.Index(i).Interface())) + } + replacements = append(replacements, strings.Join(tempMarks, ",")) + default: + if valuer, ok := interface{}(arg).(driver.Valuer); ok { + arg, _ = valuer.Value() + } + replacements = append(replacements, scope.AddToVars(arg)) + } + } + + buff := bytes.NewBuffer([]byte{}) + i := 0 + for pos := range str { + if str[pos] == '?' { + buff.WriteString(replacements[i]) + i++ + } else { + buff.WriteByte(str[pos]) + } + } + + str = buff.String() + + return +} + +func (scope *Scope) whereSQL() (sql string) { + var ( + quotedTableName = scope.QuotedTableName() + deletedAtField, hasDeletedAtField = scope.FieldByName("DeletedAt") + primaryConditions, andConditions, orConditions []string + ) + + if !scope.Search.Unscoped && hasDeletedAtField { + sql := fmt.Sprintf("%v.%v IS NULL", quotedTableName, scope.Quote(deletedAtField.DBName)) + primaryConditions = append(primaryConditions, sql) + } + + if !scope.PrimaryKeyZero() { + for _, field := range scope.PrimaryFields() { + sql := fmt.Sprintf("%v.%v = %v", quotedTableName, scope.Quote(field.DBName), scope.AddToVars(field.Field.Interface())) + primaryConditions = append(primaryConditions, sql) + } + } + + for _, clause := range scope.Search.whereConditions { + if sql := scope.buildCondition(clause, true); sql != "" { + andConditions = append(andConditions, sql) + } + } + + for _, clause := range scope.Search.orConditions { + if sql := scope.buildCondition(clause, true); sql != "" { + orConditions = append(orConditions, sql) + } + } + + for _, clause := range scope.Search.notConditions { + if sql := scope.buildCondition(clause, false); sql != "" { + andConditions = append(andConditions, sql) + } + } + + orSQL := strings.Join(orConditions, " OR ") + combinedSQL := strings.Join(andConditions, " AND ") + if len(combinedSQL) > 0 { + if len(orSQL) > 0 { + combinedSQL = combinedSQL + " OR " + orSQL + } + } else { + combinedSQL = orSQL + } + + if len(primaryConditions) > 0 { + sql = "WHERE " + strings.Join(primaryConditions, " AND ") + if len(combinedSQL) > 0 { + sql = sql + " AND (" + combinedSQL + ")" + } + } else if len(combinedSQL) > 0 { + sql = "WHERE " + combinedSQL + } + return +} + +func (scope *Scope) selectSQL() string { + if len(scope.Search.selects) == 0 { + if len(scope.Search.joinConditions) > 0 { + return fmt.Sprintf("%v.*", scope.QuotedTableName()) + } + return "*" + } + return scope.buildSelectQuery(scope.Search.selects) +} + +func (scope *Scope) orderSQL() string { + if len(scope.Search.orders) == 0 || scope.Search.ignoreOrderQuery { + return "" + } + + var orders []string + for _, order := range scope.Search.orders { + if str, ok := order.(string); ok { + orders = append(orders, scope.quoteIfPossible(str)) + } else if expr, ok := order.(*expr); ok { + exp := expr.expr + for _, arg := range expr.args { + exp = strings.Replace(exp, "?", scope.AddToVars(arg), 1) + 
} + orders = append(orders, exp) + } + } + return " ORDER BY " + strings.Join(orders, ",") +} + +func (scope *Scope) limitAndOffsetSQL() string { + return scope.Dialect().LimitAndOffsetSQL(scope.Search.limit, scope.Search.offset) +} + +func (scope *Scope) groupSQL() string { + if len(scope.Search.group) == 0 { + return "" + } + return " GROUP BY " + scope.Search.group +} + +func (scope *Scope) havingSQL() string { + if len(scope.Search.havingConditions) == 0 { + return "" + } + + var andConditions []string + for _, clause := range scope.Search.havingConditions { + if sql := scope.buildCondition(clause, true); sql != "" { + andConditions = append(andConditions, sql) + } + } + + combinedSQL := strings.Join(andConditions, " AND ") + if len(combinedSQL) == 0 { + return "" + } + + return " HAVING " + combinedSQL +} + +func (scope *Scope) joinsSQL() string { + var joinConditions []string + for _, clause := range scope.Search.joinConditions { + if sql := scope.buildCondition(clause, true); sql != "" { + joinConditions = append(joinConditions, strings.TrimSuffix(strings.TrimPrefix(sql, "("), ")")) + } + } + + return strings.Join(joinConditions, " ") + " " +} + +func (scope *Scope) prepareQuerySQL() { + if scope.Search.raw { + scope.Raw(scope.CombinedConditionSql()) + } else { + scope.Raw(fmt.Sprintf("SELECT %v FROM %v %v", scope.selectSQL(), scope.QuotedTableName(), scope.CombinedConditionSql())) + } + return +} + +func (scope *Scope) inlineCondition(values ...interface{}) *Scope { + if len(values) > 0 { + scope.Search.Where(values[0], values[1:]...) + } + return scope +} + +func (scope *Scope) callCallbacks(funcs []*func(s *Scope)) *Scope { + for _, f := range funcs { + (*f)(scope) + if scope.skipLeft { + break + } + } + return scope +} + +func convertInterfaceToMap(values interface{}, withIgnoredField bool) map[string]interface{} { + var attrs = map[string]interface{}{} + + switch value := values.(type) { + case map[string]interface{}: + return value + case []interface{}: + for _, v := range value { + for key, value := range convertInterfaceToMap(v, withIgnoredField) { + attrs[key] = value + } + } + case interface{}: + reflectValue := reflect.ValueOf(values) + + switch reflectValue.Kind() { + case reflect.Map: + for _, key := range reflectValue.MapKeys() { + attrs[ToDBName(key.Interface().(string))] = reflectValue.MapIndex(key).Interface() + } + default: + for _, field := range (&Scope{Value: values}).Fields() { + if !field.IsBlank && (withIgnoredField || !field.IsIgnored) { + attrs[field.DBName] = field.Field.Interface() + } + } + } + } + return attrs +} + +func (scope *Scope) updatedAttrsWithValues(value interface{}) (results map[string]interface{}, hasUpdate bool) { + if scope.IndirectValue().Kind() != reflect.Struct { + return convertInterfaceToMap(value, false), true + } + + results = map[string]interface{}{} + + for key, value := range convertInterfaceToMap(value, true) { + if field, ok := scope.FieldByName(key); ok && scope.changeableField(field) { + if _, ok := value.(*expr); ok { + hasUpdate = true + results[field.DBName] = value + } else { + err := field.Set(value) + if field.IsNormal { + hasUpdate = true + if err == ErrUnaddressable { + results[field.DBName] = value + } else { + results[field.DBName] = field.Field.Interface() + } + } + } + } + } + return +} + +func (scope *Scope) row() *sql.Row { + defer scope.trace(NowFunc()) + + result := &RowQueryResult{} + scope.InstanceSet("row_query_result", result) + scope.callCallbacks(scope.db.parent.callbacks.rowQueries) + + return 
result.Row +} + +func (scope *Scope) rows() (*sql.Rows, error) { + defer scope.trace(NowFunc()) + + result := &RowsQueryResult{} + scope.InstanceSet("row_query_result", result) + scope.callCallbacks(scope.db.parent.callbacks.rowQueries) + + return result.Rows, result.Error +} + +func (scope *Scope) initialize() *Scope { + for _, clause := range scope.Search.whereConditions { + scope.updatedAttrsWithValues(clause["query"]) + } + scope.updatedAttrsWithValues(scope.Search.initAttrs) + scope.updatedAttrsWithValues(scope.Search.assignAttrs) + return scope +} + +func (scope *Scope) isQueryForColumn(query interface{}, column string) bool { + queryStr := strings.ToLower(fmt.Sprint(query)) + if queryStr == column { + return true + } + + if strings.HasSuffix(queryStr, "as "+column) { + return true + } + + if strings.HasSuffix(queryStr, "as "+scope.Quote(column)) { + return true + } + + return false +} + +func (scope *Scope) pluck(column string, value interface{}) *Scope { + dest := reflect.Indirect(reflect.ValueOf(value)) + if dest.Kind() != reflect.Slice { + scope.Err(fmt.Errorf("results should be a slice, not %s", dest.Kind())) + return scope + } + + if query, ok := scope.Search.selects["query"]; !ok || !scope.isQueryForColumn(query, column) { + scope.Search.Select(column) + } + + rows, err := scope.rows() + if scope.Err(err) == nil { + defer rows.Close() + for rows.Next() { + elem := reflect.New(dest.Type().Elem()).Interface() + scope.Err(rows.Scan(elem)) + dest.Set(reflect.Append(dest, reflect.ValueOf(elem).Elem())) + } + + if err := rows.Err(); err != nil { + scope.Err(err) + } + } + return scope +} + +func (scope *Scope) count(value interface{}) *Scope { + if query, ok := scope.Search.selects["query"]; !ok || !countingQueryRegexp.MatchString(fmt.Sprint(query)) { + if len(scope.Search.group) != 0 { + scope.Search.Select("count(*) FROM ( SELECT count(*) as name ") + scope.Search.group += " ) AS count_table" + } else { + scope.Search.Select("count(*)") + } + } + scope.Search.ignoreOrderQuery = true + scope.Err(scope.row().Scan(value)) + return scope +} + +func (scope *Scope) typeName() string { + typ := scope.IndirectValue().Type() + + for typ.Kind() == reflect.Slice || typ.Kind() == reflect.Ptr { + typ = typ.Elem() + } + + return typ.Name() +} + +// trace print sql log +func (scope *Scope) trace(t time.Time) { + if len(scope.SQL) > 0 { + scope.db.slog(scope.SQL, t, scope.SQLVars...) 
+ } +} + +func (scope *Scope) changeableField(field *Field) bool { + if selectAttrs := scope.SelectAttrs(); len(selectAttrs) > 0 { + for _, attr := range selectAttrs { + if field.Name == attr || field.DBName == attr { + return true + } + } + return false + } + + for _, attr := range scope.OmitAttrs() { + if field.Name == attr || field.DBName == attr { + return false + } + } + + return true +} + +func (scope *Scope) related(value interface{}, foreignKeys ...string) *Scope { + toScope := scope.db.NewScope(value) + tx := scope.db.Set("gorm:association:source", scope.Value) + + for _, foreignKey := range append(foreignKeys, toScope.typeName()+"Id", scope.typeName()+"Id") { + fromField, _ := scope.FieldByName(foreignKey) + toField, _ := toScope.FieldByName(foreignKey) + + if fromField != nil { + if relationship := fromField.Relationship; relationship != nil { + if relationship.Kind == "many_to_many" { + joinTableHandler := relationship.JoinTableHandler + scope.Err(joinTableHandler.JoinWith(joinTableHandler, tx, scope.Value).Find(value).Error) + } else if relationship.Kind == "belongs_to" { + for idx, foreignKey := range relationship.ForeignDBNames { + if field, ok := scope.FieldByName(foreignKey); ok { + tx = tx.Where(fmt.Sprintf("%v = ?", scope.Quote(relationship.AssociationForeignDBNames[idx])), field.Field.Interface()) + } + } + scope.Err(tx.Find(value).Error) + } else if relationship.Kind == "has_many" || relationship.Kind == "has_one" { + for idx, foreignKey := range relationship.ForeignDBNames { + if field, ok := scope.FieldByName(relationship.AssociationForeignDBNames[idx]); ok { + tx = tx.Where(fmt.Sprintf("%v = ?", scope.Quote(foreignKey)), field.Field.Interface()) + } + } + + if relationship.PolymorphicType != "" { + tx = tx.Where(fmt.Sprintf("%v = ?", scope.Quote(relationship.PolymorphicDBName)), relationship.PolymorphicValue) + } + scope.Err(tx.Find(value).Error) + } + } else { + sql := fmt.Sprintf("%v = ?", scope.Quote(toScope.PrimaryKey())) + scope.Err(tx.Where(sql, fromField.Field.Interface()).Find(value).Error) + } + return scope + } else if toField != nil { + sql := fmt.Sprintf("%v = ?", scope.Quote(toField.DBName)) + scope.Err(tx.Where(sql, scope.PrimaryKeyValue()).Find(value).Error) + return scope + } + } + + scope.Err(fmt.Errorf("invalid association %v", foreignKeys)) + return scope +} + +// getTableOptions return the table options string or an empty string if the table options does not exist +func (scope *Scope) getTableOptions() string { + tableOptions, ok := scope.Get("gorm:table_options") + if !ok { + return "" + } + return " " + tableOptions.(string) +} + +func (scope *Scope) createJoinTable(field *StructField) { + if relationship := field.Relationship; relationship != nil && relationship.JoinTableHandler != nil { + joinTableHandler := relationship.JoinTableHandler + joinTable := joinTableHandler.Table(scope.db) + if !scope.Dialect().HasTable(joinTable) { + toScope := &Scope{Value: reflect.New(field.Struct.Type).Interface()} + + var sqlTypes, primaryKeys []string + for idx, fieldName := range relationship.ForeignFieldNames { + if field, ok := scope.FieldByName(fieldName); ok { + foreignKeyStruct := field.clone() + foreignKeyStruct.IsPrimaryKey = false + foreignKeyStruct.TagSettings["IS_JOINTABLE_FOREIGNKEY"] = "true" + delete(foreignKeyStruct.TagSettings, "AUTO_INCREMENT") + sqlTypes = append(sqlTypes, scope.Quote(relationship.ForeignDBNames[idx])+" "+scope.Dialect().DataTypeOf(foreignKeyStruct)) + primaryKeys = append(primaryKeys, 
scope.Quote(relationship.ForeignDBNames[idx])) + } + } + + for idx, fieldName := range relationship.AssociationForeignFieldNames { + if field, ok := toScope.FieldByName(fieldName); ok { + foreignKeyStruct := field.clone() + foreignKeyStruct.IsPrimaryKey = false + foreignKeyStruct.TagSettings["IS_JOINTABLE_FOREIGNKEY"] = "true" + delete(foreignKeyStruct.TagSettings, "AUTO_INCREMENT") + sqlTypes = append(sqlTypes, scope.Quote(relationship.AssociationForeignDBNames[idx])+" "+scope.Dialect().DataTypeOf(foreignKeyStruct)) + primaryKeys = append(primaryKeys, scope.Quote(relationship.AssociationForeignDBNames[idx])) + } + } + + scope.Err(scope.NewDB().Exec(fmt.Sprintf("CREATE TABLE %v (%v, PRIMARY KEY (%v))%s", scope.Quote(joinTable), strings.Join(sqlTypes, ","), strings.Join(primaryKeys, ","), scope.getTableOptions())).Error) + } + scope.NewDB().Table(joinTable).AutoMigrate(joinTableHandler) + } +} + +func (scope *Scope) createTable() *Scope { + var tags []string + var primaryKeys []string + var primaryKeyInColumnType = false + for _, field := range scope.GetModelStruct().StructFields { + if field.IsNormal { + sqlTag := scope.Dialect().DataTypeOf(field) + + // Check if the primary key constraint was specified as + // part of the column type. If so, we can only support + // one column as the primary key. + if strings.Contains(strings.ToLower(sqlTag), "primary key") { + primaryKeyInColumnType = true + } + + tags = append(tags, scope.Quote(field.DBName)+" "+sqlTag) + } + + if field.IsPrimaryKey { + primaryKeys = append(primaryKeys, scope.Quote(field.DBName)) + } + scope.createJoinTable(field) + } + + var primaryKeyStr string + if len(primaryKeys) > 0 && !primaryKeyInColumnType { + primaryKeyStr = fmt.Sprintf(", PRIMARY KEY (%v)", strings.Join(primaryKeys, ",")) + } + + scope.Raw(fmt.Sprintf("CREATE TABLE %v (%v %v)%s", scope.QuotedTableName(), strings.Join(tags, ","), primaryKeyStr, scope.getTableOptions())).Exec() + + scope.autoIndex() + return scope +} + +func (scope *Scope) dropTable() *Scope { + scope.Raw(fmt.Sprintf("DROP TABLE %v%s", scope.QuotedTableName(), scope.getTableOptions())).Exec() + return scope +} + +func (scope *Scope) modifyColumn(column string, typ string) { + scope.db.AddError(scope.Dialect().ModifyColumn(scope.QuotedTableName(), scope.Quote(column), typ)) +} + +func (scope *Scope) dropColumn(column string) { + scope.Raw(fmt.Sprintf("ALTER TABLE %v DROP COLUMN %v", scope.QuotedTableName(), scope.Quote(column))).Exec() +} + +func (scope *Scope) addIndex(unique bool, indexName string, column ...string) { + if scope.Dialect().HasIndex(scope.TableName(), indexName) { + return + } + + var columns []string + for _, name := range column { + columns = append(columns, scope.quoteIfPossible(name)) + } + + sqlCreate := "CREATE INDEX" + if unique { + sqlCreate = "CREATE UNIQUE INDEX" + } + + scope.Raw(fmt.Sprintf("%s %v ON %v(%v) %v", sqlCreate, indexName, scope.QuotedTableName(), strings.Join(columns, ", "), scope.whereSQL())).Exec() +} + +func (scope *Scope) addForeignKey(field string, dest string, onDelete string, onUpdate string) { + // Compatible with old generated key + keyName := scope.Dialect().BuildKeyName(scope.TableName(), field, dest, "foreign") + + if scope.Dialect().HasForeignKey(scope.TableName(), keyName) { + return + } + var query = `ALTER TABLE %s ADD CONSTRAINT %s FOREIGN KEY (%s) REFERENCES %s ON DELETE %s ON UPDATE %s;` + scope.Raw(fmt.Sprintf(query, scope.QuotedTableName(), scope.quoteIfPossible(keyName), scope.quoteIfPossible(field), dest, onDelete, onUpdate)).Exec() 
+} + +func (scope *Scope) removeForeignKey(field string, dest string) { + keyName := scope.Dialect().BuildKeyName(scope.TableName(), field, dest) + + if !scope.Dialect().HasForeignKey(scope.TableName(), keyName) { + return + } + var query = `ALTER TABLE %s DROP CONSTRAINT %s;` + scope.Raw(fmt.Sprintf(query, scope.QuotedTableName(), scope.quoteIfPossible(keyName))).Exec() +} + +func (scope *Scope) removeIndex(indexName string) { + scope.Dialect().RemoveIndex(scope.TableName(), indexName) +} + +func (scope *Scope) autoMigrate() *Scope { + tableName := scope.TableName() + quotedTableName := scope.QuotedTableName() + + if !scope.Dialect().HasTable(tableName) { + scope.createTable() + } else { + for _, field := range scope.GetModelStruct().StructFields { + if !scope.Dialect().HasColumn(tableName, field.DBName) { + if field.IsNormal { + sqlTag := scope.Dialect().DataTypeOf(field) + scope.Raw(fmt.Sprintf("ALTER TABLE %v ADD %v %v;", quotedTableName, scope.Quote(field.DBName), sqlTag)).Exec() + } + } + scope.createJoinTable(field) + } + scope.autoIndex() + } + return scope +} + +func (scope *Scope) autoIndex() *Scope { + var indexes = map[string][]string{} + var uniqueIndexes = map[string][]string{} + + for _, field := range scope.GetStructFields() { + if name, ok := field.TagSettings["INDEX"]; ok { + names := strings.Split(name, ",") + + for _, name := range names { + if name == "INDEX" || name == "" { + name = scope.Dialect().BuildKeyName("idx", scope.TableName(), field.DBName) + } + indexes[name] = append(indexes[name], field.DBName) + } + } + + if name, ok := field.TagSettings["UNIQUE_INDEX"]; ok { + names := strings.Split(name, ",") + + for _, name := range names { + if name == "UNIQUE_INDEX" || name == "" { + name = scope.Dialect().BuildKeyName("uix", scope.TableName(), field.DBName) + } + uniqueIndexes[name] = append(uniqueIndexes[name], field.DBName) + } + } + } + + for name, columns := range indexes { + if db := scope.NewDB().Table(scope.TableName()).Model(scope.Value).AddIndex(name, columns...); db.Error != nil { + scope.db.AddError(db.Error) + } + } + + for name, columns := range uniqueIndexes { + if db := scope.NewDB().Table(scope.TableName()).Model(scope.Value).AddUniqueIndex(name, columns...); db.Error != nil { + scope.db.AddError(db.Error) + } + } + + return scope +} + +func (scope *Scope) getColumnAsArray(columns []string, values ...interface{}) (results [][]interface{}) { + for _, value := range values { + indirectValue := indirect(reflect.ValueOf(value)) + + switch indirectValue.Kind() { + case reflect.Slice: + for i := 0; i < indirectValue.Len(); i++ { + var result []interface{} + var object = indirect(indirectValue.Index(i)) + var hasValue = false + for _, column := range columns { + field := object.FieldByName(column) + if hasValue || !isBlank(field) { + hasValue = true + } + result = append(result, field.Interface()) + } + + if hasValue { + results = append(results, result) + } + } + case reflect.Struct: + var result []interface{} + var hasValue = false + for _, column := range columns { + field := indirectValue.FieldByName(column) + if hasValue || !isBlank(field) { + hasValue = true + } + result = append(result, field.Interface()) + } + + if hasValue { + results = append(results, result) + } + } + } + + return +} + +func (scope *Scope) getColumnAsScope(column string) *Scope { + indirectScopeValue := scope.IndirectValue() + + switch indirectScopeValue.Kind() { + case reflect.Slice: + if fieldStruct, ok := scope.GetModelStruct().ModelType.FieldByName(column); ok { + fieldType 
:= fieldStruct.Type + if fieldType.Kind() == reflect.Slice || fieldType.Kind() == reflect.Ptr { + fieldType = fieldType.Elem() + } + + resultsMap := map[interface{}]bool{} + results := reflect.New(reflect.SliceOf(reflect.PtrTo(fieldType))).Elem() + + for i := 0; i < indirectScopeValue.Len(); i++ { + result := indirect(indirect(indirectScopeValue.Index(i)).FieldByName(column)) + + if result.Kind() == reflect.Slice { + for j := 0; j < result.Len(); j++ { + if elem := result.Index(j); elem.CanAddr() && resultsMap[elem.Addr()] != true { + resultsMap[elem.Addr()] = true + results = reflect.Append(results, elem.Addr()) + } + } + } else if result.CanAddr() && resultsMap[result.Addr()] != true { + resultsMap[result.Addr()] = true + results = reflect.Append(results, result.Addr()) + } + } + return scope.New(results.Interface()) + } + case reflect.Struct: + if field := indirectScopeValue.FieldByName(column); field.CanAddr() { + return scope.New(field.Addr().Interface()) + } + } + return nil +} + +func (scope *Scope) hasConditions() bool { + return !scope.PrimaryKeyZero() || + len(scope.Search.whereConditions) > 0 || + len(scope.Search.orConditions) > 0 || + len(scope.Search.notConditions) > 0 +} diff --git a/vendor/github.com/jinzhu/gorm/scope_test.go b/vendor/github.com/jinzhu/gorm/scope_test.go new file mode 100644 index 0000000..3018f35 --- /dev/null +++ b/vendor/github.com/jinzhu/gorm/scope_test.go @@ -0,0 +1,80 @@ +package gorm_test + +import ( + "encoding/hex" + "math/rand" + "strings" + "testing" + + "github.com/jinzhu/gorm" +) + +func NameIn1And2(d *gorm.DB) *gorm.DB { + return d.Where("name in (?)", []string{"ScopeUser1", "ScopeUser2"}) +} + +func NameIn2And3(d *gorm.DB) *gorm.DB { + return d.Where("name in (?)", []string{"ScopeUser2", "ScopeUser3"}) +} + +func NameIn(names []string) func(d *gorm.DB) *gorm.DB { + return func(d *gorm.DB) *gorm.DB { + return d.Where("name in (?)", names) + } +} + +func TestScopes(t *testing.T) { + user1 := User{Name: "ScopeUser1", Age: 1} + user2 := User{Name: "ScopeUser2", Age: 1} + user3 := User{Name: "ScopeUser3", Age: 2} + DB.Save(&user1).Save(&user2).Save(&user3) + + var users1, users2, users3 []User + DB.Scopes(NameIn1And2).Find(&users1) + if len(users1) != 2 { + t.Errorf("Should found two users's name in 1, 2") + } + + DB.Scopes(NameIn1And2, NameIn2And3).Find(&users2) + if len(users2) != 1 { + t.Errorf("Should found one user's name is 2") + } + + DB.Scopes(NameIn([]string{user1.Name, user3.Name})).Find(&users3) + if len(users3) != 2 { + t.Errorf("Should found two users's name in 1, 3") + } +} + +func randName() string { + data := make([]byte, 8) + rand.Read(data) + + return "n-" + hex.EncodeToString(data) +} + +func TestValuer(t *testing.T) { + name := randName() + + origUser := User{Name: name, Age: 1, Password: EncryptedData("pass1"), PasswordHash: []byte("abc")} + if err := DB.Save(&origUser).Error; err != nil { + t.Errorf("No error should happen when saving user, but got %v", err) + } + + var user2 User + if err := DB.Where("name = ? AND password = ? 
AND password_hash = ?", name, EncryptedData("pass1"), []byte("abc")).First(&user2).Error; err != nil { + t.Errorf("No error should happen when querying user with valuer, but got %v", err) + } +} + +func TestFailedValuer(t *testing.T) { + name := randName() + + err := DB.Exec("INSERT INTO users(name, password) VALUES(?, ?)", name, EncryptedData("xpass1")).Error + + if err == nil { + t.Errorf("There should be an error should happen when insert data") + } else if !strings.HasPrefix(err.Error(), "Should not start with") { + t.Errorf("The error should be returned from Valuer, but get %v", err) + } +} diff --git a/vendor/github.com/jinzhu/gorm/search.go b/vendor/github.com/jinzhu/gorm/search.go new file mode 100644 index 0000000..9013859 --- /dev/null +++ b/vendor/github.com/jinzhu/gorm/search.go @@ -0,0 +1,153 @@ +package gorm + +import ( + "fmt" +) + +type search struct { + db *DB + whereConditions []map[string]interface{} + orConditions []map[string]interface{} + notConditions []map[string]interface{} + havingConditions []map[string]interface{} + joinConditions []map[string]interface{} + initAttrs []interface{} + assignAttrs []interface{} + selects map[string]interface{} + omits []string + orders []interface{} + preload []searchPreload + offset interface{} + limit interface{} + group string + tableName string + raw bool + Unscoped bool + ignoreOrderQuery bool +} + +type searchPreload struct { + schema string + conditions []interface{} +} + +func (s *search) clone() *search { + clone := *s + return &clone +} + +func (s *search) Where(query interface{}, values ...interface{}) *search { + s.whereConditions = append(s.whereConditions, map[string]interface{}{"query": query, "args": values}) + return s +} + +func (s *search) Not(query interface{}, values ...interface{}) *search { + s.notConditions = append(s.notConditions, map[string]interface{}{"query": query, "args": values}) + return s +} + +func (s *search) Or(query interface{}, values ...interface{}) *search { + s.orConditions = append(s.orConditions, map[string]interface{}{"query": query, "args": values}) + return s +} + +func (s *search) Attrs(attrs ...interface{}) *search { + s.initAttrs = append(s.initAttrs, toSearchableMap(attrs...)) + return s +} + +func (s *search) Assign(attrs ...interface{}) *search { + s.assignAttrs = append(s.assignAttrs, toSearchableMap(attrs...)) + return s +} + +func (s *search) Order(value interface{}, reorder ...bool) *search { + if len(reorder) > 0 && reorder[0] { + s.orders = []interface{}{} + } + + if value != nil && value != "" { + s.orders = append(s.orders, value) + } + return s +} + +func (s *search) Select(query interface{}, args ...interface{}) *search { + s.selects = map[string]interface{}{"query": query, "args": args} + return s +} + +func (s *search) Omit(columns ...string) *search { + s.omits = columns + return s +} + +func (s *search) Limit(limit interface{}) *search { + s.limit = limit + return s +} + +func (s *search) Offset(offset interface{}) *search { + s.offset = offset + return s +} + +func (s *search) Group(query string) *search { + s.group = s.getInterfaceAsSQL(query) + return s +} + +func (s *search) Having(query interface{}, values ...interface{}) *search { + if val, ok := query.(*expr); ok { + s.havingConditions = append(s.havingConditions, map[string]interface{}{"query": val.expr, "args": val.args}) + } else { + s.havingConditions = append(s.havingConditions, map[string]interface{}{"query": query, "args": values}) + } + return s +} + +func (s *search) Joins(query string, values 
...interface{}) *search { + s.joinConditions = append(s.joinConditions, map[string]interface{}{"query": query, "args": values}) + return s +} + +func (s *search) Preload(schema string, values ...interface{}) *search { + var preloads []searchPreload + for _, preload := range s.preload { + if preload.schema != schema { + preloads = append(preloads, preload) + } + } + preloads = append(preloads, searchPreload{schema, values}) + s.preload = preloads + return s +} + +func (s *search) Raw(b bool) *search { + s.raw = b + return s +} + +func (s *search) unscoped() *search { + s.Unscoped = true + return s +} + +func (s *search) Table(name string) *search { + s.tableName = name + return s +} + +func (s *search) getInterfaceAsSQL(value interface{}) (str string) { + switch value.(type) { + case string, int, int8, int16, int32, int64, uint, uint8, uint16, uint32, uint64: + str = fmt.Sprintf("%v", value) + default: + s.db.AddError(ErrInvalidSQL) + } + + if str == "-1" { + return "" + } + return +} diff --git a/vendor/github.com/jinzhu/gorm/search_test.go b/vendor/github.com/jinzhu/gorm/search_test.go new file mode 100644 index 0000000..4db7ab6 --- /dev/null +++ b/vendor/github.com/jinzhu/gorm/search_test.go @@ -0,0 +1,30 @@ +package gorm + +import ( + "reflect" + "testing" +) + +func TestCloneSearch(t *testing.T) { + s := new(search) + s.Where("name = ?", "jinzhu").Order("name").Attrs("name", "jinzhu").Select("name, age") + + s1 := s.clone() + s1.Where("age = ?", 20).Order("age").Attrs("email", "a@e.org").Select("email") + + if reflect.DeepEqual(s.whereConditions, s1.whereConditions) { + t.Errorf("Where should be copied") + } + + if reflect.DeepEqual(s.orders, s1.orders) { + t.Errorf("Order should be copied") + } + + if reflect.DeepEqual(s.initAttrs, s1.initAttrs) { + t.Errorf("InitAttrs should be copied") + } + + if reflect.DeepEqual(s.Select, s1.Select) { + t.Errorf("selectStr should be copied") + } +} diff --git a/vendor/github.com/jinzhu/gorm/test_all.sh b/vendor/github.com/jinzhu/gorm/test_all.sh new file mode 100755 index 0000000..5cfb332 --- /dev/null +++ b/vendor/github.com/jinzhu/gorm/test_all.sh @@ -0,0 +1,5 @@ +dialects=("postgres" "mysql" "mssql" "sqlite") + +for dialect in "${dialects[@]}" ; do + DEBUG=false GORM_DIALECT=${dialect} go test +done diff --git a/vendor/github.com/jinzhu/gorm/update_test.go b/vendor/github.com/jinzhu/gorm/update_test.go new file mode 100644 index 0000000..85d53e5 --- /dev/null +++ b/vendor/github.com/jinzhu/gorm/update_test.go @@ -0,0 +1,465 @@ +package gorm_test + +import ( + "testing" + "time" + + "github.com/jinzhu/gorm" +) + +func TestUpdate(t *testing.T) { + product1 := Product{Code: "product1code"} + product2 := Product{Code: "product2code"} + + DB.Save(&product1).Save(&product2).Update("code", "product2newcode") + + if product2.Code != "product2newcode" { + t.Errorf("Record should be updated") + } + + DB.First(&product1, product1.Id) + DB.First(&product2, product2.Id) + updatedAt1 := product1.UpdatedAt + + if DB.First(&Product{}, "code = ?", product1.Code).RecordNotFound() { + t.Errorf("Product1 should not be updated") + } + + if !DB.First(&Product{}, "code = ?", "product2code").RecordNotFound() { + t.Errorf("Product2's code should be updated") + } + + if DB.First(&Product{}, "code = ?", "product2newcode").RecordNotFound() { + t.Errorf("Product2's code should be updated") + } + + DB.Table("products").Where("code in (?)", []string{"product1code"}).Update("code", "product1newcode") + + var product4 Product + DB.First(&product4, product1.Id) + if 
updatedAt1.Format(time.RFC3339Nano) != product4.UpdatedAt.Format(time.RFC3339Nano) { + t.Errorf("updatedAt should be updated if something changed") + } + + if !DB.First(&Product{}, "code = 'product1code'").RecordNotFound() { + t.Errorf("Product1's code should be updated") + } + + if DB.First(&Product{}, "code = 'product1newcode'").RecordNotFound() { + t.Errorf("Product should not be changed to 789") + } + + if DB.Model(product2).Update("CreatedAt", time.Now().Add(time.Hour)).Error != nil { + t.Error("No error should raise when update with CamelCase") + } + + if DB.Model(&product2).UpdateColumn("CreatedAt", time.Now().Add(time.Hour)).Error != nil { + t.Error("No error should raise when update_column with CamelCase") + } + + var products []Product + DB.Find(&products) + if count := DB.Model(Product{}).Update("CreatedAt", time.Now().Add(2*time.Hour)).RowsAffected; count != int64(len(products)) { + t.Error("RowsAffected should be correct when do batch update") + } + + DB.First(&product4, product4.Id) + updatedAt4 := product4.UpdatedAt + DB.Model(&product4).Update("price", gorm.Expr("price + ? - ?", 100, 50)) + var product5 Product + DB.First(&product5, product4.Id) + if product5.Price != product4.Price+100-50 { + t.Errorf("Update with expression") + } + if product4.UpdatedAt.Format(time.RFC3339Nano) == updatedAt4.Format(time.RFC3339Nano) { + t.Errorf("Update with expression should update UpdatedAt") + } +} + +func TestUpdateWithNoStdPrimaryKeyAndDefaultValues(t *testing.T) { + animal := Animal{Name: "Ferdinand"} + DB.Save(&animal) + updatedAt1 := animal.UpdatedAt + + DB.Save(&animal).Update("name", "Francis") + + if updatedAt1.Format(time.RFC3339Nano) == animal.UpdatedAt.Format(time.RFC3339Nano) { + t.Errorf("updatedAt should not be updated if nothing changed") + } + + var animals []Animal + DB.Find(&animals) + if count := DB.Model(Animal{}).Update("CreatedAt", time.Now().Add(2*time.Hour)).RowsAffected; count != int64(len(animals)) { + t.Error("RowsAffected should be correct when do batch update") + } + + animal = Animal{From: "somewhere"} // No name fields, should be filled with the default value (galeone) + DB.Save(&animal).Update("From", "a nice place") // The name field shoul be untouched + DB.First(&animal, animal.Counter) + if animal.Name != "galeone" { + t.Errorf("Name fields shouldn't be changed if untouched, but got %v", animal.Name) + } + + // When changing a field with a default value, the change must occur + animal.Name = "amazing horse" + DB.Save(&animal) + DB.First(&animal, animal.Counter) + if animal.Name != "amazing horse" { + t.Errorf("Update a filed with a default value should occur. But got %v\n", animal.Name) + } + + // When changing a field with a default value with blank value + animal.Name = "" + DB.Save(&animal) + DB.First(&animal, animal.Counter) + if animal.Name != "" { + t.Errorf("Update a filed to blank with a default value should occur. But got %v\n", animal.Name) + } +} + +func TestUpdates(t *testing.T) { + product1 := Product{Code: "product1code", Price: 10} + product2 := Product{Code: "product2code", Price: 10} + DB.Save(&product1).Save(&product2) + DB.Model(&product1).Updates(map[string]interface{}{"code": "product1newcode", "price": 100}) + if product1.Code != "product1newcode" || product1.Price != 100 { + t.Errorf("Record should be updated also with map") + } + + DB.First(&product1, product1.Id) + DB.First(&product2, product2.Id) + updatedAt2 := product2.UpdatedAt + + if DB.First(&Product{}, "code = ? 
and price = ?", product2.Code, product2.Price).RecordNotFound() { + t.Errorf("Product2 should not be updated") + } + + if DB.First(&Product{}, "code = ?", "product1newcode").RecordNotFound() { + t.Errorf("Product1 should be updated") + } + + DB.Table("products").Where("code in (?)", []string{"product2code"}).Updates(Product{Code: "product2newcode"}) + if !DB.First(&Product{}, "code = 'product2code'").RecordNotFound() { + t.Errorf("Product2's code should be updated") + } + + var product4 Product + DB.First(&product4, product2.Id) + if updatedAt2.Format(time.RFC3339Nano) != product4.UpdatedAt.Format(time.RFC3339Nano) { + t.Errorf("updatedAt should be updated if something changed") + } + + if DB.First(&Product{}, "code = ?", "product2newcode").RecordNotFound() { + t.Errorf("product2's code should be updated") + } + + updatedAt4 := product4.UpdatedAt + DB.Model(&product4).Updates(map[string]interface{}{"price": gorm.Expr("price + ?", 100)}) + var product5 Product + DB.First(&product5, product4.Id) + if product5.Price != product4.Price+100 { + t.Errorf("Updates with expression") + } + // product4's UpdatedAt will be reset when updating + if product4.UpdatedAt.Format(time.RFC3339Nano) == updatedAt4.Format(time.RFC3339Nano) { + t.Errorf("Updates with expression should update UpdatedAt") + } +} + +func TestUpdateColumn(t *testing.T) { + product1 := Product{Code: "product1code", Price: 10} + product2 := Product{Code: "product2code", Price: 20} + DB.Save(&product1).Save(&product2).UpdateColumn(map[string]interface{}{"code": "product2newcode", "price": 100}) + if product2.Code != "product2newcode" || product2.Price != 100 { + t.Errorf("product 2 should be updated with update column") + } + + var product3 Product + DB.First(&product3, product1.Id) + if product3.Code != "product1code" || product3.Price != 10 { + t.Errorf("product 1 should not be updated") + } + + DB.First(&product2, product2.Id) + updatedAt2 := product2.UpdatedAt + DB.Model(product2).UpdateColumn("code", "update_column_new") + var product4 Product + DB.First(&product4, product2.Id) + if updatedAt2.Format(time.RFC3339Nano) != product4.UpdatedAt.Format(time.RFC3339Nano) { + t.Errorf("updatedAt should not be updated with update column") + } + + DB.Model(&product4).UpdateColumn("price", gorm.Expr("price + 100 - 50")) + var product5 Product + DB.First(&product5, product4.Id) + if product5.Price != product4.Price+100-50 { + t.Errorf("UpdateColumn with expression") + } + if product5.UpdatedAt.Format(time.RFC3339Nano) != product4.UpdatedAt.Format(time.RFC3339Nano) { + t.Errorf("UpdateColumn with expression should not update UpdatedAt") + } +} + +func TestSelectWithUpdate(t *testing.T) { + user := getPreparedUser("select_user", "select_with_update") + DB.Create(user) + + var reloadUser User + DB.First(&reloadUser, user.Id) + reloadUser.Name = "new_name" + reloadUser.Age = 50 + reloadUser.BillingAddress = Address{Address1: "New Billing Address"} + reloadUser.ShippingAddress = Address{Address1: "New ShippingAddress Address"} + reloadUser.CreditCard = CreditCard{Number: "987654321"} + reloadUser.Emails = []Email{ + {Email: "new_user_1@example1.com"}, {Email: "new_user_2@example2.com"}, {Email: "new_user_3@example2.com"}, + } + reloadUser.Company = Company{Name: "new company"} + + DB.Select("Name", "BillingAddress", "CreditCard", "Company", "Emails").Save(&reloadUser) + + var queryUser User + DB.Preload("BillingAddress").Preload("ShippingAddress"). 
+ Preload("CreditCard").Preload("Emails").Preload("Company").First(&queryUser, user.Id) + + if queryUser.Name == user.Name || queryUser.Age != user.Age { + t.Errorf("Should only update users with name column") + } + + if queryUser.BillingAddressID.Int64 == user.BillingAddressID.Int64 || + queryUser.ShippingAddressId != user.ShippingAddressId || + queryUser.CreditCard.ID == user.CreditCard.ID || + len(queryUser.Emails) == len(user.Emails) || queryUser.Company.Id == user.Company.Id { + t.Errorf("Should only update selected relationships") + } +} + +func TestSelectWithUpdateWithMap(t *testing.T) { + user := getPreparedUser("select_user", "select_with_update_map") + DB.Create(user) + + updateValues := map[string]interface{}{ + "Name": "new_name", + "Age": 50, + "BillingAddress": Address{Address1: "New Billing Address"}, + "ShippingAddress": Address{Address1: "New ShippingAddress Address"}, + "CreditCard": CreditCard{Number: "987654321"}, + "Emails": []Email{ + {Email: "new_user_1@example1.com"}, {Email: "new_user_2@example2.com"}, {Email: "new_user_3@example2.com"}, + }, + "Company": Company{Name: "new company"}, + } + + var reloadUser User + DB.First(&reloadUser, user.Id) + DB.Model(&reloadUser).Select("Name", "BillingAddress", "CreditCard", "Company", "Emails").Update(updateValues) + + var queryUser User + DB.Preload("BillingAddress").Preload("ShippingAddress"). + Preload("CreditCard").Preload("Emails").Preload("Company").First(&queryUser, user.Id) + + if queryUser.Name == user.Name || queryUser.Age != user.Age { + t.Errorf("Should only update users with name column") + } + + if queryUser.BillingAddressID.Int64 == user.BillingAddressID.Int64 || + queryUser.ShippingAddressId != user.ShippingAddressId || + queryUser.CreditCard.ID == user.CreditCard.ID || + len(queryUser.Emails) == len(user.Emails) || queryUser.Company.Id == user.Company.Id { + t.Errorf("Should only update selected relationships") + } +} + +func TestOmitWithUpdate(t *testing.T) { + user := getPreparedUser("omit_user", "omit_with_update") + DB.Create(user) + + var reloadUser User + DB.First(&reloadUser, user.Id) + reloadUser.Name = "new_name" + reloadUser.Age = 50 + reloadUser.BillingAddress = Address{Address1: "New Billing Address"} + reloadUser.ShippingAddress = Address{Address1: "New ShippingAddress Address"} + reloadUser.CreditCard = CreditCard{Number: "987654321"} + reloadUser.Emails = []Email{ + {Email: "new_user_1@example1.com"}, {Email: "new_user_2@example2.com"}, {Email: "new_user_3@example2.com"}, + } + reloadUser.Company = Company{Name: "new company"} + + DB.Omit("Name", "BillingAddress", "CreditCard", "Company", "Emails").Save(&reloadUser) + + var queryUser User + DB.Preload("BillingAddress").Preload("ShippingAddress"). 
+ Preload("CreditCard").Preload("Emails").Preload("Company").First(&queryUser, user.Id) + + if queryUser.Name != user.Name || queryUser.Age == user.Age { + t.Errorf("Should only update users with name column") + } + + if queryUser.BillingAddressID.Int64 != user.BillingAddressID.Int64 || + queryUser.ShippingAddressId == user.ShippingAddressId || + queryUser.CreditCard.ID != user.CreditCard.ID || + len(queryUser.Emails) != len(user.Emails) || queryUser.Company.Id != user.Company.Id { + t.Errorf("Should only update relationships that not omitted") + } +} + +func TestOmitWithUpdateWithMap(t *testing.T) { + user := getPreparedUser("select_user", "select_with_update_map") + DB.Create(user) + + updateValues := map[string]interface{}{ + "Name": "new_name", + "Age": 50, + "BillingAddress": Address{Address1: "New Billing Address"}, + "ShippingAddress": Address{Address1: "New ShippingAddress Address"}, + "CreditCard": CreditCard{Number: "987654321"}, + "Emails": []Email{ + {Email: "new_user_1@example1.com"}, {Email: "new_user_2@example2.com"}, {Email: "new_user_3@example2.com"}, + }, + "Company": Company{Name: "new company"}, + } + + var reloadUser User + DB.First(&reloadUser, user.Id) + DB.Model(&reloadUser).Omit("Name", "BillingAddress", "CreditCard", "Company", "Emails").Update(updateValues) + + var queryUser User + DB.Preload("BillingAddress").Preload("ShippingAddress"). + Preload("CreditCard").Preload("Emails").Preload("Company").First(&queryUser, user.Id) + + if queryUser.Name != user.Name || queryUser.Age == user.Age { + t.Errorf("Should only update users with name column") + } + + if queryUser.BillingAddressID.Int64 != user.BillingAddressID.Int64 || + queryUser.ShippingAddressId == user.ShippingAddressId || + queryUser.CreditCard.ID != user.CreditCard.ID || + len(queryUser.Emails) != len(user.Emails) || queryUser.Company.Id != user.Company.Id { + t.Errorf("Should only update relationships not omitted") + } +} + +func TestSelectWithUpdateColumn(t *testing.T) { + user := getPreparedUser("select_user", "select_with_update_map") + DB.Create(user) + + updateValues := map[string]interface{}{"Name": "new_name", "Age": 50} + + var reloadUser User + DB.First(&reloadUser, user.Id) + DB.Model(&reloadUser).Select("Name").UpdateColumn(updateValues) + + var queryUser User + DB.First(&queryUser, user.Id) + + if queryUser.Name == user.Name || queryUser.Age != user.Age { + t.Errorf("Should only update users with name column") + } +} + +func TestOmitWithUpdateColumn(t *testing.T) { + user := getPreparedUser("select_user", "select_with_update_map") + DB.Create(user) + + updateValues := map[string]interface{}{"Name": "new_name", "Age": 50} + + var reloadUser User + DB.First(&reloadUser, user.Id) + DB.Model(&reloadUser).Omit("Name").UpdateColumn(updateValues) + + var queryUser User + DB.First(&queryUser, user.Id) + + if queryUser.Name != user.Name || queryUser.Age == user.Age { + t.Errorf("Should omit name column when update user") + } +} + +func TestUpdateColumnsSkipsAssociations(t *testing.T) { + user := getPreparedUser("update_columns_user", "special_role") + user.Age = 99 + address1 := "first street" + user.BillingAddress = Address{Address1: address1} + DB.Save(user) + + // Update a single field of the user and verify that the changed address is not stored. 
+ newAge := int64(100) + user.BillingAddress.Address1 = "second street" + db := DB.Model(user).UpdateColumns(User{Age: newAge}) + if db.RowsAffected != 1 { + t.Errorf("Expected RowsAffected=1 but instead RowsAffected=%v", DB.RowsAffected) + } + + // Verify that Age now=`newAge`. + freshUser := &User{Id: user.Id} + DB.First(freshUser) + if freshUser.Age != newAge { + t.Errorf("Expected freshly queried user to have Age=%v but instead found Age=%v", newAge, freshUser.Age) + } + + // Verify that user's BillingAddress.Address1 is not changed and is still "first street". + DB.First(&freshUser.BillingAddress, freshUser.BillingAddressID) + if freshUser.BillingAddress.Address1 != address1 { + t.Errorf("Expected user's BillingAddress.Address1=%s to remain unchanged after UpdateColumns invocation, but BillingAddress.Address1=%s", address1, freshUser.BillingAddress.Address1) + } +} + +func TestUpdatesWithBlankValues(t *testing.T) { + product := Product{Code: "product1", Price: 10} + DB.Save(&product) + + DB.Model(&Product{Id: product.Id}).Updates(&Product{Price: 100}) + + var product1 Product + DB.First(&product1, product.Id) + + if product1.Code != "product1" || product1.Price != 100 { + t.Errorf("product's code should not be updated") + } +} + +type ElementWithIgnoredField struct { + Id int64 + Value string + IgnoredField int64 `sql:"-"` +} + +func (e ElementWithIgnoredField) TableName() string { + return "element_with_ignored_field" +} + +func TestUpdatesTableWithIgnoredValues(t *testing.T) { + elem := ElementWithIgnoredField{Value: "foo", IgnoredField: 10} + DB.Save(&elem) + + DB.Table(elem.TableName()). + Where("id = ?", elem.Id). + // DB.Model(&ElementWithIgnoredField{Id: elem.Id}). + Updates(&ElementWithIgnoredField{Value: "bar", IgnoredField: 100}) + + var elem1 ElementWithIgnoredField + err := DB.First(&elem1, elem.Id).Error + if err != nil { + t.Errorf("error getting an element from database: %s", err.Error()) + } + + if elem1.IgnoredField != 0 { + t.Errorf("element's ignored field should not be updated") + } +} + +func TestUpdateDecodeVirtualAttributes(t *testing.T) { + var user = User{ + Name: "jinzhu", + IgnoreMe: 88, + } + + DB.Save(&user) + + DB.Model(&user).Updates(User{Name: "jinzhu2", IgnoreMe: 100}) + + if user.IgnoreMe != 100 { + t.Errorf("should decode virtual attributes to struct, so it could be used in callbacks") + } +} diff --git a/vendor/github.com/jinzhu/gorm/utils.go b/vendor/github.com/jinzhu/gorm/utils.go new file mode 100644 index 0000000..dfaae93 --- /dev/null +++ b/vendor/github.com/jinzhu/gorm/utils.go @@ -0,0 +1,285 @@ +package gorm + +import ( + "bytes" + "database/sql/driver" + "fmt" + "reflect" + "regexp" + "runtime" + "strings" + "sync" + "time" +) + +// NowFunc returns current time, this function is exported in order to be able +// to give the flexibility to the developer to customize it according to their +// needs, e.g: +// gorm.NowFunc = func() time.Time { +// return time.Now().UTC() +// } +var NowFunc = func() time.Time { + return time.Now() +} + +// Copied from golint +var commonInitialisms = []string{"API", "ASCII", "CPU", "CSS", "DNS", "EOF", "GUID", "HTML", "HTTP", "HTTPS", "ID", "IP", "JSON", "LHS", "QPS", "RAM", "RHS", "RPC", "SLA", "SMTP", "SSH", "TLS", "TTL", "UID", "UI", "UUID", "URI", "URL", "UTF8", "VM", "XML", "XSRF", "XSS"} +var commonInitialismsReplacer *strings.Replacer + +var goSrcRegexp = regexp.MustCompile(`jinzhu/gorm/.*.go`) +var goTestRegexp = regexp.MustCompile(`jinzhu/gorm/.*test.go`) + +func init() { + var commonInitialismsForReplacer 
[]string + for _, initialism := range commonInitialisms { + commonInitialismsForReplacer = append(commonInitialismsForReplacer, initialism, strings.Title(strings.ToLower(initialism))) + } + commonInitialismsReplacer = strings.NewReplacer(commonInitialismsForReplacer...) +} + +type safeMap struct { + m map[string]string + l *sync.RWMutex +} + +func (s *safeMap) Set(key string, value string) { + s.l.Lock() + defer s.l.Unlock() + s.m[key] = value +} + +func (s *safeMap) Get(key string) string { + s.l.RLock() + defer s.l.RUnlock() + return s.m[key] +} + +func newSafeMap() *safeMap { + return &safeMap{l: new(sync.RWMutex), m: make(map[string]string)} +} + +var smap = newSafeMap() + +type strCase bool + +const ( + lower strCase = false + upper strCase = true +) + +// ToDBName convert string to db name +func ToDBName(name string) string { + if v := smap.Get(name); v != "" { + return v + } + + if name == "" { + return "" + } + + var ( + value = commonInitialismsReplacer.Replace(name) + buf = bytes.NewBufferString("") + lastCase, currCase, nextCase strCase + ) + + for i, v := range value[:len(value)-1] { + nextCase = strCase(value[i+1] >= 'A' && value[i+1] <= 'Z') + if i > 0 { + if currCase == upper { + if lastCase == upper && nextCase == upper { + buf.WriteRune(v) + } else { + if value[i-1] != '_' && value[i+1] != '_' { + buf.WriteRune('_') + } + buf.WriteRune(v) + } + } else { + buf.WriteRune(v) + if i == len(value)-2 && nextCase == upper { + buf.WriteRune('_') + } + } + } else { + currCase = upper + buf.WriteRune(v) + } + lastCase = currCase + currCase = nextCase + } + + buf.WriteByte(value[len(value)-1]) + + s := strings.ToLower(buf.String()) + smap.Set(name, s) + return s +} + +// SQL expression +type expr struct { + expr string + args []interface{} +} + +// Expr generate raw SQL expression, for example: +// DB.Model(&product).Update("price", gorm.Expr("price * ? 
+ ?", 2, 100)) +func Expr(expression string, args ...interface{}) *expr { + return &expr{expr: expression, args: args} +} + +func indirect(reflectValue reflect.Value) reflect.Value { + for reflectValue.Kind() == reflect.Ptr { + reflectValue = reflectValue.Elem() + } + return reflectValue +} + +func toQueryMarks(primaryValues [][]interface{}) string { + var results []string + + for _, primaryValue := range primaryValues { + var marks []string + for range primaryValue { + marks = append(marks, "?") + } + + if len(marks) > 1 { + results = append(results, fmt.Sprintf("(%v)", strings.Join(marks, ","))) + } else { + results = append(results, strings.Join(marks, "")) + } + } + return strings.Join(results, ",") +} + +func toQueryCondition(scope *Scope, columns []string) string { + var newColumns []string + for _, column := range columns { + newColumns = append(newColumns, scope.Quote(column)) + } + + if len(columns) > 1 { + return fmt.Sprintf("(%v)", strings.Join(newColumns, ",")) + } + return strings.Join(newColumns, ",") +} + +func toQueryValues(values [][]interface{}) (results []interface{}) { + for _, value := range values { + for _, v := range value { + results = append(results, v) + } + } + return +} + +func fileWithLineNum() string { + for i := 2; i < 15; i++ { + _, file, line, ok := runtime.Caller(i) + if ok && (!goSrcRegexp.MatchString(file) || goTestRegexp.MatchString(file)) { + return fmt.Sprintf("%v:%v", file, line) + } + } + return "" +} + +func isBlank(value reflect.Value) bool { + switch value.Kind() { + case reflect.String: + return value.Len() == 0 + case reflect.Bool: + return !value.Bool() + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + return value.Int() == 0 + case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr: + return value.Uint() == 0 + case reflect.Float32, reflect.Float64: + return value.Float() == 0 + case reflect.Interface, reflect.Ptr: + return value.IsNil() + } + + return reflect.DeepEqual(value.Interface(), reflect.Zero(value.Type()).Interface()) +} + +func toSearchableMap(attrs ...interface{}) (result interface{}) { + if len(attrs) > 1 { + if str, ok := attrs[0].(string); ok { + result = map[string]interface{}{str: attrs[1]} + } + } else if len(attrs) == 1 { + if attr, ok := attrs[0].(map[string]interface{}); ok { + result = attr + } + + if attr, ok := attrs[0].(interface{}); ok { + result = attr + } + } + return +} + +func equalAsString(a interface{}, b interface{}) bool { + return toString(a) == toString(b) +} + +func toString(str interface{}) string { + if values, ok := str.([]interface{}); ok { + var results []string + for _, value := range values { + results = append(results, toString(value)) + } + return strings.Join(results, "_") + } else if bytes, ok := str.([]byte); ok { + return string(bytes) + } else if reflectValue := reflect.Indirect(reflect.ValueOf(str)); reflectValue.IsValid() { + return fmt.Sprintf("%v", reflectValue.Interface()) + } + return "" +} + +func makeSlice(elemType reflect.Type) interface{} { + if elemType.Kind() == reflect.Slice { + elemType = elemType.Elem() + } + sliceType := reflect.SliceOf(elemType) + slice := reflect.New(sliceType) + slice.Elem().Set(reflect.MakeSlice(sliceType, 0, 0)) + return slice.Interface() +} + +func strInSlice(a string, list []string) bool { + for _, b := range list { + if b == a { + return true + } + } + return false +} + +// getValueFromFields return given fields's value +func getValueFromFields(value reflect.Value, fieldNames 
[]string) (results []interface{}) { + // If value is a nil pointer, Indirect returns a zero Value! + // Therefor we need to check for a zero value, + // as FieldByName could panic + if indirectValue := reflect.Indirect(value); indirectValue.IsValid() { + for _, fieldName := range fieldNames { + if fieldValue := indirectValue.FieldByName(fieldName); fieldValue.IsValid() { + result := fieldValue.Interface() + if r, ok := result.(driver.Valuer); ok { + result, _ = r.Value() + } + results = append(results, result) + } + } + } + return +} + +func addExtraSpaceIfExist(str string) string { + if str != "" { + return " " + str + } + return "" +} diff --git a/vendor/github.com/jinzhu/gorm/utils_test.go b/vendor/github.com/jinzhu/gorm/utils_test.go new file mode 100644 index 0000000..152296d --- /dev/null +++ b/vendor/github.com/jinzhu/gorm/utils_test.go @@ -0,0 +1,32 @@ +package gorm_test + +import ( + "testing" + + "github.com/jinzhu/gorm" +) + +func TestToDBNameGenerateFriendlyName(t *testing.T) { + var maps = map[string]string{ + "": "", + "X": "x", + "ThisIsATest": "this_is_a_test", + "PFAndESI": "pf_and_esi", + "AbcAndJkl": "abc_and_jkl", + "EmployeeID": "employee_id", + "SKU_ID": "sku_id", + "FieldX": "field_x", + "HTTPAndSMTP": "http_and_smtp", + "HTTPServerHandlerForURLID": "http_server_handler_for_url_id", + "UUID": "uuid", + "HTTPURL": "http_url", + "HTTP_URL": "http_url", + "ThisIsActuallyATestSoWeMayBeAbleToUseThisCodeInGormPackageAlsoIdCanBeUsedAtTheEndAsID": "this_is_actually_a_test_so_we_may_be_able_to_use_this_code_in_gorm_package_also_id_can_be_used_at_the_end_as_id", + } + + for key, value := range maps { + if gorm.ToDBName(key) != value { + t.Errorf("%v ToDBName should equal %v, but got %v", key, value, gorm.ToDBName(key)) + } + } +} diff --git a/vendor/github.com/jinzhu/gorm/wercker.yml b/vendor/github.com/jinzhu/gorm/wercker.yml new file mode 100644 index 0000000..0c3e73e --- /dev/null +++ b/vendor/github.com/jinzhu/gorm/wercker.yml @@ -0,0 +1,148 @@ +# use the default golang container from Docker Hub +box: golang + +services: + - name: mariadb + id: mariadb:latest + env: + MYSQL_DATABASE: gorm + MYSQL_USER: gorm + MYSQL_PASSWORD: gorm + MYSQL_RANDOM_ROOT_PASSWORD: "yes" + - name: mysql57 + id: mysql:5.7 + env: + MYSQL_DATABASE: gorm + MYSQL_USER: gorm + MYSQL_PASSWORD: gorm + MYSQL_RANDOM_ROOT_PASSWORD: "yes" + - name: mysql56 + id: mysql:5.6 + env: + MYSQL_DATABASE: gorm + MYSQL_USER: gorm + MYSQL_PASSWORD: gorm + MYSQL_RANDOM_ROOT_PASSWORD: "yes" + - name: mysql55 + id: mysql:5.5 + env: + MYSQL_DATABASE: gorm + MYSQL_USER: gorm + MYSQL_PASSWORD: gorm + MYSQL_RANDOM_ROOT_PASSWORD: "yes" + - name: postgres + id: postgres:latest + env: + POSTGRES_USER: gorm + POSTGRES_PASSWORD: gorm + POSTGRES_DB: gorm + - name: postgres96 + id: postgres:9.6 + env: + POSTGRES_USER: gorm + POSTGRES_PASSWORD: gorm + POSTGRES_DB: gorm + - name: postgres95 + id: postgres:9.5 + env: + POSTGRES_USER: gorm + POSTGRES_PASSWORD: gorm + POSTGRES_DB: gorm + - name: postgres94 + id: postgres:9.4 + env: + POSTGRES_USER: gorm + POSTGRES_PASSWORD: gorm + POSTGRES_DB: gorm + - name: postgres93 + id: postgres:9.3 + env: + POSTGRES_USER: gorm + POSTGRES_PASSWORD: gorm + POSTGRES_DB: gorm + - name: mssql + id: mcmoe/mssqldocker:latest + env: + ACCEPT_EULA: Y + SA_PASSWORD: LoremIpsum86 + MSSQL_DB: gorm + MSSQL_USER: gorm + MSSQL_PASSWORD: LoremIpsum86 + +# The steps that will be executed in the build pipeline +build: + # The steps that will be executed on build + steps: + # Sets the go workspace and places you 
package + # at the right place in the workspace tree + - setup-go-workspace + + # Gets the dependencies + - script: + name: go get + code: | + cd $WERCKER_SOURCE_DIR + go version + go get -t ./... + + # Build the project + - script: + name: go build + code: | + go build ./... + + # Test the project + - script: + name: test sqlite + code: | + go test ./... + + - script: + name: test mariadb + code: | + GORM_DIALECT=mysql GORM_DSN="gorm:gorm@tcp(mariadb:3306)/gorm?charset=utf8&parseTime=True" go test ./... + + - script: + name: test mysql5.7 + code: | + GORM_DIALECT=mysql GORM_DSN="gorm:gorm@tcp(mysql57:3306)/gorm?charset=utf8&parseTime=True" go test ./... + + - script: + name: test mysql5.6 + code: | + GORM_DIALECT=mysql GORM_DSN="gorm:gorm@tcp(mysql56:3306)/gorm?charset=utf8&parseTime=True" go test ./... + + - script: + name: test mysql5.5 + code: | + GORM_DIALECT=mysql GORM_DSN="gorm:gorm@tcp(mysql55:3306)/gorm?charset=utf8&parseTime=True" go test ./... + + - script: + name: test postgres + code: | + GORM_DIALECT=postgres GORM_DSN="host=postgres user=gorm password=gorm DB.name=gorm port=5432 sslmode=disable" go test ./... + + - script: + name: test postgres96 + code: | + GORM_DIALECT=postgres GORM_DSN="host=postgres96 user=gorm password=gorm DB.name=gorm port=5432 sslmode=disable" go test ./... + + - script: + name: test postgres95 + code: | + GORM_DIALECT=postgres GORM_DSN="host=postgres95 user=gorm password=gorm DB.name=gorm port=5432 sslmode=disable" go test ./... + + - script: + name: test postgres94 + code: | + GORM_DIALECT=postgres GORM_DSN="host=postgres94 user=gorm password=gorm DB.name=gorm port=5432 sslmode=disable" go test ./... + + - script: + name: test postgres93 + code: | + GORM_DIALECT=postgres GORM_DSN="host=postgres93 user=gorm password=gorm DB.name=gorm port=5432 sslmode=disable" go test ./... + + - script: + name: test mssql + code: | + GORM_DIALECT=mssql GORM_DSN="sqlserver://gorm:LoremIpsum86@mssql:1433?database=gorm" go test ./... diff --git a/vendor/github.com/jinzhu/inflection/LICENSE b/vendor/github.com/jinzhu/inflection/LICENSE new file mode 100644 index 0000000..a1ca9a0 --- /dev/null +++ b/vendor/github.com/jinzhu/inflection/LICENSE @@ -0,0 +1,21 @@ +The MIT License (MIT) + +Copyright (c) 2015 - Jinzhu + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. 
diff --git a/vendor/github.com/jinzhu/inflection/README.md b/vendor/github.com/jinzhu/inflection/README.md new file mode 100644 index 0000000..a3de336 --- /dev/null +++ b/vendor/github.com/jinzhu/inflection/README.md @@ -0,0 +1,55 @@ +# Inflection + +Inflection pluralizes and singularizes English nouns + +[![wercker status](https://app.wercker.com/status/f8c7432b097d1f4ce636879670be0930/s/master "wercker status")](https://app.wercker.com/project/byKey/f8c7432b097d1f4ce636879670be0930) + +## Basic Usage + +```go +inflection.Plural("person") => "people" +inflection.Plural("Person") => "People" +inflection.Plural("PERSON") => "PEOPLE" +inflection.Plural("bus") => "buses" +inflection.Plural("BUS") => "BUSES" +inflection.Plural("Bus") => "Buses" + +inflection.Singular("people") => "person" +inflection.Singular("People") => "Person" +inflection.Singular("PEOPLE") => "PERSON" +inflection.Singular("buses") => "bus" +inflection.Singular("BUSES") => "BUS" +inflection.Singular("Buses") => "Bus" + +inflection.Plural("FancyPerson") => "FancyPeople" +inflection.Singular("FancyPeople") => "FancyPerson" +``` + +## Register Rules + +Standard rules are from Rails's ActiveSupport (https://github.com/rails/rails/blob/master/activesupport/lib/active_support/inflections.rb) + +If you want to register more rules, follow: + +``` +inflection.AddUncountable("fish") +inflection.AddIrregular("person", "people") +inflection.AddPlural("(bu)s$", "${1}ses") # "bus" => "buses" / "BUS" => "BUSES" / "Bus" => "Buses" +inflection.AddSingular("(bus)(es)?$", "${1}") # "buses" => "bus" / "Buses" => "Bus" / "BUSES" => "BUS" +``` + +## Contributing + +You can help to make the project better, check out [http://gorm.io/contribute.html](http://gorm.io/contribute.html) for things you can do. + +## Author + +**jinzhu** + +* +* +* + +## License + +Released under the [MIT License](http://www.opensource.org/licenses/MIT). diff --git a/vendor/github.com/jinzhu/inflection/inflections.go b/vendor/github.com/jinzhu/inflection/inflections.go new file mode 100644 index 0000000..606263b --- /dev/null +++ b/vendor/github.com/jinzhu/inflection/inflections.go @@ -0,0 +1,273 @@ +/* +Package inflection pluralizes and singularizes English nouns. 
+ + inflection.Plural("person") => "people" + inflection.Plural("Person") => "People" + inflection.Plural("PERSON") => "PEOPLE" + + inflection.Singular("people") => "person" + inflection.Singular("People") => "Person" + inflection.Singular("PEOPLE") => "PERSON" + + inflection.Plural("FancyPerson") => "FancydPeople" + inflection.Singular("FancyPeople") => "FancydPerson" + +Standard rules are from Rails's ActiveSupport (https://github.com/rails/rails/blob/master/activesupport/lib/active_support/inflections.rb) + +If you want to register more rules, follow: + + inflection.AddUncountable("fish") + inflection.AddIrregular("person", "people") + inflection.AddPlural("(bu)s$", "${1}ses") # "bus" => "buses" / "BUS" => "BUSES" / "Bus" => "Buses" + inflection.AddSingular("(bus)(es)?$", "${1}") # "buses" => "bus" / "Buses" => "Bus" / "BUSES" => "BUS" +*/ +package inflection + +import ( + "regexp" + "strings" +) + +type inflection struct { + regexp *regexp.Regexp + replace string +} + +// Regular is a regexp find replace inflection +type Regular struct { + find string + replace string +} + +// Irregular is a hard replace inflection, +// containing both singular and plural forms +type Irregular struct { + singular string + plural string +} + +// RegularSlice is a slice of Regular inflections +type RegularSlice []Regular + +// IrregularSlice is a slice of Irregular inflections +type IrregularSlice []Irregular + +var pluralInflections = RegularSlice{ + {"([a-z])$", "${1}s"}, + {"s$", "s"}, + {"^(ax|test)is$", "${1}es"}, + {"(octop|vir)us$", "${1}i"}, + {"(octop|vir)i$", "${1}i"}, + {"(alias|status)$", "${1}es"}, + {"(bu)s$", "${1}ses"}, + {"(buffal|tomat)o$", "${1}oes"}, + {"([ti])um$", "${1}a"}, + {"([ti])a$", "${1}a"}, + {"sis$", "ses"}, + {"(?:([^f])fe|([lr])f)$", "${1}${2}ves"}, + {"(hive)$", "${1}s"}, + {"([^aeiouy]|qu)y$", "${1}ies"}, + {"(x|ch|ss|sh)$", "${1}es"}, + {"(matr|vert|ind)(?:ix|ex)$", "${1}ices"}, + {"^(m|l)ouse$", "${1}ice"}, + {"^(m|l)ice$", "${1}ice"}, + {"^(ox)$", "${1}en"}, + {"^(oxen)$", "${1}"}, + {"(quiz)$", "${1}zes"}, +} + +var singularInflections = RegularSlice{ + {"s$", ""}, + {"(ss)$", "${1}"}, + {"(n)ews$", "${1}ews"}, + {"([ti])a$", "${1}um"}, + {"((a)naly|(b)a|(d)iagno|(p)arenthe|(p)rogno|(s)ynop|(t)he)(sis|ses)$", "${1}sis"}, + {"(^analy)(sis|ses)$", "${1}sis"}, + {"([^f])ves$", "${1}fe"}, + {"(hive)s$", "${1}"}, + {"(tive)s$", "${1}"}, + {"([lr])ves$", "${1}f"}, + {"([^aeiouy]|qu)ies$", "${1}y"}, + {"(s)eries$", "${1}eries"}, + {"(m)ovies$", "${1}ovie"}, + {"(c)ookies$", "${1}ookie"}, + {"(x|ch|ss|sh)es$", "${1}"}, + {"^(m|l)ice$", "${1}ouse"}, + {"(bus)(es)?$", "${1}"}, + {"(o)es$", "${1}"}, + {"(shoe)s$", "${1}"}, + {"(cris|test)(is|es)$", "${1}is"}, + {"^(a)x[ie]s$", "${1}xis"}, + {"(octop|vir)(us|i)$", "${1}us"}, + {"(alias|status)(es)?$", "${1}"}, + {"^(ox)en", "${1}"}, + {"(vert|ind)ices$", "${1}ex"}, + {"(matr)ices$", "${1}ix"}, + {"(quiz)zes$", "${1}"}, + {"(database)s$", "${1}"}, +} + +var irregularInflections = IrregularSlice{ + {"person", "people"}, + {"man", "men"}, + {"child", "children"}, + {"sex", "sexes"}, + {"move", "moves"}, + {"mombie", "mombies"}, +} + +var uncountableInflections = []string{"equipment", "information", "rice", "money", "species", "series", "fish", "sheep", "jeans", "police"} + +var compiledPluralMaps []inflection +var compiledSingularMaps []inflection + +func compile() { + compiledPluralMaps = []inflection{} + compiledSingularMaps = []inflection{} + for _, uncountable := range uncountableInflections { + inf := inflection{ + regexp: 
regexp.MustCompile("^(?i)(" + uncountable + ")$"), + replace: "${1}", + } + compiledPluralMaps = append(compiledPluralMaps, inf) + compiledSingularMaps = append(compiledSingularMaps, inf) + } + + for _, value := range irregularInflections { + infs := []inflection{ + inflection{regexp: regexp.MustCompile(strings.ToUpper(value.singular) + "$"), replace: strings.ToUpper(value.plural)}, + inflection{regexp: regexp.MustCompile(strings.Title(value.singular) + "$"), replace: strings.Title(value.plural)}, + inflection{regexp: regexp.MustCompile(value.singular + "$"), replace: value.plural}, + } + compiledPluralMaps = append(compiledPluralMaps, infs...) + } + + for _, value := range irregularInflections { + infs := []inflection{ + inflection{regexp: regexp.MustCompile(strings.ToUpper(value.plural) + "$"), replace: strings.ToUpper(value.singular)}, + inflection{regexp: regexp.MustCompile(strings.Title(value.plural) + "$"), replace: strings.Title(value.singular)}, + inflection{regexp: regexp.MustCompile(value.plural + "$"), replace: value.singular}, + } + compiledSingularMaps = append(compiledSingularMaps, infs...) + } + + for i := len(pluralInflections) - 1; i >= 0; i-- { + value := pluralInflections[i] + infs := []inflection{ + inflection{regexp: regexp.MustCompile(strings.ToUpper(value.find)), replace: strings.ToUpper(value.replace)}, + inflection{regexp: regexp.MustCompile(value.find), replace: value.replace}, + inflection{regexp: regexp.MustCompile("(?i)" + value.find), replace: value.replace}, + } + compiledPluralMaps = append(compiledPluralMaps, infs...) + } + + for i := len(singularInflections) - 1; i >= 0; i-- { + value := singularInflections[i] + infs := []inflection{ + inflection{regexp: regexp.MustCompile(strings.ToUpper(value.find)), replace: strings.ToUpper(value.replace)}, + inflection{regexp: regexp.MustCompile(value.find), replace: value.replace}, + inflection{regexp: regexp.MustCompile("(?i)" + value.find), replace: value.replace}, + } + compiledSingularMaps = append(compiledSingularMaps, infs...) + } +} + +func init() { + compile() +} + +// AddPlural adds a plural inflection +func AddPlural(find, replace string) { + pluralInflections = append(pluralInflections, Regular{find, replace}) + compile() +} + +// AddSingular adds a singular inflection +func AddSingular(find, replace string) { + singularInflections = append(singularInflections, Regular{find, replace}) + compile() +} + +// AddIrregular adds an irregular inflection +func AddIrregular(singular, plural string) { + irregularInflections = append(irregularInflections, Irregular{singular, plural}) + compile() +} + +// AddUncountable adds an uncountable inflection +func AddUncountable(values ...string) { + uncountableInflections = append(uncountableInflections, values...) 
+ compile() +} + +// GetPlural retrieves the plural inflection values +func GetPlural() RegularSlice { + plurals := make(RegularSlice, len(pluralInflections)) + copy(plurals, pluralInflections) + return plurals +} + +// GetSingular retrieves the singular inflection values +func GetSingular() RegularSlice { + singulars := make(RegularSlice, len(singularInflections)) + copy(singulars, singularInflections) + return singulars +} + +// GetIrregular retrieves the irregular inflection values +func GetIrregular() IrregularSlice { + irregular := make(IrregularSlice, len(irregularInflections)) + copy(irregular, irregularInflections) + return irregular +} + +// GetUncountable retrieves the uncountable inflection values +func GetUncountable() []string { + uncountables := make([]string, len(uncountableInflections)) + copy(uncountables, uncountableInflections) + return uncountables +} + +// SetPlural sets the plural inflections slice +func SetPlural(inflections RegularSlice) { + pluralInflections = inflections + compile() +} + +// SetSingular sets the singular inflections slice +func SetSingular(inflections RegularSlice) { + singularInflections = inflections + compile() +} + +// SetIrregular sets the irregular inflections slice +func SetIrregular(inflections IrregularSlice) { + irregularInflections = inflections + compile() +} + +// SetUncountable sets the uncountable inflections slice +func SetUncountable(inflections []string) { + uncountableInflections = inflections + compile() +} + +// Plural converts a word to its plural form +func Plural(str string) string { + for _, inflection := range compiledPluralMaps { + if inflection.regexp.MatchString(str) { + return inflection.regexp.ReplaceAllString(str, inflection.replace) + } + } + return str +} + +// Singular converts a word to its singular form +func Singular(str string) string { + for _, inflection := range compiledSingularMaps { + if inflection.regexp.MatchString(str) { + return inflection.regexp.ReplaceAllString(str, inflection.replace) + } + } + return str +} diff --git a/vendor/github.com/jinzhu/inflection/inflections_test.go b/vendor/github.com/jinzhu/inflection/inflections_test.go new file mode 100644 index 0000000..689e1df --- /dev/null +++ b/vendor/github.com/jinzhu/inflection/inflections_test.go @@ -0,0 +1,213 @@ +package inflection + +import ( + "strings" + "testing" +) + +var inflections = map[string]string{ + "star": "stars", + "STAR": "STARS", + "Star": "Stars", + "bus": "buses", + "fish": "fish", + "mouse": "mice", + "query": "queries", + "ability": "abilities", + "agency": "agencies", + "movie": "movies", + "archive": "archives", + "index": "indices", + "wife": "wives", + "safe": "saves", + "half": "halves", + "move": "moves", + "salesperson": "salespeople", + "person": "people", + "spokesman": "spokesmen", + "man": "men", + "woman": "women", + "basis": "bases", + "diagnosis": "diagnoses", + "diagnosis_a": "diagnosis_as", + "datum": "data", + "medium": "media", + "stadium": "stadia", + "analysis": "analyses", + "node_child": "node_children", + "child": "children", + "experience": "experiences", + "day": "days", + "comment": "comments", + "foobar": "foobars", + "newsletter": "newsletters", + "old_news": "old_news", + "news": "news", + "series": "series", + "species": "species", + "quiz": "quizzes", + "perspective": "perspectives", + "ox": "oxen", + "photo": "photos", + "buffalo": "buffaloes", + "tomato": "tomatoes", + "dwarf": "dwarves", + "elf": "elves", + "information": "information", + "equipment": "equipment", + "criterion": 
"criteria", +} + +// storage is used to restore the state of the global variables +// on each test execution, to ensure no global state pollution +type storage struct { + singulars RegularSlice + plurals RegularSlice + irregulars IrregularSlice + uncountables []string +} + +var backup = storage{} + +func init() { + AddIrregular("criterion", "criteria") + copy(backup.singulars, singularInflections) + copy(backup.plurals, pluralInflections) + copy(backup.irregulars, irregularInflections) + copy(backup.uncountables, uncountableInflections) +} + +func restore() { + copy(singularInflections, backup.singulars) + copy(pluralInflections, backup.plurals) + copy(irregularInflections, backup.irregulars) + copy(uncountableInflections, backup.uncountables) +} + +func TestPlural(t *testing.T) { + for key, value := range inflections { + if v := Plural(strings.ToUpper(key)); v != strings.ToUpper(value) { + t.Errorf("%v's plural should be %v, but got %v", strings.ToUpper(key), strings.ToUpper(value), v) + } + + if v := Plural(strings.Title(key)); v != strings.Title(value) { + t.Errorf("%v's plural should be %v, but got %v", strings.Title(key), strings.Title(value), v) + } + + if v := Plural(key); v != value { + t.Errorf("%v's plural should be %v, but got %v", key, value, v) + } + } +} + +func TestSingular(t *testing.T) { + for key, value := range inflections { + if v := Singular(strings.ToUpper(value)); v != strings.ToUpper(key) { + t.Errorf("%v's singular should be %v, but got %v", strings.ToUpper(value), strings.ToUpper(key), v) + } + + if v := Singular(strings.Title(value)); v != strings.Title(key) { + t.Errorf("%v's singular should be %v, but got %v", strings.Title(value), strings.Title(key), v) + } + + if v := Singular(value); v != key { + t.Errorf("%v's singular should be %v, but got %v", value, key, v) + } + } +} + +func TestAddPlural(t *testing.T) { + defer restore() + ln := len(pluralInflections) + AddPlural("", "") + if ln+1 != len(pluralInflections) { + t.Errorf("Expected len %d, got %d", ln+1, len(pluralInflections)) + } +} + +func TestAddSingular(t *testing.T) { + defer restore() + ln := len(singularInflections) + AddSingular("", "") + if ln+1 != len(singularInflections) { + t.Errorf("Expected len %d, got %d", ln+1, len(singularInflections)) + } +} + +func TestAddIrregular(t *testing.T) { + defer restore() + ln := len(irregularInflections) + AddIrregular("", "") + if ln+1 != len(irregularInflections) { + t.Errorf("Expected len %d, got %d", ln+1, len(irregularInflections)) + } +} + +func TestAddUncountable(t *testing.T) { + defer restore() + ln := len(uncountableInflections) + AddUncountable("", "") + if ln+2 != len(uncountableInflections) { + t.Errorf("Expected len %d, got %d", ln+2, len(uncountableInflections)) + } +} + +func TestGetPlural(t *testing.T) { + plurals := GetPlural() + if len(plurals) != len(pluralInflections) { + t.Errorf("Expected len %d, got %d", len(plurals), len(pluralInflections)) + } +} + +func TestGetSingular(t *testing.T) { + singular := GetSingular() + if len(singular) != len(singularInflections) { + t.Errorf("Expected len %d, got %d", len(singular), len(singularInflections)) + } +} + +func TestGetIrregular(t *testing.T) { + irregular := GetIrregular() + if len(irregular) != len(irregularInflections) { + t.Errorf("Expected len %d, got %d", len(irregular), len(irregularInflections)) + } +} + +func TestGetUncountable(t *testing.T) { + uncountables := GetUncountable() + if len(uncountables) != len(uncountableInflections) { + t.Errorf("Expected len %d, got %d", 
len(uncountables), len(uncountableInflections)) + } +} + +func TestSetPlural(t *testing.T) { + defer restore() + SetPlural(RegularSlice{{}, {}}) + if len(pluralInflections) != 2 { + t.Errorf("Expected len 2, got %d", len(pluralInflections)) + } +} + +func TestSetSingular(t *testing.T) { + defer restore() + SetSingular(RegularSlice{{}, {}}) + if len(singularInflections) != 2 { + t.Errorf("Expected len 2, got %d", len(singularInflections)) + } +} + +func TestSetIrregular(t *testing.T) { + defer restore() + SetIrregular(IrregularSlice{{}, {}}) + if len(irregularInflections) != 2 { + t.Errorf("Expected len 2, got %d", len(irregularInflections)) + } +} + +func TestSetUncountable(t *testing.T) { + defer restore() + SetUncountable([]string{"", ""}) + if len(uncountableInflections) != 2 { + t.Errorf("Expected len 2, got %d", len(uncountableInflections)) + } +} diff --git a/vendor/github.com/jinzhu/inflection/wercker.yml b/vendor/github.com/jinzhu/inflection/wercker.yml new file mode 100644 index 0000000..5e6ce98 --- /dev/null +++ b/vendor/github.com/jinzhu/inflection/wercker.yml @@ -0,0 +1,23 @@ +box: golang + +build: + steps: + - setup-go-workspace + + # Gets the dependencies + - script: + name: go get + code: | + go get + + # Build the project + - script: + name: go build + code: | + go build ./... + + # Test the project + - script: + name: go test + code: | + go test ./... diff --git a/vendor/github.com/klauspost/compress/.gitignore b/vendor/github.com/klauspost/compress/.gitignore new file mode 100644 index 0000000..daf913b --- /dev/null +++ b/vendor/github.com/klauspost/compress/.gitignore @@ -0,0 +1,24 @@ +# Compiled Object files, Static and Dynamic libs (Shared Objects) +*.o +*.a +*.so + +# Folders +_obj +_test + +# Architecture specific extensions/prefixes +*.[568vq] +[568vq].out + +*.cgo1.go +*.cgo2.c +_cgo_defun.c +_cgo_gotypes.go +_cgo_export.* + +_testmain.go + +*.exe +*.test +*.prof diff --git a/vendor/github.com/klauspost/compress/.travis.yml b/vendor/github.com/klauspost/compress/.travis.yml new file mode 100644 index 0000000..182d38a --- /dev/null +++ b/vendor/github.com/klauspost/compress/.travis.yml @@ -0,0 +1,24 @@ +language: go + +sudo: false + +os: + - linux + - osx + +go: + - 1.4 + - 1.5 + - 1.6 + - 1.7 + - tip + +install: + - go get -t ./... + +script: + - diff <(gofmt -d .) <(printf "") + - go test -v -cpu=2 ./... + - go test -cpu=2 -tags=noasm ./... + - go test -cpu=1,2,4 -short -race ./... + - go test -cpu=2,4 -short -race -tags=noasm ./... diff --git a/vendor/github.com/klauspost/compress/LICENSE b/vendor/github.com/klauspost/compress/LICENSE new file mode 100644 index 0000000..7448756 --- /dev/null +++ b/vendor/github.com/klauspost/compress/LICENSE @@ -0,0 +1,27 @@ +Copyright (c) 2012 The Go Authors. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with the +distribution. + * Neither the name of Google Inc. nor the names of its +contributors may be used to endorse or promote products derived from +this software without specific prior written permission. 
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/vendor/github.com/klauspost/compress/README.md b/vendor/github.com/klauspost/compress/README.md new file mode 100644 index 0000000..25c8e2b --- /dev/null +++ b/vendor/github.com/klauspost/compress/README.md @@ -0,0 +1,289 @@
+# compress
+
+This package is based on an optimized Deflate function, which is used by the gzip/zip/zlib packages.
+
+It offers slightly better compression at lower compression settings, and up to 3x faster encoding at the highest compression level.
+
+* [High Throughput Benchmark](http://blog.klauspost.com/go-gzipdeflate-benchmarks/).
+* [Small Payload/Webserver Benchmarks](http://blog.klauspost.com/gzip-performance-for-go-webservers/).
+* [Linear Time Compression](http://blog.klauspost.com/constant-time-gzipzip-compression/).
+* [Re-balancing Deflate Compression Levels](https://blog.klauspost.com/rebalancing-deflate-compression-levels/)
+
+[![Build Status](https://travis-ci.org/klauspost/compress.svg?branch=master)](https://travis-ci.org/klauspost/compress)
+
+# changelog
+* Jan 14, 2017: Reduce stack pressure due to array copies. See [Issue #18625](https://github.com/golang/go/issues/18625).
+* Oct 25, 2016: Levels 2-4 have been rewritten and now offer significantly better performance than before.
+* Oct 20, 2016: Port zlib changes from Go 1.7 to fix zlib writer issue. Please update.
+* Oct 16, 2016: Go 1.7 changes merged. Apples to apples, this package is a few percent faster, but has a significantly better balance between speed and compression per level.
+* Mar 24, 2016: Always attempt Huffman encoding on level 4-7. This improves base 64 encoded data compression.
+* Mar 24, 2016: Small speedup for level 1-3.
+* Feb 19, 2016: Faster bit writer, level -2 is 15% faster, level 1 is 4% faster.
+* Feb 19, 2016: Handle small payloads faster in level 1-3.
+* Feb 19, 2016: Added faster level 2 + 3 compression modes.
+* Feb 19, 2016: [Rebalanced compression levels](https://blog.klauspost.com/rebalancing-deflate-compression-levels/), so there is a more even progression in terms of compression. New default level is 5.
+* Feb 14, 2016: Snappy: Merge upstream changes.
+* Feb 14, 2016: Snappy: Fix aggressive skipping.
+* Feb 14, 2016: Snappy: Update benchmark.
+* Feb 13, 2016: Deflate: Fixed assembler problem that could lead to sub-optimal compression.
+* Feb 12, 2016: Snappy: Added AMD64 SSE 4.2 optimizations to matching, which makes easy-to-compress material run faster. Typical speedup is around 25%.
+* Feb 9, 2016: Added Snappy package fork. This version is 5-7% faster, much more on hard-to-compress content.
+* Jan 30, 2016: Optimize level 1 to 3 by not considering static dictionary or storing uncompressed. ~4-5% speedup.
+* Jan 16, 2016: Optimization on deflate level 1,2,3 compression.
+* Jan 8 2016: Merge [CL 18317](https://go-review.googlesource.com/#/c/18317): fix reading, writing of zip64 archives.
+* Dec 8 2015: Make level 1 and -2 deterministic even if write size differs.
+* Dec 8 2015: Split encoding functions, so hashing and matching can potentially be inlined. 1-3% faster on AMD64. 5% faster on other platforms.
+* Dec 8 2015: Fixed rare [one byte out-of bounds read](https://github.com/klauspost/compress/issues/20). Please update!
+* Nov 23 2015: Optimization on token writer. ~2-4% faster. Contributed by [@dsnet](https://github.com/dsnet).
+* Nov 20 2015: Small optimization to bit writer on 64 bit systems.
+* Nov 17 2015: Fixed out-of-bound errors if the underlying Writer returned an error. See [#15](https://github.com/klauspost/compress/issues/15).
+* Nov 12 2015: Added [io.WriterTo](https://golang.org/pkg/io/#WriterTo) support to gzip/inflate.
+* Nov 11 2015: Merged [CL 16669](https://go-review.googlesource.com/#/c/16669/4): archive/zip: enable overriding (de)compressors per file
+* Oct 15 2015: Added skipping on uncompressible data. Random data speed up >5x.
+
+# usage
+
+The packages are drop-in replacements for the standard libraries. Simply replace the import path to use them:
+
+| old import | new import |
+|--------------------|-----------------------------------------|
+| `compress/gzip` | `github.com/klauspost/compress/gzip` |
+| `compress/zlib` | `github.com/klauspost/compress/zlib` |
+| `archive/zip` | `github.com/klauspost/compress/zip` |
+| `compress/deflate` | `github.com/klauspost/compress/deflate` |
+
+You may also be interested in [pgzip](https://github.com/klauspost/pgzip), a drop-in replacement for gzip that supports multithreaded compression on big files, and the optimized [crc32](https://github.com/klauspost/crc32) package used by these packages.
+
+The packages contain the same functionality as the standard library, so you can use the godoc for that: [gzip](http://golang.org/pkg/compress/gzip/), [zip](http://golang.org/pkg/archive/zip/), [zlib](http://golang.org/pkg/compress/zlib/), [flate](http://golang.org/pkg/compress/flate/).
+
+Currently there is only a minor speedup on decompression (mostly CRC32 calculation).
+
+# deflate optimizations
+
+* Minimum matches are 4 bytes, this leads to fewer searches and better compression. (In Go 1.7)
+* Stronger hash (iSCSI CRC32) for matches on x64 with SSE 4.2 support. This leads to fewer hash collisions. (Go 1.7 also has improved hashes)
+* Literal byte matching using SSE 4.2 for faster match comparisons. (not in Go)
+* Bulk hashing on matches. (In Go 1.7)
+* Much faster dictionary indexing with `NewWriterDict()`/`Reset()`. (In Go 1.7)
+* Make Bit Coder faster by assuming we are on a 64 bit CPU. (In Go 1.7)
+* Level 1 compression replaced by converted "Snappy" algorithm. (In Go 1.7)
+* Uncompressible content is detected and skipped faster. (Only in BestSpeed in Go)
+* A lot of branching eliminated by having two encoders for levels 4-6 and 7-9. (not in Go)
+* All heap memory allocations eliminated.
(In Go 1.7) + +``` +benchmark old ns/op new ns/op delta +BenchmarkEncodeDigitsSpeed1e4-4 554029 265175 -52.14% +BenchmarkEncodeDigitsSpeed1e5-4 3908558 2416595 -38.17% +BenchmarkEncodeDigitsSpeed1e6-4 37546692 24875330 -33.75% +BenchmarkEncodeDigitsDefault1e4-4 781510 486322 -37.77% +BenchmarkEncodeDigitsDefault1e5-4 15530248 6740175 -56.60% +BenchmarkEncodeDigitsDefault1e6-4 174915710 76498625 -56.27% +BenchmarkEncodeDigitsCompress1e4-4 769995 485652 -36.93% +BenchmarkEncodeDigitsCompress1e5-4 15450113 6929589 -55.15% +BenchmarkEncodeDigitsCompress1e6-4 175114660 73348495 -58.11% +BenchmarkEncodeTwainSpeed1e4-4 560122 275977 -50.73% +BenchmarkEncodeTwainSpeed1e5-4 3740978 2506095 -33.01% +BenchmarkEncodeTwainSpeed1e6-4 35542802 21904440 -38.37% +BenchmarkEncodeTwainDefault1e4-4 828534 549026 -33.74% +BenchmarkEncodeTwainDefault1e5-4 13667153 7528455 -44.92% +BenchmarkEncodeTwainDefault1e6-4 141191770 79952170 -43.37% +BenchmarkEncodeTwainCompress1e4-4 830050 545694 -34.26% +BenchmarkEncodeTwainCompress1e5-4 16620852 8460600 -49.10% +BenchmarkEncodeTwainCompress1e6-4 193326820 90808750 -53.03% + +benchmark old MB/s new MB/s speedup +BenchmarkEncodeDigitsSpeed1e4-4 18.05 37.71 2.09x +BenchmarkEncodeDigitsSpeed1e5-4 25.58 41.38 1.62x +BenchmarkEncodeDigitsSpeed1e6-4 26.63 40.20 1.51x +BenchmarkEncodeDigitsDefault1e4-4 12.80 20.56 1.61x +BenchmarkEncodeDigitsDefault1e5-4 6.44 14.84 2.30x +BenchmarkEncodeDigitsDefault1e6-4 5.72 13.07 2.28x +BenchmarkEncodeDigitsCompress1e4-4 12.99 20.59 1.59x +BenchmarkEncodeDigitsCompress1e5-4 6.47 14.43 2.23x +BenchmarkEncodeDigitsCompress1e6-4 5.71 13.63 2.39x +BenchmarkEncodeTwainSpeed1e4-4 17.85 36.23 2.03x +BenchmarkEncodeTwainSpeed1e5-4 26.73 39.90 1.49x +BenchmarkEncodeTwainSpeed1e6-4 28.14 45.65 1.62x +BenchmarkEncodeTwainDefault1e4-4 12.07 18.21 1.51x +BenchmarkEncodeTwainDefault1e5-4 7.32 13.28 1.81x +BenchmarkEncodeTwainDefault1e6-4 7.08 12.51 1.77x +BenchmarkEncodeTwainCompress1e4-4 12.05 18.33 1.52x +BenchmarkEncodeTwainCompress1e5-4 6.02 11.82 1.96x +BenchmarkEncodeTwainCompress1e6-4 5.17 11.01 2.13x +``` +* "Speed" is compression level 1 +* "Default" is compression level 6 +* "Compress" is compression level 9 +* Test files are [Digits](https://github.com/klauspost/compress/blob/master/testdata/e.txt) (no matches) and [Twain](https://github.com/klauspost/compress/blob/master/testdata/Mark.Twain-Tom.Sawyer.txt) (plain text) . + +As can be seen it shows a very good speedup all across the line. + +`Twain` is a much more realistic benchmark, and will be closer to JSON/HTML performance. Here speed is equivalent or faster, up to 2 times. + +**Without assembly**. 
This is what you can expect on systems that does not have amd64 and SSE 4: +``` +benchmark old ns/op new ns/op delta +BenchmarkEncodeDigitsSpeed1e4-4 554029 249558 -54.96% +BenchmarkEncodeDigitsSpeed1e5-4 3908558 2295216 -41.28% +BenchmarkEncodeDigitsSpeed1e6-4 37546692 22594905 -39.82% +BenchmarkEncodeDigitsDefault1e4-4 781510 579850 -25.80% +BenchmarkEncodeDigitsDefault1e5-4 15530248 10096561 -34.99% +BenchmarkEncodeDigitsDefault1e6-4 174915710 111470780 -36.27% +BenchmarkEncodeDigitsCompress1e4-4 769995 579708 -24.71% +BenchmarkEncodeDigitsCompress1e5-4 15450113 10266373 -33.55% +BenchmarkEncodeDigitsCompress1e6-4 175114660 110170120 -37.09% +BenchmarkEncodeTwainSpeed1e4-4 560122 260679 -53.46% +BenchmarkEncodeTwainSpeed1e5-4 3740978 2097372 -43.94% +BenchmarkEncodeTwainSpeed1e6-4 35542802 20353449 -42.74% +BenchmarkEncodeTwainDefault1e4-4 828534 646016 -22.03% +BenchmarkEncodeTwainDefault1e5-4 13667153 10056369 -26.42% +BenchmarkEncodeTwainDefault1e6-4 141191770 105268770 -25.44% +BenchmarkEncodeTwainCompress1e4-4 830050 642401 -22.61% +BenchmarkEncodeTwainCompress1e5-4 16620852 11157081 -32.87% +BenchmarkEncodeTwainCompress1e6-4 193326820 121780770 -37.01% + +benchmark old MB/s new MB/s speedup +BenchmarkEncodeDigitsSpeed1e4-4 18.05 40.07 2.22x +BenchmarkEncodeDigitsSpeed1e5-4 25.58 43.57 1.70x +BenchmarkEncodeDigitsSpeed1e6-4 26.63 44.26 1.66x +BenchmarkEncodeDigitsDefault1e4-4 12.80 17.25 1.35x +BenchmarkEncodeDigitsDefault1e5-4 6.44 9.90 1.54x +BenchmarkEncodeDigitsDefault1e6-4 5.72 8.97 1.57x +BenchmarkEncodeDigitsCompress1e4-4 12.99 17.25 1.33x +BenchmarkEncodeDigitsCompress1e5-4 6.47 9.74 1.51x +BenchmarkEncodeDigitsCompress1e6-4 5.71 9.08 1.59x +BenchmarkEncodeTwainSpeed1e4-4 17.85 38.36 2.15x +BenchmarkEncodeTwainSpeed1e5-4 26.73 47.68 1.78x +BenchmarkEncodeTwainSpeed1e6-4 28.14 49.13 1.75x +BenchmarkEncodeTwainDefault1e4-4 12.07 15.48 1.28x +BenchmarkEncodeTwainDefault1e5-4 7.32 9.94 1.36x +BenchmarkEncodeTwainDefault1e6-4 7.08 9.50 1.34x +BenchmarkEncodeTwainCompress1e4-4 12.05 15.57 1.29x +BenchmarkEncodeTwainCompress1e5-4 6.02 8.96 1.49x +BenchmarkEncodeTwainCompress1e6-4 5.17 8.21 1.59x +``` +So even without the assembly optimizations there is a general speedup across the board. + +## level 1-3 "snappy" compression + +Levels 1 "Best Speed", 2 and 3 are completely replaced by a converted version of the algorithm found in Snappy, modified to be fully +compatible with the deflate bitstream (and thus still compatible with all existing zlib/gzip libraries and tools). +This version is considerably faster than the "old" deflate at level 1. It does however come at a compression loss, usually in the order of 3-4% compared to the old level 1. However, the speed is usually 1.75 times that of the fastest deflate mode. + +In my previous experiments the most common case for "level 1" was that it provided no significant speedup, only lower compression compared to level 2 and sometimes even 3. However, the modified Snappy algorithm provides a very good sweet spot. Usually about 75% faster and with only little compression loss. Therefore I decided to *replace* level 1 with this mode entirely. + +Input is split into blocks of 64kb of, and they are encoded independently (no backreferences across blocks) for the best speed. Contrary to Snappy the output is entropy-encoded, so you will almost always see better compression than Snappy. But Snappy is still about twice as fast as Snappy in deflate mode. + +Level 2 and 3 have also been replaced. 
Level 2 is capable of matching between blocks, and level 3 checks up to two hashes for each match it tries.
+
+## compression levels
+
+This table shows the compression at each level, and the percentage of the output size compared to the output
+at the same level with the standard library. Compression data is `Twain`, see above.
+
+(Not up-to-date after rebalancing)
+
+| Level | Bytes  | % size |
+|-------|--------|--------|
+| 1     | 194622 | 103.7% |
+| 2     | 174684 | 96.85% |
+| 3     | 170301 | 98.45% |
+| 4     | 165253 | 97.69% |
+| 5     | 161274 | 98.65% |
+| 6     | 160464 | 99.71% |
+| 7     | 160304 | 99.87% |
+| 8     | 160279 | 99.99% |
+| 9     | 160279 | 99.99% |
+
+To interpret an example: this version of deflate compresses an input of 407287 bytes to 161274 bytes at level 5, which is 98.6% of the size of what the standard library produces at the same level.
+
+This means that from level 4 you can expect a compression improvement of a few percent. Level 1 is about 3% worse, as described above.
+
+# linear time compression (huffman only)
+
+This compression library adds a special compression level, named `ConstantCompression`, which allows near linear time compression. This is done by completely disabling matching of previous data, and only reducing the number of bits used to represent each character.
+
+This means that often-used characters, like 'e' and ' ' (space) in text, use the fewest bits, and rare characters like '¤' take more bits to represent. For more information see [wikipedia](https://en.wikipedia.org/wiki/Huffman_coding) or this nice [video](https://youtu.be/ZdooBTdW5bM).
+
+Since this type of compression has much less variance, the compression speed is mostly unaffected by the input data and is usually more than *180MB/s* for a single core.
+
+The downside is that the compression ratio is usually considerably worse than even the fastest conventional compression. The compression ratio can never be better than 8:1 (12.5%).
+
+The linear time compression can be used as a "better than nothing" mode, where you cannot risk the encoder slowing down on some content. For comparison, the size of the "Twain" text is *233460 bytes* (+29% vs. level 1) and encode speed is 144MB/s (4.5x level 1). So in this case you trade a 30% size increase for a 4 times speedup.
+
+For more information see my blog post on [Fast Linear Time Compression](http://blog.klauspost.com/constant-time-gzipzip-compression/).
+
+This is implemented in Go 1.7 as "Huffman Only" mode, though not exposed for gzip.
+
+
+# gzip/zip optimizations
+ * Uses the faster deflate
+ * Uses SSE 4.2 CRC32 calculations.
+
+Speed increase is up to 3x that of the standard library, but usually around 2x.
+
+This is as close to a real-world benchmark as you will get: a 2.3MB JSON file.
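As a minimal illustration of the drop-in usage described in the usage section above (a sketch only: it assumes nothing beyond the standard `compress/gzip`-compatible API that this package mirrors, and the repetitive payload is made up to stand in for the benchmarked JSON file):

```
package main

import (
	"bytes"
	"fmt"
	"io/ioutil"
	"log"

	// Only the import path differs from the standard "compress/gzip".
	"github.com/klauspost/compress/gzip"
)

func main() {
	// Made-up repetitive payload standing in for the benchmarked JSON file.
	payload := bytes.Repeat([]byte(`{"key":"value","n":12345}`), 4000)

	// Compress with the same API as the standard library.
	var buf bytes.Buffer
	zw, err := gzip.NewWriterLevel(&buf, gzip.BestSpeed)
	if err != nil {
		log.Fatal(err)
	}
	if _, err := zw.Write(payload); err != nil {
		log.Fatal(err)
	}
	if err := zw.Close(); err != nil {
		log.Fatal(err)
	}
	compressed := buf.Len()

	// The output is ordinary gzip, so any gzip reader (including the standard library's) can decode it.
	zr, err := gzip.NewReader(&buf)
	if err != nil {
		log.Fatal(err)
	}
	out, err := ioutil.ReadAll(zr)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Printf("in=%d bytes, compressed=%d bytes, roundtrip ok=%v\n",
		len(payload), compressed, bytes.Equal(out, payload))
}
```

The benchmark numbers for the 2.3MB JSON file follow.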
(NOTE: not up-to-date) + +``` +benchmark old ns/op new ns/op delta +BenchmarkGzipL1-4 95212470 59938275 -37.05% +BenchmarkGzipL2-4 102069730 76349195 -25.20% +BenchmarkGzipL3-4 115472770 82492215 -28.56% +BenchmarkGzipL4-4 153197780 107570890 -29.78% +BenchmarkGzipL5-4 203930260 134387930 -34.10% +BenchmarkGzipL6-4 233172100 145495400 -37.60% +BenchmarkGzipL7-4 297190260 197926950 -33.40% +BenchmarkGzipL8-4 512819750 376244733 -26.63% +BenchmarkGzipL9-4 563366800 403266833 -28.42% + +benchmark old MB/s new MB/s speedup +BenchmarkGzipL1-4 52.11 82.78 1.59x +BenchmarkGzipL2-4 48.61 64.99 1.34x +BenchmarkGzipL3-4 42.97 60.15 1.40x +BenchmarkGzipL4-4 32.39 46.13 1.42x +BenchmarkGzipL5-4 24.33 36.92 1.52x +BenchmarkGzipL6-4 21.28 34.10 1.60x +BenchmarkGzipL7-4 16.70 25.07 1.50x +BenchmarkGzipL8-4 9.68 13.19 1.36x +BenchmarkGzipL9-4 8.81 12.30 1.40x +``` + +Multithreaded compression using [pgzip](https://github.com/klauspost/pgzip) comparison, Quadcore, CPU = 8: + +(Not updated, old numbers) + +``` +benchmark old ns/op new ns/op delta +BenchmarkGzipL1 96155500 25981486 -72.98% +BenchmarkGzipL2 101905830 24601408 -75.86% +BenchmarkGzipL3 113506490 26321506 -76.81% +BenchmarkGzipL4 143708220 31761818 -77.90% +BenchmarkGzipL5 188210770 39602266 -78.96% +BenchmarkGzipL6 209812000 40402313 -80.74% +BenchmarkGzipL7 270015440 56103210 -79.22% +BenchmarkGzipL8 461359700 91255220 -80.22% +BenchmarkGzipL9 498361833 88755075 -82.19% + +benchmark old MB/s new MB/s speedup +BenchmarkGzipL1 51.60 190.97 3.70x +BenchmarkGzipL2 48.69 201.69 4.14x +BenchmarkGzipL3 43.71 188.51 4.31x +BenchmarkGzipL4 34.53 156.22 4.52x +BenchmarkGzipL5 26.36 125.29 4.75x +BenchmarkGzipL6 23.65 122.81 5.19x +BenchmarkGzipL7 18.38 88.44 4.81x +BenchmarkGzipL8 10.75 54.37 5.06x +BenchmarkGzipL9 9.96 55.90 5.61x +``` + +# snappy package + +The standard snappy package has now been improved. This repo contains a copy of the snappy repo. + +I would advise to use the standard package: https://github.com/golang/snappy + + +# license + +This code is licensed under the same conditions as the original Go code. See LICENSE file. diff --git a/vendor/github.com/klauspost/compress/flate/asm_test.go b/vendor/github.com/klauspost/compress/flate/asm_test.go new file mode 100644 index 0000000..40bf210 --- /dev/null +++ b/vendor/github.com/klauspost/compress/flate/asm_test.go @@ -0,0 +1,193 @@ +// Copyright 2015, Klaus Post, see LICENSE for details. + +//+build amd64 + +package flate + +import ( + "math/rand" + "testing" +) + +func TestCRC(t *testing.T) { + if !useSSE42 { + t.Skip("Skipping CRC test, no SSE 4.2 available") + } + for _, x := range deflateTests { + y := x.out + if len(y) >= minMatchLength { + t.Logf("In: %v, Out:0x%08x", y[0:minMatchLength], crc32sse(y[0:minMatchLength])) + } + } +} + +func TestCRCBulk(t *testing.T) { + if !useSSE42 { + t.Skip("Skipping CRC test, no SSE 4.2 available") + } + for _, x := range deflateTests { + y := x.out + y = append(y, y...) + y = append(y, y...) + y = append(y, y...) + y = append(y, y...) + y = append(y, y...) + y = append(y, y...) + if !testing.Short() { + y = append(y, y...) + y = append(y, y...) 
+ } + y = append(y, 1) + if len(y) >= minMatchLength { + for j := len(y) - 1; j >= 4; j-- { + + // Create copy, so we easier detect of-of-bound reads + test := make([]byte, j) + test2 := make([]byte, j) + copy(test, y[:j]) + copy(test2, y[:j]) + + // We allocate one more than we need to test for unintentional overwrites + dst := make([]uint32, j-3+1) + ref := make([]uint32, j-3+1) + for i := range dst { + dst[i] = uint32(i + 100) + ref[i] = uint32(i + 101) + } + // Last entry must NOT be overwritten. + dst[j-3] = 0x1234 + ref[j-3] = 0x1234 + + // Do two encodes we can compare + crc32sseAll(test, dst) + crc32sseAll(test2, ref) + + // Check all values + for i, got := range dst { + if i == j-3 { + if dst[i] != 0x1234 { + t.Fatalf("end of expected dst overwritten, was %08x", uint32(dst[i])) + } + continue + } + expect := crc32sse(y[i : i+4]) + if got != expect && got == uint32(i)+100 { + t.Errorf("Len:%d Index:%d, expected 0x%08x but not modified", len(y), i, uint32(expect)) + } else if got != expect { + t.Errorf("Len:%d Index:%d, got 0x%08x expected:0x%08x", len(y), i, uint32(got), uint32(expect)) + } + expect = ref[i] + if got != expect { + t.Errorf("Len:%d Index:%d, got 0x%08x expected:0x%08x", len(y), i, got, expect) + } + } + } + } + } +} + +func TestMatchLen(t *testing.T) { + if !useSSE42 { + t.Skip("Skipping Matchlen test, no SSE 4.2 available") + } + // Maximum length tested + var maxLen = 512 + + // Skips per iteration + is, js, ks := 3, 2, 1 + if testing.Short() { + is, js, ks = 7, 5, 3 + } + + a := make([]byte, maxLen) + b := make([]byte, maxLen) + bb := make([]byte, maxLen) + rand.Seed(1) + for i := range a { + a[i] = byte(rand.Int63()) + b[i] = byte(rand.Int63()) + } + + // Test different lengths + for i := 0; i < maxLen; i += is { + // Test different dst offsets. + for j := 0; j < maxLen-1; j += js { + copy(bb, b) + // Test different src offsets + for k := i - 1; k >= 0; k -= ks { + copy(bb[j:], a[k:i]) + maxTest := maxLen - j + if maxTest > maxLen-k { + maxTest = maxLen - k + } + got := matchLenSSE4(a[k:], bb[j:], maxTest) + expect := matchLenReference(a[k:], bb[j:], maxTest) + if got > maxTest || got < 0 { + t.Fatalf("unexpected result %d (len:%d, src offset: %d, dst offset:%d)", got, maxTest, k, j) + } + if got != expect { + t.Fatalf("Mismatch, expected %d, got %d", expect, got) + } + } + } + } +} + +// matchLenReference is a reference matcher. 
+func matchLenReference(a, b []byte, max int) int { + for i := 0; i < max; i++ { + if a[i] != b[i] { + return i + } + } + return max +} + +func TestHistogram(t *testing.T) { + if !useSSE42 { + t.Skip("Skipping Matchlen test, no SSE 4.2 available") + } + // Maximum length tested + const maxLen = 65536 + var maxOff = 8 + + // Skips per iteration + is, js := 5, 3 + if testing.Short() { + is, js = 9, 1 + maxOff = 1 + } + + a := make([]byte, maxLen+maxOff) + rand.Seed(1) + for i := range a { + a[i] = byte(rand.Int63()) + } + + // Test different lengths + for i := 0; i <= maxLen; i += is { + // Test different offsets + for j := 0; j < maxOff; j += js { + var got [256]int32 + var reference [256]int32 + + histogram(a[j:i+j], got[:]) + histogramReference(a[j:i+j], reference[:]) + for k := range got { + if got[k] != reference[k] { + t.Fatalf("mismatch at len:%d, offset:%d, value %d: (got) %d != %d (expected)", i, j, k, got[k], reference[k]) + } + } + } + } +} + +// histogramReference is a reference +func histogramReference(b []byte, h []int32) { + if len(h) < 256 { + panic("Histogram too small") + } + for _, t := range b { + h[t]++ + } +} diff --git a/vendor/github.com/klauspost/compress/flate/copy.go b/vendor/github.com/klauspost/compress/flate/copy.go new file mode 100644 index 0000000..a3200a8 --- /dev/null +++ b/vendor/github.com/klauspost/compress/flate/copy.go @@ -0,0 +1,32 @@ +// Copyright 2012 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package flate + +// forwardCopy is like the built-in copy function except that it always goes +// forward from the start, even if the dst and src overlap. +// It is equivalent to: +// for i := 0; i < n; i++ { +// mem[dst+i] = mem[src+i] +// } +func forwardCopy(mem []byte, dst, src, n int) { + if dst <= src { + copy(mem[dst:dst+n], mem[src:src+n]) + return + } + for { + if dst >= src+n { + copy(mem[dst:dst+n], mem[src:src+n]) + return + } + // There is some forward overlap. The destination + // will be filled with a repeated pattern of mem[src:src+k]. + // We copy one instance of the pattern here, then repeat. + // Each time around this loop k will double. + k := dst - src + copy(mem[dst:dst+k], mem[src:src+k]) + n -= k + dst += k + } +} diff --git a/vendor/github.com/klauspost/compress/flate/copy_test.go b/vendor/github.com/klauspost/compress/flate/copy_test.go new file mode 100644 index 0000000..2011b15 --- /dev/null +++ b/vendor/github.com/klauspost/compress/flate/copy_test.go @@ -0,0 +1,54 @@ +// Copyright 2012 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +package flate + +import ( + "testing" +) + +func TestForwardCopy(t *testing.T) { + testCases := []struct { + dst0, dst1 int + src0, src1 int + want string + }{ + {0, 9, 0, 9, "012345678"}, + {0, 5, 4, 9, "45678"}, + {4, 9, 0, 5, "01230"}, + {1, 6, 3, 8, "34567"}, + {3, 8, 1, 6, "12121"}, + {0, 9, 3, 6, "345"}, + {3, 6, 0, 9, "012"}, + {1, 6, 0, 9, "00000"}, + {0, 4, 7, 8, "7"}, + {0, 1, 6, 8, "6"}, + {4, 4, 6, 9, ""}, + {2, 8, 6, 6, ""}, + {0, 0, 0, 0, ""}, + } + for _, tc := range testCases { + b := []byte("0123456789") + n := tc.dst1 - tc.dst0 + if tc.src1-tc.src0 < n { + n = tc.src1 - tc.src0 + } + forwardCopy(b, tc.dst0, tc.src0, n) + got := string(b[tc.dst0 : tc.dst0+n]) + if got != tc.want { + t.Errorf("dst=b[%d:%d], src=b[%d:%d]: got %q, want %q", + tc.dst0, tc.dst1, tc.src0, tc.src1, got, tc.want) + } + // Check that the bytes outside of dst[:n] were not modified. + for i, x := range b { + if i >= tc.dst0 && i < tc.dst0+n { + continue + } + if int(x) != '0'+i { + t.Errorf("dst=b[%d:%d], src=b[%d:%d]: copy overrun at b[%d]: got '%c', want '%c'", + tc.dst0, tc.dst1, tc.src0, tc.src1, i, x, '0'+i) + } + } + } +} diff --git a/vendor/github.com/klauspost/compress/flate/crc32_amd64.go b/vendor/github.com/klauspost/compress/flate/crc32_amd64.go new file mode 100644 index 0000000..70a6095 --- /dev/null +++ b/vendor/github.com/klauspost/compress/flate/crc32_amd64.go @@ -0,0 +1,41 @@ +//+build !noasm +//+build !appengine + +// Copyright 2015, Klaus Post, see LICENSE for details. + +package flate + +import ( + "github.com/klauspost/cpuid" +) + +// crc32sse returns a hash for the first 4 bytes of the slice +// len(a) must be >= 4. +//go:noescape +func crc32sse(a []byte) uint32 + +// crc32sseAll calculates hashes for each 4-byte set in a. +// dst must be east len(a) - 4 in size. +// The size is not checked by the assembly. +//go:noescape +func crc32sseAll(a []byte, dst []uint32) + +// matchLenSSE4 returns the number of matching bytes in a and b +// up to length 'max'. Both slices must be at least 'max' +// bytes in size. +// +// TODO: drop the "SSE4" name, since it doesn't use any SSE instructions. +// +//go:noescape +func matchLenSSE4(a, b []byte, max int) int + +// histogram accumulates a histogram of b in h. +// h must be at least 256 entries in length, +// and must be cleared before calling this function. +//go:noescape +func histogram(b []byte, h []int32) + +// Detect SSE 4.2 feature. +func init() { + useSSE42 = cpuid.CPU.SSE42() +} diff --git a/vendor/github.com/klauspost/compress/flate/crc32_amd64.s b/vendor/github.com/klauspost/compress/flate/crc32_amd64.s new file mode 100644 index 0000000..2fb2079 --- /dev/null +++ b/vendor/github.com/klauspost/compress/flate/crc32_amd64.s @@ -0,0 +1,213 @@ +//+build !noasm +//+build !appengine + +// Copyright 2015, Klaus Post, see LICENSE for details. 
+ +// func crc32sse(a []byte) uint32 +TEXT ·crc32sse(SB), 4, $0 + MOVQ a+0(FP), R10 + XORQ BX, BX + + // CRC32 dword (R10), EBX + BYTE $0xF2; BYTE $0x41; BYTE $0x0f + BYTE $0x38; BYTE $0xf1; BYTE $0x1a + + MOVL BX, ret+24(FP) + RET + +// func crc32sseAll(a []byte, dst []uint32) +TEXT ·crc32sseAll(SB), 4, $0 + MOVQ a+0(FP), R8 // R8: src + MOVQ a_len+8(FP), R10 // input length + MOVQ dst+24(FP), R9 // R9: dst + SUBQ $4, R10 + JS end + JZ one_crc + MOVQ R10, R13 + SHRQ $2, R10 // len/4 + ANDQ $3, R13 // len&3 + XORQ BX, BX + ADDQ $1, R13 + TESTQ R10, R10 + JZ rem_loop + +crc_loop: + MOVQ (R8), R11 + XORQ BX, BX + XORQ DX, DX + XORQ DI, DI + MOVQ R11, R12 + SHRQ $8, R11 + MOVQ R12, AX + MOVQ R11, CX + SHRQ $16, R12 + SHRQ $16, R11 + MOVQ R12, SI + + // CRC32 EAX, EBX + BYTE $0xF2; BYTE $0x0f + BYTE $0x38; BYTE $0xf1; BYTE $0xd8 + + // CRC32 ECX, EDX + BYTE $0xF2; BYTE $0x0f + BYTE $0x38; BYTE $0xf1; BYTE $0xd1 + + // CRC32 ESI, EDI + BYTE $0xF2; BYTE $0x0f + BYTE $0x38; BYTE $0xf1; BYTE $0xfe + MOVL BX, (R9) + MOVL DX, 4(R9) + MOVL DI, 8(R9) + + XORQ BX, BX + MOVL R11, AX + + // CRC32 EAX, EBX + BYTE $0xF2; BYTE $0x0f + BYTE $0x38; BYTE $0xf1; BYTE $0xd8 + MOVL BX, 12(R9) + + ADDQ $16, R9 + ADDQ $4, R8 + XORQ BX, BX + SUBQ $1, R10 + JNZ crc_loop + +rem_loop: + MOVL (R8), AX + + // CRC32 EAX, EBX + BYTE $0xF2; BYTE $0x0f + BYTE $0x38; BYTE $0xf1; BYTE $0xd8 + + MOVL BX, (R9) + ADDQ $4, R9 + ADDQ $1, R8 + XORQ BX, BX + SUBQ $1, R13 + JNZ rem_loop + +end: + RET + +one_crc: + MOVQ $1, R13 + XORQ BX, BX + JMP rem_loop + +// func matchLenSSE4(a, b []byte, max int) int +TEXT ·matchLenSSE4(SB), 4, $0 + MOVQ a_base+0(FP), SI + MOVQ b_base+24(FP), DI + MOVQ DI, DX + MOVQ max+48(FP), CX + +cmp8: + // As long as we are 8 or more bytes before the end of max, we can load and + // compare 8 bytes at a time. If those 8 bytes are equal, repeat. + CMPQ CX, $8 + JLT cmp1 + MOVQ (SI), AX + MOVQ (DI), BX + CMPQ AX, BX + JNE bsf + ADDQ $8, SI + ADDQ $8, DI + SUBQ $8, CX + JMP cmp8 + +bsf: + // If those 8 bytes were not equal, XOR the two 8 byte values, and return + // the index of the first byte that differs. The BSF instruction finds the + // least significant 1 bit, the amd64 architecture is little-endian, and + // the shift by 3 converts a bit index to a byte index. + XORQ AX, BX + BSFQ BX, BX + SHRQ $3, BX + ADDQ BX, DI + + // Subtract off &b[0] to convert from &b[ret] to ret, and return. + SUBQ DX, DI + MOVQ DI, ret+56(FP) + RET + +cmp1: + // In the slices' tail, compare 1 byte at a time. + CMPQ CX, $0 + JEQ matchLenEnd + MOVB (SI), AX + MOVB (DI), BX + CMPB AX, BX + JNE matchLenEnd + ADDQ $1, SI + ADDQ $1, DI + SUBQ $1, CX + JMP cmp1 + +matchLenEnd: + // Subtract off &b[0] to convert from &b[ret] to ret, and return. 
+ SUBQ DX, DI + MOVQ DI, ret+56(FP) + RET + +// func histogram(b []byte, h []int32) +TEXT ·histogram(SB), 4, $0 + MOVQ b+0(FP), SI // SI: &b + MOVQ b_len+8(FP), R9 // R9: len(b) + MOVQ h+24(FP), DI // DI: Histogram + MOVQ R9, R8 + SHRQ $3, R8 + JZ hist1 + XORQ R11, R11 + +loop_hist8: + MOVQ (SI), R10 + + MOVB R10, R11 + INCL (DI)(R11*4) + SHRQ $8, R10 + + MOVB R10, R11 + INCL (DI)(R11*4) + SHRQ $8, R10 + + MOVB R10, R11 + INCL (DI)(R11*4) + SHRQ $8, R10 + + MOVB R10, R11 + INCL (DI)(R11*4) + SHRQ $8, R10 + + MOVB R10, R11 + INCL (DI)(R11*4) + SHRQ $8, R10 + + MOVB R10, R11 + INCL (DI)(R11*4) + SHRQ $8, R10 + + MOVB R10, R11 + INCL (DI)(R11*4) + SHRQ $8, R10 + + INCL (DI)(R10*4) + + ADDQ $8, SI + DECQ R8 + JNZ loop_hist8 + +hist1: + ANDQ $7, R9 + JZ end_hist + XORQ R10, R10 + +loop_hist1: + MOVB (SI), R10 + INCL (DI)(R10*4) + INCQ SI + DECQ R9 + JNZ loop_hist1 + +end_hist: + RET diff --git a/vendor/github.com/klauspost/compress/flate/crc32_noasm.go b/vendor/github.com/klauspost/compress/flate/crc32_noasm.go new file mode 100644 index 0000000..bd98bd5 --- /dev/null +++ b/vendor/github.com/klauspost/compress/flate/crc32_noasm.go @@ -0,0 +1,35 @@ +//+build !amd64 noasm appengine + +// Copyright 2015, Klaus Post, see LICENSE for details. + +package flate + +func init() { + useSSE42 = false +} + +// crc32sse should never be called. +func crc32sse(a []byte) uint32 { + panic("no assembler") +} + +// crc32sseAll should never be called. +func crc32sseAll(a []byte, dst []uint32) { + panic("no assembler") +} + +// matchLenSSE4 should never be called. +func matchLenSSE4(a, b []byte, max int) int { + panic("no assembler") + return 0 +} + +// histogram accumulates a histogram of b in h. +// +// len(h) must be >= 256, and h's elements must be all zeroes. +func histogram(b []byte, h []int32) { + h = h[:256] + for _, t := range b { + h[t]++ + } +} diff --git a/vendor/github.com/klauspost/compress/flate/deflate.go b/vendor/github.com/klauspost/compress/flate/deflate.go new file mode 100644 index 0000000..9e6e7ff --- /dev/null +++ b/vendor/github.com/klauspost/compress/flate/deflate.go @@ -0,0 +1,1353 @@ +// Copyright 2009 The Go Authors. All rights reserved. +// Copyright (c) 2015 Klaus Post +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package flate + +import ( + "fmt" + "io" + "math" +) + +const ( + NoCompression = 0 + BestSpeed = 1 + BestCompression = 9 + DefaultCompression = -1 + + // HuffmanOnly disables Lempel-Ziv match searching and only performs Huffman + // entropy encoding. This mode is useful in compressing data that has + // already been compressed with an LZ style algorithm (e.g. Snappy or LZ4) + // that lacks an entropy encoder. Compression gains are achieved when + // certain bytes in the input stream occur more frequently than others. + // + // Note that HuffmanOnly produces a compressed output that is + // RFC 1951 compliant. That is, any valid DEFLATE decompressor will + // continue to be able to decompress this output. + HuffmanOnly = -2 + ConstantCompression = HuffmanOnly // compatibility alias. 
+ + logWindowSize = 15 + windowSize = 1 << logWindowSize + windowMask = windowSize - 1 + logMaxOffsetSize = 15 // Standard DEFLATE + minMatchLength = 4 // The smallest match that the compressor looks for + maxMatchLength = 258 // The longest match for the compressor + minOffsetSize = 1 // The shortest offset that makes any sense + + // The maximum number of tokens we put into a single flat block, just too + // stop things from getting too large. + maxFlateBlockTokens = 1 << 14 + maxStoreBlockSize = 65535 + hashBits = 17 // After 17 performance degrades + hashSize = 1 << hashBits + hashMask = (1 << hashBits) - 1 + hashShift = (hashBits + minMatchLength - 1) / minMatchLength + maxHashOffset = 1 << 24 + + skipNever = math.MaxInt32 +) + +var useSSE42 bool + +type compressionLevel struct { + good, lazy, nice, chain, fastSkipHashing, level int +} + +// Compression levels have been rebalanced from zlib deflate defaults +// to give a bigger spread in speed and compression. +// See https://blog.klauspost.com/rebalancing-deflate-compression-levels/ +var levels = []compressionLevel{ + {}, // 0 + // Level 1-4 uses specialized algorithm - values not used + {0, 0, 0, 0, 0, 1}, + {0, 0, 0, 0, 0, 2}, + {0, 0, 0, 0, 0, 3}, + {0, 0, 0, 0, 0, 4}, + // For levels 5-6 we don't bother trying with lazy matches. + // Lazy matching is at least 30% slower, with 1.5% increase. + {6, 0, 12, 8, 12, 5}, + {8, 0, 24, 16, 16, 6}, + // Levels 7-9 use increasingly more lazy matching + // and increasingly stringent conditions for "good enough". + {8, 8, 24, 16, skipNever, 7}, + {10, 16, 24, 64, skipNever, 8}, + {32, 258, 258, 4096, skipNever, 9}, +} + +type compressor struct { + compressionLevel + + w *huffmanBitWriter + bulkHasher func([]byte, []uint32) + + // compression algorithm + fill func(*compressor, []byte) int // copy data to window + step func(*compressor) // process window + sync bool // requesting flush + + // Input hash chains + // hashHead[hashValue] contains the largest inputIndex with the specified hash value + // If hashHead[hashValue] is within the current window, then + // hashPrev[hashHead[hashValue] & windowMask] contains the previous index + // with the same hash value. + chainHead int + hashHead [hashSize]uint32 + hashPrev [windowSize]uint32 + hashOffset int + + // input window: unprocessed data is window[index:windowEnd] + index int + window []byte + windowEnd int + blockStart int // window index where current tokens start + byteAvailable bool // if true, still need to process window[index-1]. + + // queued output tokens + tokens tokens + + // deflate state + length int + offset int + hash uint32 + maxInsertIndex int + err error + ii uint16 // position of last match, intended to overflow to reset. + + snap snappyEnc + hashMatch [maxMatchLength + minMatchLength]uint32 +} + +func (d *compressor) fillDeflate(b []byte) int { + if d.index >= 2*windowSize-(minMatchLength+maxMatchLength) { + // shift the window by windowSize + copy(d.window[:], d.window[windowSize:2*windowSize]) + d.index -= windowSize + d.windowEnd -= windowSize + if d.blockStart >= windowSize { + d.blockStart -= windowSize + } else { + d.blockStart = math.MaxInt32 + } + d.hashOffset += windowSize + if d.hashOffset > maxHashOffset { + delta := d.hashOffset - 1 + d.hashOffset -= delta + d.chainHead -= delta + // Iterate over slices instead of arrays to avoid copying + // the entire table onto the stack (Issue #18625). 
+ for i, v := range d.hashPrev[:] { + if int(v) > delta { + d.hashPrev[i] = uint32(int(v) - delta) + } else { + d.hashPrev[i] = 0 + } + } + for i, v := range d.hashHead[:] { + if int(v) > delta { + d.hashHead[i] = uint32(int(v) - delta) + } else { + d.hashHead[i] = 0 + } + } + } + } + n := copy(d.window[d.windowEnd:], b) + d.windowEnd += n + return n +} + +func (d *compressor) writeBlock(tok tokens, index int, eof bool) error { + if index > 0 || eof { + var window []byte + if d.blockStart <= index { + window = d.window[d.blockStart:index] + } + d.blockStart = index + d.w.writeBlock(tok.tokens[:tok.n], eof, window) + return d.w.err + } + return nil +} + +// writeBlockSkip writes the current block and uses the number of tokens +// to determine if the block should be stored on no matches, or +// only huffman encoded. +func (d *compressor) writeBlockSkip(tok tokens, index int, eof bool) error { + if index > 0 || eof { + if d.blockStart <= index { + window := d.window[d.blockStart:index] + // If we removed less than a 64th of all literals + // we huffman compress the block. + if int(tok.n) > len(window)-int(tok.n>>6) { + d.w.writeBlockHuff(eof, window) + } else { + // Write a dynamic huffman block. + d.w.writeBlockDynamic(tok.tokens[:tok.n], eof, window) + } + } else { + d.w.writeBlock(tok.tokens[:tok.n], eof, nil) + } + d.blockStart = index + return d.w.err + } + return nil +} + +// fillWindow will fill the current window with the supplied +// dictionary and calculate all hashes. +// This is much faster than doing a full encode. +// Should only be used after a start/reset. +func (d *compressor) fillWindow(b []byte) { + // Do not fill window if we are in store-only mode, + // use constant or Snappy compression. + switch d.compressionLevel.level { + case 0, 1, 2: + return + } + // If we are given too much, cut it. + if len(b) > windowSize { + b = b[len(b)-windowSize:] + } + // Add all to window. + n := copy(d.window[d.windowEnd:], b) + + // Calculate 256 hashes at the time (more L1 cache hits) + loops := (n + 256 - minMatchLength) / 256 + for j := 0; j < loops; j++ { + startindex := j * 256 + end := startindex + 256 + minMatchLength - 1 + if end > n { + end = n + } + tocheck := d.window[startindex:end] + dstSize := len(tocheck) - minMatchLength + 1 + + if dstSize <= 0 { + continue + } + + dst := d.hashMatch[:dstSize] + d.bulkHasher(tocheck, dst) + var newH uint32 + for i, val := range dst { + di := i + startindex + newH = val & hashMask + // Get previous value with the same hash. + // Our chain should point to the previous value. + d.hashPrev[di&windowMask] = d.hashHead[newH] + // Set the head of the hash chain to us. + d.hashHead[newH] = uint32(di + d.hashOffset) + } + d.hash = newH + } + // Update window information. + d.windowEnd += n + d.index = n +} + +// Try to find a match starting at index whose length is greater than prevSize. +// We only look at chainCount possibilities before giving up. +// pos = d.index, prevHead = d.chainHead-d.hashOffset, prevLength=minMatchLength-1, lookahead +func (d *compressor) findMatch(pos int, prevHead int, prevLength int, lookahead int) (length, offset int, ok bool) { + minMatchLook := maxMatchLength + if lookahead < minMatchLook { + minMatchLook = lookahead + } + + win := d.window[0 : pos+minMatchLook] + + // We quit when we get a match that's at least nice long + nice := len(win) - pos + if d.nice < nice { + nice = d.nice + } + + // If we've got a match that's good enough, only look in 1/4 the chain. 
+ tries := d.chain + length = prevLength + if length >= d.good { + tries >>= 2 + } + + wEnd := win[pos+length] + wPos := win[pos:] + minIndex := pos - windowSize + + for i := prevHead; tries > 0; tries-- { + if wEnd == win[i+length] { + n := matchLen(win[i:], wPos, minMatchLook) + + if n > length && (n > minMatchLength || pos-i <= 4096) { + length = n + offset = pos - i + ok = true + if n >= nice { + // The match is good enough that we don't try to find a better one. + break + } + wEnd = win[pos+n] + } + } + if i == minIndex { + // hashPrev[i & windowMask] has already been overwritten, so stop now. + break + } + i = int(d.hashPrev[i&windowMask]) - d.hashOffset + if i < minIndex || i < 0 { + break + } + } + return +} + +// Try to find a match starting at index whose length is greater than prevSize. +// We only look at chainCount possibilities before giving up. +// pos = d.index, prevHead = d.chainHead-d.hashOffset, prevLength=minMatchLength-1, lookahead +func (d *compressor) findMatchSSE(pos int, prevHead int, prevLength int, lookahead int) (length, offset int, ok bool) { + minMatchLook := maxMatchLength + if lookahead < minMatchLook { + minMatchLook = lookahead + } + + win := d.window[0 : pos+minMatchLook] + + // We quit when we get a match that's at least nice long + nice := len(win) - pos + if d.nice < nice { + nice = d.nice + } + + // If we've got a match that's good enough, only look in 1/4 the chain. + tries := d.chain + length = prevLength + if length >= d.good { + tries >>= 2 + } + + wEnd := win[pos+length] + wPos := win[pos:] + minIndex := pos - windowSize + + for i := prevHead; tries > 0; tries-- { + if wEnd == win[i+length] { + n := matchLenSSE4(win[i:], wPos, minMatchLook) + + if n > length && (n > minMatchLength || pos-i <= 4096) { + length = n + offset = pos - i + ok = true + if n >= nice { + // The match is good enough that we don't try to find a better one. + break + } + wEnd = win[pos+n] + } + } + if i == minIndex { + // hashPrev[i & windowMask] has already been overwritten, so stop now. + break + } + i = int(d.hashPrev[i&windowMask]) - d.hashOffset + if i < minIndex || i < 0 { + break + } + } + return +} + +func (d *compressor) writeStoredBlock(buf []byte) error { + if d.w.writeStoredHeader(len(buf), false); d.w.err != nil { + return d.w.err + } + d.w.writeBytes(buf) + return d.w.err +} + +const hashmul = 0x1e35a7bd + +// hash4 returns a hash representation of the first 4 bytes +// of the supplied slice. +// The caller must ensure that len(b) >= 4. +func hash4(b []byte) uint32 { + return ((uint32(b[3]) | uint32(b[2])<<8 | uint32(b[1])<<16 | uint32(b[0])<<24) * hashmul) >> (32 - hashBits) +} + +// bulkHash4 will compute hashes using the same +// algorithm as hash4 +func bulkHash4(b []byte, dst []uint32) { + if len(b) < minMatchLength { + return + } + hb := uint32(b[3]) | uint32(b[2])<<8 | uint32(b[1])<<16 | uint32(b[0])<<24 + dst[0] = (hb * hashmul) >> (32 - hashBits) + end := len(b) - minMatchLength + 1 + for i := 1; i < end; i++ { + hb = (hb << 8) | uint32(b[i+3]) + dst[i] = (hb * hashmul) >> (32 - hashBits) + } +} + +// matchLen returns the number of matching bytes in a and b +// up to length 'max'. Both slices must be at least 'max' +// bytes in size. 
+func matchLen(a, b []byte, max int) int { + a = a[:max] + b = b[:len(a)] + for i, av := range a { + if b[i] != av { + return i + } + } + return max +} + +func (d *compressor) initDeflate() { + d.window = make([]byte, 2*windowSize) + d.hashOffset = 1 + d.length = minMatchLength - 1 + d.offset = 0 + d.byteAvailable = false + d.index = 0 + d.hash = 0 + d.chainHead = -1 + d.bulkHasher = bulkHash4 + if useSSE42 { + d.bulkHasher = crc32sseAll + } +} + +// Assumes that d.fastSkipHashing != skipNever, +// otherwise use deflateLazy +func (d *compressor) deflate() { + + // Sanity enables additional runtime tests. + // It's intended to be used during development + // to supplement the currently ad-hoc unit tests. + const sanity = false + + if d.windowEnd-d.index < minMatchLength+maxMatchLength && !d.sync { + return + } + + d.maxInsertIndex = d.windowEnd - (minMatchLength - 1) + if d.index < d.maxInsertIndex { + d.hash = hash4(d.window[d.index : d.index+minMatchLength]) + } + + for { + if sanity && d.index > d.windowEnd { + panic("index > windowEnd") + } + lookahead := d.windowEnd - d.index + if lookahead < minMatchLength+maxMatchLength { + if !d.sync { + return + } + if sanity && d.index > d.windowEnd { + panic("index > windowEnd") + } + if lookahead == 0 { + if d.tokens.n > 0 { + if d.err = d.writeBlockSkip(d.tokens, d.index, false); d.err != nil { + return + } + d.tokens.n = 0 + } + return + } + } + if d.index < d.maxInsertIndex { + // Update the hash + d.hash = hash4(d.window[d.index : d.index+minMatchLength]) + ch := d.hashHead[d.hash&hashMask] + d.chainHead = int(ch) + d.hashPrev[d.index&windowMask] = ch + d.hashHead[d.hash&hashMask] = uint32(d.index + d.hashOffset) + } + d.length = minMatchLength - 1 + d.offset = 0 + minIndex := d.index - windowSize + if minIndex < 0 { + minIndex = 0 + } + + if d.chainHead-d.hashOffset >= minIndex && lookahead > minMatchLength-1 { + if newLength, newOffset, ok := d.findMatch(d.index, d.chainHead-d.hashOffset, minMatchLength-1, lookahead); ok { + d.length = newLength + d.offset = newOffset + } + } + if d.length >= minMatchLength { + d.ii = 0 + // There was a match at the previous step, and the current match is + // not better. Output the previous match. + // "d.length-3" should NOT be "d.length-minMatchLength", since the format always assume 3 + d.tokens.tokens[d.tokens.n] = matchToken(uint32(d.length-3), uint32(d.offset-minOffsetSize)) + d.tokens.n++ + // Insert in the hash table all strings up to the end of the match. + // index and index-1 are already inserted. If there is not enough + // lookahead, the last two strings are not inserted into the hash + // table. + if d.length <= d.fastSkipHashing { + var newIndex int + newIndex = d.index + d.length + // Calculate missing hashes + end := newIndex + if end > d.maxInsertIndex { + end = d.maxInsertIndex + } + end += minMatchLength - 1 + startindex := d.index + 1 + if startindex > d.maxInsertIndex { + startindex = d.maxInsertIndex + } + tocheck := d.window[startindex:end] + dstSize := len(tocheck) - minMatchLength + 1 + if dstSize > 0 { + dst := d.hashMatch[:dstSize] + bulkHash4(tocheck, dst) + var newH uint32 + for i, val := range dst { + di := i + startindex + newH = val & hashMask + // Get previous value with the same hash. + // Our chain should point to the previous value. + d.hashPrev[di&windowMask] = d.hashHead[newH] + // Set the head of the hash chain to us. 
+ d.hashHead[newH] = uint32(di + d.hashOffset) + } + d.hash = newH + } + d.index = newIndex + } else { + // For matches this long, we don't bother inserting each individual + // item into the table. + d.index += d.length + if d.index < d.maxInsertIndex { + d.hash = hash4(d.window[d.index : d.index+minMatchLength]) + } + } + if d.tokens.n == maxFlateBlockTokens { + // The block includes the current character + if d.err = d.writeBlockSkip(d.tokens, d.index, false); d.err != nil { + return + } + d.tokens.n = 0 + } + } else { + d.ii++ + end := d.index + int(d.ii>>uint(d.fastSkipHashing)) + 1 + if end > d.windowEnd { + end = d.windowEnd + } + for i := d.index; i < end; i++ { + d.tokens.tokens[d.tokens.n] = literalToken(uint32(d.window[i])) + d.tokens.n++ + if d.tokens.n == maxFlateBlockTokens { + if d.err = d.writeBlockSkip(d.tokens, i+1, false); d.err != nil { + return + } + d.tokens.n = 0 + } + } + d.index = end + } + } +} + +// deflateLazy is the same as deflate, but with d.fastSkipHashing == skipNever, +// meaning it always has lazy matching on. +func (d *compressor) deflateLazy() { + // Sanity enables additional runtime tests. + // It's intended to be used during development + // to supplement the currently ad-hoc unit tests. + const sanity = false + + if d.windowEnd-d.index < minMatchLength+maxMatchLength && !d.sync { + return + } + + d.maxInsertIndex = d.windowEnd - (minMatchLength - 1) + if d.index < d.maxInsertIndex { + d.hash = hash4(d.window[d.index : d.index+minMatchLength]) + } + + for { + if sanity && d.index > d.windowEnd { + panic("index > windowEnd") + } + lookahead := d.windowEnd - d.index + if lookahead < minMatchLength+maxMatchLength { + if !d.sync { + return + } + if sanity && d.index > d.windowEnd { + panic("index > windowEnd") + } + if lookahead == 0 { + // Flush current output block if any. + if d.byteAvailable { + // There is still one pending token that needs to be flushed + d.tokens.tokens[d.tokens.n] = literalToken(uint32(d.window[d.index-1])) + d.tokens.n++ + d.byteAvailable = false + } + if d.tokens.n > 0 { + if d.err = d.writeBlock(d.tokens, d.index, false); d.err != nil { + return + } + d.tokens.n = 0 + } + return + } + } + if d.index < d.maxInsertIndex { + // Update the hash + d.hash = hash4(d.window[d.index : d.index+minMatchLength]) + ch := d.hashHead[d.hash&hashMask] + d.chainHead = int(ch) + d.hashPrev[d.index&windowMask] = ch + d.hashHead[d.hash&hashMask] = uint32(d.index + d.hashOffset) + } + prevLength := d.length + prevOffset := d.offset + d.length = minMatchLength - 1 + d.offset = 0 + minIndex := d.index - windowSize + if minIndex < 0 { + minIndex = 0 + } + + if d.chainHead-d.hashOffset >= minIndex && lookahead > prevLength && prevLength < d.lazy { + if newLength, newOffset, ok := d.findMatch(d.index, d.chainHead-d.hashOffset, minMatchLength-1, lookahead); ok { + d.length = newLength + d.offset = newOffset + } + } + if prevLength >= minMatchLength && d.length <= prevLength { + // There was a match at the previous step, and the current match is + // not better. Output the previous match. + d.tokens.tokens[d.tokens.n] = matchToken(uint32(prevLength-3), uint32(prevOffset-minOffsetSize)) + d.tokens.n++ + + // Insert in the hash table all strings up to the end of the match. + // index and index-1 are already inserted. If there is not enough + // lookahead, the last two strings are not inserted into the hash + // table. 
+ var newIndex int + newIndex = d.index + prevLength - 1 + // Calculate missing hashes + end := newIndex + if end > d.maxInsertIndex { + end = d.maxInsertIndex + } + end += minMatchLength - 1 + startindex := d.index + 1 + if startindex > d.maxInsertIndex { + startindex = d.maxInsertIndex + } + tocheck := d.window[startindex:end] + dstSize := len(tocheck) - minMatchLength + 1 + if dstSize > 0 { + dst := d.hashMatch[:dstSize] + bulkHash4(tocheck, dst) + var newH uint32 + for i, val := range dst { + di := i + startindex + newH = val & hashMask + // Get previous value with the same hash. + // Our chain should point to the previous value. + d.hashPrev[di&windowMask] = d.hashHead[newH] + // Set the head of the hash chain to us. + d.hashHead[newH] = uint32(di + d.hashOffset) + } + d.hash = newH + } + + d.index = newIndex + d.byteAvailable = false + d.length = minMatchLength - 1 + if d.tokens.n == maxFlateBlockTokens { + // The block includes the current character + if d.err = d.writeBlock(d.tokens, d.index, false); d.err != nil { + return + } + d.tokens.n = 0 + } + } else { + // Reset, if we got a match this run. + if d.length >= minMatchLength { + d.ii = 0 + } + // We have a byte waiting. Emit it. + if d.byteAvailable { + d.ii++ + d.tokens.tokens[d.tokens.n] = literalToken(uint32(d.window[d.index-1])) + d.tokens.n++ + if d.tokens.n == maxFlateBlockTokens { + if d.err = d.writeBlock(d.tokens, d.index, false); d.err != nil { + return + } + d.tokens.n = 0 + } + d.index++ + + // If we have a long run of no matches, skip additional bytes + // Resets when d.ii overflows after 64KB. + if d.ii > 31 { + n := int(d.ii >> 5) + for j := 0; j < n; j++ { + if d.index >= d.windowEnd-1 { + break + } + + d.tokens.tokens[d.tokens.n] = literalToken(uint32(d.window[d.index-1])) + d.tokens.n++ + if d.tokens.n == maxFlateBlockTokens { + if d.err = d.writeBlock(d.tokens, d.index, false); d.err != nil { + return + } + d.tokens.n = 0 + } + d.index++ + } + // Flush last byte + d.tokens.tokens[d.tokens.n] = literalToken(uint32(d.window[d.index-1])) + d.tokens.n++ + d.byteAvailable = false + // d.length = minMatchLength - 1 // not needed, since d.ii is reset above, so it should never be > minMatchLength + if d.tokens.n == maxFlateBlockTokens { + if d.err = d.writeBlock(d.tokens, d.index, false); d.err != nil { + return + } + d.tokens.n = 0 + } + } + } else { + d.index++ + d.byteAvailable = true + } + } + } +} + +// Assumes that d.fastSkipHashing != skipNever, +// otherwise use deflateLazySSE +func (d *compressor) deflateSSE() { + + // Sanity enables additional runtime tests. + // It's intended to be used during development + // to supplement the currently ad-hoc unit tests. 
+ const sanity = false + + if d.windowEnd-d.index < minMatchLength+maxMatchLength && !d.sync { + return + } + + d.maxInsertIndex = d.windowEnd - (minMatchLength - 1) + if d.index < d.maxInsertIndex { + d.hash = crc32sse(d.window[d.index:d.index+minMatchLength]) & hashMask + } + + for { + if sanity && d.index > d.windowEnd { + panic("index > windowEnd") + } + lookahead := d.windowEnd - d.index + if lookahead < minMatchLength+maxMatchLength { + if !d.sync { + return + } + if sanity && d.index > d.windowEnd { + panic("index > windowEnd") + } + if lookahead == 0 { + if d.tokens.n > 0 { + if d.err = d.writeBlockSkip(d.tokens, d.index, false); d.err != nil { + return + } + d.tokens.n = 0 + } + return + } + } + if d.index < d.maxInsertIndex { + // Update the hash + d.hash = crc32sse(d.window[d.index:d.index+minMatchLength]) & hashMask + ch := d.hashHead[d.hash] + d.chainHead = int(ch) + d.hashPrev[d.index&windowMask] = ch + d.hashHead[d.hash] = uint32(d.index + d.hashOffset) + } + d.length = minMatchLength - 1 + d.offset = 0 + minIndex := d.index - windowSize + if minIndex < 0 { + minIndex = 0 + } + + if d.chainHead-d.hashOffset >= minIndex && lookahead > minMatchLength-1 { + if newLength, newOffset, ok := d.findMatchSSE(d.index, d.chainHead-d.hashOffset, minMatchLength-1, lookahead); ok { + d.length = newLength + d.offset = newOffset + } + } + if d.length >= minMatchLength { + d.ii = 0 + // There was a match at the previous step, and the current match is + // not better. Output the previous match. + // "d.length-3" should NOT be "d.length-minMatchLength", since the format always assume 3 + d.tokens.tokens[d.tokens.n] = matchToken(uint32(d.length-3), uint32(d.offset-minOffsetSize)) + d.tokens.n++ + // Insert in the hash table all strings up to the end of the match. + // index and index-1 are already inserted. If there is not enough + // lookahead, the last two strings are not inserted into the hash + // table. + if d.length <= d.fastSkipHashing { + var newIndex int + newIndex = d.index + d.length + // Calculate missing hashes + end := newIndex + if end > d.maxInsertIndex { + end = d.maxInsertIndex + } + end += minMatchLength - 1 + startindex := d.index + 1 + if startindex > d.maxInsertIndex { + startindex = d.maxInsertIndex + } + tocheck := d.window[startindex:end] + dstSize := len(tocheck) - minMatchLength + 1 + if dstSize > 0 { + dst := d.hashMatch[:dstSize] + + crc32sseAll(tocheck, dst) + var newH uint32 + for i, val := range dst { + di := i + startindex + newH = val & hashMask + // Get previous value with the same hash. + // Our chain should point to the previous value. + d.hashPrev[di&windowMask] = d.hashHead[newH] + // Set the head of the hash chain to us. + d.hashHead[newH] = uint32(di + d.hashOffset) + } + d.hash = newH + } + d.index = newIndex + } else { + // For matches this long, we don't bother inserting each individual + // item into the table. 
+ d.index += d.length + if d.index < d.maxInsertIndex { + d.hash = crc32sse(d.window[d.index:d.index+minMatchLength]) & hashMask + } + } + if d.tokens.n == maxFlateBlockTokens { + // The block includes the current character + if d.err = d.writeBlockSkip(d.tokens, d.index, false); d.err != nil { + return + } + d.tokens.n = 0 + } + } else { + d.ii++ + end := d.index + int(d.ii>>5) + 1 + if end > d.windowEnd { + end = d.windowEnd + } + for i := d.index; i < end; i++ { + d.tokens.tokens[d.tokens.n] = literalToken(uint32(d.window[i])) + d.tokens.n++ + if d.tokens.n == maxFlateBlockTokens { + if d.err = d.writeBlockSkip(d.tokens, i+1, false); d.err != nil { + return + } + d.tokens.n = 0 + } + } + d.index = end + } + } +} + +// deflateLazy is the same as deflate, but with d.fastSkipHashing == skipNever, +// meaning it always has lazy matching on. +func (d *compressor) deflateLazySSE() { + // Sanity enables additional runtime tests. + // It's intended to be used during development + // to supplement the currently ad-hoc unit tests. + const sanity = false + + if d.windowEnd-d.index < minMatchLength+maxMatchLength && !d.sync { + return + } + + d.maxInsertIndex = d.windowEnd - (minMatchLength - 1) + if d.index < d.maxInsertIndex { + d.hash = crc32sse(d.window[d.index:d.index+minMatchLength]) & hashMask + } + + for { + if sanity && d.index > d.windowEnd { + panic("index > windowEnd") + } + lookahead := d.windowEnd - d.index + if lookahead < minMatchLength+maxMatchLength { + if !d.sync { + return + } + if sanity && d.index > d.windowEnd { + panic("index > windowEnd") + } + if lookahead == 0 { + // Flush current output block if any. + if d.byteAvailable { + // There is still one pending token that needs to be flushed + d.tokens.tokens[d.tokens.n] = literalToken(uint32(d.window[d.index-1])) + d.tokens.n++ + d.byteAvailable = false + } + if d.tokens.n > 0 { + if d.err = d.writeBlock(d.tokens, d.index, false); d.err != nil { + return + } + d.tokens.n = 0 + } + return + } + } + if d.index < d.maxInsertIndex { + // Update the hash + d.hash = crc32sse(d.window[d.index:d.index+minMatchLength]) & hashMask + ch := d.hashHead[d.hash] + d.chainHead = int(ch) + d.hashPrev[d.index&windowMask] = ch + d.hashHead[d.hash] = uint32(d.index + d.hashOffset) + } + prevLength := d.length + prevOffset := d.offset + d.length = minMatchLength - 1 + d.offset = 0 + minIndex := d.index - windowSize + if minIndex < 0 { + minIndex = 0 + } + + if d.chainHead-d.hashOffset >= minIndex && lookahead > prevLength && prevLength < d.lazy { + if newLength, newOffset, ok := d.findMatchSSE(d.index, d.chainHead-d.hashOffset, minMatchLength-1, lookahead); ok { + d.length = newLength + d.offset = newOffset + } + } + if prevLength >= minMatchLength && d.length <= prevLength { + // There was a match at the previous step, and the current match is + // not better. Output the previous match. + d.tokens.tokens[d.tokens.n] = matchToken(uint32(prevLength-3), uint32(prevOffset-minOffsetSize)) + d.tokens.n++ + + // Insert in the hash table all strings up to the end of the match. + // index and index-1 are already inserted. If there is not enough + // lookahead, the last two strings are not inserted into the hash + // table. 
+ var newIndex int + newIndex = d.index + prevLength - 1 + // Calculate missing hashes + end := newIndex + if end > d.maxInsertIndex { + end = d.maxInsertIndex + } + end += minMatchLength - 1 + startindex := d.index + 1 + if startindex > d.maxInsertIndex { + startindex = d.maxInsertIndex + } + tocheck := d.window[startindex:end] + dstSize := len(tocheck) - minMatchLength + 1 + if dstSize > 0 { + dst := d.hashMatch[:dstSize] + crc32sseAll(tocheck, dst) + var newH uint32 + for i, val := range dst { + di := i + startindex + newH = val & hashMask + // Get previous value with the same hash. + // Our chain should point to the previous value. + d.hashPrev[di&windowMask] = d.hashHead[newH] + // Set the head of the hash chain to us. + d.hashHead[newH] = uint32(di + d.hashOffset) + } + d.hash = newH + } + + d.index = newIndex + d.byteAvailable = false + d.length = minMatchLength - 1 + if d.tokens.n == maxFlateBlockTokens { + // The block includes the current character + if d.err = d.writeBlock(d.tokens, d.index, false); d.err != nil { + return + } + d.tokens.n = 0 + } + } else { + // Reset, if we got a match this run. + if d.length >= minMatchLength { + d.ii = 0 + } + // We have a byte waiting. Emit it. + if d.byteAvailable { + d.ii++ + d.tokens.tokens[d.tokens.n] = literalToken(uint32(d.window[d.index-1])) + d.tokens.n++ + if d.tokens.n == maxFlateBlockTokens { + if d.err = d.writeBlock(d.tokens, d.index, false); d.err != nil { + return + } + d.tokens.n = 0 + } + d.index++ + + // If we have a long run of no matches, skip additional bytes + // Resets when d.ii overflows after 64KB. + if d.ii > 31 { + n := int(d.ii >> 6) + for j := 0; j < n; j++ { + if d.index >= d.windowEnd-1 { + break + } + + d.tokens.tokens[d.tokens.n] = literalToken(uint32(d.window[d.index-1])) + d.tokens.n++ + if d.tokens.n == maxFlateBlockTokens { + if d.err = d.writeBlock(d.tokens, d.index, false); d.err != nil { + return + } + d.tokens.n = 0 + } + d.index++ + } + // Flush last byte + d.tokens.tokens[d.tokens.n] = literalToken(uint32(d.window[d.index-1])) + d.tokens.n++ + d.byteAvailable = false + // d.length = minMatchLength - 1 // not needed, since d.ii is reset above, so it should never be > minMatchLength + if d.tokens.n == maxFlateBlockTokens { + if d.err = d.writeBlock(d.tokens, d.index, false); d.err != nil { + return + } + d.tokens.n = 0 + } + } + } else { + d.index++ + d.byteAvailable = true + } + } + } +} + +func (d *compressor) store() { + if d.windowEnd > 0 && (d.windowEnd == maxStoreBlockSize || d.sync) { + d.err = d.writeStoredBlock(d.window[:d.windowEnd]) + d.windowEnd = 0 + } +} + +// fillWindow will fill the buffer with data for huffman-only compression. +// The number of bytes copied is returned. +func (d *compressor) fillBlock(b []byte) int { + n := copy(d.window[d.windowEnd:], b) + d.windowEnd += n + return n +} + +// storeHuff will compress and store the currently added data, +// if enough has been accumulated or we at the end of the stream. +// Any error that occurred will be in d.err +func (d *compressor) storeHuff() { + if d.windowEnd < len(d.window) && !d.sync || d.windowEnd == 0 { + return + } + d.w.writeBlockHuff(false, d.window[:d.windowEnd]) + d.err = d.w.err + d.windowEnd = 0 +} + +// storeHuff will compress and store the currently added data, +// if enough has been accumulated or we at the end of the stream. +// Any error that occurred will be in d.err +func (d *compressor) storeSnappy() { + // We only compress if we have maxStoreBlockSize. 
+ if d.windowEnd < maxStoreBlockSize { + if !d.sync { + return + } + // Handle extremely small sizes. + if d.windowEnd < 128 { + if d.windowEnd == 0 { + return + } + if d.windowEnd <= 32 { + d.err = d.writeStoredBlock(d.window[:d.windowEnd]) + d.tokens.n = 0 + d.windowEnd = 0 + } else { + d.w.writeBlockHuff(false, d.window[:d.windowEnd]) + d.err = d.w.err + } + d.tokens.n = 0 + d.windowEnd = 0 + d.snap.Reset() + return + } + } + + d.snap.Encode(&d.tokens, d.window[:d.windowEnd]) + // If we made zero matches, store the block as is. + if int(d.tokens.n) == d.windowEnd { + d.err = d.writeStoredBlock(d.window[:d.windowEnd]) + // If we removed less than 1/16th, huffman compress the block. + } else if int(d.tokens.n) > d.windowEnd-(d.windowEnd>>4) { + d.w.writeBlockHuff(false, d.window[:d.windowEnd]) + d.err = d.w.err + } else { + d.w.writeBlockDynamic(d.tokens.tokens[:d.tokens.n], false, d.window[:d.windowEnd]) + d.err = d.w.err + } + d.tokens.n = 0 + d.windowEnd = 0 +} + +// write will add input byte to the stream. +// Unless an error occurs all bytes will be consumed. +func (d *compressor) write(b []byte) (n int, err error) { + if d.err != nil { + return 0, d.err + } + n = len(b) + for len(b) > 0 { + d.step(d) + b = b[d.fill(d, b):] + if d.err != nil { + return 0, d.err + } + } + return n, d.err +} + +func (d *compressor) syncFlush() error { + d.sync = true + if d.err != nil { + return d.err + } + d.step(d) + if d.err == nil { + d.w.writeStoredHeader(0, false) + d.w.flush() + d.err = d.w.err + } + d.sync = false + return d.err +} + +func (d *compressor) init(w io.Writer, level int) (err error) { + d.w = newHuffmanBitWriter(w) + + switch { + case level == NoCompression: + d.window = make([]byte, maxStoreBlockSize) + d.fill = (*compressor).fillBlock + d.step = (*compressor).store + case level == ConstantCompression: + d.window = make([]byte, maxStoreBlockSize) + d.fill = (*compressor).fillBlock + d.step = (*compressor).storeHuff + case level >= 1 && level <= 4: + d.snap = newSnappy(level) + d.window = make([]byte, maxStoreBlockSize) + d.fill = (*compressor).fillBlock + d.step = (*compressor).storeSnappy + case level == DefaultCompression: + level = 5 + fallthrough + case 5 <= level && level <= 9: + d.compressionLevel = levels[level] + d.initDeflate() + d.fill = (*compressor).fillDeflate + if d.fastSkipHashing == skipNever { + if useSSE42 { + d.step = (*compressor).deflateLazySSE + } else { + d.step = (*compressor).deflateLazy + } + } else { + if useSSE42 { + d.step = (*compressor).deflateSSE + } else { + d.step = (*compressor).deflate + + } + } + default: + return fmt.Errorf("flate: invalid compression level %d: want value in range [-2, 9]", level) + } + return nil +} + +// reset the state of the compressor. +func (d *compressor) reset(w io.Writer) { + d.w.reset(w) + d.sync = false + d.err = nil + // We only need to reset a few things for Snappy. + if d.snap != nil { + d.snap.Reset() + d.windowEnd = 0 + d.tokens.n = 0 + return + } + switch d.compressionLevel.chain { + case 0: + // level was NoCompression or ConstantCompresssion. 
+ d.windowEnd = 0 + default: + d.chainHead = -1 + for i := range d.hashHead { + d.hashHead[i] = 0 + } + for i := range d.hashPrev { + d.hashPrev[i] = 0 + } + d.hashOffset = 1 + d.index, d.windowEnd = 0, 0 + d.blockStart, d.byteAvailable = 0, false + d.tokens.n = 0 + d.length = minMatchLength - 1 + d.offset = 0 + d.hash = 0 + d.ii = 0 + d.maxInsertIndex = 0 + } +} + +func (d *compressor) close() error { + if d.err != nil { + return d.err + } + d.sync = true + d.step(d) + if d.err != nil { + return d.err + } + if d.w.writeStoredHeader(0, true); d.w.err != nil { + return d.w.err + } + d.w.flush() + return d.w.err +} + +// NewWriter returns a new Writer compressing data at the given level. +// Following zlib, levels range from 1 (BestSpeed) to 9 (BestCompression); +// higher levels typically run slower but compress more. +// Level 0 (NoCompression) does not attempt any compression; it only adds the +// necessary DEFLATE framing. +// Level -1 (DefaultCompression) uses the default compression level. +// Level -2 (ConstantCompression) will use Huffman compression only, giving +// a very fast compression for all types of input, but sacrificing considerable +// compression efficiency. +// +// If level is in the range [-2, 9] then the error returned will be nil. +// Otherwise the error returned will be non-nil. +func NewWriter(w io.Writer, level int) (*Writer, error) { + var dw Writer + if err := dw.d.init(w, level); err != nil { + return nil, err + } + return &dw, nil +} + +// NewWriterDict is like NewWriter but initializes the new +// Writer with a preset dictionary. The returned Writer behaves +// as if the dictionary had been written to it without producing +// any compressed output. The compressed data written to w +// can only be decompressed by a Reader initialized with the +// same dictionary. +func NewWriterDict(w io.Writer, level int, dict []byte) (*Writer, error) { + dw := &dictWriter{w} + zw, err := NewWriter(dw, level) + if err != nil { + return nil, err + } + zw.d.fillWindow(dict) + zw.dict = append(zw.dict, dict...) // duplicate dictionary for Reset method. + return zw, err +} + +type dictWriter struct { + w io.Writer +} + +func (w *dictWriter) Write(b []byte) (n int, err error) { + return w.w.Write(b) +} + +// A Writer takes data written to it and writes the compressed +// form of that data to an underlying writer (see NewWriter). +type Writer struct { + d compressor + dict []byte +} + +// Write writes data to w, which will eventually write the +// compressed form of data to its underlying writer. +func (w *Writer) Write(data []byte) (n int, err error) { + return w.d.write(data) +} + +// Flush flushes any pending data to the underlying writer. +// It is useful mainly in compressed network protocols, to ensure that +// a remote reader has enough data to reconstruct a packet. +// Flush does not return until the data has been written. +// Calling Flush when there is no pending data still causes the Writer +// to emit a sync marker of at least 4 bytes. +// If the underlying writer returns an error, Flush returns that error. +// +// In the terminology of the zlib library, Flush is equivalent to Z_SYNC_FLUSH. +func (w *Writer) Flush() error { + // For more about flushing: + // http://www.bolet.org/~pornin/deflate-flush.html + return w.d.syncFlush() +} + +// Close flushes and closes the writer. 
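A minimal usage sketch of the Writer API documented above, assuming the vendored import path: it compresses the same input at a few of the levels described (including the Huffman-only ConstantCompression), uses Flush to emit a sync marker mid-stream, and inflates the result with NewReader to verify the round trip.

```go
package main

import (
	"bytes"
	"fmt"
	"io/ioutil"
	"log"

	"github.com/klauspost/compress/flate"
)

func main() {
	input := bytes.Repeat([]byte("the quick brown fox jumps over the lazy dog. "), 200)

	for _, level := range []int{flate.ConstantCompression, flate.BestSpeed, flate.DefaultCompression, flate.BestCompression} {
		var compressed bytes.Buffer

		w, err := flate.NewWriter(&compressed, level)
		if err != nil {
			log.Fatal(err) // only possible for levels outside [-2, 9]
		}
		if _, err := w.Write(input[:len(input)/2]); err != nil {
			log.Fatal(err)
		}
		// Flush emits a sync marker so everything written so far could
		// already be decoded on the other side of a connection.
		if err := w.Flush(); err != nil {
			log.Fatal(err)
		}
		w.Write(input[len(input)/2:])
		if err := w.Close(); err != nil {
			log.Fatal(err)
		}

		size := compressed.Len()
		r := flate.NewReader(&compressed)
		out, err := ioutil.ReadAll(r)
		if err != nil {
			log.Fatal(err)
		}
		r.Close()
		if !bytes.Equal(out, input) {
			log.Fatal("round trip mismatch")
		}
		fmt.Printf("level %2d: %d -> %d bytes\n", level, len(input), size)
	}
}
```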
+func (w *Writer) Close() error { + return w.d.close() +} + +// Reset discards the writer's state and makes it equivalent to +// the result of NewWriter or NewWriterDict called with dst +// and w's level and dictionary. +func (w *Writer) Reset(dst io.Writer) { + if dw, ok := w.d.w.writer.(*dictWriter); ok { + // w was created with NewWriterDict + dw.w = dst + w.d.reset(dw) + w.d.fillWindow(w.dict) + } else { + // w was created with NewWriter + w.d.reset(dst) + } +} + +// ResetDict discards the writer's state and makes it equivalent to +// the result of NewWriter or NewWriterDict called with dst +// and w's level, but sets a specific dictionary. +func (w *Writer) ResetDict(dst io.Writer, dict []byte) { + w.dict = dict + w.d.reset(dst) + w.d.fillWindow(w.dict) +} diff --git a/vendor/github.com/klauspost/compress/flate/deflate_test.go b/vendor/github.com/klauspost/compress/flate/deflate_test.go new file mode 100644 index 0000000..ff62a60 --- /dev/null +++ b/vendor/github.com/klauspost/compress/flate/deflate_test.go @@ -0,0 +1,648 @@ +// Copyright 2009 The Go Authors. All rights reserved. +// Copyright (c) 2015 Klaus Post +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package flate + +import ( + "bytes" + "fmt" + "io" + "io/ioutil" + "reflect" + "strings" + "sync" + "testing" +) + +type deflateTest struct { + in []byte + level int + out []byte +} + +type deflateInflateTest struct { + in []byte +} + +type reverseBitsTest struct { + in uint16 + bitCount uint8 + out uint16 +} + +var deflateTests = []*deflateTest{ + {[]byte{}, 0, []byte{1, 0, 0, 255, 255}}, + {[]byte{0x11}, BestCompression, []byte{18, 4, 4, 0, 0, 255, 255}}, + {[]byte{0x11}, BestCompression, []byte{18, 4, 4, 0, 0, 255, 255}}, + {[]byte{0x11}, BestCompression, []byte{18, 4, 4, 0, 0, 255, 255}}, + + {[]byte{0x11}, 0, []byte{0, 1, 0, 254, 255, 17, 1, 0, 0, 255, 255}}, + {[]byte{0x11, 0x12}, 0, []byte{0, 2, 0, 253, 255, 17, 18, 1, 0, 0, 255, 255}}, + {[]byte{0x11, 0x11, 0x11, 0x11, 0x11, 0x11, 0x11, 0x11}, 0, + []byte{0, 8, 0, 247, 255, 17, 17, 17, 17, 17, 17, 17, 17, 1, 0, 0, 255, 255}, + }, + {[]byte{}, 1, []byte{1, 0, 0, 255, 255}}, + {[]byte{0x11}, BestCompression, []byte{18, 4, 4, 0, 0, 255, 255}}, + {[]byte{0x11, 0x12}, BestCompression, []byte{18, 20, 2, 4, 0, 0, 255, 255}}, + {[]byte{0x11, 0x11, 0x11, 0x11, 0x11, 0x11, 0x11, 0x11}, BestCompression, []byte{18, 132, 2, 64, 0, 0, 0, 255, 255}}, + {[]byte{}, 9, []byte{1, 0, 0, 255, 255}}, + {[]byte{0x11}, 9, []byte{18, 4, 4, 0, 0, 255, 255}}, + {[]byte{0x11, 0x12}, 9, []byte{18, 20, 2, 4, 0, 0, 255, 255}}, + {[]byte{0x11, 0x11, 0x11, 0x11, 0x11, 0x11, 0x11, 0x11}, 9, []byte{18, 132, 2, 64, 0, 0, 0, 255, 255}}, +} + +var deflateInflateTests = []*deflateInflateTest{ + {[]byte{}}, + {[]byte{0x11}}, + {[]byte{0x11, 0x12}}, + {[]byte{0x11, 0x11, 0x11, 0x11, 0x11, 0x11, 0x11, 0x11}}, + {[]byte{0x11, 0x10, 0x13, 0x41, 0x21, 0x21, 0x41, 0x13, 0x87, 0x78, 0x13}}, + {largeDataChunk()}, +} + +var reverseBitsTests = []*reverseBitsTest{ + {1, 1, 1}, + {1, 2, 2}, + {1, 3, 4}, + {1, 4, 8}, + {1, 5, 16}, + {17, 5, 17}, + {257, 9, 257}, + {29, 5, 23}, +} + +func largeDataChunk() []byte { + result := make([]byte, 100000) + for i := range result { + result[i] = byte(i * i & 0xFF) + } + return result +} + +func TestCRCBulkOld(t *testing.T) { + for _, x := range deflateTests { + y := x.out + if len(y) >= minMatchLength { + y = append(y, y...) 
+ for j := 4; j < len(y); j++ { + y := y[:j] + dst := make([]uint32, len(y)-minMatchLength+1) + for i := range dst { + dst[i] = uint32(i + 100) + } + bulkHash4(y, dst) + for i, val := range dst { + got := val + expect := hash4(y[i:]) + if got != expect && got == uint32(i)+100 { + t.Errorf("Len:%d Index:%d, expected 0x%08x but not modified", len(y), i, expect) + } else if got != expect { + t.Errorf("Len:%d Index:%d, got 0x%08x expected:0x%08x", len(y), i, got, expect) + } else { + //t.Logf("Len:%d Index:%d OK (0x%08x)", len(y), i, got) + } + } + } + } + } +} + +func TestDeflate(t *testing.T) { + for _, h := range deflateTests { + var buf bytes.Buffer + w, err := NewWriter(&buf, h.level) + if err != nil { + t.Errorf("NewWriter: %v", err) + continue + } + w.Write(h.in) + w.Close() + if !bytes.Equal(buf.Bytes(), h.out) { + t.Errorf("Deflate(%d, %x) = \n%#v, want \n%#v", h.level, h.in, buf.Bytes(), h.out) + } + } +} + +// A sparseReader returns a stream consisting of 0s followed by 1<<16 1s. +// This tests missing hash references in a very large input. +type sparseReader struct { + l int64 + cur int64 +} + +func (r *sparseReader) Read(b []byte) (n int, err error) { + if r.cur >= r.l { + return 0, io.EOF + } + n = len(b) + cur := r.cur + int64(n) + if cur > r.l { + n -= int(cur - r.l) + cur = r.l + } + for i := range b[0:n] { + if r.cur+int64(i) >= r.l-1<<16 { + b[i] = 1 + } else { + b[i] = 0 + } + } + r.cur = cur + return +} + +func TestVeryLongSparseChunk(t *testing.T) { + if testing.Short() { + t.Skip("skipping sparse chunk during short test") + } + w, err := NewWriter(ioutil.Discard, 1) + if err != nil { + t.Errorf("NewWriter: %v", err) + return + } + if _, err = io.Copy(w, &sparseReader{l: 23E8}); err != nil { + t.Errorf("Compress failed: %v", err) + return + } +} + +type syncBuffer struct { + buf bytes.Buffer + mu sync.RWMutex + closed bool + ready chan bool +} + +func newSyncBuffer() *syncBuffer { + return &syncBuffer{ready: make(chan bool, 1)} +} + +func (b *syncBuffer) Read(p []byte) (n int, err error) { + for { + b.mu.RLock() + n, err = b.buf.Read(p) + b.mu.RUnlock() + if n > 0 || b.closed { + return + } + <-b.ready + } +} + +func (b *syncBuffer) signal() { + select { + case b.ready <- true: + default: + } +} + +func (b *syncBuffer) Write(p []byte) (n int, err error) { + n, err = b.buf.Write(p) + b.signal() + return +} + +func (b *syncBuffer) WriteMode() { + b.mu.Lock() +} + +func (b *syncBuffer) ReadMode() { + b.mu.Unlock() + b.signal() +} + +func (b *syncBuffer) Close() error { + b.closed = true + b.signal() + return nil +} + +func testSync(t *testing.T, level int, input []byte, name string) { + if len(input) == 0 { + return + } + + t.Logf("--testSync %d, %d, %s", level, len(input), name) + buf := newSyncBuffer() + buf1 := new(bytes.Buffer) + buf.WriteMode() + w, err := NewWriter(io.MultiWriter(buf, buf1), level) + if err != nil { + t.Errorf("NewWriter: %v", err) + return + } + r := NewReader(buf) + + // Write half the input and read back. 
+ for i := 0; i < 2; i++ { + var lo, hi int + if i == 0 { + lo, hi = 0, (len(input)+1)/2 + } else { + lo, hi = (len(input)+1)/2, len(input) + } + t.Logf("#%d: write %d-%d", i, lo, hi) + if _, err := w.Write(input[lo:hi]); err != nil { + t.Errorf("testSync: write: %v", err) + return + } + if i == 0 { + if err := w.Flush(); err != nil { + t.Errorf("testSync: flush: %v", err) + return + } + } else { + if err := w.Close(); err != nil { + t.Errorf("testSync: close: %v", err) + } + } + buf.ReadMode() + out := make([]byte, hi-lo+1) + m, err := io.ReadAtLeast(r, out, hi-lo) + t.Logf("#%d: read %d", i, m) + if m != hi-lo || err != nil { + t.Errorf("testSync/%d (%d, %d, %s): read %d: %d, %v (%d left)", i, level, len(input), name, hi-lo, m, err, buf.buf.Len()) + return + } + if !bytes.Equal(input[lo:hi], out[:hi-lo]) { + t.Errorf("testSync/%d: read wrong bytes: %x vs %x", i, input[lo:hi], out[:hi-lo]) + return + } + // This test originally checked that after reading + // the first half of the input, there was nothing left + // in the read buffer (buf.buf.Len() != 0) but that is + // not necessarily the case: the write Flush may emit + // some extra framing bits that are not necessary + // to process to obtain the first half of the uncompressed + // data. The test ran correctly most of the time, because + // the background goroutine had usually read even + // those extra bits by now, but it's not a useful thing to + // check. + buf.WriteMode() + } + buf.ReadMode() + out := make([]byte, 10) + if n, err := r.Read(out); n > 0 || err != io.EOF { + t.Errorf("testSync (%d, %d, %s): final Read: %d, %v (hex: %x)", level, len(input), name, n, err, out[0:n]) + } + if buf.buf.Len() != 0 { + t.Errorf("testSync (%d, %d, %s): extra data at end", level, len(input), name) + } + r.Close() + + // stream should work for ordinary reader too + r = NewReader(buf1) + out, err = ioutil.ReadAll(r) + if err != nil { + t.Errorf("testSync: read: %s", err) + return + } + r.Close() + if !bytes.Equal(input, out) { + t.Errorf("testSync: decompress(compress(data)) != data: level=%d input=%s", level, name) + } +} + +func testToFromWithLevelAndLimit(t *testing.T, level int, input []byte, name string, limit int) { + var buffer bytes.Buffer + w, err := NewWriter(&buffer, level) + if err != nil { + t.Errorf("NewWriter: %v", err) + return + } + w.Write(input) + w.Close() + if limit > 0 && buffer.Len() > limit { + t.Errorf("level: %d, len(compress(data)) = %d > limit = %d", level, buffer.Len(), limit) + return + } + if limit > 0 { + t.Logf("level: %d - Size:%.2f%%, %d b\n", level, float64(buffer.Len()*100)/float64(limit), buffer.Len()) + } + r := NewReader(&buffer) + out, err := ioutil.ReadAll(r) + if err != nil { + t.Errorf("read: %s", err) + return + } + r.Close() + if !bytes.Equal(input, out) { + t.Errorf("decompress(compress(data)) != data: level=%d input=%s", level, name) + return + } + testSync(t, level, input, name) +} + +func testToFromWithLimit(t *testing.T, input []byte, name string, limit [11]int) { + for i := 0; i < 10; i++ { + testToFromWithLevelAndLimit(t, i, input, name, limit[i]) + } + testToFromWithLevelAndLimit(t, -2, input, name, limit[10]) +} + +func TestDeflateInflate(t *testing.T) { + for i, h := range deflateInflateTests { + testToFromWithLimit(t, h.in, fmt.Sprintf("#%d", i), [11]int{}) + } +} + +func TestReverseBits(t *testing.T) { + for _, h := range reverseBitsTests { + if v := reverseBits(h.in, h.bitCount); v != h.out { + t.Errorf("reverseBits(%v,%v) = %v, want %v", + h.in, h.bitCount, v, h.out) + } + } +} + +type 
deflateInflateStringTest struct { + filename string + label string + limit [11]int // Number 11 is ConstantCompression +} + +var deflateInflateStringTests = []deflateInflateStringTest{ + { + "../testdata/e.txt", + "2.718281828...", + [...]int{100018, 67900, 50960, 51150, 50930, 50790, 50790, 50790, 50790, 50790, 43683 + 100}, + }, + { + "../testdata/Mark.Twain-Tom.Sawyer.txt", + "Mark.Twain-Tom.Sawyer", + [...]int{387999, 185000, 182361, 179974, 174124, 168819, 162936, 160506, 160295, 160295, 233460 + 100}, + }, +} + +func TestDeflateInflateString(t *testing.T) { + for _, test := range deflateInflateStringTests { + gold, err := ioutil.ReadFile(test.filename) + if err != nil { + t.Error(err) + } + // Remove returns that may be present on Windows + neutral := strings.Map(func(r rune) rune { + if r != '\r' { + return r + } + return -1 + }, string(gold)) + + testToFromWithLimit(t, []byte(neutral), test.label, test.limit) + + if testing.Short() { + break + } + } +} + +func TestReaderDict(t *testing.T) { + const ( + dict = "hello world" + text = "hello again world" + ) + var b bytes.Buffer + w, err := NewWriter(&b, 5) + if err != nil { + t.Fatalf("NewWriter: %v", err) + } + w.Write([]byte(dict)) + w.Flush() + b.Reset() + w.Write([]byte(text)) + w.Close() + + r := NewReaderDict(&b, []byte(dict)) + data, err := ioutil.ReadAll(r) + if err != nil { + t.Fatal(err) + } + if string(data) != "hello again world" { + t.Fatalf("read returned %q want %q", string(data), text) + } +} + +func TestWriterDict(t *testing.T) { + const ( + dict = "hello world Lorem ipsum dolor sit amet, consectetur adipiscing elit, sed do eiusmod tempor incididunt ut labore et dolore magna aliqua." + text = "hello world Lorem ipsum dolor sit amet" + ) + // This test is sensitive to algorithm changes that skip + // data in favour of speed. Higher levels are less prone to this + // so we test level 4-9. + for l := 4; l < 9; l++ { + var b bytes.Buffer + w, err := NewWriter(&b, l) + if err != nil { + t.Fatalf("level %d, NewWriter: %v", l, err) + } + w.Write([]byte(dict)) + w.Flush() + b.Reset() + w.Write([]byte(text)) + w.Close() + + var b1 bytes.Buffer + w, _ = NewWriterDict(&b1, l, []byte(dict)) + w.Write([]byte(text)) + w.Close() + + if !bytes.Equal(b1.Bytes(), b.Bytes()) { + t.Errorf("level %d, writer wrote\n%v\n want\n%v", l, b1.Bytes(), b.Bytes()) + } + } +} + +// See http://code.google.com/p/go/issues/detail?id=2508 +func TestRegression2508(t *testing.T) { + if testing.Short() { + t.Logf("test disabled with -short") + return + } + w, err := NewWriter(ioutil.Discard, 1) + if err != nil { + t.Fatalf("NewWriter: %v", err) + } + buf := make([]byte, 1024) + for i := 0; i < 131072; i++ { + if _, err := w.Write(buf); err != nil { + t.Fatalf("writer failed: %v", err) + } + } + w.Close() +} + +func TestWriterReset(t *testing.T) { + for level := -2; level <= 9; level++ { + if level == -1 { + level++ + } + if testing.Short() && level > 1 { + break + } + w, err := NewWriter(ioutil.Discard, level) + if err != nil { + t.Fatalf("NewWriter: %v", err) + } + buf := []byte("hello world") + for i := 0; i < 1024; i++ { + w.Write(buf) + } + w.Reset(ioutil.Discard) + + wref, err := NewWriter(ioutil.Discard, level) + if err != nil { + t.Fatalf("NewWriter: %v", err) + } + + // DeepEqual doesn't compare functions. + w.d.fill, wref.d.fill = nil, nil + w.d.step, wref.d.step = nil, nil + w.d.bulkHasher, wref.d.bulkHasher = nil, nil + w.d.snap, wref.d.snap = nil, nil + + // hashMatch is always overwritten when used. 
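TestReaderDict and TestWriterDict above exercise preset dictionaries; outside the tests, a writer created with NewWriterDict has to be paired with a reader primed with the same dictionary. A brief sketch, again assuming the vendored import path:

```go
package main

import (
	"bytes"
	"fmt"
	"io/ioutil"
	"log"

	"github.com/klauspost/compress/flate"
)

func main() {
	dict := []byte("hello world Lorem ipsum dolor sit amet")
	payload := []byte("hello world Lorem ipsum says hello again")

	var buf bytes.Buffer
	w, err := flate.NewWriterDict(&buf, flate.BestCompression, dict)
	if err != nil {
		log.Fatal(err)
	}
	w.Write(payload)
	w.Close()
	size := buf.Len()

	// The stream only decodes correctly with the same preset dictionary.
	r := flate.NewReaderDict(&buf, dict)
	out, err := ioutil.ReadAll(r)
	if err != nil {
		log.Fatal(err)
	}
	r.Close()
	fmt.Printf("decoded %q from %d compressed bytes\n", out, size)
}
```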
+ copy(w.d.hashMatch[:], wref.d.hashMatch[:]) + if w.d.tokens.n != 0 { + t.Errorf("level %d Writer not reset after Reset. %d tokens were present", level, w.d.tokens.n) + } + // As long as the length is 0, we don't care about the content. + w.d.tokens = wref.d.tokens + + // We don't care if there are values in the window, as long as it is at d.index is 0 + w.d.window = wref.d.window + if !reflect.DeepEqual(w, wref) { + t.Errorf("level %d Writer not reset after Reset", level) + } + } + testResetOutput(t, func(w io.Writer) (*Writer, error) { return NewWriter(w, NoCompression) }) + testResetOutput(t, func(w io.Writer) (*Writer, error) { return NewWriter(w, DefaultCompression) }) + testResetOutput(t, func(w io.Writer) (*Writer, error) { return NewWriter(w, BestCompression) }) + testResetOutput(t, func(w io.Writer) (*Writer, error) { return NewWriter(w, ConstantCompression) }) + dict := []byte("we are the world") + testResetOutput(t, func(w io.Writer) (*Writer, error) { return NewWriterDict(w, NoCompression, dict) }) + testResetOutput(t, func(w io.Writer) (*Writer, error) { return NewWriterDict(w, DefaultCompression, dict) }) + testResetOutput(t, func(w io.Writer) (*Writer, error) { return NewWriterDict(w, BestCompression, dict) }) + testResetOutput(t, func(w io.Writer) (*Writer, error) { return NewWriterDict(w, ConstantCompression, dict) }) +} + +func testResetOutput(t *testing.T, newWriter func(w io.Writer) (*Writer, error)) { + buf := new(bytes.Buffer) + w, err := newWriter(buf) + if err != nil { + t.Fatalf("NewWriter: %v", err) + } + b := []byte("hello world") + for i := 0; i < 1024; i++ { + w.Write(b) + } + w.Close() + out1 := buf.Bytes() + + buf2 := new(bytes.Buffer) + w.Reset(buf2) + for i := 0; i < 1024; i++ { + w.Write(b) + } + w.Close() + out2 := buf2.Bytes() + + if len(out1) != len(out2) { + t.Errorf("got %d, expected %d bytes", len(out2), len(out1)) + } + if bytes.Compare(out1, out2) != 0 { + mm := 0 + for i, b := range out1[:len(out2)] { + if b != out2[i] { + t.Errorf("mismatch index %d: %02x, expected %02x", i, out2[i], b) + } + mm++ + if mm == 10 { + t.Fatal("Stopping") + } + } + } + t.Logf("got %d bytes", len(out1)) +} + +// TestBestSpeed tests that round-tripping through deflate and then inflate +// recovers the original input. The Write sizes are near the thresholds in the +// compressor.encSpeed method (0, 16, 128), as well as near maxStoreBlockSize +// (65535). +func TestBestSpeed(t *testing.T) { + abc := make([]byte, 128) + for i := range abc { + abc[i] = byte(i) + } + abcabc := bytes.Repeat(abc, 131072/len(abc)) + var want []byte + + testCases := [][]int{ + {65536, 0}, + {65536, 1}, + {65536, 1, 256}, + {65536, 1, 65536}, + {65536, 14}, + {65536, 15}, + {65536, 16}, + {65536, 16, 256}, + {65536, 16, 65536}, + {65536, 127}, + {65536, 128}, + {65536, 128, 256}, + {65536, 128, 65536}, + {65536, 129}, + {65536, 65536, 256}, + {65536, 65536, 65536}, + } + + for i, tc := range testCases { + for _, firstN := range []int{1, 65534, 65535, 65536, 65537, 131072} { + tc[0] = firstN + outer: + for _, flush := range []bool{false, true} { + buf := new(bytes.Buffer) + want = want[:0] + + w, err := NewWriter(buf, BestSpeed) + if err != nil { + t.Errorf("i=%d, firstN=%d, flush=%t: NewWriter: %v", i, firstN, flush, err) + continue + } + for _, n := range tc { + want = append(want, abcabc[:n]...) 
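TestWriterReset and testResetOutput check that Reset returns a writer to its freshly constructed state and produces identical output. That is what makes recycling writers worthwhile, since NewWriter allocates sizeable window and hash-table state. A sketch of reuse through sync.Pool; the pool and the compressTo helper are illustrative names, not part of the package:

```go
package main

import (
	"bytes"
	"fmt"
	"io"
	"io/ioutil"
	"log"
	"sync"

	"github.com/klauspost/compress/flate"
)

// writerPool hands out *flate.Writer values so the window and hash tables
// are allocated once and then recycled through Reset.
var writerPool = sync.Pool{
	New: func() interface{} {
		w, err := flate.NewWriter(ioutil.Discard, flate.BestSpeed)
		if err != nil {
			panic(err) // only possible for a level outside [-2, 9]
		}
		return w
	},
}

func compressTo(dst io.Writer, data []byte) error {
	w := writerPool.Get().(*flate.Writer)
	defer writerPool.Put(w)

	w.Reset(dst) // point the recycled writer at the new destination
	if _, err := w.Write(data); err != nil {
		return err
	}
	return w.Close()
}

func main() {
	for i := 0; i < 3; i++ {
		var buf bytes.Buffer
		if err := compressTo(&buf, bytes.Repeat([]byte("payload "), 100)); err != nil {
			log.Fatal(err)
		}
		fmt.Println("compressed size:", buf.Len())
	}
}
```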
+ if _, err := w.Write(abcabc[:n]); err != nil { + t.Errorf("i=%d, firstN=%d, flush=%t: Write: %v", i, firstN, flush, err) + continue outer + } + if !flush { + continue + } + if err := w.Flush(); err != nil { + t.Errorf("i=%d, firstN=%d, flush=%t: Flush: %v", i, firstN, flush, err) + continue outer + } + } + if err := w.Close(); err != nil { + t.Errorf("i=%d, firstN=%d, flush=%t: Close: %v", i, firstN, flush, err) + continue + } + + r := NewReader(buf) + got, err := ioutil.ReadAll(r) + if err != nil { + t.Errorf("i=%d, firstN=%d, flush=%t: ReadAll: %v", i, firstN, flush, err) + continue + } + r.Close() + + if !bytes.Equal(got, want) { + t.Errorf("i=%d, firstN=%d, flush=%t: corruption during deflate-then-inflate", i, firstN, flush) + continue + } + } + } + } +} diff --git a/vendor/github.com/klauspost/compress/flate/dict_decoder.go b/vendor/github.com/klauspost/compress/flate/dict_decoder.go new file mode 100644 index 0000000..71c75a0 --- /dev/null +++ b/vendor/github.com/klauspost/compress/flate/dict_decoder.go @@ -0,0 +1,184 @@ +// Copyright 2016 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package flate + +// dictDecoder implements the LZ77 sliding dictionary as used in decompression. +// LZ77 decompresses data through sequences of two forms of commands: +// +// * Literal insertions: Runs of one or more symbols are inserted into the data +// stream as is. This is accomplished through the writeByte method for a +// single symbol, or combinations of writeSlice/writeMark for multiple symbols. +// Any valid stream must start with a literal insertion if no preset dictionary +// is used. +// +// * Backward copies: Runs of one or more symbols are copied from previously +// emitted data. Backward copies come as the tuple (dist, length) where dist +// determines how far back in the stream to copy from and length determines how +// many bytes to copy. Note that it is valid for the length to be greater than +// the distance. Since LZ77 uses forward copies, that situation is used to +// perform a form of run-length encoding on repeated runs of symbols. +// The writeCopy and tryWriteCopy are used to implement this command. +// +// For performance reasons, this implementation performs little to no sanity +// checks about the arguments. As such, the invariants documented for each +// method call must be respected. +type dictDecoder struct { + hist []byte // Sliding window history + + // Invariant: 0 <= rdPos <= wrPos <= len(hist) + wrPos int // Current output position in buffer + rdPos int // Have emitted hist[:rdPos] already + full bool // Has a full window length been written yet? +} + +// init initializes dictDecoder to have a sliding window dictionary of the given +// size. If a preset dict is provided, it will initialize the dictionary with +// the contents of dict. +func (dd *dictDecoder) init(size int, dict []byte) { + *dd = dictDecoder{hist: dd.hist} + + if cap(dd.hist) < size { + dd.hist = make([]byte, size) + } + dd.hist = dd.hist[:size] + + if len(dict) > len(dd.hist) { + dict = dict[len(dict)-len(dd.hist):] + } + dd.wrPos = copy(dd.hist, dict) + if dd.wrPos == len(dd.hist) { + dd.wrPos = 0 + dd.full = true + } + dd.rdPos = dd.wrPos +} + +// histSize reports the total amount of historical data in the dictionary. +func (dd *dictDecoder) histSize() int { + if dd.full { + return len(dd.hist) + } + return dd.wrPos +} + +// availRead reports the number of bytes that can be flushed by readFlush. 
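The backward-copy form described in the dictDecoder comments above allows length to exceed dist; copying forward one byte at a time then re-reads bytes the same copy has just produced, which is how LZ77 run-length-encodes repeats. A tiny sketch of that forward copy (not the package's optimized writeCopy):

```go
package main

import "fmt"

// lz77Copy appends length bytes to dst, each taken dist bytes back.
// When length > dist the source overlaps the destination, which turns a
// short seed into a repeated run (run-length encoding via copies).
func lz77Copy(dst []byte, dist, length int) []byte {
	for i := 0; i < length; i++ {
		dst = append(dst, dst[len(dst)-dist])
	}
	return dst
}

func main() {
	out := []byte("ab")
	out = lz77Copy(out, 2, 8) // copy 8 bytes from 2 back: "ab" -> "ababababab"
	fmt.Println(string(out))
}
```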
+func (dd *dictDecoder) availRead() int { + return dd.wrPos - dd.rdPos +} + +// availWrite reports the available amount of output buffer space. +func (dd *dictDecoder) availWrite() int { + return len(dd.hist) - dd.wrPos +} + +// writeSlice returns a slice of the available buffer to write data to. +// +// This invariant will be kept: len(s) <= availWrite() +func (dd *dictDecoder) writeSlice() []byte { + return dd.hist[dd.wrPos:] +} + +// writeMark advances the writer pointer by cnt. +// +// This invariant must be kept: 0 <= cnt <= availWrite() +func (dd *dictDecoder) writeMark(cnt int) { + dd.wrPos += cnt +} + +// writeByte writes a single byte to the dictionary. +// +// This invariant must be kept: 0 < availWrite() +func (dd *dictDecoder) writeByte(c byte) { + dd.hist[dd.wrPos] = c + dd.wrPos++ +} + +// writeCopy copies a string at a given (dist, length) to the output. +// This returns the number of bytes copied and may be less than the requested +// length if the available space in the output buffer is too small. +// +// This invariant must be kept: 0 < dist <= histSize() +func (dd *dictDecoder) writeCopy(dist, length int) int { + dstBase := dd.wrPos + dstPos := dstBase + srcPos := dstPos - dist + endPos := dstPos + length + if endPos > len(dd.hist) { + endPos = len(dd.hist) + } + + // Copy non-overlapping section after destination position. + // + // This section is non-overlapping in that the copy length for this section + // is always less than or equal to the backwards distance. This can occur + // if a distance refers to data that wraps-around in the buffer. + // Thus, a backwards copy is performed here; that is, the exact bytes in + // the source prior to the copy is placed in the destination. + if srcPos < 0 { + srcPos += len(dd.hist) + dstPos += copy(dd.hist[dstPos:endPos], dd.hist[srcPos:]) + srcPos = 0 + } + + // Copy possibly overlapping section before destination position. + // + // This section can overlap if the copy length for this section is larger + // than the backwards distance. This is allowed by LZ77 so that repeated + // strings can be succinctly represented using (dist, length) pairs. + // Thus, a forwards copy is performed here; that is, the bytes copied is + // possibly dependent on the resulting bytes in the destination as the copy + // progresses along. This is functionally equivalent to the following: + // + // for i := 0; i < endPos-dstPos; i++ { + // dd.hist[dstPos+i] = dd.hist[srcPos+i] + // } + // dstPos = endPos + // + for dstPos < endPos { + dstPos += copy(dd.hist[dstPos:endPos], dd.hist[srcPos:dstPos]) + } + + dd.wrPos = dstPos + return dstPos - dstBase +} + +// tryWriteCopy tries to copy a string at a given (distance, length) to the +// output. This specialized version is optimized for short distances. +// +// This method is designed to be inlined for performance reasons. +// +// This invariant must be kept: 0 < dist <= histSize() +func (dd *dictDecoder) tryWriteCopy(dist, length int) int { + dstPos := dd.wrPos + endPos := dstPos + length + if dstPos < dist || endPos > len(dd.hist) { + return 0 + } + dstBase := dstPos + srcPos := dstPos - dist + + // Copy possibly overlapping section before destination position. +loop: + dstPos += copy(dd.hist[dstPos:endPos], dd.hist[srcPos:dstPos]) + if dstPos < endPos { + goto loop // Avoid for-loop so that this function can be inlined + } + + dd.wrPos = dstPos + return dstPos - dstBase +} + +// readFlush returns a slice of the historical buffer that is ready to be +// emitted to the user. 
The data returned by readFlush must be fully consumed +// before calling any other dictDecoder methods. +func (dd *dictDecoder) readFlush() []byte { + toRead := dd.hist[dd.rdPos:dd.wrPos] + dd.rdPos = dd.wrPos + if dd.wrPos == len(dd.hist) { + dd.wrPos, dd.rdPos = 0, 0 + dd.full = true + } + return toRead +} diff --git a/vendor/github.com/klauspost/compress/flate/dict_decoder_test.go b/vendor/github.com/klauspost/compress/flate/dict_decoder_test.go new file mode 100644 index 0000000..9275cff --- /dev/null +++ b/vendor/github.com/klauspost/compress/flate/dict_decoder_test.go @@ -0,0 +1,139 @@ +// Copyright 2016 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package flate + +import ( + "bytes" + "strings" + "testing" +) + +func TestDictDecoder(t *testing.T) { + const ( + abc = "ABC\n" + fox = "The quick brown fox jumped over the lazy dog!\n" + poem = "The Road Not Taken\nRobert Frost\n" + + "\n" + + "Two roads diverged in a yellow wood,\n" + + "And sorry I could not travel both\n" + + "And be one traveler, long I stood\n" + + "And looked down one as far as I could\n" + + "To where it bent in the undergrowth;\n" + + "\n" + + "Then took the other, as just as fair,\n" + + "And having perhaps the better claim,\n" + + "Because it was grassy and wanted wear;\n" + + "Though as for that the passing there\n" + + "Had worn them really about the same,\n" + + "\n" + + "And both that morning equally lay\n" + + "In leaves no step had trodden black.\n" + + "Oh, I kept the first for another day!\n" + + "Yet knowing how way leads on to way,\n" + + "I doubted if I should ever come back.\n" + + "\n" + + "I shall be telling this with a sigh\n" + + "Somewhere ages and ages hence:\n" + + "Two roads diverged in a wood, and I-\n" + + "I took the one less traveled by,\n" + + "And that has made all the difference.\n" + ) + + var poemRefs = []struct { + dist int // Backward distance (0 if this is an insertion) + length int // Length of copy or insertion + }{ + {0, 38}, {33, 3}, {0, 48}, {79, 3}, {0, 11}, {34, 5}, {0, 6}, {23, 7}, + {0, 8}, {50, 3}, {0, 2}, {69, 3}, {34, 5}, {0, 4}, {97, 3}, {0, 4}, + {43, 5}, {0, 6}, {7, 4}, {88, 7}, {0, 12}, {80, 3}, {0, 2}, {141, 4}, + {0, 1}, {196, 3}, {0, 3}, {157, 3}, {0, 6}, {181, 3}, {0, 2}, {23, 3}, + {77, 3}, {28, 5}, {128, 3}, {110, 4}, {70, 3}, {0, 4}, {85, 6}, {0, 2}, + {182, 6}, {0, 4}, {133, 3}, {0, 7}, {47, 5}, {0, 20}, {112, 5}, {0, 1}, + {58, 3}, {0, 8}, {59, 3}, {0, 4}, {173, 3}, {0, 5}, {114, 3}, {0, 4}, + {92, 5}, {0, 2}, {71, 3}, {0, 2}, {76, 5}, {0, 1}, {46, 3}, {96, 4}, + {130, 4}, {0, 3}, {360, 3}, {0, 3}, {178, 5}, {0, 7}, {75, 3}, {0, 3}, + {45, 6}, {0, 6}, {299, 6}, {180, 3}, {70, 6}, {0, 1}, {48, 3}, {66, 4}, + {0, 3}, {47, 5}, {0, 9}, {325, 3}, {0, 1}, {359, 3}, {318, 3}, {0, 2}, + {199, 3}, {0, 1}, {344, 3}, {0, 3}, {248, 3}, {0, 10}, {310, 3}, {0, 3}, + {93, 6}, {0, 3}, {252, 3}, {157, 4}, {0, 2}, {273, 5}, {0, 14}, {99, 4}, + {0, 1}, {464, 4}, {0, 2}, {92, 4}, {495, 3}, {0, 1}, {322, 4}, {16, 4}, + {0, 3}, {402, 3}, {0, 2}, {237, 4}, {0, 2}, {432, 4}, {0, 1}, {483, 5}, + {0, 2}, {294, 4}, {0, 2}, {306, 3}, {113, 5}, {0, 1}, {26, 4}, {164, 3}, + {488, 4}, {0, 1}, {542, 3}, {248, 6}, {0, 5}, {205, 3}, {0, 8}, {48, 3}, + {449, 6}, {0, 2}, {192, 3}, {328, 4}, {9, 5}, {433, 3}, {0, 3}, {622, 25}, + {615, 5}, {46, 5}, {0, 2}, {104, 3}, {475, 10}, {549, 3}, {0, 4}, {597, 8}, + {314, 3}, {0, 1}, {473, 6}, {317, 5}, {0, 1}, {400, 3}, {0, 3}, {109, 3}, + {151, 3}, 
{48, 4}, {0, 4}, {125, 3}, {108, 3}, {0, 2}, + } + + var got, want bytes.Buffer + var dd dictDecoder + dd.init(1<<11, nil) + + var writeCopy = func(dist, length int) { + for length > 0 { + cnt := dd.tryWriteCopy(dist, length) + if cnt == 0 { + cnt = dd.writeCopy(dist, length) + } + + length -= cnt + if dd.availWrite() == 0 { + got.Write(dd.readFlush()) + } + } + } + var writeString = func(str string) { + for len(str) > 0 { + cnt := copy(dd.writeSlice(), str) + str = str[cnt:] + dd.writeMark(cnt) + if dd.availWrite() == 0 { + got.Write(dd.readFlush()) + } + } + } + + writeString(".") + want.WriteByte('.') + + str := poem + for _, ref := range poemRefs { + if ref.dist == 0 { + writeString(str[:ref.length]) + } else { + writeCopy(ref.dist, ref.length) + } + str = str[ref.length:] + } + want.WriteString(poem) + + writeCopy(dd.histSize(), 33) + want.Write(want.Bytes()[:33]) + + writeString(abc) + writeCopy(len(abc), 59*len(abc)) + want.WriteString(strings.Repeat(abc, 60)) + + writeString(fox) + writeCopy(len(fox), 9*len(fox)) + want.WriteString(strings.Repeat(fox, 10)) + + writeString(".") + writeCopy(1, 9) + want.WriteString(strings.Repeat(".", 10)) + + writeString(strings.ToUpper(poem)) + writeCopy(len(poem), 7*len(poem)) + want.WriteString(strings.Repeat(strings.ToUpper(poem), 8)) + + writeCopy(dd.histSize(), 10) + want.Write(want.Bytes()[want.Len()-dd.histSize():][:10]) + + got.Write(dd.readFlush()) + if got.String() != want.String() { + t.Errorf("final string mismatch:\ngot %q\nwant %q", got.String(), want.String()) + } +} diff --git a/vendor/github.com/klauspost/compress/flate/flate_test.go b/vendor/github.com/klauspost/compress/flate/flate_test.go new file mode 100644 index 0000000..3f67025 --- /dev/null +++ b/vendor/github.com/klauspost/compress/flate/flate_test.go @@ -0,0 +1,260 @@ +// Copyright 2009 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// This test tests some internals of the flate package. +// The tests in package compress/gzip serve as the +// end-to-end test of the decompressor. + +package flate + +import ( + "bytes" + "encoding/hex" + "io/ioutil" + "testing" +) + +// The following test should not panic. +func TestIssue5915(t *testing.T) { + bits := []int{4, 0, 0, 6, 4, 3, 2, 3, 3, 4, 4, 5, 0, 0, 0, 0, 5, 5, 6, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 11, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 7, 8, 6, 0, 11, 0, 8, 0, 6, 6, 10, 8} + var h huffmanDecoder + if h.init(bits) { + t.Fatalf("Given sequence of bits is bad, and should not succeed.") + } +} + +// The following test should not panic. +func TestIssue5962(t *testing.T) { + bits := []int{4, 0, 0, 6, 4, 3, 2, 3, 3, 4, 4, 5, 0, 0, 0, 0, + 5, 5, 6, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 11} + var h huffmanDecoder + if h.init(bits) { + t.Fatalf("Given sequence of bits is bad, and should not succeed.") + } +} + +// The following test should not panic. +func TestIssue6255(t *testing.T) { + bits1 := []int{1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 11} + bits2 := []int{11, 13} + var h huffmanDecoder + if !h.init(bits1) { + t.Fatalf("Given sequence of bits is good and should succeed.") + } + if h.init(bits2) { + t.Fatalf("Given sequence of bits is bad and should not succeed.") + } +} + +func TestInvalidEncoding(t *testing.T) { + // Initialize Huffman decoder to recognize "0". 
+ var h huffmanDecoder + if !h.init([]int{1}) { + t.Fatal("Failed to initialize Huffman decoder") + } + + // Initialize decompressor with invalid Huffman coding. + var f decompressor + f.r = bytes.NewReader([]byte{0xff}) + + _, err := f.huffSym(&h) + if err == nil { + t.Fatal("Should have rejected invalid bit sequence") + } +} + +func TestInvalidBits(t *testing.T) { + oversubscribed := []int{1, 2, 3, 4, 4, 5} + incomplete := []int{1, 2, 4, 4} + var h huffmanDecoder + if h.init(oversubscribed) { + t.Fatal("Should reject oversubscribed bit-length set") + } + if h.init(incomplete) { + t.Fatal("Should reject incomplete bit-length set") + } +} + +func TestStreams(t *testing.T) { + // To verify any of these hexstrings as valid or invalid flate streams + // according to the C zlib library, you can use the Python wrapper library: + // >>> hex_string = "010100feff11" + // >>> import zlib + // >>> zlib.decompress(hex_string.decode("hex"), -15) # Negative means raw DEFLATE + // '\x11' + + testCases := []struct { + desc string // Description of the stream + stream string // Hexstring of the input DEFLATE stream + want string // Expected result. Use "fail" to expect failure + }{{ + "degenerate HCLenTree", + "05e0010000000000100000000000000000000000000000000000000000000000" + + "00000000000000000004", + "fail", + }, { + "complete HCLenTree, empty HLitTree, empty HDistTree", + "05e0010400000000000000000000000000000000000000000000000000000000" + + "00000000000000000010", + "fail", + }, { + "empty HCLenTree", + "05e0010000000000000000000000000000000000000000000000000000000000" + + "00000000000000000010", + "fail", + }, { + "complete HCLenTree, complete HLitTree, empty HDistTree, use missing HDist symbol", + "000100feff000de0010400000000100000000000000000000000000000000000" + + "0000000000000000000000000000002c", + "fail", + }, { + "complete HCLenTree, complete HLitTree, degenerate HDistTree, use missing HDist symbol", + "000100feff000de0010000000000000000000000000000000000000000000000" + + "00000000000000000610000000004070", + "fail", + }, { + "complete HCLenTree, empty HLitTree, empty HDistTree", + "05e0010400000000100400000000000000000000000000000000000000000000" + + "0000000000000000000000000008", + "fail", + }, { + "complete HCLenTree, empty HLitTree, degenerate HDistTree", + "05e0010400000000100400000000000000000000000000000000000000000000" + + "0000000000000000000800000008", + "fail", + }, { + "complete HCLenTree, degenerate HLitTree, degenerate HDistTree, use missing HLit symbol", + "05e0010400000000100000000000000000000000000000000000000000000000" + + "0000000000000000001c", + "fail", + }, { + "complete HCLenTree, complete HLitTree, too large HDistTree", + "edff870500000000200400000000000000000000000000000000000000000000" + + "000000000000000000080000000000000004", + "fail", + }, { + "complete HCLenTree, complete HLitTree, empty HDistTree, excessive repeater code", + "edfd870500000000200400000000000000000000000000000000000000000000" + + "000000000000000000e8b100", + "fail", + }, { + "complete HCLenTree, complete HLitTree, empty HDistTree of normal length 30", + "05fd01240000000000f8ffffffffffffffffffffffffffffffffffffffffffff" + + "ffffffffffffffffff07000000fe01", + "", + }, { + "complete HCLenTree, complete HLitTree, empty HDistTree of excessive length 31", + "05fe01240000000000f8ffffffffffffffffffffffffffffffffffffffffffff" + + "ffffffffffffffffff07000000fc03", + "fail", + }, { + "complete HCLenTree, over-subscribed HLitTree, empty HDistTree", + 
"05e001240000000000fcffffffffffffffffffffffffffffffffffffffffffff" + + "ffffffffffffffffff07f00f", + "fail", + }, { + "complete HCLenTree, under-subscribed HLitTree, empty HDistTree", + "05e001240000000000fcffffffffffffffffffffffffffffffffffffffffffff" + + "fffffffffcffffffff07f00f", + "fail", + }, { + "complete HCLenTree, complete HLitTree with single code, empty HDistTree", + "05e001240000000000f8ffffffffffffffffffffffffffffffffffffffffffff" + + "ffffffffffffffffff07f00f", + "01", + }, { + "complete HCLenTree, complete HLitTree with multiple codes, empty HDistTree", + "05e301240000000000f8ffffffffffffffffffffffffffffffffffffffffffff" + + "ffffffffffffffffff07807f", + "01", + }, { + "complete HCLenTree, complete HLitTree, degenerate HDistTree, use valid HDist symbol", + "000100feff000de0010400000000100000000000000000000000000000000000" + + "0000000000000000000000000000003c", + "00000000", + }, { + "complete HCLenTree, degenerate HLitTree, degenerate HDistTree", + "05e0010400000000100000000000000000000000000000000000000000000000" + + "0000000000000000000c", + "", + }, { + "complete HCLenTree, degenerate HLitTree, empty HDistTree", + "05e0010400000000100000000000000000000000000000000000000000000000" + + "00000000000000000004", + "", + }, { + "complete HCLenTree, complete HLitTree, empty HDistTree, spanning repeater code", + "edfd870500000000200400000000000000000000000000000000000000000000" + + "000000000000000000e8b000", + "", + }, { + "complete HCLenTree with length codes, complete HLitTree, empty HDistTree", + "ede0010400000000100000000000000000000000000000000000000000000000" + + "0000000000000000000400004000", + "", + }, { + "complete HCLenTree, complete HLitTree, degenerate HDistTree, use valid HLit symbol 284 with count 31", + "000100feff00ede0010400000000100000000000000000000000000000000000" + + "000000000000000000000000000000040000407f00", + "0000000000000000000000000000000000000000000000000000000000000000" + + "0000000000000000000000000000000000000000000000000000000000000000" + + "0000000000000000000000000000000000000000000000000000000000000000" + + "0000000000000000000000000000000000000000000000000000000000000000" + + "0000000000000000000000000000000000000000000000000000000000000000" + + "0000000000000000000000000000000000000000000000000000000000000000" + + "0000000000000000000000000000000000000000000000000000000000000000" + + "0000000000000000000000000000000000000000000000000000000000000000" + + "000000", + }, { + "complete HCLenTree, complete HLitTree, degenerate HDistTree, use valid HLit and HDist symbols", + "0cc2010d00000082b0ac4aff0eb07d27060000ffff", + "616263616263", + }, { + "fixed block, use reserved symbol 287", + "33180700", + "fail", + }, { + "raw block", + "010100feff11", + "11", + }, { + "issue 10426 - over-subscribed HCLenTree causes a hang", + "344c4a4e494d4b070000ff2e2eff2e2e2e2e2eff", + "fail", + }, { + "issue 11030 - empty HDistTree unexpectedly leads to error", + "05c0070600000080400fff37a0ca", + "", + }, { + "issue 11033 - empty HDistTree unexpectedly leads to error", + "050fb109c020cca5d017dcbca044881ee1034ec149c8980bbc413c2ab35be9dc" + + "b1473449922449922411202306ee97b0383a521b4ffdcf3217f9f7d3adb701", + "3130303634342068652e706870005d05355f7ed957ff084a90925d19e3ebc6d0" + + "c6d7", + }} + + for i, tc := range testCases { + data, err := hex.DecodeString(tc.stream) + if err != nil { + t.Fatal(err) + } + data, err = ioutil.ReadAll(NewReader(bytes.NewReader(data))) + if tc.want == "fail" { + if err == nil { + t.Errorf("#%d (%s): got nil error, want non-nil", i, 
tc.desc) + } + } else { + if err != nil { + t.Errorf("#%d (%s): %v", i, tc.desc, err) + continue + } + if got := hex.EncodeToString(data); got != tc.want { + t.Errorf("#%d (%s):\ngot %q\nwant %q", i, tc.desc, got, tc.want) + } + + } + } +} diff --git a/vendor/github.com/klauspost/compress/flate/gen.go b/vendor/github.com/klauspost/compress/flate/gen.go new file mode 100644 index 0000000..154c89a --- /dev/null +++ b/vendor/github.com/klauspost/compress/flate/gen.go @@ -0,0 +1,265 @@ +// Copyright 2012 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build ignore + +// This program generates fixedhuff.go +// Invoke as +// +// go run gen.go -output fixedhuff.go + +package main + +import ( + "bytes" + "flag" + "fmt" + "go/format" + "io/ioutil" + "log" +) + +var filename = flag.String("output", "fixedhuff.go", "output file name") + +const maxCodeLen = 16 + +// Note: the definition of the huffmanDecoder struct is copied from +// inflate.go, as it is private to the implementation. + +// chunk & 15 is number of bits +// chunk >> 4 is value, including table link + +const ( + huffmanChunkBits = 9 + huffmanNumChunks = 1 << huffmanChunkBits + huffmanCountMask = 15 + huffmanValueShift = 4 +) + +type huffmanDecoder struct { + min int // the minimum code length + chunks [huffmanNumChunks]uint32 // chunks as described above + links [][]uint32 // overflow links + linkMask uint32 // mask the width of the link table +} + +// Initialize Huffman decoding tables from array of code lengths. +// Following this function, h is guaranteed to be initialized into a complete +// tree (i.e., neither over-subscribed nor under-subscribed). The exception is a +// degenerate case where the tree has only a single symbol with length 1. Empty +// trees are permitted. +func (h *huffmanDecoder) init(bits []int) bool { + // Sanity enables additional runtime tests during Huffman + // table construction. It's intended to be used during + // development to supplement the currently ad-hoc unit tests. + const sanity = false + + if h.min != 0 { + *h = huffmanDecoder{} + } + + // Count number of codes of each length, + // compute min and max length. + var count [maxCodeLen]int + var min, max int + for _, n := range bits { + if n == 0 { + continue + } + if min == 0 || n < min { + min = n + } + if n > max { + max = n + } + count[n]++ + } + + // Empty tree. The decompressor.huffSym function will fail later if the tree + // is used. Technically, an empty tree is only valid for the HDIST tree and + // not the HCLEN and HLIT tree. However, a stream with an empty HCLEN tree + // is guaranteed to fail since it will attempt to use the tree to decode the + // codes for the HLIT and HDIST trees. Similarly, an empty HLIT tree is + // guaranteed to fail later since the compressed data section must be + // composed of at least one symbol (the end-of-block marker). + if max == 0 { + return true + } + + code := 0 + var nextcode [maxCodeLen]int + for i := min; i <= max; i++ { + code <<= 1 + nextcode[i] = code + code += count[i] + } + + // Check that the coding is complete (i.e., that we've + // assigned all 2-to-the-max possible bit sequences). + // Exception: To be compatible with zlib, we also need to + // accept degenerate single-code codings. See also + // TestDegenerateHuffmanCoding. 
+	if code != 1<<uint(max) && !(code == 1 && max == 1) {
+		return false
+	}
+
+	h.min = min
+	if max > huffmanChunkBits {
+		numLinks := 1 << (uint(max) - huffmanChunkBits)
+		h.linkMask = uint32(numLinks - 1)
+
+		// create link tables
+		link := nextcode[huffmanChunkBits+1] >> 1
+		h.links = make([][]uint32, huffmanNumChunks-link)
+		for j := uint(link); j < huffmanNumChunks; j++ {
+			reverse := int(reverseByte[j>>8]) | int(reverseByte[j&0xff])<<8
+			reverse >>= uint(16 - huffmanChunkBits)
+			off := j - uint(link)
+			if sanity && h.chunks[reverse] != 0 {
+				panic("impossible: overwriting existing chunk")
+			}
+			h.chunks[reverse] = uint32(off<<huffmanValueShift | (huffmanChunkBits + 1))
+			h.links[off] = make([]uint32, numLinks)
+		}
+	}
+
+	for i, n := range bits {
+		if n == 0 {
+			continue
+		}
+		code := nextcode[n]
+		nextcode[n]++
+		chunk := uint32(i<<huffmanValueShift | n)
+		reverse := int(reverseByte[code>>8]) | int(reverseByte[code&0xff])<<8
+		reverse >>= uint(16 - n)
+		if n <= huffmanChunkBits {
+			for off := reverse; off < len(h.chunks); off += 1 << uint(n) {
+				// We should never need to overwrite
+				// an existing chunk. Also, 0 is
+				// never a valid chunk, because the
+				// lower 4 "count" bits should be
+				// between 1 and 15.
+				if sanity && h.chunks[off] != 0 {
+					panic("impossible: overwriting existing chunk")
+				}
+				h.chunks[off] = chunk
+			}
+		} else {
+			j := reverse & (huffmanNumChunks - 1)
+			if sanity && h.chunks[j]&huffmanCountMask != huffmanChunkBits+1 {
+				// Longer codes should have been
+				// associated with a link table above.
+				panic("impossible: not an indirect chunk")
+			}
+			value := h.chunks[j] >> huffmanValueShift
+			linktab := h.links[value]
+			reverse >>= huffmanChunkBits
+			for off := reverse; off < len(linktab); off += 1 << uint(n-huffmanChunkBits) {
+				if sanity && linktab[off] != 0 {
+					panic("impossible: overwriting existing chunk")
+				}
+				linktab[off] = chunk
+			}
+		}
+	}
+
+	if sanity {
+		// Above we've sanity checked that we never overwrote
+		// an existing entry. Here we additionally check that
+		// we filled the tables completely.
+		for i, chunk := range h.chunks {
+			if chunk == 0 {
+				// As an exception, in the degenerate
+				// single-code case, we allow odd
+				// chunks to be missing.
+				if code == 1 && i%2 == 1 {
+					continue
+				}
+				panic("impossible: missing chunk")
+			}
+		}
+		for _, linktab := range h.links {
+			for _, chunk := range linktab {
+				if chunk == 0 {
+					panic("impossible: missing chunk")
+				}
+			}
+		}
+	}
+
+	return true
+}
+
+func main() {
+	flag.Parse()
+
+	var h huffmanDecoder
+	var bits [288]int
+	initReverseByte()
+	for i := 0; i < 144; i++ {
+		bits[i] = 8
+	}
+	for i := 144; i < 256; i++ {
+		bits[i] = 9
+	}
+	for i := 256; i < 280; i++ {
+		bits[i] = 7
+	}
+	for i := 280; i < 288; i++ {
+		bits[i] = 8
+	}
+	h.init(bits[:])
+	if h.links != nil {
+		log.Fatal("Unexpected links table in fixed Huffman decoder")
+	}
+
+	var buf bytes.Buffer
+
+	fmt.Fprintf(&buf, `// Copyright 2013 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file.`+"\n\n") + + fmt.Fprintln(&buf, "package flate") + fmt.Fprintln(&buf) + fmt.Fprintln(&buf, "// autogenerated by go run gen.go -output fixedhuff.go, DO NOT EDIT") + fmt.Fprintln(&buf) + fmt.Fprintln(&buf, "var fixedHuffmanDecoder = huffmanDecoder{") + fmt.Fprintf(&buf, "\t%d,\n", h.min) + fmt.Fprintln(&buf, "\t[huffmanNumChunks]uint32{") + for i := 0; i < huffmanNumChunks; i++ { + if i&7 == 0 { + fmt.Fprintf(&buf, "\t\t") + } else { + fmt.Fprintf(&buf, " ") + } + fmt.Fprintf(&buf, "0x%04x,", h.chunks[i]) + if i&7 == 7 { + fmt.Fprintln(&buf) + } + } + fmt.Fprintln(&buf, "\t},") + fmt.Fprintln(&buf, "\tnil, 0,") + fmt.Fprintln(&buf, "}") + + data, err := format.Source(buf.Bytes()) + if err != nil { + log.Fatal(err) + } + err = ioutil.WriteFile(*filename, data, 0644) + if err != nil { + log.Fatal(err) + } +} + +var reverseByte [256]byte + +func initReverseByte() { + for x := 0; x < 256; x++ { + var result byte + for i := uint(0); i < 8; i++ { + result |= byte(((x >> i) & 1) << (7 - i)) + } + reverseByte[x] = result + } +} diff --git a/vendor/github.com/klauspost/compress/flate/huffman_bit_writer.go b/vendor/github.com/klauspost/compress/flate/huffman_bit_writer.go new file mode 100644 index 0000000..f9b2a69 --- /dev/null +++ b/vendor/github.com/klauspost/compress/flate/huffman_bit_writer.go @@ -0,0 +1,701 @@ +// Copyright 2009 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package flate + +import ( + "io" +) + +const ( + // The largest offset code. + offsetCodeCount = 30 + + // The special code used to mark the end of a block. + endBlockMarker = 256 + + // The first length code. + lengthCodesStart = 257 + + // The number of codegen codes. + codegenCodeCount = 19 + badCode = 255 + + // bufferFlushSize indicates the buffer size + // after which bytes are flushed to the writer. + // Should preferably be a multiple of 6, since + // we accumulate 6 bytes between writes to the buffer. + bufferFlushSize = 240 + + // bufferSize is the actual output byte buffer size. + // It must have additional headroom for a flush + // which can contain up to 8 bytes. + bufferSize = bufferFlushSize + 8 +) + +// The number of extra bits needed by length code X - LENGTH_CODES_START. +var lengthExtraBits = []int8{ + /* 257 */ 0, 0, 0, + /* 260 */ 0, 0, 0, 0, 0, 1, 1, 1, 1, 2, + /* 270 */ 2, 2, 2, 3, 3, 3, 3, 4, 4, 4, + /* 280 */ 4, 5, 5, 5, 5, 0, +} + +// The length indicated by length code X - LENGTH_CODES_START. +var lengthBase = []uint32{ + 0, 1, 2, 3, 4, 5, 6, 7, 8, 10, + 12, 14, 16, 20, 24, 28, 32, 40, 48, 56, + 64, 80, 96, 112, 128, 160, 192, 224, 255, +} + +// offset code word extra bits. 
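The length tables above, together with the offset tables that follow, implement the RFC 1951 mapping from match lengths and distances to a small set of codes plus extra bits. A rough sketch of resolving a match length in [3, 258] to its length code, using copies of the tables (lengthBase is stored relative to the minimum match length of 3, as in the package); the lengthCode helper is illustrative, not the package's lookup:

```go
package main

import "fmt"

// Copies of the tables shown above, for illustration.
var lengthExtraBits = []uint8{
	0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 2, 2, 2, 2,
	3, 3, 3, 3, 4, 4, 4, 4, 5, 5, 5, 5, 0,
}

var lengthBase = []uint32{
	0, 1, 2, 3, 4, 5, 6, 7, 8, 10,
	12, 14, 16, 20, 24, 28, 32, 40, 48, 56,
	64, 80, 96, 112, 128, 160, 192, 224, 255,
}

// lengthCode returns the length code (offset from symbol 257), the number of
// extra bits, and the extra-bits value for a match length in [3, 258].
func lengthCode(length int) (code int, extra uint8, extraVal uint32) {
	rel := uint32(length - 3)
	code = len(lengthBase) - 1
	for i := 1; i < len(lengthBase); i++ {
		if lengthBase[i] > rel {
			code = i - 1
			break
		}
	}
	return code, lengthExtraBits[code], rel - lengthBase[code]
}

func main() {
	for _, l := range []int{3, 4, 10, 19, 258} {
		c, nbits, v := lengthCode(l)
		fmt.Printf("length %3d -> code %d (symbol %d), %d extra bits, value %d\n",
			l, c, 257+c, nbits, v)
	}
}
```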
+var offsetExtraBits = []int8{ + 0, 0, 0, 0, 1, 1, 2, 2, 3, 3, + 4, 4, 5, 5, 6, 6, 7, 7, 8, 8, + 9, 9, 10, 10, 11, 11, 12, 12, 13, 13, + /* extended window */ + 14, 14, 15, 15, 16, 16, 17, 17, 18, 18, 19, 19, 20, 20, +} + +var offsetBase = []uint32{ + /* normal deflate */ + 0x000000, 0x000001, 0x000002, 0x000003, 0x000004, + 0x000006, 0x000008, 0x00000c, 0x000010, 0x000018, + 0x000020, 0x000030, 0x000040, 0x000060, 0x000080, + 0x0000c0, 0x000100, 0x000180, 0x000200, 0x000300, + 0x000400, 0x000600, 0x000800, 0x000c00, 0x001000, + 0x001800, 0x002000, 0x003000, 0x004000, 0x006000, + + /* extended window */ + 0x008000, 0x00c000, 0x010000, 0x018000, 0x020000, + 0x030000, 0x040000, 0x060000, 0x080000, 0x0c0000, + 0x100000, 0x180000, 0x200000, 0x300000, +} + +// The odd order in which the codegen code sizes are written. +var codegenOrder = []uint32{16, 17, 18, 0, 8, 7, 9, 6, 10, 5, 11, 4, 12, 3, 13, 2, 14, 1, 15} + +type huffmanBitWriter struct { + // writer is the underlying writer. + // Do not use it directly; use the write method, which ensures + // that Write errors are sticky. + writer io.Writer + + // Data waiting to be written is bytes[0:nbytes] + // and then the low nbits of bits. + bits uint64 + nbits uint + bytes [bufferSize]byte + codegenFreq [codegenCodeCount]int32 + nbytes int + literalFreq []int32 + offsetFreq []int32 + codegen []uint8 + literalEncoding *huffmanEncoder + offsetEncoding *huffmanEncoder + codegenEncoding *huffmanEncoder + err error +} + +func newHuffmanBitWriter(w io.Writer) *huffmanBitWriter { + return &huffmanBitWriter{ + writer: w, + literalFreq: make([]int32, maxNumLit), + offsetFreq: make([]int32, offsetCodeCount), + codegen: make([]uint8, maxNumLit+offsetCodeCount+1), + literalEncoding: newHuffmanEncoder(maxNumLit), + codegenEncoding: newHuffmanEncoder(codegenCodeCount), + offsetEncoding: newHuffmanEncoder(offsetCodeCount), + } +} + +func (w *huffmanBitWriter) reset(writer io.Writer) { + w.writer = writer + w.bits, w.nbits, w.nbytes, w.err = 0, 0, 0, nil + w.bytes = [bufferSize]byte{} +} + +func (w *huffmanBitWriter) flush() { + if w.err != nil { + w.nbits = 0 + return + } + n := w.nbytes + for w.nbits != 0 { + w.bytes[n] = byte(w.bits) + w.bits >>= 8 + if w.nbits > 8 { // Avoid underflow + w.nbits -= 8 + } else { + w.nbits = 0 + } + n++ + } + w.bits = 0 + w.write(w.bytes[:n]) + w.nbytes = 0 +} + +func (w *huffmanBitWriter) write(b []byte) { + if w.err != nil { + return + } + _, w.err = w.writer.Write(b) +} + +func (w *huffmanBitWriter) writeBits(b int32, nb uint) { + if w.err != nil { + return + } + w.bits |= uint64(b) << w.nbits + w.nbits += nb + if w.nbits >= 48 { + bits := w.bits + w.bits >>= 48 + w.nbits -= 48 + n := w.nbytes + bytes := w.bytes[n : n+6] + bytes[0] = byte(bits) + bytes[1] = byte(bits >> 8) + bytes[2] = byte(bits >> 16) + bytes[3] = byte(bits >> 24) + bytes[4] = byte(bits >> 32) + bytes[5] = byte(bits >> 40) + n += 6 + if n >= bufferFlushSize { + w.write(w.bytes[:n]) + n = 0 + } + w.nbytes = n + } +} + +func (w *huffmanBitWriter) writeBytes(bytes []byte) { + if w.err != nil { + return + } + n := w.nbytes + if w.nbits&7 != 0 { + w.err = InternalError("writeBytes with unfinished bits") + return + } + for w.nbits != 0 { + w.bytes[n] = byte(w.bits) + w.bits >>= 8 + w.nbits -= 8 + n++ + } + if n != 0 { + w.write(w.bytes[:n]) + } + w.nbytes = 0 + w.write(bytes) +} + +// RFC 1951 3.2.7 specifies a special run-length encoding for specifying +// the literal and offset lengths arrays (which are concatenated into a single +// array). 
This method generates that run-length encoding. +// +// The result is written into the codegen array, and the frequencies +// of each code is written into the codegenFreq array. +// Codes 0-15 are single byte codes. Codes 16-18 are followed by additional +// information. Code badCode is an end marker +// +// numLiterals The number of literals in literalEncoding +// numOffsets The number of offsets in offsetEncoding +// litenc, offenc The literal and offset encoder to use +func (w *huffmanBitWriter) generateCodegen(numLiterals int, numOffsets int, litEnc, offEnc *huffmanEncoder) { + for i := range w.codegenFreq { + w.codegenFreq[i] = 0 + } + // Note that we are using codegen both as a temporary variable for holding + // a copy of the frequencies, and as the place where we put the result. + // This is fine because the output is always shorter than the input used + // so far. + codegen := w.codegen // cache + // Copy the concatenated code sizes to codegen. Put a marker at the end. + cgnl := codegen[:numLiterals] + for i := range cgnl { + cgnl[i] = uint8(litEnc.codes[i].len) + } + + cgnl = codegen[numLiterals : numLiterals+numOffsets] + for i := range cgnl { + cgnl[i] = uint8(offEnc.codes[i].len) + } + codegen[numLiterals+numOffsets] = badCode + + size := codegen[0] + count := 1 + outIndex := 0 + for inIndex := 1; size != badCode; inIndex++ { + // INVARIANT: We have seen "count" copies of size that have not yet + // had output generated for them. + nextSize := codegen[inIndex] + if nextSize == size { + count++ + continue + } + // We need to generate codegen indicating "count" of size. + if size != 0 { + codegen[outIndex] = size + outIndex++ + w.codegenFreq[size]++ + count-- + for count >= 3 { + n := 6 + if n > count { + n = count + } + codegen[outIndex] = 16 + outIndex++ + codegen[outIndex] = uint8(n - 3) + outIndex++ + w.codegenFreq[16]++ + count -= n + } + } else { + for count >= 11 { + n := 138 + if n > count { + n = count + } + codegen[outIndex] = 18 + outIndex++ + codegen[outIndex] = uint8(n - 11) + outIndex++ + w.codegenFreq[18]++ + count -= n + } + if count >= 3 { + // count >= 3 && count <= 10 + codegen[outIndex] = 17 + outIndex++ + codegen[outIndex] = uint8(count - 3) + outIndex++ + w.codegenFreq[17]++ + count = 0 + } + } + count-- + for ; count >= 0; count-- { + codegen[outIndex] = size + outIndex++ + w.codegenFreq[size]++ + } + // Set up invariant for next time through the loop. + size = nextSize + count = 1 + } + // Marker indicating the end of the codegen. + codegen[outIndex] = badCode +} + +// dynamicSize returns the size of dynamically encoded data in bits. +func (w *huffmanBitWriter) dynamicSize(litEnc, offEnc *huffmanEncoder, extraBits int) (size, numCodegens int) { + numCodegens = len(w.codegenFreq) + for numCodegens > 4 && w.codegenFreq[codegenOrder[numCodegens-1]] == 0 { + numCodegens-- + } + header := 3 + 5 + 5 + 4 + (3 * numCodegens) + + w.codegenEncoding.bitLength(w.codegenFreq[:]) + + int(w.codegenFreq[16])*2 + + int(w.codegenFreq[17])*3 + + int(w.codegenFreq[18])*7 + size = header + + litEnc.bitLength(w.literalFreq) + + offEnc.bitLength(w.offsetFreq) + + extraBits + + return size, numCodegens +} + +// fixedSize returns the size of dynamically encoded data in bits. +func (w *huffmanBitWriter) fixedSize(extraBits int) int { + return 3 + + fixedLiteralEncoding.bitLength(w.literalFreq) + + fixedOffsetEncoding.bitLength(w.offsetFreq) + + extraBits +} + +// storedSize calculates the stored size, including header. 
+// The function returns the size in bits and whether the block +// fits inside a single block. +func (w *huffmanBitWriter) storedSize(in []byte) (int, bool) { + if in == nil { + return 0, false + } + if len(in) <= maxStoreBlockSize { + return (len(in) + 5) * 8, true + } + return 0, false +} + +func (w *huffmanBitWriter) writeCode(c hcode) { + if w.err != nil { + return + } + w.bits |= uint64(c.code) << w.nbits + w.nbits += uint(c.len) + if w.nbits >= 48 { + bits := w.bits + w.bits >>= 48 + w.nbits -= 48 + n := w.nbytes + bytes := w.bytes[n : n+6] + bytes[0] = byte(bits) + bytes[1] = byte(bits >> 8) + bytes[2] = byte(bits >> 16) + bytes[3] = byte(bits >> 24) + bytes[4] = byte(bits >> 32) + bytes[5] = byte(bits >> 40) + n += 6 + if n >= bufferFlushSize { + w.write(w.bytes[:n]) + n = 0 + } + w.nbytes = n + } +} + +// Write the header of a dynamic Huffman block to the output stream. +// +// numLiterals The number of literals specified in codegen +// numOffsets The number of offsets specified in codegen +// numCodegens The number of codegens used in codegen +func (w *huffmanBitWriter) writeDynamicHeader(numLiterals int, numOffsets int, numCodegens int, isEof bool) { + if w.err != nil { + return + } + var firstBits int32 = 4 + if isEof { + firstBits = 5 + } + w.writeBits(firstBits, 3) + w.writeBits(int32(numLiterals-257), 5) + w.writeBits(int32(numOffsets-1), 5) + w.writeBits(int32(numCodegens-4), 4) + + for i := 0; i < numCodegens; i++ { + value := uint(w.codegenEncoding.codes[codegenOrder[i]].len) + w.writeBits(int32(value), 3) + } + + i := 0 + for { + var codeWord int = int(w.codegen[i]) + i++ + if codeWord == badCode { + break + } + w.writeCode(w.codegenEncoding.codes[uint32(codeWord)]) + + switch codeWord { + case 16: + w.writeBits(int32(w.codegen[i]), 2) + i++ + break + case 17: + w.writeBits(int32(w.codegen[i]), 3) + i++ + break + case 18: + w.writeBits(int32(w.codegen[i]), 7) + i++ + break + } + } +} + +func (w *huffmanBitWriter) writeStoredHeader(length int, isEof bool) { + if w.err != nil { + return + } + var flag int32 + if isEof { + flag = 1 + } + w.writeBits(flag, 3) + w.flush() + w.writeBits(int32(length), 16) + w.writeBits(int32(^uint16(length)), 16) +} + +func (w *huffmanBitWriter) writeFixedHeader(isEof bool) { + if w.err != nil { + return + } + // Indicate that we are a fixed Huffman block + var value int32 = 2 + if isEof { + value = 3 + } + w.writeBits(value, 3) +} + +// writeBlock will write a block of tokens with the smallest encoding. +// The original input can be supplied, and if the huffman encoded data +// is larger than the original bytes, the data will be written as a +// stored block. +// If the input is nil, the tokens will always be Huffman encoded. +func (w *huffmanBitWriter) writeBlock(tokens []token, eof bool, input []byte) { + if w.err != nil { + return + } + + tokens = append(tokens, endBlockMarker) + numLiterals, numOffsets := w.indexTokens(tokens) + + var extraBits int + storedSize, storable := w.storedSize(input) + if storable { + // We only bother calculating the costs of the extra bits required by + // the length of offset fields (which will be the same for both fixed + // and dynamic encoding), if we need to compare those two encodings + // against stored encoding. + for lengthCode := lengthCodesStart + 8; lengthCode < numLiterals; lengthCode++ { + // First eight length codes have extra size = 0. 
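// Aside (not from the upstream source): the three writeXxxHeader methods
// above all open a block with the 3-bit DEFLATE header, written LSB-first,
// so bit 0 is BFINAL and bits 1-2 are BTYPE. That is why the literal values
// are 0/1 for stored (BTYPE=00), 2/3 for fixed (BTYPE=01) and 4/5 for
// dynamic (BTYPE=10). A stored block additionally pads to a byte boundary
// and writes LEN and its one's complement (2 bytes each), which is why
// storedSize charges (len(in)+5)*8 bits: a 1000-byte stored block costs
// 1005*8 = 8040 bits.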
+ extraBits += int(w.literalFreq[lengthCode]) * int(lengthExtraBits[lengthCode-lengthCodesStart]) + } + for offsetCode := 4; offsetCode < numOffsets; offsetCode++ { + // First four offset codes have extra size = 0. + extraBits += int(w.offsetFreq[offsetCode]) * int(offsetExtraBits[offsetCode]) + } + } + + // Figure out smallest code. + // Fixed Huffman baseline. + var literalEncoding = fixedLiteralEncoding + var offsetEncoding = fixedOffsetEncoding + var size = w.fixedSize(extraBits) + + // Dynamic Huffman? + var numCodegens int + + // Generate codegen and codegenFrequencies, which indicates how to encode + // the literalEncoding and the offsetEncoding. + w.generateCodegen(numLiterals, numOffsets, w.literalEncoding, w.offsetEncoding) + w.codegenEncoding.generate(w.codegenFreq[:], 7) + dynamicSize, numCodegens := w.dynamicSize(w.literalEncoding, w.offsetEncoding, extraBits) + + if dynamicSize < size { + size = dynamicSize + literalEncoding = w.literalEncoding + offsetEncoding = w.offsetEncoding + } + + // Stored bytes? + if storable && storedSize < size { + w.writeStoredHeader(len(input), eof) + w.writeBytes(input) + return + } + + // Huffman. + if literalEncoding == fixedLiteralEncoding { + w.writeFixedHeader(eof) + } else { + w.writeDynamicHeader(numLiterals, numOffsets, numCodegens, eof) + } + + // Write the tokens. + w.writeTokens(tokens, literalEncoding.codes, offsetEncoding.codes) +} + +// writeBlockDynamic encodes a block using a dynamic Huffman table. +// This should be used if the symbols used have a disproportionate +// histogram distribution. +// If input is supplied and the compression savings are below 1/16th of the +// input size the block is stored. +func (w *huffmanBitWriter) writeBlockDynamic(tokens []token, eof bool, input []byte) { + if w.err != nil { + return + } + + tokens = append(tokens, endBlockMarker) + numLiterals, numOffsets := w.indexTokens(tokens) + + // Generate codegen and codegenFrequencies, which indicates how to encode + // the literalEncoding and the offsetEncoding. + w.generateCodegen(numLiterals, numOffsets, w.literalEncoding, w.offsetEncoding) + w.codegenEncoding.generate(w.codegenFreq[:], 7) + size, numCodegens := w.dynamicSize(w.literalEncoding, w.offsetEncoding, 0) + + // Store bytes, if we don't get a reasonable improvement. + if ssize, storable := w.storedSize(input); storable && ssize < (size+size>>4) { + w.writeStoredHeader(len(input), eof) + w.writeBytes(input) + return + } + + // Write Huffman table. + w.writeDynamicHeader(numLiterals, numOffsets, numCodegens, eof) + + // Write the tokens. + w.writeTokens(tokens, w.literalEncoding.codes, w.offsetEncoding.codes) +} + +// indexTokens indexes a slice of tokens, and updates +// literalFreq and offsetFreq, and generates literalEncoding +// and offsetEncoding. +// The number of literal and offset tokens is returned. 
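// Aside (not from the upstream source): of the two block writers above,
// writeBlock compares three candidate costs in bits (storedSize, fixedSize
// and dynamicSize) and emits whichever form is smallest, while
// writeBlockDynamic only falls back to a stored block when that is smaller
// than the dynamic estimate plus a one-sixteenth slack, i.e. when
// ssize < size + size>>4. For a 1600-bit dynamic estimate the stored form
// therefore has to come in under 1600 + 100 = 1700 bits.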
+func (w *huffmanBitWriter) indexTokens(tokens []token) (numLiterals, numOffsets int) { + for i := range w.literalFreq { + w.literalFreq[i] = 0 + } + for i := range w.offsetFreq { + w.offsetFreq[i] = 0 + } + + for _, t := range tokens { + if t < matchType { + w.literalFreq[t.literal()]++ + continue + } + length := t.length() + offset := t.offset() + w.literalFreq[lengthCodesStart+lengthCode(length)]++ + w.offsetFreq[offsetCode(offset)]++ + } + + // get the number of literals + numLiterals = len(w.literalFreq) + for w.literalFreq[numLiterals-1] == 0 { + numLiterals-- + } + // get the number of offsets + numOffsets = len(w.offsetFreq) + for numOffsets > 0 && w.offsetFreq[numOffsets-1] == 0 { + numOffsets-- + } + if numOffsets == 0 { + // We haven't found a single match. If we want to go with the dynamic encoding, + // we should count at least one offset to be sure that the offset huffman tree could be encoded. + w.offsetFreq[0] = 1 + numOffsets = 1 + } + w.literalEncoding.generate(w.literalFreq, 15) + w.offsetEncoding.generate(w.offsetFreq, 15) + return +} + +// writeTokens writes a slice of tokens to the output. +// codes for literal and offset encoding must be supplied. +func (w *huffmanBitWriter) writeTokens(tokens []token, leCodes, oeCodes []hcode) { + if w.err != nil { + return + } + for _, t := range tokens { + if t < matchType { + w.writeCode(leCodes[t.literal()]) + continue + } + // Write the length + length := t.length() + lengthCode := lengthCode(length) + w.writeCode(leCodes[lengthCode+lengthCodesStart]) + extraLengthBits := uint(lengthExtraBits[lengthCode]) + if extraLengthBits > 0 { + extraLength := int32(length - lengthBase[lengthCode]) + w.writeBits(extraLength, extraLengthBits) + } + // Write the offset + offset := t.offset() + offsetCode := offsetCode(offset) + w.writeCode(oeCodes[offsetCode]) + extraOffsetBits := uint(offsetExtraBits[offsetCode]) + if extraOffsetBits > 0 { + extraOffset := int32(offset - offsetBase[offsetCode]) + w.writeBits(extraOffset, extraOffsetBits) + } + } +} + +// huffOffset is a static offset encoder used for huffman only encoding. +// It can be reused since we will not be encoding offset values. +var huffOffset *huffmanEncoder + +func init() { + w := newHuffmanBitWriter(nil) + w.offsetFreq[0] = 1 + huffOffset = newHuffmanEncoder(offsetCodeCount) + huffOffset.generate(w.offsetFreq, 15) +} + +// writeBlockHuff encodes a block of bytes as either +// Huffman encoded literals or uncompressed bytes if the +// results only gains very little from compression. +func (w *huffmanBitWriter) writeBlockHuff(eof bool, input []byte) { + if w.err != nil { + return + } + + // Clear histogram + for i := range w.literalFreq { + w.literalFreq[i] = 0 + } + + // Add everything as literals + histogram(input, w.literalFreq) + + w.literalFreq[endBlockMarker] = 1 + + const numLiterals = endBlockMarker + 1 + const numOffsets = 1 + + w.literalEncoding.generate(w.literalFreq, 15) + + // Figure out smallest code. + // Always use dynamic Huffman or Store + var numCodegens int + + // Generate codegen and codegenFrequencies, which indicates how to encode + // the literalEncoding and the offsetEncoding. + w.generateCodegen(numLiterals, numOffsets, w.literalEncoding, huffOffset) + w.codegenEncoding.generate(w.codegenFreq[:], 7) + size, numCodegens := w.dynamicSize(w.literalEncoding, huffOffset, 0) + + // Store bytes, if we don't get a reasonable improvement. 
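// Aside (not from the upstream source): writeTokens above emits a match as
// a length code plus lengthExtraBits extra bits, then an offset code plus
// offsetExtraBits extra bits, with each extra value measured from the
// matching base table at the top of this file. For example, a token offset
// value of 99 falls in the bucket whose base is 0x60 = 96, i.e. offset code
// 13, so code 13 is written followed by 99-96 = 3 in offsetExtraBits[13] = 5
// extra bits.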
+ if ssize, storable := w.storedSize(input); storable && ssize < (size+size>>4) { + w.writeStoredHeader(len(input), eof) + w.writeBytes(input) + return + } + + // Huffman. + w.writeDynamicHeader(numLiterals, numOffsets, numCodegens, eof) + encoding := w.literalEncoding.codes[:257] + n := w.nbytes + for _, t := range input { + // Bitwriting inlined, ~30% speedup + c := encoding[t] + w.bits |= uint64(c.code) << w.nbits + w.nbits += uint(c.len) + if w.nbits < 48 { + continue + } + // Store 6 bytes + bits := w.bits + w.bits >>= 48 + w.nbits -= 48 + bytes := w.bytes[n : n+6] + bytes[0] = byte(bits) + bytes[1] = byte(bits >> 8) + bytes[2] = byte(bits >> 16) + bytes[3] = byte(bits >> 24) + bytes[4] = byte(bits >> 32) + bytes[5] = byte(bits >> 40) + n += 6 + if n < bufferFlushSize { + continue + } + w.write(w.bytes[:n]) + if w.err != nil { + return // Return early in the event of write failures + } + n = 0 + } + w.nbytes = n + w.writeCode(encoding[endBlockMarker]) +} diff --git a/vendor/github.com/klauspost/compress/flate/huffman_bit_writer_test.go b/vendor/github.com/klauspost/compress/flate/huffman_bit_writer_test.go new file mode 100644 index 0000000..882d3ab --- /dev/null +++ b/vendor/github.com/klauspost/compress/flate/huffman_bit_writer_test.go @@ -0,0 +1,366 @@ +// Copyright 2016 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package flate + +import ( + "bytes" + "flag" + "fmt" + "io/ioutil" + "os" + "path/filepath" + "strings" + "testing" +) + +var update = flag.Bool("update", false, "update reference files") + +// TestBlockHuff tests huffman encoding against reference files +// to detect possible regressions. +// If encoding/bit allocation changes you can regenerate these files +// by using the -update flag. +func TestBlockHuff(t *testing.T) { + // determine input files + match, err := filepath.Glob("testdata/huffman-*.in") + if err != nil { + t.Fatal(err) + } + + for _, in := range match { + out := in // for files where input and output are identical + if strings.HasSuffix(in, ".in") { + out = in[:len(in)-len(".in")] + ".golden" + } + testBlockHuff(t, in, out) + } +} + +func testBlockHuff(t *testing.T, in, out string) { + all, err := ioutil.ReadFile(in) + if err != nil { + t.Error(err) + return + } + var buf bytes.Buffer + bw := newHuffmanBitWriter(&buf) + bw.writeBlockHuff(false, all) + bw.flush() + got := buf.Bytes() + + want, err := ioutil.ReadFile(out) + if err != nil && !*update { + t.Error(err) + return + } + + t.Logf("Testing %q", in) + if !bytes.Equal(got, want) { + if *update { + if in != out { + t.Logf("Updating %q", out) + if err := ioutil.WriteFile(out, got, 0666); err != nil { + t.Error(err) + } + return + } + // in == out: don't accidentally destroy input + t.Errorf("WARNING: -update did not rewrite input file %s", in) + } + + t.Errorf("%q != %q (see %q)", in, out, in+".got") + if err := ioutil.WriteFile(in+".got", got, 0666); err != nil { + t.Error(err) + } + return + } + t.Log("Output ok") + + // Test if the writer produces the same output after reset. 
+ buf.Reset() + bw.reset(&buf) + bw.writeBlockHuff(false, all) + bw.flush() + got = buf.Bytes() + if !bytes.Equal(got, want) { + t.Errorf("after reset %q != %q (see %q)", in, out, in+".reset.got") + if err := ioutil.WriteFile(in+".reset.got", got, 0666); err != nil { + t.Error(err) + } + return + } + t.Log("Reset ok") + testWriterEOF(t, "huff", huffTest{input: in}, true) +} + +type huffTest struct { + tokens []token + input string // File name of input data matching the tokens. + want string // File name of data with the expected output with input available. + wantNoInput string // File name of the expected output when no input is available. +} + +const ml = 0x7fc00000 // Maximum length token. Used to reduce the size of writeBlockTests + +var writeBlockTests = []huffTest{ + { + input: "testdata/huffman-null-max.in", + want: "testdata/huffman-null-max.%s.expect", + wantNoInput: "testdata/huffman-null-max.%s.expect-noinput", + tokens: []token{0x0, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, 0x0, 0x0}, + }, + { + input: "testdata/huffman-pi.in", + want: "testdata/huffman-pi.%s.expect", + wantNoInput: "testdata/huffman-pi.%s.expect-noinput", + tokens: []token{0x33, 0x2e, 0x31, 0x34, 0x31, 0x35, 0x39, 0x32, 0x36, 0x35, 0x33, 0x35, 0x38, 0x39, 0x37, 0x39, 0x33, 0x32, 0x33, 0x38, 0x34, 0x36, 0x32, 0x36, 0x34, 0x33, 0x33, 0x38, 0x33, 0x32, 0x37, 0x39, 0x35, 0x30, 0x32, 0x38, 0x38, 0x34, 0x31, 0x39, 0x37, 0x31, 0x36, 0x39, 0x33, 0x39, 0x39, 0x33, 0x37, 0x35, 0x31, 0x30, 0x35, 0x38, 0x32, 0x30, 0x39, 0x37, 0x34, 0x39, 0x34, 0x34, 0x35, 0x39, 0x32, 0x33, 0x30, 0x37, 0x38, 0x31, 0x36, 0x34, 0x30, 0x36, 0x32, 0x38, 0x36, 0x32, 0x30, 0x38, 0x39, 0x39, 0x38, 0x36, 0x32, 0x38, 0x30, 0x33, 0x34, 0x38, 0x32, 0x35, 0x33, 0x34, 0x32, 0x31, 0x31, 0x37, 0x30, 0x36, 0x37, 0x39, 0x38, 0x32, 0x31, 0x34, 0x38, 0x30, 0x38, 0x36, 0x35, 0x31, 0x33, 0x32, 0x38, 0x32, 0x33, 0x30, 0x36, 0x36, 0x34, 0x37, 0x30, 0x39, 0x33, 0x38, 0x34, 0x34, 0x36, 0x30, 0x39, 0x35, 0x35, 0x30, 0x35, 0x38, 0x32, 0x32, 0x33, 0x31, 0x37, 0x32, 0x35, 0x33, 0x35, 0x39, 0x34, 0x30, 0x38, 0x31, 0x32, 0x38, 0x34, 0x38, 0x31, 0x31, 0x31, 0x37, 0x34, 0x4040007e, 0x34, 0x31, 0x30, 0x32, 0x37, 0x30, 0x31, 0x39, 0x33, 0x38, 0x35, 0x32, 0x31, 0x31, 0x30, 0x35, 0x35, 0x35, 0x39, 0x36, 0x34, 0x34, 0x36, 0x32, 0x32, 0x39, 0x34, 0x38, 0x39, 0x35, 0x34, 0x39, 0x33, 0x30, 0x33, 0x38, 0x31, 0x40400012, 0x32, 0x38, 0x38, 0x31, 0x30, 0x39, 0x37, 0x35, 0x36, 0x36, 0x35, 0x39, 0x33, 0x33, 0x34, 0x34, 0x36, 0x40400047, 0x37, 0x35, 0x36, 0x34, 0x38, 0x32, 0x33, 0x33, 0x37, 0x38, 0x36, 0x37, 0x38, 0x33, 0x31, 
0x36, 0x35, 0x32, 0x37, 0x31, 0x32, 0x30, 0x31, 0x39, 0x30, 0x39, 0x31, 0x34, 0x4040001a, 0x35, 0x36, 0x36, 0x39, 0x32, 0x33, 0x34, 0x36, 0x404000b2, 0x36, 0x31, 0x30, 0x34, 0x35, 0x34, 0x33, 0x32, 0x36, 0x40400032, 0x31, 0x33, 0x33, 0x39, 0x33, 0x36, 0x30, 0x37, 0x32, 0x36, 0x30, 0x32, 0x34, 0x39, 0x31, 0x34, 0x31, 0x32, 0x37, 0x33, 0x37, 0x32, 0x34, 0x35, 0x38, 0x37, 0x30, 0x30, 0x36, 0x36, 0x30, 0x36, 0x33, 0x31, 0x35, 0x35, 0x38, 0x38, 0x31, 0x37, 0x34, 0x38, 0x38, 0x31, 0x35, 0x32, 0x30, 0x39, 0x32, 0x30, 0x39, 0x36, 0x32, 0x38, 0x32, 0x39, 0x32, 0x35, 0x34, 0x30, 0x39, 0x31, 0x37, 0x31, 0x35, 0x33, 0x36, 0x34, 0x33, 0x36, 0x37, 0x38, 0x39, 0x32, 0x35, 0x39, 0x30, 0x33, 0x36, 0x30, 0x30, 0x31, 0x31, 0x33, 0x33, 0x30, 0x35, 0x33, 0x30, 0x35, 0x34, 0x38, 0x38, 0x32, 0x30, 0x34, 0x36, 0x36, 0x35, 0x32, 0x31, 0x33, 0x38, 0x34, 0x31, 0x34, 0x36, 0x39, 0x35, 0x31, 0x39, 0x34, 0x31, 0x35, 0x31, 0x31, 0x36, 0x30, 0x39, 0x34, 0x33, 0x33, 0x30, 0x35, 0x37, 0x32, 0x37, 0x30, 0x33, 0x36, 0x35, 0x37, 0x35, 0x39, 0x35, 0x39, 0x31, 0x39, 0x35, 0x33, 0x30, 0x39, 0x32, 0x31, 0x38, 0x36, 0x31, 0x31, 0x37, 0x404000e9, 0x33, 0x32, 0x40400009, 0x39, 0x33, 0x31, 0x30, 0x35, 0x31, 0x31, 0x38, 0x35, 0x34, 0x38, 0x30, 0x37, 0x4040010e, 0x33, 0x37, 0x39, 0x39, 0x36, 0x32, 0x37, 0x34, 0x39, 0x35, 0x36, 0x37, 0x33, 0x35, 0x31, 0x38, 0x38, 0x35, 0x37, 0x35, 0x32, 0x37, 0x32, 0x34, 0x38, 0x39, 0x31, 0x32, 0x32, 0x37, 0x39, 0x33, 0x38, 0x31, 0x38, 0x33, 0x30, 0x31, 0x31, 0x39, 0x34, 0x39, 0x31, 0x32, 0x39, 0x38, 0x33, 0x33, 0x36, 0x37, 0x33, 0x33, 0x36, 0x32, 0x34, 0x34, 0x30, 0x36, 0x35, 0x36, 0x36, 0x34, 0x33, 0x30, 0x38, 0x36, 0x30, 0x32, 0x31, 0x33, 0x39, 0x34, 0x39, 0x34, 0x36, 0x33, 0x39, 0x35, 0x32, 0x32, 0x34, 0x37, 0x33, 0x37, 0x31, 0x39, 0x30, 0x37, 0x30, 0x32, 0x31, 0x37, 0x39, 0x38, 0x40800099, 0x37, 0x30, 0x32, 0x37, 0x37, 0x30, 0x35, 0x33, 0x39, 0x32, 0x31, 0x37, 0x31, 0x37, 0x36, 0x32, 0x39, 0x33, 0x31, 0x37, 0x36, 0x37, 0x35, 0x40800232, 0x37, 0x34, 0x38, 0x31, 0x40400006, 0x36, 0x36, 0x39, 0x34, 0x30, 0x404001e7, 0x30, 0x30, 0x30, 0x35, 0x36, 0x38, 0x31, 0x32, 0x37, 0x31, 0x34, 0x35, 0x32, 0x36, 0x33, 0x35, 0x36, 0x30, 0x38, 0x32, 0x37, 0x37, 0x38, 0x35, 0x37, 0x37, 0x31, 0x33, 0x34, 0x32, 0x37, 0x35, 0x37, 0x37, 0x38, 0x39, 0x36, 0x40400129, 0x33, 0x36, 0x33, 0x37, 0x31, 0x37, 0x38, 0x37, 0x32, 0x31, 0x34, 0x36, 0x38, 0x34, 0x34, 0x30, 0x39, 0x30, 0x31, 0x32, 0x32, 0x34, 0x39, 0x35, 0x33, 0x34, 0x33, 0x30, 0x31, 0x34, 0x36, 0x35, 0x34, 0x39, 0x35, 0x38, 0x35, 0x33, 0x37, 0x31, 0x30, 0x35, 0x30, 0x37, 0x39, 0x404000ca, 0x36, 0x40400153, 0x38, 0x39, 0x32, 0x33, 0x35, 0x34, 0x404001c9, 0x39, 0x35, 0x36, 0x31, 0x31, 0x32, 0x31, 0x32, 0x39, 0x30, 0x32, 0x31, 0x39, 0x36, 0x30, 0x38, 0x36, 0x34, 0x30, 0x33, 0x34, 0x34, 0x31, 0x38, 0x31, 0x35, 0x39, 0x38, 0x31, 0x33, 0x36, 0x32, 0x39, 0x37, 0x37, 0x34, 0x40400074, 0x30, 0x39, 0x39, 0x36, 0x30, 0x35, 0x31, 0x38, 0x37, 0x30, 0x37, 0x32, 0x31, 0x31, 0x33, 0x34, 0x39, 0x40800000, 0x38, 0x33, 0x37, 0x32, 0x39, 0x37, 0x38, 0x30, 0x34, 0x39, 0x39, 0x404002da, 0x39, 0x37, 0x33, 0x31, 0x37, 0x33, 0x32, 0x38, 0x4040018a, 0x36, 0x33, 0x31, 0x38, 0x35, 0x40400301, 0x404002e8, 0x34, 0x35, 0x35, 0x33, 0x34, 0x36, 0x39, 0x30, 0x38, 0x33, 0x30, 0x32, 0x36, 0x34, 0x32, 0x35, 0x32, 0x32, 0x33, 0x30, 0x404002e3, 0x40400267, 0x38, 0x35, 0x30, 0x33, 0x35, 0x32, 0x36, 0x31, 0x39, 0x33, 0x31, 0x31, 0x40400212, 0x31, 0x30, 0x31, 0x30, 0x30, 0x30, 0x33, 0x31, 0x33, 0x37, 0x38, 0x33, 0x38, 0x37, 0x35, 0x32, 0x38, 0x38, 0x36, 0x35, 0x38, 0x37, 0x35, 0x33, 0x33, 0x32, 0x30, 0x38, 
0x33, 0x38, 0x31, 0x34, 0x32, 0x30, 0x36, 0x40400140, 0x4040012b, 0x31, 0x34, 0x37, 0x33, 0x30, 0x33, 0x35, 0x39, 0x4080032e, 0x39, 0x30, 0x34, 0x32, 0x38, 0x37, 0x35, 0x35, 0x34, 0x36, 0x38, 0x37, 0x33, 0x31, 0x31, 0x35, 0x39, 0x35, 0x40400355, 0x33, 0x38, 0x38, 0x32, 0x33, 0x35, 0x33, 0x37, 0x38, 0x37, 0x35, 0x4080037f, 0x39, 0x4040013a, 0x31, 0x40400148, 0x38, 0x30, 0x35, 0x33, 0x4040018a, 0x32, 0x32, 0x36, 0x38, 0x30, 0x36, 0x36, 0x31, 0x33, 0x30, 0x30, 0x31, 0x39, 0x32, 0x37, 0x38, 0x37, 0x36, 0x36, 0x31, 0x31, 0x31, 0x39, 0x35, 0x39, 0x40400237, 0x36, 0x40800124, 0x38, 0x39, 0x33, 0x38, 0x30, 0x39, 0x35, 0x32, 0x35, 0x37, 0x32, 0x30, 0x31, 0x30, 0x36, 0x35, 0x34, 0x38, 0x35, 0x38, 0x36, 0x33, 0x32, 0x37, 0x4040009a, 0x39, 0x33, 0x36, 0x31, 0x35, 0x33, 0x40400220, 0x4080015c, 0x32, 0x33, 0x30, 0x33, 0x30, 0x31, 0x39, 0x35, 0x32, 0x30, 0x33, 0x35, 0x33, 0x30, 0x31, 0x38, 0x35, 0x32, 0x40400171, 0x40400075, 0x33, 0x36, 0x32, 0x32, 0x35, 0x39, 0x39, 0x34, 0x31, 0x33, 0x40400254, 0x34, 0x39, 0x37, 0x32, 0x31, 0x37, 0x404000de, 0x33, 0x34, 0x37, 0x39, 0x31, 0x33, 0x31, 0x35, 0x31, 0x35, 0x35, 0x37, 0x34, 0x38, 0x35, 0x37, 0x32, 0x34, 0x32, 0x34, 0x35, 0x34, 0x31, 0x35, 0x30, 0x36, 0x39, 0x4040013f, 0x38, 0x32, 0x39, 0x35, 0x33, 0x33, 0x31, 0x31, 0x36, 0x38, 0x36, 0x31, 0x37, 0x32, 0x37, 0x38, 0x40400337, 0x39, 0x30, 0x37, 0x35, 0x30, 0x39, 0x4040010d, 0x37, 0x35, 0x34, 0x36, 0x33, 0x37, 0x34, 0x36, 0x34, 0x39, 0x33, 0x39, 0x33, 0x31, 0x39, 0x32, 0x35, 0x35, 0x30, 0x36, 0x30, 0x34, 0x30, 0x30, 0x39, 0x4040026b, 0x31, 0x36, 0x37, 0x31, 0x31, 0x33, 0x39, 0x30, 0x30, 0x39, 0x38, 0x40400335, 0x34, 0x30, 0x31, 0x32, 0x38, 0x35, 0x38, 0x33, 0x36, 0x31, 0x36, 0x30, 0x33, 0x35, 0x36, 0x33, 0x37, 0x30, 0x37, 0x36, 0x36, 0x30, 0x31, 0x30, 0x34, 0x40400172, 0x38, 0x31, 0x39, 0x34, 0x32, 0x39, 0x4080041e, 0x404000ef, 0x4040028b, 0x37, 0x38, 0x33, 0x37, 0x34, 0x404004a8, 0x38, 0x32, 0x35, 0x35, 0x33, 0x37, 0x40800209, 0x32, 0x36, 0x38, 0x4040002e, 0x34, 0x30, 0x34, 0x37, 0x404001d1, 0x34, 0x404004b5, 0x4040038d, 0x38, 0x34, 0x404003a8, 0x36, 0x40c0031f, 0x33, 0x33, 0x31, 0x33, 0x36, 0x37, 0x37, 0x30, 0x32, 0x38, 0x39, 0x38, 0x39, 0x31, 0x35, 0x32, 0x40400062, 0x35, 0x32, 0x31, 0x36, 0x32, 0x30, 0x35, 0x36, 0x39, 0x36, 0x40400411, 0x30, 0x35, 0x38, 0x40400477, 0x35, 0x40400498, 0x35, 0x31, 0x31, 0x40400209, 0x38, 0x32, 0x34, 0x33, 0x30, 0x30, 0x33, 0x35, 0x35, 0x38, 0x37, 0x36, 0x34, 0x30, 0x32, 0x34, 0x37, 0x34, 0x39, 0x36, 0x34, 0x37, 0x33, 0x32, 0x36, 0x33, 0x4040043e, 0x39, 0x39, 0x32, 0x4040044b, 0x34, 0x32, 0x36, 0x39, 0x40c002c5, 0x37, 0x404001d6, 0x34, 0x4040053d, 0x4040041d, 0x39, 0x33, 0x34, 0x31, 0x37, 0x404001ad, 0x31, 0x32, 0x4040002a, 0x34, 0x4040019e, 0x31, 0x35, 0x30, 0x33, 0x30, 0x32, 0x38, 0x36, 0x31, 0x38, 0x32, 0x39, 0x37, 0x34, 0x35, 0x35, 0x35, 0x37, 0x30, 0x36, 0x37, 0x34, 0x40400135, 0x35, 0x30, 0x35, 0x34, 0x39, 0x34, 0x35, 0x38, 0x404001c5, 0x39, 0x40400051, 0x35, 0x36, 0x404001ec, 0x37, 0x32, 0x31, 0x30, 0x37, 0x39, 0x40400159, 0x33, 0x30, 0x4040010a, 0x33, 0x32, 0x31, 0x31, 0x36, 0x35, 0x33, 0x34, 0x34, 0x39, 0x38, 0x37, 0x32, 0x30, 0x32, 0x37, 0x4040011b, 0x30, 0x32, 0x33, 0x36, 0x34, 0x4040022e, 0x35, 0x34, 0x39, 0x39, 0x31, 0x31, 0x39, 0x38, 0x40400418, 0x34, 0x4040011b, 0x35, 0x33, 0x35, 0x36, 0x36, 0x33, 0x36, 0x39, 0x40400450, 0x32, 0x36, 0x35, 0x404002e4, 0x37, 0x38, 0x36, 0x32, 0x35, 0x35, 0x31, 0x404003da, 0x31, 0x37, 0x35, 0x37, 0x34, 0x36, 0x37, 0x32, 0x38, 0x39, 0x30, 0x39, 0x37, 0x37, 0x37, 0x37, 0x40800453, 0x30, 0x30, 0x30, 0x404005fd, 0x37, 0x30, 0x404004df, 
0x36, 0x404003e9, 0x34, 0x39, 0x31, 0x4040041e, 0x40400297, 0x32, 0x31, 0x34, 0x37, 0x37, 0x32, 0x33, 0x35, 0x30, 0x31, 0x34, 0x31, 0x34, 0x40400643, 0x33, 0x35, 0x36, 0x404004af, 0x31, 0x36, 0x31, 0x33, 0x36, 0x31, 0x31, 0x35, 0x37, 0x33, 0x35, 0x32, 0x35, 0x40400504, 0x33, 0x34, 0x4040005b, 0x31, 0x38, 0x4040047b, 0x38, 0x34, 0x404005e7, 0x33, 0x33, 0x32, 0x33, 0x39, 0x30, 0x37, 0x33, 0x39, 0x34, 0x31, 0x34, 0x33, 0x33, 0x33, 0x34, 0x35, 0x34, 0x37, 0x37, 0x36, 0x32, 0x34, 0x40400242, 0x32, 0x35, 0x31, 0x38, 0x39, 0x38, 0x33, 0x35, 0x36, 0x39, 0x34, 0x38, 0x35, 0x35, 0x36, 0x32, 0x30, 0x39, 0x39, 0x32, 0x31, 0x39, 0x32, 0x32, 0x32, 0x31, 0x38, 0x34, 0x32, 0x37, 0x4040023e, 0x32, 0x404000ba, 0x36, 0x38, 0x38, 0x37, 0x36, 0x37, 0x31, 0x37, 0x39, 0x30, 0x40400055, 0x30, 0x40800106, 0x36, 0x36, 0x404003e7, 0x38, 0x38, 0x36, 0x32, 0x37, 0x32, 0x404006dc, 0x31, 0x37, 0x38, 0x36, 0x30, 0x38, 0x35, 0x37, 0x40400073, 0x33, 0x408002fc, 0x37, 0x39, 0x37, 0x36, 0x36, 0x38, 0x31, 0x404002bd, 0x30, 0x30, 0x39, 0x35, 0x33, 0x38, 0x38, 0x40400638, 0x33, 0x404006a5, 0x30, 0x36, 0x38, 0x30, 0x30, 0x36, 0x34, 0x32, 0x32, 0x35, 0x31, 0x32, 0x35, 0x32, 0x4040057b, 0x37, 0x33, 0x39, 0x32, 0x40400297, 0x40400474, 0x34, 0x408006b3, 0x38, 0x36, 0x32, 0x36, 0x39, 0x34, 0x35, 0x404001e5, 0x34, 0x31, 0x39, 0x36, 0x35, 0x32, 0x38, 0x35, 0x30, 0x40400099, 0x4040039c, 0x31, 0x38, 0x36, 0x33, 0x404001be, 0x34, 0x40800154, 0x32, 0x30, 0x33, 0x39, 0x4040058b, 0x34, 0x35, 0x404002bc, 0x32, 0x33, 0x37, 0x4040042c, 0x36, 0x40400510, 0x35, 0x36, 0x40400638, 0x37, 0x31, 0x39, 0x31, 0x37, 0x32, 0x38, 0x40400171, 0x37, 0x36, 0x34, 0x36, 0x35, 0x37, 0x35, 0x37, 0x33, 0x39, 0x40400101, 0x33, 0x38, 0x39, 0x40400748, 0x38, 0x33, 0x32, 0x36, 0x34, 0x35, 0x39, 0x39, 0x35, 0x38, 0x404006a7, 0x30, 0x34, 0x37, 0x38, 0x404001de, 0x40400328, 0x39, 0x4040002d, 0x36, 0x34, 0x30, 0x37, 0x38, 0x39, 0x35, 0x31, 0x4040008e, 0x36, 0x38, 0x33, 0x4040012f, 0x32, 0x35, 0x39, 0x35, 0x37, 0x30, 0x40400468, 0x38, 0x32, 0x32, 0x404002c8, 0x32, 0x4040061b, 0x34, 0x30, 0x37, 0x37, 0x32, 0x36, 0x37, 0x31, 0x39, 0x34, 0x37, 0x38, 0x40400319, 0x38, 0x32, 0x36, 0x30, 0x31, 0x34, 0x37, 0x36, 0x39, 0x39, 0x30, 0x39, 0x404004e8, 0x30, 0x31, 0x33, 0x36, 0x33, 0x39, 0x34, 0x34, 0x33, 0x4040027f, 0x33, 0x30, 0x40400105, 0x32, 0x30, 0x33, 0x34, 0x39, 0x36, 0x32, 0x35, 0x32, 0x34, 0x35, 0x31, 0x37, 0x404003b5, 0x39, 0x36, 0x35, 0x31, 0x34, 0x33, 0x31, 0x34, 0x32, 0x39, 0x38, 0x30, 0x39, 0x31, 0x39, 0x30, 0x36, 0x35, 0x39, 0x32, 0x40400282, 0x37, 0x32, 0x32, 0x31, 0x36, 0x39, 0x36, 0x34, 0x36, 0x40400419, 0x4040007a, 0x35, 0x4040050e, 0x34, 0x40800565, 0x38, 0x40400559, 0x39, 0x37, 0x4040057b, 0x35, 0x34, 0x4040049d, 0x4040023e, 0x37, 0x4040065a, 0x38, 0x34, 0x36, 0x38, 0x31, 0x33, 0x4040008c, 0x36, 0x38, 0x33, 0x38, 0x36, 0x38, 0x39, 0x34, 0x32, 0x37, 0x37, 0x34, 0x31, 0x35, 0x35, 0x39, 0x39, 0x31, 0x38, 0x35, 0x4040005a, 0x32, 0x34, 0x35, 0x39, 0x35, 0x33, 0x39, 0x35, 0x39, 0x34, 0x33, 0x31, 0x404005b7, 0x37, 0x40400012, 0x36, 0x38, 0x30, 0x38, 0x34, 0x35, 0x404002e7, 0x37, 0x33, 0x4040081e, 0x39, 0x35, 0x38, 0x34, 0x38, 0x36, 0x35, 0x33, 0x38, 0x404006e8, 0x36, 0x32, 0x404000f2, 0x36, 0x30, 0x39, 0x404004b6, 0x36, 0x30, 0x38, 0x30, 0x35, 0x31, 0x32, 0x34, 0x33, 0x38, 0x38, 0x34, 0x4040013a, 0x4040000b, 0x34, 0x31, 0x33, 0x4040030f, 0x37, 0x36, 0x32, 0x37, 0x38, 0x40400341, 0x37, 0x31, 0x35, 0x4040059b, 0x33, 0x35, 0x39, 0x39, 0x37, 0x37, 0x30, 0x30, 0x31, 0x32, 0x39, 0x40400472, 0x38, 0x39, 0x34, 0x34, 0x31, 0x40400277, 0x36, 0x38, 0x35, 0x35, 0x4040005f, 0x34, 
0x30, 0x36, 0x33, 0x404008e6, 0x32, 0x30, 0x37, 0x32, 0x32, 0x40400158, 0x40800203, 0x34, 0x38, 0x31, 0x35, 0x38, 0x40400205, 0x404001fe, 0x4040027a, 0x40400298, 0x33, 0x39, 0x34, 0x35, 0x32, 0x32, 0x36, 0x37, 0x40c00496, 0x38, 0x4040058a, 0x32, 0x31, 0x404002ea, 0x32, 0x40400387, 0x35, 0x34, 0x36, 0x36, 0x36, 0x4040051b, 0x32, 0x33, 0x39, 0x38, 0x36, 0x34, 0x35, 0x36, 0x404004c4, 0x31, 0x36, 0x33, 0x35, 0x40800253, 0x40400811, 0x37, 0x404008ad, 0x39, 0x38, 0x4040045e, 0x39, 0x33, 0x36, 0x33, 0x34, 0x4040075b, 0x37, 0x34, 0x33, 0x32, 0x34, 0x4040047b, 0x31, 0x35, 0x30, 0x37, 0x36, 0x404004bb, 0x37, 0x39, 0x34, 0x35, 0x31, 0x30, 0x39, 0x4040003e, 0x30, 0x39, 0x34, 0x30, 0x404006a6, 0x38, 0x38, 0x37, 0x39, 0x37, 0x31, 0x30, 0x38, 0x39, 0x33, 0x404008f0, 0x36, 0x39, 0x31, 0x33, 0x36, 0x38, 0x36, 0x37, 0x32, 0x4040025b, 0x404001fe, 0x35, 0x4040053f, 0x40400468, 0x40400801, 0x31, 0x37, 0x39, 0x32, 0x38, 0x36, 0x38, 0x404008cc, 0x38, 0x37, 0x34, 0x37, 0x4080079e, 0x38, 0x32, 0x34, 0x4040097a, 0x38, 0x4040025b, 0x37, 0x31, 0x34, 0x39, 0x30, 0x39, 0x36, 0x37, 0x35, 0x39, 0x38, 0x404006ef, 0x33, 0x36, 0x35, 0x40400134, 0x38, 0x31, 0x4040005c, 0x40400745, 0x40400936, 0x36, 0x38, 0x32, 0x39, 0x4040057e, 0x38, 0x37, 0x32, 0x32, 0x36, 0x35, 0x38, 0x38, 0x30, 0x40400611, 0x35, 0x40400249, 0x34, 0x32, 0x37, 0x30, 0x34, 0x37, 0x37, 0x35, 0x35, 0x4040081e, 0x33, 0x37, 0x39, 0x36, 0x34, 0x31, 0x34, 0x35, 0x31, 0x35, 0x32, 0x404005fd, 0x32, 0x33, 0x34, 0x33, 0x36, 0x34, 0x35, 0x34, 0x404005de, 0x34, 0x34, 0x34, 0x37, 0x39, 0x35, 0x4040003c, 0x40400523, 0x408008e6, 0x34, 0x31, 0x4040052a, 0x33, 0x40400304, 0x35, 0x32, 0x33, 0x31, 0x40800841, 0x31, 0x36, 0x36, 0x31, 0x404008b2, 0x35, 0x39, 0x36, 0x39, 0x35, 0x33, 0x36, 0x32, 0x33, 0x31, 0x34, 0x404005ff, 0x32, 0x34, 0x38, 0x34, 0x39, 0x33, 0x37, 0x31, 0x38, 0x37, 0x31, 0x31, 0x30, 0x31, 0x34, 0x35, 0x37, 0x36, 0x35, 0x34, 0x40400761, 0x30, 0x32, 0x37, 0x39, 0x39, 0x33, 0x34, 0x34, 0x30, 0x33, 0x37, 0x34, 0x32, 0x30, 0x30, 0x37, 0x4040093f, 0x37, 0x38, 0x35, 0x33, 0x39, 0x30, 0x36, 0x32, 0x31, 0x39, 0x40800299, 0x40400345, 0x38, 0x34, 0x37, 0x408003d2, 0x38, 0x33, 0x33, 0x32, 0x31, 0x34, 0x34, 0x35, 0x37, 0x31, 0x40400284, 0x40400776, 0x34, 0x33, 0x35, 0x30, 0x40400928, 0x40400468, 0x35, 0x33, 0x31, 0x39, 0x31, 0x30, 0x34, 0x38, 0x34, 0x38, 0x31, 0x30, 0x30, 0x35, 0x33, 0x37, 0x30, 0x36, 0x404008bc, 0x4080059d, 0x40800781, 0x31, 0x40400559, 0x37, 0x4040031b, 0x35, 0x404007ec, 0x4040040c, 0x36, 0x33, 0x408007dc, 0x34, 0x40400971, 0x4080034e, 0x408003f5, 0x38, 0x4080052d, 0x40800887, 0x39, 0x40400187, 0x39, 0x31, 0x404008ce, 0x38, 0x31, 0x34, 0x36, 0x37, 0x35, 0x31, 0x4040062b, 0x31, 0x32, 0x33, 0x39, 0x40c001a9, 0x39, 0x30, 0x37, 0x31, 0x38, 0x36, 0x34, 0x39, 0x34, 0x32, 0x33, 0x31, 0x39, 0x36, 0x31, 0x35, 0x36, 0x404001ec, 0x404006bc, 0x39, 0x35, 0x40400926, 0x40400469, 0x4040011b, 0x36, 0x30, 0x33, 0x38, 0x40400a25, 0x4040016f, 0x40400384, 0x36, 0x32, 0x4040045a, 0x35, 0x4040084c, 0x36, 0x33, 0x38, 0x39, 0x33, 0x37, 0x37, 0x38, 0x37, 0x404008c5, 0x404000f8, 0x39, 0x37, 0x39, 0x32, 0x30, 0x37, 0x37, 0x33, 0x404005d7, 0x32, 0x31, 0x38, 0x32, 0x35, 0x36, 0x404007df, 0x36, 0x36, 0x404006d6, 0x34, 0x32, 0x4080067e, 0x36, 0x404006e6, 0x34, 0x34, 0x40400024, 0x35, 0x34, 0x39, 0x32, 0x30, 0x32, 0x36, 0x30, 0x35, 0x40400ab3, 0x408003e4, 0x32, 0x30, 0x31, 0x34, 0x39, 0x404004d2, 0x38, 0x35, 0x30, 0x37, 0x33, 0x40400599, 0x36, 0x36, 0x36, 0x30, 0x40400194, 0x32, 0x34, 0x33, 0x34, 0x30, 0x40400087, 0x30, 0x4040076b, 0x38, 0x36, 0x33, 0x40400956, 0x404007e4, 0x4040042b, 
0x40400174, 0x35, 0x37, 0x39, 0x36, 0x32, 0x36, 0x38, 0x35, 0x36, 0x40400140, 0x35, 0x30, 0x38, 0x40400523, 0x35, 0x38, 0x37, 0x39, 0x36, 0x39, 0x39, 0x40400711, 0x35, 0x37, 0x34, 0x40400a18, 0x38, 0x34, 0x30, 0x404008b3, 0x31, 0x34, 0x35, 0x39, 0x31, 0x4040078c, 0x37, 0x30, 0x40400234, 0x30, 0x31, 0x40400be7, 0x31, 0x32, 0x40400c74, 0x30, 0x404003c3, 0x33, 0x39, 0x40400b2a, 0x40400112, 0x37, 0x31, 0x35, 0x404003b0, 0x34, 0x32, 0x30, 0x40800bf2, 0x39, 0x40400bc2, 0x30, 0x37, 0x40400341, 0x40400795, 0x40400aaf, 0x40400c62, 0x32, 0x31, 0x40400960, 0x32, 0x35, 0x31, 0x4040057b, 0x40400944, 0x39, 0x32, 0x404001b2, 0x38, 0x32, 0x36, 0x40400b66, 0x32, 0x40400278, 0x33, 0x32, 0x31, 0x35, 0x37, 0x39, 0x31, 0x39, 0x38, 0x34, 0x31, 0x34, 0x4080087b, 0x39, 0x31, 0x36, 0x34, 0x408006e8, 0x39, 0x40800b58, 0x404008db, 0x37, 0x32, 0x32, 0x40400321, 0x35, 0x404008a4, 0x40400141, 0x39, 0x31, 0x30, 0x404000bc, 0x40400c5b, 0x35, 0x32, 0x38, 0x30, 0x31, 0x37, 0x40400231, 0x37, 0x31, 0x32, 0x40400914, 0x38, 0x33, 0x32, 0x40400373, 0x31, 0x40400589, 0x30, 0x39, 0x33, 0x35, 0x33, 0x39, 0x36, 0x35, 0x37, 0x4040064b, 0x31, 0x30, 0x38, 0x33, 0x40400069, 0x35, 0x31, 0x4040077a, 0x40400d5a, 0x31, 0x34, 0x34, 0x34, 0x32, 0x31, 0x30, 0x30, 0x40400202, 0x30, 0x33, 0x4040019c, 0x31, 0x31, 0x30, 0x33, 0x40400c81, 0x40400009, 0x40400026, 0x40c00602, 0x35, 0x31, 0x36, 0x404005d9, 0x40800883, 0x4040092a, 0x35, 0x40800c42, 0x38, 0x35, 0x31, 0x37, 0x31, 0x34, 0x33, 0x37, 0x40400605, 0x4040006d, 0x31, 0x35, 0x35, 0x36, 0x35, 0x30, 0x38, 0x38, 0x404003b9, 0x39, 0x38, 0x39, 0x38, 0x35, 0x39, 0x39, 0x38, 0x32, 0x33, 0x38, 0x404001cf, 0x404009ba, 0x33, 0x4040016c, 0x4040043e, 0x404009c3, 0x38, 0x40800e05, 0x33, 0x32, 0x40400107, 0x35, 0x40400305, 0x33, 0x404001ca, 0x39, 0x4040041b, 0x39, 0x38, 0x4040087d, 0x34, 0x40400cb8, 0x37, 0x4040064b, 0x30, 0x37, 0x404000e5, 0x34, 0x38, 0x31, 0x34, 0x31, 0x40400539, 0x38, 0x35, 0x39, 0x34, 0x36, 0x31, 0x40400bc9, 0x38, 0x30}, + }, + { + input: "testdata/huffman-rand-1k.in", + want: "testdata/huffman-rand-1k.%s.expect", + wantNoInput: "testdata/huffman-rand-1k.%s.expect-noinput", + tokens: []token{0xf8, 0x8b, 0x96, 0x76, 0x48, 0xd, 0x85, 0x94, 0x25, 0x80, 0xaf, 0xc2, 0xfe, 0x8d, 0xe8, 0x20, 0xeb, 0x17, 0x86, 0xc9, 0xb7, 0xc5, 0xde, 0x6, 0xea, 0x7d, 0x18, 0x8b, 0xe7, 0x3e, 0x7, 0xda, 0xdf, 0xff, 0x6c, 0x73, 0xde, 0xcc, 0xe7, 0x6d, 0x8d, 0x4, 0x19, 0x49, 0x7f, 0x47, 0x1f, 0x48, 0x15, 0xb0, 0xe8, 0x9e, 0xf2, 0x31, 0x59, 0xde, 0x34, 0xb4, 0x5b, 0xe5, 0xe0, 0x9, 0x11, 0x30, 0xc2, 0x88, 0x5b, 0x7c, 0x5d, 0x14, 0x13, 0x6f, 0x23, 0xa9, 0xd, 0xbc, 0x2d, 0x23, 0xbe, 0xd9, 0xed, 0x75, 0x4, 0x6c, 0x99, 0xdf, 0xfd, 0x70, 0x66, 0xe6, 0xee, 0xd9, 0xb1, 0x9e, 0x6e, 0x83, 0x59, 0xd5, 0xd4, 0x80, 0x59, 0x98, 0x77, 0x89, 0x43, 0x38, 0xc9, 0xaf, 0x30, 0x32, 0x9a, 0x20, 0x1b, 0x46, 0x3d, 0x67, 0x6e, 0xd7, 0x72, 0x9e, 0x4e, 0x21, 0x4f, 0xc6, 0xe0, 0xd4, 0x7b, 0x4, 0x8d, 0xa5, 0x3, 0xf6, 0x5, 0x9b, 0x6b, 0xdc, 0x2a, 0x93, 0x77, 0x28, 0xfd, 0xb4, 0x62, 0xda, 0x20, 0xe7, 0x1f, 0xab, 0x6b, 0x51, 0x43, 0x39, 0x2f, 0xa0, 0x92, 0x1, 0x6c, 0x75, 0x3e, 0xf4, 0x35, 0xfd, 0x43, 0x2e, 0xf7, 0xa4, 0x75, 0xda, 0xea, 0x9b, 0xa, 0x64, 0xb, 0xe0, 0x23, 0x29, 0xbd, 0xf7, 0xe7, 0x83, 0x3c, 0xfb, 0xdf, 0xb3, 0xae, 0x4f, 0xa4, 0x47, 0x55, 0x99, 0xde, 0x2f, 0x96, 0x6e, 0x1c, 0x43, 0x4c, 0x87, 0xe2, 0x7c, 0xd9, 0x5f, 0x4c, 0x7c, 0xe8, 0x90, 0x3, 0xdb, 0x30, 0x95, 0xd6, 0x22, 0xc, 0x47, 0xb8, 0x4d, 0x6b, 0xbd, 0x24, 0x11, 0xab, 0x2c, 0xd7, 0xbe, 0x6e, 0x7a, 0xd6, 0x8, 0xa3, 0x98, 0xd8, 0xdd, 0x15, 0x6a, 0xfa, 0x93, 0x30, 0x1, 0x25, 0x1d, 
0xa2, 0x74, 0x86, 0x4b, 0x6a, 0x95, 0xe8, 0xe1, 0x4e, 0xe, 0x76, 0xb9, 0x49, 0xa9, 0x5f, 0xa0, 0xa6, 0x63, 0x3c, 0x7e, 0x7e, 0x20, 0x13, 0x4f, 0xbb, 0x66, 0x92, 0xb8, 0x2e, 0xa4, 0xfa, 0x48, 0xcb, 0xae, 0xb9, 0x3c, 0xaf, 0xd3, 0x1f, 0xe1, 0xd5, 0x8d, 0x42, 0x6d, 0xf0, 0xfc, 0x8c, 0xc, 0x0, 0xde, 0x40, 0xab, 0x8b, 0x47, 0x97, 0x4e, 0xa8, 0xcf, 0x8e, 0xdb, 0xa6, 0x8b, 0x20, 0x9, 0x84, 0x7a, 0x66, 0xe5, 0x98, 0x29, 0x2, 0x95, 0xe6, 0x38, 0x32, 0x60, 0x3, 0xe3, 0x9a, 0x1e, 0x54, 0xe8, 0x63, 0x80, 0x48, 0x9c, 0xe7, 0x63, 0x33, 0x6e, 0xa0, 0x65, 0x83, 0xfa, 0xc6, 0xba, 0x7a, 0x43, 0x71, 0x5, 0xf5, 0x68, 0x69, 0x85, 0x9c, 0xba, 0x45, 0xcd, 0x6b, 0xb, 0x19, 0xd1, 0xbb, 0x7f, 0x70, 0x85, 0x92, 0xd1, 0xb4, 0x64, 0x82, 0xb1, 0xe4, 0x62, 0xc5, 0x3c, 0x46, 0x1f, 0x92, 0x31, 0x1c, 0x4e, 0x41, 0x77, 0xf7, 0xe7, 0x87, 0xa2, 0xf, 0x6e, 0xe8, 0x92, 0x3, 0x6b, 0xa, 0xe7, 0xa9, 0x3b, 0x11, 0xda, 0x66, 0x8a, 0x29, 0xda, 0x79, 0xe1, 0x64, 0x8d, 0xe3, 0x54, 0xd4, 0xf5, 0xef, 0x64, 0x87, 0x3b, 0xf4, 0xc2, 0xf4, 0x71, 0x13, 0xa9, 0xe9, 0xe0, 0xa2, 0x6, 0x14, 0xab, 0x5d, 0xa7, 0x96, 0x0, 0xd6, 0xc3, 0xcc, 0x57, 0xed, 0x39, 0x6a, 0x25, 0xcd, 0x76, 0xea, 0xba, 0x3a, 0xf2, 0xa1, 0x95, 0x5d, 0xe5, 0x71, 0xcf, 0x9c, 0x62, 0x9e, 0x6a, 0xfa, 0xd5, 0x31, 0xd1, 0xa8, 0x66, 0x30, 0x33, 0xaa, 0x51, 0x17, 0x13, 0x82, 0x99, 0xc8, 0x14, 0x60, 0x9f, 0x4d, 0x32, 0x6d, 0xda, 0x19, 0x26, 0x21, 0xdc, 0x7e, 0x2e, 0x25, 0x67, 0x72, 0xca, 0xf, 0x92, 0xcd, 0xf6, 0xd6, 0xcb, 0x97, 0x8a, 0x33, 0x58, 0x73, 0x70, 0x91, 0x1d, 0xbf, 0x28, 0x23, 0xa3, 0xc, 0xf1, 0x83, 0xc3, 0xc8, 0x56, 0x77, 0x68, 0xe3, 0x82, 0xba, 0xb9, 0x57, 0x56, 0x57, 0x9c, 0xc3, 0xd6, 0x14, 0x5, 0x3c, 0xb1, 0xaf, 0x93, 0xc8, 0x8a, 0x57, 0x7f, 0x53, 0xfa, 0x2f, 0xaa, 0x6e, 0x66, 0x83, 0xfa, 0x33, 0xd1, 0x21, 0xab, 0x1b, 0x71, 0xb4, 0x7c, 0xda, 0xfd, 0xfb, 0x7f, 0x20, 0xab, 0x5e, 0xd5, 0xca, 0xfd, 0xdd, 0xe0, 0xee, 0xda, 0xba, 0xa8, 0x27, 0x99, 0x97, 0x69, 0xc1, 0x3c, 0x82, 0x8c, 0xa, 0x5c, 0x2d, 0x5b, 0x88, 0x3e, 0x34, 0x35, 0x86, 0x37, 0x46, 0x79, 0xe1, 0xaa, 0x19, 0xfb, 0xaa, 0xde, 0x15, 0x9, 0xd, 0x1a, 0x57, 0xff, 0xb5, 0xf, 0xf3, 0x2b, 0x5a, 0x6a, 0x4d, 0x19, 0x77, 0x71, 0x45, 0xdf, 0x4f, 0xb3, 0xec, 0xf1, 0xeb, 0x18, 0x53, 0x3e, 0x3b, 0x47, 0x8, 0x9a, 0x73, 0xa0, 0x5c, 0x8c, 0x5f, 0xeb, 0xf, 0x3a, 0xc2, 0x43, 0x67, 0xb4, 0x66, 0x67, 0x80, 0x58, 0xe, 0xc1, 0xec, 0x40, 0xd4, 0x22, 0x94, 0xca, 0xf9, 0xe8, 0x92, 0xe4, 0x69, 0x38, 0xbe, 0x67, 0x64, 0xca, 0x50, 0xc7, 0x6, 0x67, 0x42, 0x6e, 0xa3, 0xf0, 0xb7, 0x6c, 0xf2, 0xe8, 0x5f, 0xb1, 0xaf, 0xe7, 0xdb, 0xbb, 0x77, 0xb5, 0xf8, 0xcb, 0x8, 0xc4, 0x75, 0x7e, 0xc0, 0xf9, 0x1c, 0x7f, 0x3c, 0x89, 0x2f, 0xd2, 0x58, 0x3a, 0xe2, 0xf8, 0x91, 0xb6, 0x7b, 0x24, 0x27, 0xe9, 0xae, 0x84, 0x8b, 0xde, 0x74, 0xac, 0xfd, 0xd9, 0xb7, 0x69, 0x2a, 0xec, 0x32, 0x6f, 0xf0, 0x92, 0x84, 0xf1, 0x40, 0xc, 0x8a, 0xbc, 0x39, 0x6e, 0x2e, 0x73, 0xd4, 0x6e, 0x8a, 0x74, 0x2a, 0xdc, 0x60, 0x1f, 0xa3, 0x7, 0xde, 0x75, 0x8b, 0x74, 0xc8, 0xfe, 0x63, 0x75, 0xf6, 0x3d, 0x63, 0xac, 0x33, 0x89, 0xc3, 0xf0, 0xf8, 0x2d, 0x6b, 0xb4, 0x9e, 0x74, 0x8b, 0x5c, 0x33, 0xb4, 0xca, 0xa8, 0xe4, 0x99, 0xb6, 0x90, 0xa1, 0xef, 0xf, 0xd3, 0x61, 0xb2, 0xc6, 0x1a, 0x94, 0x7c, 0x44, 0x55, 0xf4, 0x45, 0xff, 0x9e, 0xa5, 0x5a, 0xc6, 0xa0, 0xe8, 0x2a, 0xc1, 0x8d, 0x6f, 0x34, 0x11, 0xb9, 0xbe, 0x4e, 0xd9, 0x87, 0x97, 0x73, 0xcf, 0x3d, 0x23, 0xae, 0xd5, 0x1a, 0x5e, 0xae, 0x5d, 0x6a, 0x3, 0xf9, 0x22, 0xd, 0x10, 0xd9, 0x47, 0x69, 0x15, 0x3f, 0xee, 0x52, 0xa3, 0x8, 0xd2, 0x3c, 0x51, 0xf4, 0xf8, 0x9d, 0xe4, 0x98, 0x89, 0xc8, 0x67, 0x39, 0xd5, 0x5e, 0x35, 0x78, 0x27, 0xe8, 0x3c, 0x80, 0xae, 
0x79, 0x71, 0xd2, 0x93, 0xf4, 0xaa, 0x51, 0x12, 0x1c, 0x4b, 0x1b, 0xe5, 0x6e, 0x15, 0x6f, 0xe4, 0xbb, 0x51, 0x9b, 0x45, 0x9f, 0xf9, 0xc4, 0x8c, 0x2a, 0xfb, 0x1a, 0xdf, 0x55, 0xd3, 0x48, 0x93, 0x27, 0x1, 0x26, 0xc2, 0x6b, 0x55, 0x6d, 0xa2, 0xfb, 0x84, 0x8b, 0xc9, 0x9e, 0x28, 0xc2, 0xef, 0x1a, 0x24, 0xec, 0x9b, 0xae, 0xbd, 0x60, 0xe9, 0x15, 0x35, 0xee, 0x42, 0xa4, 0x33, 0x5b, 0xfa, 0xf, 0xb6, 0xf7, 0x1, 0xa6, 0x2, 0x4c, 0xca, 0x90, 0x58, 0x3a, 0x96, 0x41, 0xe7, 0xcb, 0x9, 0x8c, 0xdb, 0x85, 0x4d, 0xa8, 0x89, 0xf3, 0xb5, 0x8e, 0xfd, 0x75, 0x5b, 0x4f, 0xed, 0xde, 0x3f, 0xeb, 0x38, 0xa3, 0xbe, 0xb0, 0x73, 0xfc, 0xb8, 0x54, 0xf7, 0x4c, 0x30, 0x67, 0x2e, 0x38, 0xa2, 0x54, 0x18, 0xba, 0x8, 0xbf, 0xf2, 0x39, 0xd5, 0xfe, 0xa5, 0x41, 0xc6, 0x66, 0x66, 0xba, 0x81, 0xef, 0x67, 0xe4, 0xe6, 0x3c, 0xc, 0xca, 0xa4, 0xa, 0x79, 0xb3, 0x57, 0x8b, 0x8a, 0x75, 0x98, 0x18, 0x42, 0x2f, 0x29, 0xa3, 0x82, 0xef, 0x9f, 0x86, 0x6, 0x23, 0xe1, 0x75, 0xfa, 0x8, 0xb1, 0xde, 0x17, 0x4a}, + }, + { + input: "testdata/huffman-rand-limit.in", + want: "testdata/huffman-rand-limit.%s.expect", + wantNoInput: "testdata/huffman-rand-limit.%s.expect-noinput", + tokens: []token{0x61, 0x51c00000, 0xa, 0xf8, 0x8b, 0x96, 0x76, 0x48, 0xa, 0x85, 0x94, 0x25, 0x80, 0xaf, 0xc2, 0xfe, 0x8d, 0xe8, 0x20, 0xeb, 0x17, 0x86, 0xc9, 0xb7, 0xc5, 0xde, 0x6, 0xea, 0x7d, 0x18, 0x8b, 0xe7, 0x3e, 0x7, 0xda, 0xdf, 0xff, 0x6c, 0x73, 0xde, 0xcc, 0xe7, 0x6d, 0x8d, 0x4, 0x19, 0x49, 0x7f, 0x47, 0x1f, 0x48, 0x15, 0xb0, 0xe8, 0x9e, 0xf2, 0x31, 0x59, 0xde, 0x34, 0xb4, 0x5b, 0xe5, 0xe0, 0x9, 0x11, 0x30, 0xc2, 0x88, 0x5b, 0x7c, 0x5d, 0x14, 0x13, 0x6f, 0x23, 0xa9, 0xa, 0xbc, 0x2d, 0x23, 0xbe, 0xd9, 0xed, 0x75, 0x4, 0x6c, 0x99, 0xdf, 0xfd, 0x70, 0x66, 0xe6, 0xee, 0xd9, 0xb1, 0x9e, 0x6e, 0x83, 0x59, 0xd5, 0xd4, 0x80, 0x59, 0x98, 0x77, 0x89, 0x43, 0x38, 0xc9, 0xaf, 0x30, 0x32, 0x9a, 0x20, 0x1b, 0x46, 0x3d, 0x67, 0x6e, 0xd7, 0x72, 0x9e, 0x4e, 0x21, 0x4f, 0xc6, 0xe0, 0xd4, 0x7b, 0x4, 0x8d, 0xa5, 0x3, 0xf6, 0x5, 0x9b, 0x6b, 0xdc, 0x2a, 0x93, 0x77, 0x28, 0xfd, 0xb4, 0x62, 0xda, 0x20, 0xe7, 0x1f, 0xab, 0x6b, 0x51, 0x43, 0x39, 0x2f, 0xa0, 0x92, 0x1, 0x6c, 0x75, 0x3e, 0xf4, 0x35, 0xfd, 0x43, 0x2e, 0xf7, 0xa4, 0x75, 0xda, 0xea, 0x9b, 0xa}, + }, + { + input: "testdata/huffman-shifts.in", + want: "testdata/huffman-shifts.%s.expect", + wantNoInput: "testdata/huffman-shifts.%s.expect-noinput", + tokens: []token{0x31, 0x30, 0x7fc00001, 0x7fc00001, 0x7fc00001, 0x7fc00001, 0x7fc00001, 0x7fc00001, 0x7fc00001, 0x7fc00001, 0x7fc00001, 0x7fc00001, 0x7fc00001, 0x7fc00001, 0x7fc00001, 0x7fc00001, 0x7fc00001, 0x52400001, 0xd, 0xa, 0x32, 0x33, 0x7fc00001, 0x7fc00001, 0x7fc00001, 0x7fc00001, 0x7fc00001, 0x7fc00001, 0x7fc00001, 0x7fc00001, 0x7fc00001, 0x7f400001}, + }, + { + input: "testdata/huffman-text-shift.in", + want: "testdata/huffman-text-shift.%s.expect", + wantNoInput: "testdata/huffman-text-shift.%s.expect-noinput", + tokens: []token{0x2f, 0x2f, 0x43, 0x6f, 0x70, 0x79, 0x72, 0x69, 0x67, 0x68, 0x74, 0x32, 0x30, 0x30, 0x39, 0x54, 0x68, 0x47, 0x6f, 0x41, 0x75, 0x74, 0x68, 0x6f, 0x72, 0x2e, 0x41, 0x6c, 0x6c, 0x40800016, 0x72, 0x72, 0x76, 0x64, 0x2e, 0xd, 0xa, 0x2f, 0x2f, 0x55, 0x6f, 0x66, 0x74, 0x68, 0x69, 0x6f, 0x75, 0x72, 0x63, 0x63, 0x6f, 0x64, 0x69, 0x67, 0x6f, 0x76, 0x72, 0x6e, 0x64, 0x62, 0x79, 0x42, 0x53, 0x44, 0x2d, 0x74, 0x79, 0x6c, 0x40400020, 0x6c, 0x69, 0x63, 0x6e, 0x74, 0x68, 0x74, 0x63, 0x6e, 0x62, 0x66, 0x6f, 0x75, 0x6e, 0x64, 0x69, 0x6e, 0x74, 0x68, 0x4c, 0x49, 0x43, 0x45, 0x4e, 0x53, 0x45, 0x66, 0x69, 0x6c, 0x2e, 0xd, 0xa, 0xd, 0xa, 0x70, 0x63, 0x6b, 0x67, 0x6d, 0x69, 
0x6e, 0x4040000a, 0x69, 0x6d, 0x70, 0x6f, 0x72, 0x74, 0x22, 0x6f, 0x22, 0x4040000c, 0x66, 0x75, 0x6e, 0x63, 0x6d, 0x69, 0x6e, 0x28, 0x29, 0x7b, 0xd, 0xa, 0x9, 0x76, 0x72, 0x62, 0x3d, 0x6d, 0x6b, 0x28, 0x5b, 0x5d, 0x62, 0x79, 0x74, 0x2c, 0x36, 0x35, 0x35, 0x33, 0x35, 0x29, 0xd, 0xa, 0x9, 0x66, 0x2c, 0x5f, 0x3a, 0x3d, 0x6f, 0x2e, 0x43, 0x72, 0x74, 0x28, 0x22, 0x68, 0x75, 0x66, 0x66, 0x6d, 0x6e, 0x2d, 0x6e, 0x75, 0x6c, 0x6c, 0x2d, 0x6d, 0x78, 0x2e, 0x69, 0x6e, 0x22, 0x40800021, 0x2e, 0x57, 0x72, 0x69, 0x74, 0x28, 0x62, 0x29, 0xd, 0xa, 0x7d, 0xd, 0xa, 0x41, 0x42, 0x43, 0x44, 0x45, 0x46, 0x47, 0x48, 0x49, 0x4a, 0x4b, 0x4c, 0x4d, 0x4e, 0x4f, 0x50, 0x51, 0x52, 0x53, 0x54, 0x55, 0x56, 0x58, 0x78, 0x79, 0x7a, 0x21, 0x22, 0x23, 0xc2, 0xa4, 0x25, 0x26, 0x2f, 0x3f, 0x22}, + }, + { + input: "testdata/huffman-text.in", + want: "testdata/huffman-text.%s.expect", + wantNoInput: "testdata/huffman-text.%s.expect-noinput", + tokens: []token{0x2f, 0x2f, 0x20, 0x43, 0x6f, 0x70, 0x79, 0x72, 0x69, 0x67, 0x68, 0x74, 0x20, 0x32, 0x30, 0x30, 0x39, 0x20, 0x54, 0x68, 0x65, 0x20, 0x47, 0x6f, 0x20, 0x41, 0x75, 0x74, 0x68, 0x6f, 0x72, 0x73, 0x2e, 0x20, 0x41, 0x6c, 0x6c, 0x20, 0x4080001e, 0x73, 0x20, 0x72, 0x65, 0x73, 0x65, 0x72, 0x76, 0x65, 0x64, 0x2e, 0xd, 0xa, 0x2f, 0x2f, 0x20, 0x55, 0x73, 0x65, 0x20, 0x6f, 0x66, 0x20, 0x74, 0x68, 0x69, 0x73, 0x20, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x20, 0x63, 0x6f, 0x64, 0x65, 0x20, 0x69, 0x73, 0x20, 0x67, 0x6f, 0x76, 0x65, 0x72, 0x6e, 0x65, 0x64, 0x20, 0x62, 0x79, 0x20, 0x61, 0x20, 0x42, 0x53, 0x44, 0x2d, 0x73, 0x74, 0x79, 0x6c, 0x65, 0x40800036, 0x6c, 0x69, 0x63, 0x65, 0x6e, 0x73, 0x65, 0x20, 0x74, 0x68, 0x61, 0x74, 0x20, 0x63, 0x61, 0x6e, 0x20, 0x62, 0x65, 0x20, 0x66, 0x6f, 0x75, 0x6e, 0x64, 0x20, 0x69, 0x6e, 0x20, 0x74, 0x68, 0x65, 0x20, 0x4c, 0x49, 0x43, 0x45, 0x4e, 0x53, 0x45, 0x20, 0x66, 0x69, 0x6c, 0x65, 0x2e, 0xd, 0xa, 0xd, 0xa, 0x70, 0x61, 0x63, 0x6b, 0x61, 0x67, 0x65, 0x20, 0x6d, 0x61, 0x69, 0x6e, 0x4040000f, 0x69, 0x6d, 0x70, 0x6f, 0x72, 0x74, 0x20, 0x22, 0x6f, 0x73, 0x22, 0x4040000e, 0x66, 0x75, 0x6e, 0x63, 0x4080001b, 0x28, 0x29, 0x20, 0x7b, 0xd, 0xa, 0x9, 0x76, 0x61, 0x72, 0x20, 0x62, 0x20, 0x3d, 0x20, 0x6d, 0x61, 0x6b, 0x65, 0x28, 0x5b, 0x5d, 0x62, 0x79, 0x74, 0x65, 0x2c, 0x20, 0x36, 0x35, 0x35, 0x33, 0x35, 0x29, 0xd, 0xa, 0x9, 0x66, 0x2c, 0x20, 0x5f, 0x20, 0x3a, 0x3d, 0x20, 0x6f, 0x73, 0x2e, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x28, 0x22, 0x68, 0x75, 0x66, 0x66, 0x6d, 0x61, 0x6e, 0x2d, 0x6e, 0x75, 0x6c, 0x6c, 0x2d, 0x6d, 0x61, 0x78, 0x2e, 0x69, 0x6e, 0x22, 0x4080002a, 0x2e, 0x57, 0x72, 0x69, 0x74, 0x65, 0x28, 0x62, 0x29, 0xd, 0xa, 0x7d, 0xd, 0xa}, + }, + { + input: "testdata/huffman-zero.in", + want: "testdata/huffman-zero.%s.expect", + wantNoInput: "testdata/huffman-zero.%s.expect-noinput", + tokens: []token{0x30, ml, 0x4b800000}, + }, + { + input: "", + want: "", + wantNoInput: "testdata/null-long-match.%s.expect-noinput", + tokens: []token{0x0, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, 
ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, 0x41400000}, + }, +} + +// TestWriteBlock tests if the writeBlock encoding has changed. +// To update the reference files use the "-update" flag on the test. +func TestWriteBlock(t *testing.T) { + for _, test := range writeBlockTests { + testBlock(t, test, "wb") + } +} + +// TestWriteBlockDynamic tests if the writeBlockDynamic encoding has changed. +// To update the reference files use the "-update" flag on the test. +func TestWriteBlockDynamic(t *testing.T) { + for _, test := range writeBlockTests { + testBlock(t, test, "dyn") + } +} + +// testBlock tests a block against its references, +// or regenerate the references, if "-update" flag is set. 
+func testBlock(t *testing.T, test huffTest, ttype string) { + if test.want != "" { + test.want = fmt.Sprintf(test.want, ttype) + } + test.wantNoInput = fmt.Sprintf(test.wantNoInput, ttype) + if *update { + if test.input != "" { + t.Logf("Updating %q", test.want) + input, err := ioutil.ReadFile(test.input) + if err != nil { + t.Error(err) + return + } + + f, err := os.Create(test.want) + if err != nil { + t.Error(err) + return + } + defer f.Close() + bw := newHuffmanBitWriter(f) + writeToType(t, ttype, bw, test.tokens, input) + } + + t.Logf("Updating %q", test.wantNoInput) + f, err := os.Create(test.wantNoInput) + if err != nil { + t.Error(err) + return + } + defer f.Close() + bw := newHuffmanBitWriter(f) + writeToType(t, ttype, bw, test.tokens, nil) + return + } + + if test.input != "" { + t.Logf("Testing %q", test.want) + input, err := ioutil.ReadFile(test.input) + if err != nil { + t.Error(err) + return + } + want, err := ioutil.ReadFile(test.want) + if err != nil { + t.Error(err) + return + } + var buf bytes.Buffer + bw := newHuffmanBitWriter(&buf) + writeToType(t, ttype, bw, test.tokens, input) + + got := buf.Bytes() + if !bytes.Equal(got, want) { + t.Errorf("writeBlock did not yield expected result for file %q with input. See %q", test.want, test.want+".got") + if err := ioutil.WriteFile(test.want+".got", got, 0666); err != nil { + t.Error(err) + } + } + t.Log("Output ok") + + // Test if the writer produces the same output after reset. + buf.Reset() + bw.reset(&buf) + writeToType(t, ttype, bw, test.tokens, input) + bw.flush() + got = buf.Bytes() + if !bytes.Equal(got, want) { + t.Errorf("reset: writeBlock did not yield expected result for file %q with input. See %q", test.want, test.want+".reset.got") + if err := ioutil.WriteFile(test.want+".reset.got", got, 0666); err != nil { + t.Error(err) + } + return + } + t.Log("Reset ok") + testWriterEOF(t, "wb", test, true) + } + t.Logf("Testing %q", test.wantNoInput) + wantNI, err := ioutil.ReadFile(test.wantNoInput) + if err != nil { + t.Error(err) + return + } + var buf bytes.Buffer + bw := newHuffmanBitWriter(&buf) + writeToType(t, ttype, bw, test.tokens, nil) + + got := buf.Bytes() + if !bytes.Equal(got, wantNI) { + t.Errorf("writeBlock did not yield expected result for file %q with input. See %q", test.wantNoInput, test.wantNoInput+".got") + if err := ioutil.WriteFile(test.want+".got", got, 0666); err != nil { + t.Error(err) + } + } else if got[0]&1 == 1 { + t.Error("got unexpected EOF") + return + } + + t.Log("Output ok") + + // Test if the writer produces the same output after reset. + buf.Reset() + bw.reset(&buf) + writeToType(t, ttype, bw, test.tokens, nil) + bw.flush() + got = buf.Bytes() + if !bytes.Equal(got, wantNI) { + t.Errorf("reset: writeBlock did not yield expected result for file %q without input. See %q", test.want, test.want+".reset.got") + if err := ioutil.WriteFile(test.want+".reset.got", got, 0666); err != nil { + t.Error(err) + } + return + } + t.Log("Reset ok") + testWriterEOF(t, "wb", test, false) +} + +func writeToType(t *testing.T, ttype string, bw *huffmanBitWriter, tok []token, input []byte) { + switch ttype { + case "wb": + bw.writeBlock(tok, false, input) + case "dyn": + bw.writeBlockDynamic(tok, false, input) + default: + panic("unknown test type") + } + + if bw.err != nil { + t.Error(bw.err) + return + } + + bw.flush() + if bw.err != nil { + t.Error(bw.err) + return + } +} + +// testWriterEOF tests if the written block contains an EOF marker. 
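// Aside (not from the upstream source): testWriterEOF below leans on the
// DEFLATE framing used by the header writers, namely that the very first
// bit of a block is BFINAL. When a single block is written with eof set,
// the low bit of the first output byte must therefore be 1, which is
// exactly the b[0]&1 check performed.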
+func testWriterEOF(t *testing.T, ttype string, test huffTest, useInput bool) { + if useInput && test.input == "" { + return + } + var input []byte + if useInput { + var err error + input, err = ioutil.ReadFile(test.input) + if err != nil { + t.Error(err) + return + } + } + var buf bytes.Buffer + bw := newHuffmanBitWriter(&buf) + switch ttype { + case "wb": + bw.writeBlock(test.tokens, true, input) + case "dyn": + bw.writeBlockDynamic(test.tokens, true, input) + case "huff": + bw.writeBlockHuff(true, input) + default: + panic("unknown test type") + } + if bw.err != nil { + t.Error(bw.err) + return + } + + bw.flush() + if bw.err != nil { + t.Error(bw.err) + return + } + b := buf.Bytes() + if len(b) == 0 { + t.Error("no output received") + return + } + if b[0]&1 != 1 { + t.Errorf("block not marked with EOF for input %q", test.input) + return + } + t.Log("EOF ok") +} diff --git a/vendor/github.com/klauspost/compress/flate/huffman_code.go b/vendor/github.com/klauspost/compress/flate/huffman_code.go new file mode 100644 index 0000000..bdcbd82 --- /dev/null +++ b/vendor/github.com/klauspost/compress/flate/huffman_code.go @@ -0,0 +1,344 @@ +// Copyright 2009 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package flate + +import ( + "math" + "sort" +) + +// hcode is a huffman code with a bit code and bit length. +type hcode struct { + code, len uint16 +} + +type huffmanEncoder struct { + codes []hcode + freqcache []literalNode + bitCount [17]int32 + lns byLiteral // stored to avoid repeated allocation in generate + lfs byFreq // stored to avoid repeated allocation in generate +} + +type literalNode struct { + literal uint16 + freq int32 +} + +// A levelInfo describes the state of the constructed tree for a given depth. +type levelInfo struct { + // Our level. for better printing + level int32 + + // The frequency of the last node at this level + lastFreq int32 + + // The frequency of the next character to add to this level + nextCharFreq int32 + + // The frequency of the next pair (from level below) to add to this level. + // Only valid if the "needed" value of the next lower level is 0. + nextPairFreq int32 + + // The number of chains remaining to generate for this level before moving + // up to the next level + needed int32 +} + +// set sets the code and length of an hcode. +func (h *hcode) set(code uint16, length uint16) { + h.len = length + h.code = code +} + +func maxNode() literalNode { return literalNode{math.MaxUint16, math.MaxInt32} } + +func newHuffmanEncoder(size int) *huffmanEncoder { + return &huffmanEncoder{codes: make([]hcode, size)} +} + +// Generates a HuffmanCode corresponding to the fixed literal table +func generateFixedLiteralEncoding() *huffmanEncoder { + h := newHuffmanEncoder(maxNumLit) + codes := h.codes + var ch uint16 + for ch = 0; ch < maxNumLit; ch++ { + var bits uint16 + var size uint16 + switch { + case ch < 144: + // size 8, 000110000 .. 10111111 + bits = ch + 48 + size = 8 + break + case ch < 256: + // size 9, 110010000 .. 111111111 + bits = ch + 400 - 144 + size = 9 + break + case ch < 280: + // size 7, 0000000 .. 0010111 + bits = ch - 256 + size = 7 + break + default: + // size 8, 11000000 .. 
11000111 + bits = ch + 192 - 280 + size = 8 + } + codes[ch] = hcode{code: reverseBits(bits, byte(size)), len: size} + } + return h +} + +func generateFixedOffsetEncoding() *huffmanEncoder { + h := newHuffmanEncoder(30) + codes := h.codes + for ch := range codes { + codes[ch] = hcode{code: reverseBits(uint16(ch), 5), len: 5} + } + return h +} + +var fixedLiteralEncoding *huffmanEncoder = generateFixedLiteralEncoding() +var fixedOffsetEncoding *huffmanEncoder = generateFixedOffsetEncoding() + +func (h *huffmanEncoder) bitLength(freq []int32) int { + var total int + for i, f := range freq { + if f != 0 { + total += int(f) * int(h.codes[i].len) + } + } + return total +} + +const maxBitsLimit = 16 + +// Return the number of literals assigned to each bit size in the Huffman encoding +// +// This method is only called when list.length >= 3 +// The cases of 0, 1, and 2 literals are handled by special case code. +// +// list An array of the literals with non-zero frequencies +// and their associated frequencies. The array is in order of increasing +// frequency, and has as its last element a special element with frequency +// MaxInt32 +// maxBits The maximum number of bits that should be used to encode any literal. +// Must be less than 16. +// return An integer array in which array[i] indicates the number of literals +// that should be encoded in i bits. +func (h *huffmanEncoder) bitCounts(list []literalNode, maxBits int32) []int32 { + if maxBits >= maxBitsLimit { + panic("flate: maxBits too large") + } + n := int32(len(list)) + list = list[0 : n+1] + list[n] = maxNode() + + // The tree can't have greater depth than n - 1, no matter what. This + // saves a little bit of work in some small cases + if maxBits > n-1 { + maxBits = n - 1 + } + + // Create information about each of the levels. + // A bogus "Level 0" whose sole purpose is so that + // level1.prev.needed==0. This makes level1.nextPairFreq + // be a legitimate value that never gets chosen. + var levels [maxBitsLimit]levelInfo + // leafCounts[i] counts the number of literals at the left + // of ancestors of the rightmost node at level i. + // leafCounts[i][j] is the number of literals at the left + // of the level j ancestor. + var leafCounts [maxBitsLimit][maxBitsLimit]int32 + + for level := int32(1); level <= maxBits; level++ { + // For every level, the first two items are the first two characters. + // We initialize the levels as if we had already figured this out. + levels[level] = levelInfo{ + level: level, + lastFreq: list[1].freq, + nextCharFreq: list[2].freq, + nextPairFreq: list[0].freq + list[1].freq, + } + leafCounts[level][level] = 2 + if level == 1 { + levels[level].nextPairFreq = math.MaxInt32 + } + } + + // We need a total of 2*n - 2 items at top level and have already generated 2. + levels[maxBits].needed = 2*n - 4 + + level := maxBits + for { + l := &levels[level] + if l.nextPairFreq == math.MaxInt32 && l.nextCharFreq == math.MaxInt32 { + // We've run out of both leafs and pairs. + // End all calculations for this level. + // To make sure we never come back to this level or any lower level, + // set nextPairFreq impossibly large. + l.needed = 0 + levels[level+1].nextPairFreq = math.MaxInt32 + level++ + continue + } + + prevFreq := l.lastFreq + if l.nextCharFreq < l.nextPairFreq { + // The next item on this row is a leaf node. + n := leafCounts[level][level] + 1 + l.lastFreq = l.nextCharFreq + // Lower leafCounts are the same of the previous node. 
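// Aside (not from the upstream source): generateFixedLiteralEncoding above
// builds the fixed literal/length table of RFC 1951 3.2.6: symbols 0-143
// take the 8-bit codes 00110000-10111111, 144-255 the 9-bit codes
// 110010000-111111111, 256-279 the 7-bit codes 0000000-0010111, and 280-287
// the 8-bit codes 11000000-11000111. For example, literal 'A' (65) maps to
// 65+48 = 113 = 01110001 in 8 bits, and reverseBits then flips it because
// Huffman codes go onto DEFLATE's LSB-first bit stream starting with their
// most significant bit.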
+ leafCounts[level][level] = n + l.nextCharFreq = list[n].freq + } else { + // The next item on this row is a pair from the previous row. + // nextPairFreq isn't valid until we generate two + // more values in the level below + l.lastFreq = l.nextPairFreq + // Take leaf counts from the lower level, except counts[level] remains the same. + copy(leafCounts[level][:level], leafCounts[level-1][:level]) + levels[l.level-1].needed = 2 + } + + if l.needed--; l.needed == 0 { + // We've done everything we need to do for this level. + // Continue calculating one level up. Fill in nextPairFreq + // of that level with the sum of the two nodes we've just calculated on + // this level. + if l.level == maxBits { + // All done! + break + } + levels[l.level+1].nextPairFreq = prevFreq + l.lastFreq + level++ + } else { + // If we stole from below, move down temporarily to replenish it. + for levels[level-1].needed > 0 { + level-- + } + } + } + + // Somethings is wrong if at the end, the top level is null or hasn't used + // all of the leaves. + if leafCounts[maxBits][maxBits] != n { + panic("leafCounts[maxBits][maxBits] != n") + } + + bitCount := h.bitCount[:maxBits+1] + bits := 1 + counts := &leafCounts[maxBits] + for level := maxBits; level > 0; level-- { + // chain.leafCount gives the number of literals requiring at least "bits" + // bits to encode. + bitCount[bits] = counts[level] - counts[level-1] + bits++ + } + return bitCount +} + +// Look at the leaves and assign them a bit count and an encoding as specified +// in RFC 1951 3.2.2 +func (h *huffmanEncoder) assignEncodingAndSize(bitCount []int32, list []literalNode) { + code := uint16(0) + for n, bits := range bitCount { + code <<= 1 + if n == 0 || bits == 0 { + continue + } + // The literals list[len(list)-bits] .. list[len(list)-bits] + // are encoded using "bits" bits, and get the values + // code, code + 1, .... The code values are + // assigned in literal order (not frequency order). + chunk := list[len(list)-int(bits):] + + h.lns.sort(chunk) + for _, node := range chunk { + h.codes[node.literal] = hcode{code: reverseBits(code, uint8(n)), len: uint16(n)} + code++ + } + list = list[0 : len(list)-int(bits)] + } +} + +// Update this Huffman Code object to be the minimum code for the specified frequency count. +// +// freq An array of frequencies, in which frequency[i] gives the frequency of literal i. +// maxBits The maximum number of bits to use for any literal. +func (h *huffmanEncoder) generate(freq []int32, maxBits int32) { + if h.freqcache == nil { + // Allocate a reusable buffer with the longest possible frequency table. + // Possible lengths are codegenCodeCount, offsetCodeCount and maxNumLit. + // The largest of these is maxNumLit, so we allocate for that case. + h.freqcache = make([]literalNode, maxNumLit+1) + } + list := h.freqcache[:len(freq)+1] + // Number of non-zero literals + count := 0 + // Set list to be the set of all non-zero literals and their frequencies + for i, f := range freq { + if f != 0 { + list[count] = literalNode{uint16(i), f} + count++ + } else { + list[count] = literalNode{} + h.codes[i].len = 0 + } + } + list[len(freq)] = literalNode{} + + list = list[:count] + if count <= 2 { + // Handle the small cases here, because they are awkward for the general case code. With + // two or fewer literals, everything has bit length 1. + for i, node := range list { + // "list" is in order of increasing literal value. 
+ h.codes[node.literal].set(uint16(i), 1) + } + return + } + h.lfs.sort(list) + + // Get the number of literals for each bit count + bitCount := h.bitCounts(list, maxBits) + // And do the assignment + h.assignEncodingAndSize(bitCount, list) +} + +type byLiteral []literalNode + +func (s *byLiteral) sort(a []literalNode) { + *s = byLiteral(a) + sort.Sort(s) +} + +func (s byLiteral) Len() int { return len(s) } + +func (s byLiteral) Less(i, j int) bool { + return s[i].literal < s[j].literal +} + +func (s byLiteral) Swap(i, j int) { s[i], s[j] = s[j], s[i] } + +type byFreq []literalNode + +func (s *byFreq) sort(a []literalNode) { + *s = byFreq(a) + sort.Sort(s) +} + +func (s byFreq) Len() int { return len(s) } + +func (s byFreq) Less(i, j int) bool { + if s[i].freq == s[j].freq { + return s[i].literal < s[j].literal + } + return s[i].freq < s[j].freq +} + +func (s byFreq) Swap(i, j int) { s[i], s[j] = s[j], s[i] } diff --git a/vendor/github.com/klauspost/compress/flate/inflate.go b/vendor/github.com/klauspost/compress/flate/inflate.go new file mode 100644 index 0000000..53b63d9 --- /dev/null +++ b/vendor/github.com/klauspost/compress/flate/inflate.go @@ -0,0 +1,846 @@ +// Copyright 2009 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package flate implements the DEFLATE compressed data format, described in +// RFC 1951. The gzip and zlib packages implement access to DEFLATE-based file +// formats. +package flate + +import ( + "bufio" + "io" + "strconv" + "sync" +) + +const ( + maxCodeLen = 16 // max length of Huffman code + // The next three numbers come from the RFC section 3.2.7, with the + // additional proviso in section 3.2.5 which implies that distance codes + // 30 and 31 should never occur in compressed data. + maxNumLit = 286 + maxNumDist = 30 + numCodes = 19 // number of codes in Huffman meta-code +) + +// Initialize the fixedHuffmanDecoder only once upon first use. +var fixedOnce sync.Once +var fixedHuffmanDecoder huffmanDecoder + +// A CorruptInputError reports the presence of corrupt input at a given offset. +type CorruptInputError int64 + +func (e CorruptInputError) Error() string { + return "flate: corrupt input before offset " + strconv.FormatInt(int64(e), 10) +} + +// An InternalError reports an error in the flate code itself. +type InternalError string + +func (e InternalError) Error() string { return "flate: internal error: " + string(e) } + +// A ReadError reports an error encountered while reading input. +// +// Deprecated: No longer returned. +type ReadError struct { + Offset int64 // byte offset where error occurred + Err error // error returned by underlying Read +} + +func (e *ReadError) Error() string { + return "flate: read error at offset " + strconv.FormatInt(e.Offset, 10) + ": " + e.Err.Error() +} + +// A WriteError reports an error encountered while writing output. +// +// Deprecated: No longer returned. +type WriteError struct { + Offset int64 // byte offset where error occurred + Err error // error returned by underlying Write +} + +func (e *WriteError) Error() string { + return "flate: write error at offset " + strconv.FormatInt(e.Offset, 10) + ": " + e.Err.Error() +} + +// Resetter resets a ReadCloser returned by NewReader or NewReaderDict to +// to switch to a new underlying Reader. This permits reusing a ReadCloser +// instead of allocating a new one. 
+type Resetter interface { + // Reset discards any buffered data and resets the Resetter as if it was + // newly initialized with the given reader. + Reset(r io.Reader, dict []byte) error +} + +// The data structure for decoding Huffman tables is based on that of +// zlib. There is a lookup table of a fixed bit width (huffmanChunkBits), +// For codes smaller than the table width, there are multiple entries +// (each combination of trailing bits has the same value). For codes +// larger than the table width, the table contains a link to an overflow +// table. The width of each entry in the link table is the maximum code +// size minus the chunk width. +// +// Note that you can do a lookup in the table even without all bits +// filled. Since the extra bits are zero, and the DEFLATE Huffman codes +// have the property that shorter codes come before longer ones, the +// bit length estimate in the result is a lower bound on the actual +// number of bits. +// +// See the following: +// http://www.gzip.org/algorithm.txt + +// chunk & 15 is number of bits +// chunk >> 4 is value, including table link + +const ( + huffmanChunkBits = 9 + huffmanNumChunks = 1 << huffmanChunkBits + huffmanCountMask = 15 + huffmanValueShift = 4 +) + +type huffmanDecoder struct { + min int // the minimum code length + chunks [huffmanNumChunks]uint32 // chunks as described above + links [][]uint32 // overflow links + linkMask uint32 // mask the width of the link table +} + +// Initialize Huffman decoding tables from array of code lengths. +// Following this function, h is guaranteed to be initialized into a complete +// tree (i.e., neither over-subscribed nor under-subscribed). The exception is a +// degenerate case where the tree has only a single symbol with length 1. Empty +// trees are permitted. +func (h *huffmanDecoder) init(bits []int) bool { + // Sanity enables additional runtime tests during Huffman + // table construction. It's intended to be used during + // development to supplement the currently ad-hoc unit tests. + const sanity = false + + if h.min != 0 { + *h = huffmanDecoder{} + } + + // Count number of codes of each length, + // compute min and max length. + var count [maxCodeLen]int + var min, max int + for _, n := range bits { + if n == 0 { + continue + } + if min == 0 || n < min { + min = n + } + if n > max { + max = n + } + count[n]++ + } + + // Empty tree. The decompressor.huffSym function will fail later if the tree + // is used. Technically, an empty tree is only valid for the HDIST tree and + // not the HCLEN and HLIT tree. However, a stream with an empty HCLEN tree + // is guaranteed to fail since it will attempt to use the tree to decode the + // codes for the HLIT and HDIST trees. Similarly, an empty HLIT tree is + // guaranteed to fail later since the compressed data section must be + // composed of at least one symbol (the end-of-block marker). + if max == 0 { + return true + } + + code := 0 + var nextcode [maxCodeLen]int + for i := min; i <= max; i++ { + code <<= 1 + nextcode[i] = code + code += count[i] + } + + // Check that the coding is complete (i.e., that we've + // assigned all 2-to-the-max possible bit sequences). + // Exception: To be compatible with zlib, we also need to + // accept degenerate single-code codings. See also + // TestDegenerateHuffmanCoding. 
+ if code != 1<<uint(max) && !(code == 1 && max == 1) { + return false + } + + h.min = min + if max > huffmanChunkBits { + numLinks := 1 << (uint(max) - huffmanChunkBits) + h.linkMask = uint32(numLinks - 1) + + // create link tables + link := nextcode[huffmanChunkBits+1] >> 1 + h.links = make([][]uint32, huffmanNumChunks-link) + for j := uint(link); j < huffmanNumChunks; j++ { + reverse := int(reverseByte[j>>8]) | int(reverseByte[j&0xff])<<8 + reverse >>= uint(16 - huffmanChunkBits) + off := j - uint(link) + if sanity && h.chunks[reverse] != 0 { + panic("impossible: overwriting existing chunk") + } + h.chunks[reverse] = uint32(off<<huffmanValueShift | (huffmanChunkBits + 1)) + h.links[off] = make([]uint32, numLinks) + } + } else { + h.links = h.links[:0] + } + + for i, n := range bits { + if n == 0 { + continue + } + code := nextcode[n] + nextcode[n]++ + chunk := uint32(i<<huffmanValueShift | n) + reverse := int(reverseByte[code>>8]) | int(reverseByte[code&0xff])<<8 + reverse >>= uint(16 - n) + if n <= huffmanChunkBits { + for off := reverse; off < len(h.chunks); off += 1 << uint(n) { + // We should never need to overwrite + // an existing chunk. Also, 0 is + // never a valid chunk, because the + // lower 4 "count" bits should be + // between 1 and 15. + if sanity && h.chunks[off] != 0 { + panic("impossible: overwriting existing chunk") + } + h.chunks[off] = chunk + } + } else { + j := reverse & (huffmanNumChunks - 1) + if sanity && h.chunks[j]&huffmanCountMask != huffmanChunkBits+1 { + // Longer codes should have been + // associated with a link table above. + panic("impossible: not an indirect chunk") + } + value := h.chunks[j] >> huffmanValueShift + linktab := h.links[value] + reverse >>= huffmanChunkBits + for off := reverse; off < len(linktab); off += 1 << uint(n-huffmanChunkBits) { + if sanity && linktab[off] != 0 { + panic("impossible: overwriting existing chunk") + } + linktab[off] = chunk + } + } + } + + if sanity { + // Above we've sanity checked that we never overwrote + // an existing entry. Here we additionally check that + // we filled the tables completely. + for i, chunk := range h.chunks { + if chunk == 0 { + // As an exception, in the degenerate + // single-code case, we allow odd + // chunks to be missing. + if code == 1 && i%2 == 1 { + continue + } + panic("impossible: missing chunk") + } + } + for _, linktab := range h.links { + for _, chunk := range linktab { + if chunk == 0 { + panic("impossible: missing chunk") + } + } + } + } + + return true +} + +// The actual read interface needed by NewReader. +// If the passed in io.Reader does not also have ReadByte, +// the NewReader will introduce its own buffering. +type Reader interface { + io.Reader + io.ByteReader +} + +// Decompress state. +type decompressor struct { + // Input source. + r Reader + roffset int64 + + // Input bits, in top of b. + b uint32 + nb uint + + // Huffman decoders for literal/length, distance. + h1, h2 huffmanDecoder + + // Length arrays used to define Huffman codes. + bits *[maxNumLit + maxNumDist]int + codebits *[numCodes]int + + // Output history, buffer. + dict dictDecoder + + // Temporary buffer (avoids repeated allocation). + buf [4]byte + + // Next step in the decompression, + // and decompression state. + step func(*decompressor) + stepState int + final bool + err error + toRead []byte + hl, hd *huffmanDecoder + copyLen int + copyDist int +} + +func (f *decompressor) nextBlock() { + for f.nb < 1+2 { + if f.err = f.moreBits(); f.err != nil { + return + } + } + f.final = f.b&1 == 1 + f.b >>= 1 + typ := f.b & 3 + f.b >>= 2 + f.nb -= 1 + 2 + switch typ { + case 0: + f.dataBlock() + case 1: + // compressed, fixed Huffman tables + f.hl = &fixedHuffmanDecoder + f.hd = nil + f.huffmanBlock() + case 2: + // compressed, dynamic Huffman tables + if f.err = f.readHuffman(); f.err != nil { + break + } + f.hl = &f.h1 + f.hd = &f.h2 + f.huffmanBlock() + default: + // 3 is reserved. 
+ f.err = CorruptInputError(f.roffset) + } +} + +func (f *decompressor) Read(b []byte) (int, error) { + for { + if len(f.toRead) > 0 { + n := copy(b, f.toRead) + f.toRead = f.toRead[n:] + if len(f.toRead) == 0 { + return n, f.err + } + return n, nil + } + if f.err != nil { + return 0, f.err + } + f.step(f) + if f.err != nil && len(f.toRead) == 0 { + f.toRead = f.dict.readFlush() // Flush what's left in case of error + } + } +} + +// Support the io.WriteTo interface for io.Copy and friends. +func (f *decompressor) WriteTo(w io.Writer) (int64, error) { + total := int64(0) + flushed := false + for { + if len(f.toRead) > 0 { + n, err := w.Write(f.toRead) + total += int64(n) + if err != nil { + f.err = err + return total, err + } + if n != len(f.toRead) { + return total, io.ErrShortWrite + } + f.toRead = f.toRead[:0] + } + if f.err != nil && flushed { + if f.err == io.EOF { + return total, nil + } + return total, f.err + } + if f.err == nil { + f.step(f) + } + if len(f.toRead) == 0 && f.err != nil && !flushed { + f.toRead = f.dict.readFlush() // Flush what's left in case of error + flushed = true + } + } +} + +func (f *decompressor) Close() error { + if f.err == io.EOF { + return nil + } + return f.err +} + +// RFC 1951 section 3.2.7. +// Compression with dynamic Huffman codes + +var codeOrder = [...]int{16, 17, 18, 0, 8, 7, 9, 6, 10, 5, 11, 4, 12, 3, 13, 2, 14, 1, 15} + +func (f *decompressor) readHuffman() error { + // HLIT[5], HDIST[5], HCLEN[4]. + for f.nb < 5+5+4 { + if err := f.moreBits(); err != nil { + return err + } + } + nlit := int(f.b&0x1F) + 257 + if nlit > maxNumLit { + return CorruptInputError(f.roffset) + } + f.b >>= 5 + ndist := int(f.b&0x1F) + 1 + if ndist > maxNumDist { + return CorruptInputError(f.roffset) + } + f.b >>= 5 + nclen := int(f.b&0xF) + 4 + // numCodes is 19, so nclen is always valid. + f.b >>= 4 + f.nb -= 5 + 5 + 4 + + // (HCLEN+4)*3 bits: code lengths in the magic codeOrder order. + for i := 0; i < nclen; i++ { + for f.nb < 3 { + if err := f.moreBits(); err != nil { + return err + } + } + f.codebits[codeOrder[i]] = int(f.b & 0x7) + f.b >>= 3 + f.nb -= 3 + } + for i := nclen; i < len(codeOrder); i++ { + f.codebits[codeOrder[i]] = 0 + } + if !f.h1.init(f.codebits[0:]) { + return CorruptInputError(f.roffset) + } + + // HLIT + 257 code lengths, HDIST + 1 code lengths, + // using the code length Huffman code. + for i, n := 0, nlit+ndist; i < n; { + x, err := f.huffSym(&f.h1) + if err != nil { + return err + } + if x < 16 { + // Actual length. + f.bits[i] = x + i++ + continue + } + // Repeat previous length or zero. + var rep int + var nb uint + var b int + switch x { + default: + return InternalError("unexpected length code") + case 16: + rep = 3 + nb = 2 + if i == 0 { + return CorruptInputError(f.roffset) + } + b = f.bits[i-1] + case 17: + rep = 3 + nb = 3 + b = 0 + case 18: + rep = 11 + nb = 7 + b = 0 + } + for f.nb < nb { + if err := f.moreBits(); err != nil { + return err + } + } + rep += int(f.b & uint32(1<<nb-1)) + f.b >>= nb + f.nb -= nb + if i+rep > n { + return CorruptInputError(f.roffset) + } + for j := 0; j < rep; j++ { + f.bits[i] = b + i++ + } + } + + if !f.h1.init(f.bits[0:nlit]) || !f.h2.init(f.bits[nlit:nlit+ndist]) { + return CorruptInputError(f.roffset) + } + + // As an optimization, we can initialize the min bits to read at a time + // for the HLIT tree to the length of the EOB marker since we know that + // every block must terminate with one. This preserves the property that + // we never read any extra bytes after the end of the DEFLATE stream. 
+ if f.h1.min < f.bits[endBlockMarker] { + f.h1.min = f.bits[endBlockMarker] + } + + return nil +} + +// Decode a single Huffman block from f. +// hl and hd are the Huffman states for the lit/length values +// and the distance values, respectively. If hd == nil, using the +// fixed distance encoding associated with fixed Huffman blocks. +func (f *decompressor) huffmanBlock() { + const ( + stateInit = iota // Zero value must be stateInit + stateDict + ) + + switch f.stepState { + case stateInit: + goto readLiteral + case stateDict: + goto copyHistory + } + +readLiteral: + // Read literal and/or (length, distance) according to RFC section 3.2.3. + { + v, err := f.huffSym(f.hl) + if err != nil { + f.err = err + return + } + var n uint // number of bits extra + var length int + switch { + case v < 256: + f.dict.writeByte(byte(v)) + if f.dict.availWrite() == 0 { + f.toRead = f.dict.readFlush() + f.step = (*decompressor).huffmanBlock + f.stepState = stateInit + return + } + goto readLiteral + case v == 256: + f.finishBlock() + return + // otherwise, reference to older data + case v < 265: + length = v - (257 - 3) + n = 0 + case v < 269: + length = v*2 - (265*2 - 11) + n = 1 + case v < 273: + length = v*4 - (269*4 - 19) + n = 2 + case v < 277: + length = v*8 - (273*8 - 35) + n = 3 + case v < 281: + length = v*16 - (277*16 - 67) + n = 4 + case v < 285: + length = v*32 - (281*32 - 131) + n = 5 + case v < maxNumLit: + length = 258 + n = 0 + default: + f.err = CorruptInputError(f.roffset) + return + } + if n > 0 { + for f.nb < n { + if err = f.moreBits(); err != nil { + f.err = err + return + } + } + length += int(f.b & uint32(1<<n-1)) + f.b >>= n + f.nb -= n + } + + var dist int + if f.hd == nil { + for f.nb < 5 { + if err = f.moreBits(); err != nil { + f.err = err + return + } + } + dist = int(reverseByte[(f.b&0x1F)<<3]) + f.b >>= 5 + f.nb -= 5 + } else { + if dist, err = f.huffSym(f.hd); err != nil { + f.err = err + return + } + } + + switch { + case dist < 4: + dist++ + case dist < maxNumDist: + nb := uint(dist-2) >> 1 + // have 1 bit in bottom of dist, need nb more. + extra := (dist & 1) << nb + for f.nb < nb { + if err = f.moreBits(); err != nil { + f.err = err + return + } + } + extra |= int(f.b & uint32(1<<nb-1)) + f.b >>= nb + f.nb -= nb + dist = 1<<(nb+1) + 1 + extra + default: + f.err = CorruptInputError(f.roffset) + return + } + + // No check on length; encoding can be prescient. + if dist > f.dict.histSize() { + f.err = CorruptInputError(f.roffset) + return + } + + f.copyLen, f.copyDist = length, dist + goto copyHistory + } + +copyHistory: + // Perform a backwards copy according to RFC section 3.2.3. + { + cnt := f.dict.tryWriteCopy(f.copyDist, f.copyLen) + if cnt == 0 { + cnt = f.dict.writeCopy(f.copyDist, f.copyLen) + } + f.copyLen -= cnt + + if f.dict.availWrite() == 0 || f.copyLen > 0 { + f.toRead = f.dict.readFlush() + f.step = (*decompressor).huffmanBlock // We need to continue this work + f.stepState = stateDict + return + } + goto readLiteral + } +} + +// Copy a single uncompressed data block from input to output. +func (f *decompressor) dataBlock() { + // Uncompressed. + // Discard current half-byte. + f.nb = 0 + f.b = 0 + + // Length then ones-complement of length. 
+ nr, err := io.ReadFull(f.r, f.buf[0:4]) + f.roffset += int64(nr) + if err != nil { + if err == io.EOF { + err = io.ErrUnexpectedEOF + } + f.err = err + return + } + n := int(f.buf[0]) | int(f.buf[1])<<8 + nn := int(f.buf[2]) | int(f.buf[3])<<8 + if uint16(nn) != uint16(^n) { + f.err = CorruptInputError(f.roffset) + return + } + + if n == 0 { + f.toRead = f.dict.readFlush() + f.finishBlock() + return + } + + f.copyLen = n + f.copyData() +} + +// copyData copies f.copyLen bytes from the underlying reader into f.hist. +// It pauses for reads when f.hist is full. +func (f *decompressor) copyData() { + buf := f.dict.writeSlice() + if len(buf) > f.copyLen { + buf = buf[:f.copyLen] + } + + cnt, err := io.ReadFull(f.r, buf) + f.roffset += int64(cnt) + f.copyLen -= cnt + f.dict.writeMark(cnt) + if err != nil { + if err == io.EOF { + err = io.ErrUnexpectedEOF + } + f.err = err + return + } + + if f.dict.availWrite() == 0 || f.copyLen > 0 { + f.toRead = f.dict.readFlush() + f.step = (*decompressor).copyData + return + } + f.finishBlock() +} + +func (f *decompressor) finishBlock() { + if f.final { + if f.dict.availRead() > 0 { + f.toRead = f.dict.readFlush() + } + f.err = io.EOF + } + f.step = (*decompressor).nextBlock +} + +func (f *decompressor) moreBits() error { + c, err := f.r.ReadByte() + if err != nil { + if err == io.EOF { + err = io.ErrUnexpectedEOF + } + return err + } + f.roffset++ + f.b |= uint32(c) << f.nb + f.nb += 8 + return nil +} + +// Read the next Huffman-encoded symbol from f according to h. +func (f *decompressor) huffSym(h *huffmanDecoder) (int, error) { + // Since a huffmanDecoder can be empty or be composed of a degenerate tree + // with single element, huffSym must error on these two edge cases. In both + // cases, the chunks slice will be 0 for the invalid sequence, leading it + // satisfy the n == 0 check below. + n := uint(h.min) + for { + for f.nb < n { + if err := f.moreBits(); err != nil { + return 0, err + } + } + chunk := h.chunks[f.b&(huffmanNumChunks-1)] + n = uint(chunk & huffmanCountMask) + if n > huffmanChunkBits { + chunk = h.links[chunk>>huffmanValueShift][(f.b>>huffmanChunkBits)&h.linkMask] + n = uint(chunk & huffmanCountMask) + } + if n <= f.nb { + if n == 0 { + f.err = CorruptInputError(f.roffset) + return 0, f.err + } + f.b >>= n + f.nb -= n + return int(chunk >> huffmanValueShift), nil + } + } +} + +func makeReader(r io.Reader) Reader { + if rr, ok := r.(Reader); ok { + return rr + } + return bufio.NewReader(r) +} + +func fixedHuffmanDecoderInit() { + fixedOnce.Do(func() { + // These come from the RFC section 3.2.6. + var bits [288]int + for i := 0; i < 144; i++ { + bits[i] = 8 + } + for i := 144; i < 256; i++ { + bits[i] = 9 + } + for i := 256; i < 280; i++ { + bits[i] = 7 + } + for i := 280; i < 288; i++ { + bits[i] = 8 + } + fixedHuffmanDecoder.init(bits[:]) + }) +} + +func (f *decompressor) Reset(r io.Reader, dict []byte) error { + *f = decompressor{ + r: makeReader(r), + bits: f.bits, + codebits: f.codebits, + dict: f.dict, + step: (*decompressor).nextBlock, + } + f.dict.init(maxMatchOffset, dict) + return nil +} + +// NewReader returns a new ReadCloser that can be used +// to read the uncompressed version of r. +// If r does not also implement io.ByteReader, +// the decompressor may read more data than necessary from r. +// It is the caller's responsibility to call Close on the ReadCloser +// when finished reading. +// +// The ReadCloser returned by NewReader also implements Resetter. 
+func NewReader(r io.Reader) io.ReadCloser { + fixedHuffmanDecoderInit() + + var f decompressor + f.r = makeReader(r) + f.bits = new([maxNumLit + maxNumDist]int) + f.codebits = new([numCodes]int) + f.step = (*decompressor).nextBlock + f.dict.init(maxMatchOffset, nil) + return &f +} + +// NewReaderDict is like NewReader but initializes the reader +// with a preset dictionary. The returned Reader behaves as if +// the uncompressed data stream started with the given dictionary, +// which has already been read. NewReaderDict is typically used +// to read data compressed by NewWriterDict. +// +// The ReadCloser returned by NewReader also implements Resetter. +func NewReaderDict(r io.Reader, dict []byte) io.ReadCloser { + fixedHuffmanDecoderInit() + + var f decompressor + f.r = makeReader(r) + f.bits = new([maxNumLit + maxNumDist]int) + f.codebits = new([numCodes]int) + f.step = (*decompressor).nextBlock + f.dict.init(maxMatchOffset, dict) + return &f +} diff --git a/vendor/github.com/klauspost/compress/flate/inflate_test.go b/vendor/github.com/klauspost/compress/flate/inflate_test.go new file mode 100644 index 0000000..8402c0c --- /dev/null +++ b/vendor/github.com/klauspost/compress/flate/inflate_test.go @@ -0,0 +1,282 @@ +// Copyright 2014 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package flate + +import ( + "bytes" + "crypto/rand" + "io" + "io/ioutil" + "strconv" + "strings" + "testing" +) + +func TestReset(t *testing.T) { + ss := []string{ + "lorem ipsum izzle fo rizzle", + "the quick brown fox jumped over", + } + + deflated := make([]bytes.Buffer, 2) + for i, s := range ss { + w, _ := NewWriter(&deflated[i], 1) + w.Write([]byte(s)) + w.Close() + } + + inflated := make([]bytes.Buffer, 2) + + f := NewReader(&deflated[0]) + io.Copy(&inflated[0], f) + f.(Resetter).Reset(&deflated[1], nil) + io.Copy(&inflated[1], f) + f.Close() + + for i, s := range ss { + if s != inflated[i].String() { + t.Errorf("inflated[%d]:\ngot %q\nwant %q", i, inflated[i], s) + } + } +} + +func TestReaderTruncated(t *testing.T) { + vectors := []struct{ input, output string }{ + {"\x00", ""}, + {"\x00\f", ""}, + {"\x00\f\x00", ""}, + {"\x00\f\x00\xf3\xff", ""}, + {"\x00\f\x00\xf3\xffhello", "hello"}, + {"\x00\f\x00\xf3\xffhello, world", "hello, world"}, + {"\x02", ""}, + {"\xf2H\xcd", "He"}, + {"\xf2H͙0a\u0084\t", "Hel\x90\x90\x90\x90\x90"}, + {"\xf2H͙0a\u0084\t\x00", "Hel\x90\x90\x90\x90\x90"}, + } + + for i, v := range vectors { + r := strings.NewReader(v.input) + zr := NewReader(r) + b, err := ioutil.ReadAll(zr) + if err != io.ErrUnexpectedEOF { + t.Errorf("test %d, error mismatch: got %v, want io.ErrUnexpectedEOF", i, err) + } + if string(b) != v.output { + t.Errorf("test %d, output mismatch: got %q, want %q", i, b, v.output) + } + } +} + +func TestResetDict(t *testing.T) { + dict := []byte("the lorem fox") + ss := []string{ + "lorem ipsum izzle fo rizzle", + "the quick brown fox jumped over", + } + + deflated := make([]bytes.Buffer, len(ss)) + for i, s := range ss { + w, _ := NewWriterDict(&deflated[i], DefaultCompression, dict) + w.Write([]byte(s)) + w.Close() + } + + inflated := make([]bytes.Buffer, len(ss)) + + f := NewReader(nil) + for i := range inflated { + f.(Resetter).Reset(&deflated[i], dict) + io.Copy(&inflated[i], f) + } + f.Close() + + for i, s := range ss { + if s != inflated[i].String() { + t.Errorf("inflated[%d]:\ngot %q\nwant %q", i, inflated[i], s) + } + } +} + +// Tests ported from 
zlib/test/infcover.c +type infTest struct { + hex string + id string + n int +} + +var infTests = []infTest{ + {"0 0 0 0 0", "invalid stored block lengths", 1}, + {"3 0", "fixed", 0}, + {"6", "invalid block type", 1}, + {"1 1 0 fe ff 0", "stored", 0}, + {"fc 0 0", "too many length or distance symbols", 1}, + {"4 0 fe ff", "invalid code lengths set", 1}, + {"4 0 24 49 0", "invalid bit length repeat", 1}, + {"4 0 24 e9 ff ff", "invalid bit length repeat", 1}, + {"4 0 24 e9 ff 6d", "invalid code -- missing end-of-block", 1}, + {"4 80 49 92 24 49 92 24 71 ff ff 93 11 0", "invalid literal/lengths set", 1}, + {"4 80 49 92 24 49 92 24 f b4 ff ff c3 84", "invalid distances set", 1}, + {"4 c0 81 8 0 0 0 0 20 7f eb b 0 0", "invalid literal/length code", 1}, + {"2 7e ff ff", "invalid distance code", 1}, + {"c c0 81 0 0 0 0 0 90 ff 6b 4 0", "invalid distance too far back", 1}, + + // also trailer mismatch just in inflate() + {"1f 8b 8 0 0 0 0 0 0 0 3 0 0 0 0 1", "incorrect data check", -1}, + {"1f 8b 8 0 0 0 0 0 0 0 3 0 0 0 0 0 0 0 0 1", "incorrect length check", -1}, + {"5 c0 21 d 0 0 0 80 b0 fe 6d 2f 91 6c", "pull 17", 0}, + {"5 e0 81 91 24 cb b2 2c 49 e2 f 2e 8b 9a 47 56 9f fb fe ec d2 ff 1f", "long code", 0}, + {"ed c0 1 1 0 0 0 40 20 ff 57 1b 42 2c 4f", "length extra", 0}, + {"ed cf c1 b1 2c 47 10 c4 30 fa 6f 35 1d 1 82 59 3d fb be 2e 2a fc f c", "long distance and extra", 0}, + {"ed c0 81 0 0 0 0 80 a0 fd a9 17 a9 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 6", "window end", 0}, +} + +func TestInflate(t *testing.T) { + for _, test := range infTests { + hex := strings.Split(test.hex, " ") + data := make([]byte, len(hex)) + for i, h := range hex { + b, _ := strconv.ParseInt(h, 16, 32) + data[i] = byte(b) + } + buf := bytes.NewReader(data) + r := NewReader(buf) + + _, err := io.Copy(ioutil.Discard, r) + if (test.n == 0 && err == nil) || (test.n != 0 && err != nil) { + t.Logf("%q: OK:", test.id) + t.Logf(" - got %v", err) + continue + } + + if test.n == 0 && err != nil { + t.Errorf("%q: Expected no error, but got %v", test.id, err) + continue + } + + if test.n != 0 && err == nil { + t.Errorf("%q:Expected an error, but got none", test.id) + continue + } + t.Fatal(test.n, err) + } + + for _, test := range infOutTests { + hex := strings.Split(test.hex, " ") + data := make([]byte, len(hex)) + for i, h := range hex { + b, _ := strconv.ParseInt(h, 16, 32) + data[i] = byte(b) + } + buf := bytes.NewReader(data) + r := NewReader(buf) + + _, err := io.Copy(ioutil.Discard, r) + if test.err == (err != nil) { + t.Logf("%q: OK:", test.id) + t.Logf(" - got %v", err) + continue + } + + if test.err == false && err != nil { + t.Errorf("%q: Expected no error, but got %v", test.id, err) + continue + } + + if test.err && err == nil { + t.Errorf("%q: Expected an error, but got none", test.id) + continue + } + t.Fatal(test.err, err) + } + +} + +// Tests ported from zlib/test/infcover.c +// Since zlib inflate is push (writer) instead of pull (reader) +// some of the window size tests have been removed, since they +// are irrelevant. 
+type infOutTest struct { + hex string + id string + step int + win int + length int + err bool +} + +var infOutTests = []infOutTest{ + {"2 8 20 80 0 3 0", "inflate_fast TYPE return", 0, -15, 258, false}, + {"63 18 5 40 c 0", "window wrap", 3, -8, 300, false}, + {"e5 e0 81 ad 6d cb b2 2c c9 01 1e 59 63 ae 7d ee fb 4d fd b5 35 41 68 ff 7f 0f 0 0 0", "fast length extra bits", 0, -8, 258, true}, + {"25 fd 81 b5 6d 59 b6 6a 49 ea af 35 6 34 eb 8c b9 f6 b9 1e ef 67 49 50 fe ff ff 3f 0 0", "fast distance extra bits", 0, -8, 258, true}, + {"3 7e 0 0 0 0 0", "fast invalid distance code", 0, -8, 258, true}, + {"1b 7 0 0 0 0 0", "fast invalid literal/length code", 0, -8, 258, true}, + {"d c7 1 ae eb 38 c 4 41 a0 87 72 de df fb 1f b8 36 b1 38 5d ff ff 0", "fast 2nd level codes and too far back", 0, -8, 258, true}, + {"63 18 5 8c 10 8 0 0 0 0", "very common case", 0, -8, 259, false}, + {"63 60 60 18 c9 0 8 18 18 18 26 c0 28 0 29 0 0 0", "contiguous and wrap around window", 6, -8, 259, false}, + {"63 0 3 0 0 0 0 0", "copy direct from output", 0, -8, 259, false}, + {"1f 8b 0 0", "bad gzip method", 0, 31, 0, true}, + {"1f 8b 8 80", "bad gzip flags", 0, 31, 0, true}, + {"77 85", "bad zlib method", 0, 15, 0, true}, + {"78 9c", "bad zlib window size", 0, 8, 0, true}, + {"1f 8b 8 1e 0 0 0 0 0 0 1 0 0 0 0 0 0", "bad header crc", 0, 47, 1, true}, + {"1f 8b 8 2 0 0 0 0 0 0 1d 26 3 0 0 0 0 0 0 0 0 0", "check gzip length", 0, 47, 0, true}, + {"78 90", "bad zlib header check", 0, 47, 0, true}, + {"8 b8 0 0 0 1", "need dictionary", 0, 8, 0, true}, + {"63 18 68 30 d0 0 0", "force split window update", 4, -8, 259, false}, + {"3 0", "use fixed blocks", 0, -15, 1, false}, + {"", "bad window size", 0, 1, 0, true}, +} + +func TestWriteTo(t *testing.T) { + input := make([]byte, 100000) + n, err := rand.Read(input) + if err != nil { + t.Fatal(err) + } + if n != len(input) { + t.Fatal("did not fill buffer") + } + compressed := &bytes.Buffer{} + w, err := NewWriter(compressed, -2) + if err != nil { + t.Fatal(err) + } + n, err = w.Write(input) + if err != nil { + t.Fatal(err) + } + if n != len(input) { + t.Fatal("did not fill buffer") + } + w.Close() + buf := compressed.Bytes() + + dec := NewReader(bytes.NewBuffer(buf)) + // ReadAll does not use WriteTo, but we wrap it in a NopCloser to be sure. + readall, err := ioutil.ReadAll(ioutil.NopCloser(dec)) + if err != nil { + t.Fatal(err) + } + if len(readall) != len(input) { + t.Fatal("did not decompress everything") + } + + dec = NewReader(bytes.NewBuffer(buf)) + wtbuf := &bytes.Buffer{} + written, err := dec.(io.WriterTo).WriteTo(wtbuf) + if err != nil { + t.Fatal(err) + } + if written != int64(len(input)) { + t.Error("Returned length did not match, expected", len(input), "got", written) + } + if wtbuf.Len() != len(input) { + t.Error("Actual Length did not match, expected", len(input), "got", wtbuf.Len()) + } + if bytes.Compare(wtbuf.Bytes(), input) != 0 { + t.Fatal("output did not match input") + } +} diff --git a/vendor/github.com/klauspost/compress/flate/reader_test.go b/vendor/github.com/klauspost/compress/flate/reader_test.go new file mode 100644 index 0000000..e42bd01 --- /dev/null +++ b/vendor/github.com/klauspost/compress/flate/reader_test.go @@ -0,0 +1,97 @@ +// Copyright 2012 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +package flate + +import ( + "bytes" + "io" + "io/ioutil" + "runtime" + "strings" + "testing" +) + +func TestNlitOutOfRange(t *testing.T) { + // Trying to decode this bogus flate data, which has a Huffman table + // with nlit=288, should not panic. + io.Copy(ioutil.Discard, NewReader(strings.NewReader( + "\xfc\xfe\x36\xe7\x5e\x1c\xef\xb3\x55\x58\x77\xb6\x56\xb5\x43\xf4"+ + "\x6f\xf2\xd2\xe6\x3d\x99\xa0\x85\x8c\x48\xeb\xf8\xda\x83\x04\x2a"+ + "\x75\xc4\xf8\x0f\x12\x11\xb9\xb4\x4b\x09\xa0\xbe\x8b\x91\x4c"))) +} + +const ( + digits = iota + twain +) + +var testfiles = []string{ + // Digits is the digits of the irrational number e. Its decimal representation + // does not repeat, but there are only 10 possible digits, so it should be + // reasonably compressible. + digits: "../testdata/e.txt", + // Twain is Project Gutenberg's edition of Mark Twain's classic English novel. + twain: "../testdata/Mark.Twain-Tom.Sawyer.txt", +} + +func benchmarkDecode(b *testing.B, testfile, level, n int) { + b.ReportAllocs() + b.StopTimer() + b.SetBytes(int64(n)) + buf0, err := ioutil.ReadFile(testfiles[testfile]) + if err != nil { + b.Fatal(err) + } + if len(buf0) == 0 { + b.Fatalf("test file %q has no data", testfiles[testfile]) + } + compressed := new(bytes.Buffer) + w, err := NewWriter(compressed, level) + if err != nil { + b.Fatal(err) + } + for i := 0; i < n; i += len(buf0) { + if len(buf0) > n-i { + buf0 = buf0[:n-i] + } + io.Copy(w, bytes.NewReader(buf0)) + } + w.Close() + buf1 := compressed.Bytes() + buf0, compressed, w = nil, nil, nil + runtime.GC() + b.StartTimer() + for i := 0; i < b.N; i++ { + io.Copy(ioutil.Discard, NewReader(bytes.NewReader(buf1))) + } +} + +// These short names are so that gofmt doesn't break the BenchmarkXxx function +// bodies below over multiple lines. 
+const ( + constant = ConstantCompression + speed = BestSpeed + default_ = DefaultCompression + compress = BestCompression +) + +func BenchmarkDecodeDigitsSpeed1e4(b *testing.B) { benchmarkDecode(b, digits, speed, 1e4) } +func BenchmarkDecodeDigitsSpeed1e5(b *testing.B) { benchmarkDecode(b, digits, speed, 1e5) } +func BenchmarkDecodeDigitsSpeed1e6(b *testing.B) { benchmarkDecode(b, digits, speed, 1e6) } +func BenchmarkDecodeDigitsDefault1e4(b *testing.B) { benchmarkDecode(b, digits, default_, 1e4) } +func BenchmarkDecodeDigitsDefault1e5(b *testing.B) { benchmarkDecode(b, digits, default_, 1e5) } +func BenchmarkDecodeDigitsDefault1e6(b *testing.B) { benchmarkDecode(b, digits, default_, 1e6) } +func BenchmarkDecodeDigitsCompress1e4(b *testing.B) { benchmarkDecode(b, digits, compress, 1e4) } +func BenchmarkDecodeDigitsCompress1e5(b *testing.B) { benchmarkDecode(b, digits, compress, 1e5) } +func BenchmarkDecodeDigitsCompress1e6(b *testing.B) { benchmarkDecode(b, digits, compress, 1e6) } +func BenchmarkDecodeTwainSpeed1e4(b *testing.B) { benchmarkDecode(b, twain, speed, 1e4) } +func BenchmarkDecodeTwainSpeed1e5(b *testing.B) { benchmarkDecode(b, twain, speed, 1e5) } +func BenchmarkDecodeTwainSpeed1e6(b *testing.B) { benchmarkDecode(b, twain, speed, 1e6) } +func BenchmarkDecodeTwainDefault1e4(b *testing.B) { benchmarkDecode(b, twain, default_, 1e4) } +func BenchmarkDecodeTwainDefault1e5(b *testing.B) { benchmarkDecode(b, twain, default_, 1e5) } +func BenchmarkDecodeTwainDefault1e6(b *testing.B) { benchmarkDecode(b, twain, default_, 1e6) } +func BenchmarkDecodeTwainCompress1e4(b *testing.B) { benchmarkDecode(b, twain, compress, 1e4) } +func BenchmarkDecodeTwainCompress1e5(b *testing.B) { benchmarkDecode(b, twain, compress, 1e5) } +func BenchmarkDecodeTwainCompress1e6(b *testing.B) { benchmarkDecode(b, twain, compress, 1e6) } diff --git a/vendor/github.com/klauspost/compress/flate/reverse_bits.go b/vendor/github.com/klauspost/compress/flate/reverse_bits.go new file mode 100644 index 0000000..c1a0272 --- /dev/null +++ b/vendor/github.com/klauspost/compress/flate/reverse_bits.go @@ -0,0 +1,48 @@ +// Copyright 2009 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +package flate + +var reverseByte = [256]byte{ + 0x00, 0x80, 0x40, 0xc0, 0x20, 0xa0, 0x60, 0xe0, + 0x10, 0x90, 0x50, 0xd0, 0x30, 0xb0, 0x70, 0xf0, + 0x08, 0x88, 0x48, 0xc8, 0x28, 0xa8, 0x68, 0xe8, + 0x18, 0x98, 0x58, 0xd8, 0x38, 0xb8, 0x78, 0xf8, + 0x04, 0x84, 0x44, 0xc4, 0x24, 0xa4, 0x64, 0xe4, + 0x14, 0x94, 0x54, 0xd4, 0x34, 0xb4, 0x74, 0xf4, + 0x0c, 0x8c, 0x4c, 0xcc, 0x2c, 0xac, 0x6c, 0xec, + 0x1c, 0x9c, 0x5c, 0xdc, 0x3c, 0xbc, 0x7c, 0xfc, + 0x02, 0x82, 0x42, 0xc2, 0x22, 0xa2, 0x62, 0xe2, + 0x12, 0x92, 0x52, 0xd2, 0x32, 0xb2, 0x72, 0xf2, + 0x0a, 0x8a, 0x4a, 0xca, 0x2a, 0xaa, 0x6a, 0xea, + 0x1a, 0x9a, 0x5a, 0xda, 0x3a, 0xba, 0x7a, 0xfa, + 0x06, 0x86, 0x46, 0xc6, 0x26, 0xa6, 0x66, 0xe6, + 0x16, 0x96, 0x56, 0xd6, 0x36, 0xb6, 0x76, 0xf6, + 0x0e, 0x8e, 0x4e, 0xce, 0x2e, 0xae, 0x6e, 0xee, + 0x1e, 0x9e, 0x5e, 0xde, 0x3e, 0xbe, 0x7e, 0xfe, + 0x01, 0x81, 0x41, 0xc1, 0x21, 0xa1, 0x61, 0xe1, + 0x11, 0x91, 0x51, 0xd1, 0x31, 0xb1, 0x71, 0xf1, + 0x09, 0x89, 0x49, 0xc9, 0x29, 0xa9, 0x69, 0xe9, + 0x19, 0x99, 0x59, 0xd9, 0x39, 0xb9, 0x79, 0xf9, + 0x05, 0x85, 0x45, 0xc5, 0x25, 0xa5, 0x65, 0xe5, + 0x15, 0x95, 0x55, 0xd5, 0x35, 0xb5, 0x75, 0xf5, + 0x0d, 0x8d, 0x4d, 0xcd, 0x2d, 0xad, 0x6d, 0xed, + 0x1d, 0x9d, 0x5d, 0xdd, 0x3d, 0xbd, 0x7d, 0xfd, + 0x03, 0x83, 0x43, 0xc3, 0x23, 0xa3, 0x63, 0xe3, + 0x13, 0x93, 0x53, 0xd3, 0x33, 0xb3, 0x73, 0xf3, + 0x0b, 0x8b, 0x4b, 0xcb, 0x2b, 0xab, 0x6b, 0xeb, + 0x1b, 0x9b, 0x5b, 0xdb, 0x3b, 0xbb, 0x7b, 0xfb, + 0x07, 0x87, 0x47, 0xc7, 0x27, 0xa7, 0x67, 0xe7, + 0x17, 0x97, 0x57, 0xd7, 0x37, 0xb7, 0x77, 0xf7, + 0x0f, 0x8f, 0x4f, 0xcf, 0x2f, 0xaf, 0x6f, 0xef, + 0x1f, 0x9f, 0x5f, 0xdf, 0x3f, 0xbf, 0x7f, 0xff, +} + +func reverseUint16(v uint16) uint16 { + return uint16(reverseByte[v>>8]) | uint16(reverseByte[v&0xFF])<<8 +} + +func reverseBits(number uint16, bitLength byte) uint16 { + return reverseUint16(number << uint8(16-bitLength)) +} diff --git a/vendor/github.com/klauspost/compress/flate/snappy.go b/vendor/github.com/klauspost/compress/flate/snappy.go new file mode 100644 index 0000000..d853320 --- /dev/null +++ b/vendor/github.com/klauspost/compress/flate/snappy.go @@ -0,0 +1,900 @@ +// Copyright 2011 The Snappy-Go Authors. All rights reserved. +// Modified for deflate by Klaus Post (c) 2015. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package flate + +// emitLiteral writes a literal chunk and returns the number of bytes written. +func emitLiteral(dst *tokens, lit []byte) { + ol := int(dst.n) + for i, v := range lit { + dst.tokens[(i+ol)&maxStoreBlockSize] = token(v) + } + dst.n += uint16(len(lit)) +} + +// emitCopy writes a copy chunk and returns the number of bytes written. 
+func emitCopy(dst *tokens, offset, length int) { + dst.tokens[dst.n] = matchToken(uint32(length-3), uint32(offset-minOffsetSize)) + dst.n++ +} + +type snappyEnc interface { + Encode(dst *tokens, src []byte) + Reset() +} + +func newSnappy(level int) snappyEnc { + switch level { + case 1: + return &snappyL1{} + case 2: + return &snappyL2{snappyGen: snappyGen{cur: maxStoreBlockSize, prev: make([]byte, 0, maxStoreBlockSize)}} + case 3: + return &snappyL3{snappyGen: snappyGen{cur: maxStoreBlockSize, prev: make([]byte, 0, maxStoreBlockSize)}} + case 4: + return &snappyL4{snappyL3{snappyGen: snappyGen{cur: maxStoreBlockSize, prev: make([]byte, 0, maxStoreBlockSize)}}} + default: + panic("invalid level specified") + } +} + +const ( + tableBits = 14 // Bits used in the table + tableSize = 1 << tableBits // Size of the table + tableMask = tableSize - 1 // Mask for table indices. Redundant, but can eliminate bounds checks. + tableShift = 32 - tableBits // Right-shift to get the tableBits most significant bits of a uint32. + baseMatchOffset = 1 // The smallest match offset + baseMatchLength = 3 // The smallest match length per the RFC section 3.2.5 + maxMatchOffset = 1 << 15 // The largest match offset +) + +func load32(b []byte, i int) uint32 { + b = b[i : i+4 : len(b)] // Help the compiler eliminate bounds checks on the next line. + return uint32(b[0]) | uint32(b[1])<<8 | uint32(b[2])<<16 | uint32(b[3])<<24 +} + +func load64(b []byte, i int) uint64 { + b = b[i : i+8 : len(b)] // Help the compiler eliminate bounds checks on the next line. + return uint64(b[0]) | uint64(b[1])<<8 | uint64(b[2])<<16 | uint64(b[3])<<24 | + uint64(b[4])<<32 | uint64(b[5])<<40 | uint64(b[6])<<48 | uint64(b[7])<<56 +} + +func hash(u uint32) uint32 { + return (u * 0x1e35a7bd) >> tableShift +} + +// snappyL1 encapsulates level 1 compression +type snappyL1 struct{} + +func (e *snappyL1) Reset() {} + +func (e *snappyL1) Encode(dst *tokens, src []byte) { + const ( + inputMargin = 16 - 1 + minNonLiteralBlockSize = 1 + 1 + inputMargin + ) + + // This check isn't in the Snappy implementation, but there, the caller + // instead of the callee handles this case. + if len(src) < minNonLiteralBlockSize { + // We do not fill the token table. + // This will be picked up by caller. + dst.n = uint16(len(src)) + return + } + + // Initialize the hash table. + // + // The table element type is uint16, as s < sLimit and sLimit < len(src) + // and len(src) <= maxStoreBlockSize and maxStoreBlockSize == 65535. + var table [tableSize]uint16 + + // sLimit is when to stop looking for offset/length copies. The inputMargin + // lets us use a fast path for emitLiteral in the main loop, while we are + // looking for copies. + sLimit := len(src) - inputMargin + + // nextEmit is where in src the next emitLiteral should start from. + nextEmit := 0 + + // The encoded form must start with a literal, as there are no previous + // bytes to copy, so we start looking for hash matches at s == 1. + s := 1 + nextHash := hash(load32(src, s)) + + for { + // Copied from the C++ snappy implementation: + // + // Heuristic match skipping: If 32 bytes are scanned with no matches + // found, start looking only at every other byte. If 32 more bytes are + // scanned (or skipped), look at every third byte, etc.. When a match + // is found, immediately go back to looking at every byte. 
This is a + // small loss (~5% performance, ~0.1% density) for compressible data + // due to more bookkeeping, but for non-compressible data (such as + // JPEG) it's a huge win since the compressor quickly "realizes" the + // data is incompressible and doesn't bother looking for matches + // everywhere. + // + // The "skip" variable keeps track of how many bytes there are since + // the last match; dividing it by 32 (ie. right-shifting by five) gives + // the number of bytes to move ahead for each iteration. + skip := 32 + + nextS := s + candidate := 0 + for { + s = nextS + bytesBetweenHashLookups := skip >> 5 + nextS = s + bytesBetweenHashLookups + skip += bytesBetweenHashLookups + if nextS > sLimit { + goto emitRemainder + } + candidate = int(table[nextHash&tableMask]) + table[nextHash&tableMask] = uint16(s) + nextHash = hash(load32(src, nextS)) + if s-candidate <= maxMatchOffset && load32(src, s) == load32(src, candidate) { + break + } + } + + // A 4-byte match has been found. We'll later see if more than 4 bytes + // match. But, prior to the match, src[nextEmit:s] are unmatched. Emit + // them as literal bytes. + emitLiteral(dst, src[nextEmit:s]) + + // Call emitCopy, and then see if another emitCopy could be our next + // move. Repeat until we find no match for the input immediately after + // what was consumed by the last emitCopy call. + // + // If we exit this loop normally then we need to call emitLiteral next, + // though we don't yet know how big the literal will be. We handle that + // by proceeding to the next iteration of the main loop. We also can + // exit this loop via goto if we get close to exhausting the input. + for { + // Invariant: we have a 4-byte match at s, and no need to emit any + // literal bytes prior to s. + base := s + + // Extend the 4-byte match as long as possible. + // + // This is an inlined version of Snappy's: + // s = extendMatch(src, candidate+4, s+4) + s += 4 + s1 := base + maxMatchLength + if s1 > len(src) { + s1 = len(src) + } + a := src[s:s1] + b := src[candidate+4:] + b = b[:len(a)] + l := len(a) + for i := range a { + if a[i] != b[i] { + l = i + break + } + } + s += l + + // matchToken is flate's equivalent of Snappy's emitCopy. + dst.tokens[dst.n] = matchToken(uint32(s-base-baseMatchLength), uint32(base-candidate-baseMatchOffset)) + dst.n++ + nextEmit = s + if s >= sLimit { + goto emitRemainder + } + + // We could immediately start working at s now, but to improve + // compression we first update the hash table at s-1 and at s. If + // another emitCopy is not our next move, also calculate nextHash + // at s+1. At least on GOARCH=amd64, these three hash calculations + // are faster as one load64 call (with some shifts) instead of + // three load32 calls. + x := load64(src, s-1) + prevHash := hash(uint32(x >> 0)) + table[prevHash&tableMask] = uint16(s - 1) + currHash := hash(uint32(x >> 8)) + candidate = int(table[currHash&tableMask]) + table[currHash&tableMask] = uint16(s) + if s-candidate > maxMatchOffset || uint32(x>>8) != load32(src, candidate) { + nextHash = hash(uint32(x >> 16)) + s++ + break + } + } + } + +emitRemainder: + if nextEmit < len(src) { + emitLiteral(dst, src[nextEmit:]) + } +} + +type tableEntry struct { + val uint32 + offset int32 +} + +func load3232(b []byte, i int32) uint32 { + b = b[i : i+4 : len(b)] // Help the compiler eliminate bounds checks on the next line. 
+ return uint32(b[0]) | uint32(b[1])<<8 | uint32(b[2])<<16 | uint32(b[3])<<24 +} + +func load6432(b []byte, i int32) uint64 { + b = b[i : i+8 : len(b)] // Help the compiler eliminate bounds checks on the next line. + return uint64(b[0]) | uint64(b[1])<<8 | uint64(b[2])<<16 | uint64(b[3])<<24 | + uint64(b[4])<<32 | uint64(b[5])<<40 | uint64(b[6])<<48 | uint64(b[7])<<56 +} + +// snappyGen maintains the table for matches, +// and the previous byte block for level 2. +// This is the generic implementation. +type snappyGen struct { + prev []byte + cur int32 +} + +// snappyGen maintains the table for matches, +// and the previous byte block for level 2. +// This is the generic implementation. +type snappyL2 struct { + snappyGen + table [tableSize]tableEntry +} + +// EncodeL2 uses a similar algorithm to level 1, but is capable +// of matching across blocks giving better compression at a small slowdown. +func (e *snappyL2) Encode(dst *tokens, src []byte) { + const ( + inputMargin = 8 - 1 + minNonLiteralBlockSize = 1 + 1 + inputMargin + ) + + // Protect against e.cur wraparound. + if e.cur > 1<<30 { + for i := range e.table[:] { + e.table[i] = tableEntry{} + } + e.cur = maxStoreBlockSize + } + + // This check isn't in the Snappy implementation, but there, the caller + // instead of the callee handles this case. + if len(src) < minNonLiteralBlockSize { + // We do not fill the token table. + // This will be picked up by caller. + dst.n = uint16(len(src)) + e.cur += maxStoreBlockSize + e.prev = e.prev[:0] + return + } + + // sLimit is when to stop looking for offset/length copies. The inputMargin + // lets us use a fast path for emitLiteral in the main loop, while we are + // looking for copies. + sLimit := int32(len(src) - inputMargin) + + // nextEmit is where in src the next emitLiteral should start from. + nextEmit := int32(0) + s := int32(0) + cv := load3232(src, s) + nextHash := hash(cv) + + for { + // Copied from the C++ snappy implementation: + // + // Heuristic match skipping: If 32 bytes are scanned with no matches + // found, start looking only at every other byte. If 32 more bytes are + // scanned (or skipped), look at every third byte, etc.. When a match + // is found, immediately go back to looking at every byte. This is a + // small loss (~5% performance, ~0.1% density) for compressible data + // due to more bookkeeping, but for non-compressible data (such as + // JPEG) it's a huge win since the compressor quickly "realizes" the + // data is incompressible and doesn't bother looking for matches + // everywhere. + // + // The "skip" variable keeps track of how many bytes there are since + // the last match; dividing it by 32 (ie. right-shifting by five) gives + // the number of bytes to move ahead for each iteration. + skip := int32(32) + + nextS := s + var candidate tableEntry + for { + s = nextS + bytesBetweenHashLookups := skip >> 5 + nextS = s + bytesBetweenHashLookups + skip += bytesBetweenHashLookups + if nextS > sLimit { + goto emitRemainder + } + candidate = e.table[nextHash&tableMask] + now := load3232(src, nextS) + e.table[nextHash&tableMask] = tableEntry{offset: s + e.cur, val: cv} + nextHash = hash(now) + + offset := s - (candidate.offset - e.cur) + if offset > maxMatchOffset || cv != candidate.val { + // Out of range or not matched. + cv = now + continue + } + break + } + + // A 4-byte match has been found. We'll later see if more than 4 bytes + // match. But, prior to the match, src[nextEmit:s] are unmatched. Emit + // them as literal bytes. 
+ emitLiteral(dst, src[nextEmit:s]) + + // Call emitCopy, and then see if another emitCopy could be our next + // move. Repeat until we find no match for the input immediately after + // what was consumed by the last emitCopy call. + // + // If we exit this loop normally then we need to call emitLiteral next, + // though we don't yet know how big the literal will be. We handle that + // by proceeding to the next iteration of the main loop. We also can + // exit this loop via goto if we get close to exhausting the input. + for { + // Invariant: we have a 4-byte match at s, and no need to emit any + // literal bytes prior to s. + + // Extend the 4-byte match as long as possible. + // + s += 4 + t := candidate.offset - e.cur + 4 + l := e.matchlen(s, t, src) + + // matchToken is flate's equivalent of Snappy's emitCopy. (length,offset) + dst.tokens[dst.n] = matchToken(uint32(l+4-baseMatchLength), uint32(s-t-baseMatchOffset)) + dst.n++ + s += l + nextEmit = s + if s >= sLimit { + t += l + // Index first pair after match end. + if int(t+4) < len(src) && t > 0 { + cv := load3232(src, t) + e.table[hash(cv)&tableMask] = tableEntry{offset: t + e.cur, val: cv} + } + goto emitRemainder + } + + // We could immediately start working at s now, but to improve + // compression we first update the hash table at s-1 and at s. If + // another emitCopy is not our next move, also calculate nextHash + // at s+1. At least on GOARCH=amd64, these three hash calculations + // are faster as one load64 call (with some shifts) instead of + // three load32 calls. + x := load6432(src, s-1) + prevHash := hash(uint32(x)) + e.table[prevHash&tableMask] = tableEntry{offset: e.cur + s - 1, val: uint32(x)} + x >>= 8 + currHash := hash(uint32(x)) + candidate = e.table[currHash&tableMask] + e.table[currHash&tableMask] = tableEntry{offset: e.cur + s, val: uint32(x)} + + offset := s - (candidate.offset - e.cur) + if offset > maxMatchOffset || uint32(x) != candidate.val { + cv = uint32(x >> 8) + nextHash = hash(cv) + s++ + break + } + } + } + +emitRemainder: + if int(nextEmit) < len(src) { + emitLiteral(dst, src[nextEmit:]) + } + e.cur += int32(len(src)) + e.prev = e.prev[:len(src)] + copy(e.prev, src) +} + +type tableEntryPrev struct { + Cur tableEntry + Prev tableEntry +} + +// snappyL3 +type snappyL3 struct { + snappyGen + table [tableSize]tableEntryPrev +} + +// Encode uses a similar algorithm to level 2, will check up to two candidates. +func (e *snappyL3) Encode(dst *tokens, src []byte) { + const ( + inputMargin = 8 - 1 + minNonLiteralBlockSize = 1 + 1 + inputMargin + ) + + // Protect against e.cur wraparound. + if e.cur > 1<<30 { + for i := range e.table[:] { + e.table[i] = tableEntryPrev{} + } + e.snappyGen = snappyGen{cur: maxStoreBlockSize, prev: e.prev[:0]} + } + + // This check isn't in the Snappy implementation, but there, the caller + // instead of the callee handles this case. + if len(src) < minNonLiteralBlockSize { + // We do not fill the token table. + // This will be picked up by caller. + dst.n = uint16(len(src)) + e.cur += maxStoreBlockSize + e.prev = e.prev[:0] + return + } + + // sLimit is when to stop looking for offset/length copies. The inputMargin + // lets us use a fast path for emitLiteral in the main loop, while we are + // looking for copies. + sLimit := int32(len(src) - inputMargin) + + // nextEmit is where in src the next emitLiteral should start from. 
+ nextEmit := int32(0) + s := int32(0) + cv := load3232(src, s) + nextHash := hash(cv) + + for { + // Copied from the C++ snappy implementation: + // + // Heuristic match skipping: If 32 bytes are scanned with no matches + // found, start looking only at every other byte. If 32 more bytes are + // scanned (or skipped), look at every third byte, etc.. When a match + // is found, immediately go back to looking at every byte. This is a + // small loss (~5% performance, ~0.1% density) for compressible data + // due to more bookkeeping, but for non-compressible data (such as + // JPEG) it's a huge win since the compressor quickly "realizes" the + // data is incompressible and doesn't bother looking for matches + // everywhere. + // + // The "skip" variable keeps track of how many bytes there are since + // the last match; dividing it by 32 (ie. right-shifting by five) gives + // the number of bytes to move ahead for each iteration. + skip := int32(32) + + nextS := s + var candidate tableEntry + for { + s = nextS + bytesBetweenHashLookups := skip >> 5 + nextS = s + bytesBetweenHashLookups + skip += bytesBetweenHashLookups + if nextS > sLimit { + goto emitRemainder + } + candidates := e.table[nextHash&tableMask] + now := load3232(src, nextS) + e.table[nextHash&tableMask] = tableEntryPrev{Prev: candidates.Cur, Cur: tableEntry{offset: s + e.cur, val: cv}} + nextHash = hash(now) + + // Check both candidates + candidate = candidates.Cur + if cv == candidate.val { + offset := s - (candidate.offset - e.cur) + if offset <= maxMatchOffset { + break + } + } else { + // We only check if value mismatches. + // Offset will always be invalid in other cases. + candidate = candidates.Prev + if cv == candidate.val { + offset := s - (candidate.offset - e.cur) + if offset <= maxMatchOffset { + break + } + } + } + cv = now + } + + // A 4-byte match has been found. We'll later see if more than 4 bytes + // match. But, prior to the match, src[nextEmit:s] are unmatched. Emit + // them as literal bytes. + emitLiteral(dst, src[nextEmit:s]) + + // Call emitCopy, and then see if another emitCopy could be our next + // move. Repeat until we find no match for the input immediately after + // what was consumed by the last emitCopy call. + // + // If we exit this loop normally then we need to call emitLiteral next, + // though we don't yet know how big the literal will be. We handle that + // by proceeding to the next iteration of the main loop. We also can + // exit this loop via goto if we get close to exhausting the input. + for { + // Invariant: we have a 4-byte match at s, and no need to emit any + // literal bytes prior to s. + + // Extend the 4-byte match as long as possible. + // + s += 4 + t := candidate.offset - e.cur + 4 + l := e.matchlen(s, t, src) + + // matchToken is flate's equivalent of Snappy's emitCopy. (length,offset) + dst.tokens[dst.n] = matchToken(uint32(l+4-baseMatchLength), uint32(s-t-baseMatchOffset)) + dst.n++ + s += l + nextEmit = s + if s >= sLimit { + t += l + // Index first pair after match end. + if int(t+4) < len(src) && t > 0 { + cv := load3232(src, t) + nextHash = hash(cv) + e.table[nextHash&tableMask] = tableEntryPrev{ + Prev: e.table[nextHash&tableMask].Cur, + Cur: tableEntry{offset: e.cur + t, val: cv}, + } + } + goto emitRemainder + } + + // We could immediately start working at s now, but to improve + // compression we first update the hash table at s-3 to s. If + // another emitCopy is not our next move, also calculate nextHash + // at s+1. 
At least on GOARCH=amd64, these three hash calculations + // are faster as one load64 call (with some shifts) instead of + // three load32 calls. + x := load6432(src, s-3) + prevHash := hash(uint32(x)) + e.table[prevHash&tableMask] = tableEntryPrev{ + Prev: e.table[prevHash&tableMask].Cur, + Cur: tableEntry{offset: e.cur + s - 3, val: uint32(x)}, + } + x >>= 8 + prevHash = hash(uint32(x)) + + e.table[prevHash&tableMask] = tableEntryPrev{ + Prev: e.table[prevHash&tableMask].Cur, + Cur: tableEntry{offset: e.cur + s - 2, val: uint32(x)}, + } + x >>= 8 + prevHash = hash(uint32(x)) + + e.table[prevHash&tableMask] = tableEntryPrev{ + Prev: e.table[prevHash&tableMask].Cur, + Cur: tableEntry{offset: e.cur + s - 1, val: uint32(x)}, + } + x >>= 8 + currHash := hash(uint32(x)) + candidates := e.table[currHash&tableMask] + cv = uint32(x) + e.table[currHash&tableMask] = tableEntryPrev{ + Prev: candidates.Cur, + Cur: tableEntry{offset: s + e.cur, val: cv}, + } + + // Check both candidates + candidate = candidates.Cur + if cv == candidate.val { + offset := s - (candidate.offset - e.cur) + if offset <= maxMatchOffset { + continue + } + } else { + // We only check if value mismatches. + // Offset will always be invalid in other cases. + candidate = candidates.Prev + if cv == candidate.val { + offset := s - (candidate.offset - e.cur) + if offset <= maxMatchOffset { + continue + } + } + } + cv = uint32(x >> 8) + nextHash = hash(cv) + s++ + break + } + } + +emitRemainder: + if int(nextEmit) < len(src) { + emitLiteral(dst, src[nextEmit:]) + } + e.cur += int32(len(src)) + e.prev = e.prev[:len(src)] + copy(e.prev, src) +} + +// snappyL4 +type snappyL4 struct { + snappyL3 +} + +// Encode uses a similar algorithm to level 3, +// but will check up to two candidates if first isn't long enough. +func (e *snappyL4) Encode(dst *tokens, src []byte) { + const ( + inputMargin = 8 - 3 + minNonLiteralBlockSize = 1 + 1 + inputMargin + matchLenGood = 12 + ) + + // Protect against e.cur wraparound. + if e.cur > 1<<30 { + for i := range e.table[:] { + e.table[i] = tableEntryPrev{} + } + e.snappyGen = snappyGen{cur: maxStoreBlockSize, prev: e.prev[:0]} + } + + // This check isn't in the Snappy implementation, but there, the caller + // instead of the callee handles this case. + if len(src) < minNonLiteralBlockSize { + // We do not fill the token table. + // This will be picked up by caller. + dst.n = uint16(len(src)) + e.cur += maxStoreBlockSize + e.prev = e.prev[:0] + return + } + + // sLimit is when to stop looking for offset/length copies. The inputMargin + // lets us use a fast path for emitLiteral in the main loop, while we are + // looking for copies. + sLimit := int32(len(src) - inputMargin) + + // nextEmit is where in src the next emitLiteral should start from. + nextEmit := int32(0) + s := int32(0) + cv := load3232(src, s) + nextHash := hash(cv) + + for { + // Copied from the C++ snappy implementation: + // + // Heuristic match skipping: If 32 bytes are scanned with no matches + // found, start looking only at every other byte. If 32 more bytes are + // scanned (or skipped), look at every third byte, etc.. When a match + // is found, immediately go back to looking at every byte. This is a + // small loss (~5% performance, ~0.1% density) for compressible data + // due to more bookkeeping, but for non-compressible data (such as + // JPEG) it's a huge win since the compressor quickly "realizes" the + // data is incompressible and doesn't bother looking for matches + // everywhere. 
+ // + // The "skip" variable keeps track of how many bytes there are since + // the last match; dividing it by 32 (ie. right-shifting by five) gives + // the number of bytes to move ahead for each iteration. + skip := int32(32) + + nextS := s + var candidate tableEntry + var candidateAlt tableEntry + for { + s = nextS + bytesBetweenHashLookups := skip >> 5 + nextS = s + bytesBetweenHashLookups + skip += bytesBetweenHashLookups + if nextS > sLimit { + goto emitRemainder + } + candidates := e.table[nextHash&tableMask] + now := load3232(src, nextS) + e.table[nextHash&tableMask] = tableEntryPrev{Prev: candidates.Cur, Cur: tableEntry{offset: s + e.cur, val: cv}} + nextHash = hash(now) + + // Check both candidates + candidate = candidates.Cur + if cv == candidate.val { + offset := s - (candidate.offset - e.cur) + if offset < maxMatchOffset { + offset = s - (candidates.Prev.offset - e.cur) + if cv == candidates.Prev.val && offset < maxMatchOffset { + candidateAlt = candidates.Prev + } + break + } + } else { + // We only check if value mismatches. + // Offset will always be invalid in other cases. + candidate = candidates.Prev + if cv == candidate.val { + offset := s - (candidate.offset - e.cur) + if offset < maxMatchOffset { + break + } + } + } + cv = now + } + + // A 4-byte match has been found. We'll later see if more than 4 bytes + // match. But, prior to the match, src[nextEmit:s] are unmatched. Emit + // them as literal bytes. + emitLiteral(dst, src[nextEmit:s]) + + // Call emitCopy, and then see if another emitCopy could be our next + // move. Repeat until we find no match for the input immediately after + // what was consumed by the last emitCopy call. + // + // If we exit this loop normally then we need to call emitLiteral next, + // though we don't yet know how big the literal will be. We handle that + // by proceeding to the next iteration of the main loop. We also can + // exit this loop via goto if we get close to exhausting the input. + for { + // Invariant: we have a 4-byte match at s, and no need to emit any + // literal bytes prior to s. + + // Extend the 4-byte match as long as possible. + // + s += 4 + t := candidate.offset - e.cur + 4 + l := e.matchlen(s, t, src) + // Try alternative candidate if match length < matchLenGood. + if l < matchLenGood-4 && candidateAlt.offset != 0 { + t2 := candidateAlt.offset - e.cur + 4 + l2 := e.matchlen(s, t2, src) + if l2 > l { + l = l2 + t = t2 + } + } + // matchToken is flate's equivalent of Snappy's emitCopy. (length,offset) + dst.tokens[dst.n] = matchToken(uint32(l+4-baseMatchLength), uint32(s-t-baseMatchOffset)) + dst.n++ + s += l + nextEmit = s + if s >= sLimit { + t += l + // Index first pair after match end. + if int(t+4) < len(src) && t > 0 { + cv := load3232(src, t) + nextHash = hash(cv) + e.table[nextHash&tableMask] = tableEntryPrev{ + Prev: e.table[nextHash&tableMask].Cur, + Cur: tableEntry{offset: e.cur + t, val: cv}, + } + } + goto emitRemainder + } + + // We could immediately start working at s now, but to improve + // compression we first update the hash table at s-3 to s. If + // another emitCopy is not our next move, also calculate nextHash + // at s+1. At least on GOARCH=amd64, these three hash calculations + // are faster as one load64 call (with some shifts) instead of + // three load32 calls. 
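// Sketch of the trick described in the comment above (illustrative only, not
// from the upstream file): one 64-bit load at s-3 already contains every
// 4-byte window starting at s-3, s-2, s-1 and s, so the code below peels them
// off with shifts instead of issuing separate 32-bit loads:
//
//	x := load6432(src, s-3)
//	uint32(x)       // bytes s-3 .. s
//	uint32(x >> 8)  // bytes s-2 .. s+1
//	uint32(x >> 16) // bytes s-1 .. s+2
//	uint32(x >> 24) // bytes s   .. s+3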
+ x := load6432(src, s-3) + prevHash := hash(uint32(x)) + e.table[prevHash&tableMask] = tableEntryPrev{ + Prev: e.table[prevHash&tableMask].Cur, + Cur: tableEntry{offset: e.cur + s - 3, val: uint32(x)}, + } + x >>= 8 + prevHash = hash(uint32(x)) + + e.table[prevHash&tableMask] = tableEntryPrev{ + Prev: e.table[prevHash&tableMask].Cur, + Cur: tableEntry{offset: e.cur + s - 2, val: uint32(x)}, + } + x >>= 8 + prevHash = hash(uint32(x)) + + e.table[prevHash&tableMask] = tableEntryPrev{ + Prev: e.table[prevHash&tableMask].Cur, + Cur: tableEntry{offset: e.cur + s - 1, val: uint32(x)}, + } + x >>= 8 + currHash := hash(uint32(x)) + candidates := e.table[currHash&tableMask] + cv = uint32(x) + e.table[currHash&tableMask] = tableEntryPrev{ + Prev: candidates.Cur, + Cur: tableEntry{offset: s + e.cur, val: cv}, + } + + // Check both candidates + candidate = candidates.Cur + candidateAlt = tableEntry{} + if cv == candidate.val { + offset := s - (candidate.offset - e.cur) + if offset <= maxMatchOffset { + offset = s - (candidates.Prev.offset - e.cur) + if cv == candidates.Prev.val && offset <= maxMatchOffset { + candidateAlt = candidates.Prev + } + continue + } + } else { + // We only check if value mismatches. + // Offset will always be invalid in other cases. + candidate = candidates.Prev + if cv == candidate.val { + offset := s - (candidate.offset - e.cur) + if offset <= maxMatchOffset { + continue + } + } + } + cv = uint32(x >> 8) + nextHash = hash(cv) + s++ + break + } + } + +emitRemainder: + if int(nextEmit) < len(src) { + emitLiteral(dst, src[nextEmit:]) + } + e.cur += int32(len(src)) + e.prev = e.prev[:len(src)] + copy(e.prev, src) +} + +func (e *snappyGen) matchlen(s, t int32, src []byte) int32 { + s1 := int(s) + maxMatchLength - 4 + if s1 > len(src) { + s1 = len(src) + } + + // If we are inside the current block + if t >= 0 { + b := src[t:] + a := src[s:s1] + b = b[:len(a)] + // Extend the match to be as long as possible. + for i := range a { + if a[i] != b[i] { + return int32(i) + } + } + return int32(len(a)) + } + + // We found a match in the previous block. + tp := int32(len(e.prev)) + t + if tp < 0 { + return 0 + } + + // Extend the match to be as long as possible. + a := src[s:s1] + b := e.prev[tp:] + if len(b) > len(a) { + b = b[:len(a)] + } + a = a[:len(b)] + for i := range b { + if a[i] != b[i] { + return int32(i) + } + } + + // If we reached our limit, we matched everything we are + // allowed to in the previous block and we return. + n := int32(len(b)) + if int(s+n) == s1 { + return n + } + + // Continue looking for more matches in the current block. + a = src[s+n : s1] + b = src[:len(a)] + for i := range a { + if a[i] != b[i] { + return int32(i) + n + } + } + return int32(len(a)) + n +} + +// Reset the encoding table. 
+func (e *snappyGen) Reset() { + e.prev = e.prev[:0] + e.cur += maxMatchOffset +} diff --git a/vendor/github.com/klauspost/compress/flate/testdata/huffman-null-max.dyn.expect b/vendor/github.com/klauspost/compress/flate/testdata/huffman-null-max.dyn.expect new file mode 100644 index 0000000..c081651 Binary files /dev/null and b/vendor/github.com/klauspost/compress/flate/testdata/huffman-null-max.dyn.expect differ diff --git a/vendor/github.com/klauspost/compress/flate/testdata/huffman-null-max.dyn.expect-noinput b/vendor/github.com/klauspost/compress/flate/testdata/huffman-null-max.dyn.expect-noinput new file mode 100644 index 0000000..c081651 Binary files /dev/null and b/vendor/github.com/klauspost/compress/flate/testdata/huffman-null-max.dyn.expect-noinput differ diff --git a/vendor/github.com/klauspost/compress/flate/testdata/huffman-null-max.golden b/vendor/github.com/klauspost/compress/flate/testdata/huffman-null-max.golden new file mode 100644 index 0000000..db422ca Binary files /dev/null and b/vendor/github.com/klauspost/compress/flate/testdata/huffman-null-max.golden differ diff --git a/vendor/github.com/klauspost/compress/flate/testdata/huffman-null-max.in b/vendor/github.com/klauspost/compress/flate/testdata/huffman-null-max.in new file mode 100644 index 0000000..5dfddf0 Binary files /dev/null and b/vendor/github.com/klauspost/compress/flate/testdata/huffman-null-max.in differ diff --git a/vendor/github.com/klauspost/compress/flate/testdata/huffman-null-max.wb.expect b/vendor/github.com/klauspost/compress/flate/testdata/huffman-null-max.wb.expect new file mode 100644 index 0000000..c081651 Binary files /dev/null and b/vendor/github.com/klauspost/compress/flate/testdata/huffman-null-max.wb.expect differ diff --git a/vendor/github.com/klauspost/compress/flate/testdata/huffman-null-max.wb.expect-noinput b/vendor/github.com/klauspost/compress/flate/testdata/huffman-null-max.wb.expect-noinput new file mode 100644 index 0000000..c081651 Binary files /dev/null and b/vendor/github.com/klauspost/compress/flate/testdata/huffman-null-max.wb.expect-noinput differ diff --git a/vendor/github.com/klauspost/compress/flate/testdata/huffman-pi.dyn.expect b/vendor/github.com/klauspost/compress/flate/testdata/huffman-pi.dyn.expect new file mode 100644 index 0000000..e4396ac Binary files /dev/null and b/vendor/github.com/klauspost/compress/flate/testdata/huffman-pi.dyn.expect differ diff --git a/vendor/github.com/klauspost/compress/flate/testdata/huffman-pi.dyn.expect-noinput b/vendor/github.com/klauspost/compress/flate/testdata/huffman-pi.dyn.expect-noinput new file mode 100644 index 0000000..e4396ac Binary files /dev/null and b/vendor/github.com/klauspost/compress/flate/testdata/huffman-pi.dyn.expect-noinput differ diff --git a/vendor/github.com/klauspost/compress/flate/testdata/huffman-pi.golden b/vendor/github.com/klauspost/compress/flate/testdata/huffman-pi.golden new file mode 100644 index 0000000..23d8f7f Binary files /dev/null and b/vendor/github.com/klauspost/compress/flate/testdata/huffman-pi.golden differ diff --git a/vendor/github.com/klauspost/compress/flate/testdata/huffman-pi.in b/vendor/github.com/klauspost/compress/flate/testdata/huffman-pi.in new file mode 100644 index 0000000..efaed43 --- /dev/null +++ b/vendor/github.com/klauspost/compress/flate/testdata/huffman-pi.in @@ -0,0 +1 @@ 
+3.141592653589793238462643383279502884197169399375105820974944592307816406286208998628034825342117067982148086513282306647093844609550582231725359408128481117450284102701938521105559644622948954930381964428810975665933446128475648233786783165271201909145648566923460348610454326648213393607260249141273724587006606315588174881520920962829254091715364367892590360011330530548820466521384146951941511609433057270365759591953092186117381932611793105118548074462379962749567351885752724891227938183011949129833673362440656643086021394946395224737190702179860943702770539217176293176752384674818467669405132000568127145263560827785771342757789609173637178721468440901224953430146549585371050792279689258923542019956112129021960864034418159813629774771309960518707211349999998372978049951059731732816096318595024459455346908302642522308253344685035261931188171010003137838752886587533208381420617177669147303598253490428755468731159562863882353787593751957781857780532171226806613001927876611195909216420198938095257201065485863278865936153381827968230301952035301852968995773622599413891249721775283479131515574857242454150695950829533116861727855889075098381754637464939319255060400927701671139009848824012858361603563707660104710181942955596198946767837449448255379774726847104047534646208046684259069491293313677028989152104752162056966024058038150193511253382430035587640247496473263914199272604269922796782354781636009341721641219924586315030286182974555706749838505494588586926995690927210797509302955321165344987202755960236480665499119881834797753566369807426542527862551818417574672890977772793800081647060016145249192173217214772350141441973568548161361157352552133475741849468438523323907394143334547762416862518983569485562099219222184272550254256887671790494601653466804988627232791786085784383827967976681454100953883786360950680064225125205117392984896084128488626945604241965285022210661186306744278622039194945047123713786960956364371917287467764657573962413890865832645995813390478027590099465764078951269468398352595709825822620522489407726719478268482601476990902640136394437455305068203496252451749399651431429809190659250937221696461515709858387410597885959772975498930161753928468138268683868942774155991855925245953959431049972524680845987273644695848653836736222626099124608051243884390451244136549762780797715691435997700129616089441694868555848406353422072225828488648158456028506016842739452267467678895252138522549954666727823986456596116354886230577456498035593634568174324112515076069479451096596094025228879710893145669136867228748940560101503308617928680920874760917824938589009714909675985261365549781893129784821682998948722658804857564014270477555132379641451523746234364542858444795265867821051141354735739523113427166102135969536231442952484937187110145765403590279934403742007310578539062198387447808478489683321445713868751943506430218453191048481005370614680674919278191197939952061419663428754440643745123718192179998391015919561814675142691239748940907186494231961567945208095146550225231603881930142093762137855956638937787083039069792077346722182562599661501421503068038447734549202605414665925201497442850732518666002132434088190710486331734649651453905796268561005508106658796998163574736384052571459102897064140110971206280439039759515677157700420337869936007230558763176359421873125147120532928191826186125867321579198414848829164470609575270695722091756711672291098169091528017350671274858322287183520935396572512108357915136988209144421006751033467110314126711136990865851639831501970165151168517143765761835
155650884909989859982387345528331635507647918535893226185489632132933089857064204675259070915481416549859461637180 \ No newline at end of file diff --git a/vendor/github.com/klauspost/compress/flate/testdata/huffman-pi.wb.expect b/vendor/github.com/klauspost/compress/flate/testdata/huffman-pi.wb.expect new file mode 100644 index 0000000..e4396ac Binary files /dev/null and b/vendor/github.com/klauspost/compress/flate/testdata/huffman-pi.wb.expect differ diff --git a/vendor/github.com/klauspost/compress/flate/testdata/huffman-pi.wb.expect-noinput b/vendor/github.com/klauspost/compress/flate/testdata/huffman-pi.wb.expect-noinput new file mode 100644 index 0000000..e4396ac Binary files /dev/null and b/vendor/github.com/klauspost/compress/flate/testdata/huffman-pi.wb.expect-noinput differ diff --git a/vendor/github.com/klauspost/compress/flate/testdata/huffman-rand-1k.dyn.expect b/vendor/github.com/klauspost/compress/flate/testdata/huffman-rand-1k.dyn.expect new file mode 100644 index 0000000..09dc798 Binary files /dev/null and b/vendor/github.com/klauspost/compress/flate/testdata/huffman-rand-1k.dyn.expect differ diff --git a/vendor/github.com/klauspost/compress/flate/testdata/huffman-rand-1k.dyn.expect-noinput b/vendor/github.com/klauspost/compress/flate/testdata/huffman-rand-1k.dyn.expect-noinput new file mode 100644 index 0000000..0c24742 Binary files /dev/null and b/vendor/github.com/klauspost/compress/flate/testdata/huffman-rand-1k.dyn.expect-noinput differ diff --git a/vendor/github.com/klauspost/compress/flate/testdata/huffman-rand-1k.golden b/vendor/github.com/klauspost/compress/flate/testdata/huffman-rand-1k.golden new file mode 100644 index 0000000..09dc798 Binary files /dev/null and b/vendor/github.com/klauspost/compress/flate/testdata/huffman-rand-1k.golden differ diff --git a/vendor/github.com/klauspost/compress/flate/testdata/huffman-rand-1k.in b/vendor/github.com/klauspost/compress/flate/testdata/huffman-rand-1k.in new file mode 100644 index 0000000..ce038eb Binary files /dev/null and b/vendor/github.com/klauspost/compress/flate/testdata/huffman-rand-1k.in differ diff --git a/vendor/github.com/klauspost/compress/flate/testdata/huffman-rand-1k.wb.expect b/vendor/github.com/klauspost/compress/flate/testdata/huffman-rand-1k.wb.expect new file mode 100644 index 0000000..09dc798 Binary files /dev/null and b/vendor/github.com/klauspost/compress/flate/testdata/huffman-rand-1k.wb.expect differ diff --git a/vendor/github.com/klauspost/compress/flate/testdata/huffman-rand-1k.wb.expect-noinput b/vendor/github.com/klauspost/compress/flate/testdata/huffman-rand-1k.wb.expect-noinput new file mode 100644 index 0000000..0c24742 Binary files /dev/null and b/vendor/github.com/klauspost/compress/flate/testdata/huffman-rand-1k.wb.expect-noinput differ diff --git a/vendor/github.com/klauspost/compress/flate/testdata/huffman-rand-limit.dyn.expect b/vendor/github.com/klauspost/compress/flate/testdata/huffman-rand-limit.dyn.expect new file mode 100644 index 0000000..2d65279 Binary files /dev/null and b/vendor/github.com/klauspost/compress/flate/testdata/huffman-rand-limit.dyn.expect differ diff --git a/vendor/github.com/klauspost/compress/flate/testdata/huffman-rand-limit.dyn.expect-noinput b/vendor/github.com/klauspost/compress/flate/testdata/huffman-rand-limit.dyn.expect-noinput new file mode 100644 index 0000000..2d65279 Binary files /dev/null and b/vendor/github.com/klauspost/compress/flate/testdata/huffman-rand-limit.dyn.expect-noinput differ diff --git 
a/vendor/github.com/klauspost/compress/flate/testdata/huffman-rand-limit.golden b/vendor/github.com/klauspost/compress/flate/testdata/huffman-rand-limit.golden new file mode 100644 index 0000000..57e5932 Binary files /dev/null and b/vendor/github.com/klauspost/compress/flate/testdata/huffman-rand-limit.golden differ diff --git a/vendor/github.com/klauspost/compress/flate/testdata/huffman-rand-limit.in b/vendor/github.com/klauspost/compress/flate/testdata/huffman-rand-limit.in new file mode 100644 index 0000000..fb5b1be --- /dev/null +++ b/vendor/github.com/klauspost/compress/flate/testdata/huffman-rand-limit.in @@ -0,0 +1,4 @@ +aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa +vH +% ɷ}>lsmIGH1Y4[ 0ˆ[|]o# +-#ulpfٱnYԀYwC8ɯ02 F=gnrN!O{k*w(b kQC9/lu>5C.u diff --git a/vendor/github.com/klauspost/compress/flate/testdata/huffman-rand-limit.wb.expect b/vendor/github.com/klauspost/compress/flate/testdata/huffman-rand-limit.wb.expect new file mode 100644 index 0000000..881e59c Binary files /dev/null and b/vendor/github.com/klauspost/compress/flate/testdata/huffman-rand-limit.wb.expect differ diff --git a/vendor/github.com/klauspost/compress/flate/testdata/huffman-rand-limit.wb.expect-noinput b/vendor/github.com/klauspost/compress/flate/testdata/huffman-rand-limit.wb.expect-noinput new file mode 100644 index 0000000..881e59c Binary files /dev/null and b/vendor/github.com/klauspost/compress/flate/testdata/huffman-rand-limit.wb.expect-noinput differ diff --git a/vendor/github.com/klauspost/compress/flate/testdata/huffman-rand-max.golden b/vendor/github.com/klauspost/compress/flate/testdata/huffman-rand-max.golden new file mode 100644 index 0000000..47d53c8 Binary files /dev/null and b/vendor/github.com/klauspost/compress/flate/testdata/huffman-rand-max.golden differ diff --git a/vendor/github.com/klauspost/compress/flate/testdata/huffman-rand-max.in b/vendor/github.com/klauspost/compress/flate/testdata/huffman-rand-max.in new file mode 100644 index 0000000..8418633 Binary files /dev/null and b/vendor/github.com/klauspost/compress/flate/testdata/huffman-rand-max.in differ diff --git a/vendor/github.com/klauspost/compress/flate/testdata/huffman-shifts.dyn.expect b/vendor/github.com/klauspost/compress/flate/testdata/huffman-shifts.dyn.expect new file mode 100644 index 0000000..7812c1c Binary files /dev/null and b/vendor/github.com/klauspost/compress/flate/testdata/huffman-shifts.dyn.expect differ diff --git a/vendor/github.com/klauspost/compress/flate/testdata/huffman-shifts.dyn.expect-noinput b/vendor/github.com/klauspost/compress/flate/testdata/huffman-shifts.dyn.expect-noinput new file mode 100644 index 0000000..7812c1c Binary files /dev/null and b/vendor/github.com/klauspost/compress/flate/testdata/huffman-shifts.dyn.expect-noinput differ diff --git a/vendor/github.com/klauspost/compress/flate/testdata/huffman-shifts.golden b/vendor/github.com/klauspost/compress/flate/testdata/huffman-shifts.golden new file mode 100644 index 0000000..f513377 Binary files /dev/null and b/vendor/github.com/klauspost/compress/flate/testdata/huffman-shifts.golden differ diff --git a/vendor/github.com/klauspost/compress/flate/testdata/huffman-shifts.in b/vendor/github.com/klauspost/compress/flate/testdata/huffman-shifts.in new file mode 100644 index 0000000..7c7a50d --- /dev/null +++ b/vendor/github.com/klauspost/compress/flate/testdata/huffman-shifts.in @@ -0,0 +1,2 @@ 
+10101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010
1010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010 +232323232323232323232323232323232323232323232323232323232323232323232323232323232323232323232323232323232323232323232323232323232323232323232323232323232323232323232323232323232323232323232323232323232323232323232323232323232323232323232323232323232323232323232323232323232323232323232323232323232323232323232323232323232323232323232323232323232323232323232323232323232323232323232323232323232323232323232323232323232323232323232323232323232323232323232323232323232323232323232323232323232323232323232323232323232323232323232323232323232323232323232323232323232323232323232323232323232323232323232323232323232323232323232323232323232323232323232323232323232323232323232323232323232323232323232323232323232323232323232323232323232323232323232323232323232323232323232323232323232323232323232323232323232323232323232323232323232323232323232323232323232323232323232323232323232323232323232323232323232323232323232323232323232323232323232323232323232323232323232323232323232323232323232323232323232323232323232323232323232323232323232323232323232323232323232323232323232323232323232323232323232323232323232323232323232323232323232323232323232323232323232323232323232323232323232323232323232323232323232323232323232323232323232323232323232323232323232323232323232323232323232323232323232323232323232323232323232323232323232323232323232323232323232323232323232323232323232323232323232323232323232323232323232323232323232323232323232323232323232323232323232323232323232323232323232323232323232323232323232323232323232323232323232323232323232323232323232323232323232323232323232323232323232323232323232323232323232323232323232323232323232323232323232323232323232323232323232323232323232323232323232323232323232323232323232323232323232323232323232323232323232323232323232323232323232323232323232323232323232323232323232323232323232323232323232323232323232323232323232323232323232323232323232323232323232323232323232323232323232323232323232323232323232323232323232323232323232323232323232323232323232323232323232323232323232323232323232323232323232323232323232323232323232323232323232323232323232323232323232323232323232323232323232323232323232323232323232323232323232323232323232323232323232323232323232323232323232323232323232323232323232323232323232323232323232323232323232323232323232323232323232323232323232323232323232323232323232323232323232323232323232323232323232323232323232323232323232323232323232323232323232323232323232323232323232323232323232323232323232323232323232323232323232323232323232323232323232323232323232323232323232323232323232323232323232323232323232323232323232323 \ No newline at end of file diff --git a/vendor/github.com/klauspost/compress/flate/testdata/huffman-shifts.wb.expect b/vendor/github.com/klauspost/compress/flate/testdata/huffman-shifts.wb.expect new file mode 100644 index 0000000..7812c1c Binary files /dev/null and b/vendor/github.com/klauspost/compress/flate/testdata/huffman-shifts.wb.expect differ diff --git a/vendor/github.com/klauspost/compress/flate/testdata/huffman-shifts.wb.expect-noinput b/vendor/github.com/klauspost/compress/flate/testdata/huffman-shifts.wb.expect-noinput new file mode 100644 index 
0000000..7812c1c Binary files /dev/null and b/vendor/github.com/klauspost/compress/flate/testdata/huffman-shifts.wb.expect-noinput differ diff --git a/vendor/github.com/klauspost/compress/flate/testdata/huffman-text-shift.dyn.expect b/vendor/github.com/klauspost/compress/flate/testdata/huffman-text-shift.dyn.expect new file mode 100644 index 0000000..71ce3ae Binary files /dev/null and b/vendor/github.com/klauspost/compress/flate/testdata/huffman-text-shift.dyn.expect differ diff --git a/vendor/github.com/klauspost/compress/flate/testdata/huffman-text-shift.dyn.expect-noinput b/vendor/github.com/klauspost/compress/flate/testdata/huffman-text-shift.dyn.expect-noinput new file mode 100644 index 0000000..71ce3ae Binary files /dev/null and b/vendor/github.com/klauspost/compress/flate/testdata/huffman-text-shift.dyn.expect-noinput differ diff --git a/vendor/github.com/klauspost/compress/flate/testdata/huffman-text-shift.golden b/vendor/github.com/klauspost/compress/flate/testdata/huffman-text-shift.golden new file mode 100644 index 0000000..ff02311 Binary files /dev/null and b/vendor/github.com/klauspost/compress/flate/testdata/huffman-text-shift.golden differ diff --git a/vendor/github.com/klauspost/compress/flate/testdata/huffman-text-shift.in b/vendor/github.com/klauspost/compress/flate/testdata/huffman-text-shift.in new file mode 100644 index 0000000..cc5c3ad --- /dev/null +++ b/vendor/github.com/klauspost/compress/flate/testdata/huffman-text-shift.in @@ -0,0 +1,14 @@ +//Copyright2009ThGoAuthor.Allrightrrvd. +//UofthiourccodigovrndbyBSD-tyl +//licnthtcnbfoundinthLICENSEfil. + +pckgmin + +import"o" + +funcmin(){ + vrb=mk([]byt,65535) + f,_:=o.Crt("huffmn-null-mx.in") + f.Writ(b) +} +ABCDEFGHIJKLMNOPQRSTUVXxyz!"#¤%&/?" \ No newline at end of file diff --git a/vendor/github.com/klauspost/compress/flate/testdata/huffman-text-shift.wb.expect b/vendor/github.com/klauspost/compress/flate/testdata/huffman-text-shift.wb.expect new file mode 100644 index 0000000..71ce3ae Binary files /dev/null and b/vendor/github.com/klauspost/compress/flate/testdata/huffman-text-shift.wb.expect differ diff --git a/vendor/github.com/klauspost/compress/flate/testdata/huffman-text-shift.wb.expect-noinput b/vendor/github.com/klauspost/compress/flate/testdata/huffman-text-shift.wb.expect-noinput new file mode 100644 index 0000000..71ce3ae Binary files /dev/null and b/vendor/github.com/klauspost/compress/flate/testdata/huffman-text-shift.wb.expect-noinput differ diff --git a/vendor/github.com/klauspost/compress/flate/testdata/huffman-text.dyn.expect b/vendor/github.com/klauspost/compress/flate/testdata/huffman-text.dyn.expect new file mode 100644 index 0000000..d448727 --- /dev/null +++ b/vendor/github.com/klauspost/compress/flate/testdata/huffman-text.dyn.expect @@ -0,0 +1 @@ +_K0`K0Aasě)^HIɟb߻_>4 a=-^ 1`_ 1 ő:Y-F66!A`aC;ANyr4ߜU!GKС#r:B[G3.L׶bFRuM]^⇳(#Z ivBBH2S]u/ֽWTGnr \ No newline at end of file diff --git a/vendor/github.com/klauspost/compress/flate/testdata/huffman-text.dyn.expect-noinput b/vendor/github.com/klauspost/compress/flate/testdata/huffman-text.dyn.expect-noinput new file mode 100644 index 0000000..d448727 --- /dev/null +++ b/vendor/github.com/klauspost/compress/flate/testdata/huffman-text.dyn.expect-noinput @@ -0,0 +1 @@ +_K0`K0Aasě)^HIɟb߻_>4 a=-^ 1`_ 1 ő:Y-F66!A`aC;ANyr4ߜU!GKС#r:B[G3.L׶bFRuM]^⇳(#Z ivBBH2S]u/ֽWTGnr \ No newline at end of file diff --git a/vendor/github.com/klauspost/compress/flate/testdata/huffman-text.golden b/vendor/github.com/klauspost/compress/flate/testdata/huffman-text.golden new file 
mode 100644 index 0000000..6d34c61 --- /dev/null +++ b/vendor/github.com/klauspost/compress/flate/testdata/huffman-text.golden @@ -0,0 +1,3 @@ +AK0xßZLPa!xADI&#IEp]LƿFp 188h$5S- F66!)v.0Y& SN|d2: +t|둍xz9骺Ɏ3 +&&=ôUD=Fu]qUL+>FQYLZofTߵEŴ{Yʶbe \ No newline at end of file diff --git a/vendor/github.com/klauspost/compress/flate/testdata/huffman-text.in b/vendor/github.com/klauspost/compress/flate/testdata/huffman-text.in new file mode 100644 index 0000000..73398b9 --- /dev/null +++ b/vendor/github.com/klauspost/compress/flate/testdata/huffman-text.in @@ -0,0 +1,13 @@ +// Copyright 2009 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package main + +import "os" + +func main() { + var b = make([]byte, 65535) + f, _ := os.Create("huffman-null-max.in") + f.Write(b) +} diff --git a/vendor/github.com/klauspost/compress/flate/testdata/huffman-text.wb.expect b/vendor/github.com/klauspost/compress/flate/testdata/huffman-text.wb.expect new file mode 100644 index 0000000..d448727 --- /dev/null +++ b/vendor/github.com/klauspost/compress/flate/testdata/huffman-text.wb.expect @@ -0,0 +1 @@ +_K0`K0Aasě)^HIɟb߻_>4 a=-^ 1`_ 1 ő:Y-F66!A`aC;ANyr4ߜU!GKС#r:B[G3.L׶bFRuM]^⇳(#Z ivBBH2S]u/ֽWTGnr \ No newline at end of file diff --git a/vendor/github.com/klauspost/compress/flate/testdata/huffman-text.wb.expect-noinput b/vendor/github.com/klauspost/compress/flate/testdata/huffman-text.wb.expect-noinput new file mode 100644 index 0000000..d448727 --- /dev/null +++ b/vendor/github.com/klauspost/compress/flate/testdata/huffman-text.wb.expect-noinput @@ -0,0 +1 @@ +_K0`K0Aasě)^HIɟb߻_>4 a=-^ 1`_ 1 ő:Y-F66!A`aC;ANyr4ߜU!GKС#r:B[G3.L׶bFRuM]^⇳(#Z ivBBH2S]u/ֽWTGnr \ No newline at end of file diff --git a/vendor/github.com/klauspost/compress/flate/testdata/huffman-zero.dyn.expect b/vendor/github.com/klauspost/compress/flate/testdata/huffman-zero.dyn.expect new file mode 100644 index 0000000..830348a Binary files /dev/null and b/vendor/github.com/klauspost/compress/flate/testdata/huffman-zero.dyn.expect differ diff --git a/vendor/github.com/klauspost/compress/flate/testdata/huffman-zero.dyn.expect-noinput b/vendor/github.com/klauspost/compress/flate/testdata/huffman-zero.dyn.expect-noinput new file mode 100644 index 0000000..830348a Binary files /dev/null and b/vendor/github.com/klauspost/compress/flate/testdata/huffman-zero.dyn.expect-noinput differ diff --git a/vendor/github.com/klauspost/compress/flate/testdata/huffman-zero.golden b/vendor/github.com/klauspost/compress/flate/testdata/huffman-zero.golden new file mode 100644 index 0000000..5abdbaf Binary files /dev/null and b/vendor/github.com/klauspost/compress/flate/testdata/huffman-zero.golden differ diff --git a/vendor/github.com/klauspost/compress/flate/testdata/huffman-zero.in b/vendor/github.com/klauspost/compress/flate/testdata/huffman-zero.in new file mode 100644 index 0000000..349be0e --- /dev/null +++ b/vendor/github.com/klauspost/compress/flate/testdata/huffman-zero.in @@ -0,0 +1 @@ +00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000 \ No newline at end of file diff --git a/vendor/github.com/klauspost/compress/flate/testdata/huffman-zero.wb.expect 
b/vendor/github.com/klauspost/compress/flate/testdata/huffman-zero.wb.expect new file mode 100644 index 0000000..dbe401c Binary files /dev/null and b/vendor/github.com/klauspost/compress/flate/testdata/huffman-zero.wb.expect differ diff --git a/vendor/github.com/klauspost/compress/flate/testdata/huffman-zero.wb.expect-noinput b/vendor/github.com/klauspost/compress/flate/testdata/huffman-zero.wb.expect-noinput new file mode 100644 index 0000000..dbe401c Binary files /dev/null and b/vendor/github.com/klauspost/compress/flate/testdata/huffman-zero.wb.expect-noinput differ diff --git a/vendor/github.com/klauspost/compress/flate/testdata/null-long-match.dyn.expect-noinput b/vendor/github.com/klauspost/compress/flate/testdata/null-long-match.dyn.expect-noinput new file mode 100644 index 0000000..8b92d9f Binary files /dev/null and b/vendor/github.com/klauspost/compress/flate/testdata/null-long-match.dyn.expect-noinput differ diff --git a/vendor/github.com/klauspost/compress/flate/testdata/null-long-match.wb.expect-noinput b/vendor/github.com/klauspost/compress/flate/testdata/null-long-match.wb.expect-noinput new file mode 100644 index 0000000..8b92d9f Binary files /dev/null and b/vendor/github.com/klauspost/compress/flate/testdata/null-long-match.wb.expect-noinput differ diff --git a/vendor/github.com/klauspost/compress/flate/token.go b/vendor/github.com/klauspost/compress/flate/token.go new file mode 100644 index 0000000..4f275ea --- /dev/null +++ b/vendor/github.com/klauspost/compress/flate/token.go @@ -0,0 +1,115 @@ +// Copyright 2009 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package flate + +import "fmt" + +const ( + // 2 bits: type 0 = literal 1=EOF 2=Match 3=Unused + // 8 bits: xlength = length - MIN_MATCH_LENGTH + // 22 bits xoffset = offset - MIN_OFFSET_SIZE, or literal + lengthShift = 22 + offsetMask = 1<<lengthShift - 1 + typeMask = 3 << 30 + literalType = 0 << 30 + matchType = 1 << 30 +) + +type token uint32 + +// Convert a literal into a literal token. +func literalToken(literal uint32) token { return token(literalType + literal) } + +// Convert a < xlength, xoffset > pair into a match token. +func matchToken(xlength uint32, xoffset uint32) token { + return token(matchType + xlength<<lengthShift + xoffset) +} + +func matchTokend(xlength uint32, xoffset uint32) token { + if xlength > maxMatchLength || xoffset > maxMatchOffset { + panic(fmt.Sprintf("Invalid match: len: %d, offset: %d\n", xlength, xoffset)) + return token(matchType) + } + return token(matchType + xlength<<lengthShift + xoffset) +} + +// Returns the type of a token +func (t token) typ() uint32 { return uint32(t) & typeMask } + +// Returns the literal of a literal token +func (t token) literal() uint32 { return uint32(t - literalType) } + +// Returns the extra offset of a match token +func (t token) offset() uint32 { return uint32(t) & offsetMask } + +func (t token) length() uint32 { return uint32((t - matchType) >> lengthShift) } + +func lengthCode(len uint32) uint32 { return lengthCodes[len] } + +// Returns the offset code corresponding to a specific offset +func offsetCode(off uint32) uint32 { + if off < uint32(len(offsetCodes)) { + return offsetCodes[off] + } else if off>>7 < uint32(len(offsetCodes)) { + return offsetCodes[off>>7] + 14 + } else { + return offsetCodes[off>>14] + 28 + } +} diff --git a/vendor/github.com/klauspost/compress/flate/writer_test.go b/vendor/github.com/klauspost/compress/flate/writer_test.go new file mode 100644 index 0000000..024512a --- /dev/null +++ b/vendor/github.com/klauspost/compress/flate/writer_test.go @@ -0,0 +1,258 @@ +// Copyright 2012 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file.
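// Worked example of the token layout documented in token.go above
// (illustrative only, not from the vendored sources; it assumes the standard
// library flate values baseMatchLength = 3 and baseMatchOffset = 1): a 7-byte
// match at distance 100 is stored as xlength = 7-3 = 4 and xoffset = 100-1 = 99,
// so
//
//	matchToken(4, 99) == 1<<30 + 4<<22 + 99 == 0x41000063
//
// and the accessors recover offset() == 99 (mask with offsetMask) and
// length() == 4 (shift right by lengthShift).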
+ +package flate + +import ( + "bytes" + "fmt" + "io" + "io/ioutil" + "math/rand" + "runtime" + "testing" +) + +func benchmarkEncoder(b *testing.B, testfile, level, n int) { + b.StopTimer() + b.SetBytes(int64(n)) + buf0, err := ioutil.ReadFile(testfiles[testfile]) + if err != nil { + b.Fatal(err) + } + if len(buf0) == 0 { + b.Fatalf("test file %q has no data", testfiles[testfile]) + } + buf1 := make([]byte, n) + for i := 0; i < n; i += len(buf0) { + if len(buf0) > n-i { + buf0 = buf0[:n-i] + } + copy(buf1[i:], buf0) + } + buf0 = nil + runtime.GC() + w, err := NewWriter(ioutil.Discard, level) + b.StartTimer() + for i := 0; i < b.N; i++ { + w.Reset(ioutil.Discard) + _, err = w.Write(buf1) + if err != nil { + b.Fatal(err) + } + err = w.Close() + if err != nil { + b.Fatal(err) + } + } +} + +func BenchmarkEncodeDigitsConstant1e4(b *testing.B) { benchmarkEncoder(b, digits, constant, 1e4) } +func BenchmarkEncodeDigitsConstant1e5(b *testing.B) { benchmarkEncoder(b, digits, constant, 1e5) } +func BenchmarkEncodeDigitsConstant1e6(b *testing.B) { benchmarkEncoder(b, digits, constant, 1e6) } +func BenchmarkEncodeDigitsSpeed1e4(b *testing.B) { benchmarkEncoder(b, digits, speed, 1e4) } +func BenchmarkEncodeDigitsSpeed1e5(b *testing.B) { benchmarkEncoder(b, digits, speed, 1e5) } +func BenchmarkEncodeDigitsSpeed1e6(b *testing.B) { benchmarkEncoder(b, digits, speed, 1e6) } +func BenchmarkEncodeDigitsDefault1e4(b *testing.B) { benchmarkEncoder(b, digits, default_, 1e4) } +func BenchmarkEncodeDigitsDefault1e5(b *testing.B) { benchmarkEncoder(b, digits, default_, 1e5) } +func BenchmarkEncodeDigitsDefault1e6(b *testing.B) { benchmarkEncoder(b, digits, default_, 1e6) } +func BenchmarkEncodeDigitsCompress1e4(b *testing.B) { benchmarkEncoder(b, digits, compress, 1e4) } +func BenchmarkEncodeDigitsCompress1e5(b *testing.B) { benchmarkEncoder(b, digits, compress, 1e5) } +func BenchmarkEncodeDigitsCompress1e6(b *testing.B) { benchmarkEncoder(b, digits, compress, 1e6) } +func BenchmarkEncodeTwainConstant1e4(b *testing.B) { benchmarkEncoder(b, twain, constant, 1e4) } +func BenchmarkEncodeTwainConstant1e5(b *testing.B) { benchmarkEncoder(b, twain, constant, 1e5) } +func BenchmarkEncodeTwainConstant1e6(b *testing.B) { benchmarkEncoder(b, twain, constant, 1e6) } +func BenchmarkEncodeTwainSpeed1e4(b *testing.B) { benchmarkEncoder(b, twain, speed, 1e4) } +func BenchmarkEncodeTwainSpeed1e5(b *testing.B) { benchmarkEncoder(b, twain, speed, 1e5) } +func BenchmarkEncodeTwainSpeed1e6(b *testing.B) { benchmarkEncoder(b, twain, speed, 1e6) } +func BenchmarkEncodeTwainDefault1e4(b *testing.B) { benchmarkEncoder(b, twain, default_, 1e4) } +func BenchmarkEncodeTwainDefault1e5(b *testing.B) { benchmarkEncoder(b, twain, default_, 1e5) } +func BenchmarkEncodeTwainDefault1e6(b *testing.B) { benchmarkEncoder(b, twain, default_, 1e6) } +func BenchmarkEncodeTwainCompress1e4(b *testing.B) { benchmarkEncoder(b, twain, compress, 1e4) } +func BenchmarkEncodeTwainCompress1e5(b *testing.B) { benchmarkEncoder(b, twain, compress, 1e5) } +func BenchmarkEncodeTwainCompress1e6(b *testing.B) { benchmarkEncoder(b, twain, compress, 1e6) } + +// A writer that fails after N writes. +type errorWriter struct { + N int +} + +func (e *errorWriter) Write(b []byte) (int, error) { + if e.N <= 0 { + return 0, io.ErrClosedPipe + } + e.N-- + return len(b), nil +} + +// Test if errors from the underlying writer is passed upwards. 
+func TestWriteError(t *testing.T) { + buf := new(bytes.Buffer) + n := 65536 + if !testing.Short() { + n *= 4 + } + for i := 0; i < n; i++ { + fmt.Fprintf(buf, "asdasfasf%d%dfghfgujyut%dyutyu\n", i, i, i) + } + in := buf.Bytes() + // We create our own buffer to control number of writes. + copyBuf := make([]byte, 128) + for l := 0; l < 10; l++ { + for fail := 1; fail <= 256; fail *= 2 { + // Fail after 'fail' writes + ew := &errorWriter{N: fail} + w, err := NewWriter(ew, l) + if err != nil { + t.Fatalf("NewWriter: level %d: %v", l, err) + } + n, err := copyBuffer(w, bytes.NewBuffer(in), copyBuf) + if err == nil { + t.Fatalf("Level %d: Expected an error, writer was %#v", l, ew) + } + n2, err := w.Write([]byte{1, 2, 2, 3, 4, 5}) + if n2 != 0 { + t.Fatal("Level", l, "Expected 0 length write, got", n) + } + if err == nil { + t.Fatal("Level", l, "Expected an error") + } + err = w.Flush() + if err == nil { + t.Fatal("Level", l, "Expected an error on flush") + } + err = w.Close() + if err == nil { + t.Fatal("Level", l, "Expected an error on close") + } + + w.Reset(ioutil.Discard) + n2, err = w.Write([]byte{1, 2, 3, 4, 5, 6}) + if err != nil { + t.Fatal("Level", l, "Got unexpected error after reset:", err) + } + if n2 == 0 { + t.Fatal("Level", l, "Got 0 length write, expected > 0") + } + if testing.Short() { + return + } + } + } +} + +func TestDeterministicL1(t *testing.T) { testDeterministic(1, t) } +func TestDeterministicL2(t *testing.T) { testDeterministic(2, t) } +func TestDeterministicL3(t *testing.T) { testDeterministic(3, t) } +func TestDeterministicL4(t *testing.T) { testDeterministic(4, t) } +func TestDeterministicL5(t *testing.T) { testDeterministic(5, t) } +func TestDeterministicL6(t *testing.T) { testDeterministic(6, t) } +func TestDeterministicL7(t *testing.T) { testDeterministic(7, t) } +func TestDeterministicL8(t *testing.T) { testDeterministic(8, t) } +func TestDeterministicL9(t *testing.T) { testDeterministic(9, t) } +func TestDeterministicL0(t *testing.T) { testDeterministic(0, t) } +func TestDeterministicLM2(t *testing.T) { testDeterministic(-2, t) } + +func testDeterministic(i int, t *testing.T) { + // Test so much we cross a good number of block boundaries. + var length = maxStoreBlockSize*30 + 500 + if testing.Short() { + length /= 10 + } + + // Create a random, but compressible stream. + rng := rand.New(rand.NewSource(1)) + t1 := make([]byte, length) + for i := range t1 { + t1[i] = byte(rng.Int63() & 7) + } + + // Do our first encode. + var b1 bytes.Buffer + br := bytes.NewBuffer(t1) + w, err := NewWriter(&b1, i) + if err != nil { + t.Fatal(err) + } + // Use a very small prime sized buffer. + cbuf := make([]byte, 787) + _, err = copyBuffer(w, br, cbuf) + if err != nil { + t.Fatal(err) + } + w.Close() + + // We choose a different buffer size, + // bigger than a maximum block, and also a prime. + var b2 bytes.Buffer + cbuf = make([]byte, 81761) + br2 := bytes.NewBuffer(t1) + w2, err := NewWriter(&b2, i) + if err != nil { + t.Fatal(err) + } + _, err = copyBuffer(w2, br2, cbuf) + if err != nil { + t.Fatal(err) + } + w2.Close() + + b1b := b1.Bytes() + b2b := b2.Bytes() + + if !bytes.Equal(b1b, b2b) { + t.Errorf("level %d did not produce deterministic result, result mismatch, len(a) = %d, len(b) = %d", i, len(b1b), len(b2b)) + } + + // Test using io.WriterTo interface. 
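// Why a third encode follows (an illustrative note, not part of the vendored
// file): bytes.Buffer implements io.WriterTo, so br.WriteTo(w) below hands the
// whole input to the compressor in a single Write instead of the small,
// prime-sized chunks used by copyBuffer above; the output must still be
// byte-identical to the earlier runs.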
+ var b3 bytes.Buffer + br = bytes.NewBuffer(t1) + w, err = NewWriter(&b3, i) + if err != nil { + t.Fatal(err) + } + _, err = br.WriteTo(w) + if err != nil { + t.Fatal(err) + } + w.Close() + + b3b := b3.Bytes() + if !bytes.Equal(b1b, b3b) { + t.Errorf("level %d (io.WriterTo) did not produce deterministic result, result mismatch, len(a) = %d, len(b) = %d", i, len(b1b), len(b3b)) + } +} + +// copyBuffer is a copy of io.CopyBuffer, since we want to support older go versions. +// This is modified to never use io.WriterTo or io.ReaderFrom interfaces. +func copyBuffer(dst io.Writer, src io.Reader, buf []byte) (written int64, err error) { + if buf == nil { + buf = make([]byte, 32*1024) + } + for { + nr, er := src.Read(buf) + if nr > 0 { + nw, ew := dst.Write(buf[0:nr]) + if nw > 0 { + written += int64(nw) + } + if ew != nil { + err = ew + break + } + if nr != nw { + err = io.ErrShortWrite + break + } + } + if er == io.EOF { + break + } + if er != nil { + err = er + break + } + } + return written, err +} diff --git a/vendor/github.com/klauspost/compress/gzip/example_test.go b/vendor/github.com/klauspost/compress/gzip/example_test.go new file mode 100644 index 0000000..e32346b --- /dev/null +++ b/vendor/github.com/klauspost/compress/gzip/example_test.go @@ -0,0 +1,128 @@ +// Copyright 2016 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package gzip_test + +import ( + "bytes" + "compress/gzip" + "fmt" + "io" + "log" + "os" + "time" +) + +func Example_writerReader() { + var buf bytes.Buffer + zw := gzip.NewWriter(&buf) + + // Setting the Header fields is optional. + zw.Name = "a-new-hope.txt" + zw.Comment = "an epic space opera by George Lucas" + zw.ModTime = time.Date(1977, time.May, 25, 0, 0, 0, 0, time.UTC) + + _, err := zw.Write([]byte("A long time ago in a galaxy far, far away...")) + if err != nil { + log.Fatal(err) + } + + if err := zw.Close(); err != nil { + log.Fatal(err) + } + + zr, err := gzip.NewReader(&buf) + if err != nil { + log.Fatal(err) + } + + fmt.Printf("Name: %s\nComment: %s\nModTime: %s\n\n", zr.Name, zr.Comment, zr.ModTime.UTC()) + + if _, err := io.Copy(os.Stdout, zr); err != nil { + log.Fatal(err) + } + + if err := zr.Close(); err != nil { + log.Fatal(err) + } + + // Output: + // Name: a-new-hope.txt + // Comment: an epic space opera by George Lucas + // ModTime: 1977-05-25 00:00:00 +0000 UTC + // + // A long time ago in a galaxy far, far away... 
+} + +func ExampleReader_Multistream() { + var buf bytes.Buffer + zw := gzip.NewWriter(&buf) + + var files = []struct { + name string + comment string + modTime time.Time + data string + }{ + {"file-1.txt", "file-header-1", time.Date(2006, time.February, 1, 3, 4, 5, 0, time.UTC), "Hello Gophers - 1"}, + {"file-2.txt", "file-header-2", time.Date(2007, time.March, 2, 4, 5, 6, 1, time.UTC), "Hello Gophers - 2"}, + } + + for _, file := range files { + zw.Name = file.name + zw.Comment = file.comment + zw.ModTime = file.modTime + + if _, err := zw.Write([]byte(file.data)); err != nil { + log.Fatal(err) + } + + if err := zw.Close(); err != nil { + log.Fatal(err) + } + + zw.Reset(&buf) + } + + zr, err := gzip.NewReader(&buf) + if err != nil { + log.Fatal(err) + } + + for { + zr.Multistream(false) + fmt.Printf("Name: %s\nComment: %s\nModTime: %s\n\n", zr.Name, zr.Comment, zr.ModTime.UTC()) + + if _, err := io.Copy(os.Stdout, zr); err != nil { + log.Fatal(err) + } + + fmt.Println("\n") + + err = zr.Reset(&buf) + if err == io.EOF { + break + } + if err != nil { + log.Fatal(err) + } + } + + if err := zr.Close(); err != nil { + log.Fatal(err) + } + + // Output: + // Name: file-1.txt + // Comment: file-header-1 + // ModTime: 2006-02-01 03:04:05 +0000 UTC + // + // Hello Gophers - 1 + // + // Name: file-2.txt + // Comment: file-header-2 + // ModTime: 2007-03-02 04:05:06 +0000 UTC + // + // Hello Gophers - 2 +} diff --git a/vendor/github.com/klauspost/compress/gzip/gunzip.go b/vendor/github.com/klauspost/compress/gzip/gunzip.go new file mode 100644 index 0000000..e73fab3 --- /dev/null +++ b/vendor/github.com/klauspost/compress/gzip/gunzip.go @@ -0,0 +1,344 @@ +// Copyright 2009 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package gzip implements reading and writing of gzip format compressed files, +// as specified in RFC 1952. +package gzip + +import ( + "bufio" + "encoding/binary" + "errors" + "io" + "time" + + "github.com/klauspost/compress/flate" + "github.com/klauspost/crc32" +) + +const ( + gzipID1 = 0x1f + gzipID2 = 0x8b + gzipDeflate = 8 + flagText = 1 << 0 + flagHdrCrc = 1 << 1 + flagExtra = 1 << 2 + flagName = 1 << 3 + flagComment = 1 << 4 +) + +var ( + // ErrChecksum is returned when reading GZIP data that has an invalid checksum. + ErrChecksum = errors.New("gzip: invalid checksum") + // ErrHeader is returned when reading GZIP data that has an invalid header. + ErrHeader = errors.New("gzip: invalid header") +) + +var le = binary.LittleEndian + +// noEOF converts io.EOF to io.ErrUnexpectedEOF. +func noEOF(err error) error { + if err == io.EOF { + return io.ErrUnexpectedEOF + } + return err +} + +// The gzip file stores a header giving metadata about the compressed file. +// That header is exposed as the fields of the Writer and Reader structs. +// +// Strings must be UTF-8 encoded and may only contain Unicode code points +// U+0001 through U+00FF, due to limitations of the GZIP file format. +type Header struct { + Comment string // comment + Extra []byte // "extra data" + ModTime time.Time // modification time + Name string // file name + OS byte // operating system type +} + +// A Reader is an io.Reader that can be read to retrieve +// uncompressed data from a gzip-format compressed file. +// +// In general, a gzip file can be a concatenation of gzip files, +// each with its own header. Reads from the Reader +// return the concatenation of the uncompressed data of each. 
+// Only the first header is recorded in the Reader fields. +// +// Gzip files store a length and checksum of the uncompressed data. +// The Reader will return a ErrChecksum when Read +// reaches the end of the uncompressed data if it does not +// have the expected length or checksum. Clients should treat data +// returned by Read as tentative until they receive the io.EOF +// marking the end of the data. +type Reader struct { + Header // valid after NewReader or Reader.Reset + r flate.Reader + decompressor io.ReadCloser + digest uint32 // CRC-32, IEEE polynomial (section 8) + size uint32 // Uncompressed size (section 2.3.1) + buf [512]byte + err error + multistream bool +} + +// NewReader creates a new Reader reading the given reader. +// If r does not also implement io.ByteReader, +// the decompressor may read more data than necessary from r. +// +// It is the caller's responsibility to call Close on the Reader when done. +// +// The Reader.Header fields will be valid in the Reader returned. +func NewReader(r io.Reader) (*Reader, error) { + z := new(Reader) + if err := z.Reset(r); err != nil { + return nil, err + } + return z, nil +} + +// Reset discards the Reader z's state and makes it equivalent to the +// result of its original state from NewReader, but reading from r instead. +// This permits reusing a Reader rather than allocating a new one. +func (z *Reader) Reset(r io.Reader) error { + *z = Reader{ + decompressor: z.decompressor, + multistream: true, + } + if rr, ok := r.(flate.Reader); ok { + z.r = rr + } else { + z.r = bufio.NewReader(r) + } + z.Header, z.err = z.readHeader() + return z.err +} + +// Multistream controls whether the reader supports multistream files. +// +// If enabled (the default), the Reader expects the input to be a sequence +// of individually gzipped data streams, each with its own header and +// trailer, ending at EOF. The effect is that the concatenation of a sequence +// of gzipped files is treated as equivalent to the gzip of the concatenation +// of the sequence. This is standard behavior for gzip readers. +// +// Calling Multistream(false) disables this behavior; disabling the behavior +// can be useful when reading file formats that distinguish individual gzip +// data streams or mix gzip data streams with other data streams. +// In this mode, when the Reader reaches the end of the data stream, +// Read returns io.EOF. If the underlying reader implements io.ByteReader, +// it will be left positioned just after the gzip stream. +// To start the next stream, call z.Reset(r) followed by z.Multistream(false). +// If there is no next stream, z.Reset(r) will return io.EOF. +func (z *Reader) Multistream(ok bool) { + z.multistream = ok +} + +// readString reads a NUL-terminated string from z.r. +// It treats the bytes read as being encoded as ISO 8859-1 (Latin-1) and +// will output a string encoded using UTF-8. +// This method always updates z.digest with the data read. +func (z *Reader) readString() (string, error) { + var err error + needConv := false + for i := 0; ; i++ { + if i >= len(z.buf) { + return "", ErrHeader + } + z.buf[i], err = z.r.ReadByte() + if err != nil { + return "", err + } + if z.buf[i] > 0x7f { + needConv = true + } + if z.buf[i] == 0 { + // Digest covers the NUL terminator. + z.digest = crc32.Update(z.digest, crc32.IEEETable, z.buf[:i+1]) + + // Strings are ISO 8859-1, Latin-1 (RFC 1952, section 2.3.1). 
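// For illustration (not from the upstream file): Latin-1 maps every byte
// 0x00-0xFF to the Unicode code point of the same value, so the conversion
// below only widens bytes to runes and lets Go re-encode them as UTF-8, e.g.
//
//	string([]rune{rune(byte(0xE9))}) == "\u00e9" // 'é', encoded as 0xC3 0xA9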
+ if needConv { + s := make([]rune, 0, i) + for _, v := range z.buf[:i] { + s = append(s, rune(v)) + } + return string(s), nil + } + return string(z.buf[:i]), nil + } + } +} + +// readHeader reads the GZIP header according to section 2.3.1. +// This method does not set z.err. +func (z *Reader) readHeader() (hdr Header, err error) { + if _, err = io.ReadFull(z.r, z.buf[:10]); err != nil { + // RFC 1952, section 2.2, says the following: + // A gzip file consists of a series of "members" (compressed data sets). + // + // Other than this, the specification does not clarify whether a + // "series" is defined as "one or more" or "zero or more". To err on the + // side of caution, Go interprets this to mean "zero or more". + // Thus, it is okay to return io.EOF here. + return hdr, err + } + if z.buf[0] != gzipID1 || z.buf[1] != gzipID2 || z.buf[2] != gzipDeflate { + return hdr, ErrHeader + } + flg := z.buf[3] + hdr.ModTime = time.Unix(int64(le.Uint32(z.buf[4:8])), 0) + // z.buf[8] is XFL and is currently ignored. + hdr.OS = z.buf[9] + z.digest = crc32.ChecksumIEEE(z.buf[:10]) + + if flg&flagExtra != 0 { + if _, err = io.ReadFull(z.r, z.buf[:2]); err != nil { + return hdr, noEOF(err) + } + z.digest = crc32.Update(z.digest, crc32.IEEETable, z.buf[:2]) + data := make([]byte, le.Uint16(z.buf[:2])) + if _, err = io.ReadFull(z.r, data); err != nil { + return hdr, noEOF(err) + } + z.digest = crc32.Update(z.digest, crc32.IEEETable, data) + hdr.Extra = data + } + + var s string + if flg&flagName != 0 { + if s, err = z.readString(); err != nil { + return hdr, err + } + hdr.Name = s + } + + if flg&flagComment != 0 { + if s, err = z.readString(); err != nil { + return hdr, err + } + hdr.Comment = s + } + + if flg&flagHdrCrc != 0 { + if _, err = io.ReadFull(z.r, z.buf[:2]); err != nil { + return hdr, noEOF(err) + } + digest := le.Uint16(z.buf[:2]) + if digest != uint16(z.digest) { + return hdr, ErrHeader + } + } + + z.digest = 0 + if z.decompressor == nil { + z.decompressor = flate.NewReader(z.r) + } else { + z.decompressor.(flate.Resetter).Reset(z.r, nil) + } + return hdr, nil +} + +// Read implements io.Reader, reading uncompressed bytes from its underlying Reader. +func (z *Reader) Read(p []byte) (n int, err error) { + if z.err != nil { + return 0, z.err + } + + n, z.err = z.decompressor.Read(p) + z.digest = crc32.Update(z.digest, crc32.IEEETable, p[:n]) + z.size += uint32(n) + if z.err != io.EOF { + // In the normal case we return here. + return n, z.err + } + + // Finished file; check checksum and size. + if _, err := io.ReadFull(z.r, z.buf[:8]); err != nil { + z.err = noEOF(err) + return n, z.err + } + digest := le.Uint32(z.buf[:4]) + size := le.Uint32(z.buf[4:8]) + if digest != z.digest || size != z.size { + z.err = ErrChecksum + return n, z.err + } + z.digest, z.size = 0, 0 + + // File is ok; check if there is another. + if !z.multistream { + return n, io.EOF + } + z.err = nil // Remove io.EOF + + if _, z.err = z.readHeader(); z.err != nil { + return n, z.err + } + + // Read from next file, if necessary. + if n > 0 { + return n, nil + } + return z.Read(p) +} + +// Support the io.WriteTo interface for io.Copy and friends. +func (z *Reader) WriteTo(w io.Writer) (int64, error) { + total := int64(0) + crcWriter := crc32.NewIEEE() + for { + if z.err != nil { + if z.err == io.EOF { + return total, nil + } + return total, z.err + } + + // We write both to output and digest. 
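// Illustrative note (not part of the vendored file): io.MultiWriter returns a
// writer that forwards each Write to all of its arguments, so mw.Write(p)
// behaves roughly like w.Write(p) followed by crcWriter.Write(p); the CRC-32
// therefore accumulates over exactly the bytes that were sent to w.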
+ mw := io.MultiWriter(w, crcWriter) + n, err := z.decompressor.(io.WriterTo).WriteTo(mw) + total += n + z.size += uint32(n) + if err != nil { + z.err = err + return total, z.err + } + + // Finished file; check checksum + size. + if _, err := io.ReadFull(z.r, z.buf[0:8]); err != nil { + if err == io.EOF { + err = io.ErrUnexpectedEOF + } + z.err = err + return total, err + } + z.digest = crcWriter.Sum32() + digest := le.Uint32(z.buf[:4]) + size := le.Uint32(z.buf[4:8]) + if digest != z.digest || size != z.size { + z.err = ErrChecksum + return total, z.err + } + z.digest, z.size = 0, 0 + + // File is ok; check if there is another. + if !z.multistream { + return total, nil + } + crcWriter.Reset() + z.err = nil // Remove io.EOF + + if _, z.err = z.readHeader(); z.err != nil { + if z.err == io.EOF { + return total, nil + } + return total, z.err + } + } +} + +// Close closes the Reader. It does not close the underlying io.Reader. +// In order for the GZIP checksum to be verified, the reader must be +// fully consumed until the io.EOF. +func (z *Reader) Close() error { return z.decompressor.Close() } diff --git a/vendor/github.com/klauspost/compress/gzip/gunzip_test.go b/vendor/github.com/klauspost/compress/gzip/gunzip_test.go new file mode 100644 index 0000000..c200ab1 --- /dev/null +++ b/vendor/github.com/klauspost/compress/gzip/gunzip_test.go @@ -0,0 +1,682 @@ +// Copyright 2009 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package gzip + +import ( + "bytes" + oldgz "compress/gzip" + "crypto/rand" + "io" + "io/ioutil" + "os" + "strings" + "testing" + "time" + + "github.com/klauspost/compress/flate" +) + +type gunzipTest struct { + name string + desc string + raw string + gzip []byte + err error +} + +var gunzipTests = []gunzipTest{ + { // has 1 empty fixed-huffman block + "empty.txt", + "empty.txt", + "", + []byte{ + 0x1f, 0x8b, 0x08, 0x08, 0xf7, 0x5e, 0x14, 0x4a, + 0x00, 0x03, 0x65, 0x6d, 0x70, 0x74, 0x79, 0x2e, + 0x74, 0x78, 0x74, 0x00, 0x03, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + }, + nil, + }, + { + "", + "empty - with no file name", + "", + []byte{ + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x09, 0x6e, 0x88, + 0x00, 0xff, 0x01, 0x00, 0x00, 0xff, 0xff, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + }, + nil, + }, + { // has 1 non-empty fixed huffman block + "hello.txt", + "hello.txt", + "hello world\n", + []byte{ + 0x1f, 0x8b, 0x08, 0x08, 0xc8, 0x58, 0x13, 0x4a, + 0x00, 0x03, 0x68, 0x65, 0x6c, 0x6c, 0x6f, 0x2e, + 0x74, 0x78, 0x74, 0x00, 0xcb, 0x48, 0xcd, 0xc9, + 0xc9, 0x57, 0x28, 0xcf, 0x2f, 0xca, 0x49, 0xe1, + 0x02, 0x00, 0x2d, 0x3b, 0x08, 0xaf, 0x0c, 0x00, + 0x00, 0x00, + }, + nil, + }, + { // concatenation + "hello.txt", + "hello.txt x2", + "hello world\n" + + "hello world\n", + []byte{ + 0x1f, 0x8b, 0x08, 0x08, 0xc8, 0x58, 0x13, 0x4a, + 0x00, 0x03, 0x68, 0x65, 0x6c, 0x6c, 0x6f, 0x2e, + 0x74, 0x78, 0x74, 0x00, 0xcb, 0x48, 0xcd, 0xc9, + 0xc9, 0x57, 0x28, 0xcf, 0x2f, 0xca, 0x49, 0xe1, + 0x02, 0x00, 0x2d, 0x3b, 0x08, 0xaf, 0x0c, 0x00, + 0x00, 0x00, + 0x1f, 0x8b, 0x08, 0x08, 0xc8, 0x58, 0x13, 0x4a, + 0x00, 0x03, 0x68, 0x65, 0x6c, 0x6c, 0x6f, 0x2e, + 0x74, 0x78, 0x74, 0x00, 0xcb, 0x48, 0xcd, 0xc9, + 0xc9, 0x57, 0x28, 0xcf, 0x2f, 0xca, 0x49, 0xe1, + 0x02, 0x00, 0x2d, 0x3b, 0x08, 0xaf, 0x0c, 0x00, + 0x00, 0x00, + }, + nil, + }, + { // has a fixed huffman block with some length-distance pairs + "shesells.txt", + "shesells.txt", + "she sells seashells by the seashore\n", + []byte{ + 
0x1f, 0x8b, 0x08, 0x08, 0x72, 0x66, 0x8b, 0x4a, + 0x00, 0x03, 0x73, 0x68, 0x65, 0x73, 0x65, 0x6c, + 0x6c, 0x73, 0x2e, 0x74, 0x78, 0x74, 0x00, 0x2b, + 0xce, 0x48, 0x55, 0x28, 0x4e, 0xcd, 0xc9, 0x29, + 0x06, 0x92, 0x89, 0xc5, 0x19, 0x60, 0x56, 0x52, + 0xa5, 0x42, 0x09, 0x58, 0x18, 0x28, 0x90, 0x5f, + 0x94, 0xca, 0x05, 0x00, 0x76, 0xb0, 0x3b, 0xeb, + 0x24, 0x00, 0x00, 0x00, + }, + nil, + }, + { // has dynamic huffman blocks + "gettysburg", + "gettysburg", + " Four score and seven years ago our fathers brought forth on\n" + + "this continent, a new nation, conceived in Liberty, and dedicated\n" + + "to the proposition that all men are created equal.\n" + + " Now we are engaged in a great Civil War, testing whether that\n" + + "nation, or any nation so conceived and so dedicated, can long\n" + + "endure.\n" + + " We are met on a great battle-field of that war.\n" + + " We have come to dedicate a portion of that field, as a final\n" + + "resting place for those who here gave their lives that that\n" + + "nation might live. It is altogether fitting and proper that\n" + + "we should do this.\n" + + " But, in a larger sense, we can not dedicate — we can not\n" + + "consecrate — we can not hallow — this ground.\n" + + " The brave men, living and dead, who struggled here, have\n" + + "consecrated it, far above our poor power to add or detract.\n" + + "The world will little note, nor long remember what we say here,\n" + + "but it can never forget what they did here.\n" + + " It is for us the living, rather, to be dedicated here to the\n" + + "unfinished work which they who fought here have thus far so\n" + + "nobly advanced. It is rather for us to be here dedicated to\n" + + "the great task remaining before us — that from these honored\n" + + "dead we take increased devotion to that cause for which they\n" + + "gave the last full measure of devotion —\n" + + " that we here highly resolve that these dead shall not have\n" + + "died in vain — that this nation, under God, shall have a new\n" + + "birth of freedom — and that government of the people, by the\n" + + "people, for the people, shall not perish from this earth.\n" + + "\n" + + "Abraham Lincoln, November 19, 1863, Gettysburg, Pennsylvania\n", + []byte{ + 0x1f, 0x8b, 0x08, 0x08, 0xd1, 0x12, 0x2b, 0x4a, + 0x00, 0x03, 0x67, 0x65, 0x74, 0x74, 0x79, 0x73, + 0x62, 0x75, 0x72, 0x67, 0x00, 0x65, 0x54, 0xcd, + 0x6e, 0xd4, 0x30, 0x10, 0xbe, 0xfb, 0x29, 0xe6, + 0x01, 0x42, 0xa5, 0x0a, 0x09, 0xc1, 0x11, 0x90, + 0x40, 0x48, 0xa8, 0xe2, 0x80, 0xd4, 0xf3, 0x24, + 0x9e, 0x24, 0x56, 0xbd, 0x9e, 0xc5, 0x76, 0x76, + 0x95, 0x1b, 0x0f, 0xc1, 0x13, 0xf2, 0x24, 0x7c, + 0x63, 0x77, 0x9b, 0x4a, 0x5c, 0xaa, 0x6e, 0x6c, + 0xcf, 0x7c, 0x7f, 0x33, 0x44, 0x5f, 0x74, 0xcb, + 0x54, 0x26, 0xcd, 0x42, 0x9c, 0x3c, 0x15, 0xb9, + 0x48, 0xa2, 0x5d, 0x38, 0x17, 0xe2, 0x45, 0xc9, + 0x4e, 0x67, 0xae, 0xab, 0xe0, 0xf7, 0x98, 0x75, + 0x5b, 0xd6, 0x4a, 0xb3, 0xe6, 0xba, 0x92, 0x26, + 0x57, 0xd7, 0x50, 0x68, 0xd2, 0x54, 0x43, 0x92, + 0x54, 0x07, 0x62, 0x4a, 0x72, 0xa5, 0xc4, 0x35, + 0x68, 0x1a, 0xec, 0x60, 0x92, 0x70, 0x11, 0x4f, + 0x21, 0xd1, 0xf7, 0x30, 0x4a, 0xae, 0xfb, 0xd0, + 0x9a, 0x78, 0xf1, 0x61, 0xe2, 0x2a, 0xde, 0x55, + 0x25, 0xd4, 0xa6, 0x73, 0xd6, 0xb3, 0x96, 0x60, + 0xef, 0xf0, 0x9b, 0x2b, 0x71, 0x8c, 0x74, 0x02, + 0x10, 0x06, 0xac, 0x29, 0x8b, 0xdd, 0x25, 0xf9, + 0xb5, 0x71, 0xbc, 0x73, 0x44, 0x0f, 0x7a, 0xa5, + 0xab, 0xb4, 0x33, 0x49, 0x0b, 0x2f, 0xbd, 0x03, + 0xd3, 0x62, 0x17, 0xe9, 0x73, 0xb8, 0x84, 0x48, + 0x8f, 0x9c, 0x07, 0xaa, 0x52, 0x00, 0x6d, 0xa1, + 0xeb, 0x2a, 
0xc6, 0xa0, 0x95, 0x76, 0x37, 0x78, + 0x9a, 0x81, 0x65, 0x7f, 0x46, 0x4b, 0x45, 0x5f, + 0xe1, 0x6d, 0x42, 0xe8, 0x01, 0x13, 0x5c, 0x38, + 0x51, 0xd4, 0xb4, 0x38, 0x49, 0x7e, 0xcb, 0x62, + 0x28, 0x1e, 0x3b, 0x82, 0x93, 0x54, 0x48, 0xf1, + 0xd2, 0x7d, 0xe4, 0x5a, 0xa3, 0xbc, 0x99, 0x83, + 0x44, 0x4f, 0x3a, 0x77, 0x36, 0x57, 0xce, 0xcf, + 0x2f, 0x56, 0xbe, 0x80, 0x90, 0x9e, 0x84, 0xea, + 0x51, 0x1f, 0x8f, 0xcf, 0x90, 0xd4, 0x60, 0xdc, + 0x5e, 0xb4, 0xf7, 0x10, 0x0b, 0x26, 0xe0, 0xff, + 0xc4, 0xd1, 0xe5, 0x67, 0x2e, 0xe7, 0xc8, 0x93, + 0x98, 0x05, 0xb8, 0xa8, 0x45, 0xc0, 0x4d, 0x09, + 0xdc, 0x84, 0x16, 0x2b, 0x0d, 0x9a, 0x21, 0x53, + 0x04, 0x8b, 0xd2, 0x0b, 0xbd, 0xa2, 0x4c, 0xa7, + 0x60, 0xee, 0xd9, 0xe1, 0x1d, 0xd1, 0xb7, 0x4a, + 0x30, 0x8f, 0x63, 0xd5, 0xa5, 0x8b, 0x33, 0x87, + 0xda, 0x1a, 0x18, 0x79, 0xf3, 0xe3, 0xa6, 0x17, + 0x94, 0x2e, 0xab, 0x6e, 0xa0, 0xe3, 0xcd, 0xac, + 0x50, 0x8c, 0xca, 0xa7, 0x0d, 0x76, 0x37, 0xd1, + 0x23, 0xe7, 0x05, 0x57, 0x8b, 0xa4, 0x22, 0x83, + 0xd9, 0x62, 0x52, 0x25, 0xad, 0x07, 0xbb, 0xbf, + 0xbf, 0xff, 0xbc, 0xfa, 0xee, 0x20, 0x73, 0x91, + 0x29, 0xff, 0x7f, 0x02, 0x71, 0x62, 0x84, 0xb5, + 0xf6, 0xb5, 0x25, 0x6b, 0x41, 0xde, 0x92, 0xb7, + 0x76, 0x3f, 0x91, 0x91, 0x31, 0x1b, 0x41, 0x84, + 0x62, 0x30, 0x0a, 0x37, 0xa4, 0x5e, 0x18, 0x3a, + 0x99, 0x08, 0xa5, 0xe6, 0x6d, 0x59, 0x22, 0xec, + 0x33, 0x39, 0x86, 0x26, 0xf5, 0xab, 0x66, 0xc8, + 0x08, 0x20, 0xcf, 0x0c, 0xd7, 0x47, 0x45, 0x21, + 0x0b, 0xf6, 0x59, 0xd5, 0xfe, 0x5c, 0x8d, 0xaa, + 0x12, 0x7b, 0x6f, 0xa1, 0xf0, 0x52, 0x33, 0x4f, + 0xf5, 0xce, 0x59, 0xd3, 0xab, 0x66, 0x10, 0xbf, + 0x06, 0xc4, 0x31, 0x06, 0x73, 0xd6, 0x80, 0xa2, + 0x78, 0xc2, 0x45, 0xcb, 0x03, 0x65, 0x39, 0xc9, + 0x09, 0xd1, 0x06, 0x04, 0x33, 0x1a, 0x5a, 0xf1, + 0xde, 0x01, 0xb8, 0x71, 0x83, 0xc4, 0xb5, 0xb3, + 0xc3, 0x54, 0x65, 0x33, 0x0d, 0x5a, 0xf7, 0x9b, + 0x90, 0x7c, 0x27, 0x1f, 0x3a, 0x58, 0xa3, 0xd8, + 0xfd, 0x30, 0x5f, 0xb7, 0xd2, 0x66, 0xa2, 0x93, + 0x1c, 0x28, 0xb7, 0xe9, 0x1b, 0x0c, 0xe1, 0x28, + 0x47, 0x26, 0xbb, 0xe9, 0x7d, 0x7e, 0xdc, 0x96, + 0x10, 0x92, 0x50, 0x56, 0x7c, 0x06, 0xe2, 0x27, + 0xb4, 0x08, 0xd3, 0xda, 0x7b, 0x98, 0x34, 0x73, + 0x9f, 0xdb, 0xf6, 0x62, 0xed, 0x31, 0x41, 0x13, + 0xd3, 0xa2, 0xa8, 0x4b, 0x3a, 0xc6, 0x1d, 0xe4, + 0x2f, 0x8c, 0xf8, 0xfb, 0x97, 0x64, 0xf4, 0xb6, + 0x2f, 0x80, 0x5a, 0xf3, 0x56, 0xe0, 0x40, 0x50, + 0xd5, 0x19, 0xd0, 0x1e, 0xfc, 0xca, 0xe5, 0xc9, + 0xd4, 0x60, 0x00, 0x81, 0x2e, 0xa3, 0xcc, 0xb6, + 0x52, 0xf0, 0xb4, 0xdb, 0x69, 0x99, 0xce, 0x7a, + 0x32, 0x4c, 0x08, 0xed, 0xaa, 0x10, 0x10, 0xe3, + 0x6f, 0xee, 0x99, 0x68, 0x95, 0x9f, 0x04, 0x71, + 0xb2, 0x49, 0x2f, 0x62, 0xa6, 0x5e, 0xb4, 0xef, + 0x02, 0xed, 0x4f, 0x27, 0xde, 0x4a, 0x0f, 0xfd, + 0xc1, 0xcc, 0xdd, 0x02, 0x8f, 0x08, 0x16, 0x54, + 0xdf, 0xda, 0xca, 0xe0, 0x82, 0xf1, 0xb4, 0x31, + 0x7a, 0xa9, 0x81, 0xfe, 0x90, 0xb7, 0x3e, 0xdb, + 0xd3, 0x35, 0xc0, 0x20, 0x80, 0x33, 0x46, 0x4a, + 0x63, 0xab, 0xd1, 0x0d, 0x29, 0xd2, 0xe2, 0x84, + 0xb8, 0xdb, 0xfa, 0xe9, 0x89, 0x44, 0x86, 0x7c, + 0xe8, 0x0b, 0xe6, 0x02, 0x6a, 0x07, 0x9b, 0x96, + 0xd0, 0xdb, 0x2e, 0x41, 0x4c, 0xa1, 0xd5, 0x57, + 0x45, 0x14, 0xfb, 0xe3, 0xa6, 0x72, 0x5b, 0x87, + 0x6e, 0x0c, 0x6d, 0x5b, 0xce, 0xe0, 0x2f, 0xe2, + 0x21, 0x81, 0x95, 0xb0, 0xe8, 0xb6, 0x32, 0x0b, + 0xb2, 0x98, 0x13, 0x52, 0x5d, 0xfb, 0xec, 0x63, + 0x17, 0x8a, 0x9e, 0x23, 0x22, 0x36, 0xee, 0xcd, + 0xda, 0xdb, 0xcf, 0x3e, 0xf1, 0xc7, 0xf1, 0x01, + 0x12, 0x93, 0x0a, 0xeb, 0x6f, 0xf2, 0x02, 0x15, + 0x96, 0x77, 0x5d, 0xef, 0x9c, 0xfb, 0x88, 0x91, + 0x59, 0xf9, 0x84, 0xdd, 0x9b, 0x26, 0x8d, 0x80, + 0xf9, 0x80, 
0x66, 0x2d, 0xac, 0xf7, 0x1f, 0x06, + 0xba, 0x7f, 0xff, 0xee, 0xed, 0x40, 0x5f, 0xa5, + 0xd6, 0xbd, 0x8c, 0x5b, 0x46, 0xd2, 0x7e, 0x48, + 0x4a, 0x65, 0x8f, 0x08, 0x42, 0x60, 0xf7, 0x0f, + 0xb9, 0x16, 0x0b, 0x0c, 0x1a, 0x06, 0x00, 0x00, + }, + nil, + }, + { // has 1 non-empty fixed huffman block then garbage + "hello.txt", + "hello.txt + garbage", + "hello world\n", + []byte{ + 0x1f, 0x8b, 0x08, 0x08, 0xc8, 0x58, 0x13, 0x4a, + 0x00, 0x03, 0x68, 0x65, 0x6c, 0x6c, 0x6f, 0x2e, + 0x74, 0x78, 0x74, 0x00, 0xcb, 0x48, 0xcd, 0xc9, + 0xc9, 0x57, 0x28, 0xcf, 0x2f, 0xca, 0x49, 0xe1, + 0x02, 0x00, 0x2d, 0x3b, 0x08, 0xaf, 0x0c, 0x00, + 0x00, 0x00, 'g', 'a', 'r', 'b', 'a', 'g', 'e', '!', '!', '!', + }, + ErrHeader, + }, + { // has 1 non-empty fixed huffman block not enough header + "hello.txt", + "hello.txt + garbage", + "hello world\n", + []byte{ + 0x1f, 0x8b, 0x08, 0x08, 0xc8, 0x58, 0x13, 0x4a, + 0x00, 0x03, 0x68, 0x65, 0x6c, 0x6c, 0x6f, 0x2e, + 0x74, 0x78, 0x74, 0x00, 0xcb, 0x48, 0xcd, 0xc9, + 0xc9, 0x57, 0x28, 0xcf, 0x2f, 0xca, 0x49, 0xe1, + 0x02, 0x00, 0x2d, 0x3b, 0x08, 0xaf, 0x0c, 0x00, + 0x00, 0x00, gzipID1, + }, + io.ErrUnexpectedEOF, + }, + { // has 1 non-empty fixed huffman block but corrupt checksum + "hello.txt", + "hello.txt + corrupt checksum", + "hello world\n", + []byte{ + 0x1f, 0x8b, 0x08, 0x08, 0xc8, 0x58, 0x13, 0x4a, + 0x00, 0x03, 0x68, 0x65, 0x6c, 0x6c, 0x6f, 0x2e, + 0x74, 0x78, 0x74, 0x00, 0xcb, 0x48, 0xcd, 0xc9, + 0xc9, 0x57, 0x28, 0xcf, 0x2f, 0xca, 0x49, 0xe1, + 0x02, 0x00, 0xff, 0xff, 0xff, 0xff, 0x0c, 0x00, + 0x00, 0x00, + }, + ErrChecksum, + }, + { // has 1 non-empty fixed huffman block but corrupt size + "hello.txt", + "hello.txt + corrupt size", + "hello world\n", + []byte{ + 0x1f, 0x8b, 0x08, 0x08, 0xc8, 0x58, 0x13, 0x4a, + 0x00, 0x03, 0x68, 0x65, 0x6c, 0x6c, 0x6f, 0x2e, + 0x74, 0x78, 0x74, 0x00, 0xcb, 0x48, 0xcd, 0xc9, + 0xc9, 0x57, 0x28, 0xcf, 0x2f, 0xca, 0x49, 0xe1, + 0x02, 0x00, 0x2d, 0x3b, 0x08, 0xaf, 0xff, 0x00, + 0x00, 0x00, + }, + ErrChecksum, + }, + { + "f1l3n4m3.tXt", + "header with all fields used", + "", + []byte{ + 0x1f, 0x8b, 0x08, 0x1e, 0x70, 0xf0, 0xf9, 0x4a, + 0x00, 0xaa, 0x09, 0x00, 0x7a, 0x7a, 0x05, 0x00, + 0x61, 0x62, 0x63, 0x64, 0x65, 0x66, 0x31, 0x6c, + 0x33, 0x6e, 0x34, 0x6d, 0x33, 0x2e, 0x74, 0x58, + 0x74, 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, + 0x07, 0x08, 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, + 0x0f, 0x10, 0x11, 0x12, 0x13, 0x14, 0x15, 0x16, + 0x17, 0x18, 0x19, 0x1a, 0x1b, 0x1c, 0x1d, 0x1e, + 0x1f, 0x20, 0x21, 0x22, 0x23, 0x24, 0x25, 0x26, + 0x27, 0x28, 0x29, 0x2a, 0x2b, 0x2c, 0x2d, 0x2e, + 0x2f, 0x30, 0x31, 0x32, 0x33, 0x34, 0x35, 0x36, + 0x37, 0x38, 0x39, 0x3a, 0x3b, 0x3c, 0x3d, 0x3e, + 0x3f, 0x40, 0x41, 0x42, 0x43, 0x44, 0x45, 0x46, + 0x47, 0x48, 0x49, 0x4a, 0x4b, 0x4c, 0x4d, 0x4e, + 0x4f, 0x50, 0x51, 0x52, 0x53, 0x54, 0x55, 0x56, + 0x57, 0x58, 0x59, 0x5a, 0x5b, 0x5c, 0x5d, 0x5e, + 0x5f, 0x60, 0x61, 0x62, 0x63, 0x64, 0x65, 0x66, + 0x67, 0x68, 0x69, 0x6a, 0x6b, 0x6c, 0x6d, 0x6e, + 0x6f, 0x70, 0x71, 0x72, 0x73, 0x74, 0x75, 0x76, + 0x77, 0x78, 0x79, 0x7a, 0x7b, 0x7c, 0x7d, 0x7e, + 0x7f, 0x80, 0x81, 0x82, 0x83, 0x84, 0x85, 0x86, + 0x87, 0x88, 0x89, 0x8a, 0x8b, 0x8c, 0x8d, 0x8e, + 0x8f, 0x90, 0x91, 0x92, 0x93, 0x94, 0x95, 0x96, + 0x97, 0x98, 0x99, 0x9a, 0x9b, 0x9c, 0x9d, 0x9e, + 0x9f, 0xa0, 0xa1, 0xa2, 0xa3, 0xa4, 0xa5, 0xa6, + 0xa7, 0xa8, 0xa9, 0xaa, 0xab, 0xac, 0xad, 0xae, + 0xaf, 0xb0, 0xb1, 0xb2, 0xb3, 0xb4, 0xb5, 0xb6, + 0xb7, 0xb8, 0xb9, 0xba, 0xbb, 0xbc, 0xbd, 0xbe, + 0xbf, 0xc0, 0xc1, 0xc2, 0xc3, 0xc4, 0xc5, 0xc6, + 0xc7, 0xc8, 0xc9, 0xca, 
0xcb, 0xcc, 0xcd, 0xce, + 0xcf, 0xd0, 0xd1, 0xd2, 0xd3, 0xd4, 0xd5, 0xd6, + 0xd7, 0xd8, 0xd9, 0xda, 0xdb, 0xdc, 0xdd, 0xde, + 0xdf, 0xe0, 0xe1, 0xe2, 0xe3, 0xe4, 0xe5, 0xe6, + 0xe7, 0xe8, 0xe9, 0xea, 0xeb, 0xec, 0xed, 0xee, + 0xef, 0xf0, 0xf1, 0xf2, 0xf3, 0xf4, 0xf5, 0xf6, + 0xf7, 0xf8, 0xf9, 0xfa, 0xfb, 0xfc, 0xfd, 0xfe, + 0xff, 0x00, 0x92, 0xfd, 0x01, 0x00, 0x00, 0xff, + 0xff, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, + }, + nil, + }, + { + "", + "truncated gzip file amid raw-block", + "hello", + []byte{ + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xff, + 0x00, 0x0c, 0x00, 0xf3, 0xff, 0x68, 0x65, 0x6c, 0x6c, 0x6f, + }, + io.ErrUnexpectedEOF, + }, + { + "", + "truncated gzip file amid fixed-block", + "He", + []byte{ + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xff, + 0xf2, 0x48, 0xcd, + }, + io.ErrUnexpectedEOF, + }, +} + +func TestDecompressor(t *testing.T) { + b := new(bytes.Buffer) + for _, tt := range gunzipTests { + in := bytes.NewReader(tt.gzip) + gzip, err := NewReader(in) + if err != nil { + t.Errorf("%s: NewReader: %s", tt.name, err) + continue + } + defer gzip.Close() + if tt.name != gzip.Name { + t.Errorf("%s: got name %s", tt.name, gzip.Name) + } + b.Reset() + n, err := io.Copy(b, gzip) + if err != tt.err { + t.Errorf("%s: io.Copy: %v want %v", tt.name, err, tt.err) + } + s := b.String() + if s != tt.raw { + t.Errorf("%s: got %d-byte %q want %d-byte %q", tt.name, n, s, len(tt.raw), tt.raw) + } + + // Test Reader Reset. + in = bytes.NewReader(tt.gzip) + err = gzip.Reset(in) + if err != nil { + t.Errorf("%s: Reset: %s", tt.name, err) + continue + } + if tt.name != gzip.Name { + t.Errorf("%s: got name %s", tt.name, gzip.Name) + } + b.Reset() + n, err = io.Copy(b, gzip) + if err != tt.err { + t.Errorf("%s: io.Copy: %v want %v", tt.name, err, tt.err) + } + s = b.String() + if s != tt.raw { + t.Errorf("%s: got %d-byte %q want %d-byte %q", tt.name, n, s, len(tt.raw), tt.raw) + } + } +} + +func TestIssue6550(t *testing.T) { + f, err := os.Open("testdata/issue6550.gz") + if err != nil { + t.Fatal(err) + } + gzip, err := NewReader(f) + if err != nil { + t.Fatalf("NewReader(testdata/issue6550.gz): %v", err) + } + defer gzip.Close() + done := make(chan bool, 1) + go func() { + _, err := io.Copy(ioutil.Discard, gzip) + if err == nil { + t.Errorf("Copy succeeded") + } else { + t.Logf("Copy failed (correctly): %v", err) + } + done <- true + }() + select { + case <-time.After(1 * time.Second): + t.Errorf("Copy hung") + case <-done: + // ok + } +} + +func TestInitialReset(t *testing.T) { + var r Reader + if err := r.Reset(bytes.NewReader(gunzipTests[1].gzip)); err != nil { + t.Error(err) + } + var buf bytes.Buffer + if _, err := io.Copy(&buf, &r); err != nil { + t.Error(err) + } + if s := buf.String(); s != gunzipTests[1].raw { + t.Errorf("got %q want %q", s, gunzipTests[1].raw) + } +} + +func TestMultistreamFalse(t *testing.T) { + // Find concatenation test. + var tt gunzipTest + for _, tt = range gunzipTests { + if strings.HasSuffix(tt.desc, " x2") { + goto Found + } + } + t.Fatal("cannot find hello.txt x2 in gunzip tests") + +Found: + br := bytes.NewReader(tt.gzip) + var r Reader + if err := r.Reset(br); err != nil { + t.Fatalf("first reset: %v", err) + } + + // Expect two streams with "hello world\n", then real EOF. 
+ const hello = "hello world\n" + + r.Multistream(false) + data, err := ioutil.ReadAll(&r) + if string(data) != hello || err != nil { + t.Fatalf("first stream = %q, %v, want %q, %v", string(data), err, hello, nil) + } + + if err := r.Reset(br); err != nil { + t.Fatalf("second reset: %v", err) + } + r.Multistream(false) + data, err = ioutil.ReadAll(&r) + if string(data) != hello || err != nil { + t.Fatalf("second stream = %q, %v, want %q, %v", string(data), err, hello, nil) + } + + if err := r.Reset(br); err != io.EOF { + t.Fatalf("third reset: err=%v, want io.EOF", err) + } +} + +func TestWriteTo(t *testing.T) { + input := make([]byte, 100000) + n, err := rand.Read(input) + if err != nil { + t.Fatal(err) + } + if n != len(input) { + t.Fatal("did not fill buffer") + } + compressed := &bytes.Buffer{} + // Do it twice to test MultiStream functionality + for i := 0; i < 2; i++ { + w, err := NewWriterLevel(compressed, -2) + if err != nil { + t.Fatal(err) + } + n, err = w.Write(input) + if err != nil { + t.Fatal(err) + } + if n != len(input) { + t.Fatal("did not fill buffer") + } + w.Close() + } + input = append(input, input...) + buf := compressed.Bytes() + + dec, err := NewReader(bytes.NewBuffer(buf)) + if err != nil { + t.Fatal(err) + } + // ReadAll does not use WriteTo, but we wrap it in a NopCloser to be sure. + readall, err := ioutil.ReadAll(ioutil.NopCloser(dec)) + if err != nil { + t.Fatal(err) + } + if len(readall) != len(input) { + t.Errorf("did not decompress everything, want %d, got %d", len(input), len(readall)) + } + if bytes.Compare(readall, input) != 0 { + t.Error("output did not match input") + } + + dec, err = NewReader(bytes.NewBuffer(buf)) + if err != nil { + t.Fatal(err) + } + wtbuf := &bytes.Buffer{} + written, err := dec.WriteTo(wtbuf) + if err != nil { + t.Fatal(err) + } + if written != int64(len(input)) { + t.Error("Returned length did not match, expected", len(input), "got", written) + } + if wtbuf.Len() != len(input) { + t.Error("Actual Length did not match, expected", len(input), "got", wtbuf.Len()) + } + if bytes.Compare(wtbuf.Bytes(), input) != 0 { + t.Fatal("output did not match input") + } +} + +func TestNilStream(t *testing.T) { + // Go liberally interprets RFC 1952 section 2.2 to mean that a gzip file + // consist of zero or more members. Thus, we test that a nil stream is okay. + _, err := NewReader(bytes.NewReader(nil)) + if err != io.EOF { + t.Fatalf("NewReader(nil) on empty stream: got %v, want io.EOF", err) + } +} + +func TestTruncatedStreams(t *testing.T) { + const data = "\x1f\x8b\b\x04\x00\tn\x88\x00\xff\a\x00foo bar\xcbH\xcd\xc9\xc9\xd7Q(\xcf/\xcaI\x01\x04:r\xab\xff\f\x00\x00\x00" + + // Intentionally iterate starting with at least one byte in the stream. + for i := 1; i < len(data)-1; i++ { + r, err := NewReader(strings.NewReader(data[:i])) + if err != nil { + if err != io.ErrUnexpectedEOF { + t.Errorf("NewReader(%d) on truncated stream: got %v, want %v", i, err, io.ErrUnexpectedEOF) + } + continue + } + _, err = io.Copy(ioutil.Discard, r) + if ferr, ok := err.(*flate.ReadError); ok { + err = ferr.Err + } + if err != io.ErrUnexpectedEOF { + t.Errorf("io.Copy(%d) on truncated stream: got %v, want %v", i, err, io.ErrUnexpectedEOF) + } + } +} + +func BenchmarkGunzipCopy(b *testing.B) { + dat, _ := ioutil.ReadFile("testdata/test.json") + dat = append(dat, dat...) + dat = append(dat, dat...) + dat = append(dat, dat...) + dat = append(dat, dat...) + dat = append(dat, dat...) 
+ dst := &bytes.Buffer{} + w, _ := NewWriterLevel(dst, 1) + _, err := w.Write(dat) + if err != nil { + b.Fatal(err) + } + w.Close() + input := dst.Bytes() + b.SetBytes(int64(len(dat))) + b.ResetTimer() + for n := 0; n < b.N; n++ { + r, err := NewReader(bytes.NewBuffer(input)) + if err != nil { + b.Fatal(err) + } + _, err = io.Copy(ioutil.Discard, r) + if err != nil { + b.Fatal(err) + } + } +} + +func BenchmarkGunzipNoWriteTo(b *testing.B) { + dat, _ := ioutil.ReadFile("testdata/test.json") + dat = append(dat, dat...) + dat = append(dat, dat...) + dat = append(dat, dat...) + dat = append(dat, dat...) + dat = append(dat, dat...) + dst := &bytes.Buffer{} + w, _ := NewWriterLevel(dst, 1) + _, err := w.Write(dat) + if err != nil { + b.Fatal(err) + } + w.Close() + input := dst.Bytes() + r, err := NewReader(bytes.NewBuffer(input)) + if err != nil { + b.Fatal(err) + } + b.SetBytes(int64(len(dat))) + b.ResetTimer() + for n := 0; n < b.N; n++ { + err := r.Reset(bytes.NewBuffer(input)) + if err != nil { + b.Fatal(err) + } + _, err = io.Copy(ioutil.Discard, ioutil.NopCloser(r)) + if err != nil { + b.Fatal(err) + } + } +} + +func BenchmarkGunzipStdlib(b *testing.B) { + dat, _ := ioutil.ReadFile("testdata/test.json") + dat = append(dat, dat...) + dat = append(dat, dat...) + dat = append(dat, dat...) + dat = append(dat, dat...) + dat = append(dat, dat...) + dst := &bytes.Buffer{} + w, _ := NewWriterLevel(dst, 1) + _, err := w.Write(dat) + if err != nil { + b.Fatal(err) + } + w.Close() + input := dst.Bytes() + r, err := oldgz.NewReader(bytes.NewBuffer(input)) + if err != nil { + b.Fatal(err) + } + b.SetBytes(int64(len(dat))) + b.ResetTimer() + for n := 0; n < b.N; n++ { + err := r.Reset(bytes.NewBuffer(input)) + if err != nil { + b.Fatal(err) + } + _, err = io.Copy(ioutil.Discard, r) + if err != nil { + b.Fatal(err) + } + } +} diff --git a/vendor/github.com/klauspost/compress/gzip/gzip.go b/vendor/github.com/klauspost/compress/gzip/gzip.go new file mode 100644 index 0000000..a0f3ed0 --- /dev/null +++ b/vendor/github.com/klauspost/compress/gzip/gzip.go @@ -0,0 +1,251 @@ +// Copyright 2010 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package gzip + +import ( + "errors" + "fmt" + "io" + + "github.com/klauspost/compress/flate" + "github.com/klauspost/crc32" +) + +// These constants are copied from the flate package, so that code that imports +// "compress/gzip" does not also have to import "compress/flate". +const ( + NoCompression = flate.NoCompression + BestSpeed = flate.BestSpeed + BestCompression = flate.BestCompression + DefaultCompression = flate.DefaultCompression + ConstantCompression = flate.ConstantCompression + HuffmanOnly = flate.HuffmanOnly +) + +// A Writer is an io.WriteCloser. +// Writes to a Writer are compressed and written to w. +type Writer struct { + Header // written at first call to Write, Flush, or Close + w io.Writer + level int + wroteHeader bool + compressor *flate.Writer + digest uint32 // CRC-32, IEEE polynomial (section 8) + size uint32 // Uncompressed size (section 2.3.1) + closed bool + buf [10]byte + err error +} + +// NewWriter returns a new Writer. +// Writes to the returned writer are compressed and written to w. +// +// It is the caller's responsibility to call Close on the WriteCloser when done. +// Writes may be buffered and not flushed until Close. +// +// Callers that wish to set the fields in Writer.Header must do so before +// the first call to Write, Flush, or Close. 
+func NewWriter(w io.Writer) *Writer { + z, _ := NewWriterLevel(w, DefaultCompression) + return z +} + +// NewWriterLevel is like NewWriter but specifies the compression level instead +// of assuming DefaultCompression. +// +// The compression level can be DefaultCompression, NoCompression, or any +// integer value between BestSpeed and BestCompression inclusive. The error +// returned will be nil if the level is valid. +func NewWriterLevel(w io.Writer, level int) (*Writer, error) { + if level < HuffmanOnly || level > BestCompression { + return nil, fmt.Errorf("gzip: invalid compression level: %d", level) + } + z := new(Writer) + z.init(w, level) + return z, nil +} + +func (z *Writer) init(w io.Writer, level int) { + compressor := z.compressor + if compressor != nil { + compressor.Reset(w) + } + *z = Writer{ + Header: Header{ + OS: 255, // unknown + }, + w: w, + level: level, + compressor: compressor, + } +} + +// Reset discards the Writer z's state and makes it equivalent to the +// result of its original state from NewWriter or NewWriterLevel, but +// writing to w instead. This permits reusing a Writer rather than +// allocating a new one. +func (z *Writer) Reset(w io.Writer) { + z.init(w, z.level) +} + +// writeBytes writes a length-prefixed byte slice to z.w. +func (z *Writer) writeBytes(b []byte) error { + if len(b) > 0xffff { + return errors.New("gzip.Write: Extra data is too large") + } + le.PutUint16(z.buf[:2], uint16(len(b))) + _, err := z.w.Write(z.buf[:2]) + if err != nil { + return err + } + _, err = z.w.Write(b) + return err +} + +// writeString writes a UTF-8 string s in GZIP's format to z.w. +// GZIP (RFC 1952) specifies that strings are NUL-terminated ISO 8859-1 (Latin-1). +func (z *Writer) writeString(s string) (err error) { + // GZIP stores Latin-1 strings; error if non-Latin-1; convert if non-ASCII. + needconv := false + for _, v := range s { + if v == 0 || v > 0xff { + return errors.New("gzip.Write: non-Latin-1 header string") + } + if v > 0x7f { + needconv = true + } + } + if needconv { + b := make([]byte, 0, len(s)) + for _, v := range s { + b = append(b, byte(v)) + } + _, err = z.w.Write(b) + } else { + _, err = io.WriteString(z.w, s) + } + if err != nil { + return err + } + // GZIP strings are NUL-terminated. + z.buf[0] = 0 + _, err = z.w.Write(z.buf[:1]) + return err +} + +// Write writes a compressed form of p to the underlying io.Writer. The +// compressed bytes are not necessarily flushed until the Writer is closed. +func (z *Writer) Write(p []byte) (int, error) { + if z.err != nil { + return 0, z.err + } + var n int + // Write the GZIP header lazily. 
+ if !z.wroteHeader { + z.wroteHeader = true + z.buf[0] = gzipID1 + z.buf[1] = gzipID2 + z.buf[2] = gzipDeflate + z.buf[3] = 0 + if z.Extra != nil { + z.buf[3] |= 0x04 + } + if z.Name != "" { + z.buf[3] |= 0x08 + } + if z.Comment != "" { + z.buf[3] |= 0x10 + } + le.PutUint32(z.buf[4:8], uint32(z.ModTime.Unix())) + if z.level == BestCompression { + z.buf[8] = 2 + } else if z.level == BestSpeed { + z.buf[8] = 4 + } else { + z.buf[8] = 0 + } + z.buf[9] = z.OS + n, z.err = z.w.Write(z.buf[:10]) + if z.err != nil { + return n, z.err + } + if z.Extra != nil { + z.err = z.writeBytes(z.Extra) + if z.err != nil { + return n, z.err + } + } + if z.Name != "" { + z.err = z.writeString(z.Name) + if z.err != nil { + return n, z.err + } + } + if z.Comment != "" { + z.err = z.writeString(z.Comment) + if z.err != nil { + return n, z.err + } + } + if z.compressor == nil { + z.compressor, _ = flate.NewWriter(z.w, z.level) + } + } + z.size += uint32(len(p)) + z.digest = crc32.Update(z.digest, crc32.IEEETable, p) + n, z.err = z.compressor.Write(p) + return n, z.err +} + +// Flush flushes any pending compressed data to the underlying writer. +// +// It is useful mainly in compressed network protocols, to ensure that +// a remote reader has enough data to reconstruct a packet. Flush does +// not return until the data has been written. If the underlying +// writer returns an error, Flush returns that error. +// +// In the terminology of the zlib library, Flush is equivalent to Z_SYNC_FLUSH. +func (z *Writer) Flush() error { + if z.err != nil { + return z.err + } + if z.closed { + return nil + } + if !z.wroteHeader { + z.Write(nil) + if z.err != nil { + return z.err + } + } + z.err = z.compressor.Flush() + return z.err +} + +// Close closes the Writer, flushing any unwritten data to the underlying +// io.Writer, but does not close the underlying io.Writer. +func (z *Writer) Close() error { + if z.err != nil { + return z.err + } + if z.closed { + return nil + } + z.closed = true + if !z.wroteHeader { + z.Write(nil) + if z.err != nil { + return z.err + } + } + z.err = z.compressor.Close() + if z.err != nil { + return z.err + } + le.PutUint32(z.buf[:4], z.digest) + le.PutUint32(z.buf[4:8], z.size) + _, z.err = z.w.Write(z.buf[:8]) + return z.err +} diff --git a/vendor/github.com/klauspost/compress/gzip/gzip_test.go b/vendor/github.com/klauspost/compress/gzip/gzip_test.go new file mode 100644 index 0000000..b18bb54 --- /dev/null +++ b/vendor/github.com/klauspost/compress/gzip/gzip_test.go @@ -0,0 +1,519 @@ +// Copyright 2010 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package gzip + +import ( + "bufio" + "bytes" + oldgz "compress/gzip" + "io" + "io/ioutil" + "math/rand" + "testing" + "time" +) + +// TestEmpty tests that an empty payload still forms a valid GZIP stream. +func TestEmpty(t *testing.T) { + buf := new(bytes.Buffer) + + if err := NewWriter(buf).Close(); err != nil { + t.Fatalf("Writer.Close: %v", err) + } + + r, err := NewReader(buf) + if err != nil { + t.Fatalf("NewReader: %v", err) + } + b, err := ioutil.ReadAll(r) + if err != nil { + t.Fatalf("ReadAll: %v", err) + } + if len(b) != 0 { + t.Fatalf("got %d bytes, want 0", len(b)) + } + if err := r.Close(); err != nil { + t.Fatalf("Reader.Close: %v", err) + } +} + +// TestRoundTrip tests that gzipping and then gunzipping is the identity +// function. 
+func TestRoundTrip(t *testing.T) { + buf := new(bytes.Buffer) + + w := NewWriter(buf) + w.Comment = "comment" + w.Extra = []byte("extra") + w.ModTime = time.Unix(1e8, 0) + w.Name = "name" + if _, err := w.Write([]byte("payload")); err != nil { + t.Fatalf("Write: %v", err) + } + if err := w.Close(); err != nil { + t.Fatalf("Writer.Close: %v", err) + } + + r, err := NewReader(buf) + if err != nil { + t.Fatalf("NewReader: %v", err) + } + b, err := ioutil.ReadAll(r) + if err != nil { + t.Fatalf("ReadAll: %v", err) + } + if string(b) != "payload" { + t.Fatalf("payload is %q, want %q", string(b), "payload") + } + if r.Comment != "comment" { + t.Fatalf("comment is %q, want %q", r.Comment, "comment") + } + if string(r.Extra) != "extra" { + t.Fatalf("extra is %q, want %q", r.Extra, "extra") + } + if r.ModTime.Unix() != 1e8 { + t.Fatalf("mtime is %d, want %d", r.ModTime.Unix(), uint32(1e8)) + } + if r.Name != "name" { + t.Fatalf("name is %q, want %q", r.Name, "name") + } + if err := r.Close(); err != nil { + t.Fatalf("Reader.Close: %v", err) + } +} + +// TestLatin1 tests the internal functions for converting to and from Latin-1. +func TestLatin1(t *testing.T) { + latin1 := []byte{0xc4, 'u', 0xdf, 'e', 'r', 'u', 'n', 'g', 0} + utf8 := "Äußerung" + z := Reader{r: bufio.NewReader(bytes.NewReader(latin1))} + s, err := z.readString() + if err != nil { + t.Fatalf("readString: %v", err) + } + if s != utf8 { + t.Fatalf("read latin-1: got %q, want %q", s, utf8) + } + + buf := bytes.NewBuffer(make([]byte, 0, len(latin1))) + c := Writer{w: buf} + if err = c.writeString(utf8); err != nil { + t.Fatalf("writeString: %v", err) + } + s = buf.String() + if s != string(latin1) { + t.Fatalf("write utf-8: got %q, want %q", s, string(latin1)) + } +} + +// TestLatin1RoundTrip tests that metadata that is representable in Latin-1 +// survives a round trip. 
+func TestLatin1RoundTrip(t *testing.T) { + testCases := []struct { + name string + ok bool + }{ + {"", true}, + {"ASCII is OK", true}, + {"unless it contains a NUL\x00", false}, + {"no matter where \x00 occurs", false}, + {"\x00\x00\x00", false}, + {"Látin-1 also passes (U+00E1)", true}, + {"but LĀtin Extended-A (U+0100) does not", false}, + {"neither does 日本語", false}, + {"invalid UTF-8 also \xffails", false}, + {"\x00 as does Látin-1 with NUL", false}, + } + for _, tc := range testCases { + buf := new(bytes.Buffer) + + w := NewWriter(buf) + w.Name = tc.name + err := w.Close() + if (err == nil) != tc.ok { + t.Errorf("Writer.Close: name = %q, err = %v", tc.name, err) + continue + } + if !tc.ok { + continue + } + + r, err := NewReader(buf) + if err != nil { + t.Errorf("NewReader: %v", err) + continue + } + _, err = ioutil.ReadAll(r) + if err != nil { + t.Errorf("ReadAll: %v", err) + continue + } + if r.Name != tc.name { + t.Errorf("name is %q, want %q", r.Name, tc.name) + continue + } + if err := r.Close(); err != nil { + t.Errorf("Reader.Close: %v", err) + continue + } + } +} + +func TestWriterFlush(t *testing.T) { + buf := new(bytes.Buffer) + + w := NewWriter(buf) + w.Comment = "comment" + w.Extra = []byte("extra") + w.ModTime = time.Unix(1e8, 0) + w.Name = "name" + + n0 := buf.Len() + if n0 != 0 { + t.Fatalf("buffer size = %d before writes; want 0", n0) + } + + if err := w.Flush(); err != nil { + t.Fatal(err) + } + + n1 := buf.Len() + if n1 == 0 { + t.Fatal("no data after first flush") + } + + w.Write([]byte("x")) + + n2 := buf.Len() + if n1 != n2 { + t.Fatalf("after writing a single byte, size changed from %d to %d; want no change", n1, n2) + } + + if err := w.Flush(); err != nil { + t.Fatal(err) + } + + n3 := buf.Len() + if n2 == n3 { + t.Fatal("Flush didn't flush any data") + } +} + +// Multiple gzip files concatenated form a valid gzip file. 
+func TestConcat(t *testing.T) { + var buf bytes.Buffer + w := NewWriter(&buf) + w.Write([]byte("hello ")) + w.Close() + w = NewWriter(&buf) + w.Write([]byte("world\n")) + w.Close() + + r, err := NewReader(&buf) + data, err := ioutil.ReadAll(r) + if string(data) != "hello world\n" || err != nil { + t.Fatalf("ReadAll = %q, %v, want %q, nil", data, err, "hello world") + } +} + +func TestWriterReset(t *testing.T) { + buf := new(bytes.Buffer) + buf2 := new(bytes.Buffer) + z := NewWriter(buf) + msg := []byte("hello world") + z.Write(msg) + z.Close() + z.Reset(buf2) + z.Write(msg) + z.Close() + if buf.String() != buf2.String() { + t.Errorf("buf2 %q != original buf of %q", buf2.String(), buf.String()) + } +} + +var testbuf []byte + +func testFile(i, level int, t *testing.T) { + dat, _ := ioutil.ReadFile("testdata/test.json") + dl := len(dat) + if len(testbuf) != i*dl { + // Make results predictable + testbuf = make([]byte, i*dl) + for j := 0; j < i; j++ { + copy(testbuf[j*dl:j*dl+dl], dat) + } + } + + br := bytes.NewBuffer(testbuf) + var buf bytes.Buffer + w, err := NewWriterLevel(&buf, DefaultCompression) + if err != nil { + t.Fatal(err) + } + n, err := io.Copy(w, br) + if err != nil { + t.Fatal(err) + } + if int(n) != len(testbuf) { + t.Fatal("Short write:", n, "!=", testbuf) + } + err = w.Close() + if err != nil { + t.Fatal(err) + } + r, err := NewReader(&buf) + if err != nil { + t.Fatal(err.Error()) + } + decoded, err := ioutil.ReadAll(r) + if err != nil { + t.Fatal(err.Error()) + } + if !bytes.Equal(testbuf, decoded) { + t.Errorf("decoded content does not match.") + } +} + +func TestFile1xM2(t *testing.T) { testFile(1, -2, t) } +func TestFile1xM1(t *testing.T) { testFile(1, -1, t) } +func TestFile1x0(t *testing.T) { testFile(1, 0, t) } +func TestFile1x1(t *testing.T) { testFile(1, 1, t) } +func TestFile1x2(t *testing.T) { testFile(1, 2, t) } +func TestFile1x3(t *testing.T) { testFile(1, 3, t) } +func TestFile1x4(t *testing.T) { testFile(1, 4, t) } +func TestFile1x5(t *testing.T) { testFile(1, 5, t) } +func TestFile1x6(t *testing.T) { testFile(1, 6, t) } +func TestFile1x7(t *testing.T) { testFile(1, 7, t) } +func TestFile1x8(t *testing.T) { testFile(1, 8, t) } +func TestFile1x9(t *testing.T) { testFile(1, 9, t) } +func TestFile10(t *testing.T) { testFile(10, DefaultCompression, t) } + +func TestFile50(t *testing.T) { + if testing.Short() { + t.Skip("skipping during short test") + } + testFile(50, DefaultCompression, t) +} + +func TestFile200(t *testing.T) { + if testing.Short() { + t.Skip("skipping during short test") + } + testFile(200, BestSpeed, t) +} + +func testBigGzip(i int, t *testing.T) { + if len(testbuf) != i { + // Make results predictable + rand.Seed(1337) + testbuf = make([]byte, i) + for idx := range testbuf { + testbuf[idx] = byte(65 + rand.Intn(20)) + } + } + c := BestCompression + if testing.Short() { + c = BestSpeed + } + + br := bytes.NewBuffer(testbuf) + var buf bytes.Buffer + w, err := NewWriterLevel(&buf, c) + if err != nil { + t.Fatal(err) + } + n, err := io.Copy(w, br) + if err != nil { + t.Fatal(err) + } + if int(n) != len(testbuf) { + t.Fatal("Short write:", n, "!=", len(testbuf)) + } + err = w.Close() + if err != nil { + t.Fatal(err.Error()) + } + + r, err := NewReader(&buf) + if err != nil { + t.Fatal(err.Error()) + } + decoded, err := ioutil.ReadAll(r) + if err != nil { + t.Fatal(err.Error()) + } + if !bytes.Equal(testbuf, decoded) { + t.Errorf("decoded content does not match.") + } +} + +func TestGzip1K(t *testing.T) { testBigGzip(1000, t) } +func TestGzip100K(t 
*testing.T) { testBigGzip(100000, t) } +func TestGzip1M(t *testing.T) { + if testing.Short() { + t.Skip("skipping during short test") + } + + testBigGzip(1000000, t) +} +func TestGzip10M(t *testing.T) { + if testing.Short() { + t.Skip("skipping during short test") + } + testBigGzip(10000000, t) +} + +// Test if two runs produce identical results. +func TestDeterministicLM2(t *testing.T) { testDeterm(-2, t) } + +// Level 0 is not deterministic since it depends on the size of each write. +// func TestDeterministicL0(t *testing.T) { testDeterm(0, t) } +func TestDeterministicL1(t *testing.T) { testDeterm(1, t) } +func TestDeterministicL2(t *testing.T) { testDeterm(2, t) } +func TestDeterministicL3(t *testing.T) { testDeterm(3, t) } +func TestDeterministicL4(t *testing.T) { testDeterm(4, t) } +func TestDeterministicL5(t *testing.T) { testDeterm(5, t) } +func TestDeterministicL6(t *testing.T) { testDeterm(6, t) } +func TestDeterministicL7(t *testing.T) { testDeterm(7, t) } +func TestDeterministicL8(t *testing.T) { testDeterm(8, t) } +func TestDeterministicL9(t *testing.T) { testDeterm(9, t) } + +func testDeterm(i int, t *testing.T) { + var length = 500000 + if testing.Short() { + length = 100000 + } + rand.Seed(1337) + t1 := make([]byte, length) + for idx := range t1 { + t1[idx] = byte(65 + rand.Intn(8)) + } + + br := bytes.NewBuffer(t1) + var b1 bytes.Buffer + w, err := NewWriterLevel(&b1, i) + if err != nil { + t.Fatal(err) + } + _, err = io.Copy(w, br) + if err != nil { + t.Fatal(err) + } + w.Flush() + w.Close() + + // We recreate the buffer, so we have a goos chance of getting a + // different memory address. + rand.Seed(1337) + t2 := make([]byte, length) + for idx := range t2 { + t2[idx] = byte(65 + rand.Intn(8)) + } + + br2 := bytes.NewBuffer(t2) + var b2 bytes.Buffer + w2, err := NewWriterLevel(&b2, i) + if err != nil { + t.Fatal(err) + } + + // We write the same data, but with a different size than + // the default copy. + for { + _, err = io.CopyN(w2, br2, 1234) + if err == io.EOF { + err = nil + break + } else if err != nil { + break + } + } + if err != nil { + t.Fatal(err) + } + w2.Flush() + w2.Close() + + b1b := b1.Bytes() + b2b := b2.Bytes() + + if bytes.Compare(b1b, b2b) != 0 { + t.Fatalf("Level %d did not produce deterministric result, len(a) = %d, len(b) = %d", i, len(b1b), len(b2b)) + } +} + +func BenchmarkGzipLM2(b *testing.B) { benchmarkGzipN(b, -2) } +func BenchmarkGzipL1(b *testing.B) { benchmarkGzipN(b, 1) } +func BenchmarkGzipL2(b *testing.B) { benchmarkGzipN(b, 2) } +func BenchmarkGzipL3(b *testing.B) { benchmarkGzipN(b, 3) } +func BenchmarkGzipL4(b *testing.B) { benchmarkGzipN(b, 4) } +func BenchmarkGzipL5(b *testing.B) { benchmarkGzipN(b, 5) } +func BenchmarkGzipL6(b *testing.B) { benchmarkGzipN(b, 6) } +func BenchmarkGzipL7(b *testing.B) { benchmarkGzipN(b, 7) } +func BenchmarkGzipL8(b *testing.B) { benchmarkGzipN(b, 8) } +func BenchmarkGzipL9(b *testing.B) { benchmarkGzipN(b, 9) } + +func benchmarkGzipN(b *testing.B, level int) { + dat, _ := ioutil.ReadFile("testdata/test.json") + dat = append(dat, dat...) + dat = append(dat, dat...) + dat = append(dat, dat...) + dat = append(dat, dat...) + dat = append(dat, dat...) 
+ b.SetBytes(int64(len(dat))) + w, _ := NewWriterLevel(ioutil.Discard, level) + b.ResetTimer() + for n := 0; n < b.N; n++ { + w.Reset(ioutil.Discard) + n, err := w.Write(dat) + if n != len(dat) { + panic("short write") + } + if err != nil { + panic(err) + } + err = w.Close() + if err != nil { + panic(err) + } + } +} + +func BenchmarkOldGzipL1(b *testing.B) { benchmarkOldGzipN(b, 1) } +func BenchmarkOldGzipL2(b *testing.B) { benchmarkOldGzipN(b, 2) } +func BenchmarkOldGzipL3(b *testing.B) { benchmarkOldGzipN(b, 3) } +func BenchmarkOldGzipL4(b *testing.B) { benchmarkOldGzipN(b, 4) } +func BenchmarkOldGzipL5(b *testing.B) { benchmarkOldGzipN(b, 5) } +func BenchmarkOldGzipL6(b *testing.B) { benchmarkOldGzipN(b, 6) } +func BenchmarkOldGzipL7(b *testing.B) { benchmarkOldGzipN(b, 7) } +func BenchmarkOldGzipL8(b *testing.B) { benchmarkOldGzipN(b, 8) } +func BenchmarkOldGzipL9(b *testing.B) { benchmarkOldGzipN(b, 9) } + +func benchmarkOldGzipN(b *testing.B, level int) { + dat, _ := ioutil.ReadFile("testdata/test.json") + dat = append(dat, dat...) + dat = append(dat, dat...) + dat = append(dat, dat...) + dat = append(dat, dat...) + dat = append(dat, dat...) + + b.SetBytes(int64(len(dat))) + w, _ := oldgz.NewWriterLevel(ioutil.Discard, level) + b.ResetTimer() + for n := 0; n < b.N; n++ { + w.Reset(ioutil.Discard) + n, err := w.Write(dat) + if n != len(dat) { + panic("short write") + } + if err != nil { + panic(err) + } + err = w.Close() + if err != nil { + panic(err) + } + } +} diff --git a/vendor/github.com/klauspost/compress/gzip/testdata/issue6550.gz b/vendor/github.com/klauspost/compress/gzip/testdata/issue6550.gz new file mode 100644 index 0000000..57972b6 Binary files /dev/null and b/vendor/github.com/klauspost/compress/gzip/testdata/issue6550.gz differ diff --git a/vendor/github.com/klauspost/compress/gzip/testdata/test.json b/vendor/github.com/klauspost/compress/gzip/testdata/test.json new file mode 100644 index 0000000..3b7b678 --- /dev/null +++ b/vendor/github.com/klauspost/compress/gzip/testdata/test.json @@ -0,0 +1,5902 @@ +[ + { + "_id": "543fa821aeca0fed7f182f01", + "index": 0, + "guid": "3526d142-6d2b-4266-9855-e6ec1589a265", + "isActive": false, + "balance": "$2,156.72", + "picture": "http://placehold.it/32x32", + "age": 29, + "eyeColor": "brown", + "name": { + "first": "Rosella", + "last": "Hale" + }, + "company": "SKINSERVE", + "email": "rosella.hale@skinserve.net", + "phone": "+1 (920) 528-2959", + "address": "324 Imlay Street, Sehili, Guam, 3022", + "about": "Est consectetur ut incididunt commodo elit cillum incididunt consectetur id officia pariatur pariatur cillum. Ipsum non incididunt tempor non. Cillum aliquip aliquip non minim ipsum voluptate incididunt adipisicing aute pariatur laborum minim deserunt laborum. Do do consequat enim adipisicing dolor incididunt reprehenderit sint. Veniam dolor consequat sint ullamco id enim occaecat.\r\n", + "registered": "Wednesday, August 27, 2014 9:12 PM", + "latitude": 43.44586, + "longitude": -65.480986, + "tags": [ + "Lorem", + "ex", + "magna", + "aliqua", + "id", + "sint", + "elit" + ], + "range": [ + 0, + 1, + 2, + 3, + 4, + 5, + 6, + 7, + 8, + 9 + ], + "friends": [ + { + "id": 0, + "name": "Etta Stanton" + }, + { + "id": 1, + "name": "Cora Velazquez" + }, + { + "id": 2, + "name": "Deann Guy" + } + ], + "greeting": "Hello, Rosella! 
You have 6 unread messages.", + "favoriteFruit": "banana" + }, + { + "_id": "543fa8218066e8499ef38bcc", + "index": 1, + "guid": "991a35b5-91db-49e8-8a1e-13688b5ed88d", + "isActive": true, + "balance": "$1,762.71", + "picture": "http://placehold.it/32x32", + "age": 28, + "eyeColor": "green", + "name": { + "first": "Rose", + "last": "Lynn" + }, + "company": "NITRACYR", + "email": "rose.lynn@nitracyr.com", + "phone": "+1 (912) 564-2131", + "address": "485 Pulaski Street, Logan, Mississippi, 7453", + "about": "Minim proident enim eiusmod reprehenderit excepteur laboris. Adipisicing culpa cupidatat eiusmod exercitation reprehenderit anim. Nostrud mollit reprehenderit reprehenderit id magna et id esse cillum et proident. Incididunt eu nisi excepteur est est irure voluptate id nulla. Laboris consectetur aliqua cupidatat ex elit proident officia ex quis. Minim officia eu eiusmod velit. Ullamco dolor non quis aliqua cupidatat amet laborum laborum ad ex proident qui eiusmod ea.\r\n", + "registered": "Sunday, October 5, 2014 10:36 PM", + "latitude": -3.548698, + "longitude": 79.421107, + "tags": [ + "exercitation", + "adipisicing", + "aliqua", + "do", + "id", + "veniam", + "est" + ], + "range": [ + 0, + 1, + 2, + 3, + 4, + 5, + 6, + 7, + 8, + 9 + ], + "friends": [ + { + "id": 0, + "name": "Ada Little" + }, + { + "id": 1, + "name": "Lopez Osborne" + }, + { + "id": 2, + "name": "Tami Leach" + } + ], + "greeting": "Hello, Rose! You have 5 unread messages.", + "favoriteFruit": "strawberry" + }, + { + "_id": "543fa821255974bb9f89e5ea", + "index": 2, + "guid": "e5727238-63a4-4e1e-88cc-67300826259c", + "isActive": false, + "balance": "$2,131.97", + "picture": "http://placehold.it/32x32", + "age": 21, + "eyeColor": "green", + "name": { + "first": "Gloria", + "last": "Richards" + }, + "company": "SPHERIX", + "email": "gloria.richards@spherix.biz", + "phone": "+1 (884) 536-3434", + "address": "493 Judge Street, Cetronia, Rhode Island, 4439", + "about": "Lorem cupidatat ea et laboris tempor enim non. Sit consequat culpa et qui aute cillum ut ullamco. Nulla duis sit Lorem incididunt mollit nostrud dolor veniam ullamco. Sunt magna id velit in laborum nisi labore. Id deserunt labore dolore dolor aliqua culpa est id duis.\r\n", + "registered": "Saturday, March 29, 2014 8:18 AM", + "latitude": 60.328012, + "longitude": 126.657357, + "tags": [ + "dolore", + "laboris", + "proident", + "cillum", + "in", + "fugiat", + "incididunt" + ], + "range": [ + 0, + 1, + 2, + 3, + 4, + 5, + 6, + 7, + 8, + 9 + ], + "friends": [ + { + "id": 0, + "name": "Bowen Cote" + }, + { + "id": 1, + "name": "Olga Gardner" + }, + { + "id": 2, + "name": "Evangeline Howard" + } + ], + "greeting": "Hello, Gloria! You have 9 unread messages.", + "favoriteFruit": "strawberry" + }, + { + "_id": "543fa8212b7e1e8201a38702", + "index": 3, + "guid": "bab757bd-2ebd-4c2c-86b7-0d4d8b059d35", + "isActive": true, + "balance": "$2,509.81", + "picture": "http://placehold.it/32x32", + "age": 39, + "eyeColor": "green", + "name": { + "first": "Casey", + "last": "Hayes" + }, + "company": "SURELOGIC", + "email": "casey.hayes@surelogic.co.uk", + "phone": "+1 (993) 573-3937", + "address": "330 Tapscott Avenue, Eastvale, New Mexico, 928", + "about": "Eu elit sint sunt labore dolor cillum esse ad voluptate commodo. Dolor aliqua do dolore ex tempor sint consequat culpa et consectetur nisi voluptate reprehenderit. Dolor velit eu cillum tempor anim anim. Nostrud laboris eiusmod elit enim duis in consectetur esse anim qui. 
Et eiusmod culpa nulla anim et officia pariatur reprehenderit eiusmod veniam. Ullamco nisi ea incididunt velit. Ullamco cillum mollit ea aliqua ea eu et enim.\r\n", + "registered": "Sunday, September 14, 2014 8:35 AM", + "latitude": -43.494604, + "longitude": 95.217518, + "tags": [ + "officia", + "sunt", + "dolore", + "qui", + "elit", + "irure", + "cillum" + ], + "range": [ + 0, + 1, + 2, + 3, + 4, + 5, + 6, + 7, + 8, + 9 + ], + "friends": [ + { + "id": 0, + "name": "Serrano Wise" + }, + { + "id": 1, + "name": "Lorene Macias" + }, + { + "id": 2, + "name": "Kristen Lott" + } + ], + "greeting": "Hello, Casey! You have 7 unread messages.", + "favoriteFruit": "apple" + }, + { + "_id": "543fa821bfefa43403d5d054", + "index": 4, + "guid": "675d1598-8c45-4d67-a4df-d38a270de371", + "isActive": false, + "balance": "$3,887.07", + "picture": "http://placehold.it/32x32", + "age": 35, + "eyeColor": "blue", + "name": { + "first": "Price", + "last": "Oconnor" + }, + "company": "ANOCHA", + "email": "price.oconnor@anocha.tv", + "phone": "+1 (855) 410-3197", + "address": "447 Stockholm Street, Templeton, Wisconsin, 2216", + "about": "Cillum veniam esse duis tempor incididunt do dolor officia elit eu. Excepteur velit reprehenderit minim Lorem commodo est. Duis Lorem nisi elit aliquip est deserunt fugiat ut. Nisi tempor ex est pariatur laborum eiusmod anim eu nulla. Nisi enim id aute id ex id nostrud.\r\n", + "registered": "Wednesday, May 14, 2014 5:19 PM", + "latitude": 26.083477, + "longitude": 122.61114, + "tags": [ + "in", + "ad", + "aliqua", + "minim", + "nisi", + "cupidatat", + "id" + ], + "range": [ + 0, + 1, + 2, + 3, + 4, + 5, + 6, + 7, + 8, + 9 + ], + "friends": [ + { + "id": 0, + "name": "Montgomery Mccray" + }, + { + "id": 1, + "name": "Lucia Ferrell" + }, + { + "id": 2, + "name": "Glover Brock" + } + ], + "greeting": "Hello, Price! You have 5 unread messages.", + "favoriteFruit": "strawberry" + }, + { + "_id": "543fa821a699260d8ed4439a", + "index": 5, + "guid": "5e271270-fef3-48a7-b389-346251b46abc", + "isActive": false, + "balance": "$1,046.50", + "picture": "http://placehold.it/32x32", + "age": 38, + "eyeColor": "blue", + "name": { + "first": "Rita", + "last": "Huber" + }, + "company": "VURBO", + "email": "rita.huber@vurbo.name", + "phone": "+1 (803) 589-3948", + "address": "838 River Street, Gadsden, American Samoa, 2602", + "about": "Culpa quis qui exercitation velit officia eu id qui consequat qui. Ea fugiat quis fugiat proident velit. Velit et reprehenderit quis irure adipisicing duis dolor id cupidatat ea aliqua elit.\r\n", + "registered": "Friday, April 11, 2014 11:56 AM", + "latitude": 30.717665, + "longitude": -29.687902, + "tags": [ + "veniam", + "ex", + "deserunt", + "cillum", + "sint", + "eu", + "proident" + ], + "range": [ + 0, + 1, + 2, + 3, + 4, + 5, + 6, + 7, + 8, + 9 + ], + "friends": [ + { + "id": 0, + "name": "Oliver Terrell" + }, + { + "id": 1, + "name": "Lora Shepherd" + }, + { + "id": 2, + "name": "Guzman Holman" + } + ], + "greeting": "Hello, Rita! 
You have 7 unread messages.", + "favoriteFruit": "apple" + }, + { + "_id": "543fa821b888230e87ce950e", + "index": 6, + "guid": "7e4efd9a-4923-42ae-8924-d6d5fae80ec0", + "isActive": false, + "balance": "$1,205.59", + "picture": "http://placehold.it/32x32", + "age": 30, + "eyeColor": "green", + "name": { + "first": "Peterson", + "last": "Oliver" + }, + "company": "ZENTIX", + "email": "peterson.oliver@zentix.me", + "phone": "+1 (924) 564-2815", + "address": "596 Middleton Street, Walker, Louisiana, 3358", + "about": "Exercitation cillum sit exercitation voluptate duis nostrud incididunt cillum sint minim labore tempor minim ad. Esse ad id pariatur cillum id exercitation ullamco elit. Quis nisi excepteur mollit consectetur id et. Ea voluptate nulla duis minim exercitation aliqua aute nisi enim enim excepteur dolor ad non. Aliquip elit eu enim officia minim enim Lorem tempor. Cillum anim aute sunt cupidatat deserunt consequat.\r\n", + "registered": "Friday, March 28, 2014 2:28 PM", + "latitude": 45.092029, + "longitude": 56.730029, + "tags": [ + "in", + "voluptate", + "sit", + "sit", + "Lorem", + "reprehenderit", + "esse" + ], + "range": [ + 0, + 1, + 2, + 3, + 4, + 5, + 6, + 7, + 8, + 9 + ], + "friends": [ + { + "id": 0, + "name": "Wiley Henry" + }, + { + "id": 1, + "name": "Downs Rowland" + }, + { + "id": 2, + "name": "White Guerra" + } + ], + "greeting": "Hello, Peterson! You have 8 unread messages.", + "favoriteFruit": "apple" + }, + { + "_id": "543fa8210f8589ab846b3d88", + "index": 7, + "guid": "afc25e30-7e70-4a87-bdd3-519e1837969a", + "isActive": false, + "balance": "$3,928.85", + "picture": "http://placehold.it/32x32", + "age": 39, + "eyeColor": "green", + "name": { + "first": "Shauna", + "last": "Morse" + }, + "company": "CENTICE", + "email": "shauna.morse@centice.us", + "phone": "+1 (926) 517-3679", + "address": "752 Dunne Place, Ebro, Kansas, 3215", + "about": "Cupidatat incididunt sit duis tempor labore dolore aute qui magna in. Consequat aute ut veniam laborum aliqua Lorem esse. Cillum in qui sint excepteur eiusmod eiusmod eu anim adipisicing et.\r\n", + "registered": "Saturday, September 6, 2014 2:32 PM", + "latitude": 36.341849, + "longitude": 108.378341, + "tags": [ + "in", + "nulla", + "labore", + "qui", + "id", + "enim", + "fugiat" + ], + "range": [ + 0, + 1, + 2, + 3, + 4, + 5, + 6, + 7, + 8, + 9 + ], + "friends": [ + { + "id": 0, + "name": "Lizzie Carson" + }, + { + "id": 1, + "name": "Eliza Hall" + }, + { + "id": 2, + "name": "Baxter Burton" + } + ], + "greeting": "Hello, Shauna! You have 6 unread messages.", + "favoriteFruit": "banana" + }, + { + "_id": "543fa8213c997bd81d4a7fa5", + "index": 8, + "guid": "1337ad27-17e3-459f-90a3-a43b54b88184", + "isActive": true, + "balance": "$2,096.67", + "picture": "http://placehold.it/32x32", + "age": 24, + "eyeColor": "blue", + "name": { + "first": "Glenn", + "last": "Brooks" + }, + "company": "MANGLO", + "email": "glenn.brooks@manglo.ca", + "phone": "+1 (895) 595-2669", + "address": "605 McDonald Avenue, Nicholson, Indiana, 2302", + "about": "Deserunt incididunt ullamco dolore nostrud cupidatat sit consequat adipisicing incididunt sunt. Laboris fugiat et laboris est eu laborum culpa. Labore ad aliquip ut enim aute nulla quis cillum dolor aliqua. Culpa labore occaecat et sunt qui. Velit consequat ad proident non voluptate non mollit eu et cillum tempor. 
Velit quis deserunt Lorem cupidatat enim ut.\r\n", + "registered": "Friday, March 21, 2014 9:06 AM", + "latitude": -40.51084, + "longitude": -137.771438, + "tags": [ + "enim", + "laboris", + "culpa", + "do", + "nulla", + "anim", + "cillum" + ], + "range": [ + 0, + 1, + 2, + 3, + 4, + 5, + 6, + 7, + 8, + 9 + ], + "friends": [ + { + "id": 0, + "name": "Shelly Cardenas" + }, + { + "id": 1, + "name": "Kristine Mendoza" + }, + { + "id": 2, + "name": "Hall Hendrix" + } + ], + "greeting": "Hello, Glenn! You have 10 unread messages.", + "favoriteFruit": "strawberry" + }, + { + "_id": "543fa821054bcf388259b272", + "index": 9, + "guid": "cd9601fc-7ca7-4d54-830b-145ff5c5c147", + "isActive": true, + "balance": "$1,816.14", + "picture": "http://placehold.it/32x32", + "age": 33, + "eyeColor": "green", + "name": { + "first": "Maribel", + "last": "Small" + }, + "company": "FUTURITY", + "email": "maribel.small@futurity.org", + "phone": "+1 (825) 532-2134", + "address": "424 Rockaway Parkway, Vale, Alaska, 1834", + "about": "Aliqua irure culpa exercitation nostrud qui exercitation deserunt ullamco culpa aliquip irure. Proident officia in consequat laborum ex adipisicing exercitation proident anim cupidatat excepteur anim. Labore irure pariatur laboris reprehenderit.\r\n", + "registered": "Tuesday, July 29, 2014 9:59 PM", + "latitude": 53.843872, + "longitude": 85.292318, + "tags": [ + "dolore", + "laborum", + "aute", + "aliqua", + "nostrud", + "commodo", + "commodo" + ], + "range": [ + 0, + 1, + 2, + 3, + 4, + 5, + 6, + 7, + 8, + 9 + ], + "friends": [ + { + "id": 0, + "name": "Anthony Bray" + }, + { + "id": 1, + "name": "Vicki Kelly" + }, + { + "id": 2, + "name": "Baird Wagner" + } + ], + "greeting": "Hello, Maribel! You have 5 unread messages.", + "favoriteFruit": "banana" + }, + { + "_id": "543fa821f14efb6f7f1210b9", + "index": 10, + "guid": "c9400d51-ea8d-4748-9a10-fa0e3a037b23", + "isActive": false, + "balance": "$1,395.15", + "picture": "http://placehold.it/32x32", + "age": 31, + "eyeColor": "green", + "name": { + "first": "Kendra", + "last": "Knapp" + }, + "company": "UNCORP", + "email": "kendra.knapp@uncorp.biz", + "phone": "+1 (830) 509-3054", + "address": "765 Cameron Court, Ferney, Florida, 1963", + "about": "Duis irure ea qui ut velit nostrud. Lorem laborum excepteur do qui ad sit culpa. Labore mollit mollit deserunt sint aute officia qui laboris dolor aliqua magna in officia.\r\n", + "registered": "Sunday, April 27, 2014 11:55 AM", + "latitude": 84.200593, + "longitude": -155.377179, + "tags": [ + "laborum", + "labore", + "aliquip", + "ex", + "voluptate", + "dolor", + "Lorem" + ], + "range": [ + 0, + 1, + 2, + 3, + 4, + 5, + 6, + 7, + 8, + 9 + ], + "friends": [ + { + "id": 0, + "name": "Marva Cash" + }, + { + "id": 1, + "name": "Aimee Velez" + }, + { + "id": 2, + "name": "Eaton Delgado" + } + ], + "greeting": "Hello, Kendra! You have 7 unread messages.", + "favoriteFruit": "strawberry" + }, + { + "_id": "543fa82119105322ba52b407", + "index": 11, + "guid": "b6862230-95da-43d5-97ba-13ed4bcc0744", + "isActive": false, + "balance": "$1,442.51", + "picture": "http://placehold.it/32x32", + "age": 21, + "eyeColor": "brown", + "name": { + "first": "Katrina", + "last": "Ferguson" + }, + "company": "MANTRIX", + "email": "katrina.ferguson@mantrix.io", + "phone": "+1 (938) 541-3037", + "address": "447 Putnam Avenue, Collins, Georgia, 8421", + "about": "Minim aliquip Lorem fugiat et fugiat esse aliqua consectetur non officia esse. Fugiat irure eu ut irure cillum mollit nisi consequat do cillum. 
Est exercitation deserunt proident ex cupidatat. Elit aliquip pariatur ad minim adipisicing qui. Quis enim laborum incididunt eiusmod deserunt cillum amet enim. Proident et do voluptate esse laboris nisi. Duis cupidatat fugiat adipisicing aute velit et ullamco anim velit velit et excepteur laboris.\r\n", + "registered": "Tuesday, February 4, 2014 5:01 PM", + "latitude": 43.287084, + "longitude": 133.518964, + "tags": [ + "magna", + "officia", + "reprehenderit", + "excepteur", + "cillum", + "veniam", + "officia" + ], + "range": [ + 0, + 1, + 2, + 3, + 4, + 5, + 6, + 7, + 8, + 9 + ], + "friends": [ + { + "id": 0, + "name": "Tameka Mccullough" + }, + { + "id": 1, + "name": "Madden Vincent" + }, + { + "id": 2, + "name": "Jewel Mccarthy" + } + ], + "greeting": "Hello, Katrina! You have 7 unread messages.", + "favoriteFruit": "apple" + }, + { + "_id": "543fa821ee55902ac207b17a", + "index": 12, + "guid": "50161207-4477-47b9-8aa7-a845c3c2e96f", + "isActive": false, + "balance": "$1,937.64", + "picture": "http://placehold.it/32x32", + "age": 38, + "eyeColor": "brown", + "name": { + "first": "Gilliam", + "last": "Flowers" + }, + "company": "INTERLOO", + "email": "gilliam.flowers@interloo.net", + "phone": "+1 (930) 564-2474", + "address": "986 Elton Street, Bagtown, Alabama, 9115", + "about": "Velit incididunt ut nulla adipisicing ad qui sint dolor cillum cupidatat in. Commodo aliqua deserunt ea eu irure irure nisi ullamco culpa nostrud. Adipisicing exercitation excepteur et id cupidatat. Ullamco ut incididunt proident est ad deserunt duis id ut. Excepteur cupidatat irure reprehenderit et excepteur minim cillum occaecat adipisicing. Commodo fugiat ad ex consectetur commodo dolore id nisi deserunt commodo aliquip. Veniam amet mollit nulla adipisicing eu minim sit magna incididunt adipisicing.\r\n", + "registered": "Tuesday, April 1, 2014 12:42 AM", + "latitude": -55.19047, + "longitude": 177.975351, + "tags": [ + "sint", + "pariatur", + "incididunt", + "exercitation", + "quis", + "ad", + "sint" + ], + "range": [ + 0, + 1, + 2, + 3, + 4, + 5, + 6, + 7, + 8, + 9 + ], + "friends": [ + { + "id": 0, + "name": "Patsy Hunter" + }, + { + "id": 1, + "name": "Cecilia Green" + }, + { + "id": 2, + "name": "Meyer Jones" + } + ], + "greeting": "Hello, Gilliam! You have 9 unread messages.", + "favoriteFruit": "banana" + }, + { + "_id": "543fa821658f3962ce822678", + "index": 13, + "guid": "7017674f-79b3-43ed-8832-2b787c51f59d", + "isActive": false, + "balance": "$2,291.31", + "picture": "http://placehold.it/32x32", + "age": 38, + "eyeColor": "blue", + "name": { + "first": "Roberts", + "last": "Floyd" + }, + "company": "OVOLO", + "email": "roberts.floyd@ovolo.com", + "phone": "+1 (935) 401-2916", + "address": "723 Amherst Street, Brady, District Of Columbia, 4241", + "about": "Occaecat incididunt eu do quis est. Est mollit incididunt sint aute sunt. Consectetur incididunt officia eu fugiat quis officia pariatur excepteur sint. In enim nostrud nisi culpa. Ex incididunt exercitation id voluptate.\r\n", + "registered": "Thursday, June 19, 2014 4:16 PM", + "latitude": 72.321258, + "longitude": 28.548926, + "tags": [ + "et", + "ipsum", + "anim", + "dolor", + "commodo", + "do", + "exercitation" + ], + "range": [ + 0, + 1, + 2, + 3, + 4, + 5, + 6, + 7, + 8, + 9 + ], + "friends": [ + { + "id": 0, + "name": "Tracey Vasquez" + }, + { + "id": 1, + "name": "Castro Harrell" + }, + { + "id": 2, + "name": "Sanders Barr" + } + ], + "greeting": "Hello, Roberts! 
You have 10 unread messages.", + "favoriteFruit": "strawberry" + }, + { + "_id": "543fa821f95ca5383b9eb53d", + "index": 14, + "guid": "a6532d9a-d291-4a51-92e7-33c50ceecc12", + "isActive": true, + "balance": "$3,310.31", + "picture": "http://placehold.it/32x32", + "age": 33, + "eyeColor": "green", + "name": { + "first": "Miles", + "last": "Valdez" + }, + "company": "DENTREX", + "email": "miles.valdez@dentrex.biz", + "phone": "+1 (960) 513-3228", + "address": "726 Stillwell Place, Soham, Pennsylvania, 3510", + "about": "Sit labore ex commodo duis tempor labore officia et et est qui ullamco. Aute elit in labore laboris magna duis ipsum excepteur anim laboris ipsum magna magna non. Sint mollit eiusmod in est sint ipsum excepteur do anim cillum cillum.\r\n", + "registered": "Thursday, May 1, 2014 6:08 PM", + "latitude": 88.123309, + "longitude": -121.226418, + "tags": [ + "voluptate", + "sunt", + "anim", + "laboris", + "exercitation", + "deserunt", + "culpa" + ], + "range": [ + 0, + 1, + 2, + 3, + 4, + 5, + 6, + 7, + 8, + 9 + ], + "friends": [ + { + "id": 0, + "name": "Bradford Horn" + }, + { + "id": 1, + "name": "Hanson Dillon" + }, + { + "id": 2, + "name": "Whitley Stanley" + } + ], + "greeting": "Hello, Miles! You have 8 unread messages.", + "favoriteFruit": "banana" + }, + { + "_id": "543fa821c52f80cda8ff36b3", + "index": 15, + "guid": "63820e33-a8c7-410e-afa9-32b7f7017a32", + "isActive": true, + "balance": "$2,616.09", + "picture": "http://placehold.it/32x32", + "age": 38, + "eyeColor": "brown", + "name": { + "first": "Floyd", + "last": "Barker" + }, + "company": "TALAE", + "email": "floyd.barker@talae.co.uk", + "phone": "+1 (843) 435-2898", + "address": "501 Sullivan Place, Cotopaxi, Nevada, 5498", + "about": "Non deserunt voluptate occaecat est mollit dolor aliqua. Qui elit aute qui aliquip ipsum et labore est aliquip pariatur. Sint deserunt tempor dolore excepteur elit est sint in est ex anim. Nostrud culpa amet eiusmod incididunt. Ea exercitation amet labore cillum culpa duis aute incididunt dolore sunt. Cillum velit laboris quis eiusmod fugiat consectetur sit fugiat irure labore.\r\n", + "registered": "Monday, March 10, 2014 7:48 AM", + "latitude": -86.397923, + "longitude": 171.646534, + "tags": [ + "dolore", + "sit", + "qui", + "id", + "aliquip", + "mollit", + "laborum" + ], + "range": [ + 0, + 1, + 2, + 3, + 4, + 5, + 6, + 7, + 8, + 9 + ], + "friends": [ + { + "id": 0, + "name": "Eileen Daniels" + }, + { + "id": 1, + "name": "Genevieve Wood" + }, + { + "id": 2, + "name": "Carver Fields" + } + ], + "greeting": "Hello, Floyd! You have 6 unread messages.", + "favoriteFruit": "apple" + }, + { + "_id": "543fa8219eb621c418384935", + "index": 16, + "guid": "ed67eac2-cfdf-4d28-a734-bc973cba8613", + "isActive": false, + "balance": "$1,917.58", + "picture": "http://placehold.it/32x32", + "age": 38, + "eyeColor": "green", + "name": { + "first": "Carrillo", + "last": "Cox" + }, + "company": "ARCHITAX", + "email": "carrillo.cox@architax.tv", + "phone": "+1 (818) 444-3875", + "address": "309 Randolph Street, Avoca, Illinois, 770", + "about": "Labore consequat et nostrud officia ad. Sint ipsum ipsum sint laboris adipisicing minim voluptate aliqua proident est commodo nulla. Officia sint ipsum laborum aliquip adipisicing adipisicing ea et reprehenderit dolore. Et deserunt sint incididunt velit dolore voluptate deserunt anim nisi sit est officia fugiat. Velit dolore ea do enim veniam ut do. 
Duis adipisicing fugiat magna Lorem ullamco quis sint ut cupidatat laborum aute laboris sint aliqua.\r\n", + "registered": "Friday, January 17, 2014 12:19 AM", + "latitude": -31.228015, + "longitude": -82.248255, + "tags": [ + "occaecat", + "nostrud", + "ex", + "dolor", + "magna", + "minim", + "pariatur" + ], + "range": [ + 0, + 1, + 2, + 3, + 4, + 5, + 6, + 7, + 8, + 9 + ], + "friends": [ + { + "id": 0, + "name": "Douglas Mayer" + }, + { + "id": 1, + "name": "Dorothy Riddle" + }, + { + "id": 2, + "name": "Melanie Thompson" + } + ], + "greeting": "Hello, Carrillo! You have 7 unread messages.", + "favoriteFruit": "banana" + }, + { + "_id": "543fa821e152869356625777", + "index": 17, + "guid": "1dc17af3-a194-42e8-add8-a73853f16da2", + "isActive": true, + "balance": "$1,703.08", + "picture": "http://placehold.it/32x32", + "age": 36, + "eyeColor": "green", + "name": { + "first": "Ana", + "last": "Reese" + }, + "company": "FORTEAN", + "email": "ana.reese@fortean.name", + "phone": "+1 (876) 419-2128", + "address": "451 Brevoort Place, Leola, Tennessee, 3725", + "about": "Consectetur officia irure proident nulla. Anim veniam mollit sit id aliqua. Do reprehenderit culpa magna magna aute est pariatur consequat ut occaecat cillum adipisicing consectetur. Sint qui pariatur id velit deserunt laborum. Minim consequat ut sunt qui. Ex occaecat tempor fugiat sit anim veniam incididunt mollit mollit. Non id anim cillum culpa tempor voluptate aute consequat proident reprehenderit.\r\n", + "registered": "Saturday, June 28, 2014 4:53 AM", + "latitude": 80.18306, + "longitude": 70.818006, + "tags": [ + "mollit", + "voluptate", + "est", + "magna", + "ad", + "duis", + "est" + ], + "range": [ + 0, + 1, + 2, + 3, + 4, + 5, + 6, + 7, + 8, + 9 + ], + "friends": [ + { + "id": 0, + "name": "Mclaughlin Johns" + }, + { + "id": 1, + "name": "Leanne Hanson" + }, + { + "id": 2, + "name": "Isabel Leon" + } + ], + "greeting": "Hello, Ana! You have 7 unread messages.", + "favoriteFruit": "strawberry" + }, + { + "_id": "543fa82168ed66f19f510aea", + "index": 18, + "guid": "87be73dd-bf5e-48c4-8168-f8f76cda905d", + "isActive": true, + "balance": "$1,307.88", + "picture": "http://placehold.it/32x32", + "age": 31, + "eyeColor": "green", + "name": { + "first": "Daugherty", + "last": "Ware" + }, + "company": "TYPHONICA", + "email": "daugherty.ware@typhonica.me", + "phone": "+1 (936) 470-3445", + "address": "919 Oriental Boulevard, Westboro, Iowa, 2587", + "about": "Occaecat in nisi et consequat. Laboris minim consequat qui proident id aute occaecat pariatur. Sint esse anim id ex voluptate fugiat culpa anim commodo incididunt.\r\n", + "registered": "Tuesday, March 4, 2014 11:53 PM", + "latitude": -15.007384, + "longitude": -86.496257, + "tags": [ + "consequat", + "nisi", + "duis", + "cupidatat", + "anim", + "eu", + "culpa" + ], + "range": [ + 0, + 1, + 2, + 3, + 4, + 5, + 6, + 7, + 8, + 9 + ], + "friends": [ + { + "id": 0, + "name": "Maddox Wells" + }, + { + "id": 1, + "name": "Maura May" + }, + { + "id": 2, + "name": "Terry Calhoun" + } + ], + "greeting": "Hello, Daugherty! 
You have 10 unread messages.", + "favoriteFruit": "banana" + }, + { + "_id": "543fa8210abc44da895c5591", + "index": 19, + "guid": "32c4c5d0-b54c-4ea4-999a-f4fa517ac5ce", + "isActive": true, + "balance": "$2,706.98", + "picture": "http://placehold.it/32x32", + "age": 34, + "eyeColor": "green", + "name": { + "first": "Sonja", + "last": "Craft" + }, + "company": "KOZGENE", + "email": "sonja.craft@kozgene.us", + "phone": "+1 (808) 410-3427", + "address": "808 Lawn Court, Blodgett, Massachusetts, 560", + "about": "Eu occaecat reprehenderit ea ad ullamco ea sint cupidatat ex. Deserunt eu est veniam consectetur do anim in. Dolore minim veniam dolore elit sunt labore id eiusmod.\r\n", + "registered": "Sunday, August 31, 2014 12:09 AM", + "latitude": -47.101894, + "longitude": -130.294589, + "tags": [ + "sint", + "cillum", + "magna", + "sit", + "fugiat", + "nisi", + "reprehenderit" + ], + "range": [ + 0, + 1, + 2, + 3, + 4, + 5, + 6, + 7, + 8, + 9 + ], + "friends": [ + { + "id": 0, + "name": "Loretta Mcgee" + }, + { + "id": 1, + "name": "Wilson Merritt" + }, + { + "id": 2, + "name": "Susanne Lloyd" + } + ], + "greeting": "Hello, Sonja! You have 8 unread messages.", + "favoriteFruit": "banana" + }, + { + "_id": "543fa82134480e13e931193a", + "index": 20, + "guid": "c4a7ded5-aa3e-411f-9956-f4b938ce93dc", + "isActive": false, + "balance": "$3,216.50", + "picture": "http://placehold.it/32x32", + "age": 23, + "eyeColor": "green", + "name": { + "first": "Powers", + "last": "Mathews" + }, + "company": "ANDRYX", + "email": "powers.mathews@andryx.ca", + "phone": "+1 (914) 559-2596", + "address": "545 Nolans Lane, Brenton, Arkansas, 7607", + "about": "In irure et tempor ad commodo culpa reprehenderit excepteur tempor ex. Exercitation eiusmod consequat anim incididunt veniam duis sunt velit sunt aliquip esse adipisicing do. Elit ea incididunt id amet mollit ea in ad ea cupidatat duis minim consectetur incididunt.\r\n", + "registered": "Friday, July 11, 2014 11:27 AM", + "latitude": 64.259332, + "longitude": 111.604942, + "tags": [ + "ut", + "ex", + "cillum", + "commodo", + "pariatur", + "ex", + "reprehenderit" + ], + "range": [ + 0, + 1, + 2, + 3, + 4, + 5, + 6, + 7, + 8, + 9 + ], + "friends": [ + { + "id": 0, + "name": "Woodward Witt" + }, + { + "id": 1, + "name": "Hazel Mcfadden" + }, + { + "id": 2, + "name": "Desiree Mclean" + } + ], + "greeting": "Hello, Powers! You have 5 unread messages.", + "favoriteFruit": "apple" + }, + { + "_id": "543fa821117e0b8bd3f6e566", + "index": 21, + "guid": "fae9ba5c-948b-429c-b1e6-a0a6835f0694", + "isActive": false, + "balance": "$1,046.41", + "picture": "http://placehold.it/32x32", + "age": 31, + "eyeColor": "brown", + "name": { + "first": "Lola", + "last": "Roy" + }, + "company": "EURON", + "email": "lola.roy@euron.org", + "phone": "+1 (920) 413-2000", + "address": "237 Tabor Court, Berwind, Hawaii, 2276", + "about": "Aute voluptate proident occaecat exercitation aute proident ullamco veniam aute magna velit cupidatat. Occaecat dolor aliquip adipisicing dolor do elit eu elit laboris officia magna dolore. Velit tempor sit ad et occaecat nisi elit excepteur. 
Non velit sint deserunt culpa magna irure.\r\n", + "registered": "Thursday, February 27, 2014 1:20 AM", + "latitude": 12.90929, + "longitude": 68.693395, + "tags": [ + "ad", + "officia", + "non", + "aute", + "magna", + "minim", + "sint" + ], + "range": [ + 0, + 1, + 2, + 3, + 4, + 5, + 6, + 7, + 8, + 9 + ], + "friends": [ + { + "id": 0, + "name": "Lee Tate" + }, + { + "id": 1, + "name": "Miranda Payne" + }, + { + "id": 2, + "name": "Jocelyn Cantrell" + } + ], + "greeting": "Hello, Lola! You have 9 unread messages.", + "favoriteFruit": "banana" + }, + { + "_id": "543fa8211cf627425d947100", + "index": 22, + "guid": "590c913b-2457-47a5-ad5c-8a5fc3c249f9", + "isActive": true, + "balance": "$2,144.16", + "picture": "http://placehold.it/32x32", + "age": 22, + "eyeColor": "green", + "name": { + "first": "Marci", + "last": "Mcpherson" + }, + "company": "VIAGREAT", + "email": "marci.mcpherson@viagreat.biz", + "phone": "+1 (916) 417-2166", + "address": "527 Kenilworth Place, Bartonsville, Puerto Rico, 4739", + "about": "Duis velit irure sit sit aliquip sit culpa velit labore velit ipsum amet. Pariatur labore ex et sunt proident ad minim. Aliquip qui adipisicing elit do sunt mollit irure adipisicing in labore cillum. Ut velit dolor cillum irure voluptate ad incididunt consequat cillum esse laborum consequat do.\r\n", + "registered": "Wednesday, August 27, 2014 6:55 AM", + "latitude": 23.135493, + "longitude": -133.213153, + "tags": [ + "ut", + "ex", + "deserunt", + "mollit", + "cillum", + "aliquip", + "excepteur" + ], + "range": [ + 0, + 1, + 2, + 3, + 4, + 5, + 6, + 7, + 8, + 9 + ], + "friends": [ + { + "id": 0, + "name": "Latisha Ortiz" + }, + { + "id": 1, + "name": "Ila Marshall" + }, + { + "id": 2, + "name": "Kathie Strong" + } + ], + "greeting": "Hello, Marci! You have 5 unread messages.", + "favoriteFruit": "banana" + }, + { + "_id": "543fa82113ff45aaf64d6b75", + "index": 23, + "guid": "a074b7d2-92de-4728-9032-ac711cc8ca1b", + "isActive": true, + "balance": "$1,927.67", + "picture": "http://placehold.it/32x32", + "age": 26, + "eyeColor": "green", + "name": { + "first": "Lorie", + "last": "Haynes" + }, + "company": "CUBIX", + "email": "lorie.haynes@cubix.io", + "phone": "+1 (914) 479-2574", + "address": "209 Stoddard Place, Grahamtown, New Hampshire, 3422", + "about": "Commodo eu reprehenderit aute veniam occaecat eiusmod ex enim mollit elit. Officia fugiat proident cillum sint sint. In anim occaecat in dolore pariatur occaecat dolore eu duis sint veniam labore tempor id.\r\n", + "registered": "Thursday, June 19, 2014 3:03 AM", + "latitude": -80.694066, + "longitude": 98.315178, + "tags": [ + "proident", + "est", + "nulla", + "minim", + "aute", + "duis", + "ea" + ], + "range": [ + 0, + 1, + 2, + 3, + 4, + 5, + 6, + 7, + 8, + 9 + ], + "friends": [ + { + "id": 0, + "name": "Shana Jensen" + }, + { + "id": 1, + "name": "Audra Hays" + }, + { + "id": 2, + "name": "Shannon Stewart" + } + ], + "greeting": "Hello, Lorie! 
You have 5 unread messages.", + "favoriteFruit": "apple" + }, + { + "_id": "543fa8216788a07fe633c5a6", + "index": 24, + "guid": "ff361134-2ce3-4cce-b043-d571e87a041d", + "isActive": false, + "balance": "$1,274.62", + "picture": "http://placehold.it/32x32", + "age": 34, + "eyeColor": "green", + "name": { + "first": "Parker", + "last": "Higgins" + }, + "company": "ISOSTREAM", + "email": "parker.higgins@isostream.net", + "phone": "+1 (965) 467-3975", + "address": "908 Division Place, Homeland, South Carolina, 2577", + "about": "Laborum minim consectetur ipsum incididunt cupidatat ex ad labore eu non est consequat. Tempor eiusmod commodo Lorem enim aliquip ad non sint ipsum culpa amet. Eu sit amet velit est sit cupidatat aliquip magna proident id veniam Lorem dolore. Eiusmod ex amet proident enim ipsum proident mollit adipisicing ut.\r\n", + "registered": "Monday, August 25, 2014 4:51 AM", + "latitude": -28.784274, + "longitude": -151.224185, + "tags": [ + "ea", + "cupidatat", + "do", + "culpa", + "ea", + "ullamco", + "nulla" + ], + "range": [ + 0, + 1, + 2, + 3, + 4, + 5, + 6, + 7, + 8, + 9 + ], + "friends": [ + { + "id": 0, + "name": "Bridgette Tyler" + }, + { + "id": 1, + "name": "Harris Pollard" + }, + { + "id": 2, + "name": "Davenport Skinner" + } + ], + "greeting": "Hello, Parker! You have 6 unread messages.", + "favoriteFruit": "banana" + }, + { + "_id": "543fa821ff898d23056717a4", + "index": 25, + "guid": "f5e85a0d-f46e-427e-86a7-657eaaadb169", + "isActive": true, + "balance": "$3,413.58", + "picture": "http://placehold.it/32x32", + "age": 36, + "eyeColor": "brown", + "name": { + "first": "Rios", + "last": "Reilly" + }, + "company": "QUILTIGEN", + "email": "rios.reilly@quiltigen.com", + "phone": "+1 (982) 565-3930", + "address": "789 Beekman Place, Wiscon, Texas, 8745", + "about": "Consectetur qui do sint deserunt voluptate sunt dolor in officia aliquip. Eu irure sit veniam nostrud culpa laboris. Commodo nostrud cillum nulla nostrud.\r\n", + "registered": "Thursday, September 4, 2014 1:54 PM", + "latitude": 6.093115, + "longitude": 145.037939, + "tags": [ + "eu", + "consectetur", + "veniam", + "pariatur", + "laboris", + "ad", + "cupidatat" + ], + "range": [ + 0, + 1, + 2, + 3, + 4, + 5, + 6, + 7, + 8, + 9 + ], + "friends": [ + { + "id": 0, + "name": "Sweet Conley" + }, + { + "id": 1, + "name": "Key Grant" + }, + { + "id": 2, + "name": "Guthrie Moss" + } + ], + "greeting": "Hello, Rios! You have 10 unread messages.", + "favoriteFruit": "apple" + }, + { + "_id": "543fa82125f7c765fcbb1743", + "index": 26, + "guid": "e5f12323-5c7c-4103-b20c-1a633845a28c", + "isActive": true, + "balance": "$1,645.61", + "picture": "http://placehold.it/32x32", + "age": 26, + "eyeColor": "green", + "name": { + "first": "Hurley", + "last": "Cooke" + }, + "company": "QUORDATE", + "email": "hurley.cooke@quordate.biz", + "phone": "+1 (841) 404-3894", + "address": "369 Denton Place, Curtice, South Dakota, 2613", + "about": "Nulla non in aliqua sit mollit pariatur do mollit. Ut pariatur ut velit minim. Fugiat deserunt velit duis consequat labore culpa voluptate sint voluptate consectetur officia voluptate et laborum. Et exercitation ut eu pariatur minim velit elit. 
Dolore amet officia ipsum voluptate occaecat eiusmod cupidatat do dolore consequat esse consectetur aliquip.\r\n", + "registered": "Sunday, April 13, 2014 11:42 AM", + "latitude": -78.463811, + "longitude": 36.580914, + "tags": [ + "deserunt", + "nisi", + "do", + "enim", + "nisi", + "qui", + "ipsum" + ], + "range": [ + 0, + 1, + 2, + 3, + 4, + 5, + 6, + 7, + 8, + 9 + ], + "friends": [ + { + "id": 0, + "name": "Harvey Norman" + }, + { + "id": 1, + "name": "Porter Shannon" + }, + { + "id": 2, + "name": "Reyes Goodman" + } + ], + "greeting": "Hello, Hurley! You have 10 unread messages.", + "favoriteFruit": "apple" + }, + { + "_id": "543fa821f375b9cabd418303", + "index": 27, + "guid": "452fe001-7fff-4f69-9336-00e3e80e9792", + "isActive": true, + "balance": "$2,608.67", + "picture": "http://placehold.it/32x32", + "age": 38, + "eyeColor": "green", + "name": { + "first": "Jill", + "last": "Blair" + }, + "company": "ORBALIX", + "email": "jill.blair@orbalix.co.uk", + "phone": "+1 (863) 519-2778", + "address": "680 Cleveland Street, Kohatk, Ohio, 1688", + "about": "Do labore sint cupidatat dolor. Mollit nulla voluptate nostrud tempor ad cillum in mollit officia reprehenderit duis commodo veniam ad. Adipisicing enim adipisicing consequat sint minim ut. Cupidatat non ullamco sunt mollit proident. Aliquip dolore dolor excepteur cupidatat. Consectetur duis adipisicing qui enim aute quis veniam deserunt occaecat. Duis elit exercitation ullamco voluptate aliqua.\r\n", + "registered": "Tuesday, September 30, 2014 11:23 PM", + "latitude": -33.279869, + "longitude": 6.221211, + "tags": [ + "nostrud", + "elit", + "adipisicing", + "esse", + "in", + "commodo", + "ea" + ], + "range": [ + 0, + 1, + 2, + 3, + 4, + 5, + 6, + 7, + 8, + 9 + ], + "friends": [ + { + "id": 0, + "name": "Tania Flores" + }, + { + "id": 1, + "name": "Nina Blackburn" + }, + { + "id": 2, + "name": "Mathews Fischer" + } + ], + "greeting": "Hello, Jill! You have 10 unread messages.", + "favoriteFruit": "banana" + }, + { + "_id": "543fa821304372a99199671f", + "index": 28, + "guid": "d160ded3-911c-4ce2-a314-9ed9a8e6fa9b", + "isActive": true, + "balance": "$3,005.54", + "picture": "http://placehold.it/32x32", + "age": 35, + "eyeColor": "brown", + "name": { + "first": "Estela", + "last": "Dalton" + }, + "company": "POWERNET", + "email": "estela.dalton@powernet.tv", + "phone": "+1 (959) 527-2607", + "address": "820 Montauk Avenue, Whitmer, Maine, 3867", + "about": "Commodo est ullamco sit eu irure tempor veniam deserunt in aute cillum tempor. Occaecat velit et deserunt incididunt sint do eu consectetur enim ullamco consectetur esse ipsum pariatur. Tempor exercitation dolore tempor enim. Dolor esse est magna occaecat. Elit culpa sint non ea exercitation. Aliquip nostrud aliquip culpa Lorem cillum incididunt do sit sunt velit id. Proident sit proident est velit consequat cillum officia in et.\r\n", + "registered": "Sunday, April 13, 2014 5:53 AM", + "latitude": 32.713335, + "longitude": 174.505048, + "tags": [ + "aliquip", + "dolore", + "proident", + "pariatur", + "elit", + "cillum", + "id" + ], + "range": [ + 0, + 1, + 2, + 3, + 4, + 5, + 6, + 7, + 8, + 9 + ], + "friends": [ + { + "id": 0, + "name": "Gibson Durham" + }, + { + "id": 1, + "name": "Carolina Cooley" + }, + { + "id": 2, + "name": "Rosa Mcintyre" + } + ], + "greeting": "Hello, Estela! 
You have 8 unread messages.", + "favoriteFruit": "apple" + }, + { + "_id": "543fa82187c6143c96c5ce1f", + "index": 29, + "guid": "f62ae8de-8905-4ac0-9b5c-ce12dad96a86", + "isActive": false, + "balance": "$1,859.49", + "picture": "http://placehold.it/32x32", + "age": 34, + "eyeColor": "brown", + "name": { + "first": "Martina", + "last": "Jacobson" + }, + "company": "CUIZINE", + "email": "martina.jacobson@cuizine.name", + "phone": "+1 (927) 493-2997", + "address": "234 Utica Avenue, Hinsdale, Vermont, 459", + "about": "Pariatur nulla ad sint tempor qui in id aliqua ex et ut. Qui occaecat quis veniam mollit officia duis ad ea. Est consectetur sit sint proident sit do. Id ut incididunt tempor id irure. Qui commodo cillum labore anim eiusmod exercitation ea qui nulla qui amet.\r\n", + "registered": "Sunday, February 9, 2014 3:54 AM", + "latitude": -36.70558, + "longitude": -140.397297, + "tags": [ + "voluptate", + "adipisicing", + "do", + "deserunt", + "aliquip", + "est", + "minim" + ], + "range": [ + 0, + 1, + 2, + 3, + 4, + 5, + 6, + 7, + 8, + 9 + ], + "friends": [ + { + "id": 0, + "name": "Griffith Martinez" + }, + { + "id": 1, + "name": "Richard Chavez" + }, + { + "id": 2, + "name": "Mckinney Butler" + } + ], + "greeting": "Hello, Martina! You have 5 unread messages.", + "favoriteFruit": "banana" + }, + { + "_id": "543fa821392ebf56130b6eaa", + "index": 30, + "guid": "91c5f9c9-d3c7-435a-98cc-06e360c12e1d", + "isActive": true, + "balance": "$3,693.79", + "picture": "http://placehold.it/32x32", + "age": 21, + "eyeColor": "brown", + "name": { + "first": "Althea", + "last": "Valencia" + }, + "company": "BLANET", + "email": "althea.valencia@blanet.me", + "phone": "+1 (887) 501-3212", + "address": "911 Adams Street, Brambleton, Delaware, 5831", + "about": "Quis culpa exercitation dolor anim. Id labore ea aute aliqua. Dolor dolore sit duis anim cillum nostrud officia dolor sit. Laborum tempor dolore id consequat.\r\n", + "registered": "Saturday, January 11, 2014 6:27 PM", + "latitude": -39.101471, + "longitude": -22.991091, + "tags": [ + "eu", + "anim", + "elit", + "pariatur", + "cupidatat", + "cupidatat", + "enim" + ], + "range": [ + 0, + 1, + 2, + 3, + 4, + 5, + 6, + 7, + 8, + 9 + ], + "friends": [ + { + "id": 0, + "name": "Dawson Foreman" + }, + { + "id": 1, + "name": "Sanford Meyer" + }, + { + "id": 2, + "name": "Ruth Barron" + } + ], + "greeting": "Hello, Althea! You have 8 unread messages.", + "favoriteFruit": "apple" + }, + { + "_id": "543fa82111b129b44454e349", + "index": 31, + "guid": "184ea3dd-af3f-400a-aa64-429b6cac091f", + "isActive": true, + "balance": "$1,351.78", + "picture": "http://placehold.it/32x32", + "age": 20, + "eyeColor": "blue", + "name": { + "first": "Morrison", + "last": "Chan" + }, + "company": "TALKALOT", + "email": "morrison.chan@talkalot.us", + "phone": "+1 (822) 448-3384", + "address": "599 Olive Street, Franklin, Palau, 3303", + "about": "Ex deserunt nulla velit dolore. Sunt sit ea irure incididunt aute sint do veniam. 
Sit ut ad ipsum est velit ea duis exercitation aliquip consectetur Lorem fugiat eu.\r\n", + "registered": "Sunday, February 16, 2014 2:04 PM", + "latitude": 61.099205, + "longitude": -37.736061, + "tags": [ + "eu", + "deserunt", + "pariatur", + "labore", + "reprehenderit", + "magna", + "consectetur" + ], + "range": [ + 0, + 1, + 2, + 3, + 4, + 5, + 6, + 7, + 8, + 9 + ], + "friends": [ + { + "id": 0, + "name": "Norma Montoya" + }, + { + "id": 1, + "name": "Bailey Gillespie" + }, + { + "id": 2, + "name": "Candace Kent" + } + ], + "greeting": "Hello, Morrison! You have 7 unread messages.", + "favoriteFruit": "banana" + }, + { + "_id": "543fa821aec56cb0ce50b54b", + "index": 32, + "guid": "b211ab76-8674-4ed0-9a40-2087930468ad", + "isActive": false, + "balance": "$1,492.99", + "picture": "http://placehold.it/32x32", + "age": 34, + "eyeColor": "green", + "name": { + "first": "Walters", + "last": "Potts" + }, + "company": "FLUMBO", + "email": "walters.potts@flumbo.ca", + "phone": "+1 (871) 461-3958", + "address": "438 Highland Avenue, Elwood, Arizona, 9669", + "about": "Lorem do Lorem aliquip ipsum. Elit labore reprehenderit tempor do. Incididunt labore ad eu occaecat enim laborum irure elit nulla Lorem anim sit exercitation velit. Proident ullamco voluptate aute ex et aute mollit nostrud. Adipisicing labore sit irure amet dolore nostrud. Tempor nulla aliqua culpa commodo aliqua ut esse velit mollit ad. Aliqua nulla enim non nisi laboris sint aute duis proident qui officia.\r\n", + "registered": "Saturday, September 6, 2014 11:47 AM", + "latitude": 64.732922, + "longitude": -168.513014, + "tags": [ + "tempor", + "amet", + "dolore", + "proident", + "reprehenderit", + "non", + "tempor" + ], + "range": [ + 0, + 1, + 2, + 3, + 4, + 5, + 6, + 7, + 8, + 9 + ], + "friends": [ + { + "id": 0, + "name": "Freida Bailey" + }, + { + "id": 1, + "name": "Bernice Curry" + }, + { + "id": 2, + "name": "Ochoa Jefferson" + } + ], + "greeting": "Hello, Walters! You have 6 unread messages.", + "favoriteFruit": "apple" + }, + { + "_id": "543fa82137be18ee6b852a45", + "index": 33, + "guid": "e8c67cca-c977-4668-9aff-46bfde1cd3de", + "isActive": true, + "balance": "$3,747.65", + "picture": "http://placehold.it/32x32", + "age": 24, + "eyeColor": "blue", + "name": { + "first": "Meredith", + "last": "Santana" + }, + "company": "GADTRON", + "email": "meredith.santana@gadtron.org", + "phone": "+1 (836) 438-3637", + "address": "999 Centre Street, Chaparrito, Colorado, 4540", + "about": "Magna nisi laboris sit quis duis anim et ullamco nostrud exercitation. Tempor enim nisi non culpa sit ex elit labore proident veniam dolore anim ex. Nostrud est qui do magna proident et. Nulla ea laboris incididunt elit labore id mollit reprehenderit. Amet in Lorem exercitation tempor voluptate labore anim adipisicing labore in dolor proident labore. Lorem labore duis ex Lorem nulla. Veniam in fugiat ex ullamco officia elit eiusmod enim.\r\n", + "registered": "Sunday, March 2, 2014 1:38 PM", + "latitude": 80.220732, + "longitude": 79.102966, + "tags": [ + "culpa", + "esse", + "velit", + "consectetur", + "incididunt", + "dolore", + "aliqua" + ], + "range": [ + 0, + 1, + 2, + 3, + 4, + 5, + 6, + 7, + 8, + 9 + ], + "friends": [ + { + "id": 0, + "name": "Bernadine Manning" + }, + { + "id": 1, + "name": "Daphne Wyatt" + }, + { + "id": 2, + "name": "Keri Harrison" + } + ], + "greeting": "Hello, Meredith! 
You have 7 unread messages.", + "favoriteFruit": "banana" + }, + { + "_id": "543fa821ecdd700bb0b77dc2", + "index": 34, + "guid": "b93f3a6f-f05b-4d7e-93b8-4502d9c76cd2", + "isActive": true, + "balance": "$3,059.56", + "picture": "http://placehold.it/32x32", + "age": 25, + "eyeColor": "brown", + "name": { + "first": "Samantha", + "last": "Finley" + }, + "company": "XYQAG", + "email": "samantha.finley@xyqag.biz", + "phone": "+1 (879) 568-2419", + "address": "349 Truxton Street, Haring, Missouri, 8383", + "about": "Consequat do id et quis eiusmod eu irure sunt qui. Mollit minim nulla magna duis nostrud cillum ullamco sunt adipisicing elit ex. Minim fugiat deserunt nostrud esse laboris ullamco sit sit magna. Tempor occaecat Lorem qui ad ut tempor excepteur. Et sunt ullamco officia et Lorem est. Ipsum dolor ut ut elit do nisi in aute consequat. Enim esse ex aliqua anim aliquip cupidatat do Lorem voluptate quis ea culpa incididunt reprehenderit.\r\n", + "registered": "Saturday, March 29, 2014 1:38 PM", + "latitude": 79.209401, + "longitude": -139.211605, + "tags": [ + "irure", + "eiusmod", + "nulla", + "officia", + "eu", + "elit", + "nisi" + ], + "range": [ + 0, + 1, + 2, + 3, + 4, + 5, + 6, + 7, + 8, + 9 + ], + "friends": [ + { + "id": 0, + "name": "Mayer Justice" + }, + { + "id": 1, + "name": "Mae Hancock" + }, + { + "id": 2, + "name": "Sherri Bradshaw" + } + ], + "greeting": "Hello, Samantha! You have 8 unread messages.", + "favoriteFruit": "banana" + }, + { + "_id": "543fa8218ac5365ad300d1bd", + "index": 35, + "guid": "f3b50cb9-da35-47fe-b0fa-fec9fc84cf8e", + "isActive": false, + "balance": "$2,819.28", + "picture": "http://placehold.it/32x32", + "age": 29, + "eyeColor": "blue", + "name": { + "first": "Cohen", + "last": "Finch" + }, + "company": "SYNTAC", + "email": "cohen.finch@syntac.io", + "phone": "+1 (950) 459-2729", + "address": "436 Pineapple Street, Deercroft, Minnesota, 3218", + "about": "Sint velit officia quis esse. Nulla aute laborum veniam dolore tempor adipisicing proident. Duis irure esse nostrud veniam est mollit mollit voluptate eiusmod anim veniam eiusmod. Ullamco sunt sit sint minim ea reprehenderit qui consequat ipsum. Sint id voluptate reprehenderit irure nulla veniam eu Lorem enim nulla. Cupidatat amet pariatur dolor amet ex nostrud dolor ipsum tempor enim nulla aliquip tempor Lorem.\r\n", + "registered": "Wednesday, October 8, 2014 10:04 PM", + "latitude": -84.299718, + "longitude": 52.573184, + "tags": [ + "deserunt", + "eu", + "consectetur", + "ea", + "non", + "officia", + "reprehenderit" + ], + "range": [ + 0, + 1, + 2, + 3, + 4, + 5, + 6, + 7, + 8, + 9 + ], + "friends": [ + { + "id": 0, + "name": "Stevenson Mcintosh" + }, + { + "id": 1, + "name": "Chrystal Oneill" + }, + { + "id": 2, + "name": "Janine Rowe" + } + ], + "greeting": "Hello, Cohen! You have 10 unread messages.", + "favoriteFruit": "apple" + }, + { + "_id": "543fa82161d5c66d4bd0996e", + "index": 36, + "guid": "6dfc2232-f3c6-4818-956a-e8c683fd69fe", + "isActive": true, + "balance": "$3,262.20", + "picture": "http://placehold.it/32x32", + "age": 33, + "eyeColor": "blue", + "name": { + "first": "Joy", + "last": "Moran" + }, + "company": "UPDAT", + "email": "joy.moran@updat.net", + "phone": "+1 (968) 581-3365", + "address": "279 Kosciusko Street, Smock, Northern Mariana Islands, 5007", + "about": "Quis elit pariatur eu enim magna magna sunt dolore duis commodo. Pariatur sint duis ex aute eu est deserunt culpa fugiat minim non. Tempor consequat consectetur consequat sit incididunt officia id. 
Incididunt ex eiusmod excepteur aute mollit veniam quis excepteur occaecat excepteur deserunt reprehenderit. Est sit laboris eu dolor. Sunt voluptate quis aliquip nulla ex irure velit in. Aliqua sit id eiusmod amet commodo pariatur deserunt voluptate qui minim ex incididunt voluptate.\r\n", + "registered": "Saturday, February 8, 2014 9:55 PM", + "latitude": 53.735731, + "longitude": 46.00211, + "tags": [ + "exercitation", + "dolor", + "minim", + "incididunt", + "laborum", + "qui", + "sit" + ], + "range": [ + 0, + 1, + 2, + 3, + 4, + 5, + 6, + 7, + 8, + 9 + ], + "friends": [ + { + "id": 0, + "name": "Joyce Mckay" + }, + { + "id": 1, + "name": "Turner Murray" + }, + { + "id": 2, + "name": "Jackson Jackson" + } + ], + "greeting": "Hello, Joy! You have 7 unread messages.", + "favoriteFruit": "strawberry" + }, + { + "_id": "543fa821a9817ca1a9be6517", + "index": 37, + "guid": "84fe8707-cfbc-4434-98bb-faf9cb97471a", + "isActive": true, + "balance": "$3,224.80", + "picture": "http://placehold.it/32x32", + "age": 27, + "eyeColor": "blue", + "name": { + "first": "Brennan", + "last": "Stafford" + }, + "company": "LOVEPAD", + "email": "brennan.stafford@lovepad.com", + "phone": "+1 (873) 405-3600", + "address": "471 Ryder Avenue, Succasunna, Marshall Islands, 2595", + "about": "Et officia quis magna laborum et proident labore elit do Lorem reprehenderit irure. Laborum culpa culpa voluptate commodo consequat non et amet. Mollit cupidatat irure magna sint commodo ipsum proident tempor est. Laboris exercitation aliqua aute deserunt do in aliqua minim ex excepteur. Consequat in minim officia labore laboris laboris occaecat occaecat. Ex qui aliquip sint consectetur elit excepteur incididunt eu non laborum do eu excepteur.\r\n", + "registered": "Saturday, May 31, 2014 3:28 PM", + "latitude": -69.429728, + "longitude": 46.837644, + "tags": [ + "proident", + "ad", + "non", + "duis", + "occaecat", + "proident", + "non" + ], + "range": [ + 0, + 1, + 2, + 3, + 4, + 5, + 6, + 7, + 8, + 9 + ], + "friends": [ + { + "id": 0, + "name": "Jones Kirk" + }, + { + "id": 1, + "name": "Howe Drake" + }, + { + "id": 2, + "name": "Kimberly Jennings" + } + ], + "greeting": "Hello, Brennan! You have 6 unread messages.", + "favoriteFruit": "strawberry" + }, + { + "_id": "543fa8219251dc49e1cb846a", + "index": 38, + "guid": "c260bce1-46ac-4e84-9490-13eb8202904e", + "isActive": true, + "balance": "$1,330.69", + "picture": "http://placehold.it/32x32", + "age": 27, + "eyeColor": "brown", + "name": { + "first": "Neal", + "last": "Mooney" + }, + "company": "SOFTMICRO", + "email": "neal.mooney@softmicro.biz", + "phone": "+1 (883) 463-3623", + "address": "818 Lancaster Avenue, Chelsea, New Jersey, 3590", + "about": "Reprehenderit anim nostrud adipisicing non minim ea. Elit deserunt id in mollit nisi. Pariatur in consequat irure aliqua laboris ipsum.\r\n", + "registered": "Wednesday, May 21, 2014 4:33 PM", + "latitude": -57.826881, + "longitude": 154.840249, + "tags": [ + "consequat", + "aliquip", + "pariatur", + "nulla", + "dolore", + "deserunt", + "ipsum" + ], + "range": [ + 0, + 1, + 2, + 3, + 4, + 5, + 6, + 7, + 8, + 9 + ], + "friends": [ + { + "id": 0, + "name": "Beach Roman" + }, + { + "id": 1, + "name": "Nash Young" + }, + { + "id": 2, + "name": "Carey Dale" + } + ], + "greeting": "Hello, Neal! 
You have 10 unread messages.", + "favoriteFruit": "apple" + }, + { + "_id": "543fa821cedc96e4f2209487", + "index": 39, + "guid": "ed7bbe27-811c-44e4-896e-cdf6ef62e048", + "isActive": false, + "balance": "$3,148.21", + "picture": "http://placehold.it/32x32", + "age": 35, + "eyeColor": "green", + "name": { + "first": "Roy", + "last": "Becker" + }, + "company": "KONGENE", + "email": "roy.becker@kongene.co.uk", + "phone": "+1 (895) 426-2172", + "address": "414 Seagate Avenue, Watrous, Virgin Islands, 3905", + "about": "Commodo mollit do minim dolor magna occaecat labore Lorem eiusmod. Occaecat mollit occaecat ex anim est amet irure non minim. Tempor laborum cupidatat tempor ex Lorem cupidatat incididunt ullamco fugiat Lorem consequat labore Lorem non.\r\n", + "registered": "Sunday, August 17, 2014 3:16 PM", + "latitude": -2.609533, + "longitude": -143.844769, + "tags": [ + "do", + "culpa", + "sint", + "ea", + "duis", + "aliqua", + "anim" + ], + "range": [ + 0, + 1, + 2, + 3, + 4, + 5, + 6, + 7, + 8, + 9 + ], + "friends": [ + { + "id": 0, + "name": "Lowery Hull" + }, + { + "id": 1, + "name": "Abbott Oneal" + }, + { + "id": 2, + "name": "Nellie Hammond" + } + ], + "greeting": "Hello, Roy! You have 9 unread messages.", + "favoriteFruit": "banana" + }, + { + "_id": "543fa821668031e7f50e30e9", + "index": 40, + "guid": "14dc3a87-0acf-4edd-93e4-ddcbeeecf96b", + "isActive": false, + "balance": "$2,617.16", + "picture": "http://placehold.it/32x32", + "age": 31, + "eyeColor": "brown", + "name": { + "first": "Courtney", + "last": "Watson" + }, + "company": "ISOSWITCH", + "email": "courtney.watson@isoswitch.tv", + "phone": "+1 (882) 468-2163", + "address": "385 Douglass Street, Iberia, Oregon, 7802", + "about": "Culpa sunt amet eu magna id quis quis irure velit. Culpa nostrud do enim proident officia. Laboris laborum laborum esse irure proident laborum amet sunt ipsum dolor nulla non ipsum sint. Amet deserunt in esse aliquip laboris proident fugiat nisi cillum ullamco occaecat est. Reprehenderit laborum enim labore ex. Velit do adipisicing irure dolor pariatur duis magna velit. Laborum sint laborum eu anim aliquip adipisicing labore.\r\n", + "registered": "Tuesday, July 22, 2014 6:03 PM", + "latitude": -75.831312, + "longitude": -172.468604, + "tags": [ + "dolor", + "velit", + "et", + "id", + "cupidatat", + "exercitation", + "laborum" + ], + "range": [ + 0, + 1, + 2, + 3, + 4, + 5, + 6, + 7, + 8, + 9 + ], + "friends": [ + { + "id": 0, + "name": "Mccray Gomez" + }, + { + "id": 1, + "name": "Hoover Rasmussen" + }, + { + "id": 2, + "name": "Hillary Castillo" + } + ], + "greeting": "Hello, Courtney! You have 5 unread messages.", + "favoriteFruit": "apple" + }, + { + "_id": "543fa8213c480453670f0428", + "index": 41, + "guid": "e7b97ea6-a13f-42ab-a998-e48614000aca", + "isActive": true, + "balance": "$1,499.72", + "picture": "http://placehold.it/32x32", + "age": 35, + "eyeColor": "blue", + "name": { + "first": "Dale", + "last": "Love" + }, + "company": "BULLJUICE", + "email": "dale.love@bulljuice.name", + "phone": "+1 (933) 588-3310", + "address": "603 Myrtle Avenue, Allentown, Utah, 793", + "about": "Ad quis anim commodo nulla et anim minim commodo irure excepteur. Pariatur ut anim aliquip id ex ipsum exercitation irure qui in nisi quis. Cillum deserunt duis dolore quis nostrud incididunt ipsum ea ipsum id fugiat eu voluptate nisi. Exercitation laborum fugiat irure anim. 
Ex nostrud aliqua deserunt amet.\r\n", + "registered": "Tuesday, July 22, 2014 1:19 PM", + "latitude": 55.335017, + "longitude": 101.730023, + "tags": [ + "ea", + "non", + "fugiat", + "Lorem", + "tempor", + "ut", + "do" + ], + "range": [ + 0, + 1, + 2, + 3, + 4, + 5, + 6, + 7, + 8, + 9 + ], + "friends": [ + { + "id": 0, + "name": "Spears Diaz" + }, + { + "id": 1, + "name": "Nora Dominguez" + }, + { + "id": 2, + "name": "Tamra Paul" + } + ], + "greeting": "Hello, Dale! You have 9 unread messages.", + "favoriteFruit": "strawberry" + }, + { + "_id": "543fa821c82bf70ca08206a9", + "index": 42, + "guid": "4367142d-e2b7-475c-b430-ac617295fbfc", + "isActive": false, + "balance": "$2,698.29", + "picture": "http://placehold.it/32x32", + "age": 37, + "eyeColor": "blue", + "name": { + "first": "Gibbs", + "last": "Hunt" + }, + "company": "COWTOWN", + "email": "gibbs.hunt@cowtown.me", + "phone": "+1 (960) 424-3404", + "address": "758 Suydam Place, Adamstown, North Carolina, 2800", + "about": "Veniam qui incididunt officia amet commodo nostrud. Magna consectetur consectetur officia Lorem amet sit officia excepteur minim consectetur pariatur dolore. Mollit fugiat aliqua consectetur qui non elit in aliquip culpa Lorem consectetur velit ad.\r\n", + "registered": "Monday, February 10, 2014 4:10 PM", + "latitude": 62.263652, + "longitude": 64.978136, + "tags": [ + "et", + "dolor", + "aute", + "minim", + "sunt", + "veniam", + "do" + ], + "range": [ + 0, + 1, + 2, + 3, + 4, + 5, + 6, + 7, + 8, + 9 + ], + "friends": [ + { + "id": 0, + "name": "Henry Schmidt" + }, + { + "id": 1, + "name": "Herman Wynn" + }, + { + "id": 2, + "name": "Wilda Grimes" + } + ], + "greeting": "Hello, Gibbs! You have 5 unread messages.", + "favoriteFruit": "apple" + }, + { + "_id": "543fa8210b1096c8a2f821c4", + "index": 43, + "guid": "5184508c-47f7-48b5-85ac-d15ed747ed07", + "isActive": true, + "balance": "$3,460.45", + "picture": "http://placehold.it/32x32", + "age": 30, + "eyeColor": "green", + "name": { + "first": "Francis", + "last": "Barton" + }, + "company": "COMTRAIL", + "email": "francis.barton@comtrail.us", + "phone": "+1 (881) 419-2936", + "address": "562 Ferris Street, Ola, Maryland, 3838", + "about": "Duis minim laboris in reprehenderit id ut laborum esse consequat. In dolore sunt consequat non fugiat do duis duis. Officia ipsum eiusmod laboris do aliqua aute velit minim nulla nisi. Dolor incididunt enim est eu cupidatat. Dolor commodo sit consectetur irure aliqua ea enim esse reprehenderit ullamco.\r\n", + "registered": "Thursday, April 10, 2014 2:48 PM", + "latitude": -35.457713, + "longitude": 141.805123, + "tags": [ + "tempor", + "officia", + "quis", + "tempor", + "ex", + "mollit", + "amet" + ], + "range": [ + 0, + 1, + 2, + 3, + 4, + 5, + 6, + 7, + 8, + 9 + ], + "friends": [ + { + "id": 0, + "name": "Mayra Walters" + }, + { + "id": 1, + "name": "Casey Gross" + }, + { + "id": 2, + "name": "Aisha Santos" + } + ], + "greeting": "Hello, Francis! You have 7 unread messages.", + "favoriteFruit": "banana" + }, + { + "_id": "543fa821890ad48b2eaabcfb", + "index": 44, + "guid": "07497907-7e6f-471d-af9d-1dcbcf056a56", + "isActive": true, + "balance": "$2,166.79", + "picture": "http://placehold.it/32x32", + "age": 33, + "eyeColor": "blue", + "name": { + "first": "Dina", + "last": "Travis" + }, + "company": "VENOFLEX", + "email": "dina.travis@venoflex.ca", + "phone": "+1 (921) 485-3865", + "address": "366 Tillary Street, Century, New York, 6335", + "about": "Dolor sunt culpa enim sint officia sint id do ut anim ex in. 
Dolore est irure aliquip nulla laborum aliqua tempor id ea mollit in ad deserunt. Qui ullamco duis qui elit excepteur. Aute proident duis veniam enim commodo non minim id. Consequat anim eiusmod consectetur ut et labore officia ex ad cillum occaecat. Sit irure officia veniam sint et consequat reprehenderit officia qui. Aute esse nulla ad quis reprehenderit duis.\r\n", + "registered": "Friday, September 12, 2014 5:53 AM", + "latitude": 24.764352, + "longitude": 148.493552, + "tags": [ + "qui", + "officia", + "dolor", + "velit", + "ex", + "veniam", + "ullamco" + ], + "range": [ + 0, + 1, + 2, + 3, + 4, + 5, + 6, + 7, + 8, + 9 + ], + "friends": [ + { + "id": 0, + "name": "Sharpe Foster" + }, + { + "id": 1, + "name": "Kathrine Ayers" + }, + { + "id": 2, + "name": "Cox Acosta" + } + ], + "greeting": "Hello, Dina! You have 8 unread messages.", + "favoriteFruit": "strawberry" + }, + { + "_id": "543fa82101996fc944f7f215", + "index": 45, + "guid": "46b331f8-78d3-403f-8c93-cbaac9438998", + "isActive": false, + "balance": "$2,193.84", + "picture": "http://placehold.it/32x32", + "age": 33, + "eyeColor": "blue", + "name": { + "first": "Haney", + "last": "Garrett" + }, + "company": "EZENTIA", + "email": "haney.garrett@ezentia.org", + "phone": "+1 (965) 596-2629", + "address": "162 Village Road, Southmont, Virginia, 6673", + "about": "Laboris do veniam exercitation officia id eu minim irure Lorem laborum. Sint magna aliquip ad elit eiusmod cillum laborum. Adipisicing aliquip nulla mollit ipsum cupidatat nisi duis irure ullamco. Et elit nulla nisi culpa mollit esse in. Dolore non nulla anim magna enim aliquip quis amet non tempor incididunt dolor.\r\n", + "registered": "Monday, August 18, 2014 3:42 PM", + "latitude": 6.549909, + "longitude": 32.226887, + "tags": [ + "laboris", + "duis", + "culpa", + "qui", + "dolor", + "esse", + "reprehenderit" + ], + "range": [ + 0, + 1, + 2, + 3, + 4, + 5, + 6, + 7, + 8, + 9 + ], + "friends": [ + { + "id": 0, + "name": "Mariana Sharp" + }, + { + "id": 1, + "name": "Sue Baldwin" + }, + { + "id": 2, + "name": "Ross Arnold" + } + ], + "greeting": "Hello, Haney! You have 10 unread messages.", + "favoriteFruit": "strawberry" + }, + { + "_id": "543fa821453cc33bd1abd21d", + "index": 46, + "guid": "313f0c45-6eaf-46d9-a28d-ca31e79d2c1c", + "isActive": false, + "balance": "$2,303.36", + "picture": "http://placehold.it/32x32", + "age": 28, + "eyeColor": "blue", + "name": { + "first": "Gale", + "last": "Robles" + }, + "company": "NEWCUBE", + "email": "gale.robles@newcube.biz", + "phone": "+1 (996) 558-2811", + "address": "597 Java Street, Sanborn, North Dakota, 1789", + "about": "Anim fugiat do nisi dolor sunt consequat irure quis laborum. Nisi cupidatat dolore excepteur irure ea minim proident excepteur exercitation ut voluptate deserunt. Ad do amet id voluptate enim commodo ex. Sunt sint quis sint aute do ea aliqua. Enim ullamco dolore proident qui mollit irure consequat. Nostrud sunt adipisicing elit incididunt do laboris ad officia ea amet id reprehenderit nulla.\r\n", + "registered": "Tuesday, July 15, 2014 2:25 PM", + "latitude": -21.549196, + "longitude": -97.373962, + "tags": [ + "ullamco", + "amet", + "sint", + "elit", + "tempor", + "ex", + "pariatur" + ], + "range": [ + 0, + 1, + 2, + 3, + 4, + 5, + 6, + 7, + 8, + 9 + ], + "friends": [ + { + "id": 0, + "name": "Bell Gould" + }, + { + "id": 1, + "name": "Denise Kirby" + }, + { + "id": 2, + "name": "Hess Hinton" + } + ], + "greeting": "Hello, Gale! 
You have 5 unread messages.", + "favoriteFruit": "apple" + }, + { + "_id": "543fa821ed643a410e2d79dc", + "index": 47, + "guid": "2620cb2b-df00-4303-a5ff-768ffb1697c5", + "isActive": true, + "balance": "$2,890.73", + "picture": "http://placehold.it/32x32", + "age": 39, + "eyeColor": "blue", + "name": { + "first": "Alisha", + "last": "Hamilton" + }, + "company": "FITCORE", + "email": "alisha.hamilton@fitcore.io", + "phone": "+1 (853) 468-3192", + "address": "257 Bayard Street, Coldiron, Oklahoma, 9228", + "about": "Sunt nostrud sunt magna amet excepteur est tempor veniam aliqua. Laboris id aliquip fugiat exercitation dolore veniam et anim duis sit esse ex elit ullamco. Lorem commodo exercitation in sit cillum ipsum do dolor.\r\n", + "registered": "Tuesday, March 11, 2014 12:29 PM", + "latitude": 85.840017, + "longitude": -57.093095, + "tags": [ + "consectetur", + "commodo", + "est", + "ut", + "incididunt", + "elit", + "ipsum" + ], + "range": [ + 0, + 1, + 2, + 3, + 4, + 5, + 6, + 7, + 8, + 9 + ], + "friends": [ + { + "id": 0, + "name": "Steele Ellis" + }, + { + "id": 1, + "name": "Serena Emerson" + }, + { + "id": 2, + "name": "Betty Langley" + } + ], + "greeting": "Hello, Alisha! You have 8 unread messages.", + "favoriteFruit": "banana" + }, + { + "_id": "543fa8215c6db03112e3e13d", + "index": 48, + "guid": "28b59ead-0fd8-480b-8eb8-2d75290934f6", + "isActive": false, + "balance": "$3,497.68", + "picture": "http://placehold.it/32x32", + "age": 25, + "eyeColor": "blue", + "name": { + "first": "Kaufman", + "last": "Williams" + }, + "company": "COMBOGENE", + "email": "kaufman.williams@combogene.net", + "phone": "+1 (820) 465-3213", + "address": "353 Clarendon Road, Wilmington, Michigan, 3811", + "about": "Irure id sint elit mollit occaecat occaecat veniam elit reprehenderit esse officia cillum. Aute aute occaecat ipsum commodo laborum adipisicing fugiat aliquip dolore. Deserunt id excepteur enim eu adipisicing nulla ut non est dolore est. Culpa magna et sit et non ex.\r\n", + "registered": "Sunday, September 7, 2014 5:57 AM", + "latitude": 10.667631, + "longitude": 157.707911, + "tags": [ + "consequat", + "dolor", + "deserunt", + "amet", + "Lorem", + "aliqua", + "minim" + ], + "range": [ + 0, + 1, + 2, + 3, + 4, + 5, + 6, + 7, + 8, + 9 + ], + "friends": [ + { + "id": 0, + "name": "Horn Franco" + }, + { + "id": 1, + "name": "Kathleen Hickman" + }, + { + "id": 2, + "name": "Rosario Scott" + } + ], + "greeting": "Hello, Kaufman! You have 7 unread messages.", + "favoriteFruit": "banana" + }, + { + "_id": "543fa821045c82593b79538d", + "index": 49, + "guid": "cef964ed-4208-41eb-a8d8-916d95d18f8a", + "isActive": true, + "balance": "$3,808.37", + "picture": "http://placehold.it/32x32", + "age": 28, + "eyeColor": "blue", + "name": { + "first": "Tran", + "last": "Gallegos" + }, + "company": "CONCILITY", + "email": "tran.gallegos@concility.com", + "phone": "+1 (925) 487-2143", + "address": "969 Schermerhorn Street, Watchtower, Idaho, 413", + "about": "Velit ut enim cupidatat adipisicing culpa in non incididunt exercitation dolor pariatur. Deserunt dolor occaecat dolor officia ipsum occaecat tempor nisi. Culpa et culpa aute incididunt et labore sunt cillum nulla. Reprehenderit culpa enim laborum nostrud consectetur velit nulla consequat aliqua non exercitation sunt nulla aliqua. Labore incididunt aliqua aliqua anim duis culpa elit labore. 
Aliqua ipsum mollit ut sint aliquip in aute do qui amet.\r\n", + "registered": "Friday, May 9, 2014 9:24 AM", + "latitude": -49.148583, + "longitude": 4.911715, + "tags": [ + "labore", + "duis", + "proident", + "adipisicing", + "nisi", + "tempor", + "nisi" + ], + "range": [ + 0, + 1, + 2, + 3, + 4, + 5, + 6, + 7, + 8, + 9 + ], + "friends": [ + { + "id": 0, + "name": "Nicole Holloway" + }, + { + "id": 1, + "name": "Minerva Beasley" + }, + { + "id": 2, + "name": "Bowers Suarez" + } + ], + "greeting": "Hello, Tran! You have 7 unread messages.", + "favoriteFruit": "banana" + }, + { + "_id": "543fa821fbd8e9836595f96d", + "index": 50, + "guid": "966b2f5e-5686-4381-9ab7-87bc9ac9968b", + "isActive": true, + "balance": "$1,351.67", + "picture": "http://placehold.it/32x32", + "age": 23, + "eyeColor": "green", + "name": { + "first": "Mable", + "last": "Lambert" + }, + "company": "ZENSUS", + "email": "mable.lambert@zensus.biz", + "phone": "+1 (917) 404-3441", + "address": "174 Concord Street, Somerset, Nebraska, 7318", + "about": "Ad minim esse ipsum incididunt incididunt enim Lorem nulla excepteur velit fugiat ullamco amet reprehenderit. Labore velit fugiat proident enim Lorem mollit ex. Tempor voluptate cupidatat officia nostrud qui. Veniam duis voluptate deserunt commodo consectetur eiusmod qui excepteur aliquip. Exercitation laborum Lorem excepteur ipsum aliqua fugiat reprehenderit ea dolore deserunt commodo cillum ipsum eu. Ullamco quis voluptate eiusmod ea aute et pariatur consequat duis occaecat nulla aliquip.\r\n", + "registered": "Sunday, May 25, 2014 4:54 PM", + "latitude": 79.811908, + "longitude": -62.629133, + "tags": [ + "sit", + "non", + "reprehenderit", + "exercitation", + "dolor", + "labore", + "irure" + ], + "range": [ + 0, + 1, + 2, + 3, + 4, + 5, + 6, + 7, + 8, + 9 + ], + "friends": [ + { + "id": 0, + "name": "Rosie Gill" + }, + { + "id": 1, + "name": "Parrish Dean" + }, + { + "id": 2, + "name": "Annmarie Delacruz" + } + ], + "greeting": "Hello, Mable! You have 10 unread messages.", + "favoriteFruit": "strawberry" + }, + { + "_id": "543fa821c85822cf149b785f", + "index": 51, + "guid": "1afd0e8f-b518-4bca-a4df-29e9b6a961d6", + "isActive": true, + "balance": "$2,628.22", + "picture": "http://placehold.it/32x32", + "age": 22, + "eyeColor": "brown", + "name": { + "first": "Sadie", + "last": "Clarke" + }, + "company": "UNISURE", + "email": "sadie.clarke@unisure.co.uk", + "phone": "+1 (905) 480-2930", + "address": "326 Gilmore Court, Hamilton, Montana, 9217", + "about": "Dolore do nisi reprehenderit consectetur in. In esse sit proident enim duis veniam quis laboris nulla cillum adipisicing veniam aute. Culpa sunt ex exercitation sit esse exercitation dolor ea enim ad est aute consequat qui. Esse esse nulla eiusmod eiusmod ullamco esse cillum aute ea id ex quis. Pariatur officia Lorem aute officia anim velit velit elit sint voluptate. Aliquip ad velit velit laboris et culpa. Do consectetur aliqua sit sunt eu anim culpa ut incididunt et.\r\n", + "registered": "Sunday, April 27, 2014 10:37 AM", + "latitude": -71.828101, + "longitude": -138.908359, + "tags": [ + "nostrud", + "amet", + "minim", + "occaecat", + "proident", + "sint", + "nostrud" + ], + "range": [ + 0, + 1, + 2, + 3, + 4, + 5, + 6, + 7, + 8, + 9 + ], + "friends": [ + { + "id": 0, + "name": "Christa Estes" + }, + { + "id": 1, + "name": "Alana Schneider" + }, + { + "id": 2, + "name": "Frank Spears" + } + ], + "greeting": "Hello, Sadie! 
You have 6 unread messages.", + "favoriteFruit": "strawberry" + }, + { + "_id": "543fa821aa6b44e8d20db81c", + "index": 52, + "guid": "e0220e02-565c-424a-8834-f955ccb72f7d", + "isActive": false, + "balance": "$2,918.63", + "picture": "http://placehold.it/32x32", + "age": 30, + "eyeColor": "brown", + "name": { + "first": "Deana", + "last": "Fletcher" + }, + "company": "VISALIA", + "email": "deana.fletcher@visalia.tv", + "phone": "+1 (815) 430-2641", + "address": "347 Tehama Street, Hollins, Washington, 5953", + "about": "Est consequat id ad Lorem consequat quis ullamco minim pariatur ipsum cillum. Enim exercitation qui duis cillum ea amet ea sint proident officia dolor non. Irure culpa cillum minim officia est culpa sit.\r\n", + "registered": "Monday, May 5, 2014 1:51 AM", + "latitude": 51.516197, + "longitude": 80.400628, + "tags": [ + "ut", + "tempor", + "pariatur", + "ex", + "dolore", + "deserunt", + "culpa" + ], + "range": [ + 0, + 1, + 2, + 3, + 4, + 5, + 6, + 7, + 8, + 9 + ], + "friends": [ + { + "id": 0, + "name": "Hansen Estrada" + }, + { + "id": 1, + "name": "Regina Munoz" + }, + { + "id": 2, + "name": "Bethany Cabrera" + } + ], + "greeting": "Hello, Deana! You have 5 unread messages.", + "favoriteFruit": "banana" + }, + { + "_id": "543fa82120c61930bc25e2ff", + "index": 53, + "guid": "99c62c99-ef16-4eb7-a41d-80feafca740a", + "isActive": false, + "balance": "$1,836.96", + "picture": "http://placehold.it/32x32", + "age": 27, + "eyeColor": "brown", + "name": { + "first": "Long", + "last": "Sandoval" + }, + "company": "GINKLE", + "email": "long.sandoval@ginkle.name", + "phone": "+1 (989) 541-2327", + "address": "161 Crown Street, Omar, Kentucky, 8615", + "about": "Et ea eiusmod ex consequat culpa proident. Reprehenderit proident ullamco ullamco aliquip incididunt ullamco sit proident dolore nulla fugiat sit laboris. Adipisicing ullamco laborum nulla exercitation reprehenderit irure ex.\r\n", + "registered": "Tuesday, July 29, 2014 12:14 AM", + "latitude": -20.766582, + "longitude": 74.616145, + "tags": [ + "et", + "consequat", + "duis", + "excepteur", + "sint", + "sunt", + "aliqua" + ], + "range": [ + 0, + 1, + 2, + 3, + 4, + 5, + 6, + 7, + 8, + 9 + ], + "friends": [ + { + "id": 0, + "name": "Aline Rosa" + }, + { + "id": 1, + "name": "Olivia Quinn" + }, + { + "id": 2, + "name": "Fowler Carter" + } + ], + "greeting": "Hello, Long! You have 5 unread messages.", + "favoriteFruit": "banana" + }, + { + "_id": "543fa821cc7ceb7da88472ed", + "index": 54, + "guid": "3a965f51-48f9-4d10-8a9b-a0b529749c93", + "isActive": true, + "balance": "$2,950.94", + "picture": "http://placehold.it/32x32", + "age": 25, + "eyeColor": "green", + "name": { + "first": "Rush", + "last": "Thornton" + }, + "company": "AQUASSEUR", + "email": "rush.thornton@aquasseur.me", + "phone": "+1 (916) 493-2777", + "address": "344 Sunnyside Avenue, Brethren, California, 9529", + "about": "Duis ut consequat eu laborum voluptate eu Lorem cillum ad in commodo adipisicing. Excepteur aliquip sint dolor voluptate cillum nisi mollit mollit laborum ex culpa adipisicing voluptate. Cupidatat do sit fugiat amet irure. 
Cupidatat ex ut commodo reprehenderit veniam sit est officia ad pariatur aliquip.\r\n", + "registered": "Thursday, September 25, 2014 11:09 AM", + "latitude": -61.409359, + "longitude": -87.414208, + "tags": [ + "adipisicing", + "consequat", + "magna", + "Lorem", + "consequat", + "voluptate", + "eu" + ], + "range": [ + 0, + 1, + 2, + 3, + 4, + 5, + 6, + 7, + 8, + 9 + ], + "friends": [ + { + "id": 0, + "name": "Hill Good" + }, + { + "id": 1, + "name": "Hartman Rice" + }, + { + "id": 2, + "name": "Petersen Hogan" + } + ], + "greeting": "Hello, Rush! You have 7 unread messages.", + "favoriteFruit": "strawberry" + }, + { + "_id": "543fa821e221eb5b7ed845c0", + "index": 55, + "guid": "ab0f1517-8c25-46ff-a281-0dcc932e2f9a", + "isActive": false, + "balance": "$2,070.82", + "picture": "http://placehold.it/32x32", + "age": 39, + "eyeColor": "green", + "name": { + "first": "Alyce", + "last": "Perez" + }, + "company": "CAPSCREEN", + "email": "alyce.perez@capscreen.us", + "phone": "+1 (955) 432-2025", + "address": "896 Troutman Street, Williamson, West Virginia, 6491", + "about": "In consequat quis ex sint nisi proident esse excepteur quis nostrud. Enim incididunt ullamco sint quis eiusmod qui tempor ad laboris eiusmod nulla in aliquip sit. Nostrud fugiat fugiat Lorem laboris pariatur eiusmod amet ea do irure et. Excepteur pariatur consequat exercitation amet occaecat do aliqua non deserunt nulla cupidatat tempor id. Mollit ex incididunt et nulla culpa mollit veniam qui amet in excepteur pariatur. Commodo reprehenderit tempor laborum nisi anim minim deserunt eiusmod adipisicing deserunt ut eiusmod excepteur.\r\n", + "registered": "Monday, June 23, 2014 7:37 AM", + "latitude": -4.180393, + "longitude": 21.4789, + "tags": [ + "dolore", + "officia", + "laborum", + "aliquip", + "ex", + "eu", + "sint" + ], + "range": [ + 0, + 1, + 2, + 3, + 4, + 5, + 6, + 7, + 8, + 9 + ], + "friends": [ + { + "id": 0, + "name": "Rosalind Lang" + }, + { + "id": 1, + "name": "Ina Pratt" + }, + { + "id": 2, + "name": "Tamika Mercer" + } + ], + "greeting": "Hello, Alyce! You have 5 unread messages.", + "favoriteFruit": "apple" + }, + { + "_id": "543fa8215e315c720e186460", + "index": 56, + "guid": "10727900-9be7-46d5-8f2c-7c6834d95a25", + "isActive": false, + "balance": "$1,907.19", + "picture": "http://placehold.it/32x32", + "age": 37, + "eyeColor": "brown", + "name": { + "first": "Snider", + "last": "Johnson" + }, + "company": "IPLAX", + "email": "snider.johnson@iplax.ca", + "phone": "+1 (883) 539-3127", + "address": "424 Hancock Street, Springdale, Wyoming, 7054", + "about": "Incididunt proident amet consectetur cupidatat ex officia labore cupidatat laborum. Tempor proident officia nisi Lorem. Lorem commodo commodo ea voluptate excepteur consequat anim quis excepteur sunt officia.\r\n", + "registered": "Saturday, March 8, 2014 9:23 PM", + "latitude": 16.66716, + "longitude": 17.844641, + "tags": [ + "esse", + "est", + "eu", + "dolor", + "ea", + "voluptate", + "nostrud" + ], + "range": [ + 0, + 1, + 2, + 3, + 4, + 5, + 6, + 7, + 8, + 9 + ], + "friends": [ + { + "id": 0, + "name": "Bauer Burt" + }, + { + "id": 1, + "name": "Lowe Boyd" + }, + { + "id": 2, + "name": "Moon Garcia" + } + ], + "greeting": "Hello, Snider! 
You have 9 unread messages.", + "favoriteFruit": "strawberry" + }, + { + "_id": "543fa82130f82b9ee9265e5f", + "index": 57, + "guid": "ba427ee5-5013-498f-a1a2-97a71b249a6e", + "isActive": true, + "balance": "$2,070.53", + "picture": "http://placehold.it/32x32", + "age": 38, + "eyeColor": "brown", + "name": { + "first": "Berry", + "last": "Carver" + }, + "company": "EVENTIX", + "email": "berry.carver@eventix.org", + "phone": "+1 (907) 508-2463", + "address": "415 Havens Place, Nile, Connecticut, 6089", + "about": "Quis dolor aute consequat sunt esse dolore Lorem pariatur reprehenderit incididunt aliqua. Officia sunt aute fugiat consectetur id exercitation aliquip velit do fugiat culpa. Et ad amet exercitation veniam ipsum duis qui sunt incididunt. Eiusmod commodo esse aliquip exercitation pariatur consequat nulla nulla quis eiusmod dolor. Ut consectetur qui culpa id veniam dolore pariatur quis est cillum voluptate esse. Sunt eiusmod adipisicing mollit est tempor ipsum dolore tempor. Velit consequat dolore cillum adipisicing id nulla veniam nisi velit in magna id anim.\r\n", + "registered": "Monday, January 13, 2014 10:45 PM", + "latitude": -60.884888, + "longitude": 139.360489, + "tags": [ + "eu", + "deserunt", + "minim", + "quis", + "eiusmod", + "sint", + "dolor" + ], + "range": [ + 0, + 1, + 2, + 3, + 4, + 5, + 6, + 7, + 8, + 9 + ], + "friends": [ + { + "id": 0, + "name": "Tanisha Dudley" + }, + { + "id": 1, + "name": "Dale Mcgowan" + }, + { + "id": 2, + "name": "Torres Pennington" + } + ], + "greeting": "Hello, Berry! You have 7 unread messages.", + "favoriteFruit": "strawberry" + }, + { + "_id": "543fa821bc354233d50ef914", + "index": 58, + "guid": "6ab626f8-6b71-4f17-ba39-4cc97fdf4855", + "isActive": false, + "balance": "$2,822.25", + "picture": "http://placehold.it/32x32", + "age": 38, + "eyeColor": "green", + "name": { + "first": "Kramer", + "last": "Berg" + }, + "company": "INTRADISK", + "email": "kramer.berg@intradisk.biz", + "phone": "+1 (901) 534-3326", + "address": "455 Bath Avenue, Hoagland, Guam, 2206", + "about": "Labore ullamco aliquip id incididunt cupidatat pariatur. In magna et aliquip consectetur dolor ullamco aliqua reprehenderit. Ad velit nisi ex culpa consequat. Culpa eiusmod incididunt pariatur esse tempor officia mollit.\r\n", + "registered": "Friday, June 13, 2014 8:45 AM", + "latitude": -43.442578, + "longitude": 69.627031, + "tags": [ + "exercitation", + "dolor", + "quis", + "laboris", + "exercitation", + "sunt", + "ipsum" + ], + "range": [ + 0, + 1, + 2, + 3, + 4, + 5, + 6, + 7, + 8, + 9 + ], + "friends": [ + { + "id": 0, + "name": "Love Hutchinson" + }, + { + "id": 1, + "name": "Hayden Marquez" + }, + { + "id": 2, + "name": "Macdonald Hahn" + } + ], + "greeting": "Hello, Kramer! You have 5 unread messages.", + "favoriteFruit": "strawberry" + }, + { + "_id": "543fa821cf5d51b48b4bd07a", + "index": 59, + "guid": "7b339d1e-d759-4fed-9e58-fd2608cdb0f2", + "isActive": false, + "balance": "$3,836.86", + "picture": "http://placehold.it/32x32", + "age": 20, + "eyeColor": "brown", + "name": { + "first": "Joann", + "last": "Elliott" + }, + "company": "ATOMICA", + "email": "joann.elliott@atomica.io", + "phone": "+1 (992) 429-2667", + "address": "788 Willow Place, Lindisfarne, Mississippi, 3656", + "about": "Ut consequat sunt ipsum minim velit. Lorem eiusmod dolor voluptate est deserunt cupidatat ut ipsum. Et et irure Lorem laborum sint mollit pariatur elit et enim eu eu sunt. 
Nisi do quis proident enim irure dolore ut Lorem fugiat quis voluptate non reprehenderit dolore.\r\n", + "registered": "Monday, May 12, 2014 6:04 PM", + "latitude": 14.309335, + "longitude": 32.596666, + "tags": [ + "nulla", + "magna", + "dolore", + "incididunt", + "fugiat", + "elit", + "veniam" + ], + "range": [ + 0, + 1, + 2, + 3, + 4, + 5, + 6, + 7, + 8, + 9 + ], + "friends": [ + { + "id": 0, + "name": "Mason Hurst" + }, + { + "id": 1, + "name": "Castaneda Davidson" + }, + { + "id": 2, + "name": "Rasmussen Adkins" + } + ], + "greeting": "Hello, Joann! You have 5 unread messages.", + "favoriteFruit": "banana" + }, + { + "_id": "543fa821876778d3d011eb7b", + "index": 60, + "guid": "a016ab64-b6dd-42bc-8b5f-dbbab7a01d2a", + "isActive": true, + "balance": "$2,795.00", + "picture": "http://placehold.it/32x32", + "age": 31, + "eyeColor": "green", + "name": { + "first": "Barbara", + "last": "Nolan" + }, + "company": "FURNAFIX", + "email": "barbara.nolan@furnafix.net", + "phone": "+1 (892) 600-2820", + "address": "540 Dennett Place, Mammoth, Rhode Island, 6151", + "about": "In velit officia quis Lorem. Ex quis cillum esse deserunt consectetur et nulla tempor. Lorem reprehenderit cillum excepteur ea veniam commodo et ad ullamco. Ex elit nisi non ipsum aliqua laborum sint aliqua. Reprehenderit consectetur dolore occaecat irure incididunt sunt.\r\n", + "registered": "Monday, April 21, 2014 11:59 AM", + "latitude": 71.103088, + "longitude": -78.48592, + "tags": [ + "eu", + "ullamco", + "cillum", + "est", + "commodo", + "nisi", + "tempor" + ], + "range": [ + 0, + 1, + 2, + 3, + 4, + 5, + 6, + 7, + 8, + 9 + ], + "friends": [ + { + "id": 0, + "name": "Cynthia Aguilar" + }, + { + "id": 1, + "name": "Deanna Graves" + }, + { + "id": 2, + "name": "Bertha Caldwell" + } + ], + "greeting": "Hello, Barbara! You have 9 unread messages.", + "favoriteFruit": "strawberry" + }, + { + "_id": "543fa821d6e2aaf18a0b184f", + "index": 61, + "guid": "89ca6bc0-19c2-4d13-a37c-90d02005a6bc", + "isActive": false, + "balance": "$3,134.62", + "picture": "http://placehold.it/32x32", + "age": 39, + "eyeColor": "blue", + "name": { + "first": "Penelope", + "last": "William" + }, + "company": "UTARA", + "email": "penelope.william@utara.com", + "phone": "+1 (968) 575-2395", + "address": "276 Ralph Avenue, Ezel, New Mexico, 2656", + "about": "Pariatur officia anim dolore commodo ipsum labore sint officia. Lorem culpa ea sunt non. Voluptate irure voluptate ut cupidatat nulla nostrud.\r\n", + "registered": "Monday, July 7, 2014 11:29 PM", + "latitude": -83.184502, + "longitude": -91.222471, + "tags": [ + "anim", + "incididunt", + "aliqua", + "id", + "reprehenderit", + "laboris", + "consequat" + ], + "range": [ + 0, + 1, + 2, + 3, + 4, + 5, + 6, + 7, + 8, + 9 + ], + "friends": [ + { + "id": 0, + "name": "Maxwell Rocha" + }, + { + "id": 1, + "name": "Roach Bryan" + }, + { + "id": 2, + "name": "Woods Daugherty" + } + ], + "greeting": "Hello, Penelope! You have 5 unread messages.", + "favoriteFruit": "banana" + }, + { + "_id": "543fa8217abc85acd32fb42a", + "index": 62, + "guid": "cc58e868-7934-40a2-a350-13ad47e86a56", + "isActive": true, + "balance": "$3,734.05", + "picture": "http://placehold.it/32x32", + "age": 33, + "eyeColor": "green", + "name": { + "first": "Kris", + "last": "Cotton" + }, + "company": "CORPORANA", + "email": "kris.cotton@corporana.biz", + "phone": "+1 (873) 412-2513", + "address": "650 Bushwick Court, Malott, Wisconsin, 9739", + "about": "Aliquip cupidatat exercitation exercitation consectetur. 
Sit id excepteur ea ut laborum irure ullamco laborum irure reprehenderit nisi aute eu. Ipsum do anim ea veniam do amet pariatur. Lorem consectetur labore deserunt anim deserunt aute.\r\n", + "registered": "Thursday, February 20, 2014 7:18 AM", + "latitude": 41.787092, + "longitude": -44.032192, + "tags": [ + "ex", + "incididunt", + "ut", + "cupidatat", + "commodo", + "commodo", + "occaecat" + ], + "range": [ + 0, + 1, + 2, + 3, + 4, + 5, + 6, + 7, + 8, + 9 + ], + "friends": [ + { + "id": 0, + "name": "Angela Middleton" + }, + { + "id": 1, + "name": "Hicks Douglas" + }, + { + "id": 2, + "name": "Shaffer West" + } + ], + "greeting": "Hello, Kris! You have 8 unread messages.", + "favoriteFruit": "banana" + }, + { + "_id": "543fa821c20dda84da819450", + "index": 63, + "guid": "6ecab19a-0268-4d43-ad43-af2e4941b2e7", + "isActive": false, + "balance": "$2,748.00", + "picture": "http://placehold.it/32x32", + "age": 33, + "eyeColor": "green", + "name": { + "first": "William", + "last": "Haney" + }, + "company": "PROSURE", + "email": "william.haney@prosure.co.uk", + "phone": "+1 (890) 508-3193", + "address": "930 Hopkins Street, Bluetown, American Samoa, 9860", + "about": "Ad aute aliquip eiusmod tempor ullamco. Et ipsum consequat consequat magna do fugiat sint proident nostrud ad fugiat commodo dolor. Est anim do laboris id esse minim do voluptate occaecat nulla esse. Veniam sit dolore aliqua pariatur quis commodo enim nisi sint excepteur pariatur.\r\n", + "registered": "Friday, January 10, 2014 2:42 PM", + "latitude": 70.057151, + "longitude": -46.509685, + "tags": [ + "pariatur", + "est", + "adipisicing", + "aute", + "in", + "ex", + "eu" + ], + "range": [ + 0, + 1, + 2, + 3, + 4, + 5, + 6, + 7, + 8, + 9 + ], + "friends": [ + { + "id": 0, + "name": "Aida Lindsey" + }, + { + "id": 1, + "name": "Harper Roberson" + }, + { + "id": 2, + "name": "Flora Woods" + } + ], + "greeting": "Hello, William! You have 5 unread messages.", + "favoriteFruit": "banana" + }, + { + "_id": "543fa8219387ab6d8ebd8c1d", + "index": 64, + "guid": "732dfcb2-f8ab-44bf-a11b-d98f5b589993", + "isActive": true, + "balance": "$1,010.25", + "picture": "http://placehold.it/32x32", + "age": 26, + "eyeColor": "green", + "name": { + "first": "Barrera", + "last": "Sellers" + }, + "company": "KLUGGER", + "email": "barrera.sellers@klugger.tv", + "phone": "+1 (968) 510-3228", + "address": "954 Verona Place, Homeworth, Louisiana, 6862", + "about": "Lorem ex voluptate cupidatat minim officia voluptate enim proident qui mollit dolore ipsum. Non consectetur adipisicing quis consectetur. Non est Lorem ad qui nostrud aute aliqua labore exercitation ea aliquip irure dolor nisi. Excepteur anim ex exercitation velit adipisicing qui excepteur enim culpa consequat sint. Velit consectetur velit culpa eu sint irure culpa consequat anim incididunt ad amet excepteur. Non est anim id sint ipsum id officia dolor commodo dolore labore consectetur.\r\n", + "registered": "Sunday, September 14, 2014 1:44 AM", + "latitude": -88.561319, + "longitude": -44.881241, + "tags": [ + "aliquip", + "eiusmod", + "nisi", + "aliquip", + "minim", + "ullamco", + "commodo" + ], + "range": [ + 0, + 1, + 2, + 3, + 4, + 5, + 6, + 7, + 8, + 9 + ], + "friends": [ + { + "id": 0, + "name": "Bond Goff" + }, + { + "id": 1, + "name": "Cathleen Hatfield" + }, + { + "id": 2, + "name": "Pansy Burke" + } + ], + "greeting": "Hello, Barrera! 
You have 8 unread messages.", + "favoriteFruit": "strawberry" + }, + { + "_id": "543fa821ab4d9564c853db57", + "index": 65, + "guid": "bba298e6-ebca-4334-afe9-91807ed1b672", + "isActive": true, + "balance": "$2,345.80", + "picture": "http://placehold.it/32x32", + "age": 38, + "eyeColor": "brown", + "name": { + "first": "Mullen", + "last": "Stephenson" + }, + "company": "POLARIUM", + "email": "mullen.stephenson@polarium.name", + "phone": "+1 (935) 461-2692", + "address": "228 Court Square, Beaulieu, Kansas, 9445", + "about": "Ex magna proident do dolore nostrud aliqua aute dolore enim mollit consectetur sunt pariatur. Ex id duis enim duis laborum do tempor proident exercitation duis. Aute dolor cillum anim incididunt voluptate. Qui proident consectetur sit laboris ex enim excepteur qui.\r\n", + "registered": "Thursday, May 1, 2014 7:57 PM", + "latitude": 34.294733, + "longitude": 138.270754, + "tags": [ + "minim", + "veniam", + "do", + "consequat", + "esse", + "sit", + "do" + ], + "range": [ + 0, + 1, + 2, + 3, + 4, + 5, + 6, + 7, + 8, + 9 + ], + "friends": [ + { + "id": 0, + "name": "Bobbi Pate" + }, + { + "id": 1, + "name": "Moran Griffith" + }, + { + "id": 2, + "name": "Marian Hopkins" + } + ], + "greeting": "Hello, Mullen! You have 7 unread messages.", + "favoriteFruit": "apple" + }, + { + "_id": "543fa8218eb53a92791a3185", + "index": 66, + "guid": "d297bed2-0986-4d22-b120-0e04c253fb34", + "isActive": false, + "balance": "$2,586.70", + "picture": "http://placehold.it/32x32", + "age": 22, + "eyeColor": "brown", + "name": { + "first": "Leigh", + "last": "Kidd" + }, + "company": "SUNCLIPSE", + "email": "leigh.kidd@sunclipse.me", + "phone": "+1 (870) 427-3520", + "address": "156 Keen Court, Chamizal, Indiana, 4250", + "about": "Ut sunt elit irure eiusmod aliquip consectetur in. Dolore id exercitation irure consectetur. Pariatur occaecat cillum nulla cillum esse deserunt minim consectetur aliqua duis eiusmod. Ea proident aliquip cillum ullamco duis elit Lorem dolore aliqua. Do fugiat culpa reprehenderit ea eu non enim.\r\n", + "registered": "Wednesday, January 15, 2014 1:57 AM", + "latitude": 61.842523, + "longitude": -56.422447, + "tags": [ + "non", + "non", + "incididunt", + "elit", + "aliqua", + "proident", + "nostrud" + ], + "range": [ + 0, + 1, + 2, + 3, + 4, + 5, + 6, + 7, + 8, + 9 + ], + "friends": [ + { + "id": 0, + "name": "Jean Burns" + }, + { + "id": 1, + "name": "Patel Wilkinson" + }, + { + "id": 2, + "name": "Lillie Lane" + } + ], + "greeting": "Hello, Leigh! You have 5 unread messages.", + "favoriteFruit": "banana" + }, + { + "_id": "543fa8211c12bb8d837e265b", + "index": 67, + "guid": "6560e2f0-6bd2-4e19-bcd1-35297f162890", + "isActive": false, + "balance": "$2,040.59", + "picture": "http://placehold.it/32x32", + "age": 29, + "eyeColor": "green", + "name": { + "first": "Perry", + "last": "Leonard" + }, + "company": "CANDECOR", + "email": "perry.leonard@candecor.us", + "phone": "+1 (945) 556-2907", + "address": "359 Barbey Street, Grandview, Alaska, 9082", + "about": "Dolor fugiat consequat reprehenderit duis duis ex consectetur ea non ut. Irure et elit et mollit consequat dolor exercitation exercitation deserunt culpa mollit nulla. 
Est sunt deserunt incididunt exercitation eu aliqua qui elit labore id in eu ex.\r\n", + "registered": "Tuesday, August 26, 2014 11:34 PM", + "latitude": -67.974417, + "longitude": 97.189082, + "tags": [ + "sit", + "ex", + "ullamco", + "exercitation", + "adipisicing", + "non", + "laboris" + ], + "range": [ + 0, + 1, + 2, + 3, + 4, + 5, + 6, + 7, + 8, + 9 + ], + "friends": [ + { + "id": 0, + "name": "Alissa Ramsey" + }, + { + "id": 1, + "name": "Adela Bell" + }, + { + "id": 2, + "name": "Pearl Henderson" + } + ], + "greeting": "Hello, Perry! You have 9 unread messages.", + "favoriteFruit": "strawberry" + }, + { + "_id": "543fa8219a0b8e44380bd954", + "index": 68, + "guid": "4aa2bec3-3eaa-464f-9577-27f6c65e64b7", + "isActive": false, + "balance": "$3,911.59", + "picture": "http://placehold.it/32x32", + "age": 22, + "eyeColor": "blue", + "name": { + "first": "Barbra", + "last": "Mejia" + }, + "company": "ANIXANG", + "email": "barbra.mejia@anixang.ca", + "phone": "+1 (987) 517-2550", + "address": "635 Franklin Street, Allensworth, Florida, 1895", + "about": "Aliqua tempor excepteur velit do exercitation laborum commodo laboris aliqua nostrud. Aute aliquip nisi nulla labore id veniam ad voluptate non eiusmod minim mollit. Minim incididunt nostrud sint ex.\r\n", + "registered": "Thursday, January 16, 2014 11:04 PM", + "latitude": 44.320545, + "longitude": -61.392889, + "tags": [ + "cupidatat", + "excepteur", + "eu", + "consectetur", + "fugiat", + "aliquip", + "deserunt" + ], + "range": [ + 0, + 1, + 2, + 3, + 4, + 5, + 6, + 7, + 8, + 9 + ], + "friends": [ + { + "id": 0, + "name": "Potts Church" + }, + { + "id": 1, + "name": "Dianna Valentine" + }, + { + "id": 2, + "name": "Valeria Whitney" + } + ], + "greeting": "Hello, Barbra! You have 9 unread messages.", + "favoriteFruit": "strawberry" + }, + { + "_id": "543fa821b618d050a076972c", + "index": 69, + "guid": "ef47a627-c517-4f5a-931b-d67c8a18614b", + "isActive": false, + "balance": "$1,771.53", + "picture": "http://placehold.it/32x32", + "age": 32, + "eyeColor": "brown", + "name": { + "first": "Gonzales", + "last": "Walker" + }, + "company": "EVIDENDS", + "email": "gonzales.walker@evidends.org", + "phone": "+1 (984) 510-3347", + "address": "336 Pershing Loop, Croom, Georgia, 9128", + "about": "Quis reprehenderit consectetur ad aliqua ad amet incididunt aute irure Lorem veniam. Consequat consequat ut reprehenderit officia cupidatat irure aliqua nostrud veniam velit aliquip magna elit. Ullamco amet nostrud est cupidatat adipisicing fugiat magna anim eu occaecat incididunt. Ut ex ut quis veniam nulla ad ea magna elit incididunt Lorem anim ipsum elit. Aliqua laborum officia magna aliqua. Est quis adipisicing cillum cupidatat sunt velit fugiat mollit exercitation cupidatat sit.\r\n", + "registered": "Saturday, June 21, 2014 9:02 PM", + "latitude": -64.407501, + "longitude": -157.742045, + "tags": [ + "pariatur", + "minim", + "consectetur", + "consequat", + "ad", + "aliqua", + "ut" + ], + "range": [ + 0, + 1, + 2, + 3, + 4, + 5, + 6, + 7, + 8, + 9 + ], + "friends": [ + { + "id": 0, + "name": "Tina Patterson" + }, + { + "id": 1, + "name": "Gabriela Nielsen" + }, + { + "id": 2, + "name": "Amalia Mueller" + } + ], + "greeting": "Hello, Gonzales! 
You have 8 unread messages.", + "favoriteFruit": "banana" + }, + { + "_id": "543fa8210426ced41d67a58b", + "index": 70, + "guid": "5d045ae8-c32c-43f6-b404-30a943205f5e", + "isActive": true, + "balance": "$2,054.02", + "picture": "http://placehold.it/32x32", + "age": 36, + "eyeColor": "green", + "name": { + "first": "Clarissa", + "last": "Madden" + }, + "company": "NIQUENT", + "email": "clarissa.madden@niquent.biz", + "phone": "+1 (910) 480-3769", + "address": "637 Scholes Street, Needmore, Alabama, 9344", + "about": "Sunt nulla ad aliquip incididunt ullamco culpa laboris. Consectetur non enim in officia incididunt deserunt. Quis est consequat ipsum ad. Pariatur nostrud voluptate magna occaecat minim irure sint nostrud voluptate ea labore ullamco quis. Mollit veniam consequat commodo sunt.\r\n", + "registered": "Wednesday, January 8, 2014 4:02 AM", + "latitude": 46.115788, + "longitude": 79.731859, + "tags": [ + "reprehenderit", + "nisi", + "id", + "consectetur", + "sunt", + "nostrud", + "laboris" + ], + "range": [ + 0, + 1, + 2, + 3, + 4, + 5, + 6, + 7, + 8, + 9 + ], + "friends": [ + { + "id": 0, + "name": "Mandy Buckner" + }, + { + "id": 1, + "name": "Hickman Brown" + }, + { + "id": 2, + "name": "Kemp Mclaughlin" + } + ], + "greeting": "Hello, Clarissa! You have 8 unread messages.", + "favoriteFruit": "apple" + }, + { + "_id": "543fa8211049b550404f3ce5", + "index": 71, + "guid": "2f9a6201-6728-4509-8d88-c0a614649311", + "isActive": true, + "balance": "$1,324.10", + "picture": "http://placehold.it/32x32", + "age": 22, + "eyeColor": "blue", + "name": { + "first": "Joyce", + "last": "Callahan" + }, + "company": "OPTYK", + "email": "joyce.callahan@optyk.io", + "phone": "+1 (893) 544-2327", + "address": "567 Crystal Street, Freetown, District Of Columbia, 8319", + "about": "Deserunt in nisi id consequat qui. Sunt velit proident id culpa incididunt velit aute dolore labore. Deserunt qui ea adipisicing cillum irure sit sunt excepteur quis et quis nulla dolore pariatur. Consequat ut et veniam dolor velit nulla veniam fugiat commodo velit fugiat ad veniam ad. Anim consequat labore deserunt eiusmod esse. Laborum labore eu et incididunt commodo dolore eiusmod occaecat. Nisi elit duis mollit cillum id enim.\r\n", + "registered": "Tuesday, February 4, 2014 2:38 AM", + "latitude": -76.437449, + "longitude": -169.66079, + "tags": [ + "ullamco", + "non", + "officia", + "eiusmod", + "duis", + "cupidatat", + "mollit" + ], + "range": [ + 0, + 1, + 2, + 3, + 4, + 5, + 6, + 7, + 8, + 9 + ], + "friends": [ + { + "id": 0, + "name": "Hendricks Logan" + }, + { + "id": 1, + "name": "Dolly Baird" + }, + { + "id": 2, + "name": "Wendi Wallace" + } + ], + "greeting": "Hello, Joyce! You have 6 unread messages.", + "favoriteFruit": "strawberry" + }, + { + "_id": "543fa82176dab97e42086b90", + "index": 72, + "guid": "9680920d-9303-471b-849e-c30e38e06d45", + "isActive": false, + "balance": "$2,696.40", + "picture": "http://placehold.it/32x32", + "age": 38, + "eyeColor": "blue", + "name": { + "first": "Felecia", + "last": "Gonzalez" + }, + "company": "CORIANDER", + "email": "felecia.gonzalez@coriander.net", + "phone": "+1 (923) 575-3582", + "address": "315 Borinquen Pl, Rosburg, Pennsylvania, 9619", + "about": "Laboris officia exercitation duis aliqua in sint consectetur. Ad ut labore ipsum ipsum ut culpa labore irure aute. Et exercitation tempor do dolor culpa ad ipsum pariatur adipisicing fugiat. 
Incididunt amet incididunt minim quis cupidatat pariatur enim fugiat reprehenderit est ipsum labore.\r\n", + "registered": "Monday, January 20, 2014 11:59 PM", + "latitude": -36.201421, + "longitude": 162.994705, + "tags": [ + "cillum", + "reprehenderit", + "non", + "est", + "tempor", + "exercitation", + "fugiat" + ], + "range": [ + 0, + 1, + 2, + 3, + 4, + 5, + 6, + 7, + 8, + 9 + ], + "friends": [ + { + "id": 0, + "name": "Gould Waters" + }, + { + "id": 1, + "name": "Ramona Coffey" + }, + { + "id": 2, + "name": "Taylor Byers" + } + ], + "greeting": "Hello, Felecia! You have 10 unread messages.", + "favoriteFruit": "banana" + }, + { + "_id": "543fa821df55849ccd4ca74c", + "index": 73, + "guid": "01b7c49a-6dac-4b65-906b-4483de07a5e8", + "isActive": true, + "balance": "$2,037.32", + "picture": "http://placehold.it/32x32", + "age": 27, + "eyeColor": "blue", + "name": { + "first": "Stanton", + "last": "Rutledge" + }, + "company": "EXOBLUE", + "email": "stanton.rutledge@exoblue.com", + "phone": "+1 (817) 408-2566", + "address": "741 Crooke Avenue, Newry, Nevada, 8555", + "about": "Commodo adipisicing ea ipsum non irure quis excepteur. Qui laborum qui sit cupidatat dolore consectetur tempor esse occaecat cillum qui. Dolore in amet ea ex proident do nulla.\r\n", + "registered": "Thursday, August 28, 2014 8:35 PM", + "latitude": 87.299409, + "longitude": 22.167535, + "tags": [ + "commodo", + "aliqua", + "irure", + "ea", + "dolor", + "aute", + "non" + ], + "range": [ + 0, + 1, + 2, + 3, + 4, + 5, + 6, + 7, + 8, + 9 + ], + "friends": [ + { + "id": 0, + "name": "Best Klein" + }, + { + "id": 1, + "name": "Dickerson Mcknight" + }, + { + "id": 2, + "name": "Gayle Washington" + } + ], + "greeting": "Hello, Stanton! You have 6 unread messages.", + "favoriteFruit": "apple" + }, + { + "_id": "543fa821f3516c63c6fa05e8", + "index": 74, + "guid": "c299ecf4-0528-4280-b6b4-909801b1e9dd", + "isActive": true, + "balance": "$1,695.83", + "picture": "http://placehold.it/32x32", + "age": 27, + "eyeColor": "green", + "name": { + "first": "Thelma", + "last": "Barnett" + }, + "company": "DUOFLEX", + "email": "thelma.barnett@duoflex.biz", + "phone": "+1 (947) 416-2234", + "address": "325 Tampa Court, Zortman, Illinois, 3609", + "about": "Exercitation esse culpa enim anim. Cillum voluptate quis tempor excepteur elit aliquip consequat officia cupidatat laborum ad cupidatat. Mollit exercitation esse fugiat do do id irure tempor et duis. Ut sit reprehenderit velit sit eiusmod in officia nisi commodo magna id in. Lorem sint velit adipisicing aute.\r\n", + "registered": "Tuesday, April 22, 2014 5:11 AM", + "latitude": 20.61329, + "longitude": 48.592063, + "tags": [ + "et", + "excepteur", + "nostrud", + "ullamco", + "quis", + "occaecat", + "in" + ], + "range": [ + 0, + 1, + 2, + 3, + 4, + 5, + 6, + 7, + 8, + 9 + ], + "friends": [ + { + "id": 0, + "name": "Amy Parks" + }, + { + "id": 1, + "name": "Greene Nunez" + }, + { + "id": 2, + "name": "Janna Roth" + } + ], + "greeting": "Hello, Thelma! You have 6 unread messages.", + "favoriteFruit": "banana" + }, + { + "_id": "543fa821637950981c0fa4c8", + "index": 75, + "guid": "bc58930c-58d8-4223-8a98-20295ac61c4e", + "isActive": true, + "balance": "$1,836.33", + "picture": "http://placehold.it/32x32", + "age": 23, + "eyeColor": "green", + "name": { + "first": "Nunez", + "last": "Freeman" + }, + "company": "ZILCH", + "email": "nunez.freeman@zilch.co.uk", + "phone": "+1 (959) 439-2497", + "address": "729 Varick Street, Marne, Tennessee, 1687", + "about": "Sunt nulla ipsum non in nulla. 
Dolore in fugiat in laborum veniam enim dolore cupidatat. Elit tempor ullamco id in minim excepteur et aute ut mollit aliquip qui consequat. Duis esse magna culpa aliqua ad ipsum deserunt laborum amet sint. Quis voluptate quis quis aute.\r\n", + "registered": "Monday, August 11, 2014 5:27 PM", + "latitude": 85.57657, + "longitude": -145.772132, + "tags": [ + "ullamco", + "tempor", + "et", + "non", + "magna", + "non", + "ex" + ], + "range": [ + 0, + 1, + 2, + 3, + 4, + 5, + 6, + 7, + 8, + 9 + ], + "friends": [ + { + "id": 0, + "name": "Latasha Randolph" + }, + { + "id": 1, + "name": "Neva Porter" + }, + { + "id": 2, + "name": "Drake Nicholson" + } + ], + "greeting": "Hello, Nunez! You have 10 unread messages.", + "favoriteFruit": "banana" + }, + { + "_id": "543fa821ed9d29afc180b718", + "index": 76, + "guid": "45fb73c9-30e9-4e61-b78d-41c8f79a282e", + "isActive": false, + "balance": "$1,298.91", + "picture": "http://placehold.it/32x32", + "age": 25, + "eyeColor": "green", + "name": { + "first": "Bentley", + "last": "Reyes" + }, + "company": "ZYPLE", + "email": "bentley.reyes@zyple.tv", + "phone": "+1 (919) 510-3585", + "address": "227 Oakland Place, Farmers, Iowa, 9827", + "about": "Cillum proident eiusmod id amet anim laboris elit sint ea et non. Aliqua et reprehenderit amet est ea fugiat aute. Minim aute aliquip nulla elit. Duis ad exercitation excepteur laborum anim occaecat nulla sunt. Quis pariatur nulla Lorem consectetur proident sunt amet est et elit eu sunt. Ut irure voluptate consequat amet sint deserunt quis. Incididunt ea culpa commodo fugiat qui veniam quis Lorem incididunt dolor.\r\n", + "registered": "Wednesday, April 9, 2014 1:19 AM", + "latitude": -82.587336, + "longitude": -74.931056, + "tags": [ + "dolore", + "nisi", + "exercitation", + "ullamco", + "excepteur", + "qui", + "ut" + ], + "range": [ + 0, + 1, + 2, + 3, + 4, + 5, + 6, + 7, + 8, + 9 + ], + "friends": [ + { + "id": 0, + "name": "Nadia Pearson" + }, + { + "id": 1, + "name": "Reba Frost" + }, + { + "id": 2, + "name": "Lilia Mcbride" + } + ], + "greeting": "Hello, Bentley! You have 7 unread messages.", + "favoriteFruit": "banana" + }, + { + "_id": "543fa821a936a115315db0b9", + "index": 77, + "guid": "a1b82517-f6a6-437a-9f2d-2c0043591cfa", + "isActive": false, + "balance": "$1,016.00", + "picture": "http://placehold.it/32x32", + "age": 22, + "eyeColor": "blue", + "name": { + "first": "Morales", + "last": "Shields" + }, + "company": "CORECOM", + "email": "morales.shields@corecom.name", + "phone": "+1 (814) 407-2079", + "address": "472 Coleridge Street, Shasta, Massachusetts, 501", + "about": "Ipsum elit adipisicing nostrud quis ut nisi ullamco consectetur ex laborum reprehenderit anim magna fugiat. In nulla dolor esse exercitation elit exercitation. Laborum sit esse velit magna ea irure est ut velit id nisi sint qui. Laborum voluptate amet cupidatat laborum aute in id duis irure. Enim aliqua enim magna ut consequat. Tempor est commodo elit eu et ut occaecat culpa ex ex. Ullamco cillum sint ipsum tempor anim ullamco sit pariatur excepteur dolor mollit ad quis.\r\n", + "registered": "Saturday, July 5, 2014 5:10 AM", + "latitude": -20.583102, + "longitude": -23.34973, + "tags": [ + "nulla", + "proident", + "enim", + "dolor", + "elit", + "excepteur", + "dolore" + ], + "range": [ + 0, + 1, + 2, + 3, + 4, + 5, + 6, + 7, + 8, + 9 + ], + "friends": [ + { + "id": 0, + "name": "Lacey Perry" + }, + { + "id": 1, + "name": "Stokes Foley" + }, + { + "id": 2, + "name": "Trisha Morales" + } + ], + "greeting": "Hello, Morales! 
You have 6 unread messages.", + "favoriteFruit": "strawberry" + }, + { + "_id": "543fa8214ceb22176ac554f9", + "index": 78, + "guid": "1aa9258a-f508-4e73-adb9-5d2cce0dff79", + "isActive": true, + "balance": "$1,100.42", + "picture": "http://placehold.it/32x32", + "age": 21, + "eyeColor": "blue", + "name": { + "first": "Chandra", + "last": "Patel" + }, + "company": "PROVIDCO", + "email": "chandra.patel@providco.me", + "phone": "+1 (875) 430-3869", + "address": "543 Bainbridge Street, Kidder, Arkansas, 3624", + "about": "Magna ad tempor commodo esse labore mollit sit. Duis laborum excepteur dolor officia exercitation. Elit sunt ex voluptate anim consectetur in ullamco mollit qui non. Cupidatat ipsum et cupidatat cupidatat consequat nulla Lorem culpa minim dolore cupidatat ipsum sint.\r\n", + "registered": "Sunday, June 29, 2014 1:41 AM", + "latitude": 55.150075, + "longitude": -26.185265, + "tags": [ + "amet", + "consectetur", + "occaecat", + "cillum", + "enim", + "ut", + "occaecat" + ], + "range": [ + 0, + 1, + 2, + 3, + 4, + 5, + 6, + 7, + 8, + 9 + ], + "friends": [ + { + "id": 0, + "name": "Geneva Snider" + }, + { + "id": 1, + "name": "Rose Michael" + }, + { + "id": 2, + "name": "Kirkland Mason" + } + ], + "greeting": "Hello, Chandra! You have 5 unread messages.", + "favoriteFruit": "banana" + }, + { + "_id": "543fa821050884ba49ad868d", + "index": 79, + "guid": "88a0d238-e3c9-4b1a-8d19-08c622f9eaae", + "isActive": false, + "balance": "$1,263.98", + "picture": "http://placehold.it/32x32", + "age": 30, + "eyeColor": "green", + "name": { + "first": "Elsa", + "last": "Chambers" + }, + "company": "VICON", + "email": "elsa.chambers@vicon.us", + "phone": "+1 (940) 436-3956", + "address": "190 Albemarle Terrace, Sheatown, Hawaii, 2654", + "about": "Est do esse elit consectetur elit. Aliqua esse duis est sint non. Enim minim laborum ad duis. Proident laboris quis ea amet nulla occaecat ex laboris duis ut velit.\r\n", + "registered": "Saturday, April 19, 2014 3:28 AM", + "latitude": 62.679122, + "longitude": 95.229313, + "tags": [ + "magna", + "ipsum", + "Lorem", + "ut", + "duis", + "elit", + "sit" + ], + "range": [ + 0, + 1, + 2, + 3, + 4, + 5, + 6, + 7, + 8, + 9 + ], + "friends": [ + { + "id": 0, + "name": "Laurie Fuentes" + }, + { + "id": 1, + "name": "Juana Blevins" + }, + { + "id": 2, + "name": "Virginia Hester" + } + ], + "greeting": "Hello, Elsa! You have 6 unread messages.", + "favoriteFruit": "banana" + }, + { + "_id": "543fa821eb0dd28cfdc525a9", + "index": 80, + "guid": "e1574006-8d49-4399-8b59-92eaa0ed2be1", + "isActive": true, + "balance": "$2,395.40", + "picture": "http://placehold.it/32x32", + "age": 23, + "eyeColor": "brown", + "name": { + "first": "Morris", + "last": "Trujillo" + }, + "company": "ZOGAK", + "email": "morris.trujillo@zogak.ca", + "phone": "+1 (919) 553-3453", + "address": "127 Clove Road, Keyport, Puerto Rico, 5969", + "about": "Minim do veniam non elit excepteur enim sunt excepteur non amet. Duis eu consequat adipisicing nostrud ad cupidatat tempor occaecat. Ea id consectetur non anim. Irure dolor qui excepteur anim excepteur adipisicing ea sint id aliquip consequat eu id. Ea exercitation officia nostrud ipsum. Voluptate sunt irure aliqua tempor pariatur irure labore ea amet. 
Quis reprehenderit aliqua pariatur esse id anim laborum ullamco amet.\r\n", + "registered": "Sunday, February 2, 2014 12:52 PM", + "latitude": -86.963921, + "longitude": -157.636932, + "tags": [ + "occaecat", + "amet", + "ex", + "ea", + "exercitation", + "aliqua", + "elit" + ], + "range": [ + 0, + 1, + 2, + 3, + 4, + 5, + 6, + 7, + 8, + 9 + ], + "friends": [ + { + "id": 0, + "name": "Lester Watkins" + }, + { + "id": 1, + "name": "Kellie Clayton" + }, + { + "id": 2, + "name": "Valencia Edwards" + } + ], + "greeting": "Hello, Morris! You have 6 unread messages.", + "favoriteFruit": "banana" + }, + { + "_id": "543fa821f5f0ae13a893ce36", + "index": 81, + "guid": "9dd75e1e-8f3c-473e-800e-518254719ca1", + "isActive": true, + "balance": "$1,956.52", + "picture": "http://placehold.it/32x32", + "age": 36, + "eyeColor": "brown", + "name": { + "first": "Gwen", + "last": "Park" + }, + "company": "INTERGEEK", + "email": "gwen.park@intergeek.org", + "phone": "+1 (830) 581-3561", + "address": "514 Beayer Place, Maxville, New Hampshire, 4162", + "about": "Consequat labore commodo nulla veniam aliqua. Tempor ipsum officia exercitation amet elit dolor labore eu voluptate cillum reprehenderit exercitation proident. Id cillum laborum cupidatat reprehenderit anim cillum exercitation culpa aliqua deserunt cupidatat. In proident ea eu nisi est enim. Quis dolor sit aliquip reprehenderit in id ipsum proident duis. Sit eu sint nisi velit. Minim elit nostrud aliquip anim.\r\n", + "registered": "Wednesday, September 17, 2014 9:38 AM", + "latitude": -66.199245, + "longitude": -52.824656, + "tags": [ + "ex", + "magna", + "incididunt", + "veniam", + "mollit", + "eu", + "sunt" + ], + "range": [ + 0, + 1, + 2, + 3, + 4, + 5, + 6, + 7, + 8, + 9 + ], + "friends": [ + { + "id": 0, + "name": "Nieves Potter" + }, + { + "id": 1, + "name": "Welch Reeves" + }, + { + "id": 2, + "name": "Hardy Forbes" + } + ], + "greeting": "Hello, Gwen! You have 8 unread messages.", + "favoriteFruit": "strawberry" + }, + { + "_id": "543fa821a20d620a7ec793ac", + "index": 82, + "guid": "1ae142ff-8d6a-4e72-a3f1-3ac4349ad0b7", + "isActive": false, + "balance": "$2,560.11", + "picture": "http://placehold.it/32x32", + "age": 31, + "eyeColor": "green", + "name": { + "first": "Erickson", + "last": "Lancaster" + }, + "company": "SCENTRIC", + "email": "erickson.lancaster@scentric.biz", + "phone": "+1 (980) 465-3465", + "address": "412 Jackson Street, Chumuckla, South Carolina, 3216", + "about": "Id laborum consectetur pariatur non nulla incididunt labore magna minim duis. Ipsum laboris deserunt velit sunt voluptate. Laboris ipsum duis aliquip non aliqua. Commodo veniam mollit voluptate elit nisi nostrud laboris dolor tempor pariatur duis laborum.\r\n", + "registered": "Wednesday, May 7, 2014 8:50 PM", + "latitude": -57.590749, + "longitude": 117.31662, + "tags": [ + "ad", + "elit", + "non", + "fugiat", + "laborum", + "incididunt", + "enim" + ], + "range": [ + 0, + 1, + 2, + 3, + 4, + 5, + 6, + 7, + 8, + 9 + ], + "friends": [ + { + "id": 0, + "name": "Reeves Nguyen" + }, + { + "id": 1, + "name": "Jo Christian" + }, + { + "id": 2, + "name": "Myers Lowe" + } + ], + "greeting": "Hello, Erickson! 
You have 10 unread messages.", + "favoriteFruit": "strawberry" + }, + { + "_id": "543fa8213747fd3443999762", + "index": 83, + "guid": "ec870637-4e65-49c4-abfb-7b99c2d2f094", + "isActive": true, + "balance": "$2,079.21", + "picture": "http://placehold.it/32x32", + "age": 21, + "eyeColor": "blue", + "name": { + "first": "Saundra", + "last": "Kemp" + }, + "company": "LUNCHPOD", + "email": "saundra.kemp@lunchpod.io", + "phone": "+1 (950) 433-2550", + "address": "593 Stuart Street, Edenburg, Texas, 6251", + "about": "Ipsum proident et duis reprehenderit in minim in sint dolore enim aute excepteur cillum eiusmod. Do nulla deserunt quis adipisicing sunt reprehenderit. Dolor pariatur tempor sint ullamco.\r\n", + "registered": "Tuesday, June 24, 2014 6:44 AM", + "latitude": 81.565149, + "longitude": -92.061448, + "tags": [ + "magna", + "commodo", + "esse", + "ad", + "labore", + "amet", + "mollit" + ], + "range": [ + 0, + 1, + 2, + 3, + 4, + 5, + 6, + 7, + 8, + 9 + ], + "friends": [ + { + "id": 0, + "name": "Tommie Mays" + }, + { + "id": 1, + "name": "Lou Hubbard" + }, + { + "id": 2, + "name": "Jenna Gentry" + } + ], + "greeting": "Hello, Saundra! You have 9 unread messages.", + "favoriteFruit": "apple" + }, + { + "_id": "543fa821524a5fd2a5f037a2", + "index": 84, + "guid": "e7894fe9-efd4-44ae-afd5-49bfde09b000", + "isActive": true, + "balance": "$2,320.08", + "picture": "http://placehold.it/32x32", + "age": 37, + "eyeColor": "green", + "name": { + "first": "Christensen", + "last": "Wolf" + }, + "company": "GLUKGLUK", + "email": "christensen.wolf@glukgluk.net", + "phone": "+1 (937) 519-3107", + "address": "460 Porter Avenue, Irwin, South Dakota, 2498", + "about": "Ullamco reprehenderit non aliquip amet do deserunt nostrud ea deserunt fugiat. Commodo pariatur est officia dolor dolore in excepteur pariatur laborum ut. Occaecat cillum ullamco nulla eu esse non nisi pariatur ipsum amet do ea ad culpa. Excepteur esse elit laborum deserunt ad consequat. Culpa esse esse nostrud commodo laborum officia sint ea mollit. Dolor culpa pariatur fugiat cillum quis proident non enim esse pariatur duis adipisicing amet. Consequat minim aliqua enim excepteur.\r\n", + "registered": "Saturday, February 8, 2014 10:20 PM", + "latitude": 22.478522, + "longitude": 30.070646, + "tags": [ + "quis", + "ad", + "adipisicing", + "exercitation", + "non", + "eu", + "anim" + ], + "range": [ + 0, + 1, + 2, + 3, + 4, + 5, + 6, + 7, + 8, + 9 + ], + "friends": [ + { + "id": 0, + "name": "Bright Moon" + }, + { + "id": 1, + "name": "Stephenson Sears" + }, + { + "id": 2, + "name": "Fletcher Swanson" + } + ], + "greeting": "Hello, Christensen! You have 9 unread messages.", + "favoriteFruit": "banana" + }, + { + "_id": "543fa82160fa0826e90b747b", + "index": 85, + "guid": "af08cc83-e8c9-4bcd-84b3-2f3251cf9a02", + "isActive": true, + "balance": "$2,575.60", + "picture": "http://placehold.it/32x32", + "age": 29, + "eyeColor": "brown", + "name": { + "first": "Weaver", + "last": "Parker" + }, + "company": "RETROTEX", + "email": "weaver.parker@retrotex.com", + "phone": "+1 (951) 411-2545", + "address": "560 Madison Place, Sidman, Ohio, 5153", + "about": "Reprehenderit esse dolor tempor consectetur occaecat exercitation consectetur irure commodo. Et eiusmod aute ipsum eu commodo qui id anim proident. Excepteur labore laboris aliqua incididunt ut nisi consequat deserunt dolore officia velit sint consectetur. Laboris sint minim mollit duis. Excepteur nostrud incididunt aute consequat ad magna eu quis. 
Id dolor eu aliqua deserunt cillum sint ea et nostrud. Lorem amet cillum nulla tempor commodo mollit aliquip do est enim enim.\r\n", + "registered": "Thursday, February 6, 2014 12:55 PM", + "latitude": 87.778566, + "longitude": -122.687026, + "tags": [ + "eu", + "voluptate", + "commodo", + "magna", + "ullamco", + "nulla", + "ea" + ], + "range": [ + 0, + 1, + 2, + 3, + 4, + 5, + 6, + 7, + 8, + 9 + ], + "friends": [ + { + "id": 0, + "name": "Ratliff Mckinney" + }, + { + "id": 1, + "name": "Walker Frye" + }, + { + "id": 2, + "name": "Mcgowan Daniel" + } + ], + "greeting": "Hello, Weaver! You have 5 unread messages.", + "favoriteFruit": "apple" + }, + { + "_id": "543fa8217a543d20bd4b10c1", + "index": 86, + "guid": "5b670ab7-4ee6-4b8e-b379-4e1849b6e329", + "isActive": false, + "balance": "$3,114.39", + "picture": "http://placehold.it/32x32", + "age": 38, + "eyeColor": "green", + "name": { + "first": "Annabelle", + "last": "Sanders" + }, + "company": "PHORMULA", + "email": "annabelle.sanders@phormula.biz", + "phone": "+1 (982) 489-2678", + "address": "970 Llama Court, Moraida, Maine, 8694", + "about": "Nostrud adipisicing magna nulla magna voluptate duis eu voluptate cupidatat ut dolore excepteur esse dolor. Aliquip exercitation occaecat amet excepteur sit. Velit adipisicing esse labore veniam duis ullamco in ea. Adipisicing eiusmod cillum veniam nostrud sint laboris sit id officia. Esse esse anim sint do ea id. Esse ipsum mollit sit laborum nostrud mollit nulla id.\r\n", + "registered": "Tuesday, January 7, 2014 7:34 AM", + "latitude": 9.515348, + "longitude": -99.138606, + "tags": [ + "ipsum", + "sint", + "dolor", + "laborum", + "est", + "consequat", + "magna" + ], + "range": [ + 0, + 1, + 2, + 3, + 4, + 5, + 6, + 7, + 8, + 9 + ], + "friends": [ + { + "id": 0, + "name": "Juliet Clements" + }, + { + "id": 1, + "name": "Jeannine Pruitt" + }, + { + "id": 2, + "name": "Chambers Warren" + } + ], + "greeting": "Hello, Annabelle! You have 7 unread messages.", + "favoriteFruit": "apple" + }, + { + "_id": "543fa82179c1fe89e1ccec4d", + "index": 87, + "guid": "31ecaae4-6443-4c00-a20d-82100c49b488", + "isActive": true, + "balance": "$3,803.83", + "picture": "http://placehold.it/32x32", + "age": 40, + "eyeColor": "blue", + "name": { + "first": "Kelley", + "last": "Miles" + }, + "company": "AQUASURE", + "email": "kelley.miles@aquasure.co.uk", + "phone": "+1 (819) 529-2967", + "address": "680 Monument Walk, Wakulla, Vermont, 8903", + "about": "Ea sint dolor nostrud dolor id commodo esse nisi. Reprehenderit minim dolore nostrud sint incididunt excepteur reprehenderit enim velit velit. Proident officia velit Lorem dolore ullamco occaecat.\r\n", + "registered": "Saturday, May 3, 2014 12:23 AM", + "latitude": 73.767872, + "longitude": -118.631186, + "tags": [ + "consectetur", + "irure", + "nostrud", + "nostrud", + "aliquip", + "quis", + "reprehenderit" + ], + "range": [ + 0, + 1, + 2, + 3, + 4, + 5, + 6, + 7, + 8, + 9 + ], + "friends": [ + { + "id": 0, + "name": "Maynard Townsend" + }, + { + "id": 1, + "name": "Carlene Molina" + }, + { + "id": 2, + "name": "Mai Bentley" + } + ], + "greeting": "Hello, Kelley! 
You have 7 unread messages.", + "favoriteFruit": "apple" + }, + { + "_id": "543fa8218de20c1f1e1e93fa", + "index": 88, + "guid": "8f6b0aac-f2ba-45aa-a7c8-76c413bdeb7a", + "isActive": true, + "balance": "$1,898.86", + "picture": "http://placehold.it/32x32", + "age": 38, + "eyeColor": "brown", + "name": { + "first": "Mckay", + "last": "Velasquez" + }, + "company": "NORALEX", + "email": "mckay.velasquez@noralex.tv", + "phone": "+1 (973) 599-3463", + "address": "152 Roebling Street, Mathews, Delaware, 7958", + "about": "Nostrud esse dolor excepteur cillum aliqua. Ea nulla elit minim sint non culpa id. Et ullamco aute laborum incididunt sint quis. Tempor tempor aliqua in sunt. Minim elit dolor quis excepteur exercitation adipisicing. Pariatur incididunt tempor irure proident exercitation deserunt sint.\r\n", + "registered": "Monday, August 4, 2014 5:00 AM", + "latitude": 81.463276, + "longitude": -66.291508, + "tags": [ + "nisi", + "anim", + "qui", + "est", + "qui", + "ipsum", + "ullamco" + ], + "range": [ + 0, + 1, + 2, + 3, + 4, + 5, + 6, + 7, + 8, + 9 + ], + "friends": [ + { + "id": 0, + "name": "Myrna Rollins" + }, + { + "id": 1, + "name": "Tricia Gilliam" + }, + { + "id": 2, + "name": "Collins Obrien" + } + ], + "greeting": "Hello, Mckay! You have 10 unread messages.", + "favoriteFruit": "banana" + }, + { + "_id": "543fa821b3490524365bc954", + "index": 89, + "guid": "43825015-fed6-40c8-bd80-8aaf228067f9", + "isActive": false, + "balance": "$3,035.21", + "picture": "http://placehold.it/32x32", + "age": 40, + "eyeColor": "blue", + "name": { + "first": "Brandy", + "last": "Hayden" + }, + "company": "OMNIGOG", + "email": "brandy.hayden@omnigog.name", + "phone": "+1 (827) 481-2334", + "address": "537 Pioneer Street, Saticoy, Palau, 803", + "about": "Ea velit consectetur ipsum ut elit pariatur id labore sunt eu incididunt est aliqua. Veniam esse officia do non officia cupidatat proident id officia esse tempor non mollit dolore. Elit voluptate nulla exercitation laboris ex ad irure est do enim aute velit aute. Cillum adipisicing nisi dolor velit duis ad fugiat deserunt non commodo Lorem fugiat sint qui. Aute consectetur magna incididunt tempor in esse consectetur magna qui sit. Culpa consequat laborum duis adipisicing dolor in deserunt ut velit ea ex dolore ullamco esse.\r\n", + "registered": "Wednesday, February 12, 2014 7:32 PM", + "latitude": 4.319892, + "longitude": 81.048442, + "tags": [ + "labore", + "ullamco", + "commodo", + "quis", + "aute", + "nulla", + "do" + ], + "range": [ + 0, + 1, + 2, + 3, + 4, + 5, + 6, + 7, + 8, + 9 + ], + "friends": [ + { + "id": 0, + "name": "Riggs Flynn" + }, + { + "id": 1, + "name": "Kidd Guerrero" + }, + { + "id": 2, + "name": "Rosario Wade" + } + ], + "greeting": "Hello, Brandy! You have 10 unread messages.", + "favoriteFruit": "apple" + }, + { + "_id": "543fa8211b737d086264c9a9", + "index": 90, + "guid": "ec5fe190-d16f-4728-bce1-c5140852c583", + "isActive": true, + "balance": "$2,932.98", + "picture": "http://placehold.it/32x32", + "age": 28, + "eyeColor": "green", + "name": { + "first": "Sonia", + "last": "Orr" + }, + "company": "MUSANPOLY", + "email": "sonia.orr@musanpoly.me", + "phone": "+1 (822) 422-2010", + "address": "908 Dekalb Avenue, Elfrida, Arizona, 6925", + "about": "Ut cillum ex irure amet aliquip voluptate Lorem fugiat reprehenderit sunt reprehenderit quis. Nulla laborum sunt elit ad labore ut cupidatat cillum eiusmod est. Ea irure amet excepteur mollit eu ipsum id adipisicing occaecat. 
Cupidatat sunt do veniam esse enim sint qui voluptate sint. Qui officia ad cupidatat mollit laboris. Duis tempor fugiat ea mollit cupidatat exercitation sunt incididunt.\r\n", + "registered": "Thursday, September 25, 2014 8:21 PM", + "latitude": 71.876999, + "longitude": 79.322401, + "tags": [ + "pariatur", + "sit", + "culpa", + "dolore", + "cupidatat", + "minim", + "cillum" + ], + "range": [ + 0, + 1, + 2, + 3, + 4, + 5, + 6, + 7, + 8, + 9 + ], + "friends": [ + { + "id": 0, + "name": "Morgan Pittman" + }, + { + "id": 1, + "name": "Bullock Cannon" + }, + { + "id": 2, + "name": "Lakeisha Lynch" + } + ], + "greeting": "Hello, Sonia! You have 9 unread messages.", + "favoriteFruit": "banana" + }, + { + "_id": "543fa821b271c92236e5e9b0", + "index": 91, + "guid": "57cff12c-2a18-48c2-b15a-3c393de707b6", + "isActive": false, + "balance": "$1,874.97", + "picture": "http://placehold.it/32x32", + "age": 40, + "eyeColor": "green", + "name": { + "first": "Stephens", + "last": "Whitaker" + }, + "company": "EARTHPLEX", + "email": "stephens.whitaker@earthplex.us", + "phone": "+1 (960) 419-2183", + "address": "368 Division Avenue, Fivepointville, Colorado, 8609", + "about": "Do aliquip laboris irure consectetur esse reprehenderit. Cillum ex deserunt fugiat ut dolore excepteur culpa eiusmod sit ullamco velit consequat consequat aliquip. Nostrud officia est enim velit fugiat laboris.\r\n", + "registered": "Monday, May 26, 2014 8:04 PM", + "latitude": 7.153481, + "longitude": 100.823578, + "tags": [ + "laborum", + "irure", + "dolore", + "enim", + "nisi", + "cillum", + "deserunt" + ], + "range": [ + 0, + 1, + 2, + 3, + 4, + 5, + 6, + 7, + 8, + 9 + ], + "friends": [ + { + "id": 0, + "name": "Perkins Campos" + }, + { + "id": 1, + "name": "Murray Randall" + }, + { + "id": 2, + "name": "Wallace Blackwell" + } + ], + "greeting": "Hello, Stephens! You have 7 unread messages.", + "favoriteFruit": "apple" + }, + { + "_id": "543fa821c115ee94a728efa4", + "index": 92, + "guid": "7bbe7ace-73f9-4d25-8f20-b50f4b9d4e60", + "isActive": true, + "balance": "$1,168.18", + "picture": "http://placehold.it/32x32", + "age": 33, + "eyeColor": "green", + "name": { + "first": "Shepard", + "last": "Sanchez" + }, + "company": "YOGASM", + "email": "shepard.sanchez@yogasm.ca", + "phone": "+1 (959) 436-3299", + "address": "154 Seeley Street, Witmer, Missouri, 994", + "about": "Adipisicing ut id nulla occaecat enim officia reprehenderit non magna dolor Lorem. Culpa ad proident duis cupidatat nostrud occaecat esse elit pariatur quis Lorem. Velit exercitation anim dolore nisi labore consequat cupidatat magna nostrud sint ut deserunt enim.\r\n", + "registered": "Saturday, February 22, 2014 10:23 PM", + "latitude": -81.429217, + "longitude": -27.374426, + "tags": [ + "aliqua", + "minim", + "consequat", + "aliqua", + "qui", + "proident", + "consequat" + ], + "range": [ + 0, + 1, + 2, + 3, + 4, + 5, + 6, + 7, + 8, + 9 + ], + "friends": [ + { + "id": 0, + "name": "Sara Bruce" + }, + { + "id": 1, + "name": "Kimberley Mcdaniel" + }, + { + "id": 2, + "name": "Tammi England" + } + ], + "greeting": "Hello, Shepard! 
You have 7 unread messages.", + "favoriteFruit": "strawberry" + }, + { + "_id": "543fa821a094fdd2592addbd", + "index": 93, + "guid": "69bc8858-bd55-4fa0-95f9-d2cb680a390c", + "isActive": false, + "balance": "$1,746.96", + "picture": "http://placehold.it/32x32", + "age": 35, + "eyeColor": "blue", + "name": { + "first": "Kristy", + "last": "Johnston" + }, + "company": "KONGLE", + "email": "kristy.johnston@kongle.org", + "phone": "+1 (823) 558-3033", + "address": "122 Cadman Plaza, Suitland, Minnesota, 3918", + "about": "Laboris excepteur ea nostrud incididunt est laborum dolor. Consequat ad duis aute proident incididunt commodo adipisicing. Enim voluptate sunt et est excepteur eiusmod commodo. Mollit fugiat reprehenderit ex ullamco magna laboris commodo mollit. Cupidatat tempor tempor minim dolore. Excepteur ipsum esse ipsum nulla.\r\n", + "registered": "Wednesday, January 29, 2014 11:15 PM", + "latitude": 15.335329, + "longitude": -78.001472, + "tags": [ + "veniam", + "consequat", + "ex", + "anim", + "culpa", + "ullamco", + "ex" + ], + "range": [ + 0, + 1, + 2, + 3, + 4, + 5, + 6, + 7, + 8, + 9 + ], + "friends": [ + { + "id": 0, + "name": "Johns Cochran" + }, + { + "id": 1, + "name": "Sheppard Hicks" + }, + { + "id": 2, + "name": "Cervantes Donovan" + } + ], + "greeting": "Hello, Kristy! You have 10 unread messages.", + "favoriteFruit": "banana" + }, + { + "_id": "543fa821311b6242dc75f718", + "index": 94, + "guid": "9de76ab8-2ef7-4b46-a478-5dea8732650a", + "isActive": false, + "balance": "$2,549.05", + "picture": "http://placehold.it/32x32", + "age": 40, + "eyeColor": "blue", + "name": { + "first": "Melendez", + "last": "Golden" + }, + "company": "GRONK", + "email": "melendez.golden@gronk.biz", + "phone": "+1 (986) 553-2124", + "address": "647 Broadway , Reno, Northern Mariana Islands, 9487", + "about": "Aliquip pariatur deserunt culpa eu nostrud eu amet. Adipisicing elit occaecat aliqua ipsum ut. Cillum magna consectetur elit esse sint laboris duis. In ea enim aute eiusmod culpa. Labore quis sunt aliquip excepteur tempor irure amet consequat eu excepteur et occaecat.\r\n", + "registered": "Monday, April 14, 2014 9:41 AM", + "latitude": 55.489443, + "longitude": 3.620039, + "tags": [ + "pariatur", + "magna", + "quis", + "in", + "irure", + "amet", + "esse" + ], + "range": [ + 0, + 1, + 2, + 3, + 4, + 5, + 6, + 7, + 8, + 9 + ], + "friends": [ + { + "id": 0, + "name": "Hilary Dennis" + }, + { + "id": 1, + "name": "Tyler Burgess" + }, + { + "id": 2, + "name": "Eugenia Donaldson" + } + ], + "greeting": "Hello, Melendez! You have 9 unread messages.", + "favoriteFruit": "strawberry" + }, + { + "_id": "543fa821c394a9871527c834", + "index": 95, + "guid": "ec0f31d4-7264-4649-9af9-2716082915d3", + "isActive": true, + "balance": "$2,810.24", + "picture": "http://placehold.it/32x32", + "age": 34, + "eyeColor": "brown", + "name": { + "first": "Eleanor", + "last": "Curtis" + }, + "company": "ZAPPIX", + "email": "eleanor.curtis@zappix.io", + "phone": "+1 (824) 461-3776", + "address": "981 Lyme Avenue, Oneida, Marshall Islands, 6015", + "about": "Commodo labore do pariatur exercitation voluptate ea velit velit qui ex duis. Commodo est excepteur ad labore aute enim eu. 
Adipisicing irure exercitation sint consectetur exercitation occaecat aute exercitation commodo.\r\n", + "registered": "Monday, May 5, 2014 7:51 AM", + "latitude": -5.604007, + "longitude": -125.532894, + "tags": [ + "aliqua", + "commodo", + "non", + "magna", + "quis", + "velit", + "irure" + ], + "range": [ + 0, + 1, + 2, + 3, + 4, + 5, + 6, + 7, + 8, + 9 + ], + "friends": [ + { + "id": 0, + "name": "Monica Carr" + }, + { + "id": 1, + "name": "Coleman Simpson" + }, + { + "id": 2, + "name": "Wilma Fisher" + } + ], + "greeting": "Hello, Eleanor! You have 7 unread messages.", + "favoriteFruit": "apple" + }, + { + "_id": "543fa821a5b5e35d24f265b3", + "index": 96, + "guid": "c865c0ff-8505-4d02-af0a-c5a275d03fd5", + "isActive": true, + "balance": "$3,192.24", + "picture": "http://placehold.it/32x32", + "age": 22, + "eyeColor": "blue", + "name": { + "first": "Terrell", + "last": "Greene" + }, + "company": "AUTOGRATE", + "email": "terrell.greene@autograte.net", + "phone": "+1 (933) 557-3825", + "address": "435 Bridgewater Street, Fresno, New Jersey, 8124", + "about": "Elit sit ut anim reprehenderit anim ipsum esse tempor aliqua id ullamco. Exercitation est velit aliquip est elit mollit magna velit. Elit eiusmod esse voluptate consectetur enim id exercitation adipisicing et laborum. Irure fugiat ut sint exercitation dolor nostrud qui mollit eiusmod cupidatat. Commodo ad dolore incididunt ex quis nostrud veniam pariatur aliquip ea reprehenderit reprehenderit.\r\n", + "registered": "Sunday, September 21, 2014 3:31 AM", + "latitude": 76.638623, + "longitude": 147.966829, + "tags": [ + "aliquip", + "qui", + "nisi", + "ut", + "non", + "proident", + "et" + ], + "range": [ + 0, + 1, + 2, + 3, + 4, + 5, + 6, + 7, + 8, + 9 + ], + "friends": [ + { + "id": 0, + "name": "Lucile Jordan" + }, + { + "id": 1, + "name": "Bender Sheppard" + }, + { + "id": 2, + "name": "Milagros Francis" + } + ], + "greeting": "Hello, Terrell! You have 8 unread messages.", + "favoriteFruit": "banana" + }, + { + "_id": "543fa8216cc444257e522ec0", + "index": 97, + "guid": "90c19881-6521-4012-aba7-f4d143953965", + "isActive": false, + "balance": "$1,509.96", + "picture": "http://placehold.it/32x32", + "age": 31, + "eyeColor": "green", + "name": { + "first": "Holman", + "last": "Hines" + }, + "company": "NORSUP", + "email": "holman.hines@norsup.com", + "phone": "+1 (925) 565-2825", + "address": "902 Monroe Street, Marienthal, Virgin Islands, 2354", + "about": "Eu cupidatat consectetur labore voluptate nulla non amet incididunt labore non magna tempor. Veniam et sint qui reprehenderit reprehenderit sint ut laboris elit. Pariatur in eu dolore culpa nisi laboris officia magna do velit. Cupidatat proident excepteur officia labore irure sunt elit velit dolor commodo. Minim quis sint minim non incididunt Lorem elit cupidatat adipisicing quis esse non sint et. Laborum culpa incididunt incididunt fugiat minim ex deserunt et.\r\n", + "registered": "Sunday, April 27, 2014 8:05 PM", + "latitude": 61.825518, + "longitude": -28.393161, + "tags": [ + "voluptate", + "veniam", + "Lorem", + "ex", + "in", + "est", + "exercitation" + ], + "range": [ + 0, + 1, + 2, + 3, + 4, + 5, + 6, + 7, + 8, + 9 + ], + "friends": [ + { + "id": 0, + "name": "Cecelia Chapman" + }, + { + "id": 1, + "name": "Isabella Castaneda" + }, + { + "id": 2, + "name": "Montoya Chen" + } + ], + "greeting": "Hello, Holman! 
You have 9 unread messages.", + "favoriteFruit": "apple" + }, + { + "_id": "543fa8210659aa5cdca41f7e", + "index": 98, + "guid": "81adf15d-24b6-454c-a75f-f5be12f6ac75", + "isActive": false, + "balance": "$2,564.95", + "picture": "http://placehold.it/32x32", + "age": 39, + "eyeColor": "green", + "name": { + "first": "Horton", + "last": "Poole" + }, + "company": "GROK", + "email": "horton.poole@grok.biz", + "phone": "+1 (908) 520-3683", + "address": "141 Gatling Place, Hackneyville, Oregon, 2976", + "about": "Consequat nisi reprehenderit incididunt id minim cillum. Lorem reprehenderit fugiat irure dolor excepteur velit. Est nostrud culpa ut reprehenderit in duis id voluptate pariatur voluptate. Exercitation Lorem esse exercitation Lorem esse. Excepteur mollit sit ut voluptate ipsum pariatur anim sint sunt cillum sit consequat.\r\n", + "registered": "Thursday, September 18, 2014 6:11 AM", + "latitude": -3.525694, + "longitude": -103.351985, + "tags": [ + "deserunt", + "voluptate", + "cillum", + "id", + "magna", + "deserunt", + "incididunt" + ], + "range": [ + 0, + 1, + 2, + 3, + 4, + 5, + 6, + 7, + 8, + 9 + ], + "friends": [ + { + "id": 0, + "name": "Hope Lara" + }, + { + "id": 1, + "name": "French Garner" + }, + { + "id": 2, + "name": "Stein Sykes" + } + ], + "greeting": "Hello, Horton! You have 5 unread messages.", + "favoriteFruit": "banana" + }, + { + "_id": "543fa821cf1b15f13d8c3938", + "index": 99, + "guid": "44533089-c11a-4656-b2d1-9ab6be887f30", + "isActive": false, + "balance": "$3,192.31", + "picture": "http://placehold.it/32x32", + "age": 37, + "eyeColor": "green", + "name": { + "first": "Wanda", + "last": "Wiggins" + }, + "company": "ZEPITOPE", + "email": "wanda.wiggins@zepitope.co.uk", + "phone": "+1 (998) 461-3780", + "address": "427 Canton Court, Heil, Utah, 8283", + "about": "Do officia et exercitation dolor esse. Ut nisi eiusmod dolore laborum ad ex mollit minim. Pariatur qui culpa ullamco ex eiusmod.\r\n", + "registered": "Tuesday, August 26, 2014 6:59 PM", + "latitude": 44.770809, + "longitude": 150.936963, + "tags": [ + "consectetur", + "mollit", + "laborum", + "ipsum", + "quis", + "cupidatat", + "in" + ], + "range": [ + 0, + 1, + 2, + 3, + 4, + 5, + 6, + 7, + 8, + 9 + ], + "friends": [ + { + "id": 0, + "name": "Kristie Cain" + }, + { + "id": 1, + "name": "Geraldine Zimmerman" + }, + { + "id": 2, + "name": "Ingrid Harper" + } + ], + "greeting": "Hello, Wanda! You have 5 unread messages.", + "favoriteFruit": "strawberry" + } +] \ No newline at end of file diff --git a/vendor/github.com/klauspost/compress/snappy/.gitignore b/vendor/github.com/klauspost/compress/snappy/.gitignore new file mode 100644 index 0000000..042091d --- /dev/null +++ b/vendor/github.com/klauspost/compress/snappy/.gitignore @@ -0,0 +1,16 @@ +cmd/snappytool/snappytool +testdata/bench + +# These explicitly listed benchmark data files are for an obsolete version of +# snappy_test.go. +testdata/alice29.txt +testdata/asyoulik.txt +testdata/fireworks.jpeg +testdata/geo.protodata +testdata/html +testdata/html_x_4 +testdata/kppkn.gtb +testdata/lcet10.txt +testdata/paper-100k.pdf +testdata/plrabn12.txt +testdata/urls.10K diff --git a/vendor/github.com/klauspost/compress/snappy/AUTHORS b/vendor/github.com/klauspost/compress/snappy/AUTHORS new file mode 100644 index 0000000..fd1c6f6 --- /dev/null +++ b/vendor/github.com/klauspost/compress/snappy/AUTHORS @@ -0,0 +1,15 @@ +# This is the official list of Snappy-Go authors for copyright purposes. +# This file is distinct from the CONTRIBUTORS files. 
+# See the latter for an explanation. + +# Names should be added to this file as +# Name or Organization +# The email address is not required for organizations. + +# Please keep the list sorted. + +Damian Gryski +Google Inc. +Jan Mercl <0xjnml@gmail.com> +Rodolfo Carvalho +Sebastien Binet diff --git a/vendor/github.com/klauspost/compress/snappy/CONTRIBUTORS b/vendor/github.com/klauspost/compress/snappy/CONTRIBUTORS new file mode 100644 index 0000000..a29b133 --- /dev/null +++ b/vendor/github.com/klauspost/compress/snappy/CONTRIBUTORS @@ -0,0 +1,37 @@ +# This is the official list of people who can contribute +# (and typically have contributed) code to the Snappy-Go repository. +# The AUTHORS file lists the copyright holders; this file +# lists people. For example, Google employees are listed here +# but not in AUTHORS, because Google holds the copyright. +# +# The submission process automatically checks to make sure +# that people submitting code are listed in this file (by email address). +# +# Names should be added to this file only after verifying that +# the individual or the individual's organization has agreed to +# the appropriate Contributor License Agreement, found here: +# +# http://code.google.com/legal/individual-cla-v1.0.html +# http://code.google.com/legal/corporate-cla-v1.0.html +# +# The agreement for individuals can be filled out on the web. +# +# When adding J Random Contributor's name to this file, +# either J's name or J's organization's name should be +# added to the AUTHORS file, depending on whether the +# individual or corporate CLA was used. + +# Names should be added to this file like so: +# Name + +# Please keep the list sorted. + +Damian Gryski +Jan Mercl <0xjnml@gmail.com> +Kai Backman +Marc-Antoine Ruel +Nigel Tao +Rob Pike +Rodolfo Carvalho +Russ Cox +Sebastien Binet diff --git a/vendor/github.com/klauspost/compress/snappy/LICENSE b/vendor/github.com/klauspost/compress/snappy/LICENSE new file mode 100644 index 0000000..7e69e1a --- /dev/null +++ b/vendor/github.com/klauspost/compress/snappy/LICENSE @@ -0,0 +1,27 @@ +Copyright (c) 2011 The Snappy-Go Authors. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with the +distribution. + * Neither the name of Google Inc. nor the names of its +contributors may be used to endorse or promote products derived from +this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/vendor/github.com/klauspost/compress/snappy/README b/vendor/github.com/klauspost/compress/snappy/README new file mode 100644 index 0000000..9138fe8 --- /dev/null +++ b/vendor/github.com/klauspost/compress/snappy/README @@ -0,0 +1,107 @@ +The Snappy compression format in the Go programming language. + +To download and install from source: +$ go get github.com/golang/snappy + +Unless otherwise noted, the Snappy-Go source files are distributed +under the BSD-style license found in the LICENSE file. + + + +Benchmarks. + +The golang/snappy benchmarks include compressing (Z) and decompressing (U) ten +or so files, the same set used by the C++ Snappy code (github.com/google/snappy +and note the "google", not "golang"). On an "Intel(R) Core(TM) i7-3770 CPU @ +3.40GHz", Go's GOARCH=amd64 numbers as of 2016-05-29: + +"go test -test.bench=." + +_UFlat0-8 2.19GB/s ± 0% html +_UFlat1-8 1.41GB/s ± 0% urls +_UFlat2-8 23.5GB/s ± 2% jpg +_UFlat3-8 1.91GB/s ± 0% jpg_200 +_UFlat4-8 14.0GB/s ± 1% pdf +_UFlat5-8 1.97GB/s ± 0% html4 +_UFlat6-8 814MB/s ± 0% txt1 +_UFlat7-8 785MB/s ± 0% txt2 +_UFlat8-8 857MB/s ± 0% txt3 +_UFlat9-8 719MB/s ± 1% txt4 +_UFlat10-8 2.84GB/s ± 0% pb +_UFlat11-8 1.05GB/s ± 0% gaviota + +_ZFlat0-8 1.04GB/s ± 0% html +_ZFlat1-8 534MB/s ± 0% urls +_ZFlat2-8 15.7GB/s ± 1% jpg +_ZFlat3-8 740MB/s ± 3% jpg_200 +_ZFlat4-8 9.20GB/s ± 1% pdf +_ZFlat5-8 991MB/s ± 0% html4 +_ZFlat6-8 379MB/s ± 0% txt1 +_ZFlat7-8 352MB/s ± 0% txt2 +_ZFlat8-8 396MB/s ± 1% txt3 +_ZFlat9-8 327MB/s ± 1% txt4 +_ZFlat10-8 1.33GB/s ± 1% pb +_ZFlat11-8 605MB/s ± 1% gaviota + + + +"go test -test.bench=. 
-tags=noasm" + +_UFlat0-8 621MB/s ± 2% html +_UFlat1-8 494MB/s ± 1% urls +_UFlat2-8 23.2GB/s ± 1% jpg +_UFlat3-8 1.12GB/s ± 1% jpg_200 +_UFlat4-8 4.35GB/s ± 1% pdf +_UFlat5-8 609MB/s ± 0% html4 +_UFlat6-8 296MB/s ± 0% txt1 +_UFlat7-8 288MB/s ± 0% txt2 +_UFlat8-8 309MB/s ± 1% txt3 +_UFlat9-8 280MB/s ± 1% txt4 +_UFlat10-8 753MB/s ± 0% pb +_UFlat11-8 400MB/s ± 0% gaviota + +_ZFlat0-8 409MB/s ± 1% html +_ZFlat1-8 250MB/s ± 1% urls +_ZFlat2-8 12.3GB/s ± 1% jpg +_ZFlat3-8 132MB/s ± 0% jpg_200 +_ZFlat4-8 2.92GB/s ± 0% pdf +_ZFlat5-8 405MB/s ± 1% html4 +_ZFlat6-8 179MB/s ± 1% txt1 +_ZFlat7-8 170MB/s ± 1% txt2 +_ZFlat8-8 189MB/s ± 1% txt3 +_ZFlat9-8 164MB/s ± 1% txt4 +_ZFlat10-8 479MB/s ± 1% pb +_ZFlat11-8 270MB/s ± 1% gaviota + + + +For comparison (Go's encoded output is byte-for-byte identical to C++'s), here +are the numbers from C++ Snappy's + +make CXXFLAGS="-O2 -DNDEBUG -g" clean snappy_unittest.log && cat snappy_unittest.log + +BM_UFlat/0 2.4GB/s html +BM_UFlat/1 1.4GB/s urls +BM_UFlat/2 21.8GB/s jpg +BM_UFlat/3 1.5GB/s jpg_200 +BM_UFlat/4 13.3GB/s pdf +BM_UFlat/5 2.1GB/s html4 +BM_UFlat/6 1.0GB/s txt1 +BM_UFlat/7 959.4MB/s txt2 +BM_UFlat/8 1.0GB/s txt3 +BM_UFlat/9 864.5MB/s txt4 +BM_UFlat/10 2.9GB/s pb +BM_UFlat/11 1.2GB/s gaviota + +BM_ZFlat/0 944.3MB/s html (22.31 %) +BM_ZFlat/1 501.6MB/s urls (47.78 %) +BM_ZFlat/2 14.3GB/s jpg (99.95 %) +BM_ZFlat/3 538.3MB/s jpg_200 (73.00 %) +BM_ZFlat/4 8.3GB/s pdf (83.30 %) +BM_ZFlat/5 903.5MB/s html4 (22.52 %) +BM_ZFlat/6 336.0MB/s txt1 (57.88 %) +BM_ZFlat/7 312.3MB/s txt2 (61.91 %) +BM_ZFlat/8 353.1MB/s txt3 (54.99 %) +BM_ZFlat/9 289.9MB/s txt4 (66.26 %) +BM_ZFlat/10 1.2GB/s pb (19.68 %) +BM_ZFlat/11 527.4MB/s gaviota (37.72 %) diff --git a/vendor/github.com/klauspost/compress/snappy/cmd/snappytool/main.cpp b/vendor/github.com/klauspost/compress/snappy/cmd/snappytool/main.cpp new file mode 100644 index 0000000..fc31f51 --- /dev/null +++ b/vendor/github.com/klauspost/compress/snappy/cmd/snappytool/main.cpp @@ -0,0 +1,77 @@ +/* +To build the snappytool binary: +g++ main.cpp /usr/lib/libsnappy.a -o snappytool +or, if you have built the C++ snappy library from source: +g++ main.cpp /path/to/your/snappy/.libs/libsnappy.a -o snappytool +after running "make" from your snappy checkout directory. +*/ + +#include <errno.h> +#include <stdio.h> +#include <string.h> +#include <unistd.h> + +#include "snappy.h" + +#define N 1000000 + +char dst[N]; +char src[N]; + +int main(int argc, char** argv) { + // Parse args. + if (argc != 2) { + fprintf(stderr, "exactly one of -d or -e must be given\n"); + return 1; + } + bool decode = strcmp(argv[1], "-d") == 0; + bool encode = strcmp(argv[1], "-e") == 0; + if (decode == encode) { + fprintf(stderr, "exactly one of -d or -e must be given\n"); + return 1; + } + + // Read all of stdin into src[:s]. + size_t s = 0; + while (1) { + if (s == N) { + fprintf(stderr, "input too large\n"); + return 1; + } + ssize_t n = read(0, src+s, N-s); + if (n == 0) { + break; + } + if (n < 0) { + fprintf(stderr, "read error: %s\n", strerror(errno)); + // TODO: handle EAGAIN, EINTR? + return 1; + } + s += n; + } + + // Encode or decode src[:s] to dst[:d], and write to stdout.
+ size_t d = 0; + if (encode) { + if (N < snappy::MaxCompressedLength(s)) { + fprintf(stderr, "input too large after encoding\n"); + return 1; + } + snappy::RawCompress(src, s, dst, &d); + } else { + if (!snappy::GetUncompressedLength(src, s, &d)) { + fprintf(stderr, "could not get uncompressed length\n"); + return 1; + } + if (N < d) { + fprintf(stderr, "input too large after decoding\n"); + return 1; + } + if (!snappy::RawUncompress(src, s, dst)) { + fprintf(stderr, "input was not valid Snappy-compressed data\n"); + return 1; + } + } + write(1, dst, d); + return 0; +} diff --git a/vendor/github.com/klauspost/compress/snappy/decode.go b/vendor/github.com/klauspost/compress/snappy/decode.go new file mode 100644 index 0000000..72efb03 --- /dev/null +++ b/vendor/github.com/klauspost/compress/snappy/decode.go @@ -0,0 +1,237 @@ +// Copyright 2011 The Snappy-Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package snappy + +import ( + "encoding/binary" + "errors" + "io" +) + +var ( + // ErrCorrupt reports that the input is invalid. + ErrCorrupt = errors.New("snappy: corrupt input") + // ErrTooLarge reports that the uncompressed length is too large. + ErrTooLarge = errors.New("snappy: decoded block is too large") + // ErrUnsupported reports that the input isn't supported. + ErrUnsupported = errors.New("snappy: unsupported input") + + errUnsupportedLiteralLength = errors.New("snappy: unsupported literal length") +) + +// DecodedLen returns the length of the decoded block. +func DecodedLen(src []byte) (int, error) { + v, _, err := decodedLen(src) + return v, err +} + +// decodedLen returns the length of the decoded block and the number of bytes +// that the length header occupied. +func decodedLen(src []byte) (blockLen, headerLen int, err error) { + v, n := binary.Uvarint(src) + if n <= 0 || v > 0xffffffff { + return 0, 0, ErrCorrupt + } + + const wordSize = 32 << (^uint(0) >> 32 & 1) + if wordSize == 32 && v > 0x7fffffff { + return 0, 0, ErrTooLarge + } + return int(v), n, nil +} + +const ( + decodeErrCodeCorrupt = 1 + decodeErrCodeUnsupportedLiteralLength = 2 +) + +// Decode returns the decoded form of src. The returned slice may be a sub- +// slice of dst if dst was large enough to hold the entire decoded block. +// Otherwise, a newly allocated slice will be returned. +// +// The dst and src must not overlap. It is valid to pass a nil dst. +func Decode(dst, src []byte) ([]byte, error) { + dLen, s, err := decodedLen(src) + if err != nil { + return nil, err + } + if dLen <= len(dst) { + dst = dst[:dLen] + } else { + dst = make([]byte, dLen) + } + switch decode(dst, src[s:]) { + case 0: + return dst, nil + case decodeErrCodeUnsupportedLiteralLength: + return nil, errUnsupportedLiteralLength + } + return nil, ErrCorrupt +} + +// NewReader returns a new Reader that decompresses from r, using the framing +// format described at +// https://github.com/google/snappy/blob/master/framing_format.txt +func NewReader(r io.Reader) *Reader { + return &Reader{ + r: r, + decoded: make([]byte, maxBlockSize), + buf: make([]byte, maxEncodedLenOfMaxBlockSize+checksumSize), + } +} + +// Reader is an io.Reader that can read Snappy-compressed bytes. +type Reader struct { + r io.Reader + err error + decoded []byte + buf []byte + // decoded[i:j] contains decoded bytes that have not yet been passed on. 
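A minimal sketch of the block-format helpers documented above (Encode, Decode and DecodedLen); this is not part of the vendored files, and the import path assumes the vendored fork of snappy-go:

package main

import (
    "bytes"
    "encoding/binary"
    "fmt"

    "github.com/klauspost/compress/snappy"
)

func main() {
    src := bytes.Repeat([]byte("hello snappy "), 100)

    // A block starts with the uvarint-encoded length of the decompressed bytes.
    enc := snappy.Encode(nil, src)
    if n, _ := binary.Uvarint(enc); int(n) != len(src) {
        panic("length header mismatch")
    }

    // DecodedLen reads only that length header.
    dLen, err := snappy.DecodedLen(enc)
    if err != nil {
        panic(err)
    }

    // Passing a large enough dst lets Decode return a sub-slice of it.
    dst := make([]byte, dLen)
    dec, err := snappy.Decode(dst, enc)
    if err != nil {
        panic(err)
    }
    fmt.Println(bytes.Equal(dec, src)) // true
}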
+ i, j int + readHeader bool +} + +// Reset discards any buffered data, resets all state, and switches the Snappy +// reader to read from r. This permits reusing a Reader rather than allocating +// a new one. +func (r *Reader) Reset(reader io.Reader) { + r.r = reader + r.err = nil + r.i = 0 + r.j = 0 + r.readHeader = false +} + +func (r *Reader) readFull(p []byte, allowEOF bool) (ok bool) { + if _, r.err = io.ReadFull(r.r, p); r.err != nil { + if r.err == io.ErrUnexpectedEOF || (r.err == io.EOF && !allowEOF) { + r.err = ErrCorrupt + } + return false + } + return true +} + +// Read satisfies the io.Reader interface. +func (r *Reader) Read(p []byte) (int, error) { + if r.err != nil { + return 0, r.err + } + for { + if r.i < r.j { + n := copy(p, r.decoded[r.i:r.j]) + r.i += n + return n, nil + } + if !r.readFull(r.buf[:4], true) { + return 0, r.err + } + chunkType := r.buf[0] + if !r.readHeader { + if chunkType != chunkTypeStreamIdentifier { + r.err = ErrCorrupt + return 0, r.err + } + r.readHeader = true + } + chunkLen := int(r.buf[1]) | int(r.buf[2])<<8 | int(r.buf[3])<<16 + if chunkLen > len(r.buf) { + r.err = ErrUnsupported + return 0, r.err + } + + // The chunk types are specified at + // https://github.com/google/snappy/blob/master/framing_format.txt + switch chunkType { + case chunkTypeCompressedData: + // Section 4.2. Compressed data (chunk type 0x00). + if chunkLen < checksumSize { + r.err = ErrCorrupt + return 0, r.err + } + buf := r.buf[:chunkLen] + if !r.readFull(buf, false) { + return 0, r.err + } + checksum := uint32(buf[0]) | uint32(buf[1])<<8 | uint32(buf[2])<<16 | uint32(buf[3])<<24 + buf = buf[checksumSize:] + + n, err := DecodedLen(buf) + if err != nil { + r.err = err + return 0, r.err + } + if n > len(r.decoded) { + r.err = ErrCorrupt + return 0, r.err + } + if _, err := Decode(r.decoded, buf); err != nil { + r.err = err + return 0, r.err + } + if crc(r.decoded[:n]) != checksum { + r.err = ErrCorrupt + return 0, r.err + } + r.i, r.j = 0, n + continue + + case chunkTypeUncompressedData: + // Section 4.3. Uncompressed data (chunk type 0x01). + if chunkLen < checksumSize { + r.err = ErrCorrupt + return 0, r.err + } + buf := r.buf[:checksumSize] + if !r.readFull(buf, false) { + return 0, r.err + } + checksum := uint32(buf[0]) | uint32(buf[1])<<8 | uint32(buf[2])<<16 | uint32(buf[3])<<24 + // Read directly into r.decoded instead of via r.buf. + n := chunkLen - checksumSize + if n > len(r.decoded) { + r.err = ErrCorrupt + return 0, r.err + } + if !r.readFull(r.decoded[:n], false) { + return 0, r.err + } + if crc(r.decoded[:n]) != checksum { + r.err = ErrCorrupt + return 0, r.err + } + r.i, r.j = 0, n + continue + + case chunkTypeStreamIdentifier: + // Section 4.1. Stream identifier (chunk type 0xff). + if chunkLen != len(magicBody) { + r.err = ErrCorrupt + return 0, r.err + } + if !r.readFull(r.buf[:len(magicBody)], false) { + return 0, r.err + } + for i := 0; i < len(magicBody); i++ { + if r.buf[i] != magicBody[i] { + r.err = ErrCorrupt + return 0, r.err + } + } + continue + } + + if chunkType <= 0x7f { + // Section 4.5. Reserved unskippable chunks (chunk types 0x02-0x7f). + r.err = ErrUnsupported + return 0, r.err + } + // Section 4.4 Padding (chunk type 0xfe). + // Section 4.6. Reserved skippable chunks (chunk types 0x80-0xfd). 
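A short round-trip sketch of the framing format that Reader.Read parses above, paired with the buffered Writer defined later in encode.go; again an illustration only, assuming the vendored import path:

package main

import (
    "bytes"
    "fmt"
    "io"

    "github.com/klauspost/compress/snappy"
)

func main() {
    var framed bytes.Buffer

    // NewBufferedWriter emits the framed format: a stream identifier chunk,
    // then compressed or uncompressed chunks, each carrying a checksum of
    // the uncompressed bytes. Close flushes the final chunk.
    w := snappy.NewBufferedWriter(&framed)
    if _, err := w.Write(bytes.Repeat([]byte("framed data "), 1000)); err != nil {
        panic(err)
    }
    if err := w.Close(); err != nil {
        panic(err)
    }

    // Reader validates the chunk types and checksums on the way back out.
    var out bytes.Buffer
    if _, err := io.Copy(&out, snappy.NewReader(&framed)); err != nil {
        panic(err)
    }
    fmt.Println(out.Len()) // 12000
}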
+ if !r.readFull(r.buf[:chunkLen], false) { + return 0, r.err + } + } +} diff --git a/vendor/github.com/klauspost/compress/snappy/decode_amd64.go b/vendor/github.com/klauspost/compress/snappy/decode_amd64.go new file mode 100644 index 0000000..fcd192b --- /dev/null +++ b/vendor/github.com/klauspost/compress/snappy/decode_amd64.go @@ -0,0 +1,14 @@ +// Copyright 2016 The Snappy-Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build !appengine +// +build gc +// +build !noasm + +package snappy + +// decode has the same semantics as in decode_other.go. +// +//go:noescape +func decode(dst, src []byte) int diff --git a/vendor/github.com/klauspost/compress/snappy/decode_amd64.s b/vendor/github.com/klauspost/compress/snappy/decode_amd64.s new file mode 100644 index 0000000..e6179f6 --- /dev/null +++ b/vendor/github.com/klauspost/compress/snappy/decode_amd64.s @@ -0,0 +1,490 @@ +// Copyright 2016 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build !appengine +// +build gc +// +build !noasm + +#include "textflag.h" + +// The asm code generally follows the pure Go code in decode_other.go, except +// where marked with a "!!!". + +// func decode(dst, src []byte) int +// +// All local variables fit into registers. The non-zero stack size is only to +// spill registers and push args when issuing a CALL. The register allocation: +// - AX scratch +// - BX scratch +// - CX length or x +// - DX offset +// - SI &src[s] +// - DI &dst[d] +// + R8 dst_base +// + R9 dst_len +// + R10 dst_base + dst_len +// + R11 src_base +// + R12 src_len +// + R13 src_base + src_len +// - R14 used by doCopy +// - R15 used by doCopy +// +// The registers R8-R13 (marked with a "+") are set at the start of the +// function, and after a CALL returns, and are not otherwise modified. +// +// The d variable is implicitly DI - R8, and len(dst)-d is R10 - DI. +// The s variable is implicitly SI - R11, and len(src)-s is R13 - SI. +TEXT ·decode(SB), NOSPLIT, $48-56 + // Initialize SI, DI and R8-R13. + MOVQ dst_base+0(FP), R8 + MOVQ dst_len+8(FP), R9 + MOVQ R8, DI + MOVQ R8, R10 + ADDQ R9, R10 + MOVQ src_base+24(FP), R11 + MOVQ src_len+32(FP), R12 + MOVQ R11, SI + MOVQ R11, R13 + ADDQ R12, R13 + +loop: + // for s < len(src) + CMPQ SI, R13 + JEQ end + + // CX = uint32(src[s]) + // + // switch src[s] & 0x03 + MOVBLZX (SI), CX + MOVL CX, BX + ANDL $3, BX + CMPL BX, $1 + JAE tagCopy + + // ---------------------------------------- + // The code below handles literal tags. + + // case tagLiteral: + // x := uint32(src[s] >> 2) + // switch + SHRL $2, CX + CMPL CX, $60 + JAE tagLit60Plus + + // case x < 60: + // s++ + INCQ SI + +doLit: + // This is the end of the inner "switch", when we have a literal tag. + // + // We assume that CX == x and x fits in a uint32, where x is the variable + // used in the pure Go decode_other.go code. + + // length = int(x) + 1 + // + // Unlike the pure Go code, we don't need to check if length <= 0 because + // CX can hold 64 bits, so the increment cannot overflow. + INCQ CX + + // Prepare to check if copying length bytes will run past the end of dst or + // src. + // + // AX = len(dst) - d + // BX = len(src) - s + MOVQ R10, AX + SUBQ DI, AX + MOVQ R13, BX + SUBQ SI, BX + + // !!! Try a faster technique for short (16 or fewer bytes) copies. 
+ // + // if length > 16 || len(dst)-d < 16 || len(src)-s < 16 { + // goto callMemmove // Fall back on calling runtime·memmove. + // } + // + // The C++ snappy code calls this TryFastAppend. It also checks len(src)-s + // against 21 instead of 16, because it cannot assume that all of its input + // is contiguous in memory and so it needs to leave enough source bytes to + // read the next tag without refilling buffers, but Go's Decode assumes + // contiguousness (the src argument is a []byte). + CMPQ CX, $16 + JGT callMemmove + CMPQ AX, $16 + JLT callMemmove + CMPQ BX, $16 + JLT callMemmove + + // !!! Implement the copy from src to dst as a 16-byte load and store. + // (Decode's documentation says that dst and src must not overlap.) + // + // This always copies 16 bytes, instead of only length bytes, but that's + // OK. If the input is a valid Snappy encoding then subsequent iterations + // will fix up the overrun. Otherwise, Decode returns a nil []byte (and a + // non-nil error), so the overrun will be ignored. + // + // Note that on amd64, it is legal and cheap to issue unaligned 8-byte or + // 16-byte loads and stores. This technique probably wouldn't be as + // effective on architectures that are fussier about alignment. + MOVOU 0(SI), X0 + MOVOU X0, 0(DI) + + // d += length + // s += length + ADDQ CX, DI + ADDQ CX, SI + JMP loop + +callMemmove: + // if length > len(dst)-d || length > len(src)-s { etc } + CMPQ CX, AX + JGT errCorrupt + CMPQ CX, BX + JGT errCorrupt + + // copy(dst[d:], src[s:s+length]) + // + // This means calling runtime·memmove(&dst[d], &src[s], length), so we push + // DI, SI and CX as arguments. Coincidentally, we also need to spill those + // three registers to the stack, to save local variables across the CALL. + MOVQ DI, 0(SP) + MOVQ SI, 8(SP) + MOVQ CX, 16(SP) + MOVQ DI, 24(SP) + MOVQ SI, 32(SP) + MOVQ CX, 40(SP) + CALL runtime·memmove(SB) + + // Restore local variables: unspill registers from the stack and + // re-calculate R8-R13. + MOVQ 24(SP), DI + MOVQ 32(SP), SI + MOVQ 40(SP), CX + MOVQ dst_base+0(FP), R8 + MOVQ dst_len+8(FP), R9 + MOVQ R8, R10 + ADDQ R9, R10 + MOVQ src_base+24(FP), R11 + MOVQ src_len+32(FP), R12 + MOVQ R11, R13 + ADDQ R12, R13 + + // d += length + // s += length + ADDQ CX, DI + ADDQ CX, SI + JMP loop + +tagLit60Plus: + // !!! This fragment does the + // + // s += x - 58; if uint(s) > uint(len(src)) { etc } + // + // checks. In the asm version, we code it once instead of once per switch case. + ADDQ CX, SI + SUBQ $58, SI + MOVQ SI, BX + SUBQ R11, BX + CMPQ BX, R12 + JA errCorrupt + + // case x == 60: + CMPL CX, $61 + JEQ tagLit61 + JA tagLit62Plus + + // x = uint32(src[s-1]) + MOVBLZX -1(SI), CX + JMP doLit + +tagLit61: + // case x == 61: + // x = uint32(src[s-2]) | uint32(src[s-1])<<8 + MOVWLZX -2(SI), CX + JMP doLit + +tagLit62Plus: + CMPL CX, $62 + JA tagLit63 + + // case x == 62: + // x = uint32(src[s-3]) | uint32(src[s-2])<<8 | uint32(src[s-1])<<16 + MOVWLZX -3(SI), CX + MOVBLZX -1(SI), BX + SHLL $16, BX + ORL BX, CX + JMP doLit + +tagLit63: + // case x == 63: + // x = uint32(src[s-4]) | uint32(src[s-3])<<8 | uint32(src[s-2])<<16 | uint32(src[s-1])<<24 + MOVL -4(SI), CX + JMP doLit + +// The code above handles literal tags. +// ---------------------------------------- +// The code below handles copy tags. 
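The copy-tag cases below unpack a length and an offset from one to four bytes following the tag byte. A small sketch of the two-byte tagCopy1 layout and of the forward, possibly self-overlapping copy it drives; the tag value is inferred from the decoder's switch on src[s]&0x03 (0 = literal, 1/2/3 = the copy forms), not quoted from this hunk:

package main

import "fmt"

func main() {
    const tagCopy1 = 0x01

    // Pack a copy of length 7 at offset 1000 into the 2-byte tagCopy1 form
    // (only usable while length < 12 and offset < 2048).
    offset, length := 1000, 7
    b0 := uint8(offset>>8)<<5 | uint8(length-4)<<2 | tagCopy1
    b1 := uint8(offset)

    // Unpack it again with the same formulas the decoder uses.
    gotLength := 4 + int(b0)>>2&0x7
    gotOffset := int((uint32(b0)&0xe0)<<3 | uint32(b1))
    fmt.Println(gotLength, gotOffset) // 7 1000

    // A copy may overlap its own output (offset < length); the decoder's
    // byte-at-a-time forward copy turns that overlap into pattern repetition.
    dst := []byte{'a', 'b'}
    d, off, l := len(dst), 2, 6
    dst = append(dst, make([]byte, l)...)
    for end := d + l; d != end; d++ {
        dst[d] = dst[d-off]
    }
    fmt.Println(string(dst)) // abababab
}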
+ +tagCopy4: + // case tagCopy4: + // s += 5 + ADDQ $5, SI + + // if uint(s) > uint(len(src)) { etc } + MOVQ SI, BX + SUBQ R11, BX + CMPQ BX, R12 + JA errCorrupt + + // length = 1 + int(src[s-5])>>2 + SHRQ $2, CX + INCQ CX + + // offset = int(uint32(src[s-4]) | uint32(src[s-3])<<8 | uint32(src[s-2])<<16 | uint32(src[s-1])<<24) + MOVLQZX -4(SI), DX + JMP doCopy + +tagCopy2: + // case tagCopy2: + // s += 3 + ADDQ $3, SI + + // if uint(s) > uint(len(src)) { etc } + MOVQ SI, BX + SUBQ R11, BX + CMPQ BX, R12 + JA errCorrupt + + // length = 1 + int(src[s-3])>>2 + SHRQ $2, CX + INCQ CX + + // offset = int(uint32(src[s-2]) | uint32(src[s-1])<<8) + MOVWQZX -2(SI), DX + JMP doCopy + +tagCopy: + // We have a copy tag. We assume that: + // - BX == src[s] & 0x03 + // - CX == src[s] + CMPQ BX, $2 + JEQ tagCopy2 + JA tagCopy4 + + // case tagCopy1: + // s += 2 + ADDQ $2, SI + + // if uint(s) > uint(len(src)) { etc } + MOVQ SI, BX + SUBQ R11, BX + CMPQ BX, R12 + JA errCorrupt + + // offset = int(uint32(src[s-2])&0xe0<<3 | uint32(src[s-1])) + MOVQ CX, DX + ANDQ $0xe0, DX + SHLQ $3, DX + MOVBQZX -1(SI), BX + ORQ BX, DX + + // length = 4 + int(src[s-2])>>2&0x7 + SHRQ $2, CX + ANDQ $7, CX + ADDQ $4, CX + +doCopy: + // This is the end of the outer "switch", when we have a copy tag. + // + // We assume that: + // - CX == length && CX > 0 + // - DX == offset + + // if offset <= 0 { etc } + CMPQ DX, $0 + JLE errCorrupt + + // if d < offset { etc } + MOVQ DI, BX + SUBQ R8, BX + CMPQ BX, DX + JLT errCorrupt + + // if length > len(dst)-d { etc } + MOVQ R10, BX + SUBQ DI, BX + CMPQ CX, BX + JGT errCorrupt + + // forwardCopy(dst[d:d+length], dst[d-offset:]); d += length + // + // Set: + // - R14 = len(dst)-d + // - R15 = &dst[d-offset] + MOVQ R10, R14 + SUBQ DI, R14 + MOVQ DI, R15 + SUBQ DX, R15 + + // !!! Try a faster technique for short (16 or fewer bytes) forward copies. + // + // First, try using two 8-byte load/stores, similar to the doLit technique + // above. Even if dst[d:d+length] and dst[d-offset:] can overlap, this is + // still OK if offset >= 8. Note that this has to be two 8-byte load/stores + // and not one 16-byte load/store, and the first store has to be before the + // second load, due to the overlap if offset is in the range [8, 16). + // + // if length > 16 || offset < 8 || len(dst)-d < 16 { + // goto slowForwardCopy + // } + // copy 16 bytes + // d += length + CMPQ CX, $16 + JGT slowForwardCopy + CMPQ DX, $8 + JLT slowForwardCopy + CMPQ R14, $16 + JLT slowForwardCopy + MOVQ 0(R15), AX + MOVQ AX, 0(DI) + MOVQ 8(R15), BX + MOVQ BX, 8(DI) + ADDQ CX, DI + JMP loop + +slowForwardCopy: + // !!! If the forward copy is longer than 16 bytes, or if offset < 8, we + // can still try 8-byte load stores, provided we can overrun up to 10 extra + // bytes. As above, the overrun will be fixed up by subsequent iterations + // of the outermost loop. + // + // The C++ snappy code calls this technique IncrementalCopyFastPath. Its + // commentary says: + // + // ---- + // + // The main part of this loop is a simple copy of eight bytes at a time + // until we've copied (at least) the requested amount of bytes. However, + // if d and d-offset are less than eight bytes apart (indicating a + // repeating pattern of length < 8), we first need to expand the pattern in + // order to get the correct results. 
For instance, if the buffer looks like + // this, with the eight-byte and patterns marked as + // intervals: + // + // abxxxxxxxxxxxx + // [------] d-offset + // [------] d + // + // a single eight-byte copy from to will repeat the pattern + // once, after which we can move two bytes without moving : + // + // ababxxxxxxxxxx + // [------] d-offset + // [------] d + // + // and repeat the exercise until the two no longer overlap. + // + // This allows us to do very well in the special case of one single byte + // repeated many times, without taking a big hit for more general cases. + // + // The worst case of extra writing past the end of the match occurs when + // offset == 1 and length == 1; the last copy will read from byte positions + // [0..7] and write to [4..11], whereas it was only supposed to write to + // position 1. Thus, ten excess bytes. + // + // ---- + // + // That "10 byte overrun" worst case is confirmed by Go's + // TestSlowForwardCopyOverrun, which also tests the fixUpSlowForwardCopy + // and finishSlowForwardCopy algorithm. + // + // if length > len(dst)-d-10 { + // goto verySlowForwardCopy + // } + SUBQ $10, R14 + CMPQ CX, R14 + JGT verySlowForwardCopy + +makeOffsetAtLeast8: + // !!! As above, expand the pattern so that offset >= 8 and we can use + // 8-byte load/stores. + // + // for offset < 8 { + // copy 8 bytes from dst[d-offset:] to dst[d:] + // length -= offset + // d += offset + // offset += offset + // // The two previous lines together means that d-offset, and therefore + // // R15, is unchanged. + // } + CMPQ DX, $8 + JGE fixUpSlowForwardCopy + MOVQ (R15), BX + MOVQ BX, (DI) + SUBQ DX, CX + ADDQ DX, DI + ADDQ DX, DX + JMP makeOffsetAtLeast8 + +fixUpSlowForwardCopy: + // !!! Add length (which might be negative now) to d (implied by DI being + // &dst[d]) so that d ends up at the right place when we jump back to the + // top of the loop. Before we do that, though, we save DI to AX so that, if + // length is positive, copying the remaining length bytes will write to the + // right place. + MOVQ DI, AX + ADDQ CX, DI + +finishSlowForwardCopy: + // !!! Repeat 8-byte load/stores until length <= 0. Ending with a negative + // length means that we overrun, but as above, that will be fixed up by + // subsequent iterations of the outermost loop. + CMPQ CX, $0 + JLE loop + MOVQ (R15), BX + MOVQ BX, (AX) + ADDQ $8, R15 + ADDQ $8, AX + SUBQ $8, CX + JMP finishSlowForwardCopy + +verySlowForwardCopy: + // verySlowForwardCopy is a simple implementation of forward copy. In C + // parlance, this is a do/while loop instead of a while loop, since we know + // that length > 0. In Go syntax: + // + // for { + // dst[d] = dst[d - offset] + // d++ + // length-- + // if length == 0 { + // break + // } + // } + MOVB (R15), BX + MOVB BX, (DI) + INCQ R15 + INCQ DI + DECQ CX + JNZ verySlowForwardCopy + JMP loop + +// The code above handles copy tags. +// ---------------------------------------- + +end: + // This is the end of the "for s < len(src)". + // + // if d != len(dst) { etc } + CMPQ DI, R10 + JNE errCorrupt + + // return 0 + MOVQ $0, ret+48(FP) + RET + +errCorrupt: + // return decodeErrCodeCorrupt + MOVQ $1, ret+48(FP) + RET diff --git a/vendor/github.com/klauspost/compress/snappy/decode_other.go b/vendor/github.com/klauspost/compress/snappy/decode_other.go new file mode 100644 index 0000000..8c9f204 --- /dev/null +++ b/vendor/github.com/klauspost/compress/snappy/decode_other.go @@ -0,0 +1,101 @@ +// Copyright 2016 The Snappy-Go Authors. All rights reserved. 
+// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build !amd64 appengine !gc noasm + +package snappy + +// decode writes the decoding of src to dst. It assumes that the varint-encoded +// length of the decompressed bytes has already been read, and that len(dst) +// equals that length. +// +// It returns 0 on success or a decodeErrCodeXxx error code on failure. +func decode(dst, src []byte) int { + var d, s, offset, length int + for s < len(src) { + switch src[s] & 0x03 { + case tagLiteral: + x := uint32(src[s] >> 2) + switch { + case x < 60: + s++ + case x == 60: + s += 2 + if uint(s) > uint(len(src)) { // The uint conversions catch overflow from the previous line. + return decodeErrCodeCorrupt + } + x = uint32(src[s-1]) + case x == 61: + s += 3 + if uint(s) > uint(len(src)) { // The uint conversions catch overflow from the previous line. + return decodeErrCodeCorrupt + } + x = uint32(src[s-2]) | uint32(src[s-1])<<8 + case x == 62: + s += 4 + if uint(s) > uint(len(src)) { // The uint conversions catch overflow from the previous line. + return decodeErrCodeCorrupt + } + x = uint32(src[s-3]) | uint32(src[s-2])<<8 | uint32(src[s-1])<<16 + case x == 63: + s += 5 + if uint(s) > uint(len(src)) { // The uint conversions catch overflow from the previous line. + return decodeErrCodeCorrupt + } + x = uint32(src[s-4]) | uint32(src[s-3])<<8 | uint32(src[s-2])<<16 | uint32(src[s-1])<<24 + } + length = int(x) + 1 + if length <= 0 { + return decodeErrCodeUnsupportedLiteralLength + } + if length > len(dst)-d || length > len(src)-s { + return decodeErrCodeCorrupt + } + copy(dst[d:], src[s:s+length]) + d += length + s += length + continue + + case tagCopy1: + s += 2 + if uint(s) > uint(len(src)) { // The uint conversions catch overflow from the previous line. + return decodeErrCodeCorrupt + } + length = 4 + int(src[s-2])>>2&0x7 + offset = int(uint32(src[s-2])&0xe0<<3 | uint32(src[s-1])) + + case tagCopy2: + s += 3 + if uint(s) > uint(len(src)) { // The uint conversions catch overflow from the previous line. + return decodeErrCodeCorrupt + } + length = 1 + int(src[s-3])>>2 + offset = int(uint32(src[s-2]) | uint32(src[s-1])<<8) + + case tagCopy4: + s += 5 + if uint(s) > uint(len(src)) { // The uint conversions catch overflow from the previous line. + return decodeErrCodeCorrupt + } + length = 1 + int(src[s-5])>>2 + offset = int(uint32(src[s-4]) | uint32(src[s-3])<<8 | uint32(src[s-2])<<16 | uint32(src[s-1])<<24) + } + + if offset <= 0 || d < offset || length > len(dst)-d { + return decodeErrCodeCorrupt + } + // Copy from an earlier sub-slice of dst to a later sub-slice. Unlike + // the built-in copy function, this byte-by-byte copy always runs + // forwards, even if the slices overlap. Conceptually, this is: + // + // d += forwardCopy(dst[d:d+length], dst[d-offset:]) + for end := d + length; d != end; d++ { + dst[d] = dst[d-offset] + } + } + if d != len(dst) { + return decodeErrCodeCorrupt + } + return 0 +} diff --git a/vendor/github.com/klauspost/compress/snappy/encode.go b/vendor/github.com/klauspost/compress/snappy/encode.go new file mode 100644 index 0000000..8749689 --- /dev/null +++ b/vendor/github.com/klauspost/compress/snappy/encode.go @@ -0,0 +1,285 @@ +// Copyright 2011 The Snappy-Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package snappy + +import ( + "encoding/binary" + "errors" + "io" +) + +// Encode returns the encoded form of src. 
The returned slice may be a sub- +// slice of dst if dst was large enough to hold the entire encoded block. +// Otherwise, a newly allocated slice will be returned. +// +// The dst and src must not overlap. It is valid to pass a nil dst. +func Encode(dst, src []byte) []byte { + if n := MaxEncodedLen(len(src)); n < 0 { + panic(ErrTooLarge) + } else if len(dst) < n { + dst = make([]byte, n) + } + + // The block starts with the varint-encoded length of the decompressed bytes. + d := binary.PutUvarint(dst, uint64(len(src))) + + for len(src) > 0 { + p := src + src = nil + if len(p) > maxBlockSize { + p, src = p[:maxBlockSize], p[maxBlockSize:] + } + if len(p) < minNonLiteralBlockSize { + d += emitLiteral(dst[d:], p) + } else { + d += encodeBlock(dst[d:], p) + } + } + return dst[:d] +} + +// inputMargin is the minimum number of extra input bytes to keep, inside +// encodeBlock's inner loop. On some architectures, this margin lets us +// implement a fast path for emitLiteral, where the copy of short (<= 16 byte) +// literals can be implemented as a single load to and store from a 16-byte +// register. That literal's actual length can be as short as 1 byte, so this +// can copy up to 15 bytes too much, but that's OK as subsequent iterations of +// the encoding loop will fix up the copy overrun, and this inputMargin ensures +// that we don't overrun the dst and src buffers. +const inputMargin = 16 - 1 + +// minNonLiteralBlockSize is the minimum size of the input to encodeBlock that +// could be encoded with a copy tag. This is the minimum with respect to the +// algorithm used by encodeBlock, not a minimum enforced by the file format. +// +// The encoded output must start with at least a 1 byte literal, as there are +// no previous bytes to copy. A minimal (1 byte) copy after that, generated +// from an emitCopy call in encodeBlock's main loop, would require at least +// another inputMargin bytes, for the reason above: we want any emitLiteral +// calls inside encodeBlock's main loop to use the fast path if possible, which +// requires being able to overrun by inputMargin bytes. Thus, +// minNonLiteralBlockSize equals 1 + 1 + inputMargin. +// +// The C++ code doesn't use this exact threshold, but it could, as discussed at +// https://groups.google.com/d/topic/snappy-compression/oGbhsdIJSJ8/discussion +// The difference between Go (2+inputMargin) and C++ (inputMargin) is purely an +// optimization. It should not affect the encoded form. This is tested by +// TestSameEncodingAsCppShortCopies. +const minNonLiteralBlockSize = 1 + 1 + inputMargin + +// MaxEncodedLen returns the maximum length of a snappy block, given its +// uncompressed length. +// +// It will return a negative value if srcLen is too large to encode. +func MaxEncodedLen(srcLen int) int { + n := uint64(srcLen) + if n > 0xffffffff { + return -1 + } + // Compressed data can be defined as: + // compressed := item* literal* + // item := literal* copy + // + // The trailing literal sequence has a space blowup of at most 62/60 + // since a literal of length 60 needs one tag byte + one extra byte + // for length information. + // + // Item blowup is trickier to measure. Suppose the "copy" op copies + // 4 bytes of data. Because of a special check in the encoding code, + // we produce a 4-byte copy only if the offset is < 65536. Therefore + // the copy op takes 3 bytes to encode, and this type of item leads + // to at most the 62/60 blowup for representing literals. + // + // Suppose the "copy" op copies 5 bytes of data. 
If the offset is big + // enough, it will take 5 bytes to encode the copy op. Therefore the + // worst case here is a one-byte literal followed by a five-byte copy. + // That is, 6 bytes of input turn into 7 bytes of "compressed" data. + // + // This last factor dominates the blowup, so the final estimate is: + n = 32 + n + n/6 + if n > 0xffffffff { + return -1 + } + return int(n) +} + +var errClosed = errors.New("snappy: Writer is closed") + +// NewWriter returns a new Writer that compresses to w. +// +// The Writer returned does not buffer writes. There is no need to Flush or +// Close such a Writer. +// +// Deprecated: the Writer returned is not suitable for many small writes, only +// for few large writes. Use NewBufferedWriter instead, which is efficient +// regardless of the frequency and shape of the writes, and remember to Close +// that Writer when done. +func NewWriter(w io.Writer) *Writer { + return &Writer{ + w: w, + obuf: make([]byte, obufLen), + } +} + +// NewBufferedWriter returns a new Writer that compresses to w, using the +// framing format described at +// https://github.com/google/snappy/blob/master/framing_format.txt +// +// The Writer returned buffers writes. Users must call Close to guarantee all +// data has been forwarded to the underlying io.Writer. They may also call +// Flush zero or more times before calling Close. +func NewBufferedWriter(w io.Writer) *Writer { + return &Writer{ + w: w, + ibuf: make([]byte, 0, maxBlockSize), + obuf: make([]byte, obufLen), + } +} + +// Writer is an io.Writer than can write Snappy-compressed bytes. +type Writer struct { + w io.Writer + err error + + // ibuf is a buffer for the incoming (uncompressed) bytes. + // + // Its use is optional. For backwards compatibility, Writers created by the + // NewWriter function have ibuf == nil, do not buffer incoming bytes, and + // therefore do not need to be Flush'ed or Close'd. + ibuf []byte + + // obuf is a buffer for the outgoing (compressed) bytes. + obuf []byte + + // wroteStreamHeader is whether we have written the stream header. + wroteStreamHeader bool +} + +// Reset discards the writer's state and switches the Snappy writer to write to +// w. This permits reusing a Writer rather than allocating a new one. +func (w *Writer) Reset(writer io.Writer) { + w.w = writer + w.err = nil + if w.ibuf != nil { + w.ibuf = w.ibuf[:0] + } + w.wroteStreamHeader = false +} + +// Write satisfies the io.Writer interface. +func (w *Writer) Write(p []byte) (nRet int, errRet error) { + if w.ibuf == nil { + // Do not buffer incoming bytes. This does not perform or compress well + // if the caller of Writer.Write writes many small slices. This + // behavior is therefore deprecated, but still supported for backwards + // compatibility with code that doesn't explicitly Flush or Close. + return w.write(p) + } + + // The remainder of this method is based on bufio.Writer.Write from the + // standard library. + + for len(p) > (cap(w.ibuf)-len(w.ibuf)) && w.err == nil { + var n int + if len(w.ibuf) == 0 { + // Large write, empty buffer. + // Write directly from p to avoid copy. 
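A worked instance of the MaxEncodedLen bound derived earlier in this file (n = 32 + n + n/6), a sketch rather than vendored code, using the 64 KiB block size mentioned in encodeBlock as the input length:

package main

import (
    "fmt"

    "github.com/klauspost/compress/snappy"
)

func main() {
    // Worst-case blowup for one full 64 KiB block:
    // 32 + 65536 + 65536/6 = 32 + 65536 + 10922 = 76490.
    fmt.Println(snappy.MaxEncodedLen(65536)) // 76490

    // A source length that does not fit in 32 bits makes MaxEncodedLen
    // return -1, which Encode turns into panic(ErrTooLarge).
}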
+ n, _ = w.write(p) + } else { + n = copy(w.ibuf[len(w.ibuf):cap(w.ibuf)], p) + w.ibuf = w.ibuf[:len(w.ibuf)+n] + w.Flush() + } + nRet += n + p = p[n:] + } + if w.err != nil { + return nRet, w.err + } + n := copy(w.ibuf[len(w.ibuf):cap(w.ibuf)], p) + w.ibuf = w.ibuf[:len(w.ibuf)+n] + nRet += n + return nRet, nil +} + +func (w *Writer) write(p []byte) (nRet int, errRet error) { + if w.err != nil { + return 0, w.err + } + for len(p) > 0 { + obufStart := len(magicChunk) + if !w.wroteStreamHeader { + w.wroteStreamHeader = true + copy(w.obuf, magicChunk) + obufStart = 0 + } + + var uncompressed []byte + if len(p) > maxBlockSize { + uncompressed, p = p[:maxBlockSize], p[maxBlockSize:] + } else { + uncompressed, p = p, nil + } + checksum := crc(uncompressed) + + // Compress the buffer, discarding the result if the improvement + // isn't at least 12.5%. + compressed := Encode(w.obuf[obufHeaderLen:], uncompressed) + chunkType := uint8(chunkTypeCompressedData) + chunkLen := 4 + len(compressed) + obufEnd := obufHeaderLen + len(compressed) + if len(compressed) >= len(uncompressed)-len(uncompressed)/8 { + chunkType = chunkTypeUncompressedData + chunkLen = 4 + len(uncompressed) + obufEnd = obufHeaderLen + } + + // Fill in the per-chunk header that comes before the body. + w.obuf[len(magicChunk)+0] = chunkType + w.obuf[len(magicChunk)+1] = uint8(chunkLen >> 0) + w.obuf[len(magicChunk)+2] = uint8(chunkLen >> 8) + w.obuf[len(magicChunk)+3] = uint8(chunkLen >> 16) + w.obuf[len(magicChunk)+4] = uint8(checksum >> 0) + w.obuf[len(magicChunk)+5] = uint8(checksum >> 8) + w.obuf[len(magicChunk)+6] = uint8(checksum >> 16) + w.obuf[len(magicChunk)+7] = uint8(checksum >> 24) + + if _, err := w.w.Write(w.obuf[obufStart:obufEnd]); err != nil { + w.err = err + return nRet, err + } + if chunkType == chunkTypeUncompressedData { + if _, err := w.w.Write(uncompressed); err != nil { + w.err = err + return nRet, err + } + } + nRet += len(uncompressed) + } + return nRet, nil +} + +// Flush flushes the Writer to its underlying io.Writer. +func (w *Writer) Flush() error { + if w.err != nil { + return w.err + } + if len(w.ibuf) == 0 { + return nil + } + w.write(w.ibuf) + w.ibuf = w.ibuf[:0] + return w.err +} + +// Close calls Flush and then closes the Writer. +func (w *Writer) Close() error { + w.Flush() + ret := w.err + if w.err == nil { + w.err = errClosed + } + return ret +} diff --git a/vendor/github.com/klauspost/compress/snappy/encode_amd64.go b/vendor/github.com/klauspost/compress/snappy/encode_amd64.go new file mode 100644 index 0000000..150d91b --- /dev/null +++ b/vendor/github.com/klauspost/compress/snappy/encode_amd64.go @@ -0,0 +1,29 @@ +// Copyright 2016 The Snappy-Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build !appengine +// +build gc +// +build !noasm + +package snappy + +// emitLiteral has the same semantics as in encode_other.go. +// +//go:noescape +func emitLiteral(dst, lit []byte) int + +// emitCopy has the same semantics as in encode_other.go. +// +//go:noescape +func emitCopy(dst []byte, offset, length int) int + +// extendMatch has the same semantics as in encode_other.go. +// +//go:noescape +func extendMatch(src []byte, i, j int) int + +// encodeBlock has the same semantics as in encode_other.go. 
+// +//go:noescape +func encodeBlock(dst, src []byte) (d int) diff --git a/vendor/github.com/klauspost/compress/snappy/encode_amd64.s b/vendor/github.com/klauspost/compress/snappy/encode_amd64.s new file mode 100644 index 0000000..adfd979 --- /dev/null +++ b/vendor/github.com/klauspost/compress/snappy/encode_amd64.s @@ -0,0 +1,730 @@ +// Copyright 2016 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build !appengine +// +build gc +// +build !noasm + +#include "textflag.h" + +// The XXX lines assemble on Go 1.4, 1.5 and 1.7, but not 1.6, due to a +// Go toolchain regression. See https://github.com/golang/go/issues/15426 and +// https://github.com/golang/snappy/issues/29 +// +// As a workaround, the package was built with a known good assembler, and +// those instructions were disassembled by "objdump -d" to yield the +// 4e 0f b7 7c 5c 78 movzwq 0x78(%rsp,%r11,2),%r15 +// style comments, in AT&T asm syntax. Note that rsp here is a physical +// register, not Go/asm's SP pseudo-register (see https://golang.org/doc/asm). +// The instructions were then encoded as "BYTE $0x.." sequences, which assemble +// fine on Go 1.6. + +// The asm code generally follows the pure Go code in encode_other.go, except +// where marked with a "!!!". + +// ---------------------------------------------------------------------------- + +// func emitLiteral(dst, lit []byte) int +// +// All local variables fit into registers. The register allocation: +// - AX len(lit) +// - BX n +// - DX return value +// - DI &dst[i] +// - R10 &lit[0] +// +// The 24 bytes of stack space is to call runtime·memmove. +// +// The unusual register allocation of local variables, such as R10 for the +// source pointer, matches the allocation used at the call site in encodeBlock, +// which makes it easier to manually inline this function. +TEXT ·emitLiteral(SB), NOSPLIT, $24-56 + MOVQ dst_base+0(FP), DI + MOVQ lit_base+24(FP), R10 + MOVQ lit_len+32(FP), AX + MOVQ AX, DX + MOVL AX, BX + SUBL $1, BX + + CMPL BX, $60 + JLT oneByte + CMPL BX, $256 + JLT twoBytes + +threeBytes: + MOVB $0xf4, 0(DI) + MOVW BX, 1(DI) + ADDQ $3, DI + ADDQ $3, DX + JMP memmove + +twoBytes: + MOVB $0xf0, 0(DI) + MOVB BX, 1(DI) + ADDQ $2, DI + ADDQ $2, DX + JMP memmove + +oneByte: + SHLB $2, BX + MOVB BX, 0(DI) + ADDQ $1, DI + ADDQ $1, DX + +memmove: + MOVQ DX, ret+48(FP) + + // copy(dst[i:], lit) + // + // This means calling runtime·memmove(&dst[i], &lit[0], len(lit)), so we push + // DI, R10 and AX as arguments. + MOVQ DI, 0(SP) + MOVQ R10, 8(SP) + MOVQ AX, 16(SP) + CALL runtime·memmove(SB) + RET + +// ---------------------------------------------------------------------------- + +// func emitCopy(dst []byte, offset, length int) int +// +// All local variables fit into registers. The register allocation: +// - AX length +// - SI &dst[0] +// - DI &dst[i] +// - R11 offset +// +// The unusual register allocation of local variables, such as R11 for the +// offset, matches the allocation used at the call site in encodeBlock, which +// makes it easier to manually inline this function. +TEXT ·emitCopy(SB), NOSPLIT, $0-48 + MOVQ dst_base+0(FP), DI + MOVQ DI, SI + MOVQ offset+24(FP), R11 + MOVQ length+32(FP), AX + +loop0: + // for length >= 68 { etc } + CMPL AX, $68 + JLT step1 + + // Emit a length 64 copy, encoded as 3 bytes. 
+ MOVB $0xfe, 0(DI) + MOVW R11, 1(DI) + ADDQ $3, DI + SUBL $64, AX + JMP loop0 + +step1: + // if length > 64 { etc } + CMPL AX, $64 + JLE step2 + + // Emit a length 60 copy, encoded as 3 bytes. + MOVB $0xee, 0(DI) + MOVW R11, 1(DI) + ADDQ $3, DI + SUBL $60, AX + +step2: + // if length >= 12 || offset >= 2048 { goto step3 } + CMPL AX, $12 + JGE step3 + CMPL R11, $2048 + JGE step3 + + // Emit the remaining copy, encoded as 2 bytes. + MOVB R11, 1(DI) + SHRL $8, R11 + SHLB $5, R11 + SUBB $4, AX + SHLB $2, AX + ORB AX, R11 + ORB $1, R11 + MOVB R11, 0(DI) + ADDQ $2, DI + + // Return the number of bytes written. + SUBQ SI, DI + MOVQ DI, ret+40(FP) + RET + +step3: + // Emit the remaining copy, encoded as 3 bytes. + SUBL $1, AX + SHLB $2, AX + ORB $2, AX + MOVB AX, 0(DI) + MOVW R11, 1(DI) + ADDQ $3, DI + + // Return the number of bytes written. + SUBQ SI, DI + MOVQ DI, ret+40(FP) + RET + +// ---------------------------------------------------------------------------- + +// func extendMatch(src []byte, i, j int) int +// +// All local variables fit into registers. The register allocation: +// - DX &src[0] +// - SI &src[j] +// - R13 &src[len(src) - 8] +// - R14 &src[len(src)] +// - R15 &src[i] +// +// The unusual register allocation of local variables, such as R15 for a source +// pointer, matches the allocation used at the call site in encodeBlock, which +// makes it easier to manually inline this function. +TEXT ·extendMatch(SB), NOSPLIT, $0-48 + MOVQ src_base+0(FP), DX + MOVQ src_len+8(FP), R14 + MOVQ i+24(FP), R15 + MOVQ j+32(FP), SI + ADDQ DX, R14 + ADDQ DX, R15 + ADDQ DX, SI + MOVQ R14, R13 + SUBQ $8, R13 + +cmp8: + // As long as we are 8 or more bytes before the end of src, we can load and + // compare 8 bytes at a time. If those 8 bytes are equal, repeat. + CMPQ SI, R13 + JA cmp1 + MOVQ (R15), AX + MOVQ (SI), BX + CMPQ AX, BX + JNE bsf + ADDQ $8, R15 + ADDQ $8, SI + JMP cmp8 + +bsf: + // If those 8 bytes were not equal, XOR the two 8 byte values, and return + // the index of the first byte that differs. The BSF instruction finds the + // least significant 1 bit, the amd64 architecture is little-endian, and + // the shift by 3 converts a bit index to a byte index. + XORQ AX, BX + BSFQ BX, BX + SHRQ $3, BX + ADDQ BX, SI + + // Convert from &src[ret] to ret. + SUBQ DX, SI + MOVQ SI, ret+40(FP) + RET + +cmp1: + // In src's tail, compare 1 byte at a time. + CMPQ SI, R14 + JAE extendMatchEnd + MOVB (R15), AX + MOVB (SI), BX + CMPB AX, BX + JNE extendMatchEnd + ADDQ $1, R15 + ADDQ $1, SI + JMP cmp1 + +extendMatchEnd: + // Convert from &src[ret] to ret. + SUBQ DX, SI + MOVQ SI, ret+40(FP) + RET + +// ---------------------------------------------------------------------------- + +// func encodeBlock(dst, src []byte) (d int) +// +// All local variables fit into registers, other than "var table". The register +// allocation: +// - AX . . +// - BX . . +// - CX 56 shift (note that amd64 shifts by non-immediates must use CX). +// - DX 64 &src[0], tableSize +// - SI 72 &src[s] +// - DI 80 &dst[d] +// - R9 88 sLimit +// - R10 . &src[nextEmit] +// - R11 96 prevHash, currHash, nextHash, offset +// - R12 104 &src[base], skip +// - R13 . &src[nextS], &src[len(src) - 8] +// - R14 . len(src), bytesBetweenHashLookups, &src[len(src)], x +// - R15 112 candidate +// +// The second column (56, 64, etc) is the stack offset to spill the registers +// when calling other functions. We could pack this slightly tighter, but it's +// simpler to have a dedicated spill map independent of the function called. 
+// +// "var table [maxTableSize]uint16" takes up 32768 bytes of stack space. An +// extra 56 bytes, to call other functions, and an extra 64 bytes, to spill +// local variables (registers) during calls gives 32768 + 56 + 64 = 32888. +TEXT ·encodeBlock(SB), 0, $32888-56 + MOVQ dst_base+0(FP), DI + MOVQ src_base+24(FP), SI + MOVQ src_len+32(FP), R14 + + // shift, tableSize := uint32(32-8), 1<<8 + MOVQ $24, CX + MOVQ $256, DX + +calcShift: + // for ; tableSize < maxTableSize && tableSize < len(src); tableSize *= 2 { + // shift-- + // } + CMPQ DX, $16384 + JGE varTable + CMPQ DX, R14 + JGE varTable + SUBQ $1, CX + SHLQ $1, DX + JMP calcShift + +varTable: + // var table [maxTableSize]uint16 + // + // In the asm code, unlike the Go code, we can zero-initialize only the + // first tableSize elements. Each uint16 element is 2 bytes and each MOVOU + // writes 16 bytes, so we can do only tableSize/8 writes instead of the + // 2048 writes that would zero-initialize all of table's 32768 bytes. + SHRQ $3, DX + LEAQ table-32768(SP), BX + PXOR X0, X0 + +memclr: + MOVOU X0, 0(BX) + ADDQ $16, BX + SUBQ $1, DX + JNZ memclr + + // !!! DX = &src[0] + MOVQ SI, DX + + // sLimit := len(src) - inputMargin + MOVQ R14, R9 + SUBQ $15, R9 + + // !!! Pre-emptively spill CX, DX and R9 to the stack. Their values don't + // change for the rest of the function. + MOVQ CX, 56(SP) + MOVQ DX, 64(SP) + MOVQ R9, 88(SP) + + // nextEmit := 0 + MOVQ DX, R10 + + // s := 1 + ADDQ $1, SI + + // nextHash := hash(load32(src, s), shift) + MOVL 0(SI), R11 + IMULL $0x1e35a7bd, R11 + SHRL CX, R11 + +outer: + // for { etc } + + // skip := 32 + MOVQ $32, R12 + + // nextS := s + MOVQ SI, R13 + + // candidate := 0 + MOVQ $0, R15 + +inner0: + // for { etc } + + // s := nextS + MOVQ R13, SI + + // bytesBetweenHashLookups := skip >> 5 + MOVQ R12, R14 + SHRQ $5, R14 + + // nextS = s + bytesBetweenHashLookups + ADDQ R14, R13 + + // skip += bytesBetweenHashLookups + ADDQ R14, R12 + + // if nextS > sLimit { goto emitRemainder } + MOVQ R13, AX + SUBQ DX, AX + CMPQ AX, R9 + JA emitRemainder + + // candidate = int(table[nextHash]) + // XXX: MOVWQZX table-32768(SP)(R11*2), R15 + // XXX: 4e 0f b7 7c 5c 78 movzwq 0x78(%rsp,%r11,2),%r15 + BYTE $0x4e + BYTE $0x0f + BYTE $0xb7 + BYTE $0x7c + BYTE $0x5c + BYTE $0x78 + + // table[nextHash] = uint16(s) + MOVQ SI, AX + SUBQ DX, AX + + // XXX: MOVW AX, table-32768(SP)(R11*2) + // XXX: 66 42 89 44 5c 78 mov %ax,0x78(%rsp,%r11,2) + BYTE $0x66 + BYTE $0x42 + BYTE $0x89 + BYTE $0x44 + BYTE $0x5c + BYTE $0x78 + + // nextHash = hash(load32(src, nextS), shift) + MOVL 0(R13), R11 + IMULL $0x1e35a7bd, R11 + SHRL CX, R11 + + // if load32(src, s) != load32(src, candidate) { continue } break + MOVL 0(SI), AX + MOVL (DX)(R15*1), BX + CMPL AX, BX + JNE inner0 + +fourByteMatch: + // As per the encode_other.go code: + // + // A 4-byte match has been found. We'll later see etc. + + // !!! Jump to a fast path for short (<= 16 byte) literals. See the comment + // on inputMargin in encode.go. + MOVQ SI, AX + SUBQ R10, AX + CMPQ AX, $16 + JLE emitLiteralFastPath + + // ---------------------------------------- + // Begin inline of the emitLiteral call. 
+ // + // d += emitLiteral(dst[d:], src[nextEmit:s]) + + MOVL AX, BX + SUBL $1, BX + + CMPL BX, $60 + JLT inlineEmitLiteralOneByte + CMPL BX, $256 + JLT inlineEmitLiteralTwoBytes + +inlineEmitLiteralThreeBytes: + MOVB $0xf4, 0(DI) + MOVW BX, 1(DI) + ADDQ $3, DI + JMP inlineEmitLiteralMemmove + +inlineEmitLiteralTwoBytes: + MOVB $0xf0, 0(DI) + MOVB BX, 1(DI) + ADDQ $2, DI + JMP inlineEmitLiteralMemmove + +inlineEmitLiteralOneByte: + SHLB $2, BX + MOVB BX, 0(DI) + ADDQ $1, DI + +inlineEmitLiteralMemmove: + // Spill local variables (registers) onto the stack; call; unspill. + // + // copy(dst[i:], lit) + // + // This means calling runtime·memmove(&dst[i], &lit[0], len(lit)), so we push + // DI, R10 and AX as arguments. + MOVQ DI, 0(SP) + MOVQ R10, 8(SP) + MOVQ AX, 16(SP) + ADDQ AX, DI // Finish the "d +=" part of "d += emitLiteral(etc)". + MOVQ SI, 72(SP) + MOVQ DI, 80(SP) + MOVQ R15, 112(SP) + CALL runtime·memmove(SB) + MOVQ 56(SP), CX + MOVQ 64(SP), DX + MOVQ 72(SP), SI + MOVQ 80(SP), DI + MOVQ 88(SP), R9 + MOVQ 112(SP), R15 + JMP inner1 + +inlineEmitLiteralEnd: + // End inline of the emitLiteral call. + // ---------------------------------------- + +emitLiteralFastPath: + // !!! Emit the 1-byte encoding "uint8(len(lit)-1)<<2". + MOVB AX, BX + SUBB $1, BX + SHLB $2, BX + MOVB BX, (DI) + ADDQ $1, DI + + // !!! Implement the copy from lit to dst as a 16-byte load and store. + // (Encode's documentation says that dst and src must not overlap.) + // + // This always copies 16 bytes, instead of only len(lit) bytes, but that's + // OK. Subsequent iterations will fix up the overrun. + // + // Note that on amd64, it is legal and cheap to issue unaligned 8-byte or + // 16-byte loads and stores. This technique probably wouldn't be as + // effective on architectures that are fussier about alignment. + MOVOU 0(R10), X0 + MOVOU X0, 0(DI) + ADDQ AX, DI + +inner1: + // for { etc } + + // base := s + MOVQ SI, R12 + + // !!! offset := base - candidate + MOVQ R12, R11 + SUBQ R15, R11 + SUBQ DX, R11 + + // ---------------------------------------- + // Begin inline of the extendMatch call. + // + // s = extendMatch(src, candidate+4, s+4) + + // !!! R14 = &src[len(src)] + MOVQ src_len+32(FP), R14 + ADDQ DX, R14 + + // !!! R13 = &src[len(src) - 8] + MOVQ R14, R13 + SUBQ $8, R13 + + // !!! R15 = &src[candidate + 4] + ADDQ $4, R15 + ADDQ DX, R15 + + // !!! s += 4 + ADDQ $4, SI + +inlineExtendMatchCmp8: + // As long as we are 8 or more bytes before the end of src, we can load and + // compare 8 bytes at a time. If those 8 bytes are equal, repeat. + CMPQ SI, R13 + JA inlineExtendMatchCmp1 + MOVQ (R15), AX + MOVQ (SI), BX + CMPQ AX, BX + JNE inlineExtendMatchBSF + ADDQ $8, R15 + ADDQ $8, SI + JMP inlineExtendMatchCmp8 + +inlineExtendMatchBSF: + // If those 8 bytes were not equal, XOR the two 8 byte values, and return + // the index of the first byte that differs. The BSF instruction finds the + // least significant 1 bit, the amd64 architecture is little-endian, and + // the shift by 3 converts a bit index to a byte index. + XORQ AX, BX + BSFQ BX, BX + SHRQ $3, BX + ADDQ BX, SI + JMP inlineExtendMatchEnd + +inlineExtendMatchCmp1: + // In src's tail, compare 1 byte at a time. + CMPQ SI, R14 + JAE inlineExtendMatchEnd + MOVB (R15), AX + MOVB (SI), BX + CMPB AX, BX + JNE inlineExtendMatchEnd + ADDQ $1, R15 + ADDQ $1, SI + JMP inlineExtendMatchCmp1 + +inlineExtendMatchEnd: + // End inline of the extendMatch call. 
+ // ---------------------------------------- + + // ---------------------------------------- + // Begin inline of the emitCopy call. + // + // d += emitCopy(dst[d:], base-candidate, s-base) + + // !!! length := s - base + MOVQ SI, AX + SUBQ R12, AX + +inlineEmitCopyLoop0: + // for length >= 68 { etc } + CMPL AX, $68 + JLT inlineEmitCopyStep1 + + // Emit a length 64 copy, encoded as 3 bytes. + MOVB $0xfe, 0(DI) + MOVW R11, 1(DI) + ADDQ $3, DI + SUBL $64, AX + JMP inlineEmitCopyLoop0 + +inlineEmitCopyStep1: + // if length > 64 { etc } + CMPL AX, $64 + JLE inlineEmitCopyStep2 + + // Emit a length 60 copy, encoded as 3 bytes. + MOVB $0xee, 0(DI) + MOVW R11, 1(DI) + ADDQ $3, DI + SUBL $60, AX + +inlineEmitCopyStep2: + // if length >= 12 || offset >= 2048 { goto inlineEmitCopyStep3 } + CMPL AX, $12 + JGE inlineEmitCopyStep3 + CMPL R11, $2048 + JGE inlineEmitCopyStep3 + + // Emit the remaining copy, encoded as 2 bytes. + MOVB R11, 1(DI) + SHRL $8, R11 + SHLB $5, R11 + SUBB $4, AX + SHLB $2, AX + ORB AX, R11 + ORB $1, R11 + MOVB R11, 0(DI) + ADDQ $2, DI + JMP inlineEmitCopyEnd + +inlineEmitCopyStep3: + // Emit the remaining copy, encoded as 3 bytes. + SUBL $1, AX + SHLB $2, AX + ORB $2, AX + MOVB AX, 0(DI) + MOVW R11, 1(DI) + ADDQ $3, DI + +inlineEmitCopyEnd: + // End inline of the emitCopy call. + // ---------------------------------------- + + // nextEmit = s + MOVQ SI, R10 + + // if s >= sLimit { goto emitRemainder } + MOVQ SI, AX + SUBQ DX, AX + CMPQ AX, R9 + JAE emitRemainder + + // As per the encode_other.go code: + // + // We could immediately etc. + + // x := load64(src, s-1) + MOVQ -1(SI), R14 + + // prevHash := hash(uint32(x>>0), shift) + MOVL R14, R11 + IMULL $0x1e35a7bd, R11 + SHRL CX, R11 + + // table[prevHash] = uint16(s-1) + MOVQ SI, AX + SUBQ DX, AX + SUBQ $1, AX + + // XXX: MOVW AX, table-32768(SP)(R11*2) + // XXX: 66 42 89 44 5c 78 mov %ax,0x78(%rsp,%r11,2) + BYTE $0x66 + BYTE $0x42 + BYTE $0x89 + BYTE $0x44 + BYTE $0x5c + BYTE $0x78 + + // currHash := hash(uint32(x>>8), shift) + SHRQ $8, R14 + MOVL R14, R11 + IMULL $0x1e35a7bd, R11 + SHRL CX, R11 + + // candidate = int(table[currHash]) + // XXX: MOVWQZX table-32768(SP)(R11*2), R15 + // XXX: 4e 0f b7 7c 5c 78 movzwq 0x78(%rsp,%r11,2),%r15 + BYTE $0x4e + BYTE $0x0f + BYTE $0xb7 + BYTE $0x7c + BYTE $0x5c + BYTE $0x78 + + // table[currHash] = uint16(s) + ADDQ $1, AX + + // XXX: MOVW AX, table-32768(SP)(R11*2) + // XXX: 66 42 89 44 5c 78 mov %ax,0x78(%rsp,%r11,2) + BYTE $0x66 + BYTE $0x42 + BYTE $0x89 + BYTE $0x44 + BYTE $0x5c + BYTE $0x78 + + // if uint32(x>>8) == load32(src, candidate) { continue } + MOVL (DX)(R15*1), BX + CMPL R14, BX + JEQ inner1 + + // nextHash = hash(uint32(x>>16), shift) + SHRQ $8, R14 + MOVL R14, R11 + IMULL $0x1e35a7bd, R11 + SHRL CX, R11 + + // s++ + ADDQ $1, SI + + // break out of the inner1 for loop, i.e. continue the outer loop. + JMP outer + +emitRemainder: + // if nextEmit < len(src) { etc } + MOVQ src_len+32(FP), AX + ADDQ DX, AX + CMPQ R10, AX + JEQ encodeBlockEnd + + // d += emitLiteral(dst[d:], src[nextEmit:]) + // + // Push args. + MOVQ DI, 0(SP) + MOVQ $0, 8(SP) // Unnecessary, as the callee ignores it, but conservative. + MOVQ $0, 16(SP) // Unnecessary, as the callee ignores it, but conservative. + MOVQ R10, 24(SP) + SUBQ R10, AX + MOVQ AX, 32(SP) + MOVQ AX, 40(SP) // Unnecessary, as the callee ignores it, but conservative. + + // Spill local variables (registers) onto the stack; call; unspill. 
+ MOVQ DI, 80(SP) + CALL ·emitLiteral(SB) + MOVQ 80(SP), DI + + // Finish the "d +=" part of "d += emitLiteral(etc)". + ADDQ 48(SP), DI + +encodeBlockEnd: + MOVQ dst_base+0(FP), AX + SUBQ AX, DI + MOVQ DI, d+48(FP) + RET diff --git a/vendor/github.com/klauspost/compress/snappy/encode_other.go b/vendor/github.com/klauspost/compress/snappy/encode_other.go new file mode 100644 index 0000000..dbcae90 --- /dev/null +++ b/vendor/github.com/klauspost/compress/snappy/encode_other.go @@ -0,0 +1,238 @@ +// Copyright 2016 The Snappy-Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build !amd64 appengine !gc noasm + +package snappy + +func load32(b []byte, i int) uint32 { + b = b[i : i+4 : len(b)] // Help the compiler eliminate bounds checks on the next line. + return uint32(b[0]) | uint32(b[1])<<8 | uint32(b[2])<<16 | uint32(b[3])<<24 +} + +func load64(b []byte, i int) uint64 { + b = b[i : i+8 : len(b)] // Help the compiler eliminate bounds checks on the next line. + return uint64(b[0]) | uint64(b[1])<<8 | uint64(b[2])<<16 | uint64(b[3])<<24 | + uint64(b[4])<<32 | uint64(b[5])<<40 | uint64(b[6])<<48 | uint64(b[7])<<56 +} + +// emitLiteral writes a literal chunk and returns the number of bytes written. +// +// It assumes that: +// dst is long enough to hold the encoded bytes +// 1 <= len(lit) && len(lit) <= 65536 +func emitLiteral(dst, lit []byte) int { + i, n := 0, uint(len(lit)-1) + switch { + case n < 60: + dst[0] = uint8(n)<<2 | tagLiteral + i = 1 + case n < 1<<8: + dst[0] = 60<<2 | tagLiteral + dst[1] = uint8(n) + i = 2 + default: + dst[0] = 61<<2 | tagLiteral + dst[1] = uint8(n) + dst[2] = uint8(n >> 8) + i = 3 + } + return i + copy(dst[i:], lit) +} + +// emitCopy writes a copy chunk and returns the number of bytes written. +// +// It assumes that: +// dst is long enough to hold the encoded bytes +// 1 <= offset && offset <= 65535 +// 4 <= length && length <= 65535 +func emitCopy(dst []byte, offset, length int) int { + i := 0 + // The maximum length for a single tagCopy1 or tagCopy2 op is 64 bytes. The + // threshold for this loop is a little higher (at 68 = 64 + 4), and the + // length emitted down below is is a little lower (at 60 = 64 - 4), because + // it's shorter to encode a length 67 copy as a length 60 tagCopy2 followed + // by a length 7 tagCopy1 (which encodes as 3+2 bytes) than to encode it as + // a length 64 tagCopy2 followed by a length 3 tagCopy2 (which encodes as + // 3+3 bytes). The magic 4 in the 64±4 is because the minimum length for a + // tagCopy1 op is 4 bytes, which is why a length 3 copy has to be an + // encodes-as-3-bytes tagCopy2 instead of an encodes-as-2-bytes tagCopy1. + for length >= 68 { + // Emit a length 64 copy, encoded as 3 bytes. + dst[i+0] = 63<<2 | tagCopy2 + dst[i+1] = uint8(offset) + dst[i+2] = uint8(offset >> 8) + i += 3 + length -= 64 + } + if length > 64 { + // Emit a length 60 copy, encoded as 3 bytes. + dst[i+0] = 59<<2 | tagCopy2 + dst[i+1] = uint8(offset) + dst[i+2] = uint8(offset >> 8) + i += 3 + length -= 60 + } + if length >= 12 || offset >= 2048 { + // Emit the remaining copy, encoded as 3 bytes. + dst[i+0] = uint8(length-1)<<2 | tagCopy2 + dst[i+1] = uint8(offset) + dst[i+2] = uint8(offset >> 8) + return i + 3 + } + // Emit the remaining copy, encoded as 2 bytes. 
+ dst[i+0] = uint8(offset>>8)<<5 | uint8(length-4)<<2 | tagCopy1 + dst[i+1] = uint8(offset) + return i + 2 +} + +// extendMatch returns the largest k such that k <= len(src) and that +// src[i:i+k-j] and src[j:k] have the same contents. +// +// It assumes that: +// 0 <= i && i < j && j <= len(src) +func extendMatch(src []byte, i, j int) int { + for ; j < len(src) && src[i] == src[j]; i, j = i+1, j+1 { + } + return j +} + +func hash(u, shift uint32) uint32 { + return (u * 0x1e35a7bd) >> shift +} + +// encodeBlock encodes a non-empty src to a guaranteed-large-enough dst. It +// assumes that the varint-encoded length of the decompressed bytes has already +// been written. +// +// It also assumes that: +// len(dst) >= MaxEncodedLen(len(src)) && +// minNonLiteralBlockSize <= len(src) && len(src) <= maxBlockSize +func encodeBlock(dst, src []byte) (d int) { + // Initialize the hash table. Its size ranges from 1<<8 to 1<<14 inclusive. + // The table element type is uint16, as s < sLimit and sLimit < len(src) + // and len(src) <= maxBlockSize and maxBlockSize == 65536. + const ( + maxTableSize = 1 << 14 + // tableMask is redundant, but helps the compiler eliminate bounds + // checks. + tableMask = maxTableSize - 1 + ) + shift := uint32(32 - 8) + for tableSize := 1 << 8; tableSize < maxTableSize && tableSize < len(src); tableSize *= 2 { + shift-- + } + // In Go, all array elements are zero-initialized, so there is no advantage + // to a smaller tableSize per se. However, it matches the C++ algorithm, + // and in the asm versions of this code, we can get away with zeroing only + // the first tableSize elements. + var table [maxTableSize]uint16 + + // sLimit is when to stop looking for offset/length copies. The inputMargin + // lets us use a fast path for emitLiteral in the main loop, while we are + // looking for copies. + sLimit := len(src) - inputMargin + + // nextEmit is where in src the next emitLiteral should start from. + nextEmit := 0 + + // The encoded form must start with a literal, as there are no previous + // bytes to copy, so we start looking for hash matches at s == 1. + s := 1 + nextHash := hash(load32(src, s), shift) + + for { + // Copied from the C++ snappy implementation: + // + // Heuristic match skipping: If 32 bytes are scanned with no matches + // found, start looking only at every other byte. If 32 more bytes are + // scanned (or skipped), look at every third byte, etc.. When a match + // is found, immediately go back to looking at every byte. This is a + // small loss (~5% performance, ~0.1% density) for compressible data + // due to more bookkeeping, but for non-compressible data (such as + // JPEG) it's a huge win since the compressor quickly "realizes" the + // data is incompressible and doesn't bother looking for matches + // everywhere. + // + // The "skip" variable keeps track of how many bytes there are since + // the last match; dividing it by 32 (ie. right-shifting by five) gives + // the number of bytes to move ahead for each iteration. + skip := 32 + + nextS := s + candidate := 0 + for { + s = nextS + bytesBetweenHashLookups := skip >> 5 + nextS = s + bytesBetweenHashLookups + skip += bytesBetweenHashLookups + if nextS > sLimit { + goto emitRemainder + } + candidate = int(table[nextHash&tableMask]) + table[nextHash&tableMask] = uint16(s) + nextHash = hash(load32(src, nextS), shift) + if load32(src, s) == load32(src, candidate) { + break + } + } + + // A 4-byte match has been found. We'll later see if more than 4 bytes + // match. 
But, prior to the match, src[nextEmit:s] are unmatched. Emit + // them as literal bytes. + d += emitLiteral(dst[d:], src[nextEmit:s]) + + // Call emitCopy, and then see if another emitCopy could be our next + // move. Repeat until we find no match for the input immediately after + // what was consumed by the last emitCopy call. + // + // If we exit this loop normally then we need to call emitLiteral next, + // though we don't yet know how big the literal will be. We handle that + // by proceeding to the next iteration of the main loop. We also can + // exit this loop via goto if we get close to exhausting the input. + for { + // Invariant: we have a 4-byte match at s, and no need to emit any + // literal bytes prior to s. + base := s + + // Extend the 4-byte match as long as possible. + // + // This is an inlined version of: + // s = extendMatch(src, candidate+4, s+4) + s += 4 + for i := candidate + 4; s < len(src) && src[i] == src[s]; i, s = i+1, s+1 { + } + + d += emitCopy(dst[d:], base-candidate, s-base) + nextEmit = s + if s >= sLimit { + goto emitRemainder + } + + // We could immediately start working at s now, but to improve + // compression we first update the hash table at s-1 and at s. If + // another emitCopy is not our next move, also calculate nextHash + // at s+1. At least on GOARCH=amd64, these three hash calculations + // are faster as one load64 call (with some shifts) instead of + // three load32 calls. + x := load64(src, s-1) + prevHash := hash(uint32(x>>0), shift) + table[prevHash&tableMask] = uint16(s - 1) + currHash := hash(uint32(x>>8), shift) + candidate = int(table[currHash&tableMask]) + table[currHash&tableMask] = uint16(s) + if uint32(x>>8) != load32(src, candidate) { + nextHash = hash(uint32(x>>16), shift) + s++ + break + } + } + } + +emitRemainder: + if nextEmit < len(src) { + d += emitLiteral(dst[d:], src[nextEmit:]) + } + return d +} diff --git a/vendor/github.com/klauspost/compress/snappy/golden_test.go b/vendor/github.com/klauspost/compress/snappy/golden_test.go new file mode 100644 index 0000000..e4496f9 --- /dev/null +++ b/vendor/github.com/klauspost/compress/snappy/golden_test.go @@ -0,0 +1,1965 @@ +// Copyright 2016 The Snappy-Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package snappy + +// extendMatchGoldenTestCases is the i and j arguments, and the returned value, +// for every extendMatch call issued when encoding the +// testdata/Mark.Twain-Tom.Sawyer.txt file. It is used to benchmark the +// extendMatch implementation. +// +// It was generated manually by adding some print statements to the (pure Go) +// extendMatch implementation: +// +// func extendMatch(src []byte, i, j int) int { +// i0, j0 := i, j +// for ; j < len(src) && src[i] == src[j]; i, j = i+1, j+1 { +// } +// println("{", i0, ",", j0, ",", j, "},") +// return j +// } +// +// and running "go test -test.run=EncodeGoldenInput -tags=noasm". 
+var extendMatchGoldenTestCases = []struct { + i, j, want int +}{ + {11, 61, 62}, + {80, 81, 82}, + {86, 87, 101}, + {85, 133, 149}, + {152, 153, 162}, + {133, 168, 193}, + {168, 207, 225}, + {81, 255, 275}, + {278, 279, 283}, + {306, 417, 417}, + {373, 428, 430}, + {389, 444, 447}, + {474, 510, 512}, + {465, 533, 533}, + {47, 547, 547}, + {307, 551, 554}, + {420, 582, 587}, + {309, 604, 604}, + {604, 625, 625}, + {538, 629, 629}, + {328, 640, 640}, + {573, 645, 645}, + {319, 657, 657}, + {30, 664, 664}, + {45, 679, 680}, + {621, 684, 684}, + {376, 700, 700}, + {33, 707, 708}, + {601, 733, 733}, + {334, 744, 745}, + {625, 758, 759}, + {382, 763, 763}, + {550, 769, 771}, + {533, 789, 789}, + {804, 813, 813}, + {342, 841, 842}, + {742, 847, 847}, + {74, 852, 852}, + {810, 864, 864}, + {758, 868, 869}, + {714, 883, 883}, + {582, 889, 891}, + {61, 934, 935}, + {894, 942, 942}, + {939, 949, 949}, + {785, 956, 957}, + {886, 978, 978}, + {792, 998, 998}, + {998, 1005, 1005}, + {572, 1032, 1032}, + {698, 1051, 1053}, + {599, 1067, 1069}, + {1056, 1079, 1079}, + {942, 1089, 1090}, + {831, 1094, 1096}, + {1088, 1100, 1103}, + {732, 1113, 1114}, + {1037, 1118, 1118}, + {872, 1128, 1130}, + {1079, 1140, 1142}, + {332, 1162, 1162}, + {207, 1168, 1186}, + {1189, 1190, 1225}, + {105, 1229, 1230}, + {79, 1256, 1257}, + {1190, 1261, 1283}, + {255, 1306, 1306}, + {1319, 1339, 1358}, + {364, 1370, 1370}, + {955, 1378, 1380}, + {122, 1403, 1403}, + {1325, 1407, 1419}, + {664, 1423, 1424}, + {941, 1461, 1463}, + {867, 1477, 1478}, + {757, 1488, 1489}, + {1140, 1499, 1499}, + {31, 1506, 1506}, + {1487, 1510, 1512}, + {1089, 1520, 1521}, + {1467, 1525, 1529}, + {1394, 1537, 1537}, + {1499, 1541, 1541}, + {367, 1558, 1558}, + {1475, 1564, 1564}, + {1525, 1568, 1571}, + {1541, 1582, 1583}, + {864, 1587, 1588}, + {704, 1597, 1597}, + {336, 1602, 1602}, + {1383, 1613, 1613}, + {1498, 1617, 1618}, + {1051, 1623, 1625}, + {401, 1643, 1645}, + {1072, 1654, 1655}, + {1067, 1667, 1669}, + {699, 1673, 1674}, + {1587, 1683, 1684}, + {920, 1696, 1696}, + {1505, 1710, 1710}, + {1550, 1723, 1723}, + {996, 1727, 1727}, + {833, 1733, 1734}, + {1638, 1739, 1740}, + {1654, 1744, 1744}, + {753, 1761, 1761}, + {1548, 1773, 1773}, + {1568, 1777, 1780}, + {1683, 1793, 1794}, + {948, 1801, 1801}, + {1666, 1805, 1808}, + {1502, 1814, 1814}, + {1696, 1822, 1822}, + {502, 1836, 1837}, + {917, 1843, 1843}, + {1733, 1854, 1855}, + {970, 1859, 1859}, + {310, 1863, 1863}, + {657, 1872, 1872}, + {1005, 1876, 1876}, + {1662, 1880, 1880}, + {904, 1892, 1892}, + {1427, 1910, 1910}, + {1772, 1929, 1930}, + {1822, 1937, 1940}, + {1858, 1949, 1950}, + {1602, 1956, 1956}, + {1150, 1962, 1962}, + {1504, 1966, 1967}, + {51, 1971, 1971}, + {1605, 1979, 1979}, + {1458, 1983, 1988}, + {1536, 2001, 2006}, + {1373, 2014, 2018}, + {1494, 2025, 2025}, + {1667, 2029, 2031}, + {1592, 2035, 2035}, + {330, 2045, 2045}, + {1376, 2053, 2053}, + {1991, 2058, 2059}, + {1635, 2065, 2065}, + {1992, 2073, 2074}, + {2014, 2080, 2081}, + {1546, 2085, 2087}, + {59, 2099, 2099}, + {1996, 2106, 2106}, + {1836, 2110, 2110}, + {2068, 2114, 2114}, + {1338, 2122, 2122}, + {1562, 2128, 2130}, + {1934, 2134, 2134}, + {2114, 2141, 2142}, + {977, 2149, 2150}, + {956, 2154, 2155}, + {1407, 2162, 2162}, + {1773, 2166, 2166}, + {883, 2171, 2171}, + {623, 2175, 2178}, + {1520, 2191, 2192}, + {1162, 2200, 2200}, + {912, 2204, 2204}, + {733, 2208, 2208}, + {1777, 2212, 2215}, + {1532, 2219, 2219}, + {718, 2223, 2225}, + {2069, 2229, 2229}, + {2207, 2245, 2246}, + {1139, 2264, 2264}, + 
{677, 2274, 2274}, + {2099, 2279, 2279}, + {1863, 2283, 2283}, + {1966, 2305, 2306}, + {2279, 2313, 2313}, + {1628, 2319, 2319}, + {755, 2329, 2329}, + {1461, 2334, 2334}, + {2117, 2340, 2340}, + {2313, 2349, 2349}, + {1859, 2353, 2353}, + {1048, 2362, 2362}, + {895, 2366, 2366}, + {2278, 2373, 2373}, + {1884, 2377, 2377}, + {1402, 2387, 2392}, + {700, 2398, 2398}, + {1971, 2402, 2402}, + {2009, 2419, 2419}, + {1441, 2426, 2428}, + {2208, 2432, 2432}, + {2038, 2436, 2436}, + {932, 2443, 2443}, + {1759, 2447, 2448}, + {744, 2452, 2452}, + {1875, 2458, 2458}, + {2405, 2468, 2468}, + {1596, 2472, 2473}, + {1953, 2480, 2482}, + {736, 2487, 2487}, + {1913, 2493, 2493}, + {774, 2497, 2497}, + {1484, 2506, 2508}, + {2432, 2512, 2512}, + {752, 2519, 2519}, + {2497, 2523, 2523}, + {2409, 2528, 2529}, + {2122, 2533, 2533}, + {2396, 2537, 2538}, + {2410, 2547, 2548}, + {1093, 2555, 2560}, + {551, 2564, 2565}, + {2268, 2569, 2569}, + {1362, 2580, 2580}, + {1916, 2584, 2585}, + {994, 2589, 2590}, + {1979, 2596, 2596}, + {1041, 2602, 2602}, + {2104, 2614, 2616}, + {2609, 2621, 2628}, + {2329, 2638, 2638}, + {2211, 2657, 2658}, + {2638, 2662, 2667}, + {2578, 2676, 2679}, + {2153, 2685, 2686}, + {2608, 2696, 2697}, + {598, 2712, 2712}, + {2620, 2719, 2720}, + {1888, 2724, 2728}, + {2709, 2732, 2732}, + {1365, 2739, 2739}, + {784, 2747, 2748}, + {424, 2753, 2753}, + {2204, 2759, 2759}, + {812, 2768, 2769}, + {2455, 2773, 2773}, + {1722, 2781, 2781}, + {1917, 2792, 2792}, + {2705, 2799, 2799}, + {2685, 2806, 2807}, + {2742, 2811, 2811}, + {1370, 2818, 2818}, + {2641, 2830, 2830}, + {2512, 2837, 2837}, + {2457, 2841, 2841}, + {2756, 2845, 2845}, + {2719, 2855, 2855}, + {1423, 2859, 2859}, + {2849, 2863, 2865}, + {1474, 2871, 2871}, + {1161, 2875, 2876}, + {2282, 2880, 2881}, + {2746, 2888, 2888}, + {1783, 2893, 2893}, + {2401, 2899, 2900}, + {2632, 2920, 2923}, + {2422, 2928, 2930}, + {2715, 2939, 2939}, + {2162, 2943, 2943}, + {2859, 2947, 2947}, + {1910, 2951, 2951}, + {1431, 2955, 2956}, + {1439, 2964, 2964}, + {2501, 2968, 2969}, + {2029, 2973, 2976}, + {689, 2983, 2984}, + {1658, 2988, 2988}, + {1031, 2996, 2996}, + {2149, 3001, 3002}, + {25, 3009, 3013}, + {2964, 3023, 3023}, + {953, 3027, 3028}, + {2359, 3036, 3036}, + {3023, 3049, 3049}, + {2880, 3055, 3056}, + {2973, 3076, 3077}, + {2874, 3090, 3090}, + {2871, 3094, 3094}, + {2532, 3100, 3100}, + {2938, 3107, 3108}, + {350, 3115, 3115}, + {2196, 3119, 3121}, + {1133, 3127, 3129}, + {1797, 3134, 3150}, + {3032, 3158, 3158}, + {3016, 3172, 3172}, + {2533, 3179, 3179}, + {3055, 3187, 3188}, + {1384, 3192, 3193}, + {2799, 3199, 3199}, + {2126, 3203, 3207}, + {2334, 3215, 3215}, + {2105, 3220, 3221}, + {3199, 3229, 3229}, + {2891, 3233, 3233}, + {855, 3240, 3240}, + {1852, 3253, 3256}, + {2140, 3263, 3263}, + {1682, 3268, 3270}, + {3243, 3274, 3274}, + {924, 3279, 3279}, + {2212, 3283, 3283}, + {2596, 3287, 3287}, + {2999, 3291, 3291}, + {2353, 3295, 3295}, + {2480, 3302, 3304}, + {1959, 3308, 3311}, + {3000, 3318, 3318}, + {845, 3330, 3330}, + {2283, 3334, 3334}, + {2519, 3342, 3342}, + {3325, 3346, 3348}, + {2397, 3353, 3354}, + {2763, 3358, 3358}, + {3198, 3363, 3364}, + {3211, 3368, 3372}, + {2950, 3376, 3377}, + {3245, 3388, 3391}, + {2264, 3398, 3398}, + {795, 3403, 3403}, + {3287, 3407, 3407}, + {3358, 3411, 3411}, + {3317, 3415, 3415}, + {3232, 3431, 3431}, + {2128, 3435, 3437}, + {3236, 3441, 3441}, + {3398, 3445, 3446}, + {2814, 3450, 3450}, + {3394, 3466, 3466}, + {2425, 3470, 3470}, + {3330, 3476, 3476}, + {1612, 3480, 3480}, + {1004, 3485, 
3486}, + {2732, 3490, 3490}, + {1117, 3494, 3495}, + {629, 3501, 3501}, + {3087, 3514, 3514}, + {684, 3518, 3518}, + {3489, 3522, 3524}, + {1760, 3529, 3529}, + {617, 3537, 3537}, + {3431, 3541, 3541}, + {997, 3547, 3547}, + {882, 3552, 3553}, + {2419, 3558, 3558}, + {610, 3562, 3563}, + {1903, 3567, 3569}, + {3005, 3575, 3575}, + {3076, 3585, 3586}, + {3541, 3590, 3590}, + {3490, 3594, 3594}, + {1899, 3599, 3599}, + {3545, 3606, 3606}, + {3290, 3614, 3615}, + {2056, 3619, 3620}, + {3556, 3625, 3625}, + {3294, 3632, 3633}, + {637, 3643, 3644}, + {3609, 3648, 3650}, + {3175, 3658, 3658}, + {3498, 3665, 3665}, + {1597, 3669, 3669}, + {1983, 3673, 3673}, + {3215, 3682, 3682}, + {3544, 3689, 3689}, + {3694, 3698, 3698}, + {3228, 3715, 3716}, + {2594, 3720, 3722}, + {3573, 3726, 3726}, + {2479, 3732, 3735}, + {3191, 3741, 3742}, + {1113, 3746, 3747}, + {2844, 3751, 3751}, + {3445, 3756, 3757}, + {3755, 3766, 3766}, + {3421, 3775, 3780}, + {3593, 3784, 3786}, + {3263, 3796, 3796}, + {3469, 3806, 3806}, + {2602, 3815, 3815}, + {723, 3819, 3821}, + {1608, 3826, 3826}, + {3334, 3830, 3830}, + {2198, 3835, 3835}, + {2635, 3840, 3840}, + {3702, 3852, 3853}, + {3406, 3858, 3859}, + {3681, 3867, 3870}, + {3407, 3880, 3880}, + {340, 3889, 3889}, + {3772, 3893, 3893}, + {593, 3897, 3897}, + {2563, 3914, 3916}, + {2981, 3929, 3929}, + {1835, 3933, 3934}, + {3906, 3951, 3951}, + {1459, 3958, 3958}, + {3889, 3974, 3974}, + {2188, 3982, 3982}, + {3220, 3986, 3987}, + {3585, 3991, 3993}, + {3712, 3997, 4001}, + {2805, 4007, 4007}, + {1879, 4012, 4013}, + {3618, 4018, 4018}, + {1145, 4031, 4032}, + {3901, 4037, 4037}, + {2772, 4046, 4047}, + {2802, 4053, 4054}, + {3299, 4058, 4058}, + {3725, 4066, 4066}, + {2271, 4070, 4070}, + {385, 4075, 4076}, + {3624, 4089, 4090}, + {3745, 4096, 4098}, + {1563, 4102, 4102}, + {4045, 4106, 4111}, + {3696, 4115, 4119}, + {3376, 4125, 4126}, + {1880, 4130, 4130}, + {2048, 4140, 4141}, + {2724, 4149, 4149}, + {1767, 4156, 4156}, + {2601, 4164, 4164}, + {2757, 4168, 4168}, + {3974, 4172, 4172}, + {3914, 4178, 4178}, + {516, 4185, 4185}, + {1032, 4189, 4190}, + {3462, 4197, 4198}, + {3805, 4202, 4203}, + {3910, 4207, 4212}, + {3075, 4221, 4221}, + {3756, 4225, 4226}, + {1872, 4236, 4237}, + {3844, 4241, 4241}, + {3991, 4245, 4249}, + {2203, 4258, 4258}, + {3903, 4267, 4268}, + {705, 4272, 4272}, + {1896, 4276, 4276}, + {1955, 4285, 4288}, + {3746, 4302, 4303}, + {2672, 4311, 4311}, + {3969, 4317, 4317}, + {3883, 4322, 4322}, + {1920, 4339, 4340}, + {3527, 4344, 4346}, + {1160, 4358, 4358}, + {3648, 4364, 4366}, + {2711, 4387, 4387}, + {3619, 4391, 4392}, + {1944, 4396, 4396}, + {4369, 4400, 4400}, + {2736, 4404, 4407}, + {2546, 4411, 4412}, + {4390, 4422, 4422}, + {3610, 4426, 4427}, + {4058, 4431, 4431}, + {4374, 4435, 4435}, + {3463, 4445, 4446}, + {1813, 4452, 4452}, + {3669, 4456, 4456}, + {3830, 4460, 4460}, + {421, 4464, 4465}, + {1719, 4471, 4471}, + {3880, 4475, 4475}, + {1834, 4485, 4487}, + {3590, 4491, 4491}, + {442, 4496, 4497}, + {4435, 4501, 4501}, + {3814, 4509, 4509}, + {987, 4513, 4513}, + {4494, 4518, 4521}, + {3218, 4526, 4529}, + {4221, 4537, 4537}, + {2778, 4543, 4545}, + {4422, 4552, 4552}, + {4031, 4558, 4559}, + {4178, 4563, 4563}, + {3726, 4567, 4574}, + {4027, 4578, 4578}, + {4339, 4585, 4587}, + {3796, 4592, 4595}, + {543, 4600, 4613}, + {2855, 4620, 4621}, + {2795, 4627, 4627}, + {3440, 4631, 4632}, + {4279, 4636, 4639}, + {4245, 4643, 4645}, + {4516, 4649, 4650}, + {3133, 4654, 4654}, + {4042, 4658, 4659}, + {3422, 4663, 4663}, + {4046, 4667, 4668}, 
+ {4267, 4672, 4672}, + {4004, 4676, 4677}, + {2490, 4682, 4682}, + {2451, 4697, 4697}, + {3027, 4705, 4705}, + {4028, 4717, 4717}, + {4460, 4721, 4721}, + {2471, 4725, 4727}, + {3090, 4735, 4735}, + {3192, 4739, 4740}, + {3835, 4760, 4760}, + {4540, 4764, 4764}, + {4007, 4772, 4774}, + {619, 4784, 4784}, + {3561, 4789, 4791}, + {3367, 4805, 4805}, + {4490, 4810, 4811}, + {2402, 4815, 4815}, + {3352, 4819, 4822}, + {2773, 4828, 4828}, + {4552, 4832, 4832}, + {2522, 4840, 4841}, + {316, 4847, 4852}, + {4715, 4858, 4858}, + {2959, 4862, 4862}, + {4858, 4868, 4869}, + {2134, 4873, 4873}, + {578, 4878, 4878}, + {4189, 4889, 4890}, + {2229, 4894, 4894}, + {4501, 4898, 4898}, + {2297, 4903, 4903}, + {2933, 4909, 4909}, + {3008, 4913, 4913}, + {3153, 4917, 4917}, + {4819, 4921, 4921}, + {4921, 4932, 4933}, + {4920, 4944, 4945}, + {4814, 4954, 4955}, + {576, 4966, 4966}, + {1854, 4970, 4971}, + {1374, 4975, 4976}, + {3307, 4980, 4980}, + {974, 4984, 4988}, + {4721, 4992, 4992}, + {4898, 4996, 4996}, + {4475, 5006, 5006}, + {3819, 5012, 5012}, + {1948, 5019, 5021}, + {4954, 5027, 5029}, + {3740, 5038, 5040}, + {4763, 5044, 5045}, + {1936, 5051, 5051}, + {4844, 5055, 5060}, + {4215, 5069, 5072}, + {1146, 5076, 5076}, + {3845, 5082, 5082}, + {4865, 5090, 5090}, + {4624, 5094, 5094}, + {4815, 5098, 5098}, + {5006, 5105, 5105}, + {4980, 5109, 5109}, + {4795, 5113, 5115}, + {5043, 5119, 5121}, + {4782, 5129, 5129}, + {3826, 5139, 5139}, + {3876, 5156, 5156}, + {3111, 5167, 5171}, + {1470, 5177, 5177}, + {4431, 5181, 5181}, + {546, 5189, 5189}, + {4225, 5193, 5193}, + {1672, 5199, 5201}, + {4207, 5205, 5209}, + {4220, 5216, 5217}, + {4658, 5224, 5225}, + {3295, 5235, 5235}, + {2436, 5239, 5239}, + {2349, 5246, 5246}, + {2175, 5250, 5250}, + {5180, 5257, 5258}, + {3161, 5263, 5263}, + {5105, 5272, 5272}, + {3552, 5282, 5282}, + {4944, 5299, 5300}, + {4130, 5312, 5313}, + {902, 5323, 5323}, + {913, 5327, 5327}, + {2987, 5333, 5334}, + {5150, 5344, 5344}, + {5249, 5348, 5348}, + {1965, 5358, 5359}, + {5330, 5364, 5364}, + {2012, 5373, 5377}, + {712, 5384, 5386}, + {5235, 5390, 5390}, + {5044, 5398, 5399}, + {564, 5406, 5406}, + {39, 5410, 5410}, + {4642, 5422, 5425}, + {4421, 5437, 5438}, + {2347, 5449, 5449}, + {5333, 5453, 5454}, + {4136, 5458, 5459}, + {3793, 5468, 5468}, + {2243, 5480, 5480}, + {4889, 5492, 5493}, + {4295, 5504, 5504}, + {2785, 5511, 5511}, + {2377, 5518, 5518}, + {3662, 5525, 5525}, + {5097, 5529, 5530}, + {4781, 5537, 5538}, + {4697, 5547, 5548}, + {436, 5552, 5553}, + {5542, 5558, 5558}, + {3692, 5562, 5562}, + {2696, 5568, 5569}, + {4620, 5578, 5578}, + {2898, 5590, 5590}, + {5557, 5596, 5618}, + {2797, 5623, 5625}, + {2792, 5629, 5629}, + {5243, 5633, 5633}, + {5348, 5637, 5637}, + {5547, 5643, 5643}, + {4296, 5654, 5655}, + {5568, 5662, 5662}, + {3001, 5670, 5671}, + {3794, 5679, 5679}, + {4006, 5685, 5686}, + {4969, 5690, 5692}, + {687, 5704, 5704}, + {4563, 5708, 5708}, + {1723, 5738, 5738}, + {649, 5742, 5742}, + {5163, 5748, 5755}, + {3907, 5759, 5759}, + {3074, 5764, 5764}, + {5326, 5771, 5771}, + {2951, 5776, 5776}, + {5181, 5780, 5780}, + {2614, 5785, 5788}, + {4709, 5794, 5794}, + {2784, 5799, 5799}, + {5518, 5803, 5803}, + {4155, 5812, 5815}, + {921, 5819, 5819}, + {5224, 5823, 5824}, + {2853, 5830, 5836}, + {5776, 5840, 5840}, + {2955, 5844, 5845}, + {5745, 5853, 5853}, + {3291, 5857, 5857}, + {2988, 5861, 5861}, + {2647, 5865, 5865}, + {5398, 5869, 5870}, + {1085, 5874, 5875}, + {4906, 5881, 5881}, + {802, 5886, 5886}, + {5119, 5890, 5893}, + {5802, 5899, 5900}, + 
{3415, 5904, 5904}, + {5629, 5908, 5908}, + {3714, 5912, 5914}, + {5558, 5921, 5921}, + {2710, 5927, 5928}, + {1094, 5932, 5934}, + {2653, 5940, 5941}, + {4735, 5954, 5954}, + {5861, 5958, 5958}, + {1040, 5971, 5971}, + {5514, 5977, 5977}, + {5048, 5981, 5982}, + {5953, 5992, 5993}, + {3751, 5997, 5997}, + {4991, 6001, 6002}, + {5885, 6006, 6007}, + {5529, 6011, 6012}, + {4974, 6019, 6020}, + {5857, 6024, 6024}, + {3483, 6032, 6032}, + {3594, 6036, 6036}, + {1997, 6040, 6040}, + {5997, 6044, 6047}, + {5197, 6051, 6051}, + {1764, 6055, 6055}, + {6050, 6059, 6059}, + {5239, 6063, 6063}, + {5049, 6067, 6067}, + {5957, 6073, 6074}, + {1022, 6078, 6078}, + {3414, 6083, 6084}, + {3809, 6090, 6090}, + {4562, 6095, 6096}, + {5878, 6104, 6104}, + {594, 6108, 6109}, + {3353, 6115, 6116}, + {4992, 6120, 6121}, + {2424, 6125, 6125}, + {4484, 6130, 6130}, + {3900, 6134, 6135}, + {5793, 6139, 6141}, + {3562, 6145, 6145}, + {1438, 6152, 6153}, + {6058, 6157, 6158}, + {4411, 6162, 6163}, + {4590, 6167, 6171}, + {4748, 6175, 6175}, + {5517, 6183, 6184}, + {6095, 6191, 6192}, + {1471, 6203, 6203}, + {2643, 6209, 6210}, + {450, 6220, 6220}, + {5266, 6226, 6226}, + {2576, 6233, 6233}, + {2607, 6239, 6240}, + {5164, 6244, 6251}, + {6054, 6255, 6255}, + {1789, 6260, 6261}, + {5250, 6265, 6265}, + {6062, 6273, 6278}, + {5990, 6282, 6282}, + {3283, 6286, 6286}, + {5436, 6290, 6290}, + {6059, 6294, 6294}, + {5668, 6298, 6300}, + {3072, 6324, 6329}, + {3132, 6338, 6339}, + {3246, 6343, 6344}, + {28, 6348, 6349}, + {1503, 6353, 6355}, + {6067, 6359, 6359}, + {3384, 6364, 6364}, + {545, 6375, 6376}, + {5803, 6380, 6380}, + {5522, 6384, 6385}, + {5908, 6389, 6389}, + {2796, 6393, 6396}, + {4831, 6403, 6404}, + {6388, 6412, 6412}, + {6005, 6417, 6420}, + {4450, 6430, 6430}, + {4050, 6435, 6435}, + {5372, 6441, 6441}, + {4378, 6447, 6447}, + {6199, 6452, 6452}, + {3026, 6456, 6456}, + {2642, 6460, 6462}, + {6392, 6470, 6470}, + {6459, 6474, 6474}, + {2829, 6487, 6488}, + {2942, 6499, 6504}, + {5069, 6508, 6511}, + {5341, 6515, 6516}, + {5853, 6521, 6525}, + {6104, 6531, 6531}, + {5759, 6535, 6538}, + {4672, 6542, 6543}, + {2443, 6550, 6550}, + {5109, 6554, 6554}, + {6494, 6558, 6560}, + {6006, 6570, 6572}, + {6424, 6576, 6580}, + {4693, 6591, 6592}, + {6439, 6596, 6597}, + {3179, 6601, 6601}, + {5299, 6606, 6607}, + {4148, 6612, 6613}, + {3774, 6617, 6617}, + {3537, 6623, 6624}, + {4975, 6628, 6629}, + {3848, 6636, 6636}, + {856, 6640, 6640}, + {5724, 6645, 6645}, + {6632, 6651, 6651}, + {4630, 6656, 6658}, + {1440, 6662, 6662}, + {4281, 6666, 6667}, + {4302, 6671, 6672}, + {2589, 6676, 6677}, + {5647, 6681, 6687}, + {6082, 6691, 6693}, + {6144, 6698, 6698}, + {6103, 6709, 6710}, + {3710, 6714, 6714}, + {4253, 6718, 6721}, + {2467, 6730, 6730}, + {4778, 6734, 6734}, + {6528, 6738, 6738}, + {4358, 6747, 6747}, + {5889, 6753, 6753}, + {5193, 6757, 6757}, + {5797, 6761, 6761}, + {3858, 6765, 6766}, + {5951, 6776, 6776}, + {6487, 6781, 6782}, + {3282, 6786, 6787}, + {4667, 6797, 6799}, + {1927, 6803, 6806}, + {6583, 6810, 6810}, + {4937, 6814, 6814}, + {6099, 6824, 6824}, + {4415, 6835, 6836}, + {6332, 6840, 6841}, + {5160, 6850, 6850}, + {4764, 6854, 6854}, + {6814, 6858, 6859}, + {3018, 6864, 6864}, + {6293, 6868, 6869}, + {6359, 6877, 6877}, + {3047, 6884, 6886}, + {5262, 6890, 6891}, + {5471, 6900, 6900}, + {3268, 6910, 6912}, + {1047, 6916, 6916}, + {5904, 6923, 6923}, + {5798, 6933, 6938}, + {4149, 6942, 6942}, + {1821, 6946, 6946}, + {3599, 6952, 6952}, + {6470, 6957, 6957}, + {5562, 6961, 6961}, + {6268, 6965, 
6967}, + {6389, 6971, 6971}, + {6596, 6975, 6976}, + {6553, 6980, 6981}, + {6576, 6985, 6989}, + {1375, 6993, 6993}, + {652, 6998, 6998}, + {4876, 7002, 7003}, + {5768, 7011, 7013}, + {3973, 7017, 7017}, + {6802, 7025, 7025}, + {6955, 7034, 7036}, + {6974, 7040, 7040}, + {5944, 7044, 7044}, + {6992, 7048, 7054}, + {6872, 7059, 7059}, + {2943, 7063, 7063}, + {6923, 7067, 7067}, + {5094, 7071, 7071}, + {4873, 7075, 7075}, + {5819, 7079, 7079}, + {5945, 7085, 7085}, + {1540, 7090, 7091}, + {2090, 7095, 7095}, + {5024, 7104, 7105}, + {6900, 7109, 7109}, + {6024, 7113, 7114}, + {6000, 7118, 7120}, + {2187, 7124, 7125}, + {6760, 7129, 7130}, + {5898, 7134, 7136}, + {7032, 7144, 7144}, + {4271, 7148, 7148}, + {3706, 7152, 7152}, + {6970, 7156, 7157}, + {7088, 7161, 7163}, + {2718, 7168, 7169}, + {5674, 7175, 7175}, + {4631, 7182, 7182}, + {7070, 7188, 7189}, + {6220, 7196, 7196}, + {3458, 7201, 7202}, + {2041, 7211, 7212}, + {1454, 7216, 7216}, + {5199, 7225, 7227}, + {3529, 7234, 7234}, + {6890, 7238, 7238}, + {3815, 7242, 7243}, + {5490, 7250, 7253}, + {6554, 7257, 7263}, + {5890, 7267, 7269}, + {6877, 7273, 7273}, + {4877, 7277, 7277}, + {2502, 7285, 7285}, + {1483, 7289, 7295}, + {7210, 7304, 7308}, + {6845, 7313, 7316}, + {7219, 7320, 7320}, + {7001, 7325, 7329}, + {6853, 7333, 7334}, + {6120, 7338, 7338}, + {6606, 7342, 7343}, + {7020, 7348, 7350}, + {3509, 7354, 7354}, + {7133, 7359, 7363}, + {3434, 7371, 7374}, + {2787, 7384, 7384}, + {7044, 7388, 7388}, + {6960, 7394, 7395}, + {6676, 7399, 7400}, + {7161, 7404, 7404}, + {7285, 7417, 7418}, + {4558, 7425, 7426}, + {4828, 7430, 7430}, + {6063, 7436, 7436}, + {3597, 7442, 7442}, + {914, 7446, 7446}, + {7320, 7452, 7454}, + {7267, 7458, 7460}, + {5076, 7464, 7464}, + {7430, 7468, 7469}, + {6273, 7473, 7474}, + {7440, 7478, 7487}, + {7348, 7491, 7494}, + {1021, 7510, 7510}, + {7473, 7515, 7515}, + {2823, 7519, 7519}, + {6264, 7527, 7527}, + {7302, 7531, 7531}, + {7089, 7535, 7535}, + {7342, 7540, 7541}, + {3688, 7547, 7551}, + {3054, 7558, 7560}, + {4177, 7566, 7567}, + {6691, 7574, 7575}, + {7156, 7585, 7586}, + {7147, 7590, 7592}, + {7407, 7598, 7598}, + {7403, 7602, 7603}, + {6868, 7607, 7607}, + {6636, 7611, 7611}, + {4805, 7617, 7617}, + {5779, 7623, 7623}, + {7063, 7627, 7627}, + {5079, 7632, 7632}, + {7377, 7637, 7637}, + {7337, 7641, 7642}, + {6738, 7655, 7655}, + {7338, 7659, 7659}, + {6541, 7669, 7671}, + {595, 7675, 7675}, + {7658, 7679, 7680}, + {7647, 7685, 7686}, + {2477, 7690, 7690}, + {5823, 7694, 7694}, + {4156, 7699, 7699}, + {5931, 7703, 7706}, + {6854, 7712, 7712}, + {4931, 7718, 7718}, + {6979, 7722, 7722}, + {5085, 7727, 7727}, + {6965, 7732, 7732}, + {7201, 7736, 7737}, + {3639, 7741, 7743}, + {7534, 7749, 7749}, + {4292, 7753, 7753}, + {3427, 7759, 7763}, + {7273, 7767, 7767}, + {940, 7778, 7778}, + {4838, 7782, 7785}, + {4216, 7790, 7792}, + {922, 7800, 7801}, + {7256, 7810, 7811}, + {7789, 7815, 7819}, + {7225, 7823, 7825}, + {7531, 7829, 7829}, + {6997, 7833, 7833}, + {7757, 7837, 7838}, + {4129, 7842, 7842}, + {7333, 7848, 7849}, + {6776, 7855, 7855}, + {7527, 7859, 7859}, + {4370, 7863, 7863}, + {4512, 7868, 7868}, + {5679, 7880, 7880}, + {3162, 7884, 7885}, + {3933, 7892, 7894}, + {7804, 7899, 7902}, + {6363, 7906, 7907}, + {7848, 7911, 7912}, + {5584, 7917, 7921}, + {874, 7926, 7926}, + {3342, 7930, 7930}, + {4507, 7935, 7937}, + {3672, 7943, 7944}, + {7911, 7948, 7949}, + {6402, 7956, 7956}, + {7940, 7960, 7960}, + {7113, 7964, 7964}, + {1073, 7968, 7968}, + {7740, 7974, 7974}, + {7601, 7978, 7982}, + {6797, 
7987, 7988}, + {3528, 7994, 7995}, + {5483, 7999, 7999}, + {5717, 8011, 8011}, + {5480, 8017, 8017}, + {7770, 8023, 8030}, + {2452, 8034, 8034}, + {5282, 8047, 8047}, + {7967, 8051, 8051}, + {1128, 8058, 8066}, + {6348, 8070, 8070}, + {8055, 8077, 8077}, + {7925, 8081, 8086}, + {6810, 8090, 8090}, + {5051, 8101, 8101}, + {4696, 8109, 8110}, + {5129, 8119, 8119}, + {4449, 8123, 8123}, + {7222, 8127, 8127}, + {4649, 8131, 8134}, + {7994, 8138, 8138}, + {5954, 8148, 8148}, + {475, 8152, 8153}, + {7906, 8157, 8157}, + {7458, 8164, 8166}, + {7632, 8171, 8173}, + {3874, 8177, 8183}, + {4391, 8187, 8187}, + {561, 8191, 8191}, + {2417, 8195, 8195}, + {2357, 8204, 8204}, + {2269, 8216, 8218}, + {3968, 8222, 8222}, + {2200, 8226, 8227}, + {3453, 8247, 8247}, + {2439, 8251, 8252}, + {7175, 8257, 8257}, + {976, 8262, 8264}, + {4953, 8273, 8273}, + {4219, 8278, 8278}, + {6, 8285, 8291}, + {5703, 8295, 8296}, + {5272, 8300, 8300}, + {8037, 8304, 8304}, + {8186, 8314, 8314}, + {8304, 8318, 8318}, + {8051, 8326, 8326}, + {8318, 8330, 8330}, + {2671, 8334, 8335}, + {2662, 8339, 8339}, + {8081, 8349, 8350}, + {3328, 8356, 8356}, + {2879, 8360, 8362}, + {8050, 8370, 8371}, + {8330, 8375, 8376}, + {8375, 8386, 8386}, + {4961, 8390, 8390}, + {1017, 8403, 8405}, + {3533, 8416, 8416}, + {4555, 8422, 8422}, + {6445, 8426, 8426}, + {8169, 8432, 8432}, + {990, 8436, 8436}, + {4102, 8440, 8440}, + {7398, 8444, 8446}, + {3480, 8450, 8450}, + {6324, 8462, 8462}, + {7948, 8466, 8467}, + {5950, 8471, 8471}, + {5189, 8476, 8476}, + {4026, 8490, 8490}, + {8374, 8494, 8495}, + {4682, 8501, 8501}, + {7387, 8506, 8506}, + {8164, 8510, 8515}, + {4079, 8524, 8524}, + {8360, 8529, 8531}, + {7446, 8540, 8543}, + {7971, 8547, 8548}, + {4311, 8552, 8552}, + {5204, 8556, 8557}, + {7968, 8562, 8562}, + {7847, 8571, 8573}, + {8547, 8577, 8577}, + {5320, 8581, 8581}, + {8556, 8585, 8586}, + {8504, 8590, 8590}, + {7669, 8602, 8604}, + {5874, 8608, 8609}, + {5828, 8613, 8613}, + {7998, 8617, 8617}, + {8519, 8625, 8625}, + {7250, 8637, 8637}, + {426, 8641, 8641}, + {8436, 8645, 8645}, + {5986, 8649, 8656}, + {8157, 8660, 8660}, + {7182, 8665, 8665}, + {8421, 8675, 8675}, + {8509, 8681, 8681}, + {5137, 8688, 8689}, + {8625, 8694, 8695}, + {5228, 8701, 8702}, + {6661, 8714, 8714}, + {1010, 8719, 8719}, + {6648, 8723, 8723}, + {3500, 8728, 8728}, + {2442, 8735, 8735}, + {8494, 8740, 8741}, + {8171, 8753, 8755}, + {7242, 8763, 8764}, + {4739, 8768, 8769}, + {7079, 8773, 8773}, + {8386, 8777, 8777}, + {8624, 8781, 8787}, + {661, 8791, 8794}, + {8631, 8801, 8801}, + {7753, 8805, 8805}, + {4783, 8809, 8810}, + {1673, 8814, 8815}, + {6623, 8819, 8819}, + {4404, 8823, 8823}, + {8089, 8827, 8828}, + {8773, 8832, 8832}, + {5394, 8836, 8836}, + {6231, 8841, 8843}, + {1015, 8852, 8853}, + {6873, 8857, 8857}, + {6289, 8865, 8865}, + {8577, 8869, 8869}, + {8114, 8873, 8875}, + {8534, 8883, 8883}, + {3007, 8887, 8888}, + {8827, 8892, 8893}, + {4788, 8897, 8900}, + {5698, 8906, 8907}, + {7690, 8911, 8911}, + {6643, 8919, 8919}, + {7206, 8923, 8924}, + {7866, 8929, 8931}, + {8880, 8942, 8942}, + {8630, 8951, 8952}, + {6027, 8958, 8958}, + {7749, 8966, 8967}, + {4932, 8972, 8973}, + {8892, 8980, 8981}, + {634, 9003, 9003}, + {8109, 9007, 9008}, + {8777, 9012, 9012}, + {3981, 9016, 9017}, + {5723, 9025, 9025}, + {7662, 9034, 9038}, + {8955, 9042, 9042}, + {8070, 9060, 9062}, + {8910, 9066, 9066}, + {5363, 9070, 9071}, + {7699, 9075, 9076}, + {8991, 9081, 9081}, + {6850, 9085, 9085}, + {5811, 9092, 9094}, + {9079, 9098, 9102}, + {6456, 9106, 9106}, + {2259, 
9111, 9111}, + {4752, 9116, 9116}, + {9060, 9120, 9123}, + {8090, 9127, 9127}, + {5305, 9131, 9132}, + {8623, 9137, 9137}, + {7417, 9141, 9141}, + {6564, 9148, 9149}, + {9126, 9157, 9158}, + {4285, 9169, 9170}, + {8698, 9174, 9174}, + {8869, 9178, 9178}, + {2572, 9182, 9183}, + {6482, 9188, 9190}, + {9181, 9201, 9201}, + {2968, 9208, 9209}, + {2506, 9213, 9215}, + {9127, 9219, 9219}, + {7910, 9225, 9227}, + {5422, 9235, 9239}, + {8813, 9244, 9246}, + {9178, 9250, 9250}, + {8748, 9255, 9255}, + {7354, 9265, 9265}, + {7767, 9269, 9269}, + {7710, 9281, 9283}, + {8826, 9288, 9290}, + {861, 9295, 9295}, + {4482, 9301, 9301}, + {9264, 9305, 9306}, + {8805, 9310, 9310}, + {4995, 9314, 9314}, + {6730, 9318, 9318}, + {7457, 9328, 9328}, + {2547, 9335, 9336}, + {6298, 9340, 9343}, + {9305, 9353, 9354}, + {9269, 9358, 9358}, + {6338, 9370, 9370}, + {7289, 9376, 9379}, + {5780, 9383, 9383}, + {7607, 9387, 9387}, + {2065, 9392, 9392}, + {7238, 9396, 9396}, + {8856, 9400, 9400}, + {8069, 9412, 9413}, + {611, 9420, 9420}, + {7071, 9424, 9424}, + {3089, 9430, 9431}, + {7117, 9435, 9438}, + {1976, 9445, 9445}, + {6640, 9449, 9449}, + {5488, 9453, 9453}, + {8739, 9457, 9459}, + {5958, 9466, 9466}, + {7985, 9470, 9470}, + {8735, 9475, 9475}, + {5009, 9479, 9479}, + {8073, 9483, 9484}, + {2328, 9490, 9491}, + {9250, 9495, 9495}, + {4043, 9502, 9502}, + {7712, 9506, 9506}, + {9012, 9510, 9510}, + {9028, 9514, 9515}, + {2190, 9521, 9524}, + {9029, 9528, 9528}, + {9519, 9532, 9532}, + {9495, 9536, 9536}, + {8527, 9540, 9540}, + {2137, 9550, 9550}, + {8419, 9557, 9557}, + {9383, 9561, 9562}, + {8970, 9575, 9578}, + {8911, 9582, 9582}, + {7828, 9595, 9596}, + {6180, 9600, 9600}, + {8738, 9604, 9607}, + {7540, 9611, 9612}, + {9599, 9616, 9618}, + {9187, 9623, 9623}, + {9294, 9628, 9629}, + {4536, 9639, 9639}, + {3867, 9643, 9643}, + {6305, 9648, 9648}, + {1617, 9654, 9657}, + {5762, 9666, 9666}, + {8314, 9670, 9670}, + {9666, 9674, 9675}, + {9506, 9679, 9679}, + {9669, 9685, 9686}, + {9683, 9690, 9690}, + {8763, 9697, 9698}, + {7468, 9702, 9702}, + {460, 9707, 9707}, + {3115, 9712, 9712}, + {9424, 9716, 9717}, + {7359, 9721, 9724}, + {7547, 9728, 9729}, + {7151, 9733, 9738}, + {7627, 9742, 9742}, + {2822, 9747, 9747}, + {8247, 9751, 9753}, + {9550, 9758, 9758}, + {7585, 9762, 9763}, + {1002, 9767, 9767}, + {7168, 9772, 9773}, + {6941, 9777, 9780}, + {9728, 9784, 9786}, + {9770, 9792, 9796}, + {6411, 9801, 9802}, + {3689, 9806, 9808}, + {9575, 9814, 9816}, + {7025, 9820, 9821}, + {2776, 9826, 9826}, + {9806, 9830, 9830}, + {9820, 9834, 9835}, + {9800, 9839, 9847}, + {9834, 9851, 9852}, + {9829, 9856, 9862}, + {1400, 9866, 9866}, + {3197, 9870, 9871}, + {9851, 9875, 9876}, + {9742, 9883, 9884}, + {3362, 9888, 9889}, + {9883, 9893, 9893}, + {5711, 9899, 9910}, + {7806, 9915, 9915}, + {9120, 9919, 9919}, + {9715, 9925, 9934}, + {2580, 9938, 9938}, + {4907, 9942, 9944}, + {6239, 9953, 9954}, + {6961, 9963, 9963}, + {5295, 9967, 9968}, + {1915, 9972, 9973}, + {3426, 9983, 9985}, + {9875, 9994, 9995}, + {6942, 9999, 9999}, + {6621, 10005, 10005}, + {7589, 10010, 10012}, + {9286, 10020, 10020}, + {838, 10024, 10024}, + {9980, 10028, 10031}, + {9994, 10035, 10041}, + {2702, 10048, 10051}, + {2621, 10059, 10059}, + {10054, 10065, 10065}, + {8612, 10073, 10074}, + {7033, 10078, 10078}, + {916, 10082, 10082}, + {10035, 10086, 10087}, + {8613, 10097, 10097}, + {9919, 10107, 10108}, + {6133, 10114, 10115}, + {10059, 10119, 10119}, + {10065, 10126, 10127}, + {7732, 10131, 10131}, + {7155, 10135, 10136}, + {6728, 10140, 10140}, + 
{6162, 10144, 10145}, + {4724, 10150, 10150}, + {1665, 10154, 10154}, + {10126, 10163, 10163}, + {9783, 10168, 10168}, + {1715, 10172, 10173}, + {7152, 10177, 10182}, + {8760, 10187, 10187}, + {7829, 10191, 10191}, + {9679, 10196, 10196}, + {9369, 10201, 10201}, + {2928, 10206, 10208}, + {6951, 10214, 10217}, + {5633, 10221, 10221}, + {7199, 10225, 10225}, + {10118, 10230, 10231}, + {9999, 10235, 10236}, + {10045, 10240, 10249}, + {5565, 10256, 10256}, + {9866, 10261, 10261}, + {10163, 10268, 10268}, + {9869, 10272, 10272}, + {9789, 10276, 10283}, + {10235, 10287, 10288}, + {10214, 10298, 10299}, + {6971, 10303, 10303}, + {3346, 10307, 10307}, + {10185, 10311, 10312}, + {9993, 10318, 10320}, + {2779, 10332, 10334}, + {1726, 10338, 10338}, + {741, 10354, 10360}, + {10230, 10372, 10373}, + {10260, 10384, 10385}, + {10131, 10389, 10398}, + {6946, 10406, 10409}, + {10158, 10413, 10420}, + {10123, 10424, 10424}, + {6157, 10428, 10429}, + {4518, 10434, 10434}, + {9893, 10438, 10438}, + {9865, 10442, 10446}, + {7558, 10454, 10454}, + {10434, 10460, 10460}, + {10064, 10466, 10468}, + {2703, 10472, 10474}, + {9751, 10478, 10479}, + {6714, 10485, 10485}, + {8020, 10490, 10490}, + {10303, 10494, 10494}, + {3521, 10499, 10500}, + {9281, 10513, 10515}, + {6028, 10519, 10523}, + {9387, 10527, 10527}, + {7614, 10531, 10531}, + {3611, 10536, 10536}, + {9162, 10540, 10540}, + {10081, 10546, 10547}, + {10034, 10560, 10562}, + {6726, 10567, 10571}, + {8237, 10575, 10575}, + {10438, 10579, 10583}, + {10140, 10587, 10587}, + {5784, 10592, 10592}, + {9819, 10597, 10600}, + {10567, 10604, 10608}, + {9335, 10613, 10613}, + {8300, 10617, 10617}, + {10575, 10621, 10621}, + {9678, 10625, 10626}, + {9962, 10632, 10633}, + {10535, 10637, 10638}, + {8199, 10642, 10642}, + {10372, 10647, 10648}, + {10637, 10656, 10657}, + {10579, 10667, 10668}, + {10465, 10677, 10680}, + {6702, 10684, 10685}, + {10073, 10691, 10692}, + {4505, 10696, 10697}, + {9042, 10701, 10701}, + {6460, 10705, 10706}, + {10010, 10714, 10716}, + {10656, 10720, 10722}, + {7282, 10727, 10729}, + {2327, 10733, 10733}, + {2491, 10740, 10741}, + {10704, 10748, 10750}, + {6465, 10754, 10754}, + {10647, 10758, 10759}, + {10424, 10763, 10763}, + {10748, 10776, 10776}, + {10546, 10780, 10781}, + {10758, 10785, 10786}, + {10287, 10790, 10797}, + {10785, 10801, 10807}, + {10240, 10811, 10826}, + {9509, 10830, 10830}, + {2579, 10836, 10838}, + {9801, 10843, 10845}, + {7555, 10849, 10850}, + {10776, 10860, 10865}, + {8023, 10869, 10869}, + {10046, 10876, 10884}, + {10253, 10888, 10892}, + {9941, 10897, 10897}, + {7898, 10901, 10905}, + {6725, 10909, 10913}, + {10757, 10921, 10923}, + {10160, 10931, 10931}, + {10916, 10935, 10942}, + {10261, 10946, 10946}, + {10318, 10952, 10954}, + {5911, 10959, 10961}, + {10801, 10965, 10966}, + {10946, 10970, 10977}, + {10592, 10982, 10984}, + {9913, 10988, 10990}, + {8510, 10994, 10996}, + {9419, 11000, 11001}, + {6765, 11006, 11007}, + {10725, 11011, 11011}, + {5537, 11017, 11019}, + {9208, 11024, 11025}, + {5850, 11030, 11030}, + {9610, 11034, 11036}, + {8846, 11041, 11047}, + {9697, 11051, 11051}, + {1622, 11055, 11058}, + {2370, 11062, 11062}, + {8393, 11067, 11067}, + {9756, 11071, 11071}, + {10172, 11076, 11076}, + {27, 11081, 11081}, + {7357, 11087, 11092}, + {8151, 11104, 11106}, + {6115, 11110, 11110}, + {10667, 11114, 11115}, + {11099, 11121, 11123}, + {10705, 11127, 11127}, + {8938, 11131, 11131}, + {11114, 11135, 11136}, + {1390, 11140, 11141}, + {10964, 11146, 11148}, + {11140, 11152, 11155}, + {9813, 11159, 
11166}, + {624, 11171, 11172}, + {3118, 11177, 11179}, + {11029, 11184, 11186}, + {10186, 11190, 11190}, + {10306, 11196, 11196}, + {8665, 11201, 11201}, + {7382, 11205, 11205}, + {1100, 11210, 11210}, + {2337, 11216, 11217}, + {1609, 11221, 11223}, + {5763, 11228, 11229}, + {5220, 11233, 11233}, + {11061, 11241, 11241}, + {10617, 11246, 11246}, + {11190, 11250, 11251}, + {10144, 11255, 11256}, + {11232, 11260, 11260}, + {857, 11264, 11265}, + {10994, 11269, 11271}, + {3879, 11280, 11281}, + {11184, 11287, 11289}, + {9611, 11293, 11295}, + {11250, 11299, 11299}, + {4495, 11304, 11304}, + {7574, 11308, 11309}, + {9814, 11315, 11317}, + {1713, 11321, 11324}, + {1905, 11328, 11328}, + {8745, 11335, 11340}, + {8883, 11351, 11351}, + {8119, 11358, 11358}, + {1842, 11363, 11364}, + {11237, 11368, 11368}, + {8814, 11373, 11374}, + {5684, 11378, 11378}, + {11011, 11382, 11382}, + {6520, 11389, 11389}, + {11183, 11393, 11396}, + {1790, 11404, 11404}, + {9536, 11408, 11408}, + {11298, 11418, 11419}, + {3929, 11425, 11425}, + {5588, 11429, 11429}, + {8476, 11436, 11436}, + {4096, 11440, 11442}, + {11084, 11446, 11454}, + {10603, 11458, 11463}, + {7332, 11472, 11474}, + {7611, 11483, 11486}, + {4836, 11490, 11491}, + {10024, 11495, 11495}, + {4917, 11501, 11506}, + {6486, 11510, 11512}, + {11269, 11516, 11518}, + {3603, 11522, 11525}, + {11126, 11535, 11535}, + {11418, 11539, 11541}, + {11408, 11545, 11545}, + {9021, 11549, 11552}, + {6745, 11557, 11557}, + {5118, 11561, 11564}, + {7590, 11568, 11569}, + {4426, 11573, 11578}, + {9790, 11582, 11583}, + {6447, 11587, 11587}, + {10229, 11591, 11594}, + {10457, 11598, 11598}, + {10168, 11604, 11604}, + {10543, 11608, 11608}, + {7404, 11612, 11612}, + {11127, 11616, 11616}, + {3337, 11620, 11620}, + {11501, 11624, 11628}, + {4543, 11633, 11635}, + {8449, 11642, 11642}, + {4943, 11646, 11648}, + {10526, 11652, 11654}, + {11620, 11659, 11659}, + {8927, 11664, 11669}, + {532, 11673, 11673}, + {10513, 11677, 11679}, + {10428, 11683, 11683}, + {10999, 11689, 11690}, + {9469, 11695, 11695}, + {3606, 11699, 11699}, + {9560, 11708, 11709}, + {1564, 11714, 11714}, + {10527, 11718, 11718}, + {3071, 11723, 11726}, + {11590, 11731, 11732}, + {6605, 11737, 11737}, + {11624, 11741, 11745}, + {7822, 11749, 11752}, + {5269, 11757, 11758}, + {1339, 11767, 11767}, + {1363, 11771, 11773}, + {3704, 11777, 11777}, + {10952, 11781, 11783}, + {6764, 11793, 11795}, + {8675, 11800, 11800}, + {9963, 11804, 11804}, + {11573, 11808, 11809}, + {9548, 11813, 11813}, + {11591, 11817, 11818}, + {11446, 11822, 11822}, + {9224, 11828, 11828}, + {3158, 11836, 11836}, + {10830, 11840, 11840}, + {7234, 11846, 11846}, + {11299, 11850, 11850}, + {11544, 11854, 11855}, + {11498, 11859, 11859}, + {10993, 11865, 11868}, + {9720, 11872, 11878}, + {10489, 11882, 11890}, + {11712, 11898, 11904}, + {11516, 11908, 11910}, + {11568, 11914, 11915}, + {10177, 11919, 11924}, + {11363, 11928, 11929}, + {10494, 11933, 11933}, + {9870, 11937, 11938}, + {9427, 11942, 11942}, + {11481, 11949, 11949}, + {6030, 11955, 11957}, + {11718, 11961, 11961}, + {10531, 11965, 11983}, + {5126, 11987, 11987}, + {7515, 11991, 11991}, + {10646, 11996, 11997}, + {2947, 12001, 12001}, + {9582, 12009, 12010}, + {6202, 12017, 12018}, + {11714, 12022, 12022}, + {9235, 12033, 12037}, + {9721, 12041, 12044}, + {11932, 12051, 12052}, + {12040, 12056, 12056}, + {12051, 12060, 12060}, + {11601, 12066, 12066}, + {8426, 12070, 12070}, + {4053, 12077, 12077}, + {4262, 12081, 12081}, + {9761, 12086, 12088}, + {11582, 12092, 12093}, + 
{10965, 12097, 12098}, + {11803, 12103, 12104}, + {11933, 12108, 12109}, + {10688, 12117, 12117}, + {12107, 12125, 12126}, + {6774, 12130, 12132}, + {6286, 12137, 12137}, + {9543, 12141, 12141}, + {12097, 12145, 12146}, + {10790, 12150, 12150}, + {10125, 12154, 12156}, + {12125, 12164, 12164}, + {12064, 12168, 12172}, + {10811, 12178, 12188}, + {12092, 12192, 12193}, + {10058, 12197, 12198}, + {11611, 12211, 12212}, + {3459, 12216, 12216}, + {10291, 12225, 12228}, + {12191, 12232, 12234}, + {12145, 12238, 12238}, + {12001, 12242, 12250}, + {3840, 12255, 12255}, + {12216, 12259, 12259}, + {674, 12272, 12272}, + {12141, 12276, 12276}, + {10766, 12280, 12280}, + {11545, 12284, 12284}, + {6496, 12290, 12290}, + {11381, 12294, 12295}, + {603, 12302, 12303}, + {12276, 12308, 12308}, + {11850, 12313, 12314}, + {565, 12319, 12319}, + {9351, 12324, 12324}, + {11822, 12328, 12328}, + {2691, 12333, 12334}, + {11840, 12338, 12338}, + {11070, 12343, 12343}, + {9510, 12347, 12347}, + {11024, 12352, 12353}, + {7173, 12359, 12359}, + {517, 12363, 12363}, + {6311, 12367, 12368}, + {11367, 12372, 12373}, + {12008, 12377, 12377}, + {11372, 12382, 12384}, + {11358, 12391, 12392}, + {11382, 12396, 12396}, + {6882, 12400, 12401}, + {11246, 12405, 12405}, + {8359, 12409, 12412}, + {10154, 12418, 12418}, + {12016, 12425, 12426}, + {8972, 12434, 12435}, + {10478, 12439, 12440}, + {12395, 12449, 12449}, + {11612, 12454, 12454}, + {12347, 12458, 12458}, + {10700, 12466, 12467}, + {3637, 12471, 12476}, + {1042, 12480, 12481}, + {6747, 12488, 12488}, + {12396, 12492, 12493}, + {9420, 12497, 12497}, + {11285, 12501, 12510}, + {4470, 12515, 12515}, + {9374, 12519, 12519}, + {11293, 12528, 12528}, + {2058, 12534, 12535}, + {6521, 12539, 12539}, + {12492, 12543, 12543}, + {3043, 12547, 12547}, + {2982, 12551, 12553}, + {11030, 12557, 12563}, + {7636, 12568, 12568}, + {9639, 12572, 12572}, + {12543, 12576, 12576}, + {5989, 12580, 12583}, + {11051, 12587, 12587}, + {1061, 12592, 12594}, + {12313, 12599, 12601}, + {11846, 12605, 12605}, + {12576, 12609, 12609}, + {11040, 12618, 12625}, + {12479, 12629, 12629}, + {6903, 12633, 12633}, + {12322, 12639, 12639}, + {12253, 12643, 12645}, + {5594, 12651, 12651}, + {12522, 12655, 12655}, + {11703, 12659, 12659}, + {1377, 12665, 12665}, + {8022, 12669, 12669}, + {12280, 12674, 12674}, + {9023, 12680, 12681}, + {12328, 12685, 12685}, + {3085, 12689, 12693}, + {4700, 12698, 12698}, + {10224, 12702, 12702}, + {8781, 12706, 12706}, + {1651, 12710, 12710}, + {12458, 12714, 12714}, + {12005, 12718, 12721}, + {11908, 12725, 12726}, + {8202, 12733, 12733}, + {11708, 12739, 12740}, + {12599, 12744, 12745}, + {12284, 12749, 12749}, + {5285, 12756, 12756}, + {12055, 12775, 12777}, + {6919, 12782, 12782}, + {12242, 12786, 12786}, + {12009, 12790, 12790}, + {9628, 12794, 12796}, + {11354, 12801, 12802}, + {10225, 12806, 12807}, + {579, 12813, 12813}, + {8935, 12817, 12822}, + {8753, 12827, 12829}, + {11006, 12835, 12835}, + {858, 12841, 12845}, + {476, 12849, 12849}, + {7667, 12854, 12854}, + {12760, 12860, 12871}, + {11677, 12875, 12877}, + {12714, 12881, 12881}, + {12731, 12885, 12890}, + {7108, 12894, 12896}, + {1165, 12900, 12900}, + {4021, 12906, 12906}, + {10829, 12910, 12911}, + {12331, 12915, 12915}, + {8887, 12919, 12921}, + {11639, 12925, 12925}, + {7964, 12929, 12929}, + {12528, 12937, 12937}, + {8148, 12941, 12941}, + {12770, 12948, 12950}, + {12609, 12954, 12954}, + {12685, 12958, 12958}, + {2803, 12962, 12962}, + {9561, 12966, 12966}, + {6671, 12972, 12973}, + {12056, 12977, 
12977}, + {6380, 12981, 12981}, + {12048, 12985, 12985}, + {11961, 12989, 12993}, + {3368, 12997, 12999}, + {6634, 13004, 13004}, + {6775, 13009, 13010}, + {12136, 13014, 13019}, + {10341, 13023, 13023}, + {13002, 13027, 13027}, + {10587, 13031, 13031}, + {10307, 13035, 13035}, + {12736, 13039, 13039}, + {12744, 13043, 13044}, + {6175, 13048, 13048}, + {9702, 13053, 13054}, + {662, 13059, 13061}, + {12718, 13065, 13068}, + {12893, 13072, 13075}, + {8299, 13086, 13091}, + {12604, 13095, 13096}, + {12848, 13100, 13101}, + {12749, 13105, 13105}, + {12526, 13109, 13114}, + {9173, 13122, 13122}, + {12769, 13128, 13128}, + {13038, 13132, 13132}, + {12725, 13136, 13137}, + {12639, 13146, 13146}, + {9711, 13150, 13151}, + {12137, 13155, 13155}, + {13039, 13159, 13159}, + {4681, 13163, 13164}, + {12954, 13168, 13168}, + {13158, 13175, 13176}, + {13105, 13180, 13180}, + {10754, 13184, 13184}, + {13167, 13188, 13188}, + {12658, 13192, 13192}, + {4294, 13199, 13200}, + {11682, 13204, 13205}, + {11695, 13209, 13209}, + {11076, 13214, 13214}, + {12232, 13218, 13218}, + {9399, 13223, 13224}, + {12880, 13228, 13229}, + {13048, 13234, 13234}, + {9701, 13238, 13239}, + {13209, 13243, 13243}, + {3658, 13248, 13248}, + {3698, 13252, 13254}, + {12237, 13260, 13260}, + {8872, 13266, 13266}, + {12957, 13272, 13273}, + {1393, 13281, 13281}, + {2013, 13285, 13288}, + {4244, 13296, 13299}, + {9428, 13303, 13303}, + {12702, 13307, 13307}, + {13078, 13311, 13311}, + {6071, 13315, 13315}, + {3061, 13319, 13319}, + {2051, 13324, 13324}, + {11560, 13328, 13331}, + {6584, 13336, 13336}, + {8482, 13340, 13340}, + {5331, 13344, 13344}, + {4171, 13348, 13348}, + {8501, 13352, 13352}, + {9219, 13356, 13356}, + {9473, 13360, 13363}, + {12881, 13367, 13367}, + {13065, 13371, 13375}, + {2979, 13379, 13384}, + {1518, 13388, 13388}, + {11177, 13392, 13392}, + {9457, 13398, 13398}, + {12293, 13407, 13410}, + {3697, 13414, 13417}, + {10338, 13425, 13425}, + {13367, 13429, 13429}, + {11074, 13433, 13437}, + {4201, 13441, 13443}, + {1812, 13447, 13448}, + {13360, 13452, 13456}, + {13188, 13463, 13463}, + {9732, 13470, 13470}, + {11332, 13477, 13477}, + {9918, 13487, 13487}, + {6337, 13497, 13497}, + {13429, 13501, 13501}, + {11413, 13505, 13505}, + {4685, 13512, 13513}, + {13136, 13517, 13519}, + {7416, 13528, 13530}, + {12929, 13534, 13534}, + {11110, 13539, 13539}, + {11521, 13543, 13543}, + {12825, 13553, 13553}, + {13447, 13557, 13558}, + {12299, 13562, 13563}, + {9003, 13570, 13570}, + {12500, 13577, 13577}, + {13501, 13581, 13581}, + {9392, 13586, 13586}, + {12454, 13590, 13590}, + {6189, 13595, 13595}, + {13053, 13599, 13599}, + {11881, 13604, 13604}, + {13159, 13608, 13608}, + {4894, 13612, 13612}, + {13221, 13621, 13621}, + {8950, 13625, 13625}, + {13533, 13629, 13629}, + {9633, 13633, 13633}, + {7892, 13637, 13639}, + {13581, 13643, 13643}, + {13616, 13647, 13649}, + {12794, 13653, 13654}, + {8919, 13659, 13659}, + {9674, 13663, 13663}, + {13577, 13668, 13668}, + {12966, 13672, 13672}, + {12659, 13676, 13683}, + {6124, 13688, 13688}, + {9225, 13693, 13695}, + {11833, 13702, 13702}, + {12904, 13709, 13717}, + {13647, 13721, 13722}, + {11687, 13726, 13727}, + {12434, 13731, 13732}, + {12689, 13736, 13742}, + {13168, 13746, 13746}, + {6151, 13751, 13752}, + {11821, 13756, 13757}, + {6467, 13764, 13764}, + {5730, 13769, 13769}, + {5136, 13780, 13780}, + {724, 13784, 13785}, + {13517, 13789, 13791}, + {640, 13795, 13796}, + {7721, 13800, 13802}, + {11121, 13806, 13807}, + {5791, 13811, 13815}, + {12894, 13819, 13819}, + {11100, 
13824, 13824}, + {7011, 13830, 13830}, + {7129, 13834, 13837}, + {13833, 13841, 13841}, + {11276, 13847, 13847}, + {13621, 13853, 13853}, + {13589, 13862, 13863}, + {12989, 13867, 13867}, + {12789, 13871, 13871}, + {1239, 13875, 13875}, + {4675, 13879, 13881}, + {4686, 13885, 13885}, + {707, 13889, 13889}, + {5449, 13897, 13898}, + {13867, 13902, 13903}, + {10613, 13908, 13908}, + {13789, 13912, 13914}, + {4451, 13918, 13919}, + {9200, 13924, 13924}, + {2011, 13930, 13930}, + {11433, 13934, 13936}, + {4695, 13942, 13943}, + {9435, 13948, 13951}, + {13688, 13955, 13957}, + {11694, 13961, 13962}, + {5712, 13966, 13966}, + {5991, 13970, 13972}, + {13477, 13976, 13976}, + {10213, 13987, 13987}, + {11839, 13991, 13993}, + {12272, 13997, 13997}, + {6206, 14001, 14001}, + {13179, 14006, 14007}, + {2939, 14011, 14011}, + {12972, 14016, 14017}, + {13918, 14021, 14022}, + {7436, 14026, 14027}, + {7678, 14032, 14034}, + {13586, 14040, 14040}, + {13347, 14044, 14044}, + {13109, 14048, 14051}, + {9244, 14055, 14057}, + {13315, 14061, 14061}, + {13276, 14067, 14067}, + {11435, 14073, 14074}, + {13853, 14078, 14078}, + {13452, 14082, 14082}, + {14044, 14087, 14087}, + {4440, 14091, 14095}, + {4479, 14100, 14103}, + {9395, 14107, 14109}, + {6834, 14119, 14119}, + {10458, 14123, 14124}, + {1429, 14129, 14129}, + {8443, 14135, 14135}, + {10365, 14140, 14140}, + {5267, 14145, 14145}, + {11834, 14151, 14153}, +} diff --git a/vendor/github.com/klauspost/compress/snappy/runbench.cmd b/vendor/github.com/klauspost/compress/snappy/runbench.cmd new file mode 100644 index 0000000..d24eb4b --- /dev/null +++ b/vendor/github.com/klauspost/compress/snappy/runbench.cmd @@ -0,0 +1,2 @@ +del old.txt +go test -bench=. >>old.txt && go test -bench=. >>old.txt && go test -bench=. >>old.txt && benchstat -delta-test=ttest old.txt new.txt diff --git a/vendor/github.com/klauspost/compress/snappy/snappy.go b/vendor/github.com/klauspost/compress/snappy/snappy.go new file mode 100644 index 0000000..c7f445f --- /dev/null +++ b/vendor/github.com/klauspost/compress/snappy/snappy.go @@ -0,0 +1,87 @@ +// Copyright 2011 The Snappy-Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package snappy implements the snappy block-based compression format. +// It aims for very high speeds and reasonable compression. +// +// The C++ snappy implementation is at https://github.com/google/snappy +package snappy + +import ( + "hash/crc32" +) + +/* +Each encoded block begins with the varint-encoded length of the decoded data, +followed by a sequence of chunks. Chunks begin and end on byte boundaries. The +first byte of each chunk is broken into its 2 least and 6 most significant bits +called l and m: l ranges in [0, 4) and m ranges in [0, 64). l is the chunk tag. +Zero means a literal tag. All other values mean a copy tag. + +For literal tags: + - If m < 60, the next 1 + m bytes are literal bytes. + - Otherwise, let n be the little-endian unsigned integer denoted by the next + m - 59 bytes. The next 1 + n bytes after that are literal bytes. + +For copy tags, length bytes are copied from offset bytes ago, in the style of +Lempel-Ziv compression algorithms. In particular: + - For l == 1, the offset ranges in [0, 1<<11) and the length in [4, 12). + The length is 4 + the low 3 bits of m. The high 3 bits of m form bits 8-10 + of the offset. The next byte is bits 0-7 of the offset. + - For l == 2, the offset ranges in [0, 1<<16) and the length in [1, 65). 
+ The length is 1 + m. The offset is the little-endian unsigned integer + denoted by the next 2 bytes. + - For l == 3, this tag is a legacy format that is no longer issued by most + encoders. Nonetheless, the offset ranges in [0, 1<<32) and the length in + [1, 65). The length is 1 + m. The offset is the little-endian unsigned + integer denoted by the next 4 bytes. +*/ +const ( + tagLiteral = 0x00 + tagCopy1 = 0x01 + tagCopy2 = 0x02 + tagCopy4 = 0x03 +) + +const ( + checksumSize = 4 + chunkHeaderSize = 4 + magicChunk = "\xff\x06\x00\x00" + magicBody + magicBody = "sNaPpY" + + // maxBlockSize is the maximum size of the input to encodeBlock. It is not + // part of the wire format per se, but some parts of the encoder assume + // that an offset fits into a uint16. + // + // Also, for the framing format (Writer type instead of Encode function), + // https://github.com/google/snappy/blob/master/framing_format.txt says + // that "the uncompressed data in a chunk must be no longer than 65536 + // bytes". + maxBlockSize = 65536 + + // maxEncodedLenOfMaxBlockSize equals MaxEncodedLen(maxBlockSize), but is + // hard coded to be a const instead of a variable, so that obufLen can also + // be a const. Their equivalence is confirmed by + // TestMaxEncodedLenOfMaxBlockSize. + maxEncodedLenOfMaxBlockSize = 76490 + + obufHeaderLen = len(magicChunk) + checksumSize + chunkHeaderSize + obufLen = obufHeaderLen + maxEncodedLenOfMaxBlockSize +) + +const ( + chunkTypeCompressedData = 0x00 + chunkTypeUncompressedData = 0x01 + chunkTypePadding = 0xfe + chunkTypeStreamIdentifier = 0xff +) + +var crcTable = crc32.MakeTable(crc32.Castagnoli) + +// crc implements the checksum specified in section 3 of +// https://github.com/google/snappy/blob/master/framing_format.txt +func crc(b []byte) uint32 { + c := crc32.Update(0, crcTable, b) + return uint32(c>>15|c<<17) + 0xa282ead8 +} diff --git a/vendor/github.com/klauspost/compress/snappy/snappy_test.go b/vendor/github.com/klauspost/compress/snappy/snappy_test.go new file mode 100644 index 0000000..2712710 --- /dev/null +++ b/vendor/github.com/klauspost/compress/snappy/snappy_test.go @@ -0,0 +1,1353 @@ +// Copyright 2011 The Snappy-Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package snappy + +import ( + "bytes" + "encoding/binary" + "flag" + "fmt" + "io" + "io/ioutil" + "math/rand" + "net/http" + "os" + "os/exec" + "path/filepath" + "runtime" + "strings" + "testing" +) + +var ( + download = flag.Bool("download", false, "If true, download any missing files before running benchmarks") + testdataDir = flag.String("testdataDir", "testdata", "Directory containing the test data") + benchdataDir = flag.String("benchdataDir", "testdata/bench", "Directory containing the benchmark data") +) + +// goEncoderShouldMatchCppEncoder is whether to test that the algorithm used by +// Go's encoder matches byte-for-byte what the C++ snappy encoder produces, on +// this GOARCH. There is more than one valid encoding of any given input, and +// there is more than one good algorithm along the frontier of trading off +// throughput for output size. Nonetheless, we presume that the C++ encoder's +// algorithm is a good one and has been tested on a wide range of inputs, so +// matching that exactly should mean that the Go encoder's algorithm is also +// good, without needing to gather our own corpus of test data. 
+// +// The exact algorithm used by the C++ code is potentially endian dependent, as +// it puns a byte pointer to a uint32 pointer to load, hash and compare 4 bytes +// at a time. The Go implementation is endian agnostic, in that its output is +// the same (as little-endian C++ code), regardless of the CPU's endianness. +// +// Thus, when comparing Go's output to C++ output generated beforehand, such as +// the "testdata/pi.txt.rawsnappy" file generated by C++ code on a little- +// endian system, we can run that test regardless of the runtime.GOARCH value. +// +// When comparing Go's output to dynamically generated C++ output, i.e. the +// result of fork/exec'ing a C++ program, we can run that test only on +// little-endian systems, because the C++ output might be different on +// big-endian systems. The runtime package doesn't export endianness per se, +// but we can restrict this match-C++ test to common little-endian systems. +const goEncoderShouldMatchCppEncoder = runtime.GOARCH == "386" || runtime.GOARCH == "amd64" || runtime.GOARCH == "arm" + +func TestMaxEncodedLenOfMaxBlockSize(t *testing.T) { + got := maxEncodedLenOfMaxBlockSize + want := MaxEncodedLen(maxBlockSize) + if got != want { + t.Fatalf("got %d, want %d", got, want) + } +} + +func cmp(a, b []byte) error { + if bytes.Equal(a, b) { + return nil + } + if len(a) != len(b) { + return fmt.Errorf("got %d bytes, want %d", len(a), len(b)) + } + for i := range a { + if a[i] != b[i] { + return fmt.Errorf("byte #%d: got 0x%02x, want 0x%02x", i, a[i], b[i]) + } + } + return nil +} + +func roundtrip(b, ebuf, dbuf []byte) error { + d, err := Decode(dbuf, Encode(ebuf, b)) + if err != nil { + return fmt.Errorf("decoding error: %v", err) + } + if err := cmp(d, b); err != nil { + return fmt.Errorf("roundtrip mismatch: %v", err) + } + return nil +} + +func TestEmpty(t *testing.T) { + if err := roundtrip(nil, nil, nil); err != nil { + t.Fatal(err) + } +} + +func TestSmallCopy(t *testing.T) { + for _, ebuf := range [][]byte{nil, make([]byte, 20), make([]byte, 64)} { + for _, dbuf := range [][]byte{nil, make([]byte, 20), make([]byte, 64)} { + for i := 0; i < 32; i++ { + s := "aaaa" + strings.Repeat("b", i) + "aaaabbbb" + if err := roundtrip([]byte(s), ebuf, dbuf); err != nil { + t.Errorf("len(ebuf)=%d, len(dbuf)=%d, i=%d: %v", len(ebuf), len(dbuf), i, err) + } + } + } + } +} + +func TestSmallRand(t *testing.T) { + rng := rand.New(rand.NewSource(1)) + for n := 1; n < 20000; n += 23 { + b := make([]byte, n) + for i := range b { + b[i] = uint8(rng.Intn(256)) + } + if err := roundtrip(b, nil, nil); err != nil { + t.Fatal(err) + } + } +} + +func TestSmallRegular(t *testing.T) { + for n := 1; n < 20000; n += 23 { + b := make([]byte, n) + for i := range b { + b[i] = uint8(i%10 + 'a') + } + if err := roundtrip(b, nil, nil); err != nil { + t.Fatal(err) + } + } +} + +func TestInvalidVarint(t *testing.T) { + testCases := []struct { + desc string + input string + }{{ + "invalid varint, final byte has continuation bit set", + "\xff", + }, { + "invalid varint, value overflows uint64", + "\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\x00", + }, { + // https://github.com/google/snappy/blob/master/format_description.txt + // says that "the stream starts with the uncompressed length [as a + // varint] (up to a maximum of 2^32 - 1)". 
+ "valid varint (as uint64), but value overflows uint32", + "\x80\x80\x80\x80\x10", + }} + + for _, tc := range testCases { + input := []byte(tc.input) + if _, err := DecodedLen(input); err != ErrCorrupt { + t.Errorf("%s: DecodedLen: got %v, want ErrCorrupt", tc.desc, err) + } + if _, err := Decode(nil, input); err != ErrCorrupt { + t.Errorf("%s: Decode: got %v, want ErrCorrupt", tc.desc, err) + } + } +} + +func TestDecode(t *testing.T) { + lit40Bytes := make([]byte, 40) + for i := range lit40Bytes { + lit40Bytes[i] = byte(i) + } + lit40 := string(lit40Bytes) + + testCases := []struct { + desc string + input string + want string + wantErr error + }{{ + `decodedLen=0; valid input`, + "\x00", + "", + nil, + }, { + `decodedLen=3; tagLiteral, 0-byte length; length=3; valid input`, + "\x03" + "\x08\xff\xff\xff", + "\xff\xff\xff", + nil, + }, { + `decodedLen=2; tagLiteral, 0-byte length; length=3; not enough dst bytes`, + "\x02" + "\x08\xff\xff\xff", + "", + ErrCorrupt, + }, { + `decodedLen=3; tagLiteral, 0-byte length; length=3; not enough src bytes`, + "\x03" + "\x08\xff\xff", + "", + ErrCorrupt, + }, { + `decodedLen=40; tagLiteral, 0-byte length; length=40; valid input`, + "\x28" + "\x9c" + lit40, + lit40, + nil, + }, { + `decodedLen=1; tagLiteral, 1-byte length; not enough length bytes`, + "\x01" + "\xf0", + "", + ErrCorrupt, + }, { + `decodedLen=3; tagLiteral, 1-byte length; length=3; valid input`, + "\x03" + "\xf0\x02\xff\xff\xff", + "\xff\xff\xff", + nil, + }, { + `decodedLen=1; tagLiteral, 2-byte length; not enough length bytes`, + "\x01" + "\xf4\x00", + "", + ErrCorrupt, + }, { + `decodedLen=3; tagLiteral, 2-byte length; length=3; valid input`, + "\x03" + "\xf4\x02\x00\xff\xff\xff", + "\xff\xff\xff", + nil, + }, { + `decodedLen=1; tagLiteral, 3-byte length; not enough length bytes`, + "\x01" + "\xf8\x00\x00", + "", + ErrCorrupt, + }, { + `decodedLen=3; tagLiteral, 3-byte length; length=3; valid input`, + "\x03" + "\xf8\x02\x00\x00\xff\xff\xff", + "\xff\xff\xff", + nil, + }, { + `decodedLen=1; tagLiteral, 4-byte length; not enough length bytes`, + "\x01" + "\xfc\x00\x00\x00", + "", + ErrCorrupt, + }, { + `decodedLen=1; tagLiteral, 4-byte length; length=3; not enough dst bytes`, + "\x01" + "\xfc\x02\x00\x00\x00\xff\xff\xff", + "", + ErrCorrupt, + }, { + `decodedLen=4; tagLiteral, 4-byte length; length=3; not enough src bytes`, + "\x04" + "\xfc\x02\x00\x00\x00\xff", + "", + ErrCorrupt, + }, { + `decodedLen=3; tagLiteral, 4-byte length; length=3; valid input`, + "\x03" + "\xfc\x02\x00\x00\x00\xff\xff\xff", + "\xff\xff\xff", + nil, + }, { + `decodedLen=4; tagCopy1, 1 extra length|offset byte; not enough extra bytes`, + "\x04" + "\x01", + "", + ErrCorrupt, + }, { + `decodedLen=4; tagCopy2, 2 extra length|offset bytes; not enough extra bytes`, + "\x04" + "\x02\x00", + "", + ErrCorrupt, + }, { + `decodedLen=4; tagCopy4, 4 extra length|offset bytes; not enough extra bytes`, + "\x04" + "\x03\x00\x00\x00", + "", + ErrCorrupt, + }, { + `decodedLen=4; tagLiteral (4 bytes "abcd"); valid input`, + "\x04" + "\x0cabcd", + "abcd", + nil, + }, { + `decodedLen=13; tagLiteral (4 bytes "abcd"); tagCopy1; length=9 offset=4; valid input`, + "\x0d" + "\x0cabcd" + "\x15\x04", + "abcdabcdabcda", + nil, + }, { + `decodedLen=8; tagLiteral (4 bytes "abcd"); tagCopy1; length=4 offset=4; valid input`, + "\x08" + "\x0cabcd" + "\x01\x04", + "abcdabcd", + nil, + }, { + `decodedLen=8; tagLiteral (4 bytes "abcd"); tagCopy1; length=4 offset=2; valid input`, + "\x08" + "\x0cabcd" + "\x01\x02", + "abcdcdcd", + nil, + }, { + 
`decodedLen=8; tagLiteral (4 bytes "abcd"); tagCopy1; length=4 offset=1; valid input`, + "\x08" + "\x0cabcd" + "\x01\x01", + "abcddddd", + nil, + }, { + `decodedLen=8; tagLiteral (4 bytes "abcd"); tagCopy1; length=4 offset=0; zero offset`, + "\x08" + "\x0cabcd" + "\x01\x00", + "", + ErrCorrupt, + }, { + `decodedLen=9; tagLiteral (4 bytes "abcd"); tagCopy1; length=4 offset=4; inconsistent dLen`, + "\x09" + "\x0cabcd" + "\x01\x04", + "", + ErrCorrupt, + }, { + `decodedLen=8; tagLiteral (4 bytes "abcd"); tagCopy1; length=4 offset=5; offset too large`, + "\x08" + "\x0cabcd" + "\x01\x05", + "", + ErrCorrupt, + }, { + `decodedLen=7; tagLiteral (4 bytes "abcd"); tagCopy1; length=4 offset=4; length too large`, + "\x07" + "\x0cabcd" + "\x01\x04", + "", + ErrCorrupt, + }, { + `decodedLen=6; tagLiteral (4 bytes "abcd"); tagCopy2; length=2 offset=3; valid input`, + "\x06" + "\x0cabcd" + "\x06\x03\x00", + "abcdbc", + nil, + }, { + `decodedLen=6; tagLiteral (4 bytes "abcd"); tagCopy4; length=2 offset=3; valid input`, + "\x06" + "\x0cabcd" + "\x07\x03\x00\x00\x00", + "abcdbc", + nil, + }} + + const ( + // notPresentXxx defines a range of byte values [0xa0, 0xc5) that are + // not present in either the input or the output. It is written to dBuf + // to check that Decode does not write bytes past the end of + // dBuf[:dLen]. + // + // The magic number 37 was chosen because it is prime. A more 'natural' + // number like 32 might lead to a false negative if, for example, a + // byte was incorrectly copied 4*8 bytes later. + notPresentBase = 0xa0 + notPresentLen = 37 + ) + + var dBuf [100]byte +loop: + for i, tc := range testCases { + input := []byte(tc.input) + for _, x := range input { + if notPresentBase <= x && x < notPresentBase+notPresentLen { + t.Errorf("#%d (%s): input shouldn't contain %#02x\ninput: % x", i, tc.desc, x, input) + continue loop + } + } + + dLen, n := binary.Uvarint(input) + if n <= 0 { + t.Errorf("#%d (%s): invalid varint-encoded dLen", i, tc.desc) + continue + } + if dLen > uint64(len(dBuf)) { + t.Errorf("#%d (%s): dLen %d is too large", i, tc.desc, dLen) + continue + } + + for j := range dBuf { + dBuf[j] = byte(notPresentBase + j%notPresentLen) + } + g, gotErr := Decode(dBuf[:], input) + if got := string(g); got != tc.want || gotErr != tc.wantErr { + t.Errorf("#%d (%s):\ngot %q, %v\nwant %q, %v", + i, tc.desc, got, gotErr, tc.want, tc.wantErr) + continue + } + for j, x := range dBuf { + if uint64(j) < dLen { + continue + } + if w := byte(notPresentBase + j%notPresentLen); x != w { + t.Errorf("#%d (%s): Decode overrun: dBuf[%d] was modified: got %#02x, want %#02x\ndBuf: % x", + i, tc.desc, j, x, w, dBuf) + continue loop + } + } + } +} + +func TestDecodeCopy4(t *testing.T) { + dots := strings.Repeat(".", 65536) + + input := strings.Join([]string{ + "\x89\x80\x04", // decodedLen = 65545. + "\x0cpqrs", // 4-byte literal "pqrs". + "\xf4\xff\xff" + dots, // 65536-byte literal dots. + "\x13\x04\x00\x01\x00", // tagCopy4; length=5 offset=65540. + }, "") + + gotBytes, err := Decode(nil, []byte(input)) + if err != nil { + t.Fatal(err) + } + got := string(gotBytes) + want := "pqrs" + dots + "pqrs." + if len(got) != len(want) { + t.Fatalf("got %d bytes, want %d", len(got), len(want)) + } + if got != want { + for i := 0; i < len(got); i++ { + if g, w := got[i], want[i]; g != w { + t.Fatalf("byte #%d: got %#02x, want %#02x", i, g, w) + } + } + } +} + +// TestDecodeLengthOffset tests decoding an encoding of the form literal + +// copy-length-offset + literal. 
For example: "abcdefghijkl" + "efghij" + "AB". +func TestDecodeLengthOffset(t *testing.T) { + const ( + prefix = "abcdefghijklmnopqr" + suffix = "ABCDEFGHIJKLMNOPQR" + + // notPresentXxx defines a range of byte values [0xa0, 0xc5) that are + // not present in either the input or the output. It is written to + // gotBuf to check that Decode does not write bytes past the end of + // gotBuf[:totalLen]. + // + // The magic number 37 was chosen because it is prime. A more 'natural' + // number like 32 might lead to a false negative if, for example, a + // byte was incorrectly copied 4*8 bytes later. + notPresentBase = 0xa0 + notPresentLen = 37 + ) + var gotBuf, wantBuf, inputBuf [128]byte + for length := 1; length <= 18; length++ { + for offset := 1; offset <= 18; offset++ { + loop: + for suffixLen := 0; suffixLen <= 18; suffixLen++ { + totalLen := len(prefix) + length + suffixLen + + inputLen := binary.PutUvarint(inputBuf[:], uint64(totalLen)) + inputBuf[inputLen] = tagLiteral + 4*byte(len(prefix)-1) + inputLen++ + inputLen += copy(inputBuf[inputLen:], prefix) + inputBuf[inputLen+0] = tagCopy2 + 4*byte(length-1) + inputBuf[inputLen+1] = byte(offset) + inputBuf[inputLen+2] = 0x00 + inputLen += 3 + if suffixLen > 0 { + inputBuf[inputLen] = tagLiteral + 4*byte(suffixLen-1) + inputLen++ + inputLen += copy(inputBuf[inputLen:], suffix[:suffixLen]) + } + input := inputBuf[:inputLen] + + for i := range gotBuf { + gotBuf[i] = byte(notPresentBase + i%notPresentLen) + } + got, err := Decode(gotBuf[:], input) + if err != nil { + t.Errorf("length=%d, offset=%d; suffixLen=%d: %v", length, offset, suffixLen, err) + continue + } + + wantLen := 0 + wantLen += copy(wantBuf[wantLen:], prefix) + for i := 0; i < length; i++ { + wantBuf[wantLen] = wantBuf[wantLen-offset] + wantLen++ + } + wantLen += copy(wantBuf[wantLen:], suffix[:suffixLen]) + want := wantBuf[:wantLen] + + for _, x := range input { + if notPresentBase <= x && x < notPresentBase+notPresentLen { + t.Errorf("length=%d, offset=%d; suffixLen=%d: input shouldn't contain %#02x\ninput: % x", + length, offset, suffixLen, x, input) + continue loop + } + } + for i, x := range gotBuf { + if i < totalLen { + continue + } + if w := byte(notPresentBase + i%notPresentLen); x != w { + t.Errorf("length=%d, offset=%d; suffixLen=%d; totalLen=%d: "+ + "Decode overrun: gotBuf[%d] was modified: got %#02x, want %#02x\ngotBuf: % x", + length, offset, suffixLen, totalLen, i, x, w, gotBuf) + continue loop + } + } + for _, x := range want { + if notPresentBase <= x && x < notPresentBase+notPresentLen { + t.Errorf("length=%d, offset=%d; suffixLen=%d: want shouldn't contain %#02x\nwant: % x", + length, offset, suffixLen, x, want) + continue loop + } + } + + if !bytes.Equal(got, want) { + t.Errorf("length=%d, offset=%d; suffixLen=%d:\ninput % x\ngot % x\nwant % x", + length, offset, suffixLen, input, got, want) + continue + } + } + } + } +} + +const ( + goldenText = "Mark.Twain-Tom.Sawyer.txt" + goldenCompressed = goldenText + ".rawsnappy" +) + +func TestDecodeGoldenInput(t *testing.T) { + tDir := filepath.FromSlash(*testdataDir) + src, err := ioutil.ReadFile(filepath.Join(tDir, goldenCompressed)) + if err != nil { + t.Fatalf("ReadFile: %v", err) + } + got, err := Decode(nil, src) + if err != nil { + t.Fatalf("Decode: %v", err) + } + want, err := ioutil.ReadFile(filepath.Join(tDir, goldenText)) + if err != nil { + t.Fatalf("ReadFile: %v", err) + } + if err := cmp(got, want); err != nil { + t.Fatal(err) + } +} + +func TestEncodeGoldenInput(t *testing.T) { + tDir := 
filepath.FromSlash(*testdataDir) + src, err := ioutil.ReadFile(filepath.Join(tDir, goldenText)) + if err != nil { + t.Fatalf("ReadFile: %v", err) + } + got := Encode(nil, src) + want, err := ioutil.ReadFile(filepath.Join(tDir, goldenCompressed)) + if err != nil { + t.Fatalf("ReadFile: %v", err) + } + if err := cmp(got, want); err != nil { + t.Fatal(err) + } +} + +func TestExtendMatchGoldenInput(t *testing.T) { + tDir := filepath.FromSlash(*testdataDir) + src, err := ioutil.ReadFile(filepath.Join(tDir, goldenText)) + if err != nil { + t.Fatalf("ReadFile: %v", err) + } + for i, tc := range extendMatchGoldenTestCases { + got := extendMatch(src, tc.i, tc.j) + if got != tc.want { + t.Errorf("test #%d: i, j = %5d, %5d: got %5d (= j + %6d), want %5d (= j + %6d)", + i, tc.i, tc.j, got, got-tc.j, tc.want, tc.want-tc.j) + } + } +} + +func TestExtendMatch(t *testing.T) { + // ref is a simple, reference implementation of extendMatch. + ref := func(src []byte, i, j int) int { + for ; j < len(src) && src[i] == src[j]; i, j = i+1, j+1 { + } + return j + } + + nums := []int{0, 1, 2, 7, 8, 9, 29, 30, 31, 32, 33, 34, 38, 39, 40} + for yIndex := 40; yIndex > 30; yIndex-- { + xxx := bytes.Repeat([]byte("x"), 40) + if yIndex < len(xxx) { + xxx[yIndex] = 'y' + } + for _, i := range nums { + for _, j := range nums { + if i >= j { + continue + } + got := extendMatch(xxx, i, j) + want := ref(xxx, i, j) + if got != want { + t.Errorf("yIndex=%d, i=%d, j=%d: got %d, want %d", yIndex, i, j, got, want) + } + } + } + } +} + +const snappytoolCmdName = "cmd/snappytool/snappytool" + +func skipTestSameEncodingAsCpp() (msg string) { + if !goEncoderShouldMatchCppEncoder { + return fmt.Sprintf("skipping testing that the encoding is byte-for-byte identical to C++: GOARCH=%s", runtime.GOARCH) + } + if _, err := os.Stat(snappytoolCmdName); err != nil { + return fmt.Sprintf("could not find snappytool: %v", err) + } + return "" +} + +func runTestSameEncodingAsCpp(src []byte) error { + got := Encode(nil, src) + + cmd := exec.Command(snappytoolCmdName, "-e") + cmd.Stdin = bytes.NewReader(src) + want, err := cmd.Output() + if err != nil { + return fmt.Errorf("could not run snappytool: %v", err) + } + return cmp(got, want) +} + +func TestSameEncodingAsCppShortCopies(t *testing.T) { + if msg := skipTestSameEncodingAsCpp(); msg != "" { + t.Skip(msg) + } + src := bytes.Repeat([]byte{'a'}, 20) + for i := 0; i <= len(src); i++ { + if err := runTestSameEncodingAsCpp(src[:i]); err != nil { + t.Errorf("i=%d: %v", i, err) + } + } +} + +func TestSameEncodingAsCppLongFiles(t *testing.T) { + if msg := skipTestSameEncodingAsCpp(); msg != "" { + t.Skip(msg) + } + bDir := filepath.FromSlash(*benchdataDir) + failed := false + for i, tf := range testFiles { + if err := downloadBenchmarkFiles(t, tf.filename); err != nil { + t.Fatalf("failed to download testdata: %s", err) + } + data := readFile(t, filepath.Join(bDir, tf.filename)) + if n := tf.sizeLimit; 0 < n && n < len(data) { + data = data[:n] + } + if err := runTestSameEncodingAsCpp(data); err != nil { + t.Errorf("i=%d: %v", i, err) + failed = true + } + } + if failed { + t.Errorf("was the snappytool program built against the C++ snappy library version " + + "d53de187 or later, commited on 2016-04-05? See " + + "https://github.com/google/snappy/commit/d53de18799418e113e44444252a39b12a0e4e0cc") + } +} + +// TestSlowForwardCopyOverrun tests the "expand the pattern" algorithm +// described in decode_amd64.s and its claim of a 10 byte overrun worst case. 
+func TestSlowForwardCopyOverrun(t *testing.T) { + const base = 100 + + for length := 1; length < 18; length++ { + for offset := 1; offset < 18; offset++ { + highWaterMark := base + d := base + l := length + o := offset + + // makeOffsetAtLeast8 + for o < 8 { + if end := d + 8; highWaterMark < end { + highWaterMark = end + } + l -= o + d += o + o += o + } + + // fixUpSlowForwardCopy + a := d + d += l + + // finishSlowForwardCopy + for l > 0 { + if end := a + 8; highWaterMark < end { + highWaterMark = end + } + a += 8 + l -= 8 + } + + dWant := base + length + overrun := highWaterMark - dWant + if d != dWant || overrun < 0 || 10 < overrun { + t.Errorf("length=%d, offset=%d: d and overrun: got (%d, %d), want (%d, something in [0, 10])", + length, offset, d, overrun, dWant) + } + } + } +} + +// TestEncodeNoiseThenRepeats encodes input for which the first half is very +// incompressible and the second half is very compressible. The encoded form's +// length should be closer to 50% of the original length than 100%. +func TestEncodeNoiseThenRepeats(t *testing.T) { + for _, origLen := range []int{256 * 1024, 2048 * 1024} { + src := make([]byte, origLen) + rng := rand.New(rand.NewSource(1)) + firstHalf, secondHalf := src[:origLen/2], src[origLen/2:] + for i := range firstHalf { + firstHalf[i] = uint8(rng.Intn(256)) + } + for i := range secondHalf { + secondHalf[i] = uint8(i >> 8) + } + dst := Encode(nil, src) + if got, want := len(dst), origLen*3/4; got >= want { + t.Errorf("origLen=%d: got %d encoded bytes, want less than %d", origLen, got, want) + } + } +} + +func TestFramingFormat(t *testing.T) { + // src is comprised of alternating 1e5-sized sequences of random + // (incompressible) bytes and repeated (compressible) bytes. 1e5 was chosen + // because it is larger than maxBlockSize (64k). + src := make([]byte, 1e6) + rng := rand.New(rand.NewSource(1)) + for i := 0; i < 10; i++ { + if i%2 == 0 { + for j := 0; j < 1e5; j++ { + src[1e5*i+j] = uint8(rng.Intn(256)) + } + } else { + for j := 0; j < 1e5; j++ { + src[1e5*i+j] = uint8(i) + } + } + } + + buf := new(bytes.Buffer) + if _, err := NewWriter(buf).Write(src); err != nil { + t.Fatalf("Write: encoding: %v", err) + } + dst, err := ioutil.ReadAll(NewReader(buf)) + if err != nil { + t.Fatalf("ReadAll: decoding: %v", err) + } + if err := cmp(dst, src); err != nil { + t.Fatal(err) + } +} + +func TestWriterGoldenOutput(t *testing.T) { + buf := new(bytes.Buffer) + w := NewBufferedWriter(buf) + defer w.Close() + w.Write([]byte("abcd")) // Not compressible. + w.Flush() + w.Write(bytes.Repeat([]byte{'A'}, 150)) // Compressible. + w.Flush() + // The next chunk is also compressible, but a naive, greedy encoding of the + // overall length 67 copy as a length 64 copy (the longest expressible as a + // tagCopy1 or tagCopy2) plus a length 3 remainder would be two 3-byte + // tagCopy2 tags (6 bytes), since the minimum length for a tagCopy1 is 4 + // bytes. Instead, we could do it shorter, in 5 bytes: a 3-byte tagCopy2 + // (of length 60) and a 2-byte tagCopy1 (of length 7). + w.Write(bytes.Repeat([]byte{'B'}, 68)) + w.Write([]byte("efC")) // Not compressible. + w.Write(bytes.Repeat([]byte{'C'}, 20)) // Compressible. + w.Write(bytes.Repeat([]byte{'B'}, 20)) // Compressible. + w.Write([]byte("g")) // Not compressible. + w.Flush() + + got := buf.String() + want := strings.Join([]string{ + magicChunk, + "\x01\x08\x00\x00", // Uncompressed chunk, 8 bytes long (including 4 byte checksum). + "\x68\x10\xe6\xb6", // Checksum. 
+ "\x61\x62\x63\x64", // Uncompressed payload: "abcd". + "\x00\x11\x00\x00", // Compressed chunk, 17 bytes long (including 4 byte checksum). + "\x5f\xeb\xf2\x10", // Checksum. + "\x96\x01", // Compressed payload: Uncompressed length (varint encoded): 150. + "\x00\x41", // Compressed payload: tagLiteral, length=1, "A". + "\xfe\x01\x00", // Compressed payload: tagCopy2, length=64, offset=1. + "\xfe\x01\x00", // Compressed payload: tagCopy2, length=64, offset=1. + "\x52\x01\x00", // Compressed payload: tagCopy2, length=21, offset=1. + "\x00\x18\x00\x00", // Compressed chunk, 24 bytes long (including 4 byte checksum). + "\x30\x85\x69\xeb", // Checksum. + "\x70", // Compressed payload: Uncompressed length (varint encoded): 112. + "\x00\x42", // Compressed payload: tagLiteral, length=1, "B". + "\xee\x01\x00", // Compressed payload: tagCopy2, length=60, offset=1. + "\x0d\x01", // Compressed payload: tagCopy1, length=7, offset=1. + "\x08\x65\x66\x43", // Compressed payload: tagLiteral, length=3, "efC". + "\x4e\x01\x00", // Compressed payload: tagCopy2, length=20, offset=1. + "\x4e\x5a\x00", // Compressed payload: tagCopy2, length=20, offset=90. + "\x00\x67", // Compressed payload: tagLiteral, length=1, "g". + }, "") + if got != want { + t.Fatalf("\ngot: % x\nwant: % x", got, want) + } +} + +func TestEmitLiteral(t *testing.T) { + testCases := []struct { + length int + want string + }{ + {1, "\x00"}, + {2, "\x04"}, + {59, "\xe8"}, + {60, "\xec"}, + {61, "\xf0\x3c"}, + {62, "\xf0\x3d"}, + {254, "\xf0\xfd"}, + {255, "\xf0\xfe"}, + {256, "\xf0\xff"}, + {257, "\xf4\x00\x01"}, + {65534, "\xf4\xfd\xff"}, + {65535, "\xf4\xfe\xff"}, + {65536, "\xf4\xff\xff"}, + } + + dst := make([]byte, 70000) + nines := bytes.Repeat([]byte{0x99}, 65536) + for _, tc := range testCases { + lit := nines[:tc.length] + n := emitLiteral(dst, lit) + if !bytes.HasSuffix(dst[:n], lit) { + t.Errorf("length=%d: did not end with that many literal bytes", tc.length) + continue + } + got := string(dst[:n-tc.length]) + if got != tc.want { + t.Errorf("length=%d:\ngot % x\nwant % x", tc.length, got, tc.want) + continue + } + } +} + +func TestEmitCopy(t *testing.T) { + testCases := []struct { + offset int + length int + want string + }{ + {8, 04, "\x01\x08"}, + {8, 11, "\x1d\x08"}, + {8, 12, "\x2e\x08\x00"}, + {8, 13, "\x32\x08\x00"}, + {8, 59, "\xea\x08\x00"}, + {8, 60, "\xee\x08\x00"}, + {8, 61, "\xf2\x08\x00"}, + {8, 62, "\xf6\x08\x00"}, + {8, 63, "\xfa\x08\x00"}, + {8, 64, "\xfe\x08\x00"}, + {8, 65, "\xee\x08\x00\x05\x08"}, + {8, 66, "\xee\x08\x00\x09\x08"}, + {8, 67, "\xee\x08\x00\x0d\x08"}, + {8, 68, "\xfe\x08\x00\x01\x08"}, + {8, 69, "\xfe\x08\x00\x05\x08"}, + {8, 80, "\xfe\x08\x00\x3e\x08\x00"}, + + {256, 04, "\x21\x00"}, + {256, 11, "\x3d\x00"}, + {256, 12, "\x2e\x00\x01"}, + {256, 13, "\x32\x00\x01"}, + {256, 59, "\xea\x00\x01"}, + {256, 60, "\xee\x00\x01"}, + {256, 61, "\xf2\x00\x01"}, + {256, 62, "\xf6\x00\x01"}, + {256, 63, "\xfa\x00\x01"}, + {256, 64, "\xfe\x00\x01"}, + {256, 65, "\xee\x00\x01\x25\x00"}, + {256, 66, "\xee\x00\x01\x29\x00"}, + {256, 67, "\xee\x00\x01\x2d\x00"}, + {256, 68, "\xfe\x00\x01\x21\x00"}, + {256, 69, "\xfe\x00\x01\x25\x00"}, + {256, 80, "\xfe\x00\x01\x3e\x00\x01"}, + + {2048, 04, "\x0e\x00\x08"}, + {2048, 11, "\x2a\x00\x08"}, + {2048, 12, "\x2e\x00\x08"}, + {2048, 13, "\x32\x00\x08"}, + {2048, 59, "\xea\x00\x08"}, + {2048, 60, "\xee\x00\x08"}, + {2048, 61, "\xf2\x00\x08"}, + {2048, 62, "\xf6\x00\x08"}, + {2048, 63, "\xfa\x00\x08"}, + {2048, 64, "\xfe\x00\x08"}, + {2048, 65, "\xee\x00\x08\x12\x00\x08"}, 
+ {2048, 66, "\xee\x00\x08\x16\x00\x08"}, + {2048, 67, "\xee\x00\x08\x1a\x00\x08"}, + {2048, 68, "\xfe\x00\x08\x0e\x00\x08"}, + {2048, 69, "\xfe\x00\x08\x12\x00\x08"}, + {2048, 80, "\xfe\x00\x08\x3e\x00\x08"}, + } + + dst := make([]byte, 1024) + for _, tc := range testCases { + n := emitCopy(dst, tc.offset, tc.length) + got := string(dst[:n]) + if got != tc.want { + t.Errorf("offset=%d, length=%d:\ngot % x\nwant % x", tc.offset, tc.length, got, tc.want) + } + } +} + +func TestNewBufferedWriter(t *testing.T) { + // Test all 32 possible sub-sequences of these 5 input slices. + // + // Their lengths sum to 400,000, which is over 6 times the Writer ibuf + // capacity: 6 * maxBlockSize is 393,216. + inputs := [][]byte{ + bytes.Repeat([]byte{'a'}, 40000), + bytes.Repeat([]byte{'b'}, 150000), + bytes.Repeat([]byte{'c'}, 60000), + bytes.Repeat([]byte{'d'}, 120000), + bytes.Repeat([]byte{'e'}, 30000), + } +loop: + for i := 0; i < 1< 0; { + i := copy(x, src) + x = x[i:] + } + return dst +} + +func benchWords(b *testing.B, n int, decode bool) { + // Note: the file is OS-language dependent so the resulting values are not + // directly comparable for non-US-English OS installations. + data := expand(readFile(b, "/usr/share/dict/words"), n) + if decode { + benchDecode(b, data) + } else { + benchEncode(b, data) + } +} + +func BenchmarkWordsDecode1e1(b *testing.B) { benchWords(b, 1e1, true) } +func BenchmarkWordsDecode1e2(b *testing.B) { benchWords(b, 1e2, true) } +func BenchmarkWordsDecode1e3(b *testing.B) { benchWords(b, 1e3, true) } +func BenchmarkWordsDecode1e4(b *testing.B) { benchWords(b, 1e4, true) } +func BenchmarkWordsDecode1e5(b *testing.B) { benchWords(b, 1e5, true) } +func BenchmarkWordsDecode1e6(b *testing.B) { benchWords(b, 1e6, true) } +func BenchmarkWordsEncode1e1(b *testing.B) { benchWords(b, 1e1, false) } +func BenchmarkWordsEncode1e2(b *testing.B) { benchWords(b, 1e2, false) } +func BenchmarkWordsEncode1e3(b *testing.B) { benchWords(b, 1e3, false) } +func BenchmarkWordsEncode1e4(b *testing.B) { benchWords(b, 1e4, false) } +func BenchmarkWordsEncode1e5(b *testing.B) { benchWords(b, 1e5, false) } +func BenchmarkWordsEncode1e6(b *testing.B) { benchWords(b, 1e6, false) } + +func BenchmarkRandomEncode(b *testing.B) { + rng := rand.New(rand.NewSource(1)) + data := make([]byte, 1<<20) + for i := range data { + data[i] = uint8(rng.Intn(256)) + } + benchEncode(b, data) +} + +// testFiles' values are copied directly from +// https://raw.githubusercontent.com/google/snappy/master/snappy_unittest.cc +// The label field is unused in snappy-go. +var testFiles = []struct { + label string + filename string + sizeLimit int +}{ + {"html", "html", 0}, + {"urls", "urls.10K", 0}, + {"jpg", "fireworks.jpeg", 0}, + {"jpg_200", "fireworks.jpeg", 200}, + {"pdf", "paper-100k.pdf", 0}, + {"html4", "html_x_4", 0}, + {"txt1", "alice29.txt", 0}, + {"txt2", "asyoulik.txt", 0}, + {"txt3", "lcet10.txt", 0}, + {"txt4", "plrabn12.txt", 0}, + {"pb", "geo.protodata", 0}, + {"gaviota", "kppkn.gtb", 0}, +} + +const ( + // The benchmark data files are at this canonical URL. 
+ benchURL = "https://raw.githubusercontent.com/google/snappy/master/testdata/" +) + +func downloadBenchmarkFiles(b testing.TB, basename string) (errRet error) { + bDir := filepath.FromSlash(*benchdataDir) + filename := filepath.Join(bDir, basename) + if stat, err := os.Stat(filename); err == nil && stat.Size() != 0 { + return nil + } + + if !*download { + b.Skipf("test data not found; skipping %s without the -download flag", testOrBenchmark(b)) + } + // Download the official snappy C++ implementation reference test data + // files for benchmarking. + if err := os.MkdirAll(bDir, 0777); err != nil && !os.IsExist(err) { + return fmt.Errorf("failed to create %s: %s", bDir, err) + } + + f, err := os.Create(filename) + if err != nil { + return fmt.Errorf("failed to create %s: %s", filename, err) + } + defer f.Close() + defer func() { + if errRet != nil { + os.Remove(filename) + } + }() + url := benchURL + basename + resp, err := http.Get(url) + if err != nil { + return fmt.Errorf("failed to download %s: %s", url, err) + } + defer resp.Body.Close() + if s := resp.StatusCode; s != http.StatusOK { + return fmt.Errorf("downloading %s: HTTP status code %d (%s)", url, s, http.StatusText(s)) + } + _, err = io.Copy(f, resp.Body) + if err != nil { + return fmt.Errorf("failed to download %s to %s: %s", url, filename, err) + } + return nil +} + +func benchFile(b *testing.B, i int, decode bool) { + if err := downloadBenchmarkFiles(b, testFiles[i].filename); err != nil { + b.Fatalf("failed to download testdata: %s", err) + } + bDir := filepath.FromSlash(*benchdataDir) + data := readFile(b, filepath.Join(bDir, testFiles[i].filename)) + if n := testFiles[i].sizeLimit; 0 < n && n < len(data) { + data = data[:n] + } + if decode { + benchDecode(b, data) + } else { + benchEncode(b, data) + } +} + +// Naming convention is kept similar to what snappy's C++ implementation uses. 
+func Benchmark_UFlat0(b *testing.B) { benchFile(b, 0, true) } +func Benchmark_UFlat1(b *testing.B) { benchFile(b, 1, true) } +func Benchmark_UFlat2(b *testing.B) { benchFile(b, 2, true) } +func Benchmark_UFlat3(b *testing.B) { benchFile(b, 3, true) } +func Benchmark_UFlat4(b *testing.B) { benchFile(b, 4, true) } +func Benchmark_UFlat5(b *testing.B) { benchFile(b, 5, true) } +func Benchmark_UFlat6(b *testing.B) { benchFile(b, 6, true) } +func Benchmark_UFlat7(b *testing.B) { benchFile(b, 7, true) } +func Benchmark_UFlat8(b *testing.B) { benchFile(b, 8, true) } +func Benchmark_UFlat9(b *testing.B) { benchFile(b, 9, true) } +func Benchmark_UFlat10(b *testing.B) { benchFile(b, 10, true) } +func Benchmark_UFlat11(b *testing.B) { benchFile(b, 11, true) } +func Benchmark_ZFlat0(b *testing.B) { benchFile(b, 0, false) } +func Benchmark_ZFlat1(b *testing.B) { benchFile(b, 1, false) } +func Benchmark_ZFlat2(b *testing.B) { benchFile(b, 2, false) } +func Benchmark_ZFlat3(b *testing.B) { benchFile(b, 3, false) } +func Benchmark_ZFlat4(b *testing.B) { benchFile(b, 4, false) } +func Benchmark_ZFlat5(b *testing.B) { benchFile(b, 5, false) } +func Benchmark_ZFlat6(b *testing.B) { benchFile(b, 6, false) } +func Benchmark_ZFlat7(b *testing.B) { benchFile(b, 7, false) } +func Benchmark_ZFlat8(b *testing.B) { benchFile(b, 8, false) } +func Benchmark_ZFlat9(b *testing.B) { benchFile(b, 9, false) } +func Benchmark_ZFlat10(b *testing.B) { benchFile(b, 10, false) } +func Benchmark_ZFlat11(b *testing.B) { benchFile(b, 11, false) } + +func BenchmarkExtendMatch(b *testing.B) { + tDir := filepath.FromSlash(*testdataDir) + src, err := ioutil.ReadFile(filepath.Join(tDir, goldenText)) + if err != nil { + b.Fatalf("ReadFile: %v", err) + } + b.ResetTimer() + for i := 0; i < b.N; i++ { + for _, tc := range extendMatchGoldenTestCases { + extendMatch(src, tc.i, tc.j) + } + } +} diff --git a/vendor/github.com/klauspost/compress/snappy/testdata/Mark.Twain-Tom.Sawyer.txt b/vendor/github.com/klauspost/compress/snappy/testdata/Mark.Twain-Tom.Sawyer.txt new file mode 100644 index 0000000..86a1875 --- /dev/null +++ b/vendor/github.com/klauspost/compress/snappy/testdata/Mark.Twain-Tom.Sawyer.txt @@ -0,0 +1,396 @@ +Produced by David Widger. The previous edition was updated by Jose +Menendez. + + + + + + THE ADVENTURES OF TOM SAWYER + BY + MARK TWAIN + (Samuel Langhorne Clemens) + + + + + P R E F A C E + +MOST of the adventures recorded in this book really occurred; one or +two were experiences of my own, the rest those of boys who were +schoolmates of mine. Huck Finn is drawn from life; Tom Sawyer also, but +not from an individual--he is a combination of the characteristics of +three boys whom I knew, and therefore belongs to the composite order of +architecture. + +The odd superstitions touched upon were all prevalent among children +and slaves in the West at the period of this story--that is to say, +thirty or forty years ago. + +Although my book is intended mainly for the entertainment of boys and +girls, I hope it will not be shunned by men and women on that account, +for part of my plan has been to try to pleasantly remind adults of what +they once were themselves, and of how they felt and thought and talked, +and what queer enterprises they sometimes engaged in. + + THE AUTHOR. + +HARTFORD, 1876. + + + + T O M S A W Y E R + + + +CHAPTER I + +"TOM!" + +No answer. + +"TOM!" + +No answer. + +"What's gone with that boy, I wonder? You TOM!" + +No answer. 
+ +The old lady pulled her spectacles down and looked over them about the +room; then she put them up and looked out under them. She seldom or +never looked THROUGH them for so small a thing as a boy; they were her +state pair, the pride of her heart, and were built for "style," not +service--she could have seen through a pair of stove-lids just as well. +She looked perplexed for a moment, and then said, not fiercely, but +still loud enough for the furniture to hear: + +"Well, I lay if I get hold of you I'll--" + +She did not finish, for by this time she was bending down and punching +under the bed with the broom, and so she needed breath to punctuate the +punches with. She resurrected nothing but the cat. + +"I never did see the beat of that boy!" + +She went to the open door and stood in it and looked out among the +tomato vines and "jimpson" weeds that constituted the garden. No Tom. +So she lifted up her voice at an angle calculated for distance and +shouted: + +"Y-o-u-u TOM!" + +There was a slight noise behind her and she turned just in time to +seize a small boy by the slack of his roundabout and arrest his flight. + +"There! I might 'a' thought of that closet. What you been doing in +there?" + +"Nothing." + +"Nothing! Look at your hands. And look at your mouth. What IS that +truck?" + +"I don't know, aunt." + +"Well, I know. It's jam--that's what it is. Forty times I've said if +you didn't let that jam alone I'd skin you. Hand me that switch." + +The switch hovered in the air--the peril was desperate-- + +"My! Look behind you, aunt!" + +The old lady whirled round, and snatched her skirts out of danger. The +lad fled on the instant, scrambled up the high board-fence, and +disappeared over it. + +His aunt Polly stood surprised a moment, and then broke into a gentle +laugh. + +"Hang the boy, can't I never learn anything? Ain't he played me tricks +enough like that for me to be looking out for him by this time? But old +fools is the biggest fools there is. Can't learn an old dog new tricks, +as the saying is. But my goodness, he never plays them alike, two days, +and how is a body to know what's coming? He 'pears to know just how +long he can torment me before I get my dander up, and he knows if he +can make out to put me off for a minute or make me laugh, it's all down +again and I can't hit him a lick. I ain't doing my duty by that boy, +and that's the Lord's truth, goodness knows. Spare the rod and spile +the child, as the Good Book says. I'm a laying up sin and suffering for +us both, I know. He's full of the Old Scratch, but laws-a-me! he's my +own dead sister's boy, poor thing, and I ain't got the heart to lash +him, somehow. Every time I let him off, my conscience does hurt me so, +and every time I hit him my old heart most breaks. Well-a-well, man +that is born of woman is of few days and full of trouble, as the +Scripture says, and I reckon it's so. He'll play hookey this evening, * +and [* Southwestern for "afternoon"] I'll just be obleeged to make him +work, to-morrow, to punish him. It's mighty hard to make him work +Saturdays, when all the boys is having holiday, but he hates work more +than he hates anything else, and I've GOT to do some of my duty by him, +or I'll be the ruination of the child." + +Tom did play hookey, and he had a very good time. 
He got back home +barely in season to help Jim, the small colored boy, saw next-day's +wood and split the kindlings before supper--at least he was there in +time to tell his adventures to Jim while Jim did three-fourths of the +work. Tom's younger brother (or rather half-brother) Sid was already +through with his part of the work (picking up chips), for he was a +quiet boy, and had no adventurous, troublesome ways. + +While Tom was eating his supper, and stealing sugar as opportunity +offered, Aunt Polly asked him questions that were full of guile, and +very deep--for she wanted to trap him into damaging revealments. Like +many other simple-hearted souls, it was her pet vanity to believe she +was endowed with a talent for dark and mysterious diplomacy, and she +loved to contemplate her most transparent devices as marvels of low +cunning. Said she: + +"Tom, it was middling warm in school, warn't it?" + +"Yes'm." + +"Powerful warm, warn't it?" + +"Yes'm." + +"Didn't you want to go in a-swimming, Tom?" + +A bit of a scare shot through Tom--a touch of uncomfortable suspicion. +He searched Aunt Polly's face, but it told him nothing. So he said: + +"No'm--well, not very much." + +The old lady reached out her hand and felt Tom's shirt, and said: + +"But you ain't too warm now, though." And it flattered her to reflect +that she had discovered that the shirt was dry without anybody knowing +that that was what she had in her mind. But in spite of her, Tom knew +where the wind lay, now. So he forestalled what might be the next move: + +"Some of us pumped on our heads--mine's damp yet. See?" + +Aunt Polly was vexed to think she had overlooked that bit of +circumstantial evidence, and missed a trick. Then she had a new +inspiration: + +"Tom, you didn't have to undo your shirt collar where I sewed it, to +pump on your head, did you? Unbutton your jacket!" + +The trouble vanished out of Tom's face. He opened his jacket. His +shirt collar was securely sewed. + +"Bother! Well, go 'long with you. I'd made sure you'd played hookey +and been a-swimming. But I forgive ye, Tom. I reckon you're a kind of a +singed cat, as the saying is--better'n you look. THIS time." + +She was half sorry her sagacity had miscarried, and half glad that Tom +had stumbled into obedient conduct for once. + +But Sidney said: + +"Well, now, if I didn't think you sewed his collar with white thread, +but it's black." + +"Why, I did sew it with white! Tom!" + +But Tom did not wait for the rest. As he went out at the door he said: + +"Siddy, I'll lick you for that." + +In a safe place Tom examined two large needles which were thrust into +the lapels of his jacket, and had thread bound about them--one needle +carried white thread and the other black. He said: + +"She'd never noticed if it hadn't been for Sid. Confound it! sometimes +she sews it with white, and sometimes she sews it with black. I wish to +geeminy she'd stick to one or t'other--I can't keep the run of 'em. But +I bet you I'll lam Sid for that. I'll learn him!" + +He was not the Model Boy of the village. He knew the model boy very +well though--and loathed him. + +Within two minutes, or even less, he had forgotten all his troubles. +Not because his troubles were one whit less heavy and bitter to him +than a man's are to a man, but because a new and powerful interest bore +them down and drove them out of his mind for the time--just as men's +misfortunes are forgotten in the excitement of new enterprises. 
This +new interest was a valued novelty in whistling, which he had just +acquired from a negro, and he was suffering to practise it undisturbed. +It consisted in a peculiar bird-like turn, a sort of liquid warble, +produced by touching the tongue to the roof of the mouth at short +intervals in the midst of the music--the reader probably remembers how +to do it, if he has ever been a boy. Diligence and attention soon gave +him the knack of it, and he strode down the street with his mouth full +of harmony and his soul full of gratitude. He felt much as an +astronomer feels who has discovered a new planet--no doubt, as far as +strong, deep, unalloyed pleasure is concerned, the advantage was with +the boy, not the astronomer. + +The summer evenings were long. It was not dark, yet. Presently Tom +checked his whistle. A stranger was before him--a boy a shade larger +than himself. A new-comer of any age or either sex was an impressive +curiosity in the poor little shabby village of St. Petersburg. This boy +was well dressed, too--well dressed on a week-day. This was simply +astounding. His cap was a dainty thing, his close-buttoned blue cloth +roundabout was new and natty, and so were his pantaloons. He had shoes +on--and it was only Friday. He even wore a necktie, a bright bit of +ribbon. He had a citified air about him that ate into Tom's vitals. The +more Tom stared at the splendid marvel, the higher he turned up his +nose at his finery and the shabbier and shabbier his own outfit seemed +to him to grow. Neither boy spoke. If one moved, the other moved--but +only sidewise, in a circle; they kept face to face and eye to eye all +the time. Finally Tom said: + +"I can lick you!" + +"I'd like to see you try it." + +"Well, I can do it." + +"No you can't, either." + +"Yes I can." + +"No you can't." + +"I can." + +"You can't." + +"Can!" + +"Can't!" + +An uncomfortable pause. Then Tom said: + +"What's your name?" + +"'Tisn't any of your business, maybe." + +"Well I 'low I'll MAKE it my business." + +"Well why don't you?" + +"If you say much, I will." + +"Much--much--MUCH. There now." + +"Oh, you think you're mighty smart, DON'T you? I could lick you with +one hand tied behind me, if I wanted to." + +"Well why don't you DO it? You SAY you can do it." + +"Well I WILL, if you fool with me." + +"Oh yes--I've seen whole families in the same fix." + +"Smarty! You think you're SOME, now, DON'T you? Oh, what a hat!" + +"You can lump that hat if you don't like it. I dare you to knock it +off--and anybody that'll take a dare will suck eggs." + +"You're a liar!" + +"You're another." + +"You're a fighting liar and dasn't take it up." + +"Aw--take a walk!" + +"Say--if you give me much more of your sass I'll take and bounce a +rock off'n your head." + +"Oh, of COURSE you will." + +"Well I WILL." + +"Well why don't you DO it then? What do you keep SAYING you will for? +Why don't you DO it? It's because you're afraid." + +"I AIN'T afraid." + +"You are." + +"I ain't." + +"You are." + +Another pause, and more eying and sidling around each other. Presently +they were shoulder to shoulder. Tom said: + +"Get away from here!" + +"Go away yourself!" + +"I won't." + +"I won't either." + +So they stood, each with a foot placed at an angle as a brace, and +both shoving with might and main, and glowering at each other with +hate. But neither could get an advantage. After struggling till both +were hot and flushed, each relaxed his strain with watchful caution, +and Tom said: + +"You're a coward and a pup. 
I'll tell my big brother on you, and he +can thrash you with his little finger, and I'll make him do it, too." + +"What do I care for your big brother? I've got a brother that's bigger +than he is--and what's more, he can throw him over that fence, too." +[Both brothers were imaginary.] + +"That's a lie." + +"YOUR saying so don't make it so." + +Tom drew a line in the dust with his big toe, and said: + +"I dare you to step over that, and I'll lick you till you can't stand +up. Anybody that'll take a dare will steal sheep." + +The new boy stepped over promptly, and said: + +"Now you said you'd do it, now let's see you do it." + +"Don't you crowd me now; you better look out." + +"Well, you SAID you'd do it--why don't you do it?" + +"By jingo! for two cents I WILL do it." + +The new boy took two broad coppers out of his pocket and held them out +with derision. Tom struck them to the ground. In an instant both boys +were rolling and tumbling in the dirt, gripped together like cats; and +for the space of a minute they tugged and tore at each other's hair and +clothes, punched and scratched each other's nose, and covered +themselves with dust and glory. Presently the confusion took form, and +through the fog of battle Tom appeared, seated astride the new boy, and +pounding him with his fists. "Holler 'nuff!" said he. + +The boy only struggled to free himself. He was crying--mainly from rage. + +"Holler 'nuff!"--and the pounding went on. + +At last the stranger got out a smothered "'Nuff!" and Tom let him up +and said: + +"Now that'll learn you. Better look out who you're fooling with next +time." + +The new boy went off brushing the dust from his clothes, sobbing, +snuffling, and occasionally looking back and shaking his head and +threatening what he would do to Tom the "next time he caught him out." +To which Tom responded with jeers, and started off in high feather, and +as soon as his back was turned the new boy snatched up a stone, threw +it and hit him between the shoulders and then turned tail and ran like +an antelope. Tom chased the traitor home, and thus found out where he +lived. He then held a position at the gate for some time, daring the +enemy to come outside, but the enemy only made faces at him through the +window and declined. At last the enemy's mother appeared, and called +Tom a bad, vicious, vulgar child, and ordered him away. So he went +away; but he said he "'lowed" to "lay" for that boy. + +He got home pretty late that night, and when he climbed cautiously in +at the window, he uncovered an ambuscade, in the person of his aunt; +and when she saw the state his clothes were in her resolution to turn +his Saturday holiday into captivity at hard labor became adamantine in +its firmness. 
diff --git a/vendor/github.com/klauspost/compress/snappy/testdata/Mark.Twain-Tom.Sawyer.txt.rawsnappy b/vendor/github.com/klauspost/compress/snappy/testdata/Mark.Twain-Tom.Sawyer.txt.rawsnappy new file mode 100644 index 0000000..9c56d98 Binary files /dev/null and b/vendor/github.com/klauspost/compress/snappy/testdata/Mark.Twain-Tom.Sawyer.txt.rawsnappy differ diff --git a/vendor/github.com/klauspost/compress/snappy/testdata/random b/vendor/github.com/klauspost/compress/snappy/testdata/random new file mode 100644 index 0000000..d5e9606 Binary files /dev/null and b/vendor/github.com/klauspost/compress/snappy/testdata/random differ diff --git a/vendor/github.com/klauspost/compress/testdata/Mark.Twain-Tom.Sawyer.txt b/vendor/github.com/klauspost/compress/testdata/Mark.Twain-Tom.Sawyer.txt new file mode 100644 index 0000000..565627a --- /dev/null +++ b/vendor/github.com/klauspost/compress/testdata/Mark.Twain-Tom.Sawyer.txt @@ -0,0 +1,8472 @@ +Produced by David Widger. The previous edition was updated by Jose +Menendez. + + + + + + THE ADVENTURES OF TOM SAWYER + BY + MARK TWAIN + (Samuel Langhorne Clemens) + + + + + P R E F A C E + +MOST of the adventures recorded in this book really occurred; one or +two were experiences of my own, the rest those of boys who were +schoolmates of mine. Huck Finn is drawn from life; Tom Sawyer also, but +not from an individual--he is a combination of the characteristics of +three boys whom I knew, and therefore belongs to the composite order of +architecture. + +The odd superstitions touched upon were all prevalent among children +and slaves in the West at the period of this story--that is to say, +thirty or forty years ago. + +Although my book is intended mainly for the entertainment of boys and +girls, I hope it will not be shunned by men and women on that account, +for part of my plan has been to try to pleasantly remind adults of what +they once were themselves, and of how they felt and thought and talked, +and what queer enterprises they sometimes engaged in. + + THE AUTHOR. + +HARTFORD, 1876. + + + + T O M S A W Y E R + + + +CHAPTER I + +"TOM!" + +No answer. + +"TOM!" + +No answer. + +"What's gone with that boy, I wonder? You TOM!" + +No answer. + +The old lady pulled her spectacles down and looked over them about the +room; then she put them up and looked out under them. She seldom or +never looked THROUGH them for so small a thing as a boy; they were her +state pair, the pride of her heart, and were built for "style," not +service--she could have seen through a pair of stove-lids just as well. +She looked perplexed for a moment, and then said, not fiercely, but +still loud enough for the furniture to hear: + +"Well, I lay if I get hold of you I'll--" + +She did not finish, for by this time she was bending down and punching +under the bed with the broom, and so she needed breath to punctuate the +punches with. She resurrected nothing but the cat. + +"I never did see the beat of that boy!" + +She went to the open door and stood in it and looked out among the +tomato vines and "jimpson" weeds that constituted the garden. No Tom. +So she lifted up her voice at an angle calculated for distance and +shouted: + +"Y-o-u-u TOM!" + +There was a slight noise behind her and she turned just in time to +seize a small boy by the slack of his roundabout and arrest his flight. + +"There! I might 'a' thought of that closet. What you been doing in +there?" + +"Nothing." + +"Nothing! Look at your hands. And look at your mouth. What IS that +truck?" + +"I don't know, aunt." 
+ +"Well, I know. It's jam--that's what it is. Forty times I've said if +you didn't let that jam alone I'd skin you. Hand me that switch." + +The switch hovered in the air--the peril was desperate-- + +"My! Look behind you, aunt!" + +The old lady whirled round, and snatched her skirts out of danger. The +lad fled on the instant, scrambled up the high board-fence, and +disappeared over it. + +His aunt Polly stood surprised a moment, and then broke into a gentle +laugh. + +"Hang the boy, can't I never learn anything? Ain't he played me tricks +enough like that for me to be looking out for him by this time? But old +fools is the biggest fools there is. Can't learn an old dog new tricks, +as the saying is. But my goodness, he never plays them alike, two days, +and how is a body to know what's coming? He 'pears to know just how +long he can torment me before I get my dander up, and he knows if he +can make out to put me off for a minute or make me laugh, it's all down +again and I can't hit him a lick. I ain't doing my duty by that boy, +and that's the Lord's truth, goodness knows. Spare the rod and spile +the child, as the Good Book says. I'm a laying up sin and suffering for +us both, I know. He's full of the Old Scratch, but laws-a-me! he's my +own dead sister's boy, poor thing, and I ain't got the heart to lash +him, somehow. Every time I let him off, my conscience does hurt me so, +and every time I hit him my old heart most breaks. Well-a-well, man +that is born of woman is of few days and full of trouble, as the +Scripture says, and I reckon it's so. He'll play hookey this evening, * +and [* Southwestern for "afternoon"] I'll just be obleeged to make him +work, to-morrow, to punish him. It's mighty hard to make him work +Saturdays, when all the boys is having holiday, but he hates work more +than he hates anything else, and I've GOT to do some of my duty by him, +or I'll be the ruination of the child." + +Tom did play hookey, and he had a very good time. He got back home +barely in season to help Jim, the small colored boy, saw next-day's +wood and split the kindlings before supper--at least he was there in +time to tell his adventures to Jim while Jim did three-fourths of the +work. Tom's younger brother (or rather half-brother) Sid was already +through with his part of the work (picking up chips), for he was a +quiet boy, and had no adventurous, troublesome ways. + +While Tom was eating his supper, and stealing sugar as opportunity +offered, Aunt Polly asked him questions that were full of guile, and +very deep--for she wanted to trap him into damaging revealments. Like +many other simple-hearted souls, it was her pet vanity to believe she +was endowed with a talent for dark and mysterious diplomacy, and she +loved to contemplate her most transparent devices as marvels of low +cunning. Said she: + +"Tom, it was middling warm in school, warn't it?" + +"Yes'm." + +"Powerful warm, warn't it?" + +"Yes'm." + +"Didn't you want to go in a-swimming, Tom?" + +A bit of a scare shot through Tom--a touch of uncomfortable suspicion. +He searched Aunt Polly's face, but it told him nothing. So he said: + +"No'm--well, not very much." + +The old lady reached out her hand and felt Tom's shirt, and said: + +"But you ain't too warm now, though." And it flattered her to reflect +that she had discovered that the shirt was dry without anybody knowing +that that was what she had in her mind. But in spite of her, Tom knew +where the wind lay, now. 
So he forestalled what might be the next move: + +"Some of us pumped on our heads--mine's damp yet. See?" + +Aunt Polly was vexed to think she had overlooked that bit of +circumstantial evidence, and missed a trick. Then she had a new +inspiration: + +"Tom, you didn't have to undo your shirt collar where I sewed it, to +pump on your head, did you? Unbutton your jacket!" + +The trouble vanished out of Tom's face. He opened his jacket. His +shirt collar was securely sewed. + +"Bother! Well, go 'long with you. I'd made sure you'd played hookey +and been a-swimming. But I forgive ye, Tom. I reckon you're a kind of a +singed cat, as the saying is--better'n you look. THIS time." + +She was half sorry her sagacity had miscarried, and half glad that Tom +had stumbled into obedient conduct for once. + +But Sidney said: + +"Well, now, if I didn't think you sewed his collar with white thread, +but it's black." + +"Why, I did sew it with white! Tom!" + +But Tom did not wait for the rest. As he went out at the door he said: + +"Siddy, I'll lick you for that." + +In a safe place Tom examined two large needles which were thrust into +the lapels of his jacket, and had thread bound about them--one needle +carried white thread and the other black. He said: + +"She'd never noticed if it hadn't been for Sid. Confound it! sometimes +she sews it with white, and sometimes she sews it with black. I wish to +geeminy she'd stick to one or t'other--I can't keep the run of 'em. But +I bet you I'll lam Sid for that. I'll learn him!" + +He was not the Model Boy of the village. He knew the model boy very +well though--and loathed him. + +Within two minutes, or even less, he had forgotten all his troubles. +Not because his troubles were one whit less heavy and bitter to him +than a man's are to a man, but because a new and powerful interest bore +them down and drove them out of his mind for the time--just as men's +misfortunes are forgotten in the excitement of new enterprises. This +new interest was a valued novelty in whistling, which he had just +acquired from a negro, and he was suffering to practise it undisturbed. +It consisted in a peculiar bird-like turn, a sort of liquid warble, +produced by touching the tongue to the roof of the mouth at short +intervals in the midst of the music--the reader probably remembers how +to do it, if he has ever been a boy. Diligence and attention soon gave +him the knack of it, and he strode down the street with his mouth full +of harmony and his soul full of gratitude. He felt much as an +astronomer feels who has discovered a new planet--no doubt, as far as +strong, deep, unalloyed pleasure is concerned, the advantage was with +the boy, not the astronomer. + +The summer evenings were long. It was not dark, yet. Presently Tom +checked his whistle. A stranger was before him--a boy a shade larger +than himself. A new-comer of any age or either sex was an impressive +curiosity in the poor little shabby village of St. Petersburg. This boy +was well dressed, too--well dressed on a week-day. This was simply +astounding. His cap was a dainty thing, his close-buttoned blue cloth +roundabout was new and natty, and so were his pantaloons. He had shoes +on--and it was only Friday. He even wore a necktie, a bright bit of +ribbon. He had a citified air about him that ate into Tom's vitals. The +more Tom stared at the splendid marvel, the higher he turned up his +nose at his finery and the shabbier and shabbier his own outfit seemed +to him to grow. Neither boy spoke. 
If one moved, the other moved--but +only sidewise, in a circle; they kept face to face and eye to eye all +the time. Finally Tom said: + +"I can lick you!" + +"I'd like to see you try it." + +"Well, I can do it." + +"No you can't, either." + +"Yes I can." + +"No you can't." + +"I can." + +"You can't." + +"Can!" + +"Can't!" + +An uncomfortable pause. Then Tom said: + +"What's your name?" + +"'Tisn't any of your business, maybe." + +"Well I 'low I'll MAKE it my business." + +"Well why don't you?" + +"If you say much, I will." + +"Much--much--MUCH. There now." + +"Oh, you think you're mighty smart, DON'T you? I could lick you with +one hand tied behind me, if I wanted to." + +"Well why don't you DO it? You SAY you can do it." + +"Well I WILL, if you fool with me." + +"Oh yes--I've seen whole families in the same fix." + +"Smarty! You think you're SOME, now, DON'T you? Oh, what a hat!" + +"You can lump that hat if you don't like it. I dare you to knock it +off--and anybody that'll take a dare will suck eggs." + +"You're a liar!" + +"You're another." + +"You're a fighting liar and dasn't take it up." + +"Aw--take a walk!" + +"Say--if you give me much more of your sass I'll take and bounce a +rock off'n your head." + +"Oh, of COURSE you will." + +"Well I WILL." + +"Well why don't you DO it then? What do you keep SAYING you will for? +Why don't you DO it? It's because you're afraid." + +"I AIN'T afraid." + +"You are." + +"I ain't." + +"You are." + +Another pause, and more eying and sidling around each other. Presently +they were shoulder to shoulder. Tom said: + +"Get away from here!" + +"Go away yourself!" + +"I won't." + +"I won't either." + +So they stood, each with a foot placed at an angle as a brace, and +both shoving with might and main, and glowering at each other with +hate. But neither could get an advantage. After struggling till both +were hot and flushed, each relaxed his strain with watchful caution, +and Tom said: + +"You're a coward and a pup. I'll tell my big brother on you, and he +can thrash you with his little finger, and I'll make him do it, too." + +"What do I care for your big brother? I've got a brother that's bigger +than he is--and what's more, he can throw him over that fence, too." +[Both brothers were imaginary.] + +"That's a lie." + +"YOUR saying so don't make it so." + +Tom drew a line in the dust with his big toe, and said: + +"I dare you to step over that, and I'll lick you till you can't stand +up. Anybody that'll take a dare will steal sheep." + +The new boy stepped over promptly, and said: + +"Now you said you'd do it, now let's see you do it." + +"Don't you crowd me now; you better look out." + +"Well, you SAID you'd do it--why don't you do it?" + +"By jingo! for two cents I WILL do it." + +The new boy took two broad coppers out of his pocket and held them out +with derision. Tom struck them to the ground. In an instant both boys +were rolling and tumbling in the dirt, gripped together like cats; and +for the space of a minute they tugged and tore at each other's hair and +clothes, punched and scratched each other's nose, and covered +themselves with dust and glory. Presently the confusion took form, and +through the fog of battle Tom appeared, seated astride the new boy, and +pounding him with his fists. "Holler 'nuff!" said he. + +The boy only struggled to free himself. He was crying--mainly from rage. + +"Holler 'nuff!"--and the pounding went on. + +At last the stranger got out a smothered "'Nuff!" and Tom let him up +and said: + +"Now that'll learn you. 
Better look out who you're fooling with next +time." + +The new boy went off brushing the dust from his clothes, sobbing, +snuffling, and occasionally looking back and shaking his head and +threatening what he would do to Tom the "next time he caught him out." +To which Tom responded with jeers, and started off in high feather, and +as soon as his back was turned the new boy snatched up a stone, threw +it and hit him between the shoulders and then turned tail and ran like +an antelope. Tom chased the traitor home, and thus found out where he +lived. He then held a position at the gate for some time, daring the +enemy to come outside, but the enemy only made faces at him through the +window and declined. At last the enemy's mother appeared, and called +Tom a bad, vicious, vulgar child, and ordered him away. So he went +away; but he said he "'lowed" to "lay" for that boy. + +He got home pretty late that night, and when he climbed cautiously in +at the window, he uncovered an ambuscade, in the person of his aunt; +and when she saw the state his clothes were in her resolution to turn +his Saturday holiday into captivity at hard labor became adamantine in +its firmness. + + + +CHAPTER II + +SATURDAY morning was come, and all the summer world was bright and +fresh, and brimming with life. There was a song in every heart; and if +the heart was young the music issued at the lips. There was cheer in +every face and a spring in every step. The locust-trees were in bloom +and the fragrance of the blossoms filled the air. Cardiff Hill, beyond +the village and above it, was green with vegetation and it lay just far +enough away to seem a Delectable Land, dreamy, reposeful, and inviting. + +Tom appeared on the sidewalk with a bucket of whitewash and a +long-handled brush. He surveyed the fence, and all gladness left him and +a deep melancholy settled down upon his spirit. Thirty yards of board +fence nine feet high. Life to him seemed hollow, and existence but a +burden. Sighing, he dipped his brush and passed it along the topmost +plank; repeated the operation; did it again; compared the insignificant +whitewashed streak with the far-reaching continent of unwhitewashed +fence, and sat down on a tree-box discouraged. Jim came skipping out at +the gate with a tin pail, and singing Buffalo Gals. Bringing water from +the town pump had always been hateful work in Tom's eyes, before, but +now it did not strike him so. He remembered that there was company at +the pump. White, mulatto, and negro boys and girls were always there +waiting their turns, resting, trading playthings, quarrelling, +fighting, skylarking. And he remembered that although the pump was only +a hundred and fifty yards off, Jim never got back with a bucket of +water under an hour--and even then somebody generally had to go after +him. Tom said: + +"Say, Jim, I'll fetch the water if you'll whitewash some." + +Jim shook his head and said: + +"Can't, Mars Tom. Ole missis, she tole me I got to go an' git dis +water an' not stop foolin' roun' wid anybody. She say she spec' Mars +Tom gwine to ax me to whitewash, an' so she tole me go 'long an' 'tend +to my own business--she 'lowed SHE'D 'tend to de whitewashin'." + +"Oh, never you mind what she said, Jim. That's the way she always +talks. Gimme the bucket--I won't be gone only a a minute. SHE won't +ever know." + +"Oh, I dasn't, Mars Tom. Ole missis she'd take an' tar de head off'n +me. 'Deed she would." + +"SHE! 
She never licks anybody--whacks 'em over the head with her +thimble--and who cares for that, I'd like to know. She talks awful, but +talk don't hurt--anyways it don't if she don't cry. Jim, I'll give you +a marvel. I'll give you a white alley!" + +Jim began to waver. + +"White alley, Jim! And it's a bully taw." + +"My! Dat's a mighty gay marvel, I tell you! But Mars Tom I's powerful +'fraid ole missis--" + +"And besides, if you will I'll show you my sore toe." + +Jim was only human--this attraction was too much for him. He put down +his pail, took the white alley, and bent over the toe with absorbing +interest while the bandage was being unwound. In another moment he was +flying down the street with his pail and a tingling rear, Tom was +whitewashing with vigor, and Aunt Polly was retiring from the field +with a slipper in her hand and triumph in her eye. + +But Tom's energy did not last. He began to think of the fun he had +planned for this day, and his sorrows multiplied. Soon the free boys +would come tripping along on all sorts of delicious expeditions, and +they would make a world of fun of him for having to work--the very +thought of it burnt him like fire. He got out his worldly wealth and +examined it--bits of toys, marbles, and trash; enough to buy an +exchange of WORK, maybe, but not half enough to buy so much as half an +hour of pure freedom. So he returned his straitened means to his +pocket, and gave up the idea of trying to buy the boys. At this dark +and hopeless moment an inspiration burst upon him! Nothing less than a +great, magnificent inspiration. + +He took up his brush and went tranquilly to work. Ben Rogers hove in +sight presently--the very boy, of all boys, whose ridicule he had been +dreading. Ben's gait was the hop-skip-and-jump--proof enough that his +heart was light and his anticipations high. He was eating an apple, and +giving a long, melodious whoop, at intervals, followed by a deep-toned +ding-dong-dong, ding-dong-dong, for he was personating a steamboat. As +he drew near, he slackened speed, took the middle of the street, leaned +far over to starboard and rounded to ponderously and with laborious +pomp and circumstance--for he was personating the Big Missouri, and +considered himself to be drawing nine feet of water. He was boat and +captain and engine-bells combined, so he had to imagine himself +standing on his own hurricane-deck giving the orders and executing them: + +"Stop her, sir! Ting-a-ling-ling!" The headway ran almost out, and he +drew up slowly toward the sidewalk. + +"Ship up to back! Ting-a-ling-ling!" His arms straightened and +stiffened down his sides. + +"Set her back on the stabboard! Ting-a-ling-ling! Chow! ch-chow-wow! +Chow!" His right hand, meantime, describing stately circles--for it was +representing a forty-foot wheel. + +"Let her go back on the labboard! Ting-a-lingling! Chow-ch-chow-chow!" +The left hand began to describe circles. + +"Stop the stabboard! Ting-a-ling-ling! Stop the labboard! Come ahead +on the stabboard! Stop her! Let your outside turn over slow! +Ting-a-ling-ling! Chow-ow-ow! Get out that head-line! LIVELY now! +Come--out with your spring-line--what're you about there! Take a turn +round that stump with the bight of it! Stand by that stage, now--let her +go! Done with the engines, sir! Ting-a-ling-ling! SH'T! S'H'T! SH'T!" +(trying the gauge-cocks). + +Tom went on whitewashing--paid no attention to the steamboat. Ben +stared a moment and then said: "Hi-YI! YOU'RE up a stump, ain't you!" + +No answer. 
Tom surveyed his last touch with the eye of an artist, then +he gave his brush another gentle sweep and surveyed the result, as +before. Ben ranged up alongside of him. Tom's mouth watered for the +apple, but he stuck to his work. Ben said: + +"Hello, old chap, you got to work, hey?" + +Tom wheeled suddenly and said: + +"Why, it's you, Ben! I warn't noticing." + +"Say--I'm going in a-swimming, I am. Don't you wish you could? But of +course you'd druther WORK--wouldn't you? Course you would!" + +Tom contemplated the boy a bit, and said: + +"What do you call work?" + +"Why, ain't THAT work?" + +Tom resumed his whitewashing, and answered carelessly: + +"Well, maybe it is, and maybe it ain't. All I know, is, it suits Tom +Sawyer." + +"Oh come, now, you don't mean to let on that you LIKE it?" + +The brush continued to move. + +"Like it? Well, I don't see why I oughtn't to like it. Does a boy get +a chance to whitewash a fence every day?" + +That put the thing in a new light. Ben stopped nibbling his apple. Tom +swept his brush daintily back and forth--stepped back to note the +effect--added a touch here and there--criticised the effect again--Ben +watching every move and getting more and more interested, more and more +absorbed. Presently he said: + +"Say, Tom, let ME whitewash a little." + +Tom considered, was about to consent; but he altered his mind: + +"No--no--I reckon it wouldn't hardly do, Ben. You see, Aunt Polly's +awful particular about this fence--right here on the street, you know +--but if it was the back fence I wouldn't mind and SHE wouldn't. Yes, +she's awful particular about this fence; it's got to be done very +careful; I reckon there ain't one boy in a thousand, maybe two +thousand, that can do it the way it's got to be done." + +"No--is that so? Oh come, now--lemme just try. Only just a little--I'd +let YOU, if you was me, Tom." + +"Ben, I'd like to, honest injun; but Aunt Polly--well, Jim wanted to +do it, but she wouldn't let him; Sid wanted to do it, and she wouldn't +let Sid. Now don't you see how I'm fixed? If you was to tackle this +fence and anything was to happen to it--" + +"Oh, shucks, I'll be just as careful. Now lemme try. Say--I'll give +you the core of my apple." + +"Well, here--No, Ben, now don't. I'm afeard--" + +"I'll give you ALL of it!" + +Tom gave up the brush with reluctance in his face, but alacrity in his +heart. And while the late steamer Big Missouri worked and sweated in +the sun, the retired artist sat on a barrel in the shade close by, +dangled his legs, munched his apple, and planned the slaughter of more +innocents. There was no lack of material; boys happened along every +little while; they came to jeer, but remained to whitewash. By the time +Ben was fagged out, Tom had traded the next chance to Billy Fisher for +a kite, in good repair; and when he played out, Johnny Miller bought in +for a dead rat and a string to swing it with--and so on, and so on, +hour after hour. And when the middle of the afternoon came, from being +a poor poverty-stricken boy in the morning, Tom was literally rolling +in wealth. 
He had besides the things before mentioned, twelve marbles, +part of a jews-harp, a piece of blue bottle-glass to look through, a +spool cannon, a key that wouldn't unlock anything, a fragment of chalk, +a glass stopper of a decanter, a tin soldier, a couple of tadpoles, six +fire-crackers, a kitten with only one eye, a brass doorknob, a +dog-collar--but no dog--the handle of a knife, four pieces of +orange-peel, and a dilapidated old window sash. + +He had had a nice, good, idle time all the while--plenty of company +--and the fence had three coats of whitewash on it! If he hadn't run out +of whitewash he would have bankrupted every boy in the village. + +Tom said to himself that it was not such a hollow world, after all. He +had discovered a great law of human action, without knowing it--namely, +that in order to make a man or a boy covet a thing, it is only +necessary to make the thing difficult to attain. If he had been a great +and wise philosopher, like the writer of this book, he would now have +comprehended that Work consists of whatever a body is OBLIGED to do, +and that Play consists of whatever a body is not obliged to do. And +this would help him to understand why constructing artificial flowers +or performing on a tread-mill is work, while rolling ten-pins or +climbing Mont Blanc is only amusement. There are wealthy gentlemen in +England who drive four-horse passenger-coaches twenty or thirty miles +on a daily line, in the summer, because the privilege costs them +considerable money; but if they were offered wages for the service, +that would turn it into work and then they would resign. + +The boy mused awhile over the substantial change which had taken place +in his worldly circumstances, and then wended toward headquarters to +report. + + + +CHAPTER III + +TOM presented himself before Aunt Polly, who was sitting by an open +window in a pleasant rearward apartment, which was bedroom, +breakfast-room, dining-room, and library, combined. The balmy summer +air, the restful quiet, the odor of the flowers, and the drowsing murmur +of the bees had had their effect, and she was nodding over her knitting +--for she had no company but the cat, and it was asleep in her lap. Her +spectacles were propped up on her gray head for safety. She had thought +that of course Tom had deserted long ago, and she wondered at seeing him +place himself in her power again in this intrepid way. He said: "Mayn't +I go and play now, aunt?" + +"What, a'ready? How much have you done?" + +"It's all done, aunt." + +"Tom, don't lie to me--I can't bear it." + +"I ain't, aunt; it IS all done." + +Aunt Polly placed small trust in such evidence. She went out to see +for herself; and she would have been content to find twenty per cent. +of Tom's statement true. When she found the entire fence whitewashed, +and not only whitewashed but elaborately coated and recoated, and even +a streak added to the ground, her astonishment was almost unspeakable. +She said: + +"Well, I never! There's no getting round it, you can work when you're +a mind to, Tom." And then she diluted the compliment by adding, "But +it's powerful seldom you're a mind to, I'm bound to say. Well, go 'long +and play; but mind you get back some time in a week, or I'll tan you." 
+ +She was so overcome by the splendor of his achievement that she took +him into the closet and selected a choice apple and delivered it to +him, along with an improving lecture upon the added value and flavor a +treat took to itself when it came without sin through virtuous effort. +And while she closed with a happy Scriptural flourish, he "hooked" a +doughnut. + +Then he skipped out, and saw Sid just starting up the outside stairway +that led to the back rooms on the second floor. Clods were handy and +the air was full of them in a twinkling. They raged around Sid like a +hail-storm; and before Aunt Polly could collect her surprised faculties +and sally to the rescue, six or seven clods had taken personal effect, +and Tom was over the fence and gone. There was a gate, but as a general +thing he was too crowded for time to make use of it. His soul was at +peace, now that he had settled with Sid for calling attention to his +black thread and getting him into trouble. + +Tom skirted the block, and came round into a muddy alley that led by +the back of his aunt's cow-stable. He presently got safely beyond the +reach of capture and punishment, and hastened toward the public square +of the village, where two "military" companies of boys had met for +conflict, according to previous appointment. Tom was General of one of +these armies, Joe Harper (a bosom friend) General of the other. These +two great commanders did not condescend to fight in person--that being +better suited to the still smaller fry--but sat together on an eminence +and conducted the field operations by orders delivered through +aides-de-camp. Tom's army won a great victory, after a long and +hard-fought battle. Then the dead were counted, prisoners exchanged, +the terms of the next disagreement agreed upon, and the day for the +necessary battle appointed; after which the armies fell into line and +marched away, and Tom turned homeward alone. + +As he was passing by the house where Jeff Thatcher lived, he saw a new +girl in the garden--a lovely little blue-eyed creature with yellow hair +plaited into two long-tails, white summer frock and embroidered +pantalettes. The fresh-crowned hero fell without firing a shot. A +certain Amy Lawrence vanished out of his heart and left not even a +memory of herself behind. He had thought he loved her to distraction; +he had regarded his passion as adoration; and behold it was only a poor +little evanescent partiality. He had been months winning her; she had +confessed hardly a week ago; he had been the happiest and the proudest +boy in the world only seven short days, and here in one instant of time +she had gone out of his heart like a casual stranger whose visit is +done. + +He worshipped this new angel with furtive eye, till he saw that she +had discovered him; then he pretended he did not know she was present, +and began to "show off" in all sorts of absurd boyish ways, in order to +win her admiration. He kept up this grotesque foolishness for some +time; but by-and-by, while he was in the midst of some dangerous +gymnastic performances, he glanced aside and saw that the little girl +was wending her way toward the house. Tom came up to the fence and +leaned on it, grieving, and hoping she would tarry yet awhile longer. +She halted a moment on the steps and then moved toward the door. Tom +heaved a great sigh as she put her foot on the threshold. But his face +lit up, right away, for she tossed a pansy over the fence a moment +before she disappeared. 
+ +The boy ran around and stopped within a foot or two of the flower, and +then shaded his eyes with his hand and began to look down street as if +he had discovered something of interest going on in that direction. +Presently he picked up a straw and began trying to balance it on his +nose, with his head tilted far back; and as he moved from side to side, +in his efforts, he edged nearer and nearer toward the pansy; finally +his bare foot rested upon it, his pliant toes closed upon it, and he +hopped away with the treasure and disappeared round the corner. But +only for a minute--only while he could button the flower inside his +jacket, next his heart--or next his stomach, possibly, for he was not +much posted in anatomy, and not hypercritical, anyway. + +He returned, now, and hung about the fence till nightfall, "showing +off," as before; but the girl never exhibited herself again, though Tom +comforted himself a little with the hope that she had been near some +window, meantime, and been aware of his attentions. Finally he strode +home reluctantly, with his poor head full of visions. + +All through supper his spirits were so high that his aunt wondered +"what had got into the child." He took a good scolding about clodding +Sid, and did not seem to mind it in the least. He tried to steal sugar +under his aunt's very nose, and got his knuckles rapped for it. He said: + +"Aunt, you don't whack Sid when he takes it." + +"Well, Sid don't torment a body the way you do. You'd be always into +that sugar if I warn't watching you." + +Presently she stepped into the kitchen, and Sid, happy in his +immunity, reached for the sugar-bowl--a sort of glorying over Tom which +was wellnigh unbearable. But Sid's fingers slipped and the bowl dropped +and broke. Tom was in ecstasies. In such ecstasies that he even +controlled his tongue and was silent. He said to himself that he would +not speak a word, even when his aunt came in, but would sit perfectly +still till she asked who did the mischief; and then he would tell, and +there would be nothing so good in the world as to see that pet model +"catch it." He was so brimful of exultation that he could hardly hold +himself when the old lady came back and stood above the wreck +discharging lightnings of wrath from over her spectacles. He said to +himself, "Now it's coming!" And the next instant he was sprawling on +the floor! The potent palm was uplifted to strike again when Tom cried +out: + +"Hold on, now, what 'er you belting ME for?--Sid broke it!" + +Aunt Polly paused, perplexed, and Tom looked for healing pity. But +when she got her tongue again, she only said: + +"Umf! Well, you didn't get a lick amiss, I reckon. You been into some +other audacious mischief when I wasn't around, like enough." + +Then her conscience reproached her, and she yearned to say something +kind and loving; but she judged that this would be construed into a +confession that she had been in the wrong, and discipline forbade that. +So she kept silence, and went about her affairs with a troubled heart. +Tom sulked in a corner and exalted his woes. He knew that in her heart +his aunt was on her knees to him, and he was morosely gratified by the +consciousness of it. He would hang out no signals, he would take notice +of none. He knew that a yearning glance fell upon him, now and then, +through a film of tears, but he refused recognition of it. 
He pictured +himself lying sick unto death and his aunt bending over him beseeching +one little forgiving word, but he would turn his face to the wall, and +die with that word unsaid. Ah, how would she feel then? And he pictured +himself brought home from the river, dead, with his curls all wet, and +his sore heart at rest. How she would throw herself upon him, and how +her tears would fall like rain, and her lips pray God to give her back +her boy and she would never, never abuse him any more! But he would lie +there cold and white and make no sign--a poor little sufferer, whose +griefs were at an end. He so worked upon his feelings with the pathos +of these dreams, that he had to keep swallowing, he was so like to +choke; and his eyes swam in a blur of water, which overflowed when he +winked, and ran down and trickled from the end of his nose. And such a +luxury to him was this petting of his sorrows, that he could not bear +to have any worldly cheeriness or any grating delight intrude upon it; +it was too sacred for such contact; and so, presently, when his cousin +Mary danced in, all alive with the joy of seeing home again after an +age-long visit of one week to the country, he got up and moved in +clouds and darkness out at one door as she brought song and sunshine in +at the other. + +He wandered far from the accustomed haunts of boys, and sought +desolate places that were in harmony with his spirit. A log raft in the +river invited him, and he seated himself on its outer edge and +contemplated the dreary vastness of the stream, wishing, the while, +that he could only be drowned, all at once and unconsciously, without +undergoing the uncomfortable routine devised by nature. Then he thought +of his flower. He got it out, rumpled and wilted, and it mightily +increased his dismal felicity. He wondered if she would pity him if she +knew? Would she cry, and wish that she had a right to put her arms +around his neck and comfort him? Or would she turn coldly away like all +the hollow world? This picture brought such an agony of pleasurable +suffering that he worked it over and over again in his mind and set it +up in new and varied lights, till he wore it threadbare. At last he +rose up sighing and departed in the darkness. + +About half-past nine or ten o'clock he came along the deserted street +to where the Adored Unknown lived; he paused a moment; no sound fell +upon his listening ear; a candle was casting a dull glow upon the +curtain of a second-story window. Was the sacred presence there? He +climbed the fence, threaded his stealthy way through the plants, till +he stood under that window; he looked up at it long, and with emotion; +then he laid him down on the ground under it, disposing himself upon +his back, with his hands clasped upon his breast and holding his poor +wilted flower. And thus he would die--out in the cold world, with no +shelter over his homeless head, no friendly hand to wipe the +death-damps from his brow, no loving face to bend pityingly over him +when the great agony came. And thus SHE would see him when she looked +out upon the glad morning, and oh! would she drop one little tear upon +his poor, lifeless form, would she heave one little sigh to see a bright +young life so rudely blighted, so untimely cut down? + +The window went up, a maid-servant's discordant voice profaned the +holy calm, and a deluge of water drenched the prone martyr's remains! + +The strangling hero sprang up with a relieving snort. 
There was a whiz +as of a missile in the air, mingled with the murmur of a curse, a sound +as of shivering glass followed, and a small, vague form went over the +fence and shot away in the gloom. + +Not long after, as Tom, all undressed for bed, was surveying his +drenched garments by the light of a tallow dip, Sid woke up; but if he +had any dim idea of making any "references to allusions," he thought +better of it and held his peace, for there was danger in Tom's eye. + +Tom turned in without the added vexation of prayers, and Sid made +mental note of the omission. + + + +CHAPTER IV + +THE sun rose upon a tranquil world, and beamed down upon the peaceful +village like a benediction. Breakfast over, Aunt Polly had family +worship: it began with a prayer built from the ground up of solid +courses of Scriptural quotations, welded together with a thin mortar of +originality; and from the summit of this she delivered a grim chapter +of the Mosaic Law, as from Sinai. + +Then Tom girded up his loins, so to speak, and went to work to "get +his verses." Sid had learned his lesson days before. Tom bent all his +energies to the memorizing of five verses, and he chose part of the +Sermon on the Mount, because he could find no verses that were shorter. +At the end of half an hour Tom had a vague general idea of his lesson, +but no more, for his mind was traversing the whole field of human +thought, and his hands were busy with distracting recreations. Mary +took his book to hear him recite, and he tried to find his way through +the fog: + +"Blessed are the--a--a--" + +"Poor"-- + +"Yes--poor; blessed are the poor--a--a--" + +"In spirit--" + +"In spirit; blessed are the poor in spirit, for they--they--" + +"THEIRS--" + +"For THEIRS. Blessed are the poor in spirit, for theirs is the kingdom +of heaven. Blessed are they that mourn, for they--they--" + +"Sh--" + +"For they--a--" + +"S, H, A--" + +"For they S, H--Oh, I don't know what it is!" + +"SHALL!" + +"Oh, SHALL! for they shall--for they shall--a--a--shall mourn--a--a-- +blessed are they that shall--they that--a--they that shall mourn, for +they shall--a--shall WHAT? Why don't you tell me, Mary?--what do you +want to be so mean for?" + +"Oh, Tom, you poor thick-headed thing, I'm not teasing you. I wouldn't +do that. You must go and learn it again. Don't you be discouraged, Tom, +you'll manage it--and if you do, I'll give you something ever so nice. +There, now, that's a good boy." + +"All right! What is it, Mary, tell me what it is." + +"Never you mind, Tom. You know if I say it's nice, it is nice." + +"You bet you that's so, Mary. All right, I'll tackle it again." + +And he did "tackle it again"--and under the double pressure of +curiosity and prospective gain he did it with such spirit that he +accomplished a shining success. Mary gave him a brand-new "Barlow" +knife worth twelve and a half cents; and the convulsion of delight that +swept his system shook him to his foundations. True, the knife would +not cut anything, but it was a "sure-enough" Barlow, and there was +inconceivable grandeur in that--though where the Western boys ever got +the idea that such a weapon could possibly be counterfeited to its +injury is an imposing mystery and will always remain so, perhaps. Tom +contrived to scarify the cupboard with it, and was arranging to begin +on the bureau, when he was called off to dress for Sunday-school. 
+ +Mary gave him a tin basin of water and a piece of soap, and he went +outside the door and set the basin on a little bench there; then he +dipped the soap in the water and laid it down; turned up his sleeves; +poured out the water on the ground, gently, and then entered the +kitchen and began to wipe his face diligently on the towel behind the +door. But Mary removed the towel and said: + +"Now ain't you ashamed, Tom. You mustn't be so bad. Water won't hurt +you." + +Tom was a trifle disconcerted. The basin was refilled, and this time +he stood over it a little while, gathering resolution; took in a big +breath and began. When he entered the kitchen presently, with both eyes +shut and groping for the towel with his hands, an honorable testimony +of suds and water was dripping from his face. But when he emerged from +the towel, he was not yet satisfactory, for the clean territory stopped +short at his chin and his jaws, like a mask; below and beyond this line +there was a dark expanse of unirrigated soil that spread downward in +front and backward around his neck. Mary took him in hand, and when she +was done with him he was a man and a brother, without distinction of +color, and his saturated hair was neatly brushed, and its short curls +wrought into a dainty and symmetrical general effect. [He privately +smoothed out the curls, with labor and difficulty, and plastered his +hair close down to his head; for he held curls to be effeminate, and +his own filled his life with bitterness.] Then Mary got out a suit of +his clothing that had been used only on Sundays during two years--they +were simply called his "other clothes"--and so by that we know the +size of his wardrobe. The girl "put him to rights" after he had dressed +himself; she buttoned his neat roundabout up to his chin, turned his +vast shirt collar down over his shoulders, brushed him off and crowned +him with his speckled straw hat. He now looked exceedingly improved and +uncomfortable. He was fully as uncomfortable as he looked; for there +was a restraint about whole clothes and cleanliness that galled him. He +hoped that Mary would forget his shoes, but the hope was blighted; she +coated them thoroughly with tallow, as was the custom, and brought them +out. He lost his temper and said he was always being made to do +everything he didn't want to do. But Mary said, persuasively: + +"Please, Tom--that's a good boy." + +So he got into the shoes snarling. Mary was soon ready, and the three +children set out for Sunday-school--a place that Tom hated with his +whole heart; but Sid and Mary were fond of it. + +Sabbath-school hours were from nine to half-past ten; and then church +service. Two of the children always remained for the sermon +voluntarily, and the other always remained too--for stronger reasons. +The church's high-backed, uncushioned pews would seat about three +hundred persons; the edifice was but a small, plain affair, with a sort +of pine board tree-box on top of it for a steeple. At the door Tom +dropped back a step and accosted a Sunday-dressed comrade: + +"Say, Billy, got a yaller ticket?" + +"Yes." + +"What'll you take for her?" + +"What'll you give?" + +"Piece of lickrish and a fish-hook." + +"Less see 'em." + +Tom exhibited. They were satisfactory, and the property changed hands. +Then Tom traded a couple of white alleys for three red tickets, and +some small trifle or other for a couple of blue ones. 
He waylaid other +boys as they came, and went on buying tickets of various colors ten or +fifteen minutes longer. He entered the church, now, with a swarm of +clean and noisy boys and girls, proceeded to his seat and started a +quarrel with the first boy that came handy. The teacher, a grave, +elderly man, interfered; then turned his back a moment and Tom pulled a +boy's hair in the next bench, and was absorbed in his book when the boy +turned around; stuck a pin in another boy, presently, in order to hear +him say "Ouch!" and got a new reprimand from his teacher. Tom's whole +class were of a pattern--restless, noisy, and troublesome. When they +came to recite their lessons, not one of them knew his verses +perfectly, but had to be prompted all along. However, they worried +through, and each got his reward--in small blue tickets, each with a +passage of Scripture on it; each blue ticket was pay for two verses of +the recitation. Ten blue tickets equalled a red one, and could be +exchanged for it; ten red tickets equalled a yellow one; for ten yellow +tickets the superintendent gave a very plainly bound Bible (worth forty +cents in those easy times) to the pupil. How many of my readers would +have the industry and application to memorize two thousand verses, even +for a Dore Bible? And yet Mary had acquired two Bibles in this way--it +was the patient work of two years--and a boy of German parentage had +won four or five. He once recited three thousand verses without +stopping; but the strain upon his mental faculties was too great, and +he was little better than an idiot from that day forth--a grievous +misfortune for the school, for on great occasions, before company, the +superintendent (as Tom expressed it) had always made this boy come out +and "spread himself." Only the older pupils managed to keep their +tickets and stick to their tedious work long enough to get a Bible, and +so the delivery of one of these prizes was a rare and noteworthy +circumstance; the successful pupil was so great and conspicuous for +that day that on the spot every scholar's heart was fired with a fresh +ambition that often lasted a couple of weeks. It is possible that Tom's +mental stomach had never really hungered for one of those prizes, but +unquestionably his entire being had for many a day longed for the glory +and the eclat that came with it. + +In due course the superintendent stood up in front of the pulpit, with +a closed hymn-book in his hand and his forefinger inserted between its +leaves, and commanded attention. When a Sunday-school superintendent +makes his customary little speech, a hymn-book in the hand is as +necessary as is the inevitable sheet of music in the hand of a singer +who stands forward on the platform and sings a solo at a concert +--though why, is a mystery: for neither the hymn-book nor the sheet of +music is ever referred to by the sufferer. 
This superintendent was a +slim creature of thirty-five, with a sandy goatee and short sandy hair; +he wore a stiff standing-collar whose upper edge almost reached his +ears and whose sharp points curved forward abreast the corners of his +mouth--a fence that compelled a straight lookout ahead, and a turning +of the whole body when a side view was required; his chin was propped +on a spreading cravat which was as broad and as long as a bank-note, +and had fringed ends; his boot toes were turned sharply up, in the +fashion of the day, like sleigh-runners--an effect patiently and +laboriously produced by the young men by sitting with their toes +pressed against a wall for hours together. Mr. Walters was very earnest +of mien, and very sincere and honest at heart; and he held sacred +things and places in such reverence, and so separated them from worldly +matters, that unconsciously to himself his Sunday-school voice had +acquired a peculiar intonation which was wholly absent on week-days. He +began after this fashion: + +"Now, children, I want you all to sit up just as straight and pretty +as you can and give me all your attention for a minute or two. There +--that is it. That is the way good little boys and girls should do. I see +one little girl who is looking out of the window--I am afraid she +thinks I am out there somewhere--perhaps up in one of the trees making +a speech to the little birds. [Applausive titter.] I want to tell you +how good it makes me feel to see so many bright, clean little faces +assembled in a place like this, learning to do right and be good." And +so forth and so on. It is not necessary to set down the rest of the +oration. It was of a pattern which does not vary, and so it is familiar +to us all. + +The latter third of the speech was marred by the resumption of fights +and other recreations among certain of the bad boys, and by fidgetings +and whisperings that extended far and wide, washing even to the bases +of isolated and incorruptible rocks like Sid and Mary. But now every +sound ceased suddenly, with the subsidence of Mr. Walters' voice, and +the conclusion of the speech was received with a burst of silent +gratitude. + +A good part of the whispering had been occasioned by an event which +was more or less rare--the entrance of visitors: lawyer Thatcher, +accompanied by a very feeble and aged man; a fine, portly, middle-aged +gentleman with iron-gray hair; and a dignified lady who was doubtless +the latter's wife. The lady was leading a child. Tom had been restless +and full of chafings and repinings; conscience-smitten, too--he could +not meet Amy Lawrence's eye, he could not brook her loving gaze. But +when he saw this small new-comer his soul was all ablaze with bliss in +a moment. The next moment he was "showing off" with all his might +--cuffing boys, pulling hair, making faces--in a word, using every art +that seemed likely to fascinate a girl and win her applause. His +exaltation had but one alloy--the memory of his humiliation in this +angel's garden--and that record in sand was fast washing out, under +the waves of happiness that were sweeping over it now. + +The visitors were given the highest seat of honor, and as soon as Mr. +Walters' speech was finished, he introduced them to the school. 
The +middle-aged man turned out to be a prodigious personage--no less a one +than the county judge--altogether the most august creation these +children had ever looked upon--and they wondered what kind of material +he was made of--and they half wanted to hear him roar, and were half +afraid he might, too. He was from Constantinople, twelve miles away--so +he had travelled, and seen the world--these very eyes had looked upon +the county court-house--which was said to have a tin roof. The awe +which these reflections inspired was attested by the impressive silence +and the ranks of staring eyes. This was the great Judge Thatcher, +brother of their own lawyer. Jeff Thatcher immediately went forward, to +be familiar with the great man and be envied by the school. It would +have been music to his soul to hear the whisperings: + +"Look at him, Jim! He's a going up there. Say--look! he's a going to +shake hands with him--he IS shaking hands with him! By jings, don't you +wish you was Jeff?" + +Mr. Walters fell to "showing off," with all sorts of official +bustlings and activities, giving orders, delivering judgments, +discharging directions here, there, everywhere that he could find a +target. The librarian "showed off"--running hither and thither with his +arms full of books and making a deal of the splutter and fuss that +insect authority delights in. The young lady teachers "showed off" +--bending sweetly over pupils that were lately being boxed, lifting +pretty warning fingers at bad little boys and patting good ones +lovingly. The young gentlemen teachers "showed off" with small +scoldings and other little displays of authority and fine attention to +discipline--and most of the teachers, of both sexes, found business up +at the library, by the pulpit; and it was business that frequently had +to be done over again two or three times (with much seeming vexation). +The little girls "showed off" in various ways, and the little boys +"showed off" with such diligence that the air was thick with paper wads +and the murmur of scufflings. And above it all the great man sat and +beamed a majestic judicial smile upon all the house, and warmed himself +in the sun of his own grandeur--for he was "showing off," too. + +There was only one thing wanting to make Mr. Walters' ecstasy +complete, and that was a chance to deliver a Bible-prize and exhibit a +prodigy. Several pupils had a few yellow tickets, but none had enough +--he had been around among the star pupils inquiring. He would have given +worlds, now, to have that German lad back again with a sound mind. + +And now at this moment, when hope was dead, Tom Sawyer came forward +with nine yellow tickets, nine red tickets, and ten blue ones, and +demanded a Bible. This was a thunderbolt out of a clear sky. Walters +was not expecting an application from this source for the next ten +years. But there was no getting around it--here were the certified +checks, and they were good for their face. Tom was therefore elevated +to a place with the Judge and the other elect, and the great news was +announced from headquarters. It was the most stunning surprise of the +decade, and so profound was the sensation that it lifted the new hero +up to the judicial one's altitude, and the school had two marvels to +gaze upon in place of one. 
The boys were all eaten up with envy--but +those that suffered the bitterest pangs were those who perceived too +late that they themselves had contributed to this hated splendor by +trading tickets to Tom for the wealth he had amassed in selling +whitewashing privileges. These despised themselves, as being the dupes +of a wily fraud, a guileful snake in the grass. + +The prize was delivered to Tom with as much effusion as the +superintendent could pump up under the circumstances; but it lacked +somewhat of the true gush, for the poor fellow's instinct taught him +that there was a mystery here that could not well bear the light, +perhaps; it was simply preposterous that this boy had warehoused two +thousand sheaves of Scriptural wisdom on his premises--a dozen would +strain his capacity, without a doubt. + +Amy Lawrence was proud and glad, and she tried to make Tom see it in +her face--but he wouldn't look. She wondered; then she was just a grain +troubled; next a dim suspicion came and went--came again; she watched; +a furtive glance told her worlds--and then her heart broke, and she was +jealous, and angry, and the tears came and she hated everybody. Tom +most of all (she thought). + +Tom was introduced to the Judge; but his tongue was tied, his breath +would hardly come, his heart quaked--partly because of the awful +greatness of the man, but mainly because he was her parent. He would +have liked to fall down and worship him, if it were in the dark. The +Judge put his hand on Tom's head and called him a fine little man, and +asked him what his name was. The boy stammered, gasped, and got it out: + +"Tom." + +"Oh, no, not Tom--it is--" + +"Thomas." + +"Ah, that's it. I thought there was more to it, maybe. That's very +well. But you've another one I daresay, and you'll tell it to me, won't +you?" + +"Tell the gentleman your other name, Thomas," said Walters, "and say +sir. You mustn't forget your manners." + +"Thomas Sawyer--sir." + +"That's it! That's a good boy. Fine boy. Fine, manly little fellow. +Two thousand verses is a great many--very, very great many. And you +never can be sorry for the trouble you took to learn them; for +knowledge is worth more than anything there is in the world; it's what +makes great men and good men; you'll be a great man and a good man +yourself, some day, Thomas, and then you'll look back and say, It's all +owing to the precious Sunday-school privileges of my boyhood--it's all +owing to my dear teachers that taught me to learn--it's all owing to +the good superintendent, who encouraged me, and watched over me, and +gave me a beautiful Bible--a splendid elegant Bible--to keep and have +it all for my own, always--it's all owing to right bringing up! That is +what you will say, Thomas--and you wouldn't take any money for those +two thousand verses--no indeed you wouldn't. And now you wouldn't mind +telling me and this lady some of the things you've learned--no, I know +you wouldn't--for we are proud of little boys that learn. Now, no +doubt you know the names of all the twelve disciples. Won't you tell us +the names of the first two that were appointed?" + +Tom was tugging at a button-hole and looking sheepish. He blushed, +now, and his eyes fell. Mr. Walters' heart sank within him. He said to +himself, it is not possible that the boy can answer the simplest +question--why DID the Judge ask him? Yet he felt obliged to speak up +and say: + +"Answer the gentleman, Thomas--don't be afraid." + +Tom still hung fire. + +"Now I know you'll tell me," said the lady. 
"The names of the first +two disciples were--" + +"DAVID AND GOLIAH!" + +Let us draw the curtain of charity over the rest of the scene. + + + +CHAPTER V + +ABOUT half-past ten the cracked bell of the small church began to +ring, and presently the people began to gather for the morning sermon. +The Sunday-school children distributed themselves about the house and +occupied pews with their parents, so as to be under supervision. Aunt +Polly came, and Tom and Sid and Mary sat with her--Tom being placed +next the aisle, in order that he might be as far away from the open +window and the seductive outside summer scenes as possible. The crowd +filed up the aisles: the aged and needy postmaster, who had seen better +days; the mayor and his wife--for they had a mayor there, among other +unnecessaries; the justice of the peace; the widow Douglass, fair, +smart, and forty, a generous, good-hearted soul and well-to-do, her +hill mansion the only palace in the town, and the most hospitable and +much the most lavish in the matter of festivities that St. Petersburg +could boast; the bent and venerable Major and Mrs. Ward; lawyer +Riverson, the new notable from a distance; next the belle of the +village, followed by a troop of lawn-clad and ribbon-decked young +heart-breakers; then all the young clerks in town in a body--for they +had stood in the vestibule sucking their cane-heads, a circling wall of +oiled and simpering admirers, till the last girl had run their gantlet; +and last of all came the Model Boy, Willie Mufferson, taking as heedful +care of his mother as if she were cut glass. He always brought his +mother to church, and was the pride of all the matrons. The boys all +hated him, he was so good. And besides, he had been "thrown up to them" +so much. His white handkerchief was hanging out of his pocket behind, as +usual on Sundays--accidentally. Tom had no handkerchief, and he looked +upon boys who had as snobs. + +The congregation being fully assembled, now, the bell rang once more, +to warn laggards and stragglers, and then a solemn hush fell upon the +church which was only broken by the tittering and whispering of the +choir in the gallery. The choir always tittered and whispered all +through service. There was once a church choir that was not ill-bred, +but I have forgotten where it was, now. It was a great many years ago, +and I can scarcely remember anything about it, but I think it was in +some foreign country. + +The minister gave out the hymn, and read it through with a relish, in +a peculiar style which was much admired in that part of the country. +His voice began on a medium key and climbed steadily up till it reached +a certain point, where it bore with strong emphasis upon the topmost +word and then plunged down as if from a spring-board: + + Shall I be car-ri-ed toe the skies, on flow'ry BEDS of ease, + + Whilst others fight to win the prize, and sail thro' BLOODY seas? + +He was regarded as a wonderful reader. At church "sociables" he was +always called upon to read poetry; and when he was through, the ladies +would lift up their hands and let them fall helplessly in their laps, +and "wall" their eyes, and shake their heads, as much as to say, "Words +cannot express it; it is too beautiful, TOO beautiful for this mortal +earth." + +After the hymn had been sung, the Rev. Mr. 
Sprague turned himself into +a bulletin-board, and read off "notices" of meetings and societies and +things till it seemed that the list would stretch out to the crack of +doom--a queer custom which is still kept up in America, even in cities, +away here in this age of abundant newspapers. Often, the less there is +to justify a traditional custom, the harder it is to get rid of it. + +And now the minister prayed. A good, generous prayer it was, and went +into details: it pleaded for the church, and the little children of the +church; for the other churches of the village; for the village itself; +for the county; for the State; for the State officers; for the United +States; for the churches of the United States; for Congress; for the +President; for the officers of the Government; for poor sailors, tossed +by stormy seas; for the oppressed millions groaning under the heel of +European monarchies and Oriental despotisms; for such as have the light +and the good tidings, and yet have not eyes to see nor ears to hear +withal; for the heathen in the far islands of the sea; and closed with +a supplication that the words he was about to speak might find grace +and favor, and be as seed sown in fertile ground, yielding in time a +grateful harvest of good. Amen. + +There was a rustling of dresses, and the standing congregation sat +down. The boy whose history this book relates did not enjoy the prayer, +he only endured it--if he even did that much. He was restive all +through it; he kept tally of the details of the prayer, unconsciously +--for he was not listening, but he knew the ground of old, and the +clergyman's regular route over it--and when a little trifle of new +matter was interlarded, his ear detected it and his whole nature +resented it; he considered additions unfair, and scoundrelly. In the +midst of the prayer a fly had lit on the back of the pew in front of +him and tortured his spirit by calmly rubbing its hands together, +embracing its head with its arms, and polishing it so vigorously that +it seemed to almost part company with the body, and the slender thread +of a neck was exposed to view; scraping its wings with its hind legs +and smoothing them to its body as if they had been coat-tails; going +through its whole toilet as tranquilly as if it knew it was perfectly +safe. As indeed it was; for as sorely as Tom's hands itched to grab for +it they did not dare--he believed his soul would be instantly destroyed +if he did such a thing while the prayer was going on. But with the +closing sentence his hand began to curve and steal forward; and the +instant the "Amen" was out the fly was a prisoner of war. His aunt +detected the act and made him let it go. + +The minister gave out his text and droned along monotonously through +an argument that was so prosy that many a head by and by began to nod +--and yet it was an argument that dealt in limitless fire and brimstone +and thinned the predestined elect down to a company so small as to be +hardly worth the saving. Tom counted the pages of the sermon; after +church he always knew how many pages there had been, but he seldom knew +anything else about the discourse. However, this time he was really +interested for a little while. The minister made a grand and moving +picture of the assembling together of the world's hosts at the +millennium when the lion and the lamb should lie down together and a +little child should lead them. 
But the pathos, the lesson, the moral of +the great spectacle were lost upon the boy; he only thought of the +conspicuousness of the principal character before the on-looking +nations; his face lit with the thought, and he said to himself that he +wished he could be that child, if it was a tame lion. + +Now he lapsed into suffering again, as the dry argument was resumed. +Presently he bethought him of a treasure he had and got it out. It was +a large black beetle with formidable jaws--a "pinchbug," he called it. +It was in a percussion-cap box. The first thing the beetle did was to +take him by the finger. A natural fillip followed, the beetle went +floundering into the aisle and lit on its back, and the hurt finger +went into the boy's mouth. The beetle lay there working its helpless +legs, unable to turn over. Tom eyed it, and longed for it; but it was +safe out of his reach. Other people uninterested in the sermon found +relief in the beetle, and they eyed it too. Presently a vagrant poodle +dog came idling along, sad at heart, lazy with the summer softness and +the quiet, weary of captivity, sighing for change. He spied the beetle; +the drooping tail lifted and wagged. He surveyed the prize; walked +around it; smelt at it from a safe distance; walked around it again; +grew bolder, and took a closer smell; then lifted his lip and made a +gingerly snatch at it, just missing it; made another, and another; +began to enjoy the diversion; subsided to his stomach with the beetle +between his paws, and continued his experiments; grew weary at last, +and then indifferent and absent-minded. His head nodded, and little by +little his chin descended and touched the enemy, who seized it. There +was a sharp yelp, a flirt of the poodle's head, and the beetle fell a +couple of yards away, and lit on its back once more. The neighboring +spectators shook with a gentle inward joy, several faces went behind +fans and handkerchiefs, and Tom was entirely happy. The dog looked +foolish, and probably felt so; but there was resentment in his heart, +too, and a craving for revenge. So he went to the beetle and began a +wary attack on it again; jumping at it from every point of a circle, +lighting with his fore-paws within an inch of the creature, making even +closer snatches at it with his teeth, and jerking his head till his +ears flapped again. But he grew tired once more, after a while; tried +to amuse himself with a fly but found no relief; followed an ant +around, with his nose close to the floor, and quickly wearied of that; +yawned, sighed, forgot the beetle entirely, and sat down on it. Then +there was a wild yelp of agony and the poodle went sailing up the +aisle; the yelps continued, and so did the dog; he crossed the house in +front of the altar; he flew down the other aisle; he crossed before the +doors; he clamored up the home-stretch; his anguish grew with his +progress, till presently he was but a woolly comet moving in its orbit +with the gleam and the speed of light. At last the frantic sufferer +sheered from its course, and sprang into its master's lap; he flung it +out of the window, and the voice of distress quickly thinned away and +died in the distance. + +By this time the whole church was red-faced and suffocating with +suppressed laughter, and the sermon had come to a dead standstill. 
The +discourse was resumed presently, but it went lame and halting, all +possibility of impressiveness being at an end; for even the gravest +sentiments were constantly being received with a smothered burst of +unholy mirth, under cover of some remote pew-back, as if the poor +parson had said a rarely facetious thing. It was a genuine relief to +the whole congregation when the ordeal was over and the benediction +pronounced. + +Tom Sawyer went home quite cheerful, thinking to himself that there +was some satisfaction about divine service when there was a bit of +variety in it. He had but one marring thought; he was willing that the +dog should play with his pinchbug, but he did not think it was upright +in him to carry it off. + + + +CHAPTER VI + +MONDAY morning found Tom Sawyer miserable. Monday morning always found +him so--because it began another week's slow suffering in school. He +generally began that day with wishing he had had no intervening +holiday, it made the going into captivity and fetters again so much +more odious. + +Tom lay thinking. Presently it occurred to him that he wished he was +sick; then he could stay home from school. Here was a vague +possibility. He canvassed his system. No ailment was found, and he +investigated again. This time he thought he could detect colicky +symptoms, and he began to encourage them with considerable hope. But +they soon grew feeble, and presently died wholly away. He reflected +further. Suddenly he discovered something. One of his upper front teeth +was loose. This was lucky; he was about to begin to groan, as a +"starter," as he called it, when it occurred to him that if he came +into court with that argument, his aunt would pull it out, and that +would hurt. So he thought he would hold the tooth in reserve for the +present, and seek further. Nothing offered for some little time, and +then he remembered hearing the doctor tell about a certain thing that +laid up a patient for two or three weeks and threatened to make him +lose a finger. So the boy eagerly drew his sore toe from under the +sheet and held it up for inspection. But now he did not know the +necessary symptoms. However, it seemed well worth while to chance it, +so he fell to groaning with considerable spirit. + +But Sid slept on unconscious. + +Tom groaned louder, and fancied that he began to feel pain in the toe. + +No result from Sid. + +Tom was panting with his exertions by this time. He took a rest and +then swelled himself up and fetched a succession of admirable groans. + +Sid snored on. + +Tom was aggravated. He said, "Sid, Sid!" and shook him. This course +worked well, and Tom began to groan again. Sid yawned, stretched, then +brought himself up on his elbow with a snort, and began to stare at +Tom. Tom went on groaning. Sid said: + +"Tom! Say, Tom!" [No response.] "Here, Tom! TOM! What is the matter, +Tom?" And he shook him and looked in his face anxiously. + +Tom moaned out: + +"Oh, don't, Sid. Don't joggle me." + +"Why, what's the matter, Tom? I must call auntie." + +"No--never mind. It'll be over by and by, maybe. Don't call anybody." + +"But I must! DON'T groan so, Tom, it's awful. How long you been this +way?" + +"Hours. Ouch! Oh, don't stir so, Sid, you'll kill me." + +"Tom, why didn't you wake me sooner? Oh, Tom, DON'T! It makes my +flesh crawl to hear you. Tom, what is the matter?" + +"I forgive you everything, Sid. [Groan.] Everything you've ever done +to me. When I'm gone--" + +"Oh, Tom, you ain't dying, are you? Don't, Tom--oh, don't. 
Maybe--" + +"I forgive everybody, Sid. [Groan.] Tell 'em so, Sid. And Sid, you +give my window-sash and my cat with one eye to that new girl that's +come to town, and tell her--" + +But Sid had snatched his clothes and gone. Tom was suffering in +reality, now, so handsomely was his imagination working, and so his +groans had gathered quite a genuine tone. + +Sid flew down-stairs and said: + +"Oh, Aunt Polly, come! Tom's dying!" + +"Dying!" + +"Yes'm. Don't wait--come quick!" + +"Rubbage! I don't believe it!" + +But she fled up-stairs, nevertheless, with Sid and Mary at her heels. +And her face grew white, too, and her lip trembled. When she reached +the bedside she gasped out: + +"You, Tom! Tom, what's the matter with you?" + +"Oh, auntie, I'm--" + +"What's the matter with you--what is the matter with you, child?" + +"Oh, auntie, my sore toe's mortified!" + +The old lady sank down into a chair and laughed a little, then cried a +little, then did both together. This restored her and she said: + +"Tom, what a turn you did give me. Now you shut up that nonsense and +climb out of this." + +The groans ceased and the pain vanished from the toe. The boy felt a +little foolish, and he said: + +"Aunt Polly, it SEEMED mortified, and it hurt so I never minded my +tooth at all." + +"Your tooth, indeed! What's the matter with your tooth?" + +"One of them's loose, and it aches perfectly awful." + +"There, there, now, don't begin that groaning again. Open your mouth. +Well--your tooth IS loose, but you're not going to die about that. +Mary, get me a silk thread, and a chunk of fire out of the kitchen." + +Tom said: + +"Oh, please, auntie, don't pull it out. It don't hurt any more. I wish +I may never stir if it does. Please don't, auntie. I don't want to stay +home from school." + +"Oh, you don't, don't you? So all this row was because you thought +you'd get to stay home from school and go a-fishing? Tom, Tom, I love +you so, and you seem to try every way you can to break my old heart +with your outrageousness." By this time the dental instruments were +ready. The old lady made one end of the silk thread fast to Tom's tooth +with a loop and tied the other to the bedpost. Then she seized the +chunk of fire and suddenly thrust it almost into the boy's face. The +tooth hung dangling by the bedpost, now. + +But all trials bring their compensations. As Tom wended to school +after breakfast, he was the envy of every boy he met because the gap in +his upper row of teeth enabled him to expectorate in a new and +admirable way. He gathered quite a following of lads interested in the +exhibition; and one that had cut his finger and had been a centre of +fascination and homage up to this time, now found himself suddenly +without an adherent, and shorn of his glory. His heart was heavy, and +he said with a disdain which he did not feel that it wasn't anything to +spit like Tom Sawyer; but another boy said, "Sour grapes!" and he +wandered away a dismantled hero. + +Shortly Tom came upon the juvenile pariah of the village, Huckleberry +Finn, son of the town drunkard. Huckleberry was cordially hated and +dreaded by all the mothers of the town, because he was idle and lawless +and vulgar and bad--and because all their children admired him so, and +delighted in his forbidden society, and wished they dared to be like +him. Tom was like the rest of the respectable boys, in that he envied +Huckleberry his gaudy outcast condition, and was under strict orders +not to play with him. 
So he played with him every time he got a chance. +Huckleberry was always dressed in the cast-off clothes of full-grown +men, and they were in perennial bloom and fluttering with rags. His hat +was a vast ruin with a wide crescent lopped out of its brim; his coat, +when he wore one, hung nearly to his heels and had the rearward buttons +far down the back; but one suspender supported his trousers; the seat +of the trousers bagged low and contained nothing, the fringed legs +dragged in the dirt when not rolled up. + +Huckleberry came and went, at his own free will. He slept on doorsteps +in fine weather and in empty hogsheads in wet; he did not have to go to +school or to church, or call any being master or obey anybody; he could +go fishing or swimming when and where he chose, and stay as long as it +suited him; nobody forbade him to fight; he could sit up as late as he +pleased; he was always the first boy that went barefoot in the spring +and the last to resume leather in the fall; he never had to wash, nor +put on clean clothes; he could swear wonderfully. In a word, everything +that goes to make life precious that boy had. So thought every +harassed, hampered, respectable boy in St. Petersburg. + +Tom hailed the romantic outcast: + +"Hello, Huckleberry!" + +"Hello yourself, and see how you like it." + +"What's that you got?" + +"Dead cat." + +"Lemme see him, Huck. My, he's pretty stiff. Where'd you get him?" + +"Bought him off'n a boy." + +"What did you give?" + +"I give a blue ticket and a bladder that I got at the slaughter-house." + +"Where'd you get the blue ticket?" + +"Bought it off'n Ben Rogers two weeks ago for a hoop-stick." + +"Say--what is dead cats good for, Huck?" + +"Good for? Cure warts with." + +"No! Is that so? I know something that's better." + +"I bet you don't. What is it?" + +"Why, spunk-water." + +"Spunk-water! I wouldn't give a dern for spunk-water." + +"You wouldn't, wouldn't you? D'you ever try it?" + +"No, I hain't. But Bob Tanner did." + +"Who told you so!" + +"Why, he told Jeff Thatcher, and Jeff told Johnny Baker, and Johnny +told Jim Hollis, and Jim told Ben Rogers, and Ben told a nigger, and +the nigger told me. There now!" + +"Well, what of it? They'll all lie. Leastways all but the nigger. I +don't know HIM. But I never see a nigger that WOULDN'T lie. Shucks! Now +you tell me how Bob Tanner done it, Huck." + +"Why, he took and dipped his hand in a rotten stump where the +rain-water was." + +"In the daytime?" + +"Certainly." + +"With his face to the stump?" + +"Yes. Least I reckon so." + +"Did he say anything?" + +"I don't reckon he did. I don't know." + +"Aha! Talk about trying to cure warts with spunk-water such a blame +fool way as that! Why, that ain't a-going to do any good. You got to go +all by yourself, to the middle of the woods, where you know there's a +spunk-water stump, and just as it's midnight you back up against the +stump and jam your hand in and say: + + 'Barley-corn, barley-corn, injun-meal shorts, + Spunk-water, spunk-water, swaller these warts,' + +and then walk away quick, eleven steps, with your eyes shut, and then +turn around three times and walk home without speaking to anybody. +Because if you speak the charm's busted." + +"Well, that sounds like a good way; but that ain't the way Bob Tanner +done." + +"No, sir, you can bet he didn't, becuz he's the wartiest boy in this +town; and he wouldn't have a wart on him if he'd knowed how to work +spunk-water. I've took off thousands of warts off of my hands that way, +Huck. 
I play with frogs so much that I've always got considerable many +warts. Sometimes I take 'em off with a bean." + +"Yes, bean's good. I've done that." + +"Have you? What's your way?" + +"You take and split the bean, and cut the wart so as to get some +blood, and then you put the blood on one piece of the bean and take and +dig a hole and bury it 'bout midnight at the crossroads in the dark of +the moon, and then you burn up the rest of the bean. You see that piece +that's got the blood on it will keep drawing and drawing, trying to +fetch the other piece to it, and so that helps the blood to draw the +wart, and pretty soon off she comes." + +"Yes, that's it, Huck--that's it; though when you're burying it if you +say 'Down bean; off wart; come no more to bother me!' it's better. +That's the way Joe Harper does, and he's been nearly to Coonville and +most everywheres. But say--how do you cure 'em with dead cats?" + +"Why, you take your cat and go and get in the graveyard 'long about +midnight when somebody that was wicked has been buried; and when it's +midnight a devil will come, or maybe two or three, but you can't see +'em, you can only hear something like the wind, or maybe hear 'em talk; +and when they're taking that feller away, you heave your cat after 'em +and say, 'Devil follow corpse, cat follow devil, warts follow cat, I'm +done with ye!' That'll fetch ANY wart." + +"Sounds right. D'you ever try it, Huck?" + +"No, but old Mother Hopkins told me." + +"Well, I reckon it's so, then. Becuz they say she's a witch." + +"Say! Why, Tom, I KNOW she is. She witched pap. Pap says so his own +self. He come along one day, and he see she was a-witching him, so he +took up a rock, and if she hadn't dodged, he'd a got her. Well, that +very night he rolled off'n a shed wher' he was a layin drunk, and broke +his arm." + +"Why, that's awful. How did he know she was a-witching him?" + +"Lord, pap can tell, easy. Pap says when they keep looking at you +right stiddy, they're a-witching you. Specially if they mumble. Becuz +when they mumble they're saying the Lord's Prayer backards." + +"Say, Hucky, when you going to try the cat?" + +"To-night. I reckon they'll come after old Hoss Williams to-night." + +"But they buried him Saturday. Didn't they get him Saturday night?" + +"Why, how you talk! How could their charms work till midnight?--and +THEN it's Sunday. Devils don't slosh around much of a Sunday, I don't +reckon." + +"I never thought of that. That's so. Lemme go with you?" + +"Of course--if you ain't afeard." + +"Afeard! 'Tain't likely. Will you meow?" + +"Yes--and you meow back, if you get a chance. Last time, you kep' me +a-meowing around till old Hays went to throwing rocks at me and says +'Dern that cat!' and so I hove a brick through his window--but don't +you tell." + +"I won't. I couldn't meow that night, becuz auntie was watching me, +but I'll meow this time. Say--what's that?" + +"Nothing but a tick." + +"Where'd you get him?" + +"Out in the woods." + +"What'll you take for him?" + +"I don't know. I don't want to sell him." + +"All right. It's a mighty small tick, anyway." + +"Oh, anybody can run a tick down that don't belong to them. I'm +satisfied with it. It's a good enough tick for me." + +"Sho, there's ticks a plenty. I could have a thousand of 'em if I +wanted to." + +"Well, why don't you? Becuz you know mighty well you can't. This is a +pretty early tick, I reckon. It's the first one I've seen this year." + +"Say, Huck--I'll give you my tooth for him." + +"Less see it." 
+ +Tom got out a bit of paper and carefully unrolled it. Huckleberry +viewed it wistfully. The temptation was very strong. At last he said: + +"Is it genuwyne?" + +Tom lifted his lip and showed the vacancy. + +"Well, all right," said Huckleberry, "it's a trade." + +Tom enclosed the tick in the percussion-cap box that had lately been +the pinchbug's prison, and the boys separated, each feeling wealthier +than before. + +When Tom reached the little isolated frame schoolhouse, he strode in +briskly, with the manner of one who had come with all honest speed. +He hung his hat on a peg and flung himself into his seat with +business-like alacrity. The master, throned on high in his great +splint-bottom arm-chair, was dozing, lulled by the drowsy hum of study. +The interruption roused him. + +"Thomas Sawyer!" + +Tom knew that when his name was pronounced in full, it meant trouble. + +"Sir!" + +"Come up here. Now, sir, why are you late again, as usual?" + +Tom was about to take refuge in a lie, when he saw two long tails of +yellow hair hanging down a back that he recognized by the electric +sympathy of love; and by that form was THE ONLY VACANT PLACE on the +girls' side of the schoolhouse. He instantly said: + +"I STOPPED TO TALK WITH HUCKLEBERRY FINN!" + +The master's pulse stood still, and he stared helplessly. The buzz of +study ceased. The pupils wondered if this foolhardy boy had lost his +mind. The master said: + +"You--you did what?" + +"Stopped to talk with Huckleberry Finn." + +There was no mistaking the words. + +"Thomas Sawyer, this is the most astounding confession I have ever +listened to. No mere ferule will answer for this offence. Take off your +jacket." + +The master's arm performed until it was tired and the stock of +switches notably diminished. Then the order followed: + +"Now, sir, go and sit with the girls! And let this be a warning to you." + +The titter that rippled around the room appeared to abash the boy, but +in reality that result was caused rather more by his worshipful awe of +his unknown idol and the dread pleasure that lay in his high good +fortune. He sat down upon the end of the pine bench and the girl +hitched herself away from him with a toss of her head. Nudges and winks +and whispers traversed the room, but Tom sat still, with his arms upon +the long, low desk before him, and seemed to study his book. + +By and by attention ceased from him, and the accustomed school murmur +rose upon the dull air once more. Presently the boy began to steal +furtive glances at the girl. She observed it, "made a mouth" at him and +gave him the back of her head for the space of a minute. When she +cautiously faced around again, a peach lay before her. She thrust it +away. Tom gently put it back. She thrust it away again, but with less +animosity. Tom patiently returned it to its place. Then she let it +remain. Tom scrawled on his slate, "Please take it--I got more." The +girl glanced at the words, but made no sign. Now the boy began to draw +something on the slate, hiding his work with his left hand. For a time +the girl refused to notice; but her human curiosity presently began to +manifest itself by hardly perceptible signs. The boy worked on, +apparently unconscious. The girl made a sort of noncommittal attempt to +see, but the boy did not betray that he was aware of it. At last she +gave in and hesitatingly whispered: + +"Let me see it." + +Tom partly uncovered a dismal caricature of a house with two gable +ends to it and a corkscrew of smoke issuing from the chimney. 
Then the +girl's interest began to fasten itself upon the work and she forgot +everything else. When it was finished, she gazed a moment, then +whispered: + +"It's nice--make a man." + +The artist erected a man in the front yard, that resembled a derrick. +He could have stepped over the house; but the girl was not +hypercritical; she was satisfied with the monster, and whispered: + +"It's a beautiful man--now make me coming along." + +Tom drew an hour-glass with a full moon and straw limbs to it and +armed the spreading fingers with a portentous fan. The girl said: + +"It's ever so nice--I wish I could draw." + +"It's easy," whispered Tom, "I'll learn you." + +"Oh, will you? When?" + +"At noon. Do you go home to dinner?" + +"I'll stay if you will." + +"Good--that's a whack. What's your name?" + +"Becky Thatcher. What's yours? Oh, I know. It's Thomas Sawyer." + +"That's the name they lick me by. I'm Tom when I'm good. You call me +Tom, will you?" + +"Yes." + +Now Tom began to scrawl something on the slate, hiding the words from +the girl. But she was not backward this time. She begged to see. Tom +said: + +"Oh, it ain't anything." + +"Yes it is." + +"No it ain't. You don't want to see." + +"Yes I do, indeed I do. Please let me." + +"You'll tell." + +"No I won't--deed and deed and double deed won't." + +"You won't tell anybody at all? Ever, as long as you live?" + +"No, I won't ever tell ANYbody. Now let me." + +"Oh, YOU don't want to see!" + +"Now that you treat me so, I WILL see." And she put her small hand +upon his and a little scuffle ensued, Tom pretending to resist in +earnest but letting his hand slip by degrees till these words were +revealed: "I LOVE YOU." + +"Oh, you bad thing!" And she hit his hand a smart rap, but reddened +and looked pleased, nevertheless. + +Just at this juncture the boy felt a slow, fateful grip closing on his +ear, and a steady lifting impulse. In that wise he was borne across the +house and deposited in his own seat, under a peppering fire of giggles +from the whole school. Then the master stood over him during a few +awful moments, and finally moved away to his throne without saying a +word. But although Tom's ear tingled, his heart was jubilant. + +As the school quieted down Tom made an honest effort to study, but the +turmoil within him was too great. In turn he took his place in the +reading class and made a botch of it; then in the geography class and +turned lakes into mountains, mountains into rivers, and rivers into +continents, till chaos was come again; then in the spelling class, and +got "turned down," by a succession of mere baby words, till he brought +up at the foot and yielded up the pewter medal which he had worn with +ostentation for months. + + + +CHAPTER VII + +THE harder Tom tried to fasten his mind on his book, the more his +ideas wandered. So at last, with a sigh and a yawn, he gave it up. It +seemed to him that the noon recess would never come. The air was +utterly dead. There was not a breath stirring. It was the sleepiest of +sleepy days. The drowsing murmur of the five and twenty studying +scholars soothed the soul like the spell that is in the murmur of bees. +Away off in the flaming sunshine, Cardiff Hill lifted its soft green +sides through a shimmering veil of heat, tinted with the purple of +distance; a few birds floated on lazy wing high in the air; no other +living thing was visible but some cows, and they were asleep. Tom's +heart ached to be free, or else to have something of interest to do to +pass the dreary time. 
His hand wandered into his pocket and his face +lit up with a glow of gratitude that was prayer, though he did not know +it. Then furtively the percussion-cap box came out. He released the +tick and put him on the long flat desk. The creature probably glowed +with a gratitude that amounted to prayer, too, at this moment, but it +was premature: for when he started thankfully to travel off, Tom turned +him aside with a pin and made him take a new direction. + +Tom's bosom friend sat next him, suffering just as Tom had been, and +now he was deeply and gratefully interested in this entertainment in an +instant. This bosom friend was Joe Harper. The two boys were sworn +friends all the week, and embattled enemies on Saturdays. Joe took a +pin out of his lapel and began to assist in exercising the prisoner. +The sport grew in interest momently. Soon Tom said that they were +interfering with each other, and neither getting the fullest benefit of +the tick. So he put Joe's slate on the desk and drew a line down the +middle of it from top to bottom. + +"Now," said he, "as long as he is on your side you can stir him up and +I'll let him alone; but if you let him get away and get on my side, +you're to leave him alone as long as I can keep him from crossing over." + +"All right, go ahead; start him up." + +The tick escaped from Tom, presently, and crossed the equator. Joe +harassed him awhile, and then he got away and crossed back again. This +change of base occurred often. While one boy was worrying the tick with +absorbing interest, the other would look on with interest as strong, +the two heads bowed together over the slate, and the two souls dead to +all things else. At last luck seemed to settle and abide with Joe. The +tick tried this, that, and the other course, and got as excited and as +anxious as the boys themselves, but time and again just as he would +have victory in his very grasp, so to speak, and Tom's fingers would be +twitching to begin, Joe's pin would deftly head him off, and keep +possession. At last Tom could stand it no longer. The temptation was +too strong. So he reached out and lent a hand with his pin. Joe was +angry in a moment. Said he: + +"Tom, you let him alone." + +"I only just want to stir him up a little, Joe." + +"No, sir, it ain't fair; you just let him alone." + +"Blame it, I ain't going to stir him much." + +"Let him alone, I tell you." + +"I won't!" + +"You shall--he's on my side of the line." + +"Look here, Joe Harper, whose is that tick?" + +"I don't care whose tick he is--he's on my side of the line, and you +sha'n't touch him." + +"Well, I'll just bet I will, though. He's my tick and I'll do what I +blame please with him, or die!" + +A tremendous whack came down on Tom's shoulders, and its duplicate on +Joe's; and for the space of two minutes the dust continued to fly from +the two jackets and the whole school to enjoy it. The boys had been too +absorbed to notice the hush that had stolen upon the school awhile +before when the master came tiptoeing down the room and stood over +them. He had contemplated a good part of the performance before he +contributed his bit of variety to it. + +When school broke up at noon, Tom flew to Becky Thatcher, and +whispered in her ear: + +"Put on your bonnet and let on you're going home; and when you get to +the corner, give the rest of 'em the slip, and turn down through the +lane and come back. I'll go the other way and come it over 'em the same +way." 
+ +So the one went off with one group of scholars, and the other with +another. In a little while the two met at the bottom of the lane, and +when they reached the school they had it all to themselves. Then they +sat together, with a slate before them, and Tom gave Becky the pencil +and held her hand in his, guiding it, and so created another surprising +house. When the interest in art began to wane, the two fell to talking. +Tom was swimming in bliss. He said: + +"Do you love rats?" + +"No! I hate them!" + +"Well, I do, too--LIVE ones. But I mean dead ones, to swing round your +head with a string." + +"No, I don't care for rats much, anyway. What I like is chewing-gum." + +"Oh, I should say so! I wish I had some now." + +"Do you? I've got some. I'll let you chew it awhile, but you must give +it back to me." + +That was agreeable, so they chewed it turn about, and dangled their +legs against the bench in excess of contentment. + +"Was you ever at a circus?" said Tom. + +"Yes, and my pa's going to take me again some time, if I'm good." + +"I been to the circus three or four times--lots of times. Church ain't +shucks to a circus. There's things going on at a circus all the time. +I'm going to be a clown in a circus when I grow up." + +"Oh, are you! That will be nice. They're so lovely, all spotted up." + +"Yes, that's so. And they get slathers of money--most a dollar a day, +Ben Rogers says. Say, Becky, was you ever engaged?" + +"What's that?" + +"Why, engaged to be married." + +"No." + +"Would you like to?" + +"I reckon so. I don't know. What is it like?" + +"Like? Why it ain't like anything. You only just tell a boy you won't +ever have anybody but him, ever ever ever, and then you kiss and that's +all. Anybody can do it." + +"Kiss? What do you kiss for?" + +"Why, that, you know, is to--well, they always do that." + +"Everybody?" + +"Why, yes, everybody that's in love with each other. Do you remember +what I wrote on the slate?" + +"Ye--yes." + +"What was it?" + +"I sha'n't tell you." + +"Shall I tell YOU?" + +"Ye--yes--but some other time." + +"No, now." + +"No, not now--to-morrow." + +"Oh, no, NOW. Please, Becky--I'll whisper it, I'll whisper it ever so +easy." + +Becky hesitating, Tom took silence for consent, and passed his arm +about her waist and whispered the tale ever so softly, with his mouth +close to her ear. And then he added: + +"Now you whisper it to me--just the same." + +She resisted, for a while, and then said: + +"You turn your face away so you can't see, and then I will. But you +mustn't ever tell anybody--WILL you, Tom? Now you won't, WILL you?" + +"No, indeed, indeed I won't. Now, Becky." + +He turned his face away. She bent timidly around till her breath +stirred his curls and whispered, "I--love--you!" + +Then she sprang away and ran around and around the desks and benches, +with Tom after her, and took refuge in a corner at last, with her +little white apron to her face. Tom clasped her about her neck and +pleaded: + +"Now, Becky, it's all done--all over but the kiss. Don't you be afraid +of that--it ain't anything at all. Please, Becky." And he tugged at her +apron and the hands. + +By and by she gave up, and let her hands drop; her face, all glowing +with the struggle, came up and submitted. Tom kissed the red lips and +said: + +"Now it's all done, Becky. And always after this, you know, you ain't +ever to love anybody but me, and you ain't ever to marry anybody but +me, ever never and forever. Will you?" 
+ +"No, I'll never love anybody but you, Tom, and I'll never marry +anybody but you--and you ain't to ever marry anybody but me, either." + +"Certainly. Of course. That's PART of it. And always coming to school +or when we're going home, you're to walk with me, when there ain't +anybody looking--and you choose me and I choose you at parties, because +that's the way you do when you're engaged." + +"It's so nice. I never heard of it before." + +"Oh, it's ever so gay! Why, me and Amy Lawrence--" + +The big eyes told Tom his blunder and he stopped, confused. + +"Oh, Tom! Then I ain't the first you've ever been engaged to!" + +The child began to cry. Tom said: + +"Oh, don't cry, Becky, I don't care for her any more." + +"Yes, you do, Tom--you know you do." + +Tom tried to put his arm about her neck, but she pushed him away and +turned her face to the wall, and went on crying. Tom tried again, with +soothing words in his mouth, and was repulsed again. Then his pride was +up, and he strode away and went outside. He stood about, restless and +uneasy, for a while, glancing at the door, every now and then, hoping +she would repent and come to find him. But she did not. Then he began +to feel badly and fear that he was in the wrong. It was a hard struggle +with him to make new advances, now, but he nerved himself to it and +entered. She was still standing back there in the corner, sobbing, with +her face to the wall. Tom's heart smote him. He went to her and stood a +moment, not knowing exactly how to proceed. Then he said hesitatingly: + +"Becky, I--I don't care for anybody but you." + +No reply--but sobs. + +"Becky"--pleadingly. "Becky, won't you say something?" + +More sobs. + +Tom got out his chiefest jewel, a brass knob from the top of an +andiron, and passed it around her so that she could see it, and said: + +"Please, Becky, won't you take it?" + +She struck it to the floor. Then Tom marched out of the house and over +the hills and far away, to return to school no more that day. Presently +Becky began to suspect. She ran to the door; he was not in sight; she +flew around to the play-yard; he was not there. Then she called: + +"Tom! Come back, Tom!" + +She listened intently, but there was no answer. She had no companions +but silence and loneliness. So she sat down to cry again and upbraid +herself; and by this time the scholars began to gather again, and she +had to hide her griefs and still her broken heart and take up the cross +of a long, dreary, aching afternoon, with none among the strangers +about her to exchange sorrows with. + + + +CHAPTER VIII + +TOM dodged hither and thither through lanes until he was well out of +the track of returning scholars, and then fell into a moody jog. He +crossed a small "branch" two or three times, because of a prevailing +juvenile superstition that to cross water baffled pursuit. Half an hour +later he was disappearing behind the Douglas mansion on the summit of +Cardiff Hill, and the schoolhouse was hardly distinguishable away off +in the valley behind him. He entered a dense wood, picked his pathless +way to the centre of it, and sat down on a mossy spot under a spreading +oak. There was not even a zephyr stirring; the dead noonday heat had +even stilled the songs of the birds; nature lay in a trance that was +broken by no sound but the occasional far-off hammering of a +woodpecker, and this seemed to render the pervading silence and sense +of loneliness the more profound. 
The boy's soul was steeped in +melancholy; his feelings were in happy accord with his surroundings. He +sat long with his elbows on his knees and his chin in his hands, +meditating. It seemed to him that life was but a trouble, at best, and +he more than half envied Jimmy Hodges, so lately released; it must be +very peaceful, he thought, to lie and slumber and dream forever and +ever, with the wind whispering through the trees and caressing the +grass and the flowers over the grave, and nothing to bother and grieve +about, ever any more. If he only had a clean Sunday-school record he +could be willing to go, and be done with it all. Now as to this girl. +What had he done? Nothing. He had meant the best in the world, and been +treated like a dog--like a very dog. She would be sorry some day--maybe +when it was too late. Ah, if he could only die TEMPORARILY! + +But the elastic heart of youth cannot be compressed into one +constrained shape long at a time. Tom presently began to drift +insensibly back into the concerns of this life again. What if he turned +his back, now, and disappeared mysteriously? What if he went away--ever +so far away, into unknown countries beyond the seas--and never came +back any more! How would she feel then! The idea of being a clown +recurred to him now, only to fill him with disgust. For frivolity and +jokes and spotted tights were an offense, when they intruded themselves +upon a spirit that was exalted into the vague august realm of the +romantic. No, he would be a soldier, and return after long years, all +war-worn and illustrious. No--better still, he would join the Indians, +and hunt buffaloes and go on the warpath in the mountain ranges and the +trackless great plains of the Far West, and away in the future come +back a great chief, bristling with feathers, hideous with paint, and +prance into Sunday-school, some drowsy summer morning, with a +bloodcurdling war-whoop, and sear the eyeballs of all his companions +with unappeasable envy. But no, there was something gaudier even than +this. He would be a pirate! That was it! NOW his future lay plain +before him, and glowing with unimaginable splendor. How his name would +fill the world, and make people shudder! How gloriously he would go +plowing the dancing seas, in his long, low, black-hulled racer, the +Spirit of the Storm, with his grisly flag flying at the fore! And at +the zenith of his fame, how he would suddenly appear at the old village +and stalk into church, brown and weather-beaten, in his black velvet +doublet and trunks, his great jack-boots, his crimson sash, his belt +bristling with horse-pistols, his crime-rusted cutlass at his side, his +slouch hat with waving plumes, his black flag unfurled, with the skull +and crossbones on it, and hear with swelling ecstasy the whisperings, +"It's Tom Sawyer the Pirate!--the Black Avenger of the Spanish Main!" + +Yes, it was settled; his career was determined. He would run away from +home and enter upon it. He would start the very next morning. Therefore +he must now begin to get ready. He would collect his resources +together. He went to a rotten log near at hand and began to dig under +one end of it with his Barlow knife. He soon struck wood that sounded +hollow. He put his hand there and uttered this incantation impressively: + +"What hasn't come here, come! What's here, stay here!" + +Then he scraped away the dirt, and exposed a pine shingle. He took it +up and disclosed a shapely little treasure-house whose bottom and sides +were of shingles. 
In it lay a marble. Tom's astonishment was boundless! +He scratched his head with a perplexed air, and said: + +"Well, that beats anything!" + +Then he tossed the marble away pettishly, and stood cogitating. The +truth was, that a superstition of his had failed, here, which he and +all his comrades had always looked upon as infallible. If you buried a +marble with certain necessary incantations, and left it alone a +fortnight, and then opened the place with the incantation he had just +used, you would find that all the marbles you had ever lost had +gathered themselves together there, meantime, no matter how widely they +had been separated. But now, this thing had actually and unquestionably +failed. Tom's whole structure of faith was shaken to its foundations. +He had many a time heard of this thing succeeding but never of its +failing before. It did not occur to him that he had tried it several +times before, himself, but could never find the hiding-places +afterward. He puzzled over the matter some time, and finally decided +that some witch had interfered and broken the charm. He thought he +would satisfy himself on that point; so he searched around till he +found a small sandy spot with a little funnel-shaped depression in it. +He laid himself down and put his mouth close to this depression and +called-- + +"Doodle-bug, doodle-bug, tell me what I want to know! Doodle-bug, +doodle-bug, tell me what I want to know!" + +The sand began to work, and presently a small black bug appeared for a +second and then darted under again in a fright. + +"He dasn't tell! So it WAS a witch that done it. I just knowed it." + +He well knew the futility of trying to contend against witches, so he +gave up discouraged. But it occurred to him that he might as well have +the marble he had just thrown away, and therefore he went and made a +patient search for it. But he could not find it. Now he went back to +his treasure-house and carefully placed himself just as he had been +standing when he tossed the marble away; then he took another marble +from his pocket and tossed it in the same way, saying: + +"Brother, go find your brother!" + +He watched where it stopped, and went there and looked. But it must +have fallen short or gone too far; so he tried twice more. The last +repetition was successful. The two marbles lay within a foot of each +other. + +Just here the blast of a toy tin trumpet came faintly down the green +aisles of the forest. Tom flung off his jacket and trousers, turned a +suspender into a belt, raked away some brush behind the rotten log, +disclosing a rude bow and arrow, a lath sword and a tin trumpet, and in +a moment had seized these things and bounded away, barelegged, with +fluttering shirt. He presently halted under a great elm, blew an +answering blast, and then began to tiptoe and look warily out, this way +and that. He said cautiously--to an imaginary company: + +"Hold, my merry men! Keep hid till I blow." + +Now appeared Joe Harper, as airily clad and elaborately armed as Tom. +Tom called: + +"Hold! Who comes here into Sherwood Forest without my pass?" + +"Guy of Guisborne wants no man's pass. Who art thou that--that--" + +"Dares to hold such language," said Tom, prompting--for they talked +"by the book," from memory. + +"Who art thou that dares to hold such language?" + +"I, indeed! I am Robin Hood, as thy caitiff carcase soon shall know." + +"Then art thou indeed that famous outlaw? Right gladly will I dispute +with thee the passes of the merry wood. Have at thee!" 
+ +They took their lath swords, dumped their other traps on the ground, +struck a fencing attitude, foot to foot, and began a grave, careful +combat, "two up and two down." Presently Tom said: + +"Now, if you've got the hang, go it lively!" + +So they "went it lively," panting and perspiring with the work. By and +by Tom shouted: + +"Fall! fall! Why don't you fall?" + +"I sha'n't! Why don't you fall yourself? You're getting the worst of +it." + +"Why, that ain't anything. I can't fall; that ain't the way it is in +the book. The book says, 'Then with one back-handed stroke he slew poor +Guy of Guisborne.' You're to turn around and let me hit you in the +back." + +There was no getting around the authorities, so Joe turned, received +the whack and fell. + +"Now," said Joe, getting up, "you got to let me kill YOU. That's fair." + +"Why, I can't do that, it ain't in the book." + +"Well, it's blamed mean--that's all." + +"Well, say, Joe, you can be Friar Tuck or Much the miller's son, and +lam me with a quarter-staff; or I'll be the Sheriff of Nottingham and +you be Robin Hood a little while and kill me." + +This was satisfactory, and so these adventures were carried out. Then +Tom became Robin Hood again, and was allowed by the treacherous nun to +bleed his strength away through his neglected wound. And at last Joe, +representing a whole tribe of weeping outlaws, dragged him sadly forth, +gave his bow into his feeble hands, and Tom said, "Where this arrow +falls, there bury poor Robin Hood under the greenwood tree." Then he +shot the arrow and fell back and would have died, but he lit on a +nettle and sprang up too gaily for a corpse. + +The boys dressed themselves, hid their accoutrements, and went off +grieving that there were no outlaws any more, and wondering what modern +civilization could claim to have done to compensate for their loss. +They said they would rather be outlaws a year in Sherwood Forest than +President of the United States forever. + + + +CHAPTER IX + +AT half-past nine, that night, Tom and Sid were sent to bed, as usual. +They said their prayers, and Sid was soon asleep. Tom lay awake and +waited, in restless impatience. When it seemed to him that it must be +nearly daylight, he heard the clock strike ten! This was despair. He +would have tossed and fidgeted, as his nerves demanded, but he was +afraid he might wake Sid. So he lay still, and stared up into the dark. +Everything was dismally still. By and by, out of the stillness, little, +scarcely perceptible noises began to emphasize themselves. The ticking +of the clock began to bring itself into notice. Old beams began to +crack mysteriously. The stairs creaked faintly. Evidently spirits were +abroad. A measured, muffled snore issued from Aunt Polly's chamber. And +now the tiresome chirping of a cricket that no human ingenuity could +locate, began. Next the ghastly ticking of a deathwatch in the wall at +the bed's head made Tom shudder--it meant that somebody's days were +numbered. Then the howl of a far-off dog rose on the night air, and was +answered by a fainter howl from a remoter distance. Tom was in an +agony. At last he was satisfied that time had ceased and eternity +begun; he began to doze, in spite of himself; the clock chimed eleven, +but he did not hear it. And then there came, mingling with his +half-formed dreams, a most melancholy caterwauling. The raising of a +neighboring window disturbed him. A cry of "Scat! you devil!" 
and the +crash of an empty bottle against the back of his aunt's woodshed +brought him wide awake, and a single minute later he was dressed and +out of the window and creeping along the roof of the "ell" on all +fours. He "meow'd" with caution once or twice, as he went; then jumped +to the roof of the woodshed and thence to the ground. Huckleberry Finn +was there, with his dead cat. The boys moved off and disappeared in the +gloom. At the end of half an hour they were wading through the tall +grass of the graveyard. + +It was a graveyard of the old-fashioned Western kind. It was on a +hill, about a mile and a half from the village. It had a crazy board +fence around it, which leaned inward in places, and outward the rest of +the time, but stood upright nowhere. Grass and weeds grew rank over the +whole cemetery. All the old graves were sunken in, there was not a +tombstone on the place; round-topped, worm-eaten boards staggered over +the graves, leaning for support and finding none. "Sacred to the memory +of" So-and-So had been painted on them once, but it could no longer +have been read, on the most of them, now, even if there had been light. + +A faint wind moaned through the trees, and Tom feared it might be the +spirits of the dead, complaining at being disturbed. The boys talked +little, and only under their breath, for the time and the place and the +pervading solemnity and silence oppressed their spirits. They found the +sharp new heap they were seeking, and ensconced themselves within the +protection of three great elms that grew in a bunch within a few feet +of the grave. + +Then they waited in silence for what seemed a long time. The hooting +of a distant owl was all the sound that troubled the dead stillness. +Tom's reflections grew oppressive. He must force some talk. So he said +in a whisper: + +"Hucky, do you believe the dead people like it for us to be here?" + +Huckleberry whispered: + +"I wisht I knowed. It's awful solemn like, AIN'T it?" + +"I bet it is." + +There was a considerable pause, while the boys canvassed this matter +inwardly. Then Tom whispered: + +"Say, Hucky--do you reckon Hoss Williams hears us talking?" + +"O' course he does. Least his sperrit does." + +Tom, after a pause: + +"I wish I'd said Mister Williams. But I never meant any harm. +Everybody calls him Hoss." + +"A body can't be too partic'lar how they talk 'bout these-yer dead +people, Tom." + +This was a damper, and conversation died again. + +Presently Tom seized his comrade's arm and said: + +"Sh!" + +"What is it, Tom?" And the two clung together with beating hearts. + +"Sh! There 'tis again! Didn't you hear it?" + +"I--" + +"There! Now you hear it." + +"Lord, Tom, they're coming! They're coming, sure. What'll we do?" + +"I dono. Think they'll see us?" + +"Oh, Tom, they can see in the dark, same as cats. I wisht I hadn't +come." + +"Oh, don't be afeard. I don't believe they'll bother us. We ain't +doing any harm. If we keep perfectly still, maybe they won't notice us +at all." + +"I'll try to, Tom, but, Lord, I'm all of a shiver." + +"Listen!" + +The boys bent their heads together and scarcely breathed. A muffled +sound of voices floated up from the far end of the graveyard. + +"Look! See there!" whispered Tom. "What is it?" + +"It's devil-fire. Oh, Tom, this is awful." + +Some vague figures approached through the gloom, swinging an +old-fashioned tin lantern that freckled the ground with innumerable +little spangles of light. 
Presently Huckleberry whispered with a +shudder: + +"It's the devils sure enough. Three of 'em! Lordy, Tom, we're goners! +Can you pray?" + +"I'll try, but don't you be afeard. They ain't going to hurt us. 'Now +I lay me down to sleep, I--'" + +"Sh!" + +"What is it, Huck?" + +"They're HUMANS! One of 'em is, anyway. One of 'em's old Muff Potter's +voice." + +"No--'tain't so, is it?" + +"I bet I know it. Don't you stir nor budge. He ain't sharp enough to +notice us. Drunk, the same as usual, likely--blamed old rip!" + +"All right, I'll keep still. Now they're stuck. Can't find it. Here +they come again. Now they're hot. Cold again. Hot again. Red hot! +They're p'inted right, this time. Say, Huck, I know another o' them +voices; it's Injun Joe." + +"That's so--that murderin' half-breed! I'd druther they was devils a +dern sight. What kin they be up to?" + +The whisper died wholly out, now, for the three men had reached the +grave and stood within a few feet of the boys' hiding-place. + +"Here it is," said the third voice; and the owner of it held the +lantern up and revealed the face of young Doctor Robinson. + +Potter and Injun Joe were carrying a handbarrow with a rope and a +couple of shovels on it. They cast down their load and began to open +the grave. The doctor put the lantern at the head of the grave and came +and sat down with his back against one of the elm trees. He was so +close the boys could have touched him. + +"Hurry, men!" he said, in a low voice; "the moon might come out at any +moment." + +They growled a response and went on digging. For some time there was +no noise but the grating sound of the spades discharging their freight +of mould and gravel. It was very monotonous. Finally a spade struck +upon the coffin with a dull woody accent, and within another minute or +two the men had hoisted it out on the ground. They pried off the lid +with their shovels, got out the body and dumped it rudely on the +ground. The moon drifted from behind the clouds and exposed the pallid +face. The barrow was got ready and the corpse placed on it, covered +with a blanket, and bound to its place with the rope. Potter took out a +large spring-knife and cut off the dangling end of the rope and then +said: + +"Now the cussed thing's ready, Sawbones, and you'll just out with +another five, or here she stays." + +"That's the talk!" said Injun Joe. + +"Look here, what does this mean?" said the doctor. "You required your +pay in advance, and I've paid you." + +"Yes, and you done more than that," said Injun Joe, approaching the +doctor, who was now standing. "Five years ago you drove me away from +your father's kitchen one night, when I come to ask for something to +eat, and you said I warn't there for any good; and when I swore I'd get +even with you if it took a hundred years, your father had me jailed for +a vagrant. Did you think I'd forget? The Injun blood ain't in me for +nothing. And now I've GOT you, and you got to SETTLE, you know!" + +He was threatening the doctor, with his fist in his face, by this +time. The doctor struck out suddenly and stretched the ruffian on the +ground. Potter dropped his knife, and exclaimed: + +"Here, now, don't you hit my pard!" and the next moment he had +grappled with the doctor and the two were struggling with might and +main, trampling the grass and tearing the ground with their heels. 
+Injun Joe sprang to his feet, his eyes flaming with passion, snatched +up Potter's knife, and went creeping, catlike and stooping, round and +round about the combatants, seeking an opportunity. All at once the +doctor flung himself free, seized the heavy headboard of Williams' +grave and felled Potter to the earth with it--and in the same instant +the half-breed saw his chance and drove the knife to the hilt in the +young man's breast. He reeled and fell partly upon Potter, flooding him +with his blood, and in the same moment the clouds blotted out the +dreadful spectacle and the two frightened boys went speeding away in +the dark. + +Presently, when the moon emerged again, Injun Joe was standing over +the two forms, contemplating them. The doctor murmured inarticulately, +gave a long gasp or two and was still. The half-breed muttered: + +"THAT score is settled--damn you." + +Then he robbed the body. After which he put the fatal knife in +Potter's open right hand, and sat down on the dismantled coffin. Three +--four--five minutes passed, and then Potter began to stir and moan. His +hand closed upon the knife; he raised it, glanced at it, and let it +fall, with a shudder. Then he sat up, pushing the body from him, and +gazed at it, and then around him, confusedly. His eyes met Joe's. + +"Lord, how is this, Joe?" he said. + +"It's a dirty business," said Joe, without moving. + +"What did you do it for?" + +"I! I never done it!" + +"Look here! That kind of talk won't wash." + +Potter trembled and grew white. + +"I thought I'd got sober. I'd no business to drink to-night. But it's +in my head yet--worse'n when we started here. I'm all in a muddle; +can't recollect anything of it, hardly. Tell me, Joe--HONEST, now, old +feller--did I do it? Joe, I never meant to--'pon my soul and honor, I +never meant to, Joe. Tell me how it was, Joe. Oh, it's awful--and him +so young and promising." + +"Why, you two was scuffling, and he fetched you one with the headboard +and you fell flat; and then up you come, all reeling and staggering +like, and snatched the knife and jammed it into him, just as he fetched +you another awful clip--and here you've laid, as dead as a wedge til +now." + +"Oh, I didn't know what I was a-doing. I wish I may die this minute if +I did. It was all on account of the whiskey and the excitement, I +reckon. I never used a weepon in my life before, Joe. I've fought, but +never with weepons. They'll all say that. Joe, don't tell! Say you +won't tell, Joe--that's a good feller. I always liked you, Joe, and +stood up for you, too. Don't you remember? You WON'T tell, WILL you, +Joe?" And the poor creature dropped on his knees before the stolid +murderer, and clasped his appealing hands. + +"No, you've always been fair and square with me, Muff Potter, and I +won't go back on you. There, now, that's as fair as a man can say." + +"Oh, Joe, you're an angel. I'll bless you for this the longest day I +live." And Potter began to cry. + +"Come, now, that's enough of that. This ain't any time for blubbering. +You be off yonder way and I'll go this. Move, now, and don't leave any +tracks behind you." + +Potter started on a trot that quickly increased to a run. The +half-breed stood looking after him. He muttered: + +"If he's as much stunned with the lick and fuddled with the rum as he +had the look of being, he won't think of the knife till he's gone so +far he'll be afraid to come back after it to such a place by himself +--chicken-heart!" 
+ +Two or three minutes later the murdered man, the blanketed corpse, the +lidless coffin, and the open grave were under no inspection but the +moon's. The stillness was complete again, too. + + + +CHAPTER X + +THE two boys flew on and on, toward the village, speechless with +horror. They glanced backward over their shoulders from time to time, +apprehensively, as if they feared they might be followed. Every stump +that started up in their path seemed a man and an enemy, and made them +catch their breath; and as they sped by some outlying cottages that lay +near the village, the barking of the aroused watch-dogs seemed to give +wings to their feet. + +"If we can only get to the old tannery before we break down!" +whispered Tom, in short catches between breaths. "I can't stand it much +longer." + +Huckleberry's hard pantings were his only reply, and the boys fixed +their eyes on the goal of their hopes and bent to their work to win it. +They gained steadily on it, and at last, breast to breast, they burst +through the open door and fell grateful and exhausted in the sheltering +shadows beyond. By and by their pulses slowed down, and Tom whispered: + +"Huckleberry, what do you reckon'll come of this?" + +"If Doctor Robinson dies, I reckon hanging'll come of it." + +"Do you though?" + +"Why, I KNOW it, Tom." + +Tom thought a while, then he said: + +"Who'll tell? We?" + +"What are you talking about? S'pose something happened and Injun Joe +DIDN'T hang? Why, he'd kill us some time or other, just as dead sure as +we're a laying here." + +"That's just what I was thinking to myself, Huck." + +"If anybody tells, let Muff Potter do it, if he's fool enough. He's +generally drunk enough." + +Tom said nothing--went on thinking. Presently he whispered: + +"Huck, Muff Potter don't know it. How can he tell?" + +"What's the reason he don't know it?" + +"Because he'd just got that whack when Injun Joe done it. D'you reckon +he could see anything? D'you reckon he knowed anything?" + +"By hokey, that's so, Tom!" + +"And besides, look-a-here--maybe that whack done for HIM!" + +"No, 'taint likely, Tom. He had liquor in him; I could see that; and +besides, he always has. Well, when pap's full, you might take and belt +him over the head with a church and you couldn't phase him. He says so, +his own self. So it's the same with Muff Potter, of course. But if a +man was dead sober, I reckon maybe that whack might fetch him; I dono." + +After another reflective silence, Tom said: + +"Hucky, you sure you can keep mum?" + +"Tom, we GOT to keep mum. You know that. That Injun devil wouldn't +make any more of drownding us than a couple of cats, if we was to +squeak 'bout this and they didn't hang him. Now, look-a-here, Tom, less +take and swear to one another--that's what we got to do--swear to keep +mum." + +"I'm agreed. It's the best thing. Would you just hold hands and swear +that we--" + +"Oh no, that wouldn't do for this. That's good enough for little +rubbishy common things--specially with gals, cuz THEY go back on you +anyway, and blab if they get in a huff--but there orter be writing +'bout a big thing like this. And blood." + +Tom's whole being applauded this idea. It was deep, and dark, and +awful; the hour, the circumstances, the surroundings, were in keeping +with it. 
He picked up a clean pine shingle that lay in the moonlight, +took a little fragment of "red keel" out of his pocket, got the moon on +his work, and painfully scrawled these lines, emphasizing each slow +down-stroke by clamping his tongue between his teeth, and letting up +the pressure on the up-strokes. [See next page.] + + "Huck Finn and + Tom Sawyer swears + they will keep mum + about This and They + wish They may Drop + down dead in Their + Tracks if They ever + Tell and Rot." + +Huckleberry was filled with admiration of Tom's facility in writing, +and the sublimity of his language. He at once took a pin from his lapel +and was going to prick his flesh, but Tom said: + +"Hold on! Don't do that. A pin's brass. It might have verdigrease on +it." + +"What's verdigrease?" + +"It's p'ison. That's what it is. You just swaller some of it once +--you'll see." + +So Tom unwound the thread from one of his needles, and each boy +pricked the ball of his thumb and squeezed out a drop of blood. In +time, after many squeezes, Tom managed to sign his initials, using the +ball of his little finger for a pen. Then he showed Huckleberry how to +make an H and an F, and the oath was complete. They buried the shingle +close to the wall, with some dismal ceremonies and incantations, and +the fetters that bound their tongues were considered to be locked and +the key thrown away. + +A figure crept stealthily through a break in the other end of the +ruined building, now, but they did not notice it. + +"Tom," whispered Huckleberry, "does this keep us from EVER telling +--ALWAYS?" + +"Of course it does. It don't make any difference WHAT happens, we got +to keep mum. We'd drop down dead--don't YOU know that?" + +"Yes, I reckon that's so." + +They continued to whisper for some little time. Presently a dog set up +a long, lugubrious howl just outside--within ten feet of them. The boys +clasped each other suddenly, in an agony of fright. + +"Which of us does he mean?" gasped Huckleberry. + +"I dono--peep through the crack. Quick!" + +"No, YOU, Tom!" + +"I can't--I can't DO it, Huck!" + +"Please, Tom. There 'tis again!" + +"Oh, lordy, I'm thankful!" whispered Tom. "I know his voice. It's Bull +Harbison." * + +[* If Mr. Harbison owned a slave named Bull, Tom would have spoken of +him as "Harbison's Bull," but a son or a dog of that name was "Bull +Harbison."] + +"Oh, that's good--I tell you, Tom, I was most scared to death; I'd a +bet anything it was a STRAY dog." + +The dog howled again. The boys' hearts sank once more. + +"Oh, my! that ain't no Bull Harbison!" whispered Huckleberry. "DO, Tom!" + +Tom, quaking with fear, yielded, and put his eye to the crack. His +whisper was hardly audible when he said: + +"Oh, Huck, IT S A STRAY DOG!" + +"Quick, Tom, quick! Who does he mean?" + +"Huck, he must mean us both--we're right together." + +"Oh, Tom, I reckon we're goners. I reckon there ain't no mistake 'bout +where I'LL go to. I been so wicked." + +"Dad fetch it! This comes of playing hookey and doing everything a +feller's told NOT to do. I might a been good, like Sid, if I'd a tried +--but no, I wouldn't, of course. But if ever I get off this time, I lay +I'll just WALLER in Sunday-schools!" And Tom began to snuffle a little. + +"YOU bad!" and Huckleberry began to snuffle too. "Consound it, Tom +Sawyer, you're just old pie, 'longside o' what I am. Oh, LORDY, lordy, +lordy, I wisht I only had half your chance." + +Tom choked off and whispered: + +"Look, Hucky, look! He's got his BACK to us!" 
+ +Hucky looked, with joy in his heart. + +"Well, he has, by jingoes! Did he before?" + +"Yes, he did. But I, like a fool, never thought. Oh, this is bully, +you know. NOW who can he mean?" + +The howling stopped. Tom pricked up his ears. + +"Sh! What's that?" he whispered. + +"Sounds like--like hogs grunting. No--it's somebody snoring, Tom." + +"That IS it! Where 'bouts is it, Huck?" + +"I bleeve it's down at 'tother end. Sounds so, anyway. Pap used to +sleep there, sometimes, 'long with the hogs, but laws bless you, he +just lifts things when HE snores. Besides, I reckon he ain't ever +coming back to this town any more." + +The spirit of adventure rose in the boys' souls once more. + +"Hucky, do you das't to go if I lead?" + +"I don't like to, much. Tom, s'pose it's Injun Joe!" + +Tom quailed. But presently the temptation rose up strong again and the +boys agreed to try, with the understanding that they would take to +their heels if the snoring stopped. So they went tiptoeing stealthily +down, the one behind the other. When they had got to within five steps +of the snorer, Tom stepped on a stick, and it broke with a sharp snap. +The man moaned, writhed a little, and his face came into the moonlight. +It was Muff Potter. The boys' hearts had stood still, and their hopes +too, when the man moved, but their fears passed away now. They tiptoed +out, through the broken weather-boarding, and stopped at a little +distance to exchange a parting word. That long, lugubrious howl rose on +the night air again! They turned and saw the strange dog standing +within a few feet of where Potter was lying, and FACING Potter, with +his nose pointing heavenward. + +"Oh, geeminy, it's HIM!" exclaimed both boys, in a breath. + +"Say, Tom--they say a stray dog come howling around Johnny Miller's +house, 'bout midnight, as much as two weeks ago; and a whippoorwill +come in and lit on the banisters and sung, the very same evening; and +there ain't anybody dead there yet." + +"Well, I know that. And suppose there ain't. Didn't Gracie Miller fall +in the kitchen fire and burn herself terrible the very next Saturday?" + +"Yes, but she ain't DEAD. And what's more, she's getting better, too." + +"All right, you wait and see. She's a goner, just as dead sure as Muff +Potter's a goner. That's what the niggers say, and they know all about +these kind of things, Huck." + +Then they separated, cogitating. When Tom crept in at his bedroom +window the night was almost spent. He undressed with excessive caution, +and fell asleep congratulating himself that nobody knew of his +escapade. He was not aware that the gently-snoring Sid was awake, and +had been so for an hour. + +When Tom awoke, Sid was dressed and gone. There was a late look in the +light, a late sense in the atmosphere. He was startled. Why had he not +been called--persecuted till he was up, as usual? The thought filled +him with bodings. Within five minutes he was dressed and down-stairs, +feeling sore and drowsy. The family were still at table, but they had +finished breakfast. There was no voice of rebuke; but there were +averted eyes; there was a silence and an air of solemnity that struck a +chill to the culprit's heart. He sat down and tried to seem gay, but it +was up-hill work; it roused no smile, no response, and he lapsed into +silence and let his heart sink down to the depths. + +After breakfast his aunt took him aside, and Tom almost brightened in +the hope that he was going to be flogged; but it was not so. 
His aunt +wept over him and asked him how he could go and break her old heart so; +and finally told him to go on, and ruin himself and bring her gray +hairs with sorrow to the grave, for it was no use for her to try any +more. This was worse than a thousand whippings, and Tom's heart was +sorer now than his body. He cried, he pleaded for forgiveness, promised +to reform over and over again, and then received his dismissal, feeling +that he had won but an imperfect forgiveness and established but a +feeble confidence. + +He left the presence too miserable to even feel revengeful toward Sid; +and so the latter's prompt retreat through the back gate was +unnecessary. He moped to school gloomy and sad, and took his flogging, +along with Joe Harper, for playing hookey the day before, with the air +of one whose heart was busy with heavier woes and wholly dead to +trifles. Then he betook himself to his seat, rested his elbows on his +desk and his jaws in his hands, and stared at the wall with the stony +stare of suffering that has reached the limit and can no further go. +His elbow was pressing against some hard substance. After a long time +he slowly and sadly changed his position, and took up this object with +a sigh. It was in a paper. He unrolled it. A long, lingering, colossal +sigh followed, and his heart broke. It was his brass andiron knob! + +This final feather broke the camel's back. + + + +CHAPTER XI + +CLOSE upon the hour of noon the whole village was suddenly electrified +with the ghastly news. No need of the as yet undreamed-of telegraph; +the tale flew from man to man, from group to group, from house to +house, with little less than telegraphic speed. Of course the +schoolmaster gave holiday for that afternoon; the town would have +thought strangely of him if he had not. + +A gory knife had been found close to the murdered man, and it had been +recognized by somebody as belonging to Muff Potter--so the story ran. +And it was said that a belated citizen had come upon Potter washing +himself in the "branch" about one or two o'clock in the morning, and +that Potter had at once sneaked off--suspicious circumstances, +especially the washing which was not a habit with Potter. It was also +said that the town had been ransacked for this "murderer" (the public +are not slow in the matter of sifting evidence and arriving at a +verdict), but that he could not be found. Horsemen had departed down +all the roads in every direction, and the Sheriff "was confident" that +he would be captured before night. + +All the town was drifting toward the graveyard. Tom's heartbreak +vanished and he joined the procession, not because he would not a +thousand times rather go anywhere else, but because an awful, +unaccountable fascination drew him on. Arrived at the dreadful place, +he wormed his small body through the crowd and saw the dismal +spectacle. It seemed to him an age since he was there before. Somebody +pinched his arm. He turned, and his eyes met Huckleberry's. Then both +looked elsewhere at once, and wondered if anybody had noticed anything +in their mutual glance. But everybody was talking, and intent upon the +grisly spectacle before them. + +"Poor fellow!" "Poor young fellow!" "This ought to be a lesson to +grave robbers!" "Muff Potter'll hang for this if they catch him!" This +was the drift of remark; and the minister said, "It was a judgment; His +hand is here." + +Now Tom shivered from head to heel; for his eye fell upon the stolid +face of Injun Joe. 
At this moment the crowd began to sway and struggle, +and voices shouted, "It's him! it's him! he's coming himself!" + +"Who? Who?" from twenty voices. + +"Muff Potter!" + +"Hallo, he's stopped!--Look out, he's turning! Don't let him get away!" + +People in the branches of the trees over Tom's head said he wasn't +trying to get away--he only looked doubtful and perplexed. + +"Infernal impudence!" said a bystander; "wanted to come and take a +quiet look at his work, I reckon--didn't expect any company." + +The crowd fell apart, now, and the Sheriff came through, +ostentatiously leading Potter by the arm. The poor fellow's face was +haggard, and his eyes showed the fear that was upon him. When he stood +before the murdered man, he shook as with a palsy, and he put his face +in his hands and burst into tears. + +"I didn't do it, friends," he sobbed; "'pon my word and honor I never +done it." + +"Who's accused you?" shouted a voice. + +This shot seemed to carry home. Potter lifted his face and looked +around him with a pathetic hopelessness in his eyes. He saw Injun Joe, +and exclaimed: + +"Oh, Injun Joe, you promised me you'd never--" + +"Is that your knife?" and it was thrust before him by the Sheriff. + +Potter would have fallen if they had not caught him and eased him to +the ground. Then he said: + +"Something told me 't if I didn't come back and get--" He shuddered; +then waved his nerveless hand with a vanquished gesture and said, "Tell +'em, Joe, tell 'em--it ain't any use any more." + +Then Huckleberry and Tom stood dumb and staring, and heard the +stony-hearted liar reel off his serene statement, they expecting every +moment that the clear sky would deliver God's lightnings upon his head, +and wondering to see how long the stroke was delayed. And when he had +finished and still stood alive and whole, their wavering impulse to +break their oath and save the poor betrayed prisoner's life faded and +vanished away, for plainly this miscreant had sold himself to Satan and +it would be fatal to meddle with the property of such a power as that. + +"Why didn't you leave? What did you want to come here for?" somebody +said. + +"I couldn't help it--I couldn't help it," Potter moaned. "I wanted to +run away, but I couldn't seem to come anywhere but here." And he fell +to sobbing again. + +Injun Joe repeated his statement, just as calmly, a few minutes +afterward on the inquest, under oath; and the boys, seeing that the +lightnings were still withheld, were confirmed in their belief that Joe +had sold himself to the devil. He was now become, to them, the most +balefully interesting object they had ever looked upon, and they could +not take their fascinated eyes from his face. + +They inwardly resolved to watch him nights, when opportunity should +offer, in the hope of getting a glimpse of his dread master. + +Injun Joe helped to raise the body of the murdered man and put it in a +wagon for removal; and it was whispered through the shuddering crowd +that the wound bled a little! The boys thought that this happy +circumstance would turn suspicion in the right direction; but they were +disappointed, for more than one villager remarked: + +"It was within three feet of Muff Potter when it done it." + +Tom's fearful secret and gnawing conscience disturbed his sleep for as +much as a week after this; and at breakfast one morning Sid said: + +"Tom, you pitch around and talk in your sleep so much that you keep me +awake half the time." + +Tom blanched and dropped his eyes. 
+ +"It's a bad sign," said Aunt Polly, gravely. "What you got on your +mind, Tom?" + +"Nothing. Nothing 't I know of." But the boy's hand shook so that he +spilled his coffee. + +"And you do talk such stuff," Sid said. "Last night you said, 'It's +blood, it's blood, that's what it is!' You said that over and over. And +you said, 'Don't torment me so--I'll tell!' Tell WHAT? What is it +you'll tell?" + +Everything was swimming before Tom. There is no telling what might +have happened, now, but luckily the concern passed out of Aunt Polly's +face and she came to Tom's relief without knowing it. She said: + +"Sho! It's that dreadful murder. I dream about it most every night +myself. Sometimes I dream it's me that done it." + +Mary said she had been affected much the same way. Sid seemed +satisfied. Tom got out of the presence as quick as he plausibly could, +and after that he complained of toothache for a week, and tied up his +jaws every night. He never knew that Sid lay nightly watching, and +frequently slipped the bandage free and then leaned on his elbow +listening a good while at a time, and afterward slipped the bandage +back to its place again. Tom's distress of mind wore off gradually and +the toothache grew irksome and was discarded. If Sid really managed to +make anything out of Tom's disjointed mutterings, he kept it to himself. + +It seemed to Tom that his schoolmates never would get done holding +inquests on dead cats, and thus keeping his trouble present to his +mind. Sid noticed that Tom never was coroner at one of these inquiries, +though it had been his habit to take the lead in all new enterprises; +he noticed, too, that Tom never acted as a witness--and that was +strange; and Sid did not overlook the fact that Tom even showed a +marked aversion to these inquests, and always avoided them when he +could. Sid marvelled, but said nothing. However, even inquests went out +of vogue at last, and ceased to torture Tom's conscience. + +Every day or two, during this time of sorrow, Tom watched his +opportunity and went to the little grated jail-window and smuggled such +small comforts through to the "murderer" as he could get hold of. The +jail was a trifling little brick den that stood in a marsh at the edge +of the village, and no guards were afforded for it; indeed, it was +seldom occupied. These offerings greatly helped to ease Tom's +conscience. + +The villagers had a strong desire to tar-and-feather Injun Joe and +ride him on a rail, for body-snatching, but so formidable was his +character that nobody could be found who was willing to take the lead +in the matter, so it was dropped. He had been careful to begin both of +his inquest-statements with the fight, without confessing the +grave-robbery that preceded it; therefore it was deemed wisest not +to try the case in the courts at present. + + + +CHAPTER XII + +ONE of the reasons why Tom's mind had drifted away from its secret +troubles was, that it had found a new and weighty matter to interest +itself about. Becky Thatcher had stopped coming to school. Tom had +struggled with his pride a few days, and tried to "whistle her down the +wind," but failed. He began to find himself hanging around her father's +house, nights, and feeling very miserable. She was ill. What if she +should die! There was distraction in the thought. He no longer took an +interest in war, nor even in piracy. The charm of life was gone; there +was nothing but dreariness left. He put his hoop away, and his bat; +there was no joy in them any more. 
His aunt was concerned. She began to +try all manner of remedies on him. She was one of those people who are +infatuated with patent medicines and all new-fangled methods of +producing health or mending it. She was an inveterate experimenter in +these things. When something fresh in this line came out she was in a +fever, right away, to try it; not on herself, for she was never ailing, +but on anybody else that came handy. She was a subscriber for all the +"Health" periodicals and phrenological frauds; and the solemn ignorance +they were inflated with was breath to her nostrils. All the "rot" they +contained about ventilation, and how to go to bed, and how to get up, +and what to eat, and what to drink, and how much exercise to take, and +what frame of mind to keep one's self in, and what sort of clothing to +wear, was all gospel to her, and she never observed that her +health-journals of the current month customarily upset everything they +had recommended the month before. She was as simple-hearted and honest +as the day was long, and so she was an easy victim. She gathered +together her quack periodicals and her quack medicines, and thus armed +with death, went about on her pale horse, metaphorically speaking, with +"hell following after." But she never suspected that she was not an +angel of healing and the balm of Gilead in disguise, to the suffering +neighbors. + +The water treatment was new, now, and Tom's low condition was a +windfall to her. She had him out at daylight every morning, stood him +up in the woodshed and drowned him with a deluge of cold water; then +she scrubbed him down with a towel like a file, and so brought him to; +then she rolled him up in a wet sheet and put him away under blankets +till she sweated his soul clean and "the yellow stains of it came +through his pores"--as Tom said. + +Yet notwithstanding all this, the boy grew more and more melancholy +and pale and dejected. She added hot baths, sitz baths, shower baths, +and plunges. The boy remained as dismal as a hearse. She began to +assist the water with a slim oatmeal diet and blister-plasters. She +calculated his capacity as she would a jug's, and filled him up every +day with quack cure-alls. + +Tom had become indifferent to persecution by this time. This phase +filled the old lady's heart with consternation. This indifference must +be broken up at any cost. Now she heard of Pain-killer for the first +time. She ordered a lot at once. She tasted it and was filled with +gratitude. It was simply fire in a liquid form. She dropped the water +treatment and everything else, and pinned her faith to Pain-killer. She +gave Tom a teaspoonful and watched with the deepest anxiety for the +result. Her troubles were instantly at rest, her soul at peace again; +for the "indifference" was broken up. The boy could not have shown a +wilder, heartier interest, if she had built a fire under him. + +Tom felt that it was time to wake up; this sort of life might be +romantic enough, in his blighted condition, but it was getting to have +too little sentiment and too much distracting variety about it. So he +thought over various plans for relief, and finally hit pon that of +professing to be fond of Pain-killer. He asked for it so often that he +became a nuisance, and his aunt ended by telling him to help himself +and quit bothering her. If it had been Sid, she would have had no +misgivings to alloy her delight; but since it was Tom, she watched the +bottle clandestinely. 
She found that the medicine did really diminish, +but it did not occur to her that the boy was mending the health of a +crack in the sitting-room floor with it. + +One day Tom was in the act of dosing the crack when his aunt's yellow +cat came along, purring, eying the teaspoon avariciously, and begging +for a taste. Tom said: + +"Don't ask for it unless you want it, Peter." + +But Peter signified that he did want it. + +"You better make sure." + +Peter was sure. + +"Now you've asked for it, and I'll give it to you, because there ain't +anything mean about me; but if you find you don't like it, you mustn't +blame anybody but your own self." + +Peter was agreeable. So Tom pried his mouth open and poured down the +Pain-killer. Peter sprang a couple of yards in the air, and then +delivered a war-whoop and set off round and round the room, banging +against furniture, upsetting flower-pots, and making general havoc. +Next he rose on his hind feet and pranced around, in a frenzy of +enjoyment, with his head over his shoulder and his voice proclaiming +his unappeasable happiness. Then he went tearing around the house again +spreading chaos and destruction in his path. Aunt Polly entered in time +to see him throw a few double summersets, deliver a final mighty +hurrah, and sail through the open window, carrying the rest of the +flower-pots with him. The old lady stood petrified with astonishment, +peering over her glasses; Tom lay on the floor expiring with laughter. + +"Tom, what on earth ails that cat?" + +"I don't know, aunt," gasped the boy. + +"Why, I never see anything like it. What did make him act so?" + +"Deed I don't know, Aunt Polly; cats always act so when they're having +a good time." + +"They do, do they?" There was something in the tone that made Tom +apprehensive. + +"Yes'm. That is, I believe they do." + +"You DO?" + +"Yes'm." + +The old lady was bending down, Tom watching, with interest emphasized +by anxiety. Too late he divined her "drift." The handle of the telltale +teaspoon was visible under the bed-valance. Aunt Polly took it, held it +up. Tom winced, and dropped his eyes. Aunt Polly raised him by the +usual handle--his ear--and cracked his head soundly with her thimble. + +"Now, sir, what did you want to treat that poor dumb beast so, for?" + +"I done it out of pity for him--because he hadn't any aunt." + +"Hadn't any aunt!--you numskull. What has that got to do with it?" + +"Heaps. Because if he'd had one she'd a burnt him out herself! She'd a +roasted his bowels out of him 'thout any more feeling than if he was a +human!" + +Aunt Polly felt a sudden pang of remorse. This was putting the thing +in a new light; what was cruelty to a cat MIGHT be cruelty to a boy, +too. She began to soften; she felt sorry. Her eyes watered a little, +and she put her hand on Tom's head and said gently: + +"I was meaning for the best, Tom. And, Tom, it DID do you good." + +Tom looked up in her face with just a perceptible twinkle peeping +through his gravity. + +"I know you was meaning for the best, aunty, and so was I with Peter. +It done HIM good, too. I never see him get around so since--" + +"Oh, go 'long with you, Tom, before you aggravate me again. And you +try and see if you can't be a good boy, for once, and you needn't take +any more medicine." + +Tom reached school ahead of time. It was noticed that this strange +thing had been occurring every day latterly. And now, as usual of late, +he hung about the gate of the schoolyard instead of playing with his +comrades. 
He was sick, he said, and he looked it. He tried to seem to +be looking everywhere but whither he really was looking--down the road. +Presently Jeff Thatcher hove in sight, and Tom's face lighted; he gazed +a moment, and then turned sorrowfully away. When Jeff arrived, Tom +accosted him; and "led up" warily to opportunities for remark about +Becky, but the giddy lad never could see the bait. Tom watched and +watched, hoping whenever a frisking frock came in sight, and hating the +owner of it as soon as he saw she was not the right one. At last frocks +ceased to appear, and he dropped hopelessly into the dumps; he entered +the empty schoolhouse and sat down to suffer. Then one more frock +passed in at the gate, and Tom's heart gave a great bound. The next +instant he was out, and "going on" like an Indian; yelling, laughing, +chasing boys, jumping over the fence at risk of life and limb, throwing +handsprings, standing on his head--doing all the heroic things he could +conceive of, and keeping a furtive eye out, all the while, to see if +Becky Thatcher was noticing. But she seemed to be unconscious of it +all; she never looked. Could it be possible that she was not aware that +he was there? He carried his exploits to her immediate vicinity; came +war-whooping around, snatched a boy's cap, hurled it to the roof of the +schoolhouse, broke through a group of boys, tumbling them in every +direction, and fell sprawling, himself, under Becky's nose, almost +upsetting her--and she turned, with her nose in the air, and he heard +her say: "Mf! some people think they're mighty smart--always showing +off!" + +Tom's cheeks burned. He gathered himself up and sneaked off, crushed +and crestfallen. + + + +CHAPTER XIII + +TOM'S mind was made up now. He was gloomy and desperate. He was a +forsaken, friendless boy, he said; nobody loved him; when they found +out what they had driven him to, perhaps they would be sorry; he had +tried to do right and get along, but they would not let him; since +nothing would do them but to be rid of him, let it be so; and let them +blame HIM for the consequences--why shouldn't they? What right had the +friendless to complain? Yes, they had forced him to it at last: he +would lead a life of crime. There was no choice. + +By this time he was far down Meadow Lane, and the bell for school to +"take up" tinkled faintly upon his ear. He sobbed, now, to think he +should never, never hear that old familiar sound any more--it was very +hard, but it was forced on him; since he was driven out into the cold +world, he must submit--but he forgave them. Then the sobs came thick +and fast. + +Just at this point he met his soul's sworn comrade, Joe Harper +--hard-eyed, and with evidently a great and dismal purpose in his heart. +Plainly here were "two souls with but a single thought." Tom, wiping +his eyes with his sleeve, began to blubber out something about a +resolution to escape from hard usage and lack of sympathy at home by +roaming abroad into the great world never to return; and ended by +hoping that Joe would not forget him. + +But it transpired that this was a request which Joe had just been +going to make of Tom, and had come to hunt him up for that purpose. 
His +mother had whipped him for drinking some cream which he had never +tasted and knew nothing about; it was plain that she was tired of him +and wished him to go; if she felt that way, there was nothing for him +to do but succumb; he hoped she would be happy, and never regret having +driven her poor boy out into the unfeeling world to suffer and die. + +As the two boys walked sorrowing along, they made a new compact to +stand by each other and be brothers and never separate till death +relieved them of their troubles. Then they began to lay their plans. +Joe was for being a hermit, and living on crusts in a remote cave, and +dying, some time, of cold and want and grief; but after listening to +Tom, he conceded that there were some conspicuous advantages about a +life of crime, and so he consented to be a pirate. + +Three miles below St. Petersburg, at a point where the Mississippi +River was a trifle over a mile wide, there was a long, narrow, wooded +island, with a shallow bar at the head of it, and this offered well as +a rendezvous. It was not inhabited; it lay far over toward the further +shore, abreast a dense and almost wholly unpeopled forest. So Jackson's +Island was chosen. Who were to be the subjects of their piracies was a +matter that did not occur to them. Then they hunted up Huckleberry +Finn, and he joined them promptly, for all careers were one to him; he +was indifferent. They presently separated to meet at a lonely spot on +the river-bank two miles above the village at the favorite hour--which +was midnight. There was a small log raft there which they meant to +capture. Each would bring hooks and lines, and such provision as he +could steal in the most dark and mysterious way--as became outlaws. And +before the afternoon was done, they had all managed to enjoy the sweet +glory of spreading the fact that pretty soon the town would "hear +something." All who got this vague hint were cautioned to "be mum and +wait." + +About midnight Tom arrived with a boiled ham and a few trifles, +and stopped in a dense undergrowth on a small bluff overlooking the +meeting-place. It was starlight, and very still. The mighty river lay +like an ocean at rest. Tom listened a moment, but no sound disturbed the +quiet. Then he gave a low, distinct whistle. It was answered from under +the bluff. Tom whistled twice more; these signals were answered in the +same way. Then a guarded voice said: + +"Who goes there?" + +"Tom Sawyer, the Black Avenger of the Spanish Main. Name your names." + +"Huck Finn the Red-Handed, and Joe Harper the Terror of the Seas." Tom +had furnished these titles, from his favorite literature. + +"'Tis well. Give the countersign." + +Two hoarse whispers delivered the same awful word simultaneously to +the brooding night: + +"BLOOD!" + +Then Tom tumbled his ham over the bluff and let himself down after it, +tearing both skin and clothes to some extent in the effort. There was +an easy, comfortable path along the shore under the bluff, but it +lacked the advantages of difficulty and danger so valued by a pirate. + +The Terror of the Seas had brought a side of bacon, and had about worn +himself out with getting it there. Finn the Red-Handed had stolen a +skillet and a quantity of half-cured leaf tobacco, and had also brought +a few corn-cobs to make pipes with. But none of the pirates smoked or +"chewed" but himself. The Black Avenger of the Spanish Main said it +would never do to start without some fire. That was a wise thought; +matches were hardly known there in that day. 
They saw a fire +smouldering upon a great raft a hundred yards above, and they went +stealthily thither and helped themselves to a chunk. They made an +imposing adventure of it, saying, "Hist!" every now and then, and +suddenly halting with finger on lip; moving with hands on imaginary +dagger-hilts; and giving orders in dismal whispers that if "the foe" +stirred, to "let him have it to the hilt," because "dead men tell no +tales." They knew well enough that the raftsmen were all down at the +village laying in stores or having a spree, but still that was no +excuse for their conducting this thing in an unpiratical way. + +They shoved off, presently, Tom in command, Huck at the after oar and +Joe at the forward. Tom stood amidships, gloomy-browed, and with folded +arms, and gave his orders in a low, stern whisper: + +"Luff, and bring her to the wind!" + +"Aye-aye, sir!" + +"Steady, steady-y-y-y!" + +"Steady it is, sir!" + +"Let her go off a point!" + +"Point it is, sir!" + +As the boys steadily and monotonously drove the raft toward mid-stream +it was no doubt understood that these orders were given only for +"style," and were not intended to mean anything in particular. + +"What sail's she carrying?" + +"Courses, tops'ls, and flying-jib, sir." + +"Send the r'yals up! Lay out aloft, there, half a dozen of ye +--foretopmaststuns'l! Lively, now!" + +"Aye-aye, sir!" + +"Shake out that maintogalans'l! Sheets and braces! NOW my hearties!" + +"Aye-aye, sir!" + +"Hellum-a-lee--hard a port! Stand by to meet her when she comes! Port, +port! NOW, men! With a will! Stead-y-y-y!" + +"Steady it is, sir!" + +The raft drew beyond the middle of the river; the boys pointed her +head right, and then lay on their oars. The river was not high, so +there was not more than a two or three mile current. Hardly a word was +said during the next three-quarters of an hour. Now the raft was +passing before the distant town. Two or three glimmering lights showed +where it lay, peacefully sleeping, beyond the vague vast sweep of +star-gemmed water, unconscious of the tremendous event that was happening. +The Black Avenger stood still with folded arms, "looking his last" upon +the scene of his former joys and his later sufferings, and wishing +"she" could see him now, abroad on the wild sea, facing peril and death +with dauntless heart, going to his doom with a grim smile on his lips. +It was but a small strain on his imagination to remove Jackson's Island +beyond eyeshot of the village, and so he "looked his last" with a +broken and satisfied heart. The other pirates were looking their last, +too; and they all looked so long that they came near letting the +current drift them out of the range of the island. But they discovered +the danger in time, and made shift to avert it. About two o'clock in +the morning the raft grounded on the bar two hundred yards above the +head of the island, and they waded back and forth until they had landed +their freight. Part of the little raft's belongings consisted of an old +sail, and this they spread over a nook in the bushes for a tent to +shelter their provisions; but they themselves would sleep in the open +air in good weather, as became outlaws. + +They built a fire against the side of a great log twenty or thirty +steps within the sombre depths of the forest, and then cooked some +bacon in the frying-pan for supper, and used up half of the corn "pone" +stock they had brought. 
It seemed glorious sport to be feasting in that +wild, free way in the virgin forest of an unexplored and uninhabited +island, far from the haunts of men, and they said they never would +return to civilization. The climbing fire lit up their faces and threw +its ruddy glare upon the pillared tree-trunks of their forest temple, +and upon the varnished foliage and festooning vines. + +When the last crisp slice of bacon was gone, and the last allowance of +corn pone devoured, the boys stretched themselves out on the grass, +filled with contentment. They could have found a cooler place, but they +would not deny themselves such a romantic feature as the roasting +camp-fire. + +"AIN'T it gay?" said Joe. + +"It's NUTS!" said Tom. "What would the boys say if they could see us?" + +"Say? Well, they'd just die to be here--hey, Hucky!" + +"I reckon so," said Huckleberry; "anyways, I'm suited. I don't want +nothing better'n this. I don't ever get enough to eat, gen'ally--and +here they can't come and pick at a feller and bullyrag him so." + +"It's just the life for me," said Tom. "You don't have to get up, +mornings, and you don't have to go to school, and wash, and all that +blame foolishness. You see a pirate don't have to do ANYTHING, Joe, +when he's ashore, but a hermit HE has to be praying considerable, and +then he don't have any fun, anyway, all by himself that way." + +"Oh yes, that's so," said Joe, "but I hadn't thought much about it, +you know. I'd a good deal rather be a pirate, now that I've tried it." + +"You see," said Tom, "people don't go much on hermits, nowadays, like +they used to in old times, but a pirate's always respected. And a +hermit's got to sleep on the hardest place he can find, and put +sackcloth and ashes on his head, and stand out in the rain, and--" + +"What does he put sackcloth and ashes on his head for?" inquired Huck. + +"I dono. But they've GOT to do it. Hermits always do. You'd have to do +that if you was a hermit." + +"Dern'd if I would," said Huck. + +"Well, what would you do?" + +"I dono. But I wouldn't do that." + +"Why, Huck, you'd HAVE to. How'd you get around it?" + +"Why, I just wouldn't stand it. I'd run away." + +"Run away! Well, you WOULD be a nice old slouch of a hermit. You'd be +a disgrace." + +The Red-Handed made no response, being better employed. He had +finished gouging out a cob, and now he fitted a weed stem to it, loaded +it with tobacco, and was pressing a coal to the charge and blowing a +cloud of fragrant smoke--he was in the full bloom of luxurious +contentment. The other pirates envied him this majestic vice, and +secretly resolved to acquire it shortly. Presently Huck said: + +"What does pirates have to do?" + +Tom said: + +"Oh, they have just a bully time--take ships and burn them, and get +the money and bury it in awful places in their island where there's +ghosts and things to watch it, and kill everybody in the ships--make +'em walk a plank." + +"And they carry the women to the island," said Joe; "they don't kill +the women." + +"No," assented Tom, "they don't kill the women--they're too noble. And +the women's always beautiful, too. + +"And don't they wear the bulliest clothes! Oh no! All gold and silver +and di'monds," said Joe, with enthusiasm. + +"Who?" said Huck. + +"Why, the pirates." + +Huck scanned his own clothing forlornly. + +"I reckon I ain't dressed fitten for a pirate," said he, with a +regretful pathos in his voice; "but I ain't got none but these." 
+ +But the other boys told him the fine clothes would come fast enough, +after they should have begun their adventures. They made him understand +that his poor rags would do to begin with, though it was customary for +wealthy pirates to start with a proper wardrobe. + +Gradually their talk died out and drowsiness began to steal upon the +eyelids of the little waifs. The pipe dropped from the fingers of the +Red-Handed, and he slept the sleep of the conscience-free and the +weary. The Terror of the Seas and the Black Avenger of the Spanish Main +had more difficulty in getting to sleep. They said their prayers +inwardly, and lying down, since there was nobody there with authority +to make them kneel and recite aloud; in truth, they had a mind not to +say them at all, but they were afraid to proceed to such lengths as +that, lest they might call down a sudden and special thunderbolt from +heaven. Then at once they reached and hovered upon the imminent verge +of sleep--but an intruder came, now, that would not "down." It was +conscience. They began to feel a vague fear that they had been doing +wrong to run away; and next they thought of the stolen meat, and then +the real torture came. They tried to argue it away by reminding +conscience that they had purloined sweetmeats and apples scores of +times; but conscience was not to be appeased by such thin +plausibilities; it seemed to them, in the end, that there was no +getting around the stubborn fact that taking sweetmeats was only +"hooking," while taking bacon and hams and such valuables was plain +simple stealing--and there was a command against that in the Bible. So +they inwardly resolved that so long as they remained in the business, +their piracies should not again be sullied with the crime of stealing. +Then conscience granted a truce, and these curiously inconsistent +pirates fell peacefully to sleep. + + + +CHAPTER XIV + +WHEN Tom awoke in the morning, he wondered where he was. He sat up and +rubbed his eyes and looked around. Then he comprehended. It was the +cool gray dawn, and there was a delicious sense of repose and peace in +the deep pervading calm and silence of the woods. Not a leaf stirred; +not a sound obtruded upon great Nature's meditation. Beaded dewdrops +stood upon the leaves and grasses. A white layer of ashes covered the +fire, and a thin blue breath of smoke rose straight into the air. Joe +and Huck still slept. + +Now, far away in the woods a bird called; another answered; presently +the hammering of a woodpecker was heard. Gradually the cool dim gray of +the morning whitened, and as gradually sounds multiplied and life +manifested itself. The marvel of Nature shaking off sleep and going to +work unfolded itself to the musing boy. A little green worm came +crawling over a dewy leaf, lifting two-thirds of his body into the air +from time to time and "sniffing around," then proceeding again--for he +was measuring, Tom said; and when the worm approached him, of its own +accord, he sat as still as a stone, with his hopes rising and falling, +by turns, as the creature still came toward him or seemed inclined to +go elsewhere; and when at last it considered a painful moment with its +curved body in the air and then came decisively down upon Tom's leg and +began a journey over him, his whole heart was glad--for that meant that +he was going to have a new suit of clothes--without the shadow of a +doubt a gaudy piratical uniform. 
Now a procession of ants appeared, +from nowhere in particular, and went about their labors; one struggled +manfully by with a dead spider five times as big as itself in its arms, +and lugged it straight up a tree-trunk. A brown spotted lady-bug +climbed the dizzy height of a grass blade, and Tom bent down close to +it and said, "Lady-bug, lady-bug, fly away home, your house is on fire, +your children's alone," and she took wing and went off to see about it +--which did not surprise the boy, for he knew of old that this insect was +credulous about conflagrations, and he had practised upon its +simplicity more than once. A tumblebug came next, heaving sturdily at +its ball, and Tom touched the creature, to see it shut its legs against +its body and pretend to be dead. The birds were fairly rioting by this +time. A catbird, the Northern mocker, lit in a tree over Tom's head, +and trilled out her imitations of her neighbors in a rapture of +enjoyment; then a shrill jay swept down, a flash of blue flame, and +stopped on a twig almost within the boy's reach, cocked his head to one +side and eyed the strangers with a consuming curiosity; a gray squirrel +and a big fellow of the "fox" kind came skurrying along, sitting up at +intervals to inspect and chatter at the boys, for the wild things had +probably never seen a human being before and scarcely knew whether to +be afraid or not. All Nature was wide awake and stirring, now; long +lances of sunlight pierced down through the dense foliage far and near, +and a few butterflies came fluttering upon the scene. + +Tom stirred up the other pirates and they all clattered away with a +shout, and in a minute or two were stripped and chasing after and +tumbling over each other in the shallow limpid water of the white +sandbar. They felt no longing for the little village sleeping in the +distance beyond the majestic waste of water. A vagrant current or a +slight rise in the river had carried off their raft, but this only +gratified them, since its going was something like burning the bridge +between them and civilization. + +They came back to camp wonderfully refreshed, glad-hearted, and +ravenous; and they soon had the camp-fire blazing up again. Huck found +a spring of clear cold water close by, and the boys made cups of broad +oak or hickory leaves, and felt that water, sweetened with such a +wildwood charm as that, would be a good enough substitute for coffee. +While Joe was slicing bacon for breakfast, Tom and Huck asked him to +hold on a minute; they stepped to a promising nook in the river-bank +and threw in their lines; almost immediately they had reward. Joe had +not had time to get impatient before they were back again with some +handsome bass, a couple of sun-perch and a small catfish--provisions +enough for quite a family. They fried the fish with the bacon, and were +astonished; for no fish had ever seemed so delicious before. They did +not know that the quicker a fresh-water fish is on the fire after he is +caught the better he is; and they reflected little upon what a sauce +open-air sleeping, open-air exercise, bathing, and a large ingredient +of hunger make, too. + +They lay around in the shade, after breakfast, while Huck had a smoke, +and then went off through the woods on an exploring expedition. They +tramped gayly along, over decaying logs, through tangled underbrush, +among solemn monarchs of the forest, hung from their crowns to the +ground with a drooping regalia of grape-vines. 
Now and then they came +upon snug nooks carpeted with grass and jeweled with flowers. + +They found plenty of things to be delighted with, but nothing to be +astonished at. They discovered that the island was about three miles +long and a quarter of a mile wide, and that the shore it lay closest to +was only separated from it by a narrow channel hardly two hundred yards +wide. They took a swim about every hour, so it was close upon the +middle of the afternoon when they got back to camp. They were too +hungry to stop to fish, but they fared sumptuously upon cold ham, and +then threw themselves down in the shade to talk. But the talk soon +began to drag, and then died. The stillness, the solemnity that brooded +in the woods, and the sense of loneliness, began to tell upon the +spirits of the boys. They fell to thinking. A sort of undefined longing +crept upon them. This took dim shape, presently--it was budding +homesickness. Even Finn the Red-Handed was dreaming of his doorsteps +and empty hogsheads. But they were all ashamed of their weakness, and +none was brave enough to speak his thought. + +For some time, now, the boys had been dully conscious of a peculiar +sound in the distance, just as one sometimes is of the ticking of a +clock which he takes no distinct note of. But now this mysterious sound +became more pronounced, and forced a recognition. The boys started, +glanced at each other, and then each assumed a listening attitude. +There was a long silence, profound and unbroken; then a deep, sullen +boom came floating down out of the distance. + +"What is it!" exclaimed Joe, under his breath. + +"I wonder," said Tom in a whisper. + +"'Tain't thunder," said Huckleberry, in an awed tone, "becuz thunder--" + +"Hark!" said Tom. "Listen--don't talk." + +They waited a time that seemed an age, and then the same muffled boom +troubled the solemn hush. + +"Let's go and see." + +They sprang to their feet and hurried to the shore toward the town. +They parted the bushes on the bank and peered out over the water. The +little steam ferryboat was about a mile below the village, drifting +with the current. Her broad deck seemed crowded with people. There were +a great many skiffs rowing about or floating with the stream in the +neighborhood of the ferryboat, but the boys could not determine what +the men in them were doing. Presently a great jet of white smoke burst +from the ferryboat's side, and as it expanded and rose in a lazy cloud, +that same dull throb of sound was borne to the listeners again. + +"I know now!" exclaimed Tom; "somebody's drownded!" + +"That's it!" said Huck; "they done that last summer, when Bill Turner +got drownded; they shoot a cannon over the water, and that makes him +come up to the top. Yes, and they take loaves of bread and put +quicksilver in 'em and set 'em afloat, and wherever there's anybody +that's drownded, they'll float right there and stop." + +"Yes, I've heard about that," said Joe. "I wonder what makes the bread +do that." + +"Oh, it ain't the bread, so much," said Tom; "I reckon it's mostly +what they SAY over it before they start it out." + +"But they don't say anything over it," said Huck. "I've seen 'em and +they don't." + +"Well, that's funny," said Tom. "But maybe they say it to themselves. +Of COURSE they do. Anybody might know that." 
+ +The other boys agreed that there was reason in what Tom said, because +an ignorant lump of bread, uninstructed by an incantation, could not be +expected to act very intelligently when set upon an errand of such +gravity. + +"By jings, I wish I was over there, now," said Joe. + +"I do too" said Huck "I'd give heaps to know who it is." + +The boys still listened and watched. Presently a revealing thought +flashed through Tom's mind, and he exclaimed: + +"Boys, I know who's drownded--it's us!" + +They felt like heroes in an instant. Here was a gorgeous triumph; they +were missed; they were mourned; hearts were breaking on their account; +tears were being shed; accusing memories of unkindness to these poor +lost lads were rising up, and unavailing regrets and remorse were being +indulged; and best of all, the departed were the talk of the whole +town, and the envy of all the boys, as far as this dazzling notoriety +was concerned. This was fine. It was worth while to be a pirate, after +all. + +As twilight drew on, the ferryboat went back to her accustomed +business and the skiffs disappeared. The pirates returned to camp. They +were jubilant with vanity over their new grandeur and the illustrious +trouble they were making. They caught fish, cooked supper and ate it, +and then fell to guessing at what the village was thinking and saying +about them; and the pictures they drew of the public distress on their +account were gratifying to look upon--from their point of view. But +when the shadows of night closed them in, they gradually ceased to +talk, and sat gazing into the fire, with their minds evidently +wandering elsewhere. The excitement was gone, now, and Tom and Joe +could not keep back thoughts of certain persons at home who were not +enjoying this fine frolic as much as they were. Misgivings came; they +grew troubled and unhappy; a sigh or two escaped, unawares. By and by +Joe timidly ventured upon a roundabout "feeler" as to how the others +might look upon a return to civilization--not right now, but-- + +Tom withered him with derision! Huck, being uncommitted as yet, joined +in with Tom, and the waverer quickly "explained," and was glad to get +out of the scrape with as little taint of chicken-hearted homesickness +clinging to his garments as he could. Mutiny was effectually laid to +rest for the moment. + +As the night deepened, Huck began to nod, and presently to snore. Joe +followed next. Tom lay upon his elbow motionless, for some time, +watching the two intently. At last he got up cautiously, on his knees, +and went searching among the grass and the flickering reflections flung +by the camp-fire. He picked up and inspected several large +semi-cylinders of the thin white bark of a sycamore, and finally chose +two which seemed to suit him. Then he knelt by the fire and painfully +wrote something upon each of these with his "red keel"; one he rolled up +and put in his jacket pocket, and the other he put in Joe's hat and +removed it to a little distance from the owner. And he also put into the +hat certain schoolboy treasures of almost inestimable value--among them +a lump of chalk, an India-rubber ball, three fishhooks, and one of that +kind of marbles known as a "sure 'nough crystal." Then he tiptoed his +way cautiously among the trees till he felt that he was out of hearing, +and straightway broke into a keen run in the direction of the sandbar. + + + +CHAPTER XV + +A FEW minutes later Tom was in the shoal water of the bar, wading +toward the Illinois shore. 
Before the depth reached his middle he was +half-way over; the current would permit no more wading, now, so he +struck out confidently to swim the remaining hundred yards. He swam +quartering upstream, but still was swept downward rather faster than he +had expected. However, he reached the shore finally, and drifted along +till he found a low place and drew himself out. He put his hand on his +jacket pocket, found his piece of bark safe, and then struck through +the woods, following the shore, with streaming garments. Shortly before +ten o'clock he came out into an open place opposite the village, and +saw the ferryboat lying in the shadow of the trees and the high bank. +Everything was quiet under the blinking stars. He crept down the bank, +watching with all his eyes, slipped into the water, swam three or four +strokes and climbed into the skiff that did "yawl" duty at the boat's +stern. He laid himself down under the thwarts and waited, panting. + +Presently the cracked bell tapped and a voice gave the order to "cast +off." A minute or two later the skiff's head was standing high up, +against the boat's swell, and the voyage was begun. Tom felt happy in +his success, for he knew it was the boat's last trip for the night. At +the end of a long twelve or fifteen minutes the wheels stopped, and Tom +slipped overboard and swam ashore in the dusk, landing fifty yards +downstream, out of danger of possible stragglers. + +He flew along unfrequented alleys, and shortly found himself at his +aunt's back fence. He climbed over, approached the "ell," and looked in +at the sitting-room window, for a light was burning there. There sat +Aunt Polly, Sid, Mary, and Joe Harper's mother, grouped together, +talking. They were by the bed, and the bed was between them and the +door. Tom went to the door and began to softly lift the latch; then he +pressed gently and the door yielded a crack; he continued pushing +cautiously, and quaking every time it creaked, till he judged he might +squeeze through on his knees; so he put his head through and began, +warily. + +"What makes the candle blow so?" said Aunt Polly. Tom hurried up. +"Why, that door's open, I believe. Why, of course it is. No end of +strange things now. Go 'long and shut it, Sid." + +Tom disappeared under the bed just in time. He lay and "breathed" +himself for a time, and then crept to where he could almost touch his +aunt's foot. + +"But as I was saying," said Aunt Polly, "he warn't BAD, so to say +--only mischEEvous. Only just giddy, and harum-scarum, you know. He +warn't any more responsible than a colt. HE never meant any harm, and +he was the best-hearted boy that ever was"--and she began to cry. + +"It was just so with my Joe--always full of his devilment, and up to +every kind of mischief, but he was just as unselfish and kind as he +could be--and laws bless me, to think I went and whipped him for taking +that cream, never once recollecting that I throwed it out myself +because it was sour, and I never to see him again in this world, never, +never, never, poor abused boy!" And Mrs. Harper sobbed as if her heart +would break. + +"I hope Tom's better off where he is," said Sid, "but if he'd been +better in some ways--" + +"SID!" Tom felt the glare of the old lady's eye, though he could not +see it. "Not a word against my Tom, now that he's gone! God'll take +care of HIM--never you trouble YOURself, sir! Oh, Mrs. Harper, I don't +know how to give him up! I don't know how to give him up! 
He was such a +comfort to me, although he tormented my old heart out of me, 'most." + +"The Lord giveth and the Lord hath taken away--Blessed be the name of +the Lord! But it's so hard--Oh, it's so hard! Only last Saturday my +Joe busted a firecracker right under my nose and I knocked him +sprawling. Little did I know then, how soon--Oh, if it was to do over +again I'd hug him and bless him for it." + +"Yes, yes, yes, I know just how you feel, Mrs. Harper, I know just +exactly how you feel. No longer ago than yesterday noon, my Tom took +and filled the cat full of Pain-killer, and I did think the cretur +would tear the house down. And God forgive me, I cracked Tom's head +with my thimble, poor boy, poor dead boy. But he's out of all his +troubles now. And the last words I ever heard him say was to reproach--" + +But this memory was too much for the old lady, and she broke entirely +down. Tom was snuffling, now, himself--and more in pity of himself than +anybody else. He could hear Mary crying, and putting in a kindly word +for him from time to time. He began to have a nobler opinion of himself +than ever before. Still, he was sufficiently touched by his aunt's +grief to long to rush out from under the bed and overwhelm her with +joy--and the theatrical gorgeousness of the thing appealed strongly to +his nature, too, but he resisted and lay still. + +He went on listening, and gathered by odds and ends that it was +conjectured at first that the boys had got drowned while taking a swim; +then the small raft had been missed; next, certain boys said the +missing lads had promised that the village should "hear something" +soon; the wise-heads had "put this and that together" and decided that +the lads had gone off on that raft and would turn up at the next town +below, presently; but toward noon the raft had been found, lodged +against the Missouri shore some five or six miles below the village +--and then hope perished; they must be drowned, else hunger would have +driven them home by nightfall if not sooner. It was believed that the +search for the bodies had been a fruitless effort merely because the +drowning must have occurred in mid-channel, since the boys, being good +swimmers, would otherwise have escaped to shore. This was Wednesday +night. If the bodies continued missing until Sunday, all hope would be +given over, and the funerals would be preached on that morning. Tom +shuddered. + +Mrs. Harper gave a sobbing good-night and turned to go. Then with a +mutual impulse the two bereaved women flung themselves into each +other's arms and had a good, consoling cry, and then parted. Aunt Polly +was tender far beyond her wont, in her good-night to Sid and Mary. Sid +snuffled a bit and Mary went off crying with all her heart. + +Aunt Polly knelt down and prayed for Tom so touchingly, so +appealingly, and with such measureless love in her words and her old +trembling voice, that he was weltering in tears again, long before she +was through. + +He had to keep still long after she went to bed, for she kept making +broken-hearted ejaculations from time to time, tossing unrestfully, and +turning over. But at last she was still, only moaning a little in her +sleep. Now the boy stole out, rose gradually by the bedside, shaded the +candle-light with his hand, and stood regarding her. His heart was full +of pity for her. He took out his sycamore scroll and placed it by the +candle. But something occurred to him, and he lingered considering. 
His +face lighted with a happy solution of his thought; he put the bark +hastily in his pocket. Then he bent over and kissed the faded lips, and +straightway made his stealthy exit, latching the door behind him. + +He threaded his way back to the ferry landing, found nobody at large +there, and walked boldly on board the boat, for he knew she was +tenantless except that there was a watchman, who always turned in and +slept like a graven image. He untied the skiff at the stern, slipped +into it, and was soon rowing cautiously upstream. When he had pulled a +mile above the village, he started quartering across and bent himself +stoutly to his work. He hit the landing on the other side neatly, for +this was a familiar bit of work to him. He was moved to capture the +skiff, arguing that it might be considered a ship and therefore +legitimate prey for a pirate, but he knew a thorough search would be +made for it and that might end in revelations. So he stepped ashore and +entered the woods. + +He sat down and took a long rest, torturing himself meanwhile to keep +awake, and then started warily down the home-stretch. The night was far +spent. It was broad daylight before he found himself fairly abreast the +island bar. He rested again until the sun was well up and gilding the +great river with its splendor, and then he plunged into the stream. A +little later he paused, dripping, upon the threshold of the camp, and +heard Joe say: + +"No, Tom's true-blue, Huck, and he'll come back. He won't desert. He +knows that would be a disgrace to a pirate, and Tom's too proud for +that sort of thing. He's up to something or other. Now I wonder what?" + +"Well, the things is ours, anyway, ain't they?" + +"Pretty near, but not yet, Huck. The writing says they are if he ain't +back here to breakfast." + +"Which he is!" exclaimed Tom, with fine dramatic effect, stepping +grandly into camp. + +A sumptuous breakfast of bacon and fish was shortly provided, and as +the boys set to work upon it, Tom recounted (and adorned) his +adventures. They were a vain and boastful company of heroes when the +tale was done. Then Tom hid himself away in a shady nook to sleep till +noon, and the other pirates got ready to fish and explore. + + + +CHAPTER XVI + +AFTER dinner all the gang turned out to hunt for turtle eggs on the +bar. They went about poking sticks into the sand, and when they found a +soft place they went down on their knees and dug with their hands. +Sometimes they would take fifty or sixty eggs out of one hole. They +were perfectly round white things a trifle smaller than an English +walnut. They had a famous fried-egg feast that night, and another on +Friday morning. + +After breakfast they went whooping and prancing out on the bar, and +chased each other round and round, shedding clothes as they went, until +they were naked, and then continued the frolic far away up the shoal +water of the bar, against the stiff current, which latter tripped their +legs from under them from time to time and greatly increased the fun. +And now and then they stooped in a group and splashed water in each +other's faces with their palms, gradually approaching each other, with +averted faces to avoid the strangling sprays, and finally gripping and +struggling till the best man ducked his neighbor, and then they all +went under in a tangle of white legs and arms and came up blowing, +sputtering, laughing, and gasping for breath at one and the same time. 
+ +When they were well exhausted, they would run out and sprawl on the +dry, hot sand, and lie there and cover themselves up with it, and by +and by break for the water again and go through the original +performance once more. Finally it occurred to them that their naked +skin represented flesh-colored "tights" very fairly; so they drew a +ring in the sand and had a circus--with three clowns in it, for none +would yield this proudest post to his neighbor. + +Next they got their marbles and played "knucks" and "ring-taw" and +"keeps" till that amusement grew stale. Then Joe and Huck had another +swim, but Tom would not venture, because he found that in kicking off +his trousers he had kicked his string of rattlesnake rattles off his +ankle, and he wondered how he had escaped cramp so long without the +protection of this mysterious charm. He did not venture again until he +had found it, and by that time the other boys were tired and ready to +rest. They gradually wandered apart, dropped into the "dumps," and fell +to gazing longingly across the wide river to where the village lay +drowsing in the sun. Tom found himself writing "BECKY" in the sand with +his big toe; he scratched it out, and was angry with himself for his +weakness. But he wrote it again, nevertheless; he could not help it. He +erased it once more and then took himself out of temptation by driving +the other boys together and joining them. + +But Joe's spirits had gone down almost beyond resurrection. He was so +homesick that he could hardly endure the misery of it. The tears lay +very near the surface. Huck was melancholy, too. Tom was downhearted, +but tried hard not to show it. He had a secret which he was not ready +to tell, yet, but if this mutinous depression was not broken up soon, +he would have to bring it out. He said, with a great show of +cheerfulness: + +"I bet there's been pirates on this island before, boys. We'll explore +it again. They've hid treasures here somewhere. How'd you feel to light +on a rotten chest full of gold and silver--hey?" + +But it roused only faint enthusiasm, which faded out, with no reply. +Tom tried one or two other seductions; but they failed, too. It was +discouraging work. Joe sat poking up the sand with a stick and looking +very gloomy. Finally he said: + +"Oh, boys, let's give it up. I want to go home. It's so lonesome." + +"Oh no, Joe, you'll feel better by and by," said Tom. "Just think of +the fishing that's here." + +"I don't care for fishing. I want to go home." + +"But, Joe, there ain't such another swimming-place anywhere." + +"Swimming's no good. I don't seem to care for it, somehow, when there +ain't anybody to say I sha'n't go in. I mean to go home." + +"Oh, shucks! Baby! You want to see your mother, I reckon." + +"Yes, I DO want to see my mother--and you would, too, if you had one. +I ain't any more baby than you are." And Joe snuffled a little. + +"Well, we'll let the cry-baby go home to his mother, won't we, Huck? +Poor thing--does it want to see its mother? And so it shall. You like +it here, don't you, Huck? We'll stay, won't we?" + +Huck said, "Y-e-s"--without any heart in it. + +"I'll never speak to you again as long as I live," said Joe, rising. +"There now!" And he moved moodily away and began to dress himself. + +"Who cares!" said Tom. "Nobody wants you to. Go 'long home and get +laughed at. Oh, you're a nice pirate. Huck and me ain't cry-babies. +We'll stay, won't we, Huck? Let him go if he wants to. I reckon we can +get along without him, per'aps." 
+ +But Tom was uneasy, nevertheless, and was alarmed to see Joe go +sullenly on with his dressing. And then it was discomforting to see +Huck eying Joe's preparations so wistfully, and keeping up such an +ominous silence. Presently, without a parting word, Joe began to wade +off toward the Illinois shore. Tom's heart began to sink. He glanced at +Huck. Huck could not bear the look, and dropped his eyes. Then he said: + +"I want to go, too, Tom. It was getting so lonesome anyway, and now +it'll be worse. Let's us go, too, Tom." + +"I won't! You can all go, if you want to. I mean to stay." + +"Tom, I better go." + +"Well, go 'long--who's hendering you." + +Huck began to pick up his scattered clothes. He said: + +"Tom, I wisht you'd come, too. Now you think it over. We'll wait for +you when we get to shore." + +"Well, you'll wait a blame long time, that's all." + +Huck started sorrowfully away, and Tom stood looking after him, with a +strong desire tugging at his heart to yield his pride and go along too. +He hoped the boys would stop, but they still waded slowly on. It +suddenly dawned on Tom that it was become very lonely and still. He +made one final struggle with his pride, and then darted after his +comrades, yelling: + +"Wait! Wait! I want to tell you something!" + +They presently stopped and turned around. When he got to where they +were, he began unfolding his secret, and they listened moodily till at +last they saw the "point" he was driving at, and then they set up a +war-whoop of applause and said it was "splendid!" and said if he had +told them at first, they wouldn't have started away. He made a plausible +excuse; but his real reason had been the fear that not even the secret +would keep them with him any very great length of time, and so he had +meant to hold it in reserve as a last seduction. + +The lads came gayly back and went at their sports again with a will, +chattering all the time about Tom's stupendous plan and admiring the +genius of it. After a dainty egg and fish dinner, Tom said he wanted to +learn to smoke, now. Joe caught at the idea and said he would like to +try, too. So Huck made pipes and filled them. These novices had never +smoked anything before but cigars made of grape-vine, and they "bit" +the tongue, and were not considered manly anyway. + +Now they stretched themselves out on their elbows and began to puff, +charily, and with slender confidence. The smoke had an unpleasant +taste, and they gagged a little, but Tom said: + +"Why, it's just as easy! If I'd a knowed this was all, I'd a learnt +long ago." + +"So would I," said Joe. "It's just nothing." + +"Why, many a time I've looked at people smoking, and thought well I +wish I could do that; but I never thought I could," said Tom. + +"That's just the way with me, hain't it, Huck? You've heard me talk +just that way--haven't you, Huck? I'll leave it to Huck if I haven't." + +"Yes--heaps of times," said Huck. + +"Well, I have too," said Tom; "oh, hundreds of times. Once down by the +slaughter-house. Don't you remember, Huck? Bob Tanner was there, and +Johnny Miller, and Jeff Thatcher, when I said it. Don't you remember, +Huck, 'bout me saying that?" + +"Yes, that's so," said Huck. "That was the day after I lost a white +alley. No, 'twas the day before." + +"There--I told you so," said Tom. "Huck recollects it." + +"I bleeve I could smoke this pipe all day," said Joe. "I don't feel +sick." + +"Neither do I," said Tom. "I could smoke it all day. But I bet you +Jeff Thatcher couldn't." + +"Jeff Thatcher! 
Why, he'd keel over just with two draws. Just let him +try it once. HE'D see!" + +"I bet he would. And Johnny Miller--I wish could see Johnny Miller +tackle it once." + +"Oh, don't I!" said Joe. "Why, I bet you Johnny Miller couldn't any +more do this than nothing. Just one little snifter would fetch HIM." + +"'Deed it would, Joe. Say--I wish the boys could see us now." + +"So do I." + +"Say--boys, don't say anything about it, and some time when they're +around, I'll come up to you and say, 'Joe, got a pipe? I want a smoke.' +And you'll say, kind of careless like, as if it warn't anything, you'll +say, 'Yes, I got my OLD pipe, and another one, but my tobacker ain't +very good.' And I'll say, 'Oh, that's all right, if it's STRONG +enough.' And then you'll out with the pipes, and we'll light up just as +ca'm, and then just see 'em look!" + +"By jings, that'll be gay, Tom! I wish it was NOW!" + +"So do I! And when we tell 'em we learned when we was off pirating, +won't they wish they'd been along?" + +"Oh, I reckon not! I'll just BET they will!" + +So the talk ran on. But presently it began to flag a trifle, and grow +disjointed. The silences widened; the expectoration marvellously +increased. Every pore inside the boys' cheeks became a spouting +fountain; they could scarcely bail out the cellars under their tongues +fast enough to prevent an inundation; little overflowings down their +throats occurred in spite of all they could do, and sudden retchings +followed every time. Both boys were looking very pale and miserable, +now. Joe's pipe dropped from his nerveless fingers. Tom's followed. +Both fountains were going furiously and both pumps bailing with might +and main. Joe said feebly: + +"I've lost my knife. I reckon I better go and find it." + +Tom said, with quivering lips and halting utterance: + +"I'll help you. You go over that way and I'll hunt around by the +spring. No, you needn't come, Huck--we can find it." + +So Huck sat down again, and waited an hour. Then he found it lonesome, +and went to find his comrades. They were wide apart in the woods, both +very pale, both fast asleep. But something informed him that if they +had had any trouble they had got rid of it. + +They were not talkative at supper that night. They had a humble look, +and when Huck prepared his pipe after the meal and was going to prepare +theirs, they said no, they were not feeling very well--something they +ate at dinner had disagreed with them. + +About midnight Joe awoke, and called the boys. There was a brooding +oppressiveness in the air that seemed to bode something. The boys +huddled themselves together and sought the friendly companionship of +the fire, though the dull dead heat of the breathless atmosphere was +stifling. They sat still, intent and waiting. The solemn hush +continued. Beyond the light of the fire everything was swallowed up in +the blackness of darkness. Presently there came a quivering glow that +vaguely revealed the foliage for a moment and then vanished. By and by +another came, a little stronger. Then another. Then a faint moan came +sighing through the branches of the forest and the boys felt a fleeting +breath upon their cheeks, and shuddered with the fancy that the Spirit +of the Night had gone by. There was a pause. Now a weird flash turned +night into day and showed every little grass-blade, separate and +distinct, that grew about their feet. And it showed three white, +startled faces, too. 
A deep peal of thunder went rolling and tumbling +down the heavens and lost itself in sullen rumblings in the distance. A +sweep of chilly air passed by, rustling all the leaves and snowing the +flaky ashes broadcast about the fire. Another fierce glare lit up the +forest and an instant crash followed that seemed to rend the tree-tops +right over the boys' heads. They clung together in terror, in the thick +gloom that followed. A few big rain-drops fell pattering upon the +leaves. + +"Quick! boys, go for the tent!" exclaimed Tom. + +They sprang away, stumbling over roots and among vines in the dark, no +two plunging in the same direction. A furious blast roared through the +trees, making everything sing as it went. One blinding flash after +another came, and peal on peal of deafening thunder. And now a +drenching rain poured down and the rising hurricane drove it in sheets +along the ground. The boys cried out to each other, but the roaring +wind and the booming thunder-blasts drowned their voices utterly. +However, one by one they straggled in at last and took shelter under +the tent, cold, scared, and streaming with water; but to have company +in misery seemed something to be grateful for. They could not talk, the +old sail flapped so furiously, even if the other noises would have +allowed them. The tempest rose higher and higher, and presently the +sail tore loose from its fastenings and went winging away on the blast. +The boys seized each others' hands and fled, with many tumblings and +bruises, to the shelter of a great oak that stood upon the river-bank. +Now the battle was at its highest. Under the ceaseless conflagration of +lightning that flamed in the skies, everything below stood out in +clean-cut and shadowless distinctness: the bending trees, the billowy +river, white with foam, the driving spray of spume-flakes, the dim +outlines of the high bluffs on the other side, glimpsed through the +drifting cloud-rack and the slanting veil of rain. Every little while +some giant tree yielded the fight and fell crashing through the younger +growth; and the unflagging thunder-peals came now in ear-splitting +explosive bursts, keen and sharp, and unspeakably appalling. The storm +culminated in one matchless effort that seemed likely to tear the island +to pieces, burn it up, drown it to the tree-tops, blow it away, and +deafen every creature in it, all at one and the same moment. It was a +wild night for homeless young heads to be out in. + +But at last the battle was done, and the forces retired with weaker +and weaker threatenings and grumblings, and peace resumed her sway. The +boys went back to camp, a good deal awed; but they found there was +still something to be thankful for, because the great sycamore, the +shelter of their beds, was a ruin, now, blasted by the lightnings, and +they were not under it when the catastrophe happened. + +Everything in camp was drenched, the camp-fire as well; for they were +but heedless lads, like their generation, and had made no provision +against rain. Here was matter for dismay, for they were soaked through +and chilled. They were eloquent in their distress; but they presently +discovered that the fire had eaten so far up under the great log it had +been built against (where it curved upward and separated itself from +the ground), that a handbreadth or so of it had escaped wetting; so +they patiently wrought until, with shreds and bark gathered from the +under sides of sheltered logs, they coaxed the fire to burn again. 
Then +they piled on great dead boughs till they had a roaring furnace, and +were glad-hearted once more. They dried their boiled ham and had a +feast, and after that they sat by the fire and expanded and glorified +their midnight adventure until morning, for there was not a dry spot to +sleep on, anywhere around. + +As the sun began to steal in upon the boys, drowsiness came over them, +and they went out on the sandbar and lay down to sleep. They got +scorched out by and by, and drearily set about getting breakfast. After +the meal they felt rusty, and stiff-jointed, and a little homesick once +more. Tom saw the signs, and fell to cheering up the pirates as well as +he could. But they cared nothing for marbles, or circus, or swimming, +or anything. He reminded them of the imposing secret, and raised a ray +of cheer. While it lasted, he got them interested in a new device. This +was to knock off being pirates, for a while, and be Indians for a +change. They were attracted by this idea; so it was not long before +they were stripped, and striped from head to heel with black mud, like +so many zebras--all of them chiefs, of course--and then they went +tearing through the woods to attack an English settlement. + +By and by they separated into three hostile tribes, and darted upon +each other from ambush with dreadful war-whoops, and killed and scalped +each other by thousands. It was a gory day. Consequently it was an +extremely satisfactory one. + +They assembled in camp toward supper-time, hungry and happy; but now a +difficulty arose--hostile Indians could not break the bread of +hospitality together without first making peace, and this was a simple +impossibility without smoking a pipe of peace. There was no other +process that ever they had heard of. Two of the savages almost wished +they had remained pirates. However, there was no other way; so with +such show of cheerfulness as they could muster they called for the pipe +and took their whiff as it passed, in due form. + +And behold, they were glad they had gone into savagery, for they had +gained something; they found that they could now smoke a little without +having to go and hunt for a lost knife; they did not get sick enough to +be seriously uncomfortable. They were not likely to fool away this high +promise for lack of effort. No, they practised cautiously, after +supper, with right fair success, and so they spent a jubilant evening. +They were prouder and happier in their new acquirement than they would +have been in the scalping and skinning of the Six Nations. We will +leave them to smoke and chatter and brag, since we have no further use +for them at present. + + + +CHAPTER XVII + +BUT there was no hilarity in the little town that same tranquil +Saturday afternoon. The Harpers, and Aunt Polly's family, were being +put into mourning, with great grief and many tears. An unusual quiet +possessed the village, although it was ordinarily quiet enough, in all +conscience. The villagers conducted their concerns with an absent air, +and talked little; but they sighed often. The Saturday holiday seemed a +burden to the children. They had no heart in their sports, and +gradually gave them up. + +In the afternoon Becky Thatcher found herself moping about the +deserted schoolhouse yard, and feeling very melancholy. But she found +nothing there to comfort her. She soliloquized: + +"Oh, if I only had a brass andiron-knob again! But I haven't got +anything now to remember him by." And she choked back a little sob. 
+ +Presently she stopped, and said to herself: + +"It was right here. Oh, if it was to do over again, I wouldn't say +that--I wouldn't say it for the whole world. But he's gone now; I'll +never, never, never see him any more." + +This thought broke her down, and she wandered away, with tears rolling +down her cheeks. Then quite a group of boys and girls--playmates of +Tom's and Joe's--came by, and stood looking over the paling fence and +talking in reverent tones of how Tom did so-and-so the last time they +saw him, and how Joe said this and that small trifle (pregnant with +awful prophecy, as they could easily see now!)--and each speaker +pointed out the exact spot where the lost lads stood at the time, and +then added something like "and I was a-standing just so--just as I am +now, and as if you was him--I was as close as that--and he smiled, just +this way--and then something seemed to go all over me, like--awful, you +know--and I never thought what it meant, of course, but I can see now!" + +Then there was a dispute about who saw the dead boys last in life, and +many claimed that dismal distinction, and offered evidences, more or +less tampered with by the witness; and when it was ultimately decided +who DID see the departed last, and exchanged the last words with them, +the lucky parties took upon themselves a sort of sacred importance, and +were gaped at and envied by all the rest. One poor chap, who had no +other grandeur to offer, said with tolerably manifest pride in the +remembrance: + +"Well, Tom Sawyer he licked me once." + +But that bid for glory was a failure. Most of the boys could say that, +and so that cheapened the distinction too much. The group loitered +away, still recalling memories of the lost heroes, in awed voices. + +When the Sunday-school hour was finished, the next morning, the bell +began to toll, instead of ringing in the usual way. It was a very still +Sabbath, and the mournful sound seemed in keeping with the musing hush +that lay upon nature. The villagers began to gather, loitering a moment +in the vestibule to converse in whispers about the sad event. But there +was no whispering in the house; only the funereal rustling of dresses +as the women gathered to their seats disturbed the silence there. None +could remember when the little church had been so full before. There +was finally a waiting pause, an expectant dumbness, and then Aunt Polly +entered, followed by Sid and Mary, and they by the Harper family, all +in deep black, and the whole congregation, the old minister as well, +rose reverently and stood until the mourners were seated in the front +pew. There was another communing silence, broken at intervals by +muffled sobs, and then the minister spread his hands abroad and prayed. +A moving hymn was sung, and the text followed: "I am the Resurrection +and the Life." + +As the service proceeded, the clergyman drew such pictures of the +graces, the winning ways, and the rare promise of the lost lads that +every soul there, thinking he recognized these pictures, felt a pang in +remembering that he had persistently blinded himself to them always +before, and had as persistently seen only faults and flaws in the poor +boys. 
The minister related many a touching incident in the lives of the +departed, too, which illustrated their sweet, generous natures, and the +people could easily see, now, how noble and beautiful those episodes +were, and remembered with grief that at the time they occurred they had +seemed rank rascalities, well deserving of the cowhide. The +congregation became more and more moved, as the pathetic tale went on, +till at last the whole company broke down and joined the weeping +mourners in a chorus of anguished sobs, the preacher himself giving way +to his feelings, and crying in the pulpit. + +There was a rustle in the gallery, which nobody noticed; a moment +later the church door creaked; the minister raised his streaming eyes +above his handkerchief, and stood transfixed! First one and then +another pair of eyes followed the minister's, and then almost with one +impulse the congregation rose and stared while the three dead boys came +marching up the aisle, Tom in the lead, Joe next, and Huck, a ruin of +drooping rags, sneaking sheepishly in the rear! They had been hid in +the unused gallery listening to their own funeral sermon! + +Aunt Polly, Mary, and the Harpers threw themselves upon their restored +ones, smothered them with kisses and poured out thanksgivings, while +poor Huck stood abashed and uncomfortable, not knowing exactly what to +do or where to hide from so many unwelcoming eyes. He wavered, and +started to slink away, but Tom seized him and said: + +"Aunt Polly, it ain't fair. Somebody's got to be glad to see Huck." + +"And so they shall. I'm glad to see him, poor motherless thing!" And +the loving attentions Aunt Polly lavished upon him were the one thing +capable of making him more uncomfortable than he was before. + +Suddenly the minister shouted at the top of his voice: "Praise God +from whom all blessings flow--SING!--and put your hearts in it!" + +And they did. Old Hundred swelled up with a triumphant burst, and +while it shook the rafters Tom Sawyer the Pirate looked around upon the +envying juveniles about him and confessed in his heart that this was +the proudest moment of his life. + +As the "sold" congregation trooped out they said they would almost be +willing to be made ridiculous again to hear Old Hundred sung like that +once more. + +Tom got more cuffs and kisses that day--according to Aunt Polly's +varying moods--than he had earned before in a year; and he hardly knew +which expressed the most gratefulness to God and affection for himself. + + + +CHAPTER XVIII + +THAT was Tom's great secret--the scheme to return home with his +brother pirates and attend their own funerals. They had paddled over to +the Missouri shore on a log, at dusk on Saturday, landing five or six +miles below the village; they had slept in the woods at the edge of the +town till nearly daylight, and had then crept through back lanes and +alleys and finished their sleep in the gallery of the church among a +chaos of invalided benches. + +At breakfast, Monday morning, Aunt Polly and Mary were very loving to +Tom, and very attentive to his wants. There was an unusual amount of +talk. In the course of it Aunt Polly said: + +"Well, I don't say it wasn't a fine joke, Tom, to keep everybody +suffering 'most a week so you boys had a good time, but it is a pity +you could be so hard-hearted as to let me suffer so. If you could come +over on a log to go to your funeral, you could have come over and give +me a hint some way that you warn't dead, but only run off." 
+ +"Yes, you could have done that, Tom," said Mary; "and I believe you +would if you had thought of it." + +"Would you, Tom?" said Aunt Polly, her face lighting wistfully. "Say, +now, would you, if you'd thought of it?" + +"I--well, I don't know. 'Twould 'a' spoiled everything." + +"Tom, I hoped you loved me that much," said Aunt Polly, with a grieved +tone that discomforted the boy. "It would have been something if you'd +cared enough to THINK of it, even if you didn't DO it." + +"Now, auntie, that ain't any harm," pleaded Mary; "it's only Tom's +giddy way--he is always in such a rush that he never thinks of +anything." + +"More's the pity. Sid would have thought. And Sid would have come and +DONE it, too. Tom, you'll look back, some day, when it's too late, and +wish you'd cared a little more for me when it would have cost you so +little." + +"Now, auntie, you know I do care for you," said Tom. + +"I'd know it better if you acted more like it." + +"I wish now I'd thought," said Tom, with a repentant tone; "but I +dreamt about you, anyway. That's something, ain't it?" + +"It ain't much--a cat does that much--but it's better than nothing. +What did you dream?" + +"Why, Wednesday night I dreamt that you was sitting over there by the +bed, and Sid was sitting by the woodbox, and Mary next to him." + +"Well, so we did. So we always do. I'm glad your dreams could take +even that much trouble about us." + +"And I dreamt that Joe Harper's mother was here." + +"Why, she was here! Did you dream any more?" + +"Oh, lots. But it's so dim, now." + +"Well, try to recollect--can't you?" + +"Somehow it seems to me that the wind--the wind blowed the--the--" + +"Try harder, Tom! The wind did blow something. Come!" + +Tom pressed his fingers on his forehead an anxious minute, and then +said: + +"I've got it now! I've got it now! It blowed the candle!" + +"Mercy on us! Go on, Tom--go on!" + +"And it seems to me that you said, 'Why, I believe that that door--'" + +"Go ON, Tom!" + +"Just let me study a moment--just a moment. Oh, yes--you said you +believed the door was open." + +"As I'm sitting here, I did! Didn't I, Mary! Go on!" + +"And then--and then--well I won't be certain, but it seems like as if +you made Sid go and--and--" + +"Well? Well? What did I make him do, Tom? What did I make him do?" + +"You made him--you--Oh, you made him shut it." + +"Well, for the land's sake! I never heard the beat of that in all my +days! Don't tell ME there ain't anything in dreams, any more. Sereny +Harper shall know of this before I'm an hour older. I'd like to see her +get around THIS with her rubbage 'bout superstition. Go on, Tom!" + +"Oh, it's all getting just as bright as day, now. Next you said I +warn't BAD, only mischeevous and harum-scarum, and not any more +responsible than--than--I think it was a colt, or something." + +"And so it was! Well, goodness gracious! Go on, Tom!" + +"And then you began to cry." + +"So I did. So I did. Not the first time, neither. And then--" + +"Then Mrs. Harper she began to cry, and said Joe was just the same, +and she wished she hadn't whipped him for taking cream when she'd +throwed it out her own self--" + +"Tom! The sperrit was upon you! You was a prophesying--that's what you +was doing! Land alive, go on, Tom!" + +"Then Sid he said--he said--" + +"I don't think I said anything," said Sid. + +"Yes you did, Sid," said Mary. + +"Shut your heads and let Tom go on! What did he say, Tom?" 
+ +"He said--I THINK he said he hoped I was better off where I was gone +to, but if I'd been better sometimes--" + +"THERE, d'you hear that! It was his very words!" + +"And you shut him up sharp." + +"I lay I did! There must 'a' been an angel there. There WAS an angel +there, somewheres!" + +"And Mrs. Harper told about Joe scaring her with a firecracker, and +you told about Peter and the Painkiller--" + +"Just as true as I live!" + +"And then there was a whole lot of talk 'bout dragging the river for +us, and 'bout having the funeral Sunday, and then you and old Miss +Harper hugged and cried, and she went." + +"It happened just so! It happened just so, as sure as I'm a-sitting in +these very tracks. Tom, you couldn't told it more like if you'd 'a' +seen it! And then what? Go on, Tom!" + +"Then I thought you prayed for me--and I could see you and hear every +word you said. And you went to bed, and I was so sorry that I took and +wrote on a piece of sycamore bark, 'We ain't dead--we are only off +being pirates,' and put it on the table by the candle; and then you +looked so good, laying there asleep, that I thought I went and leaned +over and kissed you on the lips." + +"Did you, Tom, DID you! I just forgive you everything for that!" And +she seized the boy in a crushing embrace that made him feel like the +guiltiest of villains. + +"It was very kind, even though it was only a--dream," Sid soliloquized +just audibly. + +"Shut up, Sid! A body does just the same in a dream as he'd do if he +was awake. Here's a big Milum apple I've been saving for you, Tom, if +you was ever found again--now go 'long to school. I'm thankful to the +good God and Father of us all I've got you back, that's long-suffering +and merciful to them that believe on Him and keep His word, though +goodness knows I'm unworthy of it, but if only the worthy ones got His +blessings and had His hand to help them over the rough places, there's +few enough would smile here or ever enter into His rest when the long +night comes. Go 'long Sid, Mary, Tom--take yourselves off--you've +hendered me long enough." + +The children left for school, and the old lady to call on Mrs. Harper +and vanquish her realism with Tom's marvellous dream. Sid had better +judgment than to utter the thought that was in his mind as he left the +house. It was this: "Pretty thin--as long a dream as that, without any +mistakes in it!" + +What a hero Tom was become, now! He did not go skipping and prancing, +but moved with a dignified swagger as became a pirate who felt that the +public eye was on him. And indeed it was; he tried not to seem to see +the looks or hear the remarks as he passed along, but they were food +and drink to him. Smaller boys than himself flocked at his heels, as +proud to be seen with him, and tolerated by him, as if he had been the +drummer at the head of a procession or the elephant leading a menagerie +into town. Boys of his own size pretended not to know he had been away +at all; but they were consuming with envy, nevertheless. They would +have given anything to have that swarthy suntanned skin of his, and his +glittering notoriety; and Tom would not have parted with either for a +circus. + +At school the children made so much of him and of Joe, and delivered +such eloquent admiration from their eyes, that the two heroes were not +long in becoming insufferably "stuck-up." 
They began to tell their +adventures to hungry listeners--but they only began; it was not a thing +likely to have an end, with imaginations like theirs to furnish +material. And finally, when they got out their pipes and went serenely +puffing around, the very summit of glory was reached. + +Tom decided that he could be independent of Becky Thatcher now. Glory +was sufficient. He would live for glory. Now that he was distinguished, +maybe she would be wanting to "make up." Well, let her--she should see +that he could be as indifferent as some other people. Presently she +arrived. Tom pretended not to see her. He moved away and joined a group +of boys and girls and began to talk. Soon he observed that she was +tripping gayly back and forth with flushed face and dancing eyes, +pretending to be busy chasing schoolmates, and screaming with laughter +when she made a capture; but he noticed that she always made her +captures in his vicinity, and that she seemed to cast a conscious eye +in his direction at such times, too. It gratified all the vicious +vanity that was in him; and so, instead of winning him, it only "set +him up" the more and made him the more diligent to avoid betraying that +he knew she was about. Presently she gave over skylarking, and moved +irresolutely about, sighing once or twice and glancing furtively and +wistfully toward Tom. Then she observed that now Tom was talking more +particularly to Amy Lawrence than to any one else. She felt a sharp +pang and grew disturbed and uneasy at once. She tried to go away, but +her feet were treacherous, and carried her to the group instead. She +said to a girl almost at Tom's elbow--with sham vivacity: + +"Why, Mary Austin! you bad girl, why didn't you come to Sunday-school?" + +"I did come--didn't you see me?" + +"Why, no! Did you? Where did you sit?" + +"I was in Miss Peters' class, where I always go. I saw YOU." + +"Did you? Why, it's funny I didn't see you. I wanted to tell you about +the picnic." + +"Oh, that's jolly. Who's going to give it?" + +"My ma's going to let me have one." + +"Oh, goody; I hope she'll let ME come." + +"Well, she will. The picnic's for me. She'll let anybody come that I +want, and I want you." + +"That's ever so nice. When is it going to be?" + +"By and by. Maybe about vacation." + +"Oh, won't it be fun! You going to have all the girls and boys?" + +"Yes, every one that's friends to me--or wants to be"; and she glanced +ever so furtively at Tom, but he talked right along to Amy Lawrence +about the terrible storm on the island, and how the lightning tore the +great sycamore tree "all to flinders" while he was "standing within +three feet of it." + +"Oh, may I come?" said Grace Miller. + +"Yes." + +"And me?" said Sally Rogers. + +"Yes." + +"And me, too?" said Susy Harper. "And Joe?" + +"Yes." + +And so on, with clapping of joyful hands till all the group had begged +for invitations but Tom and Amy. Then Tom turned coolly away, still +talking, and took Amy with him. Becky's lips trembled and the tears +came to her eyes; she hid these signs with a forced gayety and went on +chattering, but the life had gone out of the picnic, now, and out of +everything else; she got away as soon as she could and hid herself and +had what her sex call "a good cry." Then she sat moody, with wounded +pride, till the bell rang. She roused up, now, with a vindictive cast +in her eye, and gave her plaited tails a shake and said she knew what +SHE'D do. 
+ +At recess Tom continued his flirtation with Amy with jubilant +self-satisfaction. And he kept drifting about to find Becky and lacerate +her with the performance. At last he spied her, but there was a sudden +falling of his mercury. She was sitting cosily on a little bench behind +the schoolhouse looking at a picture-book with Alfred Temple--and so +absorbed were they, and their heads so close together over the book, +that they did not seem to be conscious of anything in the world besides. +Jealousy ran red-hot through Tom's veins. He began to hate himself for +throwing away the chance Becky had offered for a reconciliation. He +called himself a fool, and all the hard names he could think of. He +wanted to cry with vexation. Amy chatted happily along, as they walked, +for her heart was singing, but Tom's tongue had lost its function. He +did not hear what Amy was saying, and whenever she paused expectantly he +could only stammer an awkward assent, which was as often misplaced as +otherwise. He kept drifting to the rear of the schoolhouse, again and +again, to sear his eyeballs with the hateful spectacle there. He could +not help it. And it maddened him to see, as he thought he saw, that +Becky Thatcher never once suspected that he was even in the land of the +living. But she did see, nevertheless; and she knew she was winning her +fight, too, and was glad to see him suffer as she had suffered. + +Amy's happy prattle became intolerable. Tom hinted at things he had to +attend to; things that must be done; and time was fleeting. But in +vain--the girl chirped on. Tom thought, "Oh, hang her, ain't I ever +going to get rid of her?" At last he must be attending to those +things--and she said artlessly that she would be "around" when school +let out. And he hastened away, hating her for it. + +"Any other boy!" Tom thought, grating his teeth. "Any boy in the whole +town but that Saint Louis smarty that thinks he dresses so fine and is +aristocracy! Oh, all right, I licked you the first day you ever saw +this town, mister, and I'll lick you again! You just wait till I catch +you out! I'll just take and--" + +And he went through the motions of thrashing an imaginary boy +--pummelling the air, and kicking and gouging. "Oh, you do, do you? You +holler 'nough, do you? Now, then, let that learn you!" And so the +imaginary flogging was finished to his satisfaction. + +Tom fled home at noon. His conscience could not endure any more of +Amy's grateful happiness, and his jealousy could bear no more of the +other distress. Becky resumed her picture inspections with Alfred, but +as the minutes dragged along and no Tom came to suffer, her triumph +began to cloud and she lost interest; gravity and absent-mindedness +followed, and then melancholy; two or three times she pricked up her +ear at a footstep, but it was a false hope; no Tom came. At last she +grew entirely miserable and wished she hadn't carried it so far. When +poor Alfred, seeing that he was losing her, he did not know how, kept +exclaiming: "Oh, here's a jolly one! look at this!" she lost patience +at last, and said, "Oh, don't bother me! I don't care for them!" and +burst into tears, and got up and walked away. + +Alfred dropped alongside and was going to try to comfort her, but she +said: + +"Go away and leave me alone, can't you! I hate you!" + +So the boy halted, wondering what he could have done--for she had said +she would look at pictures all through the nooning--and she walked on, +crying. 
Then Alfred went musing into the deserted schoolhouse. He was +humiliated and angry. He easily guessed his way to the truth--the girl +had simply made a convenience of him to vent her spite upon Tom Sawyer. +He was far from hating Tom the less when this thought occurred to him. +He wished there was some way to get that boy into trouble without much +risk to himself. Tom's spelling-book fell under his eye. Here was his +opportunity. He gratefully opened to the lesson for the afternoon and +poured ink upon the page. + +Becky, glancing in at a window behind him at the moment, saw the act, +and moved on, without discovering herself. She started homeward, now, +intending to find Tom and tell him; Tom would be thankful and their +troubles would be healed. Before she was half way home, however, she +had changed her mind. The thought of Tom's treatment of her when she +was talking about her picnic came scorching back and filled her with +shame. She resolved to let him get whipped on the damaged +spelling-book's account, and to hate him forever, into the bargain. + + + +CHAPTER XIX + +TOM arrived at home in a dreary mood, and the first thing his aunt +said to him showed him that he had brought his sorrows to an +unpromising market: + +"Tom, I've a notion to skin you alive!" + +"Auntie, what have I done?" + +"Well, you've done enough. Here I go over to Sereny Harper, like an +old softy, expecting I'm going to make her believe all that rubbage +about that dream, when lo and behold you she'd found out from Joe that +you was over here and heard all the talk we had that night. Tom, I +don't know what is to become of a boy that will act like that. It makes +me feel so bad to think you could let me go to Sereny Harper and make +such a fool of myself and never say a word." + +This was a new aspect of the thing. His smartness of the morning had +seemed to Tom a good joke before, and very ingenious. It merely looked +mean and shabby now. He hung his head and could not think of anything +to say for a moment. Then he said: + +"Auntie, I wish I hadn't done it--but I didn't think." + +"Oh, child, you never think. You never think of anything but your own +selfishness. You could think to come all the way over here from +Jackson's Island in the night to laugh at our troubles, and you could +think to fool me with a lie about a dream; but you couldn't ever think +to pity us and save us from sorrow." + +"Auntie, I know now it was mean, but I didn't mean to be mean. I +didn't, honest. And besides, I didn't come over here to laugh at you +that night." + +"What did you come for, then?" + +"It was to tell you not to be uneasy about us, because we hadn't got +drownded." + +"Tom, Tom, I would be the thankfullest soul in this world if I could +believe you ever had as good a thought as that, but you know you never +did--and I know it, Tom." + +"Indeed and 'deed I did, auntie--I wish I may never stir if I didn't." + +"Oh, Tom, don't lie--don't do it. It only makes things a hundred times +worse." + +"It ain't a lie, auntie; it's the truth. I wanted to keep you from +grieving--that was all that made me come." + +"I'd give the whole world to believe that--it would cover up a power +of sins, Tom. I'd 'most be glad you'd run off and acted so bad. But it +ain't reasonable; because, why didn't you tell me, child?" + +"Why, you see, when you got to talking about the funeral, I just got +all full of the idea of our coming and hiding in the church, and I +couldn't somehow bear to spoil it. 
So I just put the bark back in my +pocket and kept mum." + +"What bark?" + +"The bark I had wrote on to tell you we'd gone pirating. I wish, now, +you'd waked up when I kissed you--I do, honest." + +The hard lines in his aunt's face relaxed and a sudden tenderness +dawned in her eyes. + +"DID you kiss me, Tom?" + +"Why, yes, I did." + +"Are you sure you did, Tom?" + +"Why, yes, I did, auntie--certain sure." + +"What did you kiss me for, Tom?" + +"Because I loved you so, and you laid there moaning and I was so sorry." + +The words sounded like truth. The old lady could not hide a tremor in +her voice when she said: + +"Kiss me again, Tom!--and be off with you to school, now, and don't +bother me any more." + +The moment he was gone, she ran to a closet and got out the ruin of a +jacket which Tom had gone pirating in. Then she stopped, with it in her +hand, and said to herself: + +"No, I don't dare. Poor boy, I reckon he's lied about it--but it's a +blessed, blessed lie, there's such a comfort come from it. I hope the +Lord--I KNOW the Lord will forgive him, because it was such +goodheartedness in him to tell it. But I don't want to find out it's a +lie. I won't look." + +She put the jacket away, and stood by musing a minute. Twice she put +out her hand to take the garment again, and twice she refrained. Once +more she ventured, and this time she fortified herself with the +thought: "It's a good lie--it's a good lie--I won't let it grieve me." +So she sought the jacket pocket. A moment later she was reading Tom's +piece of bark through flowing tears and saying: "I could forgive the +boy, now, if he'd committed a million sins!" + + + +CHAPTER XX + +THERE was something about Aunt Polly's manner, when she kissed Tom, +that swept away his low spirits and made him lighthearted and happy +again. He started to school and had the luck of coming upon Becky +Thatcher at the head of Meadow Lane. His mood always determined his +manner. Without a moment's hesitation he ran to her and said: + +"I acted mighty mean to-day, Becky, and I'm so sorry. I won't ever, +ever do that way again, as long as ever I live--please make up, won't +you?" + +The girl stopped and looked him scornfully in the face: + +"I'll thank you to keep yourself TO yourself, Mr. Thomas Sawyer. I'll +never speak to you again." + +She tossed her head and passed on. Tom was so stunned that he had not +even presence of mind enough to say "Who cares, Miss Smarty?" until the +right time to say it had gone by. So he said nothing. But he was in a +fine rage, nevertheless. He moped into the schoolyard wishing she were +a boy, and imagining how he would trounce her if she were. He presently +encountered her and delivered a stinging remark as he passed. She +hurled one in return, and the angry breach was complete. It seemed to +Becky, in her hot resentment, that she could hardly wait for school to +"take in," she was so impatient to see Tom flogged for the injured +spelling-book. If she had had any lingering notion of exposing Alfred +Temple, Tom's offensive fling had driven it entirely away. + +Poor girl, she did not know how fast she was nearing trouble herself. +The master, Mr. Dobbins, had reached middle age with an unsatisfied +ambition. The darling of his desires was, to be a doctor, but poverty +had decreed that he should be nothing higher than a village +schoolmaster. Every day he took a mysterious book out of his desk and +absorbed himself in it at times when no classes were reciting. He kept +that book under lock and key. 
There was not an urchin in school but was +perishing to have a glimpse of it, but the chance never came. Every boy +and girl had a theory about the nature of that book; but no two +theories were alike, and there was no way of getting at the facts in +the case. Now, as Becky was passing by the desk, which stood near the +door, she noticed that the key was in the lock! It was a precious +moment. She glanced around; found herself alone, and the next instant +she had the book in her hands. The title-page--Professor Somebody's +ANATOMY--carried no information to her mind; so she began to turn the +leaves. She came at once upon a handsomely engraved and colored +frontispiece--a human figure, stark naked. At that moment a shadow fell +on the page and Tom Sawyer stepped in at the door and caught a glimpse +of the picture. Becky snatched at the book to close it, and had the +hard luck to tear the pictured page half down the middle. She thrust +the volume into the desk, turned the key, and burst out crying with +shame and vexation. + +"Tom Sawyer, you are just as mean as you can be, to sneak up on a +person and look at what they're looking at." + +"How could I know you was looking at anything?" + +"You ought to be ashamed of yourself, Tom Sawyer; you know you're +going to tell on me, and oh, what shall I do, what shall I do! I'll be +whipped, and I never was whipped in school." + +Then she stamped her little foot and said: + +"BE so mean if you want to! I know something that's going to happen. +You just wait and you'll see! Hateful, hateful, hateful!"--and she +flung out of the house with a new explosion of crying. + +Tom stood still, rather flustered by this onslaught. Presently he said +to himself: + +"What a curious kind of a fool a girl is! Never been licked in school! +Shucks! What's a licking! That's just like a girl--they're so +thin-skinned and chicken-hearted. Well, of course I ain't going to tell +old Dobbins on this little fool, because there's other ways of getting +even on her, that ain't so mean; but what of it? Old Dobbins will ask +who it was tore his book. Nobody'll answer. Then he'll do just the way +he always does--ask first one and then t'other, and when he comes to the +right girl he'll know it, without any telling. Girls' faces always tell +on them. They ain't got any backbone. She'll get licked. Well, it's a +kind of a tight place for Becky Thatcher, because there ain't any way +out of it." Tom conned the thing a moment longer, and then added: "All +right, though; she'd like to see me in just such a fix--let her sweat it +out!" + +Tom joined the mob of skylarking scholars outside. In a few moments +the master arrived and school "took in." Tom did not feel a strong +interest in his studies. Every time he stole a glance at the girls' +side of the room Becky's face troubled him. Considering all things, he +did not want to pity her, and yet it was all he could do to help it. He +could get up no exultation that was really worthy the name. Presently +the spelling-book discovery was made, and Tom's mind was entirely full +of his own matters for a while after that. Becky roused up from her +lethargy of distress and showed good interest in the proceedings. She +did not expect that Tom could get out of his trouble by denying that he +spilt the ink on the book himself; and she was right. The denial only +seemed to make the thing worse for Tom. Becky supposed she would be +glad of that, and she tried to believe she was glad of it, but she +found she was not certain. 
When the worst came to the worst, she had an +impulse to get up and tell on Alfred Temple, but she made an effort and +forced herself to keep still--because, said she to herself, "he'll tell +about me tearing the picture sure. I wouldn't say a word, not to save +his life!" + +Tom took his whipping and went back to his seat not at all +broken-hearted, for he thought it was possible that he had unknowingly +upset the ink on the spelling-book himself, in some skylarking bout--he +had denied it for form's sake and because it was custom, and had stuck +to the denial from principle. + +A whole hour drifted by, the master sat nodding in his throne, the air +was drowsy with the hum of study. By and by, Mr. Dobbins straightened +himself up, yawned, then unlocked his desk, and reached for his book, +but seemed undecided whether to take it out or leave it. Most of the +pupils glanced up languidly, but there were two among them that watched +his movements with intent eyes. Mr. Dobbins fingered his book absently +for a while, then took it out and settled himself in his chair to read! +Tom shot a glance at Becky. He had seen a hunted and helpless rabbit +look as she did, with a gun levelled at its head. Instantly he forgot +his quarrel with her. Quick--something must be done! done in a flash, +too! But the very imminence of the emergency paralyzed his invention. +Good!--he had an inspiration! He would run and snatch the book, spring +through the door and fly. But his resolution shook for one little +instant, and the chance was lost--the master opened the volume. If Tom +only had the wasted opportunity back again! Too late. There was no help +for Becky now, he said. The next moment the master faced the school. +Every eye sank under his gaze. There was that in it which smote even +the innocent with fear. There was silence while one might count ten +--the master was gathering his wrath. Then he spoke: "Who tore this book?" + +There was not a sound. One could have heard a pin drop. The stillness +continued; the master searched face after face for signs of guilt. + +"Benjamin Rogers, did you tear this book?" + +A denial. Another pause. + +"Joseph Harper, did you?" + +Another denial. Tom's uneasiness grew more and more intense under the +slow torture of these proceedings. The master scanned the ranks of +boys--considered a while, then turned to the girls: + +"Amy Lawrence?" + +A shake of the head. + +"Gracie Miller?" + +The same sign. + +"Susan Harper, did you do this?" + +Another negative. The next girl was Becky Thatcher. Tom was trembling +from head to foot with excitement and a sense of the hopelessness of +the situation. + +"Rebecca Thatcher" [Tom glanced at her face--it was white with terror] +--"did you tear--no, look me in the face" [her hands rose in appeal] +--"did you tear this book?" + +A thought shot like lightning through Tom's brain. He sprang to his +feet and shouted--"I done it!" + +The school stared in perplexity at this incredible folly. Tom stood a +moment, to gather his dismembered faculties; and when he stepped +forward to go to his punishment the surprise, the gratitude, the +adoration that shone upon him out of poor Becky's eyes seemed pay +enough for a hundred floggings. Inspired by the splendor of his own +act, he took without an outcry the most merciless flaying that even Mr. 
+Dobbins had ever administered; and also received with indifference the +added cruelty of a command to remain two hours after school should be +dismissed--for he knew who would wait for him outside till his +captivity was done, and not count the tedious time as loss, either. + +Tom went to bed that night planning vengeance against Alfred Temple; +for with shame and repentance Becky had told him all, not forgetting +her own treachery; but even the longing for vengeance had to give way, +soon, to pleasanter musings, and he fell asleep at last with Becky's +latest words lingering dreamily in his ear-- + +"Tom, how COULD you be so noble!" + + + +CHAPTER XXI + +VACATION was approaching. The schoolmaster, always severe, grew +severer and more exacting than ever, for he wanted the school to make a +good showing on "Examination" day. His rod and his ferule were seldom +idle now--at least among the smaller pupils. Only the biggest boys, and +young ladies of eighteen and twenty, escaped lashing. Mr. Dobbins' +lashings were very vigorous ones, too; for although he carried, under +his wig, a perfectly bald and shiny head, he had only reached middle +age, and there was no sign of feebleness in his muscle. As the great +day approached, all the tyranny that was in him came to the surface; he +seemed to take a vindictive pleasure in punishing the least +shortcomings. The consequence was, that the smaller boys spent their +days in terror and suffering and their nights in plotting revenge. They +threw away no opportunity to do the master a mischief. But he kept +ahead all the time. The retribution that followed every vengeful +success was so sweeping and majestic that the boys always retired from +the field badly worsted. At last they conspired together and hit upon a +plan that promised a dazzling victory. They swore in the sign-painter's +boy, told him the scheme, and asked his help. He had his own reasons +for being delighted, for the master boarded in his father's family and +had given the boy ample cause to hate him. The master's wife would go +on a visit to the country in a few days, and there would be nothing to +interfere with the plan; the master always prepared himself for great +occasions by getting pretty well fuddled, and the sign-painter's boy +said that when the dominie had reached the proper condition on +Examination Evening he would "manage the thing" while he napped in his +chair; then he would have him awakened at the right time and hurried +away to school. + +In the fulness of time the interesting occasion arrived. At eight in +the evening the schoolhouse was brilliantly lighted, and adorned with +wreaths and festoons of foliage and flowers. The master sat throned in +his great chair upon a raised platform, with his blackboard behind him. +He was looking tolerably mellow. Three rows of benches on each side and +six rows in front of him were occupied by the dignitaries of the town +and by the parents of the pupils. To his left, back of the rows of +citizens, was a spacious temporary platform upon which were seated the +scholars who were to take part in the exercises of the evening; rows of +small boys, washed and dressed to an intolerable state of discomfort; +rows of gawky big boys; snowbanks of girls and young ladies clad in +lawn and muslin and conspicuously conscious of their bare arms, their +grandmothers' ancient trinkets, their bits of pink and blue ribbon and +the flowers in their hair. All the rest of the house was filled with +non-participating scholars. + +The exercises began. 
A very little boy stood up and sheepishly +recited, "You'd scarce expect one of my age to speak in public on the +stage," etc.--accompanying himself with the painfully exact and +spasmodic gestures which a machine might have used--supposing the +machine to be a trifle out of order. But he got through safely, though +cruelly scared, and got a fine round of applause when he made his +manufactured bow and retired. + +A little shamefaced girl lisped, "Mary had a little lamb," etc., +performed a compassion-inspiring curtsy, got her meed of applause, and +sat down flushed and happy. + +Tom Sawyer stepped forward with conceited confidence and soared into +the unquenchable and indestructible "Give me liberty or give me death" +speech, with fine fury and frantic gesticulation, and broke down in the +middle of it. A ghastly stage-fright seized him, his legs quaked under +him and he was like to choke. True, he had the manifest sympathy of the +house but he had the house's silence, too, which was even worse than +its sympathy. The master frowned, and this completed the disaster. Tom +struggled awhile and then retired, utterly defeated. There was a weak +attempt at applause, but it died early. + +"The Boy Stood on the Burning Deck" followed; also "The Assyrian Came +Down," and other declamatory gems. Then there were reading exercises, +and a spelling fight. The meagre Latin class recited with honor. The +prime feature of the evening was in order, now--original "compositions" +by the young ladies. Each in her turn stepped forward to the edge of +the platform, cleared her throat, held up her manuscript (tied with +dainty ribbon), and proceeded to read, with labored attention to +"expression" and punctuation. The themes were the same that had been +illuminated upon similar occasions by their mothers before them, their +grandmothers, and doubtless all their ancestors in the female line +clear back to the Crusades. "Friendship" was one; "Memories of Other +Days"; "Religion in History"; "Dream Land"; "The Advantages of +Culture"; "Forms of Political Government Compared and Contrasted"; +"Melancholy"; "Filial Love"; "Heart Longings," etc., etc. + +A prevalent feature in these compositions was a nursed and petted +melancholy; another was a wasteful and opulent gush of "fine language"; +another was a tendency to lug in by the ears particularly prized words +and phrases until they were worn entirely out; and a peculiarity that +conspicuously marked and marred them was the inveterate and intolerable +sermon that wagged its crippled tail at the end of each and every one +of them. No matter what the subject might be, a brain-racking effort +was made to squirm it into some aspect or other that the moral and +religious mind could contemplate with edification. The glaring +insincerity of these sermons was not sufficient to compass the +banishment of the fashion from the schools, and it is not sufficient +to-day; it never will be sufficient while the world stands, perhaps. +There is no school in all our land where the young ladies do not feel +obliged to close their compositions with a sermon; and you will find +that the sermon of the most frivolous and the least religious girl in +the school is always the longest and the most relentlessly pious. But +enough of this. Homely truth is unpalatable. + +Let us return to the "Examination." The first composition that was +read was one entitled "Is this, then, Life?" 
Perhaps the reader can +endure an extract from it: + + "In the common walks of life, with what delightful + emotions does the youthful mind look forward to some + anticipated scene of festivity! Imagination is busy + sketching rose-tinted pictures of joy. In fancy, the + voluptuous votary of fashion sees herself amid the + festive throng, 'the observed of all observers.' Her + graceful form, arrayed in snowy robes, is whirling + through the mazes of the joyous dance; her eye is + brightest, her step is lightest in the gay assembly. + + "In such delicious fancies time quickly glides by, + and the welcome hour arrives for her entrance into + the Elysian world, of which she has had such bright + dreams. How fairy-like does everything appear to + her enchanted vision! Each new scene is more charming + than the last. But after a while she finds that + beneath this goodly exterior, all is vanity, the + flattery which once charmed her soul, now grates + harshly upon her ear; the ball-room has lost its + charms; and with wasted health and imbittered heart, + she turns away with the conviction that earthly + pleasures cannot satisfy the longings of the soul!" + +And so forth and so on. There was a buzz of gratification from time to +time during the reading, accompanied by whispered ejaculations of "How +sweet!" "How eloquent!" "So true!" etc., and after the thing had closed +with a peculiarly afflicting sermon the applause was enthusiastic. + +Then arose a slim, melancholy girl, whose face had the "interesting" +paleness that comes of pills and indigestion, and read a "poem." Two +stanzas of it will do: + + "A MISSOURI MAIDEN'S FAREWELL TO ALABAMA + + "Alabama, good-bye! I love thee well! + But yet for a while do I leave thee now! + Sad, yes, sad thoughts of thee my heart doth swell, + And burning recollections throng my brow! + For I have wandered through thy flowery woods; + Have roamed and read near Tallapoosa's stream; + Have listened to Tallassee's warring floods, + And wooed on Coosa's side Aurora's beam. + + "Yet shame I not to bear an o'er-full heart, + Nor blush to turn behind my tearful eyes; + 'Tis from no stranger land I now must part, + 'Tis to no strangers left I yield these sighs. + Welcome and home were mine within this State, + Whose vales I leave--whose spires fade fast from me + And cold must be mine eyes, and heart, and tete, + When, dear Alabama! they turn cold on thee!" + +There were very few there who knew what "tete" meant, but the poem was +very satisfactory, nevertheless. + +Next appeared a dark-complexioned, black-eyed, black-haired young +lady, who paused an impressive moment, assumed a tragic expression, and +began to read in a measured, solemn tone: + + "A VISION + + "Dark and tempestuous was night. Around the + throne on high not a single star quivered; but + the deep intonations of the heavy thunder + constantly vibrated upon the ear; whilst the + terrific lightning revelled in angry mood + through the cloudy chambers of heaven, seeming + to scorn the power exerted over its terror by + the illustrious Franklin! Even the boisterous + winds unanimously came forth from their mystic + homes, and blustered about as if to enhance by + their aid the wildness of the scene. + + "At such a time, so dark, so dreary, for human + sympathy my very spirit sighed; but instead thereof, + + "'My dearest friend, my counsellor, my comforter + and guide--My joy in grief, my second bliss + in joy,' came to my side. 
She moved like one of + those bright beings pictured in the sunny walks + of fancy's Eden by the romantic and young, a + queen of beauty unadorned save by her own + transcendent loveliness. So soft was her step, it + failed to make even a sound, and but for the + magical thrill imparted by her genial touch, as + other unobtrusive beauties, she would have glided + away un-perceived--unsought. A strange sadness + rested upon her features, like icy tears upon + the robe of December, as she pointed to the + contending elements without, and bade me contemplate + the two beings presented." + +This nightmare occupied some ten pages of manuscript and wound up with +a sermon so destructive of all hope to non-Presbyterians that it took +the first prize. This composition was considered to be the very finest +effort of the evening. The mayor of the village, in delivering the +prize to the author of it, made a warm speech in which he said that it +was by far the most "eloquent" thing he had ever listened to, and that +Daniel Webster himself might well be proud of it. + +It may be remarked, in passing, that the number of compositions in +which the word "beauteous" was over-fondled, and human experience +referred to as "life's page," was up to the usual average. + +Now the master, mellow almost to the verge of geniality, put his chair +aside, turned his back to the audience, and began to draw a map of +America on the blackboard, to exercise the geography class upon. But he +made a sad business of it with his unsteady hand, and a smothered +titter rippled over the house. He knew what the matter was, and set +himself to right it. He sponged out lines and remade them; but he only +distorted them more than ever, and the tittering was more pronounced. +He threw his entire attention upon his work, now, as if determined not +to be put down by the mirth. He felt that all eyes were fastened upon +him; he imagined he was succeeding, and yet the tittering continued; it +even manifestly increased. And well it might. There was a garret above, +pierced with a scuttle over his head; and down through this scuttle +came a cat, suspended around the haunches by a string; she had a rag +tied about her head and jaws to keep her from mewing; as she slowly +descended she curved upward and clawed at the string, she swung +downward and clawed at the intangible air. The tittering rose higher +and higher--the cat was within six inches of the absorbed teacher's +head--down, down, a little lower, and she grabbed his wig with her +desperate claws, clung to it, and was snatched up into the garret in an +instant with her trophy still in her possession! And how the light did +blaze abroad from the master's bald pate--for the sign-painter's boy +had GILDED it! + +That broke up the meeting. The boys were avenged. Vacation had come. + + NOTE:--The pretended "compositions" quoted in + this chapter are taken without alteration from a + volume entitled "Prose and Poetry, by a Western + Lady"--but they are exactly and precisely after + the schoolgirl pattern, and hence are much + happier than any mere imitations could be. + + + +CHAPTER XXII + +TOM joined the new order of Cadets of Temperance, being attracted by +the showy character of their "regalia." He promised to abstain from +smoking, chewing, and profanity as long as he remained a member. Now he +found out a new thing--namely, that to promise not to do a thing is the +surest way in the world to make a body want to go and do that very +thing. 
Tom soon found himself tormented with a desire to drink and +swear; the desire grew to be so intense that nothing but the hope of a +chance to display himself in his red sash kept him from withdrawing +from the order. Fourth of July was coming; but he soon gave that up +--gave it up before he had worn his shackles over forty-eight hours--and +fixed his hopes upon old Judge Frazer, justice of the peace, who was +apparently on his deathbed and would have a big public funeral, since +he was so high an official. During three days Tom was deeply concerned +about the Judge's condition and hungry for news of it. Sometimes his +hopes ran high--so high that he would venture to get out his regalia +and practise before the looking-glass. But the Judge had a most +discouraging way of fluctuating. At last he was pronounced upon the +mend--and then convalescent. Tom was disgusted; and felt a sense of +injury, too. He handed in his resignation at once--and that night the +Judge suffered a relapse and died. Tom resolved that he would never +trust a man like that again. + +The funeral was a fine thing. The Cadets paraded in a style calculated +to kill the late member with envy. Tom was a free boy again, however +--there was something in that. He could drink and swear, now--but found +to his surprise that he did not want to. The simple fact that he could, +took the desire away, and the charm of it. + +Tom presently wondered to find that his coveted vacation was beginning +to hang a little heavily on his hands. + +He attempted a diary--but nothing happened during three days, and so +he abandoned it. + +The first of all the negro minstrel shows came to town, and made a +sensation. Tom and Joe Harper got up a band of performers and were +happy for two days. + +Even the Glorious Fourth was in some sense a failure, for it rained +hard, there was no procession in consequence, and the greatest man in +the world (as Tom supposed), Mr. Benton, an actual United States +Senator, proved an overwhelming disappointment--for he was not +twenty-five feet high, nor even anywhere in the neighborhood of it. + +A circus came. The boys played circus for three days afterward in +tents made of rag carpeting--admission, three pins for boys, two for +girls--and then circusing was abandoned. + +A phrenologist and a mesmerizer came--and went again and left the +village duller and drearier than ever. + +There were some boys-and-girls' parties, but they were so few and so +delightful that they only made the aching voids between ache the harder. + +Becky Thatcher was gone to her Constantinople home to stay with her +parents during vacation--so there was no bright side to life anywhere. + +The dreadful secret of the murder was a chronic misery. It was a very +cancer for permanency and pain. + +Then came the measles. + +During two long weeks Tom lay a prisoner, dead to the world and its +happenings. He was very ill, he was interested in nothing. When he got +upon his feet at last and moved feebly down-town, a melancholy change +had come over everything and every creature. There had been a +"revival," and everybody had "got religion," not only the adults, but +even the boys and girls. Tom went about, hoping against hope for the +sight of one blessed sinful face, but disappointment crossed him +everywhere. He found Joe Harper studying a Testament, and turned sadly +away from the depressing spectacle. He sought Ben Rogers, and found him +visiting the poor with a basket of tracts. 
He hunted up Jim Hollis, who +called his attention to the precious blessing of his late measles as a +warning. Every boy he encountered added another ton to his depression; +and when, in desperation, he flew for refuge at last to the bosom of +Huckleberry Finn and was received with a Scriptural quotation, his +heart broke and he crept home and to bed realizing that he alone of all +the town was lost, forever and forever. + +And that night there came on a terrific storm, with driving rain, +awful claps of thunder and blinding sheets of lightning. He covered his +head with the bedclothes and waited in a horror of suspense for his +doom; for he had not the shadow of a doubt that all this hubbub was +about him. He believed he had taxed the forbearance of the powers above +to the extremity of endurance and that this was the result. It might +have seemed to him a waste of pomp and ammunition to kill a bug with a +battery of artillery, but there seemed nothing incongruous about the +getting up such an expensive thunderstorm as this to knock the turf +from under an insect like himself. + +By and by the tempest spent itself and died without accomplishing its +object. The boy's first impulse was to be grateful, and reform. His +second was to wait--for there might not be any more storms. + +The next day the doctors were back; Tom had relapsed. The three weeks +he spent on his back this time seemed an entire age. When he got abroad +at last he was hardly grateful that he had been spared, remembering how +lonely was his estate, how companionless and forlorn he was. He drifted +listlessly down the street and found Jim Hollis acting as judge in a +juvenile court that was trying a cat for murder, in the presence of her +victim, a bird. He found Joe Harper and Huck Finn up an alley eating a +stolen melon. Poor lads! they--like Tom--had suffered a relapse. + + + +CHAPTER XXIII + +AT last the sleepy atmosphere was stirred--and vigorously: the murder +trial came on in the court. It became the absorbing topic of village +talk immediately. Tom could not get away from it. Every reference to +the murder sent a shudder to his heart, for his troubled conscience and +fears almost persuaded him that these remarks were put forth in his +hearing as "feelers"; he did not see how he could be suspected of +knowing anything about the murder, but still he could not be +comfortable in the midst of this gossip. It kept him in a cold shiver +all the time. He took Huck to a lonely place to have a talk with him. +It would be some relief to unseal his tongue for a little while; to +divide his burden of distress with another sufferer. Moreover, he +wanted to assure himself that Huck had remained discreet. + +"Huck, have you ever told anybody about--that?" + +"'Bout what?" + +"You know what." + +"Oh--'course I haven't." + +"Never a word?" + +"Never a solitary word, so help me. What makes you ask?" + +"Well, I was afeard." + +"Why, Tom Sawyer, we wouldn't be alive two days if that got found out. +YOU know that." + +Tom felt more comfortable. After a pause: + +"Huck, they couldn't anybody get you to tell, could they?" + +"Get me to tell? Why, if I wanted that half-breed devil to drownd me +they could get me to tell. They ain't no different way." + +"Well, that's all right, then. I reckon we're safe as long as we keep +mum. But let's swear again, anyway. It's more surer." + +"I'm agreed." + +So they swore again with dread solemnities. + +"What is the talk around, Huck? I've heard a power of it." + +"Talk? 
Well, it's just Muff Potter, Muff Potter, Muff Potter all the +time. It keeps me in a sweat, constant, so's I want to hide som'ers." + +"That's just the same way they go on round me. I reckon he's a goner. +Don't you feel sorry for him, sometimes?" + +"Most always--most always. He ain't no account; but then he hain't +ever done anything to hurt anybody. Just fishes a little, to get money +to get drunk on--and loafs around considerable; but lord, we all do +that--leastways most of us--preachers and such like. But he's kind of +good--he give me half a fish, once, when there warn't enough for two; +and lots of times he's kind of stood by me when I was out of luck." + +"Well, he's mended kites for me, Huck, and knitted hooks on to my +line. I wish we could get him out of there." + +"My! we couldn't get him out, Tom. And besides, 'twouldn't do any +good; they'd ketch him again." + +"Yes--so they would. But I hate to hear 'em abuse him so like the +dickens when he never done--that." + +"I do too, Tom. Lord, I hear 'em say he's the bloodiest looking +villain in this country, and they wonder he wasn't ever hung before." + +"Yes, they talk like that, all the time. I've heard 'em say that if he +was to get free they'd lynch him." + +"And they'd do it, too." + +The boys had a long talk, but it brought them little comfort. As the +twilight drew on, they found themselves hanging about the neighborhood +of the little isolated jail, perhaps with an undefined hope that +something would happen that might clear away their difficulties. But +nothing happened; there seemed to be no angels or fairies interested in +this luckless captive. + +The boys did as they had often done before--went to the cell grating +and gave Potter some tobacco and matches. He was on the ground floor +and there were no guards. + +His gratitude for their gifts had always smote their consciences +before--it cut deeper than ever, this time. They felt cowardly and +treacherous to the last degree when Potter said: + +"You've been mighty good to me, boys--better'n anybody else in this +town. And I don't forget it, I don't. Often I says to myself, says I, +'I used to mend all the boys' kites and things, and show 'em where the +good fishin' places was, and befriend 'em what I could, and now they've +all forgot old Muff when he's in trouble; but Tom don't, and Huck +don't--THEY don't forget him, says I, 'and I don't forget them.' Well, +boys, I done an awful thing--drunk and crazy at the time--that's the +only way I account for it--and now I got to swing for it, and it's +right. Right, and BEST, too, I reckon--hope so, anyway. Well, we won't +talk about that. I don't want to make YOU feel bad; you've befriended +me. But what I want to say, is, don't YOU ever get drunk--then you won't +ever get here. Stand a litter furder west--so--that's it; it's a prime +comfort to see faces that's friendly when a body's in such a muck of +trouble, and there don't none come here but yourn. Good friendly +faces--good friendly faces. Git up on one another's backs and let me +touch 'em. That's it. Shake hands--yourn'll come through the bars, but +mine's too big. Little hands, and weak--but they've helped Muff Potter +a power, and they'd help him more if they could." + +Tom went home miserable, and his dreams that night were full of +horrors. The next day and the day after, he hung about the court-room, +drawn by an almost irresistible impulse to go in, but forcing himself +to stay out. Huck was having the same experience. They studiously +avoided each other. 
Each wandered away, from time to time, but the same +dismal fascination always brought them back presently. Tom kept his +ears open when idlers sauntered out of the court-room, but invariably +heard distressing news--the toils were closing more and more +relentlessly around poor Potter. At the end of the second day the +village talk was to the effect that Injun Joe's evidence stood firm and +unshaken, and that there was not the slightest question as to what the +jury's verdict would be. + +Tom was out late, that night, and came to bed through the window. He +was in a tremendous state of excitement. It was hours before he got to +sleep. All the village flocked to the court-house the next morning, for +this was to be the great day. Both sexes were about equally represented +in the packed audience. After a long wait the jury filed in and took +their places; shortly afterward, Potter, pale and haggard, timid and +hopeless, was brought in, with chains upon him, and seated where all +the curious eyes could stare at him; no less conspicuous was Injun Joe, +stolid as ever. There was another pause, and then the judge arrived and +the sheriff proclaimed the opening of the court. The usual whisperings +among the lawyers and gathering together of papers followed. These +details and accompanying delays worked up an atmosphere of preparation +that was as impressive as it was fascinating. + +Now a witness was called who testified that he found Muff Potter +washing in the brook, at an early hour of the morning that the murder +was discovered, and that he immediately sneaked away. After some +further questioning, counsel for the prosecution said: + +"Take the witness." + +The prisoner raised his eyes for a moment, but dropped them again when +his own counsel said: + +"I have no questions to ask him." + +The next witness proved the finding of the knife near the corpse. +Counsel for the prosecution said: + +"Take the witness." + +"I have no questions to ask him," Potter's lawyer replied. + +A third witness swore he had often seen the knife in Potter's +possession. + +"Take the witness." + +Counsel for Potter declined to question him. The faces of the audience +began to betray annoyance. Did this attorney mean to throw away his +client's life without an effort? + +Several witnesses deposed concerning Potter's guilty behavior when +brought to the scene of the murder. They were allowed to leave the +stand without being cross-questioned. + +Every detail of the damaging circumstances that occurred in the +graveyard upon that morning which all present remembered so well was +brought out by credible witnesses, but none of them were cross-examined +by Potter's lawyer. The perplexity and dissatisfaction of the house +expressed itself in murmurs and provoked a reproof from the bench. +Counsel for the prosecution now said: + +"By the oaths of citizens whose simple word is above suspicion, we +have fastened this awful crime, beyond all possibility of question, +upon the unhappy prisoner at the bar. We rest our case here." + +A groan escaped from poor Potter, and he put his face in his hands and +rocked his body softly to and fro, while a painful silence reigned in +the court-room. Many men were moved, and many women's compassion +testified itself in tears. Counsel for the defence rose and said: + +"Your honor, in our remarks at the opening of this trial, we +foreshadowed our purpose to prove that our client did this fearful deed +while under the influence of a blind and irresponsible delirium +produced by drink. 
We have changed our mind. We shall not offer that +plea." [Then to the clerk:] "Call Thomas Sawyer!" + +A puzzled amazement awoke in every face in the house, not even +excepting Potter's. Every eye fastened itself with wondering interest +upon Tom as he rose and took his place upon the stand. The boy looked +wild enough, for he was badly scared. The oath was administered. + +"Thomas Sawyer, where were you on the seventeenth of June, about the +hour of midnight?" + +Tom glanced at Injun Joe's iron face and his tongue failed him. The +audience listened breathless, but the words refused to come. After a +few moments, however, the boy got a little of his strength back, and +managed to put enough of it into his voice to make part of the house +hear: + +"In the graveyard!" + +"A little bit louder, please. Don't be afraid. You were--" + +"In the graveyard." + +A contemptuous smile flitted across Injun Joe's face. + +"Were you anywhere near Horse Williams' grave?" + +"Yes, sir." + +"Speak up--just a trifle louder. How near were you?" + +"Near as I am to you." + +"Were you hidden, or not?" + +"I was hid." + +"Where?" + +"Behind the elms that's on the edge of the grave." + +Injun Joe gave a barely perceptible start. + +"Any one with you?" + +"Yes, sir. I went there with--" + +"Wait--wait a moment. Never mind mentioning your companion's name. We +will produce him at the proper time. Did you carry anything there with +you." + +Tom hesitated and looked confused. + +"Speak out, my boy--don't be diffident. The truth is always +respectable. What did you take there?" + +"Only a--a--dead cat." + +There was a ripple of mirth, which the court checked. + +"We will produce the skeleton of that cat. Now, my boy, tell us +everything that occurred--tell it in your own way--don't skip anything, +and don't be afraid." + +Tom began--hesitatingly at first, but as he warmed to his subject his +words flowed more and more easily; in a little while every sound ceased +but his own voice; every eye fixed itself upon him; with parted lips +and bated breath the audience hung upon his words, taking no note of +time, rapt in the ghastly fascinations of the tale. The strain upon +pent emotion reached its climax when the boy said: + +"--and as the doctor fetched the board around and Muff Potter fell, +Injun Joe jumped with the knife and--" + +Crash! Quick as lightning the half-breed sprang for a window, tore his +way through all opposers, and was gone! + + + +CHAPTER XXIV + +TOM was a glittering hero once more--the pet of the old, the envy of +the young. His name even went into immortal print, for the village +paper magnified him. There were some that believed he would be +President, yet, if he escaped hanging. + +As usual, the fickle, unreasoning world took Muff Potter to its bosom +and fondled him as lavishly as it had abused him before. But that sort +of conduct is to the world's credit; therefore it is not well to find +fault with it. + +Tom's days were days of splendor and exultation to him, but his nights +were seasons of horror. Injun Joe infested all his dreams, and always +with doom in his eye. Hardly any temptation could persuade the boy to +stir abroad after nightfall. Poor Huck was in the same state of +wretchedness and terror, for Tom had told the whole story to the lawyer +the night before the great day of the trial, and Huck was sore afraid +that his share in the business might leak out, yet, notwithstanding +Injun Joe's flight had saved him the suffering of testifying in court. 
+The poor fellow had got the attorney to promise secrecy, but what of +that? Since Tom's harassed conscience had managed to drive him to the +lawyer's house by night and wring a dread tale from lips that had been +sealed with the dismalest and most formidable of oaths, Huck's +confidence in the human race was well-nigh obliterated. + +Daily Muff Potter's gratitude made Tom glad he had spoken; but nightly +he wished he had sealed up his tongue. + +Half the time Tom was afraid Injun Joe would never be captured; the +other half he was afraid he would be. He felt sure he never could draw +a safe breath again until that man was dead and he had seen the corpse. + +Rewards had been offered, the country had been scoured, but no Injun +Joe was found. One of those omniscient and awe-inspiring marvels, a +detective, came up from St. Louis, moused around, shook his head, +looked wise, and made that sort of astounding success which members of +that craft usually achieve. That is to say, he "found a clew." But you +can't hang a "clew" for murder, and so after that detective had got +through and gone home, Tom felt just as insecure as he was before. + +The slow days drifted on, and each left behind it a slightly lightened +weight of apprehension. + + + +CHAPTER XXV + +THERE comes a time in every rightly-constructed boy's life when he has +a raging desire to go somewhere and dig for hidden treasure. This +desire suddenly came upon Tom one day. He sallied out to find Joe +Harper, but failed of success. Next he sought Ben Rogers; he had gone +fishing. Presently he stumbled upon Huck Finn the Red-Handed. Huck +would answer. Tom took him to a private place and opened the matter to +him confidentially. Huck was willing. Huck was always willing to take a +hand in any enterprise that offered entertainment and required no +capital, for he had a troublesome superabundance of that sort of time +which is not money. "Where'll we dig?" said Huck. + +"Oh, most anywhere." + +"Why, is it hid all around?" + +"No, indeed it ain't. It's hid in mighty particular places, Huck +--sometimes on islands, sometimes in rotten chests under the end of a +limb of an old dead tree, just where the shadow falls at midnight; but +mostly under the floor in ha'nted houses." + +"Who hides it?" + +"Why, robbers, of course--who'd you reckon? Sunday-school +sup'rintendents?" + +"I don't know. If 'twas mine I wouldn't hide it; I'd spend it and have +a good time." + +"So would I. But robbers don't do that way. They always hide it and +leave it there." + +"Don't they come after it any more?" + +"No, they think they will, but they generally forget the marks, or +else they die. Anyway, it lays there a long time and gets rusty; and by +and by somebody finds an old yellow paper that tells how to find the +marks--a paper that's got to be ciphered over about a week because it's +mostly signs and hy'roglyphics." + +"Hyro--which?" + +"Hy'roglyphics--pictures and things, you know, that don't seem to mean +anything." + +"Have you got one of them papers, Tom?" + +"No." + +"Well then, how you going to find the marks?" + +"I don't want any marks. They always bury it under a ha'nted house or +on an island, or under a dead tree that's got one limb sticking out. +Well, we've tried Jackson's Island a little, and we can try it again +some time; and there's the old ha'nted house up the Still-House branch, +and there's lots of dead-limb trees--dead loads of 'em." + +"Is it under all of them?" + +"How you talk! No!" + +"Then how you going to know which one to go for?" 
+ +"Go for all of 'em!" + +"Why, Tom, it'll take all summer." + +"Well, what of that? Suppose you find a brass pot with a hundred +dollars in it, all rusty and gray, or rotten chest full of di'monds. +How's that?" + +Huck's eyes glowed. + +"That's bully. Plenty bully enough for me. Just you gimme the hundred +dollars and I don't want no di'monds." + +"All right. But I bet you I ain't going to throw off on di'monds. Some +of 'em's worth twenty dollars apiece--there ain't any, hardly, but's +worth six bits or a dollar." + +"No! Is that so?" + +"Cert'nly--anybody'll tell you so. Hain't you ever seen one, Huck?" + +"Not as I remember." + +"Oh, kings have slathers of them." + +"Well, I don' know no kings, Tom." + +"I reckon you don't. But if you was to go to Europe you'd see a raft +of 'em hopping around." + +"Do they hop?" + +"Hop?--your granny! No!" + +"Well, what did you say they did, for?" + +"Shucks, I only meant you'd SEE 'em--not hopping, of course--what do +they want to hop for?--but I mean you'd just see 'em--scattered around, +you know, in a kind of a general way. Like that old humpbacked Richard." + +"Richard? What's his other name?" + +"He didn't have any other name. Kings don't have any but a given name." + +"No?" + +"But they don't." + +"Well, if they like it, Tom, all right; but I don't want to be a king +and have only just a given name, like a nigger. But say--where you +going to dig first?" + +"Well, I don't know. S'pose we tackle that old dead-limb tree on the +hill t'other side of Still-House branch?" + +"I'm agreed." + +So they got a crippled pick and a shovel, and set out on their +three-mile tramp. They arrived hot and panting, and threw themselves +down in the shade of a neighboring elm to rest and have a smoke. + +"I like this," said Tom. + +"So do I." + +"Say, Huck, if we find a treasure here, what you going to do with your +share?" + +"Well, I'll have pie and a glass of soda every day, and I'll go to +every circus that comes along. I bet I'll have a gay time." + +"Well, ain't you going to save any of it?" + +"Save it? What for?" + +"Why, so as to have something to live on, by and by." + +"Oh, that ain't any use. Pap would come back to thish-yer town some +day and get his claws on it if I didn't hurry up, and I tell you he'd +clean it out pretty quick. What you going to do with yourn, Tom?" + +"I'm going to buy a new drum, and a sure-'nough sword, and a red +necktie and a bull pup, and get married." + +"Married!" + +"That's it." + +"Tom, you--why, you ain't in your right mind." + +"Wait--you'll see." + +"Well, that's the foolishest thing you could do. Look at pap and my +mother. Fight! Why, they used to fight all the time. I remember, mighty +well." + +"That ain't anything. The girl I'm going to marry won't fight." + +"Tom, I reckon they're all alike. They'll all comb a body. Now you +better think 'bout this awhile. I tell you you better. What's the name +of the gal?" + +"It ain't a gal at all--it's a girl." + +"It's all the same, I reckon; some says gal, some says girl--both's +right, like enough. Anyway, what's her name, Tom?" + +"I'll tell you some time--not now." + +"All right--that'll do. Only if you get married I'll be more lonesomer +than ever." + +"No you won't. You'll come and live with me. Now stir out of this and +we'll go to digging." + +They worked and sweated for half an hour. No result. They toiled +another half-hour. Still no result. Huck said: + +"Do they always bury it as deep as this?" + +"Sometimes--not always. Not generally. 
I reckon we haven't got the +right place." + +So they chose a new spot and began again. The labor dragged a little, +but still they made progress. They pegged away in silence for some +time. Finally Huck leaned on his shovel, swabbed the beaded drops from +his brow with his sleeve, and said: + +"Where you going to dig next, after we get this one?" + +"I reckon maybe we'll tackle the old tree that's over yonder on +Cardiff Hill back of the widow's." + +"I reckon that'll be a good one. But won't the widow take it away from +us, Tom? It's on her land." + +"SHE take it away! Maybe she'd like to try it once. Whoever finds one +of these hid treasures, it belongs to him. It don't make any difference +whose land it's on." + +That was satisfactory. The work went on. By and by Huck said: + +"Blame it, we must be in the wrong place again. What do you think?" + +"It is mighty curious, Huck. I don't understand it. Sometimes witches +interfere. I reckon maybe that's what's the trouble now." + +"Shucks! Witches ain't got no power in the daytime." + +"Well, that's so. I didn't think of that. Oh, I know what the matter +is! What a blamed lot of fools we are! You got to find out where the +shadow of the limb falls at midnight, and that's where you dig!" + +"Then consound it, we've fooled away all this work for nothing. Now +hang it all, we got to come back in the night. It's an awful long way. +Can you get out?" + +"I bet I will. We've got to do it to-night, too, because if somebody +sees these holes they'll know in a minute what's here and they'll go +for it." + +"Well, I'll come around and maow to-night." + +"All right. Let's hide the tools in the bushes." + +The boys were there that night, about the appointed time. They sat in +the shadow waiting. It was a lonely place, and an hour made solemn by +old traditions. Spirits whispered in the rustling leaves, ghosts lurked +in the murky nooks, the deep baying of a hound floated up out of the +distance, an owl answered with his sepulchral note. The boys were +subdued by these solemnities, and talked little. By and by they judged +that twelve had come; they marked where the shadow fell, and began to +dig. Their hopes commenced to rise. Their interest grew stronger, and +their industry kept pace with it. The hole deepened and still deepened, +but every time their hearts jumped to hear the pick strike upon +something, they only suffered a new disappointment. It was only a stone +or a chunk. At last Tom said: + +"It ain't any use, Huck, we're wrong again." + +"Well, but we CAN'T be wrong. We spotted the shadder to a dot." + +"I know it, but then there's another thing." + +"What's that?". + +"Why, we only guessed at the time. Like enough it was too late or too +early." + +Huck dropped his shovel. + +"That's it," said he. "That's the very trouble. We got to give this +one up. We can't ever tell the right time, and besides this kind of +thing's too awful, here this time of night with witches and ghosts +a-fluttering around so. I feel as if something's behind me all the time; +and I'm afeard to turn around, becuz maybe there's others in front +a-waiting for a chance. I been creeping all over, ever since I got here." + +"Well, I've been pretty much so, too, Huck. They most always put in a +dead man when they bury a treasure under a tree, to look out for it." + +"Lordy!" + +"Yes, they do. I've always heard that." + +"Tom, I don't like to fool around much where there's dead people. A +body's bound to get into trouble with 'em, sure." 
+ +"I don't like to stir 'em up, either. S'pose this one here was to +stick his skull out and say something!" + +"Don't Tom! It's awful." + +"Well, it just is. Huck, I don't feel comfortable a bit." + +"Say, Tom, let's give this place up, and try somewheres else." + +"All right, I reckon we better." + +"What'll it be?" + +Tom considered awhile; and then said: + +"The ha'nted house. That's it!" + +"Blame it, I don't like ha'nted houses, Tom. Why, they're a dern sight +worse'n dead people. Dead people might talk, maybe, but they don't come +sliding around in a shroud, when you ain't noticing, and peep over your +shoulder all of a sudden and grit their teeth, the way a ghost does. I +couldn't stand such a thing as that, Tom--nobody could." + +"Yes, but, Huck, ghosts don't travel around only at night. They won't +hender us from digging there in the daytime." + +"Well, that's so. But you know mighty well people don't go about that +ha'nted house in the day nor the night." + +"Well, that's mostly because they don't like to go where a man's been +murdered, anyway--but nothing's ever been seen around that house except +in the night--just some blue lights slipping by the windows--no regular +ghosts." + +"Well, where you see one of them blue lights flickering around, Tom, +you can bet there's a ghost mighty close behind it. It stands to +reason. Becuz you know that they don't anybody but ghosts use 'em." + +"Yes, that's so. But anyway they don't come around in the daytime, so +what's the use of our being afeard?" + +"Well, all right. We'll tackle the ha'nted house if you say so--but I +reckon it's taking chances." + +They had started down the hill by this time. There in the middle of +the moonlit valley below them stood the "ha'nted" house, utterly +isolated, its fences gone long ago, rank weeds smothering the very +doorsteps, the chimney crumbled to ruin, the window-sashes vacant, a +corner of the roof caved in. The boys gazed awhile, half expecting to +see a blue light flit past a window; then talking in a low tone, as +befitted the time and the circumstances, they struck far off to the +right, to give the haunted house a wide berth, and took their way +homeward through the woods that adorned the rearward side of Cardiff +Hill. + + + +CHAPTER XXVI + +ABOUT noon the next day the boys arrived at the dead tree; they had +come for their tools. Tom was impatient to go to the haunted house; +Huck was measurably so, also--but suddenly said: + +"Lookyhere, Tom, do you know what day it is?" + +Tom mentally ran over the days of the week, and then quickly lifted +his eyes with a startled look in them-- + +"My! I never once thought of it, Huck!" + +"Well, I didn't neither, but all at once it popped onto me that it was +Friday." + +"Blame it, a body can't be too careful, Huck. We might 'a' got into an +awful scrape, tackling such a thing on a Friday." + +"MIGHT! Better say we WOULD! There's some lucky days, maybe, but +Friday ain't." + +"Any fool knows that. I don't reckon YOU was the first that found it +out, Huck." + +"Well, I never said I was, did I? And Friday ain't all, neither. I had +a rotten bad dream last night--dreampt about rats." + +"No! Sure sign of trouble. Did they fight?" + +"No." + +"Well, that's good, Huck. When they don't fight it's only a sign that +there's trouble around, you know. All we got to do is to look mighty +sharp and keep out of it. We'll drop this thing for to-day, and play. +Do you know Robin Hood, Huck?" + +"No. Who's Robin Hood?" 
+ +"Why, he was one of the greatest men that was ever in England--and the +best. He was a robber." + +"Cracky, I wisht I was. Who did he rob?" + +"Only sheriffs and bishops and rich people and kings, and such like. +But he never bothered the poor. He loved 'em. He always divided up with +'em perfectly square." + +"Well, he must 'a' been a brick." + +"I bet you he was, Huck. Oh, he was the noblest man that ever was. +They ain't any such men now, I can tell you. He could lick any man in +England, with one hand tied behind him; and he could take his yew bow +and plug a ten-cent piece every time, a mile and a half." + +"What's a YEW bow?" + +"I don't know. It's some kind of a bow, of course. And if he hit that +dime only on the edge he would set down and cry--and curse. But we'll +play Robin Hood--it's nobby fun. I'll learn you." + +"I'm agreed." + +So they played Robin Hood all the afternoon, now and then casting a +yearning eye down upon the haunted house and passing a remark about the +morrow's prospects and possibilities there. As the sun began to sink +into the west they took their way homeward athwart the long shadows of +the trees and soon were buried from sight in the forests of Cardiff +Hill. + +On Saturday, shortly after noon, the boys were at the dead tree again. +They had a smoke and a chat in the shade, and then dug a little in +their last hole, not with great hope, but merely because Tom said there +were so many cases where people had given up a treasure after getting +down within six inches of it, and then somebody else had come along and +turned it up with a single thrust of a shovel. The thing failed this +time, however, so the boys shouldered their tools and went away feeling +that they had not trifled with fortune, but had fulfilled all the +requirements that belong to the business of treasure-hunting. + +When they reached the haunted house there was something so weird and +grisly about the dead silence that reigned there under the baking sun, +and something so depressing about the loneliness and desolation of the +place, that they were afraid, for a moment, to venture in. Then they +crept to the door and took a trembling peep. They saw a weed-grown, +floorless room, unplastered, an ancient fireplace, vacant windows, a +ruinous staircase; and here, there, and everywhere hung ragged and +abandoned cobwebs. They presently entered, softly, with quickened +pulses, talking in whispers, ears alert to catch the slightest sound, +and muscles tense and ready for instant retreat. + +In a little while familiarity modified their fears and they gave the +place a critical and interested examination, rather admiring their own +boldness, and wondering at it, too. Next they wanted to look up-stairs. +This was something like cutting off retreat, but they got to daring +each other, and of course there could be but one result--they threw +their tools into a corner and made the ascent. Up there were the same +signs of decay. In one corner they found a closet that promised +mystery, but the promise was a fraud--there was nothing in it. Their +courage was up now and well in hand. They were about to go down and +begin work when-- + +"Sh!" said Tom. + +"What is it?" whispered Huck, blanching with fright. + +"Sh!... There!... Hear it?" + +"Yes!... Oh, my! Let's run!" + +"Keep still! Don't you budge! They're coming right toward the door." + +The boys stretched themselves upon the floor with their eyes to +knot-holes in the planking, and lay waiting, in a misery of fear. + +"They've stopped.... 
No--coming.... Here they are. Don't whisper +another word, Huck. My goodness, I wish I was out of this!" + +Two men entered. Each boy said to himself: "There's the old deaf and +dumb Spaniard that's been about town once or twice lately--never saw +t'other man before." + +"T'other" was a ragged, unkempt creature, with nothing very pleasant +in his face. The Spaniard was wrapped in a serape; he had bushy white +whiskers; long white hair flowed from under his sombrero, and he wore +green goggles. When they came in, "t'other" was talking in a low voice; +they sat down on the ground, facing the door, with their backs to the +wall, and the speaker continued his remarks. His manner became less +guarded and his words more distinct as he proceeded: + +"No," said he, "I've thought it all over, and I don't like it. It's +dangerous." + +"Dangerous!" grunted the "deaf and dumb" Spaniard--to the vast +surprise of the boys. "Milksop!" + +This voice made the boys gasp and quake. It was Injun Joe's! There was +silence for some time. Then Joe said: + +"What's any more dangerous than that job up yonder--but nothing's come +of it." + +"That's different. Away up the river so, and not another house about. +'Twon't ever be known that we tried, anyway, long as we didn't succeed." + +"Well, what's more dangerous than coming here in the daytime!--anybody +would suspicion us that saw us." + +"I know that. But there warn't any other place as handy after that +fool of a job. I want to quit this shanty. I wanted to yesterday, only +it warn't any use trying to stir out of here, with those infernal boys +playing over there on the hill right in full view." + +"Those infernal boys" quaked again under the inspiration of this +remark, and thought how lucky it was that they had remembered it was +Friday and concluded to wait a day. They wished in their hearts they +had waited a year. + +The two men got out some food and made a luncheon. After a long and +thoughtful silence, Injun Joe said: + +"Look here, lad--you go back up the river where you belong. Wait there +till you hear from me. I'll take the chances on dropping into this town +just once more, for a look. We'll do that 'dangerous' job after I've +spied around a little and think things look well for it. Then for +Texas! We'll leg it together!" + +This was satisfactory. Both men presently fell to yawning, and Injun +Joe said: + +"I'm dead for sleep! It's your turn to watch." + +He curled down in the weeds and soon began to snore. His comrade +stirred him once or twice and he became quiet. Presently the watcher +began to nod; his head drooped lower and lower, both men began to snore +now. + +The boys drew a long, grateful breath. Tom whispered: + +"Now's our chance--come!" + +Huck said: + +"I can't--I'd die if they was to wake." + +Tom urged--Huck held back. At last Tom rose slowly and softly, and +started alone. But the first step he made wrung such a hideous creak +from the crazy floor that he sank down almost dead with fright. He +never made a second attempt. The boys lay there counting the dragging +moments till it seemed to them that time must be done and eternity +growing gray; and then they were grateful to note that at last the sun +was setting. + +Now one snore ceased. Injun Joe sat up, stared around--smiled grimly +upon his comrade, whose head was drooping upon his knees--stirred him +up with his foot and said: + +"Here! YOU'RE a watchman, ain't you! All right, though--nothing's +happened." + +"My! have I been asleep?" + +"Oh, partly, partly. 
Nearly time for us to be moving, pard. What'll we +do with what little swag we've got left?" + +"I don't know--leave it here as we've always done, I reckon. No use to +take it away till we start south. Six hundred and fifty in silver's +something to carry." + +"Well--all right--it won't matter to come here once more." + +"No--but I'd say come in the night as we used to do--it's better." + +"Yes: but look here; it may be a good while before I get the right +chance at that job; accidents might happen; 'tain't in such a very good +place; we'll just regularly bury it--and bury it deep." + +"Good idea," said the comrade, who walked across the room, knelt down, +raised one of the rearward hearth-stones and took out a bag that +jingled pleasantly. He subtracted from it twenty or thirty dollars for +himself and as much for Injun Joe, and passed the bag to the latter, +who was on his knees in the corner, now, digging with his bowie-knife. + +The boys forgot all their fears, all their miseries in an instant. +With gloating eyes they watched every movement. Luck!--the splendor of +it was beyond all imagination! Six hundred dollars was money enough to +make half a dozen boys rich! Here was treasure-hunting under the +happiest auspices--there would not be any bothersome uncertainty as to +where to dig. They nudged each other every moment--eloquent nudges and +easily understood, for they simply meant--"Oh, but ain't you glad NOW +we're here!" + +Joe's knife struck upon something. + +"Hello!" said he. + +"What is it?" said his comrade. + +"Half-rotten plank--no, it's a box, I believe. Here--bear a hand and +we'll see what it's here for. Never mind, I've broke a hole." + +He reached his hand in and drew it out-- + +"Man, it's money!" + +The two men examined the handful of coins. They were gold. The boys +above were as excited as themselves, and as delighted. + +Joe's comrade said: + +"We'll make quick work of this. There's an old rusty pick over amongst +the weeds in the corner the other side of the fireplace--I saw it a +minute ago." + +He ran and brought the boys' pick and shovel. Injun Joe took the pick, +looked it over critically, shook his head, muttered something to +himself, and then began to use it. The box was soon unearthed. It was +not very large; it was iron bound and had been very strong before the +slow years had injured it. The men contemplated the treasure awhile in +blissful silence. + +"Pard, there's thousands of dollars here," said Injun Joe. + +"'Twas always said that Murrel's gang used to be around here one +summer," the stranger observed. + +"I know it," said Injun Joe; "and this looks like it, I should say." + +"Now you won't need to do that job." + +The half-breed frowned. Said he: + +"You don't know me. Least you don't know all about that thing. 'Tain't +robbery altogether--it's REVENGE!" and a wicked light flamed in his +eyes. "I'll need your help in it. When it's finished--then Texas. Go +home to your Nance and your kids, and stand by till you hear from me." + +"Well--if you say so; what'll we do with this--bury it again?" + +"Yes. [Ravishing delight overhead.] NO! by the great Sachem, no! +[Profound distress overhead.] I'd nearly forgot. That pick had fresh +earth on it! [The boys were sick with terror in a moment.] What +business has a pick and a shovel here? What business with fresh earth +on them? Who brought them here--and where are they gone? Have you heard +anybody?--seen anybody? What! bury it again and leave them to come and +see the ground disturbed? 
Not exactly--not exactly. We'll take it to my +den." + +"Why, of course! Might have thought of that before. You mean Number +One?" + +"No--Number Two--under the cross. The other place is bad--too common." + +"All right. It's nearly dark enough to start." + +Injun Joe got up and went about from window to window cautiously +peeping out. Presently he said: + +"Who could have brought those tools here? Do you reckon they can be +up-stairs?" + +The boys' breath forsook them. Injun Joe put his hand on his knife, +halted a moment, undecided, and then turned toward the stairway. The +boys thought of the closet, but their strength was gone. The steps came +creaking up the stairs--the intolerable distress of the situation woke +the stricken resolution of the lads--they were about to spring for the +closet, when there was a crash of rotten timbers and Injun Joe landed +on the ground amid the debris of the ruined stairway. He gathered +himself up cursing, and his comrade said: + +"Now what's the use of all that? If it's anybody, and they're up +there, let them STAY there--who cares? If they want to jump down, now, +and get into trouble, who objects? It will be dark in fifteen minutes +--and then let them follow us if they want to. I'm willing. In my +opinion, whoever hove those things in here caught a sight of us and +took us for ghosts or devils or something. I'll bet they're running +yet." + +Joe grumbled awhile; then he agreed with his friend that what daylight +was left ought to be economized in getting things ready for leaving. +Shortly afterward they slipped out of the house in the deepening +twilight, and moved toward the river with their precious box. + +Tom and Huck rose up, weak but vastly relieved, and stared after them +through the chinks between the logs of the house. Follow? Not they. +They were content to reach ground again without broken necks, and take +the townward track over the hill. They did not talk much. They were too +much absorbed in hating themselves--hating the ill luck that made them +take the spade and the pick there. But for that, Injun Joe never would +have suspected. He would have hidden the silver with the gold to wait +there till his "revenge" was satisfied, and then he would have had the +misfortune to find that money turn up missing. Bitter, bitter luck that +the tools were ever brought there! + +They resolved to keep a lookout for that Spaniard when he should come +to town spying out for chances to do his revengeful job, and follow him +to "Number Two," wherever that might be. Then a ghastly thought +occurred to Tom. + +"Revenge? What if he means US, Huck!" + +"Oh, don't!" said Huck, nearly fainting. + +They talked it all over, and as they entered town they agreed to +believe that he might possibly mean somebody else--at least that he +might at least mean nobody but Tom, since only Tom had testified. + +Very, very small comfort it was to Tom to be alone in danger! Company +would be a palpable improvement, he thought. + + + +CHAPTER XXVII + +THE adventure of the day mightily tormented Tom's dreams that night. +Four times he had his hands on that rich treasure and four times it +wasted to nothingness in his fingers as sleep forsook him and +wakefulness brought back the hard reality of his misfortune. As he lay +in the early morning recalling the incidents of his great adventure, he +noticed that they seemed curiously subdued and far away--somewhat as if +they had happened in another world, or in a time long gone by. 
Then it +occurred to him that the great adventure itself must be a dream! There +was one very strong argument in favor of this idea--namely, that the +quantity of coin he had seen was too vast to be real. He had never seen +as much as fifty dollars in one mass before, and he was like all boys +of his age and station in life, in that he imagined that all references +to "hundreds" and "thousands" were mere fanciful forms of speech, and +that no such sums really existed in the world. He never had supposed +for a moment that so large a sum as a hundred dollars was to be found +in actual money in any one's possession. If his notions of hidden +treasure had been analyzed, they would have been found to consist of a +handful of real dimes and a bushel of vague, splendid, ungraspable +dollars. + +But the incidents of his adventure grew sensibly sharper and clearer +under the attrition of thinking them over, and so he presently found +himself leaning to the impression that the thing might not have been a +dream, after all. This uncertainty must be swept away. He would snatch +a hurried breakfast and go and find Huck. Huck was sitting on the +gunwale of a flatboat, listlessly dangling his feet in the water and +looking very melancholy. Tom concluded to let Huck lead up to the +subject. If he did not do it, then the adventure would be proved to +have been only a dream. + +"Hello, Huck!" + +"Hello, yourself." + +Silence, for a minute. + +"Tom, if we'd 'a' left the blame tools at the dead tree, we'd 'a' got +the money. Oh, ain't it awful!" + +"'Tain't a dream, then, 'tain't a dream! Somehow I most wish it was. +Dog'd if I don't, Huck." + +"What ain't a dream?" + +"Oh, that thing yesterday. I been half thinking it was." + +"Dream! If them stairs hadn't broke down you'd 'a' seen how much dream +it was! I've had dreams enough all night--with that patch-eyed Spanish +devil going for me all through 'em--rot him!" + +"No, not rot him. FIND him! Track the money!" + +"Tom, we'll never find him. A feller don't have only one chance for +such a pile--and that one's lost. I'd feel mighty shaky if I was to see +him, anyway." + +"Well, so'd I; but I'd like to see him, anyway--and track him out--to +his Number Two." + +"Number Two--yes, that's it. I been thinking 'bout that. But I can't +make nothing out of it. What do you reckon it is?" + +"I dono. It's too deep. Say, Huck--maybe it's the number of a house!" + +"Goody!... No, Tom, that ain't it. If it is, it ain't in this +one-horse town. They ain't no numbers here." + +"Well, that's so. Lemme think a minute. Here--it's the number of a +room--in a tavern, you know!" + +"Oh, that's the trick! They ain't only two taverns. We can find out +quick." + +"You stay here, Huck, till I come." + +Tom was off at once. He did not care to have Huck's company in public +places. He was gone half an hour. He found that in the best tavern, No. +2 had long been occupied by a young lawyer, and was still so occupied. +In the less ostentatious house, No. 2 was a mystery. The +tavern-keeper's young son said it was kept locked all the time, and he +never saw anybody go into it or come out of it except at night; he did +not know any particular reason for this state of things; had had some +little curiosity, but it was rather feeble; had made the most of the +mystery by entertaining himself with the idea that that room was +"ha'nted"; had noticed that there was a light in there the night before. + +"That's what I've found out, Huck. I reckon that's the very No. 2 +we're after." 
+ +"I reckon it is, Tom. Now what you going to do?" + +"Lemme think." + +Tom thought a long time. Then he said: + +"I'll tell you. The back door of that No. 2 is the door that comes out +into that little close alley between the tavern and the old rattle trap +of a brick store. Now you get hold of all the door-keys you can find, +and I'll nip all of auntie's, and the first dark night we'll go there +and try 'em. And mind you, keep a lookout for Injun Joe, because he +said he was going to drop into town and spy around once more for a +chance to get his revenge. If you see him, you just follow him; and if +he don't go to that No. 2, that ain't the place." + +"Lordy, I don't want to foller him by myself!" + +"Why, it'll be night, sure. He mightn't ever see you--and if he did, +maybe he'd never think anything." + +"Well, if it's pretty dark I reckon I'll track him. I dono--I dono. +I'll try." + +"You bet I'll follow him, if it's dark, Huck. Why, he might 'a' found +out he couldn't get his revenge, and be going right after that money." + +"It's so, Tom, it's so. I'll foller him; I will, by jingoes!" + +"Now you're TALKING! Don't you ever weaken, Huck, and I won't." + + + +CHAPTER XXVIII + +THAT night Tom and Huck were ready for their adventure. They hung +about the neighborhood of the tavern until after nine, one watching the +alley at a distance and the other the tavern door. Nobody entered the +alley or left it; nobody resembling the Spaniard entered or left the +tavern door. The night promised to be a fair one; so Tom went home with +the understanding that if a considerable degree of darkness came on, +Huck was to come and "maow," whereupon he would slip out and try the +keys. But the night remained clear, and Huck closed his watch and +retired to bed in an empty sugar hogshead about twelve. + +Tuesday the boys had the same ill luck. Also Wednesday. But Thursday +night promised better. Tom slipped out in good season with his aunt's +old tin lantern, and a large towel to blindfold it with. He hid the +lantern in Huck's sugar hogshead and the watch began. An hour before +midnight the tavern closed up and its lights (the only ones +thereabouts) were put out. No Spaniard had been seen. Nobody had +entered or left the alley. Everything was auspicious. The blackness of +darkness reigned, the perfect stillness was interrupted only by +occasional mutterings of distant thunder. + +Tom got his lantern, lit it in the hogshead, wrapped it closely in the +towel, and the two adventurers crept in the gloom toward the tavern. +Huck stood sentry and Tom felt his way into the alley. Then there was a +season of waiting anxiety that weighed upon Huck's spirits like a +mountain. He began to wish he could see a flash from the lantern--it +would frighten him, but it would at least tell him that Tom was alive +yet. It seemed hours since Tom had disappeared. Surely he must have +fainted; maybe he was dead; maybe his heart had burst under terror and +excitement. In his uneasiness Huck found himself drawing closer and +closer to the alley; fearing all sorts of dreadful things, and +momentarily expecting some catastrophe to happen that would take away +his breath. There was not much to take away, for he seemed only able to +inhale it by thimblefuls, and his heart would soon wear itself out, the +way it was beating. Suddenly there was a flash of light and Tom came +tearing by him: "Run!" said he; "run, for your life!" 
+ +He needn't have repeated it; once was enough; Huck was making thirty +or forty miles an hour before the repetition was uttered. The boys +never stopped till they reached the shed of a deserted slaughter-house +at the lower end of the village. Just as they got within its shelter +the storm burst and the rain poured down. As soon as Tom got his breath +he said: + +"Huck, it was awful! I tried two of the keys, just as soft as I could; +but they seemed to make such a power of racket that I couldn't hardly +get my breath I was so scared. They wouldn't turn in the lock, either. +Well, without noticing what I was doing, I took hold of the knob, and +open comes the door! It warn't locked! I hopped in, and shook off the +towel, and, GREAT CAESAR'S GHOST!" + +"What!--what'd you see, Tom?" + +"Huck, I most stepped onto Injun Joe's hand!" + +"No!" + +"Yes! He was lying there, sound asleep on the floor, with his old +patch on his eye and his arms spread out." + +"Lordy, what did you do? Did he wake up?" + +"No, never budged. Drunk, I reckon. I just grabbed that towel and +started!" + +"I'd never 'a' thought of the towel, I bet!" + +"Well, I would. My aunt would make me mighty sick if I lost it." + +"Say, Tom, did you see that box?" + +"Huck, I didn't wait to look around. I didn't see the box, I didn't +see the cross. I didn't see anything but a bottle and a tin cup on the +floor by Injun Joe; yes, I saw two barrels and lots more bottles in the +room. Don't you see, now, what's the matter with that ha'nted room?" + +"How?" + +"Why, it's ha'nted with whiskey! Maybe ALL the Temperance Taverns have +got a ha'nted room, hey, Huck?" + +"Well, I reckon maybe that's so. Who'd 'a' thought such a thing? But +say, Tom, now's a mighty good time to get that box, if Injun Joe's +drunk." + +"It is, that! You try it!" + +Huck shuddered. + +"Well, no--I reckon not." + +"And I reckon not, Huck. Only one bottle alongside of Injun Joe ain't +enough. If there'd been three, he'd be drunk enough and I'd do it." + +There was a long pause for reflection, and then Tom said: + +"Lookyhere, Huck, less not try that thing any more till we know Injun +Joe's not in there. It's too scary. Now, if we watch every night, we'll +be dead sure to see him go out, some time or other, and then we'll +snatch that box quicker'n lightning." + +"Well, I'm agreed. I'll watch the whole night long, and I'll do it +every night, too, if you'll do the other part of the job." + +"All right, I will. All you got to do is to trot up Hooper Street a +block and maow--and if I'm asleep, you throw some gravel at the window +and that'll fetch me." + +"Agreed, and good as wheat!" + +"Now, Huck, the storm's over, and I'll go home. It'll begin to be +daylight in a couple of hours. You go back and watch that long, will +you?" + +"I said I would, Tom, and I will. I'll ha'nt that tavern every night +for a year! I'll sleep all day and I'll stand watch all night." + +"That's all right. Now, where you going to sleep?" + +"In Ben Rogers' hayloft. He lets me, and so does his pap's nigger man, +Uncle Jake. I tote water for Uncle Jake whenever he wants me to, and +any time I ask him he gives me a little something to eat if he can +spare it. That's a mighty good nigger, Tom. He likes me, becuz I don't +ever act as if I was above him. Sometime I've set right down and eat +WITH him. But you needn't tell that. A body's got to do things when +he's awful hungry he wouldn't want to do as a steady thing." + +"Well, if I don't want you in the daytime, I'll let you sleep. 
I won't +come bothering around. Any time you see something's up, in the night, +just skip right around and maow." + + + +CHAPTER XXIX + +THE first thing Tom heard on Friday morning was a glad piece of news +--Judge Thatcher's family had come back to town the night before. Both +Injun Joe and the treasure sunk into secondary importance for a moment, +and Becky took the chief place in the boy's interest. He saw her and +they had an exhausting good time playing "hi-spy" and "gully-keeper" +with a crowd of their school-mates. The day was completed and crowned +in a peculiarly satisfactory way: Becky teased her mother to appoint +the next day for the long-promised and long-delayed picnic, and she +consented. The child's delight was boundless; and Tom's not more +moderate. The invitations were sent out before sunset, and straightway +the young folks of the village were thrown into a fever of preparation +and pleasurable anticipation. Tom's excitement enabled him to keep +awake until a pretty late hour, and he had good hopes of hearing Huck's +"maow," and of having his treasure to astonish Becky and the picnickers +with, next day; but he was disappointed. No signal came that night. + +Morning came, eventually, and by ten or eleven o'clock a giddy and +rollicking company were gathered at Judge Thatcher's, and everything +was ready for a start. It was not the custom for elderly people to mar +the picnics with their presence. The children were considered safe +enough under the wings of a few young ladies of eighteen and a few +young gentlemen of twenty-three or thereabouts. The old steam ferryboat +was chartered for the occasion; presently the gay throng filed up the +main street laden with provision-baskets. Sid was sick and had to miss +the fun; Mary remained at home to entertain him. The last thing Mrs. +Thatcher said to Becky, was: + +"You'll not get back till late. Perhaps you'd better stay all night +with some of the girls that live near the ferry-landing, child." + +"Then I'll stay with Susy Harper, mamma." + +"Very well. And mind and behave yourself and don't be any trouble." + +Presently, as they tripped along, Tom said to Becky: + +"Say--I'll tell you what we'll do. 'Stead of going to Joe Harper's +we'll climb right up the hill and stop at the Widow Douglas'. She'll +have ice-cream! She has it most every day--dead loads of it. And she'll +be awful glad to have us." + +"Oh, that will be fun!" + +Then Becky reflected a moment and said: + +"But what will mamma say?" + +"How'll she ever know?" + +The girl turned the idea over in her mind, and said reluctantly: + +"I reckon it's wrong--but--" + +"But shucks! Your mother won't know, and so what's the harm? All she +wants is that you'll be safe; and I bet you she'd 'a' said go there if +she'd 'a' thought of it. I know she would!" + +The Widow Douglas' splendid hospitality was a tempting bait. It and +Tom's persuasions presently carried the day. So it was decided to say +nothing anybody about the night's programme. Presently it occurred to +Tom that maybe Huck might come this very night and give the signal. The +thought took a deal of the spirit out of his anticipations. Still he +could not bear to give up the fun at Widow Douglas'. And why should he +give it up, he reasoned--the signal did not come the night before, so +why should it be any more likely to come to-night? 
The sure fun of the +evening outweighed the uncertain treasure; and, boy-like, he determined +to yield to the stronger inclination and not allow himself to think of +the box of money another time that day. + +Three miles below town the ferryboat stopped at the mouth of a woody +hollow and tied up. The crowd swarmed ashore and soon the forest +distances and craggy heights echoed far and near with shoutings and +laughter. All the different ways of getting hot and tired were gone +through with, and by-and-by the rovers straggled back to camp fortified +with responsible appetites, and then the destruction of the good things +began. After the feast there was a refreshing season of rest and chat +in the shade of spreading oaks. By-and-by somebody shouted: + +"Who's ready for the cave?" + +Everybody was. Bundles of candles were procured, and straightway there +was a general scamper up the hill. The mouth of the cave was up the +hillside--an opening shaped like a letter A. Its massive oaken door +stood unbarred. Within was a small chamber, chilly as an ice-house, and +walled by Nature with solid limestone that was dewy with a cold sweat. +It was romantic and mysterious to stand here in the deep gloom and look +out upon the green valley shining in the sun. But the impressiveness of +the situation quickly wore off, and the romping began again. The moment +a candle was lighted there was a general rush upon the owner of it; a +struggle and a gallant defence followed, but the candle was soon +knocked down or blown out, and then there was a glad clamor of laughter +and a new chase. But all things have an end. By-and-by the procession +went filing down the steep descent of the main avenue, the flickering +rank of lights dimly revealing the lofty walls of rock almost to their +point of junction sixty feet overhead. This main avenue was not more +than eight or ten feet wide. Every few steps other lofty and still +narrower crevices branched from it on either hand--for McDougal's cave +was but a vast labyrinth of crooked aisles that ran into each other and +out again and led nowhere. It was said that one might wander days and +nights together through its intricate tangle of rifts and chasms, and +never find the end of the cave; and that he might go down, and down, +and still down, into the earth, and it was just the same--labyrinth +under labyrinth, and no end to any of them. No man "knew" the cave. +That was an impossible thing. Most of the young men knew a portion of +it, and it was not customary to venture much beyond this known portion. +Tom Sawyer knew as much of the cave as any one. + +The procession moved along the main avenue some three-quarters of a +mile, and then groups and couples began to slip aside into branch +avenues, fly along the dismal corridors, and take each other by +surprise at points where the corridors joined again. Parties were able +to elude each other for the space of half an hour without going beyond +the "known" ground. + +By-and-by, one group after another came straggling back to the mouth +of the cave, panting, hilarious, smeared from head to foot with tallow +drippings, daubed with clay, and entirely delighted with the success of +the day. Then they were astonished to find that they had been taking no +note of time and that night was about at hand. The clanging bell had +been calling for half an hour. However, this sort of close to the day's +adventures was romantic and therefore satisfactory. 
When the ferryboat +with her wild freight pushed into the stream, nobody cared sixpence for +the wasted time but the captain of the craft. + +Huck was already upon his watch when the ferryboat's lights went +glinting past the wharf. He heard no noise on board, for the young +people were as subdued and still as people usually are who are nearly +tired to death. He wondered what boat it was, and why she did not stop +at the wharf--and then he dropped her out of his mind and put his +attention upon his business. The night was growing cloudy and dark. Ten +o'clock came, and the noise of vehicles ceased, scattered lights began +to wink out, all straggling foot-passengers disappeared, the village +betook itself to its slumbers and left the small watcher alone with the +silence and the ghosts. Eleven o'clock came, and the tavern lights were +put out; darkness everywhere, now. Huck waited what seemed a weary long +time, but nothing happened. His faith was weakening. Was there any use? +Was there really any use? Why not give it up and turn in? + +A noise fell upon his ear. He was all attention in an instant. The +alley door closed softly. He sprang to the corner of the brick store. +The next moment two men brushed by him, and one seemed to have +something under his arm. It must be that box! So they were going to +remove the treasure. Why call Tom now? It would be absurd--the men +would get away with the box and never be found again. No, he would +stick to their wake and follow them; he would trust to the darkness for +security from discovery. So communing with himself, Huck stepped out +and glided along behind the men, cat-like, with bare feet, allowing +them to keep just far enough ahead not to be invisible. + +They moved up the river street three blocks, then turned to the left +up a cross-street. They went straight ahead, then, until they came to +the path that led up Cardiff Hill; this they took. They passed by the +old Welshman's house, half-way up the hill, without hesitating, and +still climbed upward. Good, thought Huck, they will bury it in the old +quarry. But they never stopped at the quarry. They passed on, up the +summit. They plunged into the narrow path between the tall sumach +bushes, and were at once hidden in the gloom. Huck closed up and +shortened his distance, now, for they would never be able to see him. +He trotted along awhile; then slackened his pace, fearing he was +gaining too fast; moved on a piece, then stopped altogether; listened; +no sound; none, save that he seemed to hear the beating of his own +heart. The hooting of an owl came over the hill--ominous sound! But no +footsteps. Heavens, was everything lost! He was about to spring with +winged feet, when a man cleared his throat not four feet from him! +Huck's heart shot into his throat, but he swallowed it again; and then +he stood there shaking as if a dozen agues had taken charge of him at +once, and so weak that he thought he must surely fall to the ground. He +knew where he was. He knew he was within five steps of the stile +leading into Widow Douglas' grounds. Very well, he thought, let them +bury it there; it won't be hard to find. + +Now there was a voice--a very low voice--Injun Joe's: + +"Damn her, maybe she's got company--there's lights, late as it is." + +"I can't see any." + +This was that stranger's voice--the stranger of the haunted house. A +deadly chill went to Huck's heart--this, then, was the "revenge" job! +His thought was, to fly. 
Then he remembered that the Widow Douglas had +been kind to him more than once, and maybe these men were going to +murder her. He wished he dared venture to warn her; but he knew he +didn't dare--they might come and catch him. He thought all this and +more in the moment that elapsed between the stranger's remark and Injun +Joe's next--which was-- + +"Because the bush is in your way. Now--this way--now you see, don't +you?" + +"Yes. Well, there IS company there, I reckon. Better give it up." + +"Give it up, and I just leaving this country forever! Give it up and +maybe never have another chance. I tell you again, as I've told you +before, I don't care for her swag--you may have it. But her husband was +rough on me--many times he was rough on me--and mainly he was the +justice of the peace that jugged me for a vagrant. And that ain't all. +It ain't a millionth part of it! He had me HORSEWHIPPED!--horsewhipped +in front of the jail, like a nigger!--with all the town looking on! +HORSEWHIPPED!--do you understand? He took advantage of me and died. But +I'll take it out of HER." + +"Oh, don't kill her! Don't do that!" + +"Kill? Who said anything about killing? I would kill HIM if he was +here; but not her. When you want to get revenge on a woman you don't +kill her--bosh! you go for her looks. You slit her nostrils--you notch +her ears like a sow!" + +"By God, that's--" + +"Keep your opinion to yourself! It will be safest for you. I'll tie +her to the bed. If she bleeds to death, is that my fault? I'll not cry, +if she does. My friend, you'll help me in this thing--for MY sake +--that's why you're here--I mightn't be able alone. If you flinch, I'll +kill you. Do you understand that? And if I have to kill you, I'll kill +her--and then I reckon nobody'll ever know much about who done this +business." + +"Well, if it's got to be done, let's get at it. The quicker the +better--I'm all in a shiver." + +"Do it NOW? And company there? Look here--I'll get suspicious of you, +first thing you know. No--we'll wait till the lights are out--there's +no hurry." + +Huck felt that a silence was going to ensue--a thing still more awful +than any amount of murderous talk; so he held his breath and stepped +gingerly back; planted his foot carefully and firmly, after balancing, +one-legged, in a precarious way and almost toppling over, first on one +side and then on the other. He took another step back, with the same +elaboration and the same risks; then another and another, and--a twig +snapped under his foot! His breath stopped and he listened. There was +no sound--the stillness was perfect. His gratitude was measureless. Now +he turned in his tracks, between the walls of sumach bushes--turned +himself as carefully as if he were a ship--and then stepped quickly but +cautiously along. When he emerged at the quarry he felt secure, and so +he picked up his nimble heels and flew. Down, down he sped, till he +reached the Welshman's. He banged at the door, and presently the heads +of the old man and his two stalwart sons were thrust from windows. + +"What's the row there? Who's banging? What do you want?" + +"Let me in--quick! I'll tell everything." + +"Why, who are you?" + +"Huckleberry Finn--quick, let me in!" + +"Huckleberry Finn, indeed! It ain't a name to open many doors, I +judge! But let him in, lads, and let's see what's the trouble." + +"Please don't ever tell I told you," were Huck's first words when he +got in. 
"Please don't--I'd be killed, sure--but the widow's been good +friends to me sometimes, and I want to tell--I WILL tell if you'll +promise you won't ever say it was me." + +"By George, he HAS got something to tell, or he wouldn't act so!" +exclaimed the old man; "out with it and nobody here'll ever tell, lad." + +Three minutes later the old man and his sons, well armed, were up the +hill, and just entering the sumach path on tiptoe, their weapons in +their hands. Huck accompanied them no further. He hid behind a great +bowlder and fell to listening. There was a lagging, anxious silence, +and then all of a sudden there was an explosion of firearms and a cry. + +Huck waited for no particulars. He sprang away and sped down the hill +as fast as his legs could carry him. + + + +CHAPTER XXX + +AS the earliest suspicion of dawn appeared on Sunday morning, Huck +came groping up the hill and rapped gently at the old Welshman's door. +The inmates were asleep, but it was a sleep that was set on a +hair-trigger, on account of the exciting episode of the night. A call +came from a window: + +"Who's there!" + +Huck's scared voice answered in a low tone: + +"Please let me in! It's only Huck Finn!" + +"It's a name that can open this door night or day, lad!--and welcome!" + +These were strange words to the vagabond boy's ears, and the +pleasantest he had ever heard. He could not recollect that the closing +word had ever been applied in his case before. The door was quickly +unlocked, and he entered. Huck was given a seat and the old man and his +brace of tall sons speedily dressed themselves. + +"Now, my boy, I hope you're good and hungry, because breakfast will be +ready as soon as the sun's up, and we'll have a piping hot one, too +--make yourself easy about that! I and the boys hoped you'd turn up and +stop here last night." + +"I was awful scared," said Huck, "and I run. I took out when the +pistols went off, and I didn't stop for three mile. I've come now becuz +I wanted to know about it, you know; and I come before daylight becuz I +didn't want to run across them devils, even if they was dead." + +"Well, poor chap, you do look as if you'd had a hard night of it--but +there's a bed here for you when you've had your breakfast. No, they +ain't dead, lad--we are sorry enough for that. You see we knew right +where to put our hands on them, by your description; so we crept along +on tiptoe till we got within fifteen feet of them--dark as a cellar +that sumach path was--and just then I found I was going to sneeze. It +was the meanest kind of luck! I tried to keep it back, but no use +--'twas bound to come, and it did come! I was in the lead with my pistol +raised, and when the sneeze started those scoundrels a-rustling to get +out of the path, I sung out, 'Fire boys!' and blazed away at the place +where the rustling was. So did the boys. But they were off in a jiffy, +those villains, and we after them, down through the woods. I judge we +never touched them. They fired a shot apiece as they started, but their +bullets whizzed by and didn't do us any harm. As soon as we lost the +sound of their feet we quit chasing, and went down and stirred up the +constables. They got a posse together, and went off to guard the river +bank, and as soon as it is light the sheriff and a gang are going to +beat up the woods. My boys will be with them presently. I wish we had +some sort of description of those rascals--'twould help a good deal. +But you couldn't see what they were like, in the dark, lad, I suppose?" 
+ +"Oh yes; I saw them down-town and follered them." + +"Splendid! Describe them--describe them, my boy!" + +"One's the old deaf and dumb Spaniard that's ben around here once or +twice, and t'other's a mean-looking, ragged--" + +"That's enough, lad, we know the men! Happened on them in the woods +back of the widow's one day, and they slunk away. Off with you, boys, +and tell the sheriff--get your breakfast to-morrow morning!" + +The Welshman's sons departed at once. As they were leaving the room +Huck sprang up and exclaimed: + +"Oh, please don't tell ANYbody it was me that blowed on them! Oh, +please!" + +"All right if you say it, Huck, but you ought to have the credit of +what you did." + +"Oh no, no! Please don't tell!" + +When the young men were gone, the old Welshman said: + +"They won't tell--and I won't. But why don't you want it known?" + +Huck would not explain, further than to say that he already knew too +much about one of those men and would not have the man know that he +knew anything against him for the whole world--he would be killed for +knowing it, sure. + +The old man promised secrecy once more, and said: + +"How did you come to follow these fellows, lad? Were they looking +suspicious?" + +Huck was silent while he framed a duly cautious reply. Then he said: + +"Well, you see, I'm a kind of a hard lot,--least everybody says so, +and I don't see nothing agin it--and sometimes I can't sleep much, on +account of thinking about it and sort of trying to strike out a new way +of doing. That was the way of it last night. I couldn't sleep, and so I +come along up-street 'bout midnight, a-turning it all over, and when I +got to that old shackly brick store by the Temperance Tavern, I backed +up agin the wall to have another think. Well, just then along comes +these two chaps slipping along close by me, with something under their +arm, and I reckoned they'd stole it. One was a-smoking, and t'other one +wanted a light; so they stopped right before me and the cigars lit up +their faces and I see that the big one was the deaf and dumb Spaniard, +by his white whiskers and the patch on his eye, and t'other one was a +rusty, ragged-looking devil." + +"Could you see the rags by the light of the cigars?" + +This staggered Huck for a moment. Then he said: + +"Well, I don't know--but somehow it seems as if I did." + +"Then they went on, and you--" + +"Follered 'em--yes. That was it. I wanted to see what was up--they +sneaked along so. I dogged 'em to the widder's stile, and stood in the +dark and heard the ragged one beg for the widder, and the Spaniard +swear he'd spile her looks just as I told you and your two--" + +"What! The DEAF AND DUMB man said all that!" + +Huck had made another terrible mistake! He was trying his best to keep +the old man from getting the faintest hint of who the Spaniard might +be, and yet his tongue seemed determined to get him into trouble in +spite of all he could do. He made several efforts to creep out of his +scrape, but the old man's eye was upon him and he made blunder after +blunder. Presently the Welshman said: + +"My boy, don't be afraid of me. I wouldn't hurt a hair of your head +for all the world. No--I'd protect you--I'd protect you. This Spaniard +is not deaf and dumb; you've let that slip without intending it; you +can't cover that up now. You know something about that Spaniard that +you want to keep dark. Now trust me--tell me what it is, and trust me +--I won't betray you." 
+ +Huck looked into the old man's honest eyes a moment, then bent over +and whispered in his ear: + +"'Tain't a Spaniard--it's Injun Joe!" + +The Welshman almost jumped out of his chair. In a moment he said: + +"It's all plain enough, now. When you talked about notching ears and +slitting noses I judged that that was your own embellishment, because +white men don't take that sort of revenge. But an Injun! That's a +different matter altogether." + +During breakfast the talk went on, and in the course of it the old man +said that the last thing which he and his sons had done, before going +to bed, was to get a lantern and examine the stile and its vicinity for +marks of blood. They found none, but captured a bulky bundle of-- + +"Of WHAT?" + +If the words had been lightning they could not have leaped with a more +stunning suddenness from Huck's blanched lips. His eyes were staring +wide, now, and his breath suspended--waiting for the answer. The +Welshman started--stared in return--three seconds--five seconds--ten +--then replied: + +"Of burglar's tools. Why, what's the MATTER with you?" + +Huck sank back, panting gently, but deeply, unutterably grateful. The +Welshman eyed him gravely, curiously--and presently said: + +"Yes, burglar's tools. That appears to relieve you a good deal. But +what did give you that turn? What were YOU expecting we'd found?" + +Huck was in a close place--the inquiring eye was upon him--he would +have given anything for material for a plausible answer--nothing +suggested itself--the inquiring eye was boring deeper and deeper--a +senseless reply offered--there was no time to weigh it, so at a venture +he uttered it--feebly: + +"Sunday-school books, maybe." + +Poor Huck was too distressed to smile, but the old man laughed loud +and joyously, shook up the details of his anatomy from head to foot, +and ended by saying that such a laugh was money in a-man's pocket, +because it cut down the doctor's bill like everything. Then he added: + +"Poor old chap, you're white and jaded--you ain't well a bit--no +wonder you're a little flighty and off your balance. But you'll come +out of it. Rest and sleep will fetch you out all right, I hope." + +Huck was irritated to think he had been such a goose and betrayed such +a suspicious excitement, for he had dropped the idea that the parcel +brought from the tavern was the treasure, as soon as he had heard the +talk at the widow's stile. He had only thought it was not the treasure, +however--he had not known that it wasn't--and so the suggestion of a +captured bundle was too much for his self-possession. But on the whole +he felt glad the little episode had happened, for now he knew beyond +all question that that bundle was not THE bundle, and so his mind was +at rest and exceedingly comfortable. In fact, everything seemed to be +drifting just in the right direction, now; the treasure must be still +in No. 2, the men would be captured and jailed that day, and he and Tom +could seize the gold that night without any trouble or any fear of +interruption. + +Just as breakfast was completed there was a knock at the door. Huck +jumped for a hiding-place, for he had no mind to be connected even +remotely with the late event. The Welshman admitted several ladies and +gentlemen, among them the Widow Douglas, and noticed that groups of +citizens were climbing up the hill--to stare at the stile. So the news +had spread. The Welshman had to tell the story of the night to the +visitors. The widow's gratitude for her preservation was outspoken. 
+ +"Don't say a word about it, madam. There's another that you're more +beholden to than you are to me and my boys, maybe, but he don't allow +me to tell his name. We wouldn't have been there but for him." + +Of course this excited a curiosity so vast that it almost belittled +the main matter--but the Welshman allowed it to eat into the vitals of +his visitors, and through them be transmitted to the whole town, for he +refused to part with his secret. When all else had been learned, the +widow said: + +"I went to sleep reading in bed and slept straight through all that +noise. Why didn't you come and wake me?" + +"We judged it warn't worth while. Those fellows warn't likely to come +again--they hadn't any tools left to work with, and what was the use of +waking you up and scaring you to death? My three negro men stood guard +at your house all the rest of the night. They've just come back." + +More visitors came, and the story had to be told and retold for a +couple of hours more. + +There was no Sabbath-school during day-school vacation, but everybody +was early at church. The stirring event was well canvassed. News came +that not a sign of the two villains had been yet discovered. When the +sermon was finished, Judge Thatcher's wife dropped alongside of Mrs. +Harper as she moved down the aisle with the crowd and said: + +"Is my Becky going to sleep all day? I just expected she would be +tired to death." + +"Your Becky?" + +"Yes," with a startled look--"didn't she stay with you last night?" + +"Why, no." + +Mrs. Thatcher turned pale, and sank into a pew, just as Aunt Polly, +talking briskly with a friend, passed by. Aunt Polly said: + +"Good-morning, Mrs. Thatcher. Good-morning, Mrs. Harper. I've got a +boy that's turned up missing. I reckon my Tom stayed at your house last +night--one of you. And now he's afraid to come to church. I've got to +settle with him." + +Mrs. Thatcher shook her head feebly and turned paler than ever. + +"He didn't stay with us," said Mrs. Harper, beginning to look uneasy. +A marked anxiety came into Aunt Polly's face. + +"Joe Harper, have you seen my Tom this morning?" + +"No'm." + +"When did you see him last?" + +Joe tried to remember, but was not sure he could say. The people had +stopped moving out of church. Whispers passed along, and a boding +uneasiness took possession of every countenance. Children were +anxiously questioned, and young teachers. They all said they had not +noticed whether Tom and Becky were on board the ferryboat on the +homeward trip; it was dark; no one thought of inquiring if any one was +missing. One young man finally blurted out his fear that they were +still in the cave! Mrs. Thatcher swooned away. Aunt Polly fell to +crying and wringing her hands. + +The alarm swept from lip to lip, from group to group, from street to +street, and within five minutes the bells were wildly clanging and the +whole town was up! The Cardiff Hill episode sank into instant +insignificance, the burglars were forgotten, horses were saddled, +skiffs were manned, the ferryboat ordered out, and before the horror +was half an hour old, two hundred men were pouring down highroad and +river toward the cave. + +All the long afternoon the village seemed empty and dead. Many women +visited Aunt Polly and Mrs. Thatcher and tried to comfort them. They +cried with them, too, and that was still better than words. All the +tedious night the town waited for news; but when the morning dawned at +last, all the word that came was, "Send more candles--and send food." +Mrs. 
Thatcher was almost crazed; and Aunt Polly, also. Judge Thatcher +sent messages of hope and encouragement from the cave, but they +conveyed no real cheer. + +The old Welshman came home toward daylight, spattered with +candle-grease, smeared with clay, and almost worn out. He found Huck +still in the bed that had been provided for him, and delirious with +fever. The physicians were all at the cave, so the Widow Douglas came +and took charge of the patient. She said she would do her best by him, +because, whether he was good, bad, or indifferent, he was the Lord's, +and nothing that was the Lord's was a thing to be neglected. The +Welshman said Huck had good spots in him, and the widow said: + +"You can depend on it. That's the Lord's mark. He don't leave it off. +He never does. Puts it somewhere on every creature that comes from his +hands." + +Early in the forenoon parties of jaded men began to straggle into the +village, but the strongest of the citizens continued searching. All the +news that could be gained was that remotenesses of the cavern were +being ransacked that had never been visited before; that every corner +and crevice was going to be thoroughly searched; that wherever one +wandered through the maze of passages, lights were to be seen flitting +hither and thither in the distance, and shoutings and pistol-shots sent +their hollow reverberations to the ear down the sombre aisles. In one +place, far from the section usually traversed by tourists, the names +"BECKY & TOM" had been found traced upon the rocky wall with +candle-smoke, and near at hand a grease-soiled bit of ribbon. Mrs. +Thatcher recognized the ribbon and cried over it. She said it was the +last relic she should ever have of her child; and that no other memorial +of her could ever be so precious, because this one parted latest from +the living body before the awful death came. Some said that now and +then, in the cave, a far-away speck of light would glimmer, and then a +glorious shout would burst forth and a score of men go trooping down the +echoing aisle--and then a sickening disappointment always followed; the +children were not there; it was only a searcher's light. + +Three dreadful days and nights dragged their tedious hours along, and +the village sank into a hopeless stupor. No one had heart for anything. +The accidental discovery, just made, that the proprietor of the +Temperance Tavern kept liquor on his premises, scarcely fluttered the +public pulse, tremendous as the fact was. In a lucid interval, Huck +feebly led up to the subject of taverns, and finally asked--dimly +dreading the worst--if anything had been discovered at the Temperance +Tavern since he had been ill. + +"Yes," said the widow. + +Huck started up in bed, wild-eyed: + +"What? What was it?" + +"Liquor!--and the place has been shut up. Lie down, child--what a turn +you did give me!" + +"Only tell me just one thing--only just one--please! Was it Tom Sawyer +that found it?" + +The widow burst into tears. "Hush, hush, child, hush! I've told you +before, you must NOT talk. You are very, very sick!" + +Then nothing but liquor had been found; there would have been a great +powwow if it had been the gold. So the treasure was gone forever--gone +forever! But what could she be crying about? Curious that she should +cry. + +These thoughts worked their dim way through Huck's mind, and under the +weariness they gave him he fell asleep. The widow said to herself: + +"There--he's asleep, poor wreck. Tom Sawyer find it! 
Pity but somebody +could find Tom Sawyer! Ah, there ain't many left, now, that's got hope +enough, or strength enough, either, to go on searching." + + + +CHAPTER XXXI + +NOW to return to Tom and Becky's share in the picnic. They tripped +along the murky aisles with the rest of the company, visiting the +familiar wonders of the cave--wonders dubbed with rather +over-descriptive names, such as "The Drawing-Room," "The Cathedral," +"Aladdin's Palace," and so on. Presently the hide-and-seek frolicking +began, and Tom and Becky engaged in it with zeal until the exertion +began to grow a trifle wearisome; then they wandered down a sinuous +avenue holding their candles aloft and reading the tangled web-work of +names, dates, post-office addresses, and mottoes with which the rocky +walls had been frescoed (in candle-smoke). Still drifting along and +talking, they scarcely noticed that they were now in a part of the cave +whose walls were not frescoed. They smoked their own names under an +overhanging shelf and moved on. Presently they came to a place where a +little stream of water, trickling over a ledge and carrying a limestone +sediment with it, had, in the slow-dragging ages, formed a laced and +ruffled Niagara in gleaming and imperishable stone. Tom squeezed his +small body behind it in order to illuminate it for Becky's +gratification. He found that it curtained a sort of steep natural +stairway which was enclosed between narrow walls, and at once the +ambition to be a discoverer seized him. Becky responded to his call, +and they made a smoke-mark for future guidance, and started upon their +quest. They wound this way and that, far down into the secret depths of +the cave, made another mark, and branched off in search of novelties to +tell the upper world about. In one place they found a spacious cavern, +from whose ceiling depended a multitude of shining stalactites of the +length and circumference of a man's leg; they walked all about it, +wondering and admiring, and presently left it by one of the numerous +passages that opened into it. This shortly brought them to a bewitching +spring, whose basin was incrusted with a frostwork of glittering +crystals; it was in the midst of a cavern whose walls were supported by +many fantastic pillars which had been formed by the joining of great +stalactites and stalagmites together, the result of the ceaseless +water-drip of centuries. Under the roof vast knots of bats had packed +themselves together, thousands in a bunch; the lights disturbed the +creatures and they came flocking down by hundreds, squeaking and +darting furiously at the candles. Tom knew their ways and the danger of +this sort of conduct. He seized Becky's hand and hurried her into the +first corridor that offered; and none too soon, for a bat struck +Becky's light out with its wing while she was passing out of the +cavern. The bats chased the children a good distance; but the fugitives +plunged into every new passage that offered, and at last got rid of the +perilous things. Tom found a subterranean lake, shortly, which +stretched its dim length away until its shape was lost in the shadows. +He wanted to explore its borders, but concluded that it would be best +to sit down and rest awhile, first. Now, for the first time, the deep +stillness of the place laid a clammy hand upon the spirits of the +children. Becky said: + +"Why, I didn't notice, but it seems ever so long since I heard any of +the others." 
+ +"Come to think, Becky, we are away down below them--and I don't know +how far away north, or south, or east, or whichever it is. We couldn't +hear them here." + +Becky grew apprehensive. + +"I wonder how long we've been down here, Tom? We better start back." + +"Yes, I reckon we better. P'raps we better." + +"Can you find the way, Tom? It's all a mixed-up crookedness to me." + +"I reckon I could find it--but then the bats. If they put our candles +out it will be an awful fix. Let's try some other way, so as not to go +through there." + +"Well. But I hope we won't get lost. It would be so awful!" and the +girl shuddered at the thought of the dreadful possibilities. + +They started through a corridor, and traversed it in silence a long +way, glancing at each new opening, to see if there was anything +familiar about the look of it; but they were all strange. Every time +Tom made an examination, Becky would watch his face for an encouraging +sign, and he would say cheerily: + +"Oh, it's all right. This ain't the one, but we'll come to it right +away!" + +But he felt less and less hopeful with each failure, and presently +began to turn off into diverging avenues at sheer random, in desperate +hope of finding the one that was wanted. He still said it was "all +right," but there was such a leaden dread at his heart that the words +had lost their ring and sounded just as if he had said, "All is lost!" +Becky clung to his side in an anguish of fear, and tried hard to keep +back the tears, but they would come. At last she said: + +"Oh, Tom, never mind the bats, let's go back that way! We seem to get +worse and worse off all the time." + +"Listen!" said he. + +Profound silence; silence so deep that even their breathings were +conspicuous in the hush. Tom shouted. The call went echoing down the +empty aisles and died out in the distance in a faint sound that +resembled a ripple of mocking laughter. + +"Oh, don't do it again, Tom, it is too horrid," said Becky. + +"It is horrid, but I better, Becky; they might hear us, you know," and +he shouted again. + +The "might" was even a chillier horror than the ghostly laughter, it +so confessed a perishing hope. The children stood still and listened; +but there was no result. Tom turned upon the back track at once, and +hurried his steps. It was but a little while before a certain +indecision in his manner revealed another fearful fact to Becky--he +could not find his way back! + +"Oh, Tom, you didn't make any marks!" + +"Becky, I was such a fool! Such a fool! I never thought we might want +to come back! No--I can't find the way. It's all mixed up." + +"Tom, Tom, we're lost! we're lost! We never can get out of this awful +place! Oh, why DID we ever leave the others!" + +She sank to the ground and burst into such a frenzy of crying that Tom +was appalled with the idea that she might die, or lose her reason. He +sat down by her and put his arms around her; she buried her face in his +bosom, she clung to him, she poured out her terrors, her unavailing +regrets, and the far echoes turned them all to jeering laughter. Tom +begged her to pluck up hope again, and she said she could not. He fell +to blaming and abusing himself for getting her into this miserable +situation; this had a better effect. She said she would try to hope +again, she would get up and follow wherever he might lead if only he +would not talk like that any more. For he was no more to blame than +she, she said. 
+ +So they moved on again--aimlessly--simply at random--all they could do +was to move, keep moving. For a little while, hope made a show of +reviving--not with any reason to back it, but only because it is its +nature to revive when the spring has not been taken out of it by age +and familiarity with failure. + +By-and-by Tom took Becky's candle and blew it out. This economy meant +so much! Words were not needed. Becky understood, and her hope died +again. She knew that Tom had a whole candle and three or four pieces in +his pockets--yet he must economize. + +By-and-by, fatigue began to assert its claims; the children tried to +pay attention, for it was dreadful to think of sitting down when time +was grown to be so precious, moving, in some direction, in any +direction, was at least progress and might bear fruit; but to sit down +was to invite death and shorten its pursuit. + +At last Becky's frail limbs refused to carry her farther. She sat +down. Tom rested with her, and they talked of home, and the friends +there, and the comfortable beds and, above all, the light! Becky cried, +and Tom tried to think of some way of comforting her, but all his +encouragements were grown threadbare with use, and sounded like +sarcasms. Fatigue bore so heavily upon Becky that she drowsed off to +sleep. Tom was grateful. He sat looking into her drawn face and saw it +grow smooth and natural under the influence of pleasant dreams; and +by-and-by a smile dawned and rested there. The peaceful face reflected +somewhat of peace and healing into his own spirit, and his thoughts +wandered away to bygone times and dreamy memories. While he was deep in +his musings, Becky woke up with a breezy little laugh--but it was +stricken dead upon her lips, and a groan followed it. + +"Oh, how COULD I sleep! I wish I never, never had waked! No! No, I +don't, Tom! Don't look so! I won't say it again." + +"I'm glad you've slept, Becky; you'll feel rested, now, and we'll find +the way out." + +"We can try, Tom; but I've seen such a beautiful country in my dream. +I reckon we are going there." + +"Maybe not, maybe not. Cheer up, Becky, and let's go on trying." + +They rose up and wandered along, hand in hand and hopeless. They tried +to estimate how long they had been in the cave, but all they knew was +that it seemed days and weeks, and yet it was plain that this could not +be, for their candles were not gone yet. A long time after this--they +could not tell how long--Tom said they must go softly and listen for +dripping water--they must find a spring. They found one presently, and +Tom said it was time to rest again. Both were cruelly tired, yet Becky +said she thought she could go a little farther. She was surprised to +hear Tom dissent. She could not understand it. They sat down, and Tom +fastened his candle to the wall in front of them with some clay. +Thought was soon busy; nothing was said for some time. Then Becky broke +the silence: + +"Tom, I am so hungry!" + +Tom took something out of his pocket. + +"Do you remember this?" said he. + +Becky almost smiled. + +"It's our wedding-cake, Tom." + +"Yes--I wish it was as big as a barrel, for it's all we've got." + +"I saved it from the picnic for us to dream on, Tom, the way grown-up +people do with wedding-cake--but it'll be our--" + +She dropped the sentence where it was. Tom divided the cake and Becky +ate with good appetite, while Tom nibbled at his moiety. There was +abundance of cold water to finish the feast with. By-and-by Becky +suggested that they move on again. 
Tom was silent a moment. Then he +said: + +"Becky, can you bear it if I tell you something?" + +Becky's face paled, but she thought she could. + +"Well, then, Becky, we must stay here, where there's water to drink. +That little piece is our last candle!" + +Becky gave loose to tears and wailings. Tom did what he could to +comfort her, but with little effect. At length Becky said: + +"Tom!" + +"Well, Becky?" + +"They'll miss us and hunt for us!" + +"Yes, they will! Certainly they will!" + +"Maybe they're hunting for us now, Tom." + +"Why, I reckon maybe they are. I hope they are." + +"When would they miss us, Tom?" + +"When they get back to the boat, I reckon." + +"Tom, it might be dark then--would they notice we hadn't come?" + +"I don't know. But anyway, your mother would miss you as soon as they +got home." + +A frightened look in Becky's face brought Tom to his senses and he saw +that he had made a blunder. Becky was not to have gone home that night! +The children became silent and thoughtful. In a moment a new burst of +grief from Becky showed Tom that the thing in his mind had struck hers +also--that the Sabbath morning might be half spent before Mrs. Thatcher +discovered that Becky was not at Mrs. Harper's. + +The children fastened their eyes upon their bit of candle and watched +it melt slowly and pitilessly away; saw the half inch of wick stand +alone at last; saw the feeble flame rise and fall, climb the thin +column of smoke, linger at its top a moment, and then--the horror of +utter darkness reigned! + +How long afterward it was that Becky came to a slow consciousness that +she was crying in Tom's arms, neither could tell. All that they knew +was, that after what seemed a mighty stretch of time, both awoke out of +a dead stupor of sleep and resumed their miseries once more. Tom said +it might be Sunday, now--maybe Monday. He tried to get Becky to talk, +but her sorrows were too oppressive, all her hopes were gone. Tom said +that they must have been missed long ago, and no doubt the search was +going on. He would shout and maybe some one would come. He tried it; +but in the darkness the distant echoes sounded so hideously that he +tried it no more. + +The hours wasted away, and hunger came to torment the captives again. +A portion of Tom's half of the cake was left; they divided and ate it. +But they seemed hungrier than before. The poor morsel of food only +whetted desire. + +By-and-by Tom said: + +"SH! Did you hear that?" + +Both held their breath and listened. There was a sound like the +faintest, far-off shout. Instantly Tom answered it, and leading Becky +by the hand, started groping down the corridor in its direction. +Presently he listened again; again the sound was heard, and apparently +a little nearer. + +"It's them!" said Tom; "they're coming! Come along, Becky--we're all +right now!" + +The joy of the prisoners was almost overwhelming. Their speed was +slow, however, because pitfalls were somewhat common, and had to be +guarded against. They shortly came to one and had to stop. It might be +three feet deep, it might be a hundred--there was no passing it at any +rate. Tom got down on his breast and reached as far down as he could. +No bottom. They must stay there and wait until the searchers came. They +listened; evidently the distant shoutings were growing more distant! a +moment or two more and they had gone altogether. The heart-sinking +misery of it! Tom whooped until he was hoarse, but it was of no use. 
He +talked hopefully to Becky; but an age of anxious waiting passed and no +sounds came again. + +The children groped their way back to the spring. The weary time +dragged on; they slept again, and awoke famished and woe-stricken. Tom +believed it must be Tuesday by this time. + +Now an idea struck him. There were some side passages near at hand. It +would be better to explore some of these than bear the weight of the +heavy time in idleness. He took a kite-line from his pocket, tied it to +a projection, and he and Becky started, Tom in the lead, unwinding the +line as he groped along. At the end of twenty steps the corridor ended +in a "jumping-off place." Tom got down on his knees and felt below, and +then as far around the corner as he could reach with his hands +conveniently; he made an effort to stretch yet a little farther to the +right, and at that moment, not twenty yards away, a human hand, holding +a candle, appeared from behind a rock! Tom lifted up a glorious shout, +and instantly that hand was followed by the body it belonged to--Injun +Joe's! Tom was paralyzed; he could not move. He was vastly gratified +the next moment, to see the "Spaniard" take to his heels and get +himself out of sight. Tom wondered that Joe had not recognized his +voice and come over and killed him for testifying in court. But the +echoes must have disguised the voice. Without doubt, that was it, he +reasoned. Tom's fright weakened every muscle in his body. He said to +himself that if he had strength enough to get back to the spring he +would stay there, and nothing should tempt him to run the risk of +meeting Injun Joe again. He was careful to keep from Becky what it was +he had seen. He told her he had only shouted "for luck." + +But hunger and wretchedness rise superior to fears in the long run. +Another tedious wait at the spring and another long sleep brought +changes. The children awoke tortured with a raging hunger. Tom believed +that it must be Wednesday or Thursday or even Friday or Saturday, now, +and that the search had been given over. He proposed to explore another +passage. He felt willing to risk Injun Joe and all other terrors. But +Becky was very weak. She had sunk into a dreary apathy and would not be +roused. She said she would wait, now, where she was, and die--it would +not be long. She told Tom to go with the kite-line and explore if he +chose; but she implored him to come back every little while and speak +to her; and she made him promise that when the awful time came, he +would stay by her and hold her hand until all was over. + +Tom kissed her, with a choking sensation in his throat, and made a +show of being confident of finding the searchers or an escape from the +cave; then he took the kite-line in his hand and went groping down one +of the passages on his hands and knees, distressed with hunger and sick +with bodings of coming doom. + + + +CHAPTER XXXII + +TUESDAY afternoon came, and waned to the twilight. The village of St. +Petersburg still mourned. The lost children had not been found. Public +prayers had been offered up for them, and many and many a private +prayer that had the petitioner's whole heart in it; but still no good +news came from the cave. The majority of the searchers had given up the +quest and gone back to their daily avocations, saying that it was plain +the children could never be found. Mrs. Thatcher was very ill, and a +great part of the time delirious. 
People said it was heartbreaking to +hear her call her child, and raise her head and listen a whole minute +at a time, then lay it wearily down again with a moan. Aunt Polly had +drooped into a settled melancholy, and her gray hair had grown almost +white. The village went to its rest on Tuesday night, sad and forlorn. + +Away in the middle of the night a wild peal burst from the village +bells, and in a moment the streets were swarming with frantic half-clad +people, who shouted, "Turn out! turn out! they're found! they're +found!" Tin pans and horns were added to the din, the population massed +itself and moved toward the river, met the children coming in an open +carriage drawn by shouting citizens, thronged around it, joined its +homeward march, and swept magnificently up the main street roaring +huzzah after huzzah! + +The village was illuminated; nobody went to bed again; it was the +greatest night the little town had ever seen. During the first half-hour +a procession of villagers filed through Judge Thatcher's house, seized +the saved ones and kissed them, squeezed Mrs. Thatcher's hand, tried to +speak but couldn't--and drifted out raining tears all over the place. + +Aunt Polly's happiness was complete, and Mrs. Thatcher's nearly so. It +would be complete, however, as soon as the messenger dispatched with +the great news to the cave should get the word to her husband. Tom lay +upon a sofa with an eager auditory about him and told the history of +the wonderful adventure, putting in many striking additions to adorn it +withal; and closed with a description of how he left Becky and went on +an exploring expedition; how he followed two avenues as far as his +kite-line would reach; how he followed a third to the fullest stretch of +the kite-line, and was about to turn back when he glimpsed a far-off +speck that looked like daylight; dropped the line and groped toward it, +pushed his head and shoulders through a small hole, and saw the broad +Mississippi rolling by! And if it had only happened to be night he would +not have seen that speck of daylight and would not have explored that +passage any more! He told how he went back for Becky and broke the good +news and she told him not to fret her with such stuff, for she was +tired, and knew she was going to die, and wanted to. He described how he +labored with her and convinced her; and how she almost died for joy when +she had groped to where she actually saw the blue speck of daylight; how +he pushed his way out at the hole and then helped her out; how they sat +there and cried for gladness; how some men came along in a skiff and Tom +hailed them and told them their situation and their famished condition; +how the men didn't believe the wild tale at first, "because," said they, +"you are five miles down the river below the valley the cave is in" +--then took them aboard, rowed to a house, gave them supper, made them +rest till two or three hours after dark and then brought them home. + +Before day-dawn, Judge Thatcher and the handful of searchers with him +were tracked out, in the cave, by the twine clews they had strung +behind them, and informed of the great news. + +Three days and nights of toil and hunger in the cave were not to be +shaken off at once, as Tom and Becky soon discovered. They were +bedridden all of Wednesday and Thursday, and seemed to grow more and +more tired and worn, all the time. 
Tom got about, a little, on +Thursday, was down-town Friday, and nearly as whole as ever Saturday; +but Becky did not leave her room until Sunday, and then she looked as +if she had passed through a wasting illness. + +Tom learned of Huck's sickness and went to see him on Friday, but +could not be admitted to the bedroom; neither could he on Saturday or +Sunday. He was admitted daily after that, but was warned to keep still +about his adventure and introduce no exciting topic. The Widow Douglas +stayed by to see that he obeyed. At home Tom learned of the Cardiff +Hill event; also that the "ragged man's" body had eventually been found +in the river near the ferry-landing; he had been drowned while trying +to escape, perhaps. + +About a fortnight after Tom's rescue from the cave, he started off to +visit Huck, who had grown plenty strong enough, now, to hear exciting +talk, and Tom had some that would interest him, he thought. Judge +Thatcher's house was on Tom's way, and he stopped to see Becky. The +Judge and some friends set Tom to talking, and some one asked him +ironically if he wouldn't like to go to the cave again. Tom said he +thought he wouldn't mind it. The Judge said: + +"Well, there are others just like you, Tom, I've not the least doubt. +But we have taken care of that. Nobody will get lost in that cave any +more." + +"Why?" + +"Because I had its big door sheathed with boiler iron two weeks ago, +and triple-locked--and I've got the keys." + +Tom turned as white as a sheet. + +"What's the matter, boy! Here, run, somebody! Fetch a glass of water!" + +The water was brought and thrown into Tom's face. + +"Ah, now you're all right. What was the matter with you, Tom?" + +"Oh, Judge, Injun Joe's in the cave!" + + + +CHAPTER XXXIII + +WITHIN a few minutes the news had spread, and a dozen skiff-loads of +men were on their way to McDougal's cave, and the ferryboat, well +filled with passengers, soon followed. Tom Sawyer was in the skiff that +bore Judge Thatcher. + +When the cave door was unlocked, a sorrowful sight presented itself in +the dim twilight of the place. Injun Joe lay stretched upon the ground, +dead, with his face close to the crack of the door, as if his longing +eyes had been fixed, to the latest moment, upon the light and the cheer +of the free world outside. Tom was touched, for he knew by his own +experience how this wretch had suffered. His pity was moved, but +nevertheless he felt an abounding sense of relief and security, now, +which revealed to him in a degree which he had not fully appreciated +before how vast a weight of dread had been lying upon him since the day +he lifted his voice against this bloody-minded outcast. + +Injun Joe's bowie-knife lay close by, its blade broken in two. The +great foundation-beam of the door had been chipped and hacked through, +with tedious labor; useless labor, too, it was, for the native rock +formed a sill outside it, and upon that stubborn material the knife had +wrought no effect; the only damage done was to the knife itself. But if +there had been no stony obstruction there the labor would have been +useless still, for if the beam had been wholly cut away Injun Joe could +not have squeezed his body under the door, and he knew it. So he had +only hacked that place in order to be doing something--in order to pass +the weary time--in order to employ his tortured faculties. Ordinarily +one could find half a dozen bits of candle stuck around in the crevices +of this vestibule, left there by tourists; but there were none now. 
The +prisoner had searched them out and eaten them. He had also contrived to +catch a few bats, and these, also, he had eaten, leaving only their +claws. The poor unfortunate had starved to death. In one place, near at +hand, a stalagmite had been slowly growing up from the ground for ages, +builded by the water-drip from a stalactite overhead. The captive had +broken off the stalagmite, and upon the stump had placed a stone, +wherein he had scooped a shallow hollow to catch the precious drop +that fell once in every three minutes with the dreary regularity of a +clock-tick--a dessertspoonful once in four and twenty hours. That drop +was falling when the Pyramids were new; when Troy fell; when the +foundations of Rome were laid; when Christ was crucified; when the +Conqueror created the British empire; when Columbus sailed; when the +massacre at Lexington was "news." It is falling now; it will still be +falling when all these things shall have sunk down the afternoon of +history, and the twilight of tradition, and been swallowed up in the +thick night of oblivion. Has everything a purpose and a mission? Did +this drop fall patiently during five thousand years to be ready for +this flitting human insect's need? and has it another important object +to accomplish ten thousand years to come? No matter. It is many and +many a year since the hapless half-breed scooped out the stone to catch +the priceless drops, but to this day the tourist stares longest at that +pathetic stone and that slow-dropping water when he comes to see the +wonders of McDougal's cave. Injun Joe's cup stands first in the list of +the cavern's marvels; even "Aladdin's Palace" cannot rival it. + +Injun Joe was buried near the mouth of the cave; and people flocked +there in boats and wagons from the towns and from all the farms and +hamlets for seven miles around; they brought their children, and all +sorts of provisions, and confessed that they had had almost as +satisfactory a time at the funeral as they could have had at the +hanging. + +This funeral stopped the further growth of one thing--the petition to +the governor for Injun Joe's pardon. The petition had been largely +signed; many tearful and eloquent meetings had been held, and a +committee of sappy women been appointed to go in deep mourning and wail +around the governor, and implore him to be a merciful ass and trample +his duty under foot. Injun Joe was believed to have killed five +citizens of the village, but what of that? If he had been Satan himself +there would have been plenty of weaklings ready to scribble their names +to a pardon-petition, and drip a tear on it from their permanently +impaired and leaky water-works. + +The morning after the funeral Tom took Huck to a private place to have +an important talk. Huck had learned all about Tom's adventure from the +Welshman and the Widow Douglas, by this time, but Tom said he reckoned +there was one thing they had not told him; that thing was what he +wanted to talk about now. Huck's face saddened. He said: + +"I know what it is. You got into No. 2 and never found anything but +whiskey. Nobody told me it was you; but I just knowed it must 'a' ben +you, soon as I heard 'bout that whiskey business; and I knowed you +hadn't got the money becuz you'd 'a' got at me some way or other and +told me even if you was mum to everybody else. Tom, something's always +told me we'd never get holt of that swag." + +"Why, Huck, I never told on that tavern-keeper. 
YOU know his tavern +was all right the Saturday I went to the picnic. Don't you remember you +was to watch there that night?" + +"Oh yes! Why, it seems 'bout a year ago. It was that very night that I +follered Injun Joe to the widder's." + +"YOU followed him?" + +"Yes--but you keep mum. I reckon Injun Joe's left friends behind him, +and I don't want 'em souring on me and doing me mean tricks. If it +hadn't ben for me he'd be down in Texas now, all right." + +Then Huck told his entire adventure in confidence to Tom, who had only +heard of the Welshman's part of it before. + +"Well," said Huck, presently, coming back to the main question, +"whoever nipped the whiskey in No. 2, nipped the money, too, I reckon +--anyways it's a goner for us, Tom." + +"Huck, that money wasn't ever in No. 2!" + +"What!" Huck searched his comrade's face keenly. "Tom, have you got on +the track of that money again?" + +"Huck, it's in the cave!" + +Huck's eyes blazed. + +"Say it again, Tom." + +"The money's in the cave!" + +"Tom--honest injun, now--is it fun, or earnest?" + +"Earnest, Huck--just as earnest as ever I was in my life. Will you go +in there with me and help get it out?" + +"I bet I will! I will if it's where we can blaze our way to it and not +get lost." + +"Huck, we can do that without the least little bit of trouble in the +world." + +"Good as wheat! What makes you think the money's--" + +"Huck, you just wait till we get in there. If we don't find it I'll +agree to give you my drum and every thing I've got in the world. I +will, by jings." + +"All right--it's a whiz. When do you say?" + +"Right now, if you say it. Are you strong enough?" + +"Is it far in the cave? I ben on my pins a little, three or four days, +now, but I can't walk more'n a mile, Tom--least I don't think I could." + +"It's about five mile into there the way anybody but me would go, +Huck, but there's a mighty short cut that they don't anybody but me +know about. Huck, I'll take you right to it in a skiff. I'll float the +skiff down there, and I'll pull it back again all by myself. You +needn't ever turn your hand over." + +"Less start right off, Tom." + +"All right. We want some bread and meat, and our pipes, and a little +bag or two, and two or three kite-strings, and some of these +new-fangled things they call lucifer matches. I tell you, many's +the time I wished I had some when I was in there before." + +A trifle after noon the boys borrowed a small skiff from a citizen who +was absent, and got under way at once. When they were several miles +below "Cave Hollow," Tom said: + +"Now you see this bluff here looks all alike all the way down from the +cave hollow--no houses, no wood-yards, bushes all alike. But do you see +that white place up yonder where there's been a landslide? Well, that's +one of my marks. We'll get ashore, now." + +They landed. + +"Now, Huck, where we're a-standing you could touch that hole I got out +of with a fishing-pole. See if you can find it." + +Huck searched all the place about, and found nothing. Tom proudly +marched into a thick clump of sumach bushes and said: + +"Here you are! Look at it, Huck; it's the snuggest hole in this +country. You just keep mum about it. All along I've been wanting to be +a robber, but I knew I'd got to have a thing like this, and where to +run across it was the bother. We've got it now, and we'll keep it +quiet, only we'll let Joe Harper and Ben Rogers in--because of course +there's got to be a Gang, or else there wouldn't be any style about it. 
+Tom Sawyer's Gang--it sounds splendid, don't it, Huck?" + +"Well, it just does, Tom. And who'll we rob?" + +"Oh, most anybody. Waylay people--that's mostly the way." + +"And kill them?" + +"No, not always. Hive them in the cave till they raise a ransom." + +"What's a ransom?" + +"Money. You make them raise all they can, off'n their friends; and +after you've kept them a year, if it ain't raised then you kill them. +That's the general way. Only you don't kill the women. You shut up the +women, but you don't kill them. They're always beautiful and rich, and +awfully scared. You take their watches and things, but you always take +your hat off and talk polite. They ain't anybody as polite as robbers +--you'll see that in any book. Well, the women get to loving you, and +after they've been in the cave a week or two weeks they stop crying and +after that you couldn't get them to leave. If you drove them out they'd +turn right around and come back. It's so in all the books." + +"Why, it's real bully, Tom. I believe it's better'n to be a pirate." + +"Yes, it's better in some ways, because it's close to home and +circuses and all that." + +By this time everything was ready and the boys entered the hole, Tom +in the lead. They toiled their way to the farther end of the tunnel, +then made their spliced kite-strings fast and moved on. A few steps +brought them to the spring, and Tom felt a shudder quiver all through +him. He showed Huck the fragment of candle-wick perched on a lump of +clay against the wall, and described how he and Becky had watched the +flame struggle and expire. + +The boys began to quiet down to whispers, now, for the stillness and +gloom of the place oppressed their spirits. They went on, and presently +entered and followed Tom's other corridor until they reached the +"jumping-off place." The candles revealed the fact that it was not +really a precipice, but only a steep clay hill twenty or thirty feet +high. Tom whispered: + +"Now I'll show you something, Huck." + +He held his candle aloft and said: + +"Look as far around the corner as you can. Do you see that? There--on +the big rock over yonder--done with candle-smoke." + +"Tom, it's a CROSS!" + +"NOW where's your Number Two? 'UNDER THE CROSS,' hey? Right yonder's +where I saw Injun Joe poke up his candle, Huck!" + +Huck stared at the mystic sign awhile, and then said with a shaky voice: + +"Tom, less git out of here!" + +"What! and leave the treasure?" + +"Yes--leave it. Injun Joe's ghost is round about there, certain." + +"No it ain't, Huck, no it ain't. It would ha'nt the place where he +died--away out at the mouth of the cave--five mile from here." + +"No, Tom, it wouldn't. It would hang round the money. I know the ways +of ghosts, and so do you." + +Tom began to fear that Huck was right. Misgivings gathered in his +mind. But presently an idea occurred to him-- + +"Lookyhere, Huck, what fools we're making of ourselves! Injun Joe's +ghost ain't a going to come around where there's a cross!" + +The point was well taken. It had its effect. + +"Tom, I didn't think of that. But that's so. It's luck for us, that +cross is. I reckon we'll climb down there and have a hunt for that box." + +Tom went first, cutting rude steps in the clay hill as he descended. +Huck followed. Four avenues opened out of the small cavern which the +great rock stood in. The boys examined three of them with no result. 
+They found a small recess in the one nearest the base of the rock, with +a pallet of blankets spread down in it; also an old suspender, some +bacon rind, and the well-gnawed bones of two or three fowls. But there +was no money-box. The lads searched and researched this place, but in +vain. Tom said: + +"He said UNDER the cross. Well, this comes nearest to being under the +cross. It can't be under the rock itself, because that sets solid on +the ground." + +They searched everywhere once more, and then sat down discouraged. +Huck could suggest nothing. By-and-by Tom said: + +"Lookyhere, Huck, there's footprints and some candle-grease on the +clay about one side of this rock, but not on the other sides. Now, +what's that for? I bet you the money IS under the rock. I'm going to +dig in the clay." + +"That ain't no bad notion, Tom!" said Huck with animation. + +Tom's "real Barlow" was out at once, and he had not dug four inches +before he struck wood. + +"Hey, Huck!--you hear that?" + +Huck began to dig and scratch now. Some boards were soon uncovered and +removed. They had concealed a natural chasm which led under the rock. +Tom got into this and held his candle as far under the rock as he +could, but said he could not see to the end of the rift. He proposed to +explore. He stooped and passed under; the narrow way descended +gradually. He followed its winding course, first to the right, then to +the left, Huck at his heels. Tom turned a short curve, by-and-by, and +exclaimed: + +"My goodness, Huck, lookyhere!" + +It was the treasure-box, sure enough, occupying a snug little cavern, +along with an empty powder-keg, a couple of guns in leather cases, two +or three pairs of old moccasins, a leather belt, and some other rubbish +well soaked with the water-drip. + +"Got it at last!" said Huck, ploughing among the tarnished coins with +his hand. "My, but we're rich, Tom!" + +"Huck, I always reckoned we'd get it. It's just too good to believe, +but we HAVE got it, sure! Say--let's not fool around here. Let's snake +it out. Lemme see if I can lift the box." + +It weighed about fifty pounds. Tom could lift it, after an awkward +fashion, but could not carry it conveniently. + +"I thought so," he said; "THEY carried it like it was heavy, that day +at the ha'nted house. I noticed that. I reckon I was right to think of +fetching the little bags along." + +The money was soon in the bags and the boys took it up to the cross +rock. + +"Now less fetch the guns and things," said Huck. + +"No, Huck--leave them there. They're just the tricks to have when we +go to robbing. We'll keep them there all the time, and we'll hold our +orgies there, too. It's an awful snug place for orgies." + +"What orgies?" + +"I dono. But robbers always have orgies, and of course we've got to +have them, too. Come along, Huck, we've been in here a long time. It's +getting late, I reckon. I'm hungry, too. We'll eat and smoke when we +get to the skiff." + +They presently emerged into the clump of sumach bushes, looked warily +out, found the coast clear, and were soon lunching and smoking in the +skiff. As the sun dipped toward the horizon they pushed out and got +under way. Tom skimmed up the shore through the long twilight, chatting +cheerily with Huck, and landed shortly after dark. + +"Now, Huck," said Tom, "we'll hide the money in the loft of the +widow's woodshed, and I'll come up in the morning and we'll count it +and divide, and then we'll hunt up a place out in the woods for it +where it will be safe. 
Just you lay quiet here and watch the stuff till +I run and hook Benny Taylor's little wagon; I won't be gone a minute." + +He disappeared, and presently returned with the wagon, put the two +small sacks into it, threw some old rags on top of them, and started +off, dragging his cargo behind him. When the boys reached the +Welshman's house, they stopped to rest. Just as they were about to move +on, the Welshman stepped out and said: + +"Hallo, who's that?" + +"Huck and Tom Sawyer." + +"Good! Come along with me, boys, you are keeping everybody waiting. +Here--hurry up, trot ahead--I'll haul the wagon for you. Why, it's not +as light as it might be. Got bricks in it?--or old metal?" + +"Old metal," said Tom. + +"I judged so; the boys in this town will take more trouble and fool +away more time hunting up six bits' worth of old iron to sell to the +foundry than they would to make twice the money at regular work. But +that's human nature--hurry along, hurry along!" + +The boys wanted to know what the hurry was about. + +"Never mind; you'll see, when we get to the Widow Douglas'." + +Huck said with some apprehension--for he was long used to being +falsely accused: + +"Mr. Jones, we haven't been doing nothing." + +The Welshman laughed. + +"Well, I don't know, Huck, my boy. I don't know about that. Ain't you +and the widow good friends?" + +"Yes. Well, she's ben good friends to me, anyway." + +"All right, then. What do you want to be afraid for?" + +This question was not entirely answered in Huck's slow mind before he +found himself pushed, along with Tom, into Mrs. Douglas' drawing-room. +Mr. Jones left the wagon near the door and followed. + +The place was grandly lighted, and everybody that was of any +consequence in the village was there. The Thatchers were there, the +Harpers, the Rogerses, Aunt Polly, Sid, Mary, the minister, the editor, +and a great many more, and all dressed in their best. The widow +received the boys as heartily as any one could well receive two such +looking beings. They were covered with clay and candle-grease. Aunt +Polly blushed crimson with humiliation, and frowned and shook her head +at Tom. Nobody suffered half as much as the two boys did, however. Mr. +Jones said: + +"Tom wasn't at home, yet, so I gave him up; but I stumbled on him and +Huck right at my door, and so I just brought them along in a hurry." + +"And you did just right," said the widow. "Come with me, boys." + +She took them to a bedchamber and said: + +"Now wash and dress yourselves. Here are two new suits of clothes +--shirts, socks, everything complete. They're Huck's--no, no thanks, +Huck--Mr. Jones bought one and I the other. But they'll fit both of you. +Get into them. We'll wait--come down when you are slicked up enough." + +Then she left. + + + +CHAPTER XXXIV + +HUCK said: "Tom, we can slope, if we can find a rope. The window ain't +high from the ground." + +"Shucks! what do you want to slope for?" + +"Well, I ain't used to that kind of a crowd. I can't stand it. I ain't +going down there, Tom." + +"Oh, bother! It ain't anything. I don't mind it a bit. I'll take care +of you." + +Sid appeared. + +"Tom," said he, "auntie has been waiting for you all the afternoon. +Mary got your Sunday clothes ready, and everybody's been fretting about +you. Say--ain't this grease and clay, on your clothes?" + +"Now, Mr. Siddy, you jist 'tend to your own business. What's all this +blow-out about, anyway?" + +"It's one of the widow's parties that she's always having. 
This time +it's for the Welshman and his sons, on account of that scrape they +helped her out of the other night. And say--I can tell you something, +if you want to know." + +"Well, what?" + +"Why, old Mr. Jones is going to try to spring something on the people +here to-night, but I overheard him tell auntie to-day about it, as a +secret, but I reckon it's not much of a secret now. Everybody knows +--the widow, too, for all she tries to let on she don't. Mr. Jones was +bound Huck should be here--couldn't get along with his grand secret +without Huck, you know!" + +"Secret about what, Sid?" + +"About Huck tracking the robbers to the widow's. I reckon Mr. Jones +was going to make a grand time over his surprise, but I bet you it will +drop pretty flat." + +Sid chuckled in a very contented and satisfied way. + +"Sid, was it you that told?" + +"Oh, never mind who it was. SOMEBODY told--that's enough." + +"Sid, there's only one person in this town mean enough to do that, and +that's you. If you had been in Huck's place you'd 'a' sneaked down the +hill and never told anybody on the robbers. You can't do any but mean +things, and you can't bear to see anybody praised for doing good ones. +There--no thanks, as the widow says"--and Tom cuffed Sid's ears and +helped him to the door with several kicks. "Now go and tell auntie if +you dare--and to-morrow you'll catch it!" + +Some minutes later the widow's guests were at the supper-table, and a +dozen children were propped up at little side-tables in the same room, +after the fashion of that country and that day. At the proper time Mr. +Jones made his little speech, in which he thanked the widow for the +honor she was doing himself and his sons, but said that there was +another person whose modesty-- + +And so forth and so on. He sprung his secret about Huck's share in the +adventure in the finest dramatic manner he was master of, but the +surprise it occasioned was largely counterfeit and not as clamorous and +effusive as it might have been under happier circumstances. However, +the widow made a pretty fair show of astonishment, and heaped so many +compliments and so much gratitude upon Huck that he almost forgot the +nearly intolerable discomfort of his new clothes in the entirely +intolerable discomfort of being set up as a target for everybody's gaze +and everybody's laudations. + +The widow said she meant to give Huck a home under her roof and have +him educated; and that when she could spare the money she would start +him in business in a modest way. Tom's chance was come. He said: + +"Huck don't need it. Huck's rich." + +Nothing but a heavy strain upon the good manners of the company kept +back the due and proper complimentary laugh at this pleasant joke. But +the silence was a little awkward. Tom broke it: + +"Huck's got money. Maybe you don't believe it, but he's got lots of +it. Oh, you needn't smile--I reckon I can show you. You just wait a +minute." + +Tom ran out of doors. The company looked at each other with a +perplexed interest--and inquiringly at Huck, who was tongue-tied. + +"Sid, what ails Tom?" said Aunt Polly. "He--well, there ain't ever any +making of that boy out. I never--" + +Tom entered, struggling with the weight of his sacks, and Aunt Polly +did not finish her sentence. Tom poured the mass of yellow coin upon +the table and said: + +"There--what did I tell you? Half of it's Huck's and half of it's mine!" + +The spectacle took the general breath away. All gazed, nobody spoke +for a moment. 
Then there was a unanimous call for an explanation. Tom +said he could furnish it, and he did. The tale was long, but brimful of +interest. There was scarcely an interruption from any one to break the +charm of its flow. When he had finished, Mr. Jones said: + +"I thought I had fixed up a little surprise for this occasion, but it +don't amount to anything now. This one makes it sing mighty small, I'm +willing to allow." + +The money was counted. The sum amounted to a little over twelve +thousand dollars. It was more than any one present had ever seen at one +time before, though several persons were there who were worth +considerably more than that in property. + + + +CHAPTER XXXV + +THE reader may rest satisfied that Tom's and Huck's windfall made a +mighty stir in the poor little village of St. Petersburg. So vast a +sum, all in actual cash, seemed next to incredible. It was talked +about, gloated over, glorified, until the reason of many of the +citizens tottered under the strain of the unhealthy excitement. Every +"haunted" house in St. Petersburg and the neighboring villages was +dissected, plank by plank, and its foundations dug up and ransacked for +hidden treasure--and not by boys, but men--pretty grave, unromantic +men, too, some of them. Wherever Tom and Huck appeared they were +courted, admired, stared at. The boys were not able to remember that +their remarks had possessed weight before; but now their sayings were +treasured and repeated; everything they did seemed somehow to be +regarded as remarkable; they had evidently lost the power of doing and +saying commonplace things; moreover, their past history was raked up +and discovered to bear marks of conspicuous originality. The village +paper published biographical sketches of the boys. + +The Widow Douglas put Huck's money out at six per cent., and Judge +Thatcher did the same with Tom's at Aunt Polly's request. Each lad had +an income, now, that was simply prodigious--a dollar for every week-day +in the year and half of the Sundays. It was just what the minister got +--no, it was what he was promised--he generally couldn't collect it. A +dollar and a quarter a week would board, lodge, and school a boy in +those old simple days--and clothe him and wash him, too, for that +matter. + +Judge Thatcher had conceived a great opinion of Tom. He said that no +commonplace boy would ever have got his daughter out of the cave. When +Becky told her father, in strict confidence, how Tom had taken her +whipping at school, the Judge was visibly moved; and when she pleaded +grace for the mighty lie which Tom had told in order to shift that +whipping from her shoulders to his own, the Judge said with a fine +outburst that it was a noble, a generous, a magnanimous lie--a lie that +was worthy to hold up its head and march down through history breast to +breast with George Washington's lauded Truth about the hatchet! Becky +thought her father had never looked so tall and so superb as when he +walked the floor and stamped his foot and said that. She went straight +off and told Tom about it. + +Judge Thatcher hoped to see Tom a great lawyer or a great soldier some +day. He said he meant to look to it that Tom should be admitted to the +National Military Academy and afterward trained in the best law school +in the country, in order that he might be ready for either career or +both. 
+ +Huck Finn's wealth and the fact that he was now under the Widow +Douglas' protection introduced him into society--no, dragged him into +it, hurled him into it--and his sufferings were almost more than he +could bear. The widow's servants kept him clean and neat, combed and +brushed, and they bedded him nightly in unsympathetic sheets that had +not one little spot or stain which he could press to his heart and know +for a friend. He had to eat with a knife and fork; he had to use +napkin, cup, and plate; he had to learn his book, he had to go to +church; he had to talk so properly that speech was become insipid in +his mouth; whithersoever he turned, the bars and shackles of +civilization shut him in and bound him hand and foot. + +He bravely bore his miseries three weeks, and then one day turned up +missing. For forty-eight hours the widow hunted for him everywhere in +great distress. The public were profoundly concerned; they searched +high and low, they dragged the river for his body. Early the third +morning Tom Sawyer wisely went poking among some old empty hogsheads +down behind the abandoned slaughter-house, and in one of them he found +the refugee. Huck had slept there; he had just breakfasted upon some +stolen odds and ends of food, and was lying off, now, in comfort, with +his pipe. He was unkempt, uncombed, and clad in the same old ruin of +rags that had made him picturesque in the days when he was free and +happy. Tom routed him out, told him the trouble he had been causing, +and urged him to go home. Huck's face lost its tranquil content, and +took a melancholy cast. He said: + +"Don't talk about it, Tom. I've tried it, and it don't work; it don't +work, Tom. It ain't for me; I ain't used to it. The widder's good to +me, and friendly; but I can't stand them ways. She makes me get up just +at the same time every morning; she makes me wash, they comb me all to +thunder; she won't let me sleep in the woodshed; I got to wear them +blamed clothes that just smothers me, Tom; they don't seem to any air +git through 'em, somehow; and they're so rotten nice that I can't set +down, nor lay down, nor roll around anywher's; I hain't slid on a +cellar-door for--well, it 'pears to be years; I got to go to church and +sweat and sweat--I hate them ornery sermons! I can't ketch a fly in +there, I can't chaw. I got to wear shoes all Sunday. The widder eats by +a bell; she goes to bed by a bell; she gits up by a bell--everything's +so awful reg'lar a body can't stand it." + +"Well, everybody does that way, Huck." + +"Tom, it don't make no difference. I ain't everybody, and I can't +STAND it. It's awful to be tied up so. And grub comes too easy--I don't +take no interest in vittles, that way. I got to ask to go a-fishing; I +got to ask to go in a-swimming--dern'd if I hain't got to ask to do +everything. Well, I'd got to talk so nice it wasn't no comfort--I'd got +to go up in the attic and rip out awhile, every day, to git a taste in +my mouth, or I'd a died, Tom. The widder wouldn't let me smoke; she +wouldn't let me yell, she wouldn't let me gape, nor stretch, nor +scratch, before folks--" [Then with a spasm of special irritation and +injury]--"And dad fetch it, she prayed all the time! I never see such a +woman! I HAD to shove, Tom--I just had to. And besides, that school's +going to open, and I'd a had to go to it--well, I wouldn't stand THAT, +Tom. Looky here, Tom, being rich ain't what it's cracked up to be. 
It's +just worry and worry, and sweat and sweat, and a-wishing you was dead +all the time. Now these clothes suits me, and this bar'l suits me, and +I ain't ever going to shake 'em any more. Tom, I wouldn't ever got into +all this trouble if it hadn't 'a' ben for that money; now you just take +my sheer of it along with your'n, and gimme a ten-center sometimes--not +many times, becuz I don't give a dern for a thing 'thout it's tollable +hard to git--and you go and beg off for me with the widder." + +"Oh, Huck, you know I can't do that. 'Tain't fair; and besides if +you'll try this thing just a while longer you'll come to like it." + +"Like it! Yes--the way I'd like a hot stove if I was to set on it long +enough. No, Tom, I won't be rich, and I won't live in them cussed +smothery houses. I like the woods, and the river, and hogsheads, and +I'll stick to 'em, too. Blame it all! just as we'd got guns, and a +cave, and all just fixed to rob, here this dern foolishness has got to +come up and spile it all!" + +Tom saw his opportunity-- + +"Lookyhere, Huck, being rich ain't going to keep me back from turning +robber." + +"No! Oh, good-licks; are you in real dead-wood earnest, Tom?" + +"Just as dead earnest as I'm sitting here. But Huck, we can't let you +into the gang if you ain't respectable, you know." + +Huck's joy was quenched. + +"Can't let me in, Tom? Didn't you let me go for a pirate?" + +"Yes, but that's different. A robber is more high-toned than what a +pirate is--as a general thing. In most countries they're awful high up +in the nobility--dukes and such." + +"Now, Tom, hain't you always ben friendly to me? You wouldn't shet me +out, would you, Tom? You wouldn't do that, now, WOULD you, Tom?" + +"Huck, I wouldn't want to, and I DON'T want to--but what would people +say? Why, they'd say, 'Mph! Tom Sawyer's Gang! pretty low characters in +it!' They'd mean you, Huck. You wouldn't like that, and I wouldn't." + +Huck was silent for some time, engaged in a mental struggle. Finally +he said: + +"Well, I'll go back to the widder for a month and tackle it and see if +I can come to stand it, if you'll let me b'long to the gang, Tom." + +"All right, Huck, it's a whiz! Come along, old chap, and I'll ask the +widow to let up on you a little, Huck." + +"Will you, Tom--now will you? That's good. If she'll let up on some of +the roughest things, I'll smoke private and cuss private, and crowd +through or bust. When you going to start the gang and turn robbers?" + +"Oh, right off. We'll get the boys together and have the initiation +to-night, maybe." + +"Have the which?" + +"Have the initiation." + +"What's that?" + +"It's to swear to stand by one another, and never tell the gang's +secrets, even if you're chopped all to flinders, and kill anybody and +all his family that hurts one of the gang." + +"That's gay--that's mighty gay, Tom, I tell you." + +"Well, I bet it is. And all that swearing's got to be done at +midnight, in the lonesomest, awfulest place you can find--a ha'nted +house is the best, but they're all ripped up now." + +"Well, midnight's good, anyway, Tom." + +"Yes, so it is. And you've got to swear on a coffin, and sign it with +blood." + +"Now, that's something LIKE! Why, it's a million times bullier than +pirating. I'll stick to the widder till I rot, Tom; and if I git to be +a reg'lar ripper of a robber, and everybody talking 'bout it, I reckon +she'll be proud she snaked me in out of the wet." + + + +CONCLUSION + +SO endeth this chronicle. 
It being strictly a history of a BOY, it +must stop here; the story could not go much further without becoming +the history of a MAN. When one writes a novel about grown people, he +knows exactly where to stop--that is, with a marriage; but when he +writes of juveniles, he must stop where he best can. + +Most of the characters that perform in this book still live, and are +prosperous and happy. Some day it may seem worth while to take up the +story of the younger ones again and see what sort of men and women they +turned out to be; therefore it will be wisest not to reveal any of that +part of their lives at present. + + + + + +End of the Project Gutenberg EBook of The Adventures of Tom Sawyer, Complete +by Mark Twain (Samuel Clemens) diff --git a/vendor/github.com/klauspost/compress/testdata/e.txt b/vendor/github.com/klauspost/compress/testdata/e.txt new file mode 100644 index 0000000..5ca186f --- /dev/null +++ b/vendor/github.com/klauspost/compress/testdata/e.txt @@ -0,0 +1 @@ +2.71828182845904523536028747135266249775724709369995957496696762772407663035354759457138217852516642742746639193200305992181741359662904357290033429526059563073813232862794349076323382988075319525101901157383418793070215408914993488416750924476146066808226480016847741185374234544243710753907774499206955170276183860626133138458300075204493382656029760673711320070932870912744374704723069697720931014169283681902551510865746377211125238978442505695369677078544996996794686445490598793163688923009879312773617821542499922957635148220826989519366803318252886939849646510582093923982948879332036250944311730123819706841614039701983767932068328237646480429531180232878250981945581530175671736133206981125099618188159304169035159888851934580727386673858942287922849989208680582574927961048419844436346324496848756023362482704197862320900216099023530436994184914631409343173814364054625315209618369088870701676839642437814059271456354906130310720851038375051011574770417189861068739696552126715468895703503540212340784981933432106817012100562788023519303322474501585390473041995777709350366041699732972508868769664035557071622684471625607988265178713419512466520103059212366771943252786753985589448969709640975459185695638023637016211204774272283648961342251644507818244235294863637214174023889344124796357437026375529444833799801612549227850925778256209262264832627793338656648162772516401910590049164499828931505660472580277863186415519565324425869829469593080191529872117255634754639644791014590409058629849679128740687050489585867174798546677575732056812884592054133405392200011378630094556068816674001698420558040336379537645203040243225661352783695117788386387443966253224985065499588623428189970773327617178392803494650143455889707194258639877275471096295374152111513683506275260232648472870392076431005958411661205452970302364725492966693811513732275364509888903136020572481765851180630364428123149655070475102544650117272115551948668508003685322818315219600373562527944951582841882947876108526398139559900673764829224437528718462457803619298197139914756448826260390338144182326251509748279877799643730899703888677822713836057729788241256119071766394650706330452795466185509666618566470971134447401607046262156807174818778443714369882185596709591025968620023537185887485696522000503117343920732113908032936344797273559552773490717837934216370120500545132638354400018632399149070547977805669785335804896690629511943247309958765523681285904138324116072260299833053537087613893963917795745401613722361878936526053815584158718692553860616477983402543512843961294
603529133259427949043372990857315802909586313826832914771163963370924003168945863606064584592512699465572483918656420975268508230754425459937691704197778008536273094171016343490769642372229435236612557250881477922315197477806056967253801718077636034624592787784658506560507808442115296975218908740196609066518035165017925046195013665854366327125496399085491442000145747608193022120660243300964127048943903971771951806990869986066365832322787093765022601492910115171776359446020232493002804018677239102880978666056511832600436885088171572386698422422010249505518816948032210025154264946398128736776589276881635983124778865201411741109136011649950766290779436460058519419985601626479076153210387275571269925182756879893027617611461625493564959037980458381823233686120162437365698467037858533052758333379399075216606923805336988795651372855938834998947074161815501253970646481719467083481972144888987906765037959036696724949925452790337296361626589760394985767413973594410237443297093554779826296145914429364514286171585873397467918975712119561873857836447584484235555810500256114923915188930994634284139360803830916628188115037152849670597416256282360921680751501777253874025642534708790891372917228286115159156837252416307722544063378759310598267609442032619242853170187817729602354130606721360460003896610936470951414171857770141806064436368154644400533160877831431744408119494229755993140118886833148328027065538330046932901157441475631399972217038046170928945790962716622607407187499753592127560844147378233032703301682371936480021732857349359475643341299430248502357322145978432826414216848787216733670106150942434569844018733128101079451272237378861260581656680537143961278887325273738903928905068653241380627960259303877276977837928684093253658807339884572187460210053114833513238500478271693762180049047955979592905916554705057775143081751126989851884087185640260353055837378324229241856256442550226721559802740126179719280471396006891638286652770097527670697770364392602243728418408832518487704726384403795301669054659374616193238403638931313643271376888410268112198912752230562567562547017250863497653672886059667527408686274079128565769963137897530346606166698042182677245605306607738996242183408598820718646826232150802882863597468396543588566855037731312965879758105012149162076567699506597153447634703208532156036748286083786568030730626576334697742956346437167093971930608769634953288468336130388294310408002968738691170666661468000151211434422560238744743252507693870777751932999421372772112588436087158348356269616619805725266122067975406210620806498829184543953015299820925030054982570433905535701686531205264956148572492573862069174036952135337325316663454665885972866594511364413703313936721185695539521084584072443238355860631068069649248512326326995146035960372972531983684233639046321367101161928217111502828016044880588023820319814930963695967358327420249882456849412738605664913525267060462344505492275811517093149218795927180019409688669868370373022004753143381810927080300172059355305207007060722339994639905713115870996357773590271962850611465148375262095653467132900259943976631145459026858989791158370934193704411551219201171648805669459381311838437656206278463104903462939500294583411648241149697583260118007316994373935069662957124102732391387417549230718624545432220395527352952402459038057445028922468862853365422138157221311632881120521464898051800920247193917105553901139433166815158288436876069611025051710073927623855533862725535388309606716446623709226468096712540618695021431762116681400975952814939072226011126811531
083873176173232352636058381731510345957365382235349929358228368510078108846343499835184044517042701893819942434100905753762577675711180900881641833192019626234162881665213747173254777277834887743665188287521566857195063719365653903894493664217640031215278702223664636357555035655769488865495002708539236171055021311474137441061344455441921013361729962856948991933691847294785807291560885103967819594298331864807560836795514966364489655929481878517840387733262470519450504198477420141839477312028158868457072905440575106012852580565947030468363445926525521370080687520095934536073162261187281739280746230946853678231060979215993600199462379934342106878134973469592464697525062469586169091785739765951993929939955675427146549104568607020990126068187049841780791739240719459963230602547079017745275131868099822847308607665368668555164677029113368275631072233467261137054907953658345386371962358563126183871567741187385277229225947433737856955384562468010139057278710165129666367644518724656537304024436841408144887329578473484900030194778880204603246608428753518483649591950828883232065221281041904480472479492913422849519700226013104300624107179715027934332634079959605314460532304885289729176598760166678119379323724538572096075822771784833616135826128962261181294559274627671377944875867536575448614076119311259585126557597345730153336426307679854433857617153334623252705720053039882894990342595662329757824887350292591668258944568946559926584547626945287805165017206747854178879822768065366506419109734345288783386217261562695826544782056729877564263253215942944180399432170000905426507630955884658951717091476074371368933194690909819045012903070995662266203031826493657336984195557769637876249188528656866076005660256054457113372868402055744160308370523122425872234388541231794813885500756893811249353863186352870837998456926199817945233640874295911807474534195514203517261842008455091708456823682008977394558426792142734775608796442792027083121501564063413416171664480698154837644915739001212170415478725919989438253649505147713793991472052195290793961376211072384942906163576045962312535060685376514231153496656837151166042207963944666211632551577290709784731562782775987881364919512574833287937715714590910648416426783099497236744201758622694021594079244805412553604313179926967391575424192966073123937635421392306178767539587114361040894099660894714183406983629936753626215452472984642137528910798843813060955526227208375186298370667872244301957937937860721072542772890717328548743743557819665117166183308811291202452040486822000723440350254482028342541878846536025915064452716577000445210977355858976226554849416217149895323834216001140629507184904277892585527430352213968356790180764060421383073087744601708426882722611771808426643336517800021719034492342642662922614560043373838683355553434530042648184739892156270860956506293404052649432442614456659212912256488935696550091543064261342526684725949143142393988454324863274618428466559853323122104662598901417121034460842716166190012571958707932175696985440133976220967494541854071184464339469901626983516078489245140589409463952678073545797003070511636825194877011897640028276484141605872061841852971891540196882532893091496653457535714273184820163846448324990378860690080727093276731275819665639411489617168329804551397295066876047409154204284299935410258291135022416907694316685742425225090269390348148564513030699251995904363840284292674125734224477655841778861717372654620854982944989467873509295816526320722589923687684570178230380965678831122893058091405726108658848458731016581511675333276
748870148291674197015125597825727074064318086014281490241467804723275976842696339357735429301867394397163886117642090040686633988568416810038723892144831760701166845038872123643670433140911557332801829779887365909166596124020217785588548761761619893707943800566633648843650891448055710397652146960276625835990519870423001794655367885674302859746001437854832370687011900784994049309189191816493272597740300748796814848823429320230121280323274603922196875283405169069741942576146739781107154641862733690915849731850111839604825335187484389231772926135430249325628963713619772854566229244616444972845978677115741256703078718851093363444801496752406185365695320741705334867827548278154155619669110551014727990403868972204655508331707823948087859905019475631089841241446728218654599715966390156419417518209359326163168883801327587526014605076760983926257264111201352885913178482994756824725648855333572797722055435681263025357482165854140008053148206971372621497555760518904816223767904149267426000710459226953148351881374638871042735447676235779339939706323966049691453032738878745579059349377723201429548033450006952569809352828877837106705855677494813738586303857628230406940056653405848875270053088324591821834943180498341996399814587734358631159405704436835152853836094429559643606760902217418968835481316439974377641583652422346426195973904554506806952328507518687194490647677918867203064186307510535121498510512073138466487175475183829799901893177515506399810164664145921024068382946032085355540581471592732206775676692136640815059008069525406106285364082932766219319399338616238360691117677854482361293268581999652392754884274354144028845364555951247355461394031549520973970518962401579768326394506332304521926450496517354667756992957189896904709027302885449454166997919929480382549802859460290527631455803165140662291712234293758061439934849143621079935767373179489642524888137204355792875113858569733819760835244232404667780209483996399466848337747067254836188482730006483191638260221105552212467333231844630055044818499169966220877461402161570210296033185887273332987793525701823938612440268683395558706077581699543984695685406711744449324795195721594196458637361269155264575747869859642421765928968623835063704339398116713975447362286255068036826641355414480489977213731741191999700172939073033508690209225191244473932783761563218108428982077069741387070532661176836986477417871802027294129823108887968318808543673278068797716591116542244538066258617117294980382488799865040615639756299369628093581897614910171453435566595427570641944088338168411111662007597872441370823339178861147082286575310785366746950184621407364939173662549377830140743026684221503351177364718538723240404210379077502660201148149354822289166636407824501668153412135052785785393326061102498022730936367402135153864316930152674605360643517321547010914406508788236367642368311873909374642326090216463656275539768340194829327957506243996452725786244003759834220508089351290231224759706441056783618708771723335554654825989068612014101072224659040085537982352538851716235182565184822031252149507003783004112162121260527260599443204430562745229161288917668141606391312359753503903200775295873924124764518508091639114592960711563442043471335447209811784614510778723991406062902282766643092649005922498102910687594345338583303911787475759770659535709796400122240921990311582292596679131539915614380701292607801970225896629233681543124994122594600233994722281710566039318772268004938331489803385489094686851307892920642428191747958661999444111962087304980643850068526202584328420855823
385669366498497208170461353761635840153428406741185875815465145982702286766718553093119233401912861706133648731831975608125694600894029530944291195902959685639230376899763274622839007354571445964141082292859222393328362101928229372435902830038844457013837716320565183519701001157220109569978904849644534346121292249647323561263219511557015658244276615993264631558066720531275969485380573642083849188870951760522878173394627476446568589009362661233111529108160415241002141959373497864316615567327027921095935430555797326605546779635520053783046195406369718429161685827341222171458858708142740902481854464217748769250933287856706746773812267528316535592452045780705413525769032535227389638474956462559403789249250076243868937764753101023237467337714745816255306980324990336764554303052745615129612145859444321507490514914539509810013887379263799648737283964168975551322759620118382486507469854920380976919326064376087432093856028156428497565493079097338541855835157894098140076918923890630905425348838968317629041202129491671958119357912031625143440965031328352167280213724159473440954983161383225054867081722214751384251667904454166173032008203309028954888085167972584958134071321805339888281393460498505323404725950972143314925866042485114058195797115641914588428330005256847768743059163904943068713431187961896374755033628209399493436903210319768981120555953694654247041733238953940460353253967583543953505167202616479613477909123279952649290451511483079233693821660107028726519381438448445326395173941101311525027504657493430637665418661289152644469262228843662994627324679587363835019371427864713980540382155134632237020715331348870831741465914924063594930209211220526103123906829413456967859585183934913823408842743124190991528708043328091329930789368671274139228900330699958759218152976124824091169515877899640903525773459382482320530555672380950222667904396142318529919891810655544124772045085102100715223523427925312669301082706339423217625700763231391593497099469332410139087791616512268044148097656189797350431513960669132583790337486208366954750832803187867077511775256639634792592197335779495554986552141933981702686399873883470102552620523123172152540625716367712700107609122815283265089843595689759610383721577268311707345522501941217015413187936518185020208773269061335921820007623272695032838273912438281981708711681089511878967467070733778695925655427133400523267060400043488434329027603604980278621607494696549892104744439278719345367017986739208038456337233119838558626380085163455971944419943446247611238446176157362420159350785208256006041015568898995017325543372980735616998611019084720966007083202805699170425901038769286583365577287586842504926903709342620280223998618034002113207421986429173836791762328264446457563303365567773748086441099691418277742534170109884358531893391759345115740238472929090154685591637926961968410006765983997449720472878818312002333832980305678654808714764645128242644782166442666167320960125647945148271256713266970673671446177956437523917429285039870225837340698523091904649672602434112703456111141498357839017934997137909136967064976371272484666132799082543054492955285949327938183416078270913266808656559211027337467001325834287152408356615221655749984312362782871066494015646701419437138238634547296069786933359731095371264994162826564637084905801515382053383265112895049385664687529211359322202656818564182608275387900024079158926460284908949222999661674377313477761341509652624483327093438984120569261451088578122491396169125342029181398986839013357958576244351940089439551805547465540000517662402
028259448288338118863817495942848920135200909510078649418682560092739776675856425983785874977766695633501707485790272487013702642032839657563480108183561823721770822364231865915958836694873224117265044872683923284530109916775183768315998212632371238543573126812024451754018521326637405388029012497281808950215531006735981844304291052884593230647255904423559605519788393259303395729346630551604309237856772292935372084166931345752840118737468546916206489911647269094289829710656068018058078436004618662235628745913851859044162506632222495614487244138138497637971026760208455318241119639279410696194654264800067617276181156300636443211162248373791056236113588363345501022861705178904405704195778598333484633179219044946529230214692597565663899658937477287513933771055698024557574361905017724662145875923744186575300649980566883769642298255011950658378431252321353093712352439691496623101103282435700657814876772991609411539540633627524237129355499267134850315782388995675452879155784204831057493300601979582077395585228073070489509362355507698378819263571417793387502163443910141875767119389144162771096028594158097199134293132951459243736364564730350373745385034892861131416380947523017450887848856457412750033533034161380965600431058605483557739466250332300343415878146346021692350792161110131489482818953910289168163287093097131841398154276788180676286509780857182621170031400033773015815363341490932370347036375133545376345210503709954529420552320788174493709376770560093063536455109134816273782049856570556087842119640399723445564586076895155696868993848964391952252323097033010372772277108705649129661210614940727824420334140574414464599682369661188784116562903551178399440709617725671649197901681952345238074462998776648248737533130181427639105192346850819790017965199070504908652374428416527766114253515386651627813160909648028012344933724278669308948279134654439319652541548294945778757585994820991818245224493120777682508307682823350015970404191995605097053646964731424484538258881126027539095488526397086523390529418296918023571205453282318092703564917433719320806287313035896405708737799678451747405153174013848780828810060463889367116404777559854812639075047472950126094199903737212462016770305177903529527931687663050998374418598034988212393409198050551038215398276772913731380067153392401269545863764220650978108529076390797278413017645532475270737887640693664200121947457023582954813657818098679440202202808226379570067553935758080863189320758644442066446916493344676981808117165686652133896861735924509208014653125297779661371986959164518694323242464044016723819780207283944182645021831314833660193848919723178171543721921039466384737156302267018013435159304428538489418256788707212385205972638592249347636231221881137063075069182601096890692514171425142181534915321290777237485066354891708928507602343517682183550088296474106558148820492395337022705367056307503174997881870099892510201780156010422778362836443237297799299351609258845157720552328969783331264276712910939931037734259105923032776526676418748424410765644477670977903923249584163485277351719810646738371427429744689923204069325060628344689375430167878153206160090576934049061461766070943801109154432619290007452098959592011594123241022748454826054043618718363302689928586235821456438796952102352666733724344230915771832775658002119282703910423919664269111553335945696857828170203254955525288754644660746202947661160044355516047350442921279163587484735015902155221203882811680214138658651684645699648100156337412550984797301386562754601612792463597836614801638716027944054827101
962907745436280926125675071817736417497632544367735036325800040429199069631173977878750815602273688249670776355598692849016287686996280537901818481488108339469000163807910759607455046889126867928123911488800367207297308013544313253477130941867171786075229813735391267728125939582205242899913716906856504215750567299912741771492796088315023586978161908949084877177225038608726183849479397574406649127605188781242336831254672783315131867589156683006792102159473368585912013953603016781104134444110309033887615204882969091046891676715553733466225455759752026247712427962259832784058335858976714742057240474397202328959037261486883880031741464902038435903585279931238710428459816089961019456916469838377182672646852648691729484141530046040042995850351641018990275293668674318349554474581241401907546816077709779205793838953781921288474099295370405469622265472788072486855080465710431238548733516530705707845842433355509582219128627972054554662670991319023703117796908927866231126613376711785129430593232816058265356238481641921447325437310020627384668123516910163592525882568064389463898808727352844064622081495138622752399389387349050826254724177817025820441298537604998278990200834983873629924981257423545684390230122617336658205467856711479730650770354756205674283001874730191973108811575167770050714320127263546019124608004516081086418355396699469369473222716707489728504641953929664347252547243576591929699490616701890614336169070561482809803632434541282299682759802266940456421813286245175496521472216208398245945766133427105649571935644315617745008283769357009954195418390291510331879339076142074670288679685949854397894573007689398900700739246974618128557646622654129132040522790712128206537750582800408971634671637090249067747363091369040026156464321595609108510924451624544201414426416601813859900174174082442453786101584333617772925806111591920084140918881912088582076270114836717607490469809144430572622111045833007893316981916039171506227929862827094462759150096832263450737254513668581724834984700808401638682097263713452054398022778663372932908299140106455897616974559784092114091676840202693702292317433344999869018415108889931651250900011637191149948520248215863962162949817530946230476048323993793910021425329964762351635690094450860580912024599046121186233182786144647277955232186359165518830579306577033314985100683571356243418818844057800288440181290313786537948696146304677269145529536901541670258380324778422724179945136535822609716525883567121335195468383353498015032693597981674632318476283063405883247312289512579442676398779467131210427633808726957386093146315391485487925140288850251897880760238389956156848503919958550292560541767676631453540584962967967813494201160033258744314387462483138502149804016819407956872192684626172874034809679319499656042991902818105976032632517464050164546062667655290106398687036682632990505777062663978684535843840576732982681634486467074399909175040188923192675575183540549560177329071272191345775249057715127733584233140083560809269622988941630472877800547437984985455628707299684073829372186238317665247160909671920072376588942261865504875526145578558987730087032347264183848310403948187436162244552861632876285411759464604970277244907992751464457929825498022586010017724378401677231668020041625472441794155478105541780367735533544670303264696194475608128319330956796855827719320312059416166939020496653521896728226719726400294933073847175447537619370178829763824872333618134994145416947365492548406337936743615410815934649604316035443547377288023610477431153307851599029777714996102746277697596
124888794486098633494228528476513102779262797439819576175055913009933773682405109025837593451700153405222661440772370508900444966132958595360205560340094928209438629946188347909328941610988565949542131143356088102394237060871080264659132035601218759337916396664372828367523283916888653737513357948598601075693748896456571872925404485086244499478162738425172293439601372124062867836366758453319047439547406640152608719409157439552827739043038687727282620656631293874598753177499737992930432943717638018562800611416195639424143122543970991635651028483157654270379068371757648702300523881974987466368562926550582228877132217814404895380996810721430123946935309315240540812157054022744145218765419014283867442600118890417245705374707555505816328316872471102203537271661123048573404608792725016947010678311789270955272532221252243616733433663847565909497282218094186840742383515678688934211482039058242243242646436302014417879820221162484716574682911463154075637702227401358411090760784647800701827663362279781045463311312940448335701348695851652674595151876800333955224105481817678677721527982702501171958165776035497329237247320678536902575362339712168843908788792621882023055299371323971943330835362312488703864161943615065295512673342071985022597714086381220159808943635618085970100800816225574550391013219819790455200496185837777210480466355338066165170235950971332036315789456444878009456203697849734599020046068865727018658677578427585306457066171271949673710839506032675015324359090294915169737381108979347822976841001176579870981857251313722677497066092504818768355160037146386859189130117368052187432654260637007105953644250627604582523368805525211815664175534306811815482678441693152844084610875882143176416498356631275187281829486556585242068522218307553061183933269341644594153426517786533979805808281588063007499528975582046866125908536787386033184429055106897786984177356031181116775638725899115168032365470029879896289861810145964713079161443695646909095187885743988217305838849808095230775693588516160277195214889983586323231273089098615607773860069840352678267853872159209362558178898134162474864564332110431948214212997931881046363995414965394415013838687483848702246818293918603195986679623634893092830878407124004310227061375913680565188613134583079907050036075883272488678793240933800718641528533179435350734018911936385467300006604537837844724692888305469790001312489521004469490320588382949236139192843052491678330129801922551570503785218105529616236375236479626857516600665393641422730630016486526138918422435017974559936167940633035221118290715975388218397775528129815385701687022026202746786479166440307290184454979563998448368078519970882014077691992616749911483298218543827189462821653870648585886462216114103435703428788629790834188716062144300145332750297151046731560210000438695105837737797660034608876248616409386452521779352899475784962552439255986205214090523462508478304870464926883132894705538913572907069675995562985866695597216865060520728013421043557627791840217976266564845802615914071734770090394751680177099001293911378812485342559493128666534650337288463906499684606447419075243133239034049081952330443895590605478549546202632566768132624359250202495162756070809004364604214970256914885552650228103277621158422824332695286291376626754819935461181439133675797001412558701433194347640357253769143888996830882628446164255750340014289825576203863643841379065196129177773541836946762329829049812617176761915542925704384322399184822617443504701991712582146876831726460789596905699813532644359739651734733194847987580641379
484037022905804830527318849599941566065373140212967022208219158629059526040406200118152696649100685875926556605675629633614342302328107474883950403809849818600561646460998192576162354787109138329675637615067325508606834337204387481867916689757465634560200025628896011911009804533504238420638240394341635029776888027798350874811782983494172116749194256016086853324353859511520618090312416981820793146150620738260971804582656870436239357574957373327815789043860113780785081102730494466118219574501701060593843365194586283606821085851304998204205784585771759338490155644473058345152914125616799705696574261399016819320562419279772820267142972587001932343378731539394031154111841014142927417035375420036987606087655001093452990070340324013348063885140957695571471903641520277211270701874215481239319532209975065530226468442277000205890459227424239049370515073677646298449716821219941982747940490926017157274393685697218629360073870778107974409755566278073712280303500488298439195464337533557878950640189986850602819024521911770186345051710870239033985505407044541890884720423764997490350385189495058979712866316446994074909594734115819346183366921695736050815850808379520363356199476919379650650168087102507350708252600468212428204343672458244788592565554878616144787175810685723568951507076022174335116273317094727659324132491327024255193915090836013462396123350010866146238506331270729877456189843842887640998361649647757146385732473332266538945235883659729551599051874117792886087602393061600161684340706116634492483951563191528827288228313754586782698306966912201309548159354507549235541677668764552125456812429364274741538156922195033315601516144922475124889575348359262262635454067047670338664100252772768008863832666294885827403696553293622360905724797947344340777042843185079019734690711412303641117292249293077319393097954528774124511839534803822103736446970469674930428109117972324486154132640315784309553966710614680838155489471467336524836791385664310847478486762430120184893291096152811080876174227791316293454944253954227273096450579761228853473931896008109652020901511045793776025295431301889381840102470101349293174435628835786098615456911616698573880249737569405581386305810998233725651649201554432168616905370546301761548096266208006330593207758971755899258621954620964554646243995353917432282254332671743084925083964613289295845679273654091199476162251559647040612970477598185518784414199486140131538593220607451859096088842802189433586919596049364096515703275275706415007762613237836481490052454814131959892963984413717814027641220876449896886297989108701642701690140078257483115989763306129511956804274853178863330411697671750638221352138397791384433256442884908729190670098024962815606262586369423226584906286280350572829831012669191096372583781493637749605945152169326449451882926395257723484200773560216569090770972649856428317786947778049643439917625492165006086262853294710556026704133845005078273906402875298641612874964737082351888921896126412795535364422869554305513087000098785575342231005471534128109570248708126543191232619564621493765275263564021273887651038832550073648999371671832800283988323193733015641232771853956549324229779530165348301284906778450374908917493473890156495885748021949967226211858743610397749463386330578874874055400054404393448881920441021347900345984119270249215570268737009709952053919309793194958832659221715083246219423001859743967064911495594117337281998690213116298866802674464434892330206070038212628417236796273071914050080840857039781519981488223900599489119464744386825337458899623751333782805
329282720168159779700664883944824463322109283205040459830089435659542672568797149187034473382377679148292032831968381059077157271919030423653156509574645496434253280695103965587335498038509951434635061753614800501950452013502001802815069332419182678557377644140970809457456248548677049043683687175909180572697940104650194848531467266429786676876977892914311285050430981929497361659442594717547651352052450725975385779583727977029722314351999584995223440493945021154288672441887174095245547718674849114750318017733046899093179744729570351923876864055442781341698072493822197491242575101621874397729021477046380107314706531542013005838104589050067645573329981499458546551055263749143541958679925959814122187352384079574161233722640638604319889362498676496935925695921284959062544464743317599996851636603052164267704281546817775893392521155385905268233116083027511943848238615528524650103294672971981121053141258981651001207426881435775908252274668632061883768304509217845825262395941896730036408086242336576209791116417663313288523520624879229789594564503337331394223847785827171954123478604343761652415687179435625702156366666800885310067289470330795408045833241921884888707122756703331739392625090735561645136770641995391119488812406598216857871313850568506230941552068779875397406584842501352056151034898218737702450635833142436248074325424641959846474115756254410103896715766772631964425249319418064724237893346685610837898088303135713331577294356649560781253049175940158951469549652231185596690485594676079681901672666346501861829556698939650196145444017681628106044650684481395616672207292612101646923390167933996328330131638508309679427929345512684357603569019705231383646409613117749046007728408622147475476532215055181164898878790877809180090507060400612200100512715759912257252825233780268090305284615817395581981223970100920172022516063529224647816155335322754532645430870933209246318559765805617174468404500482853533965468626788523300449677955807616618018336687923125104608097738955654889628150895196220936750588416097522823282504337129701866081937489686999613014869246944824207236329123670525421454641629689104429816333732668716759467153926119506492247256272545432741934959955695902432790971743922580981036014863644091014917341830796463450648333034047657118270402768682714180845749984933920393174454026166636746466687543850939671299180674719098853127107267244285848706943070997565679491984189964257488847646220303256377511125340600879369045657792720352059213459242729652066833385106736152762610160266477724850833447198919868026561972364208475049626616077970929068447577982517955697582350843717461033103879117892394416301126340775357735205580400669825231912255705191336314072113497232265491510629617390506178571275094036231467009311761331320186311587308867982392980098050894915107883711940997503754736743057451872654140164469245767921857536803632891396641553420667056232729360011777814988861008308778495717098808586670231040432425267859555620773105430722980321259411079573491466846802205018161921507666491068620333787138260589876552104236681986701778616726719723741569178800016906566590469653161549236040618918209824140061037794071663420027358289119941826478127826596662070303847958814427902466692640327994040168001372934773015309418050705874211532846422030065507639667561683188970051520266566499294173828403273059407401471174784648392412256765235934185540664409837060836364576570818016642850442582245516508088644212121139143524539352255221624837917373303298123495289840986132737099574077867893493119752042379250228513758804367918545478364167731
51821457226504640800104202100410766027807729152555503218182387221708112766208665317651926458452495269685376314437998340336947124447247796973890514941120010934140073794061859447165516612674930799374705772930521750426383798367668159183589049652163726492960837147204067428996276720315410211504333742057182854090136325721437592054640471894328548696883599785122262130812989581571391597464534806099601555877223193450760315411663112963843719400333736013305526352571490454327925190794007111504785378036370897340146753465517470747096935814912797188187854376797751675927822300312945518595042883902735494672667647506072643698761394806879080593531793001711000214417701504495496412454361656210150919997862972495905809191825255486358703529320142005857057855419217730505342687533799076038746689684283402648733290888881745453047194740939258407362058242849349024756883352446212456101562729065130618520732925434179252299417447855189995098959999877410951464170076989305620163502192692653166599093238118295411937545448509428621839424186218067457128099385258842631930670182098008050900019819621758458932516877698594110522845465835679362969619219080897536813210484518784516230623911878024604050824909336069998094776253792973597037759066145994638578378211017122446355845171941670344732162722443265914858595797823752976323442911242311368603724514438765801271594060878788638511089680883165505046309006148832545452819908256238805872042843941834687865142541377686054291079721004271658 diff --git a/vendor/github.com/klauspost/compress/testdata/gettysburg.txt b/vendor/github.com/klauspost/compress/testdata/gettysburg.txt new file mode 100644 index 0000000..2c9bcde --- /dev/null +++ b/vendor/github.com/klauspost/compress/testdata/gettysburg.txt @@ -0,0 +1,29 @@ + Four score and seven years ago our fathers brought forth on +this continent, a new nation, conceived in Liberty, and dedicated +to the proposition that all men are created equal. + Now we are engaged in a great Civil War, testing whether that +nation, or any nation so conceived and so dedicated, can long +endure. + We are met on a great battle-field of that war. + We have come to dedicate a portion of that field, as a final +resting place for those who here gave their lives that that +nation might live. It is altogether fitting and proper that +we should do this. + But, in a larger sense, we can not dedicate - we can not +consecrate - we can not hallow - this ground. + The brave men, living and dead, who struggled here, have +consecrated it, far above our poor power to add or detract. +The world will little note, nor long remember what we say here, +but it can never forget what they did here. + It is for us the living, rather, to be dedicated here to the +unfinished work which they who fought here have thus far so +nobly advanced. It is rather for us to be here dedicated to +the great task remaining before us - that from these honored +dead we take increased devotion to that cause for which they +gave the last full measure of devotion - + that we here highly resolve that these dead shall not have +died in vain - that this nation, under God, shall have a new +birth of freedom - and that government of the people, by the +people, for the people, shall not perish from this earth. 
+ +Abraham Lincoln, November 19, 1863, Gettysburg, Pennsylvania diff --git a/vendor/github.com/klauspost/compress/testdata/pi.txt b/vendor/github.com/klauspost/compress/testdata/pi.txt new file mode 100644 index 0000000..ca99bbc --- /dev/null +++ b/vendor/github.com/klauspost/compress/testdata/pi.txt @@ -0,0 +1 @@ +3.14159265358979323846264338327950288419716939937510582097494459230781640628620899862803482534211706798214808651328230664709384460955058223172535940812848111745028410270193852110555964462294895493038196442881097566593344612847564823378678316527120190914564856692346034861045432664821339360726024914127372458700660631558817488152092096282925409171536436789259036001133053054882046652138414695194151160943305727036575959195309218611738193261179310511854807446237996274956735188575272489122793818301194912983367336244065664308602139494639522473719070217986094370277053921717629317675238467481846766940513200056812714526356082778577134275778960917363717872146844090122495343014654958537105079227968925892354201995611212902196086403441815981362977477130996051870721134999999837297804995105973173281609631859502445945534690830264252230825334468503526193118817101000313783875288658753320838142061717766914730359825349042875546873115956286388235378759375195778185778053217122680661300192787661119590921642019893809525720106548586327886593615338182796823030195203530185296899577362259941389124972177528347913151557485724245415069595082953311686172785588907509838175463746493931925506040092770167113900984882401285836160356370766010471018194295559619894676783744944825537977472684710404753464620804668425906949129331367702898915210475216205696602405803815019351125338243003558764024749647326391419927260426992279678235478163600934172164121992458631503028618297455570674983850549458858692699569092721079750930295532116534498720275596023648066549911988183479775356636980742654252786255181841757467289097777279380008164706001614524919217321721477235014144197356854816136115735255213347574184946843852332390739414333454776241686251898356948556209921922218427255025425688767179049460165346680498862723279178608578438382796797668145410095388378636095068006422512520511739298489608412848862694560424196528502221066118630674427862203919494504712371378696095636437191728746776465757396241389086583264599581339047802759009946576407895126946839835259570982582262052248940772671947826848260147699090264013639443745530506820349625245174939965143142980919065925093722169646151570985838741059788595977297549893016175392846813826868386894277415599185592524595395943104997252468084598727364469584865383673622262609912460805124388439045124413654976278079771569143599770012961608944169486855584840635342207222582848864815845602850601684273945226746767889525213852254995466672782398645659611635488623057745649803559363456817432411251507606947945109659609402522887971089314566913686722874894056010150330861792868092087476091782493858900971490967598526136554978189312978482168299894872265880485756401427047755513237964145152374623436454285844479526586782105114135473573952311342716610213596953623144295248493718711014576540359027993440374200731057853906219838744780847848968332144571386875194350643021845319104848100537061468067491927819119793995206141966342875444064374512371819217999839101591956181467514269123974894090718649423196156794520809514655022523160388193014209376213785595663893778708303906979207734672218256259966150142150306803844773454920260541466592520149744285073251866600213243408819071048633173464965145390579626856100550810665879699816357473638405257
145910289706414011097120628043903975951567715770042033786993600723055876317635942187312514712053292819182618612586732157919841484882916447060957527069572209175671167229109816909152801735067127485832228718352093539657251210835791513698820914442100675103346711031412671113699086585163983150197016515116851714376576183515565088490998985998238734552833163550764791853589322618548963213293308985706420467525907091548141654985946163718027098199430992448895757128289059232332609729971208443357326548938239119325974636673058360414281388303203824903758985243744170291327656180937734440307074692112019130203303801976211011004492932151608424448596376698389522868478312355265821314495768572624334418930396864262434107732269780280731891544110104468232527162010526522721116603966655730925471105578537634668206531098965269186205647693125705863566201855810072936065987648611791045334885034611365768675324944166803962657978771855608455296541266540853061434443185867697514566140680070023787765913440171274947042056223053899456131407112700040785473326993908145466464588079727082668306343285878569830523580893306575740679545716377525420211495576158140025012622859413021647155097925923099079654737612551765675135751782966645477917450112996148903046399471329621073404375189573596145890193897131117904297828564750320319869151402870808599048010941214722131794764777262241425485454033215718530614228813758504306332175182979866223717215916077166925474873898665494945011465406284336639379003976926567214638530673609657120918076383271664162748888007869256029022847210403172118608204190004229661711963779213375751149595015660496318629472654736425230817703675159067350235072835405670403867435136222247715891504953098444893330963408780769325993978054193414473774418426312986080998886874132604721569516239658645730216315981931951673538129741677294786724229246543668009806769282382806899640048243540370141631496589794092432378969070697794223625082216889573837986230015937764716512289357860158816175578297352334460428151262720373431465319777741603199066554187639792933441952154134189948544473456738316249934191318148092777710386387734317720754565453220777092120190516609628049092636019759882816133231666365286193266863360627356763035447762803504507772355471058595487027908143562401451718062464362679456127531813407833033625423278394497538243720583531147711992606381334677687969597030983391307710987040859133746414428227726346594704745878477872019277152807317679077071572134447306057007334924369311383504931631284042512192565179806941135280131470130478164378851852909285452011658393419656213491434159562586586557055269049652098580338507224264829397285847831630577775606888764462482468579260395352773480304802900587607582510474709164396136267604492562742042083208566119062545433721315359584506877246029016187667952406163425225771954291629919306455377991403734043287526288896399587947572917464263574552540790914513571113694109119393251910760208252026187985318877058429725916778131496990090192116971737278476847268608490033770242429165130050051683233643503895170298939223345172201381280696501178440874519601212285993716231301711444846409038906449544400619869075485160263275052983491874078668088183385102283345085048608250393021332197155184306354550076682829493041377655279397517546139539846833936383047461199665385815384205685338621867252334028308711232827892125077126294632295639898989358211674562701021835646220134967151881909730381198004973407239610368540664319395097901906996395524530054505806855019567302292191393391856803449039820595510022635353619204199474553859381023439554495977837790
237421617271117236434354394782218185286240851400666044332588856986705431547069657474585503323233421073015459405165537906866273337995851156257843229882737231989875714159578111963583300594087306812160287649628674460477464915995054973742562690104903778198683593814657412680492564879855614537234786733039046883834363465537949864192705638729317487233208376011230299113679386270894387993620162951541337142489283072201269014754668476535761647737946752004907571555278196536213239264061601363581559074220202031872776052772190055614842555187925303435139844253223415762336106425063904975008656271095359194658975141310348227693062474353632569160781547818115284366795706110861533150445212747392454494542368288606134084148637767009612071512491404302725386076482363414334623518975766452164137679690314950191085759844239198629164219399490723623464684411739403265918404437805133389452574239950829659122850855582157250310712570126683024029295252201187267675622041542051618416348475651699981161410100299607838690929160302884002691041407928862150784245167090870006992821206604183718065355672525325675328612910424877618258297651579598470356222629348600341587229805349896502262917487882027342092222453398562647669149055628425039127577102840279980663658254889264880254566101729670266407655904290994568150652653053718294127033693137851786090407086671149655834343476933857817113864558736781230145876871266034891390956200993936103102916161528813843790990423174733639480457593149314052976347574811935670911013775172100803155902485309066920376719220332290943346768514221447737939375170344366199104033751117354719185504644902636551281622882446257591633303910722538374218214088350865739177150968288747826569959957449066175834413752239709683408005355984917541738188399944697486762655165827658483588453142775687900290951702835297163445621296404352311760066510124120065975585127617858382920419748442360800719304576189323492292796501987518721272675079812554709589045563579212210333466974992356302549478024901141952123828153091140790738602515227429958180724716259166854513331239480494707911915326734302824418604142636395480004480026704962482017928964766975831832713142517029692348896276684403232609275249603579964692565049368183609003238092934595889706953653494060340216654437558900456328822505452556405644824651518754711962184439658253375438856909411303150952617937800297412076651479394259029896959469955657612186561967337862362561252163208628692221032748892186543648022967807057656151446320469279068212073883778142335628236089632080682224680122482611771858963814091839036736722208883215137556003727983940041529700287830766709444745601345564172543709069793961225714298946715435784687886144458123145935719849225284716050492212424701412147805734551050080190869960330276347870810817545011930714122339086639383395294257869050764310063835198343893415961318543475464955697810382930971646514384070070736041123735998434522516105070270562352660127648483084076118301305279320542746286540360367453286510570658748822569815793678976697422057505968344086973502014102067235850200724522563265134105592401902742162484391403599895353945909440704691209140938700126456001623742880210927645793106579229552498872758461012648369998922569596881592056001016552563756785667227966198857827948488558343975187445455129656344348039664205579829368043522027709842942325330225763418070394769941597915945300697521482933665556615678736400536665641654732170439035213295435291694145990416087532018683793702348886894791510716378529023452924407736594956305100742108714261349745956151384987137570471017879573104229690666702144986
374645952808243694457897723300487647652413390759204340196340391147320233807150952220106825634274716460243354400515212669324934196739770415956837535551667302739007497297363549645332888698440611964961627734495182736955882207573551766515898551909866653935494810688732068599075407923424023009259007017319603622547564789406475483466477604114632339056513433068449539790709030234604614709616968868850140834704054607429586991382966824681857103188790652870366508324319744047718556789348230894310682870272280973624809399627060747264553992539944280811373694338872940630792615959954626246297070625948455690347119729964090894180595343932512362355081349490043642785271383159125689892951964272875739469142725343669415323610045373048819855170659412173524625895487301676002988659257866285612496655235338294287854253404830833070165372285635591525347844598183134112900199920598135220511733658564078264849427644113763938669248031183644536985891754426473998822846218449008777697763127957226726555625962825427653183001340709223343657791601280931794017185985999338492354956400570995585611349802524990669842330173503580440811685526531170995708994273287092584878944364600504108922669178352587078595129834417295351953788553457374260859029081765155780390594640873506123226112009373108048548526357228257682034160504846627750450031262008007998049254853469414697751649327095049346393824322271885159740547021482897111777923761225788734771881968254629812686858170507402725502633290449762778944236216741191862694396506715157795867564823993917604260176338704549901761436412046921823707648878341968968611815581587360629386038101712158552726683008238340465647588040513808016336388742163714064354955618689641122821407533026551004241048967835285882902436709048871181909094945331442182876618103100735477054981596807720094746961343609286148494178501718077930681085469000944589952794243981392135055864221964834915126390128038320010977386806628779239718014613432445726400973742570073592100315415089367930081699805365202760072774967458400283624053460372634165542590276018348403068113818551059797056640075094260878857357960373245141467867036880988060971642584975951380693094494015154222219432913021739125383559150310033303251117491569691745027149433151558854039221640972291011290355218157628232831823425483261119128009282525619020526301639114772473314857391077758744253876117465786711694147764214411112635835538713610110232679877564102468240322648346417663698066378576813492045302240819727856471983963087815432211669122464159117767322532643356861461865452226812688726844596844241610785401676814208088502800541436131462308210259417375623899420757136275167457318918945628352570441335437585753426986994725470316566139919996826282472706413362221789239031760854289437339356188916512504244040089527198378738648058472689546243882343751788520143956005710481194988423906061369573423155907967034614914344788636041031823507365027785908975782727313050488939890099239135033732508559826558670892426124294736701939077271307068691709264625484232407485503660801360466895118400936686095463250021458529309500009071510582362672932645373821049387249966993394246855164832611341461106802674466373343753407642940266829738652209357016263846485285149036293201991996882851718395366913452224447080459239660281715655156566611135982311225062890585491450971575539002439315351909021071194573002438801766150352708626025378817975194780610137150044899172100222013350131060163915415895780371177927752259787428919179155224171895853616805947412341933984202187456492564434623925319531351033114763949119950728584306583619353693296992898379
149419394060857248639688369032655643642166442576079147108699843157337496488352927693282207629472823815374099615455987982598910937171262182830258481123890119682214294576675807186538065064870261338928229949725745303328389638184394477077940228435988341003583854238973542439564755568409522484455413923941000162076936368467764130178196593799715574685419463348937484391297423914336593604100352343777065888677811394986164787471407932638587386247328896456435987746676384794665040741118256583788784548581489629612739984134427260860618724554523606431537101127468097787044640947582803487697589483282412392929605829486191966709189580898332012103184303401284951162035342801441276172858302435598300320420245120728725355811958401491809692533950757784000674655260314461670508276827722235341911026341631571474061238504258459884199076112872580591139356896014316682831763235673254170734208173322304629879928049085140947903688786878949305469557030726190095020764334933591060245450864536289354568629585313153371838682656178622736371697577418302398600659148161640494496501173213138957470620884748023653710311508984279927544268532779743113951435741722197597993596852522857452637962896126915723579866205734083757668738842664059909935050008133754324546359675048442352848747014435454195762584735642161981340734685411176688311865448937769795665172796623267148103386439137518659467300244345005449953997423723287124948347060440634716063258306498297955101095418362350303094530973358344628394763047756450150085075789495489313939448992161255255977014368589435858775263796255970816776438001254365023714127834679261019955852247172201777237004178084194239487254068015560359983905489857235467456423905858502167190313952629445543913166313453089390620467843877850542393905247313620129476918749751910114723152893267725339181466073000890277689631148109022097245207591672970078505807171863810549679731001678708506942070922329080703832634534520380278609905569001341371823683709919495164896007550493412678764367463849020639640197666855923356546391383631857456981471962108410809618846054560390384553437291414465134749407848844237721751543342603066988317683310011331086904219390310801437843341513709243530136776310849135161564226984750743032971674696406665315270353254671126675224605511995818319637637076179919192035795820075956053023462677579439363074630569010801149427141009391369138107258137813578940055995001835425118417213605572752210352680373572652792241737360575112788721819084490061780138897107708229310027976659358387589093956881485602632243937265624727760378908144588378550197028437793624078250527048758164703245812908783952324532378960298416692254896497156069811921865849267704039564812781021799132174163058105545988013004845629976511212415363745150056350701278159267142413421033015661653560247338078430286552572227530499988370153487930080626018096238151613669033411113865385109193673938352293458883225508870645075394739520439680790670868064450969865488016828743437861264538158342807530618454859037982179945996811544197425363443996029025100158882721647450068207041937615845471231834600726293395505482395571372568402322682130124767945226448209102356477527230820810635188991526928891084555711266039650343978962782500161101532351605196559042118449499077899920073294769058685778787209829013529566139788848605097860859570177312981553149516814671769597609942100361835591387778176984587581044662839988060061622984861693533738657877359833616133841338536842119789389001852956919678045544828584837011709672125353387586215823101331038776682721157269495181795897546939926421979155233857662316762754757
035469941489290413018638611943919628388705436777432242768091323654494853667680000010652624854730558615989991401707698385483188750142938908995068545307651168033373222651756622075269517914422528081651716677667279303548515420402381746089232839170327542575086765511785939500279338959205766827896776445318404041855401043513483895312013263783692835808271937831265496174599705674507183320650345566440344904536275600112501843356073612227659492783937064784264567633881880756561216896050416113903906396016202215368494109260538768871483798955999911209916464644119185682770045742434340216722764455893301277815868695250694993646101756850601671453543158148010545886056455013320375864548584032402987170934809105562116715468484778039447569798042631809917564228098739987669732376957370158080682290459921236616890259627304306793165311494017647376938735140933618332161428021497633991898354848756252987524238730775595559554651963944018218409984124898262367377146722606163364329640633572810707887581640438148501884114318859882769449011932129682715888413386943468285900666408063140777577257056307294004929403024204984165654797367054855804458657202276378404668233798528271057843197535417950113472736257740802134768260450228515797957976474670228409995616015691089038458245026792659420555039587922981852648007068376504183656209455543461351341525700659748819163413595567196496540321872716026485930490397874895890661272507948282769389535217536218507962977851461884327192232238101587444505286652380225328438913752738458923844225354726530981715784478342158223270206902872323300538621634798850946954720047952311201504329322662827276321779088400878614802214753765781058197022263097174950721272484794781695729614236585957820908307332335603484653187302930266596450137183754288975579714499246540386817992138934692447419850973346267933210726868707680626399193619650440995421676278409146698569257150743157407938053239252394775574415918458215625181921552337096074833292349210345146264374498055961033079941453477845746999921285999993996122816152193148887693880222810830019860165494165426169685867883726095877456761825072759929508931805218729246108676399589161458550583972742098090978172932393010676638682404011130402470073508578287246271349463685318154696904669686939254725194139929146524238577625500474852954768147954670070503479995888676950161249722820403039954632788306959762493615101024365553522306906129493885990157346610237122354789112925476961760050479749280607212680392269110277722610254414922157650450812067717357120271802429681062037765788371669091094180744878140490755178203856539099104775941413215432844062503018027571696508209642734841469572639788425600845312140659358090412711359200419759851362547961606322887361813673732445060792441176399759746193835845749159880976674470930065463424234606342374746660804317012600520559284936959414340814685298150539471789004518357551541252235905906872648786357525419112888773717663748602766063496035367947026923229718683277173932361920077745221262475186983349515101986426988784717193966497690708252174233656627259284406204302141137199227852699846988477023238238400556555178890876613601304770984386116870523105531491625172837327286760072481729876375698163354150746088386636406934704372066886512756882661497307886570156850169186474885416791545965072342877306998537139043002665307839877638503238182155355973235306860430106757608389086270498418885951380910304235957824951439885901131858358406674723702971497850841458530857813391562707603563907639473114554958322669457024941398316343323789759556808568362972538679132750555425244919435891284050452269
538121791319145135009938463117740179715122837854601160359554028644059024964669307077690554810288502080858008781157738171917417760173307385547580060560143377432990127286772530431825197579167929699650414607066457125888346979796429316229655201687973000356463045793088403274807718115553309098870255052076804630346086581653948769519600440848206596737947316808641564565053004988161649057883115434548505266006982309315777650037807046612647060214575057932709620478256152471459189652236083966456241051955105223572397395128818164059785914279148165426328920042816091369377737222999833270820829699557377273756676155271139225880552018988762011416800546873655806334716037342917039079863965229613128017826797172898229360702880690877686605932527463784053976918480820410219447197138692560841624511239806201131845412447820501107987607171556831540788654390412108730324020106853419472304766667217498698685470767812051247367924791931508564447753798537997322344561227858432968466475133365736923872014647236794278700425032555899268843495928761240075587569464137056251400117971331662071537154360068764773186755871487839890810742953094106059694431584775397009439883949144323536685392099468796450665339857388878661476294434140104988899316005120767810358861166020296119363968213496075011164983278563531614516845769568710900299976984126326650234771672865737857908574664607722834154031144152941880478254387617707904300015669867767957609099669360755949651527363498118964130433116627747123388174060373174397054067031096767657486953587896700319258662594105105335843846560233917967492678447637084749783336555790073841914731988627135259546251816043422537299628632674968240580602964211463864368642247248872834341704415734824818333016405669596688667695634914163284264149745333499994800026699875888159350735781519588990053951208535103572613736403436753471410483601754648830040784641674521673719048310967671134434948192626811107399482506073949507350316901973185211955263563258433909982249862406703107683184466072912487475403161796994113973877658998685541703188477886759290260700432126661791922352093822787888098863359911608192353555704646349113208591897961327913197564909760001399623444553501434642686046449586247690943470482932941404111465409239883444351591332010773944111840741076849810663472410482393582740194493566516108846312567852977697346843030614624180358529331597345830384554103370109167677637427621021370135485445092630719011473184857492331816720721372793556795284439254815609137281284063330393735624200160456645574145881660521666087387480472433912129558777639069690370788285277538940524607584962315743691711317613478388271941686066257210368513215664780014767523103935786068961112599602818393095487090590738613519145918195102973278755710497290114871718971800469616977700179139196137914171627070189584692143436967629274591099400600849835684252019155937037010110497473394938778859894174330317853487076032219829705797511914405109942358830345463534923498268836240433272674155403016195056806541809394099820206099941402168909007082133072308966211977553066591881411915778362729274615618571037217247100952142369648308641025928874579993223749551912219519034244523075351338068568073544649951272031744871954039761073080602699062580760202927314552520780799141842906388443734996814582733720726639176702011830046481900024130835088465841521489912761065137415394356572113903285749187690944137020905170314877734616528798482353382972601361109845148418238081205409961252745808810994869722161285248974255555160763716750548961730168096138038119143611439921063800508321409876045993093248510251682944672606
661381517457125597549535802399831469822036133808284993567055755247129027453977621404931820146580080215665360677655087838043041343105918046068008345911366408348874080057412725867047922583191274157390809143831384564241509408491339180968402511639919368532255573389669537490266209232613188558915808324555719484538756287861288590041060060737465014026278240273469625282171749415823317492396835301361786536737606421667781377399510065895288774276626368418306801908046098498094697636673356622829151323527888061577682781595886691802389403330764419124034120223163685778603572769415417788264352381319050280870185750470463129333537572853866058889045831114507739429352019943219711716422350056440429798920815943071670198574692738486538334361457946341759225738985880016980147574205429958012429581054565108310462972829375841611625325625165724980784920998979906200359365099347215829651741357984910471116607915874369865412223483418877229294463351786538567319625598520260729476740726167671455736498121056777168934849176607717052771876011999081441130586455779105256843048114402619384023224709392498029335507318458903553971330884461741079591625117148648744686112476054286734367090466784686702740918810142497111496578177242793470702166882956108777944050484375284433751088282647719785400065097040330218625561473321177711744133502816088403517814525419643203095760186946490886815452856213469883554445602495566684366029221951248309106053772019802183101032704178386654471812603971906884623708575180800353270471856594994761242481109992886791589690495639476246084240659309486215076903149870206735338483495508363660178487710608098042692471324100094640143736032656451845667924566695510015022983307984960799498824970617236744936122622296179081431141466094123415935930958540791390872083227335495720807571651718765994498569379562387555161757543809178052802946420044721539628074636021132942559160025707356281263873310600589106524570802447493754318414940148211999627645310680066311838237616396631809314446712986155275982014514102756006892975024630401735148919457636078935285550531733141645705049964438909363084387448478396168405184527328840323452024705685164657164771393237755172947951261323982296023945485797545865174587877133181387529598094121742273003522965080891777050682592488223221549380483714547816472139768209633205083056479204820859204754998573203888763916019952409189389455767687497308569559580106595265030362661597506622250840674288982659075106375635699682115109496697445805472886936310203678232501823237084597901115484720876182124778132663304120762165873129708112307581598212486398072124078688781145016558251361789030708608701989758898074566439551574153631931919810705753366337380382721527988493503974800158905194208797113080512339332219034662499171691509485414018710603546037946433790058909577211808044657439628061867178610171567409676620802957665770512912099079443046328929473061595104309022214393718495606340561893425130572682914657832933405246350289291754708725648426003496296116541382300773133272983050016025672401418515204189070115428857992081219844931569990591820118197335001261877280368124819958770702075324063612593134385955425477819611429351635612234966615226147353996740515849986035529533292457523888101362023476246690558164389678630976273655047243486430712184943734853006063876445662721866617012381277156213797461498613287441177145524447089971445228856629424402301847912054784985745216346964489738920624019435183100882834802492490854030778638751659113028739587870981007727182718745290139728366148421428717055317965430765045343246005363614726181809699769334862640774351
999286863238350887566835950972655748154319401955768504372480010204137498318722596773871549583997184449072791419658459300839426370208756353982169620553248032122674989114026785285996734052420310917978999057188219493913207534317079800237365909853755202389116434671855829068537118979526262344924833924963424497146568465912489185566295893299090352392333336474352037077010108438800329075983421701855422838616172104176030116459187805393674474720599850235828918336929223373239994804371084196594731626548257480994825099918330069765693671596893644933488647442135008407006608835972350395323401795825570360169369909886711321097988970705172807558551912699306730992507040702455685077867906947661262980822516331363995211709845280926303759224267425755998928927837047444521893632034894155210445972618838003006776179313813991620580627016510244588692476492468919246121253102757313908404700071435613623169923716948481325542009145304103713545329662063921054798243921251725401323149027405858920632175894943454890684639931375709103463327141531622328055229729795380188016285907357295541627886764982741861642187898857410716490691918511628152854867941736389066538857642291583425006736124538491606741373401735727799563410433268835695078149313780073623541800706191802673285511919426760912210359874692411728374931261633950012395992405084543756985079570462226646190001035004901830341535458428337643781119885563187777925372011667185395418359844383052037628194407615941068207169703022851522505731260930468984234331527321313612165828080752126315477306044237747535059522871744026663891488171730864361113890694202790881431194487994171540421034121908470940802540239329429454938786402305129271190975135360009219711054120966831115163287054230284700731206580326264171161659576132723515666625366727189985341998952368848309993027574199164638414270779887088742292770538912271724863220288984251252872178260305009945108247835729056919885554678860794628053712270424665431921452817607414824038278358297193010178883456741678113989547504483393146896307633966572267270433932167454218245570625247972199786685427989779923395790575818906225254735822052364248507834071101449804787266919901864388229323053823185597328697809222535295910173414073348847610055640182423921926950620831838145469839236646136398910121021770959767049083050818547041946643713122996923588953849301363565761861060622287055994233716310212784574464639897381885667462608794820186474876727272220626764653380998019668836809941590757768526398651462533363124505364026105696055131838131742611844201890888531963569869627950367384243130113317533053298020166888174813429886815855778103432317530647849832106297184251843855344276201282345707169885305183261796411785796088881503296022907056144762209150947390359466469162353968092013945781758910889319921122600739281491694816152738427362642980982340632002440244958944561291670495082358124873917996486411334803247577752197089327722623494860150466526814398770516153170266969297049283162855042128981467061953319702695072143782304768752802873541261663917082459251700107141808548006369232594620190022780874098597719218051585321473926532515590354102092846659252999143537918253145452905984158176370589279069098969111643811878094353715213322614436253144901274547726957393934815469163116249288735747188240715039950094467319543161938554852076657388251396391635767231510055560372633948672082078086537349424401157996675073607111593513319591971209489647175530245313647709420946356969822266737752099451684506436238242118535348879893956731878066061078854400055082765703055874485418057788917192078814233511386629296671
796434687600770479995378833878703487180218424373421122739402557176908196030920182401884270570460926225641783752652633583242406612533115294234579655695025068100183109004112453790153329661569705223792103257069370510908307894799990049993953221536227484766036136776979785673865846709366795885837887956259464648913766521995882869338018360119323685785585581955560421562508836502033220245137621582046181067051953306530606065010548871672453779428313388716313955969058320834168984760656071183471362181232462272588419902861420872849568796393254642853430753011052857138296437099903569488852851904029560473461311382638788975517885604249987483163828040468486189381895905420398898726506976202019955484126500053944282039301274816381585303964399254702016727593285743666616441109625663373054092195196751483287348089574777752783442210910731113518280460363471981856555729571447476825528578633493428584231187494400032296906977583159038580393535213588600796003420975473922967333106493956018122378128545843176055617338611267347807458506760630482294096530411183066710818930311088717281675195796753471885372293096161432040063813224658411111577583585811350185690478153689381377184728147519983505047812977185990847076219746058874232569958288925350419379582606162118423687685114183160683158679946016520577405294230536017803133572632670547903384012573059123396018801378254219270947673371919872873852480574212489211834708766296672072723256505651293331260595057777275424712416483128329820723617505746738701282095755443059683955556868611883971355220844528526400812520276655576774959696266126045652456840861392382657685833846984997787267065551918544686984694784957346226062942196245570853712727765230989554501930377321666491825781546772920052126671434632096378918523232150189761260343736840671941930377468809992968775824410478781232662531818459604538535438391144967753128642609252115376732588667226040425234910870269580996475958057946639734190640100363619040420331135793365424263035614570090112448008900208014780566037101541223288914657223931450760716706435568274377439657890679726874384730763464516775621030986040927170909512808630902973850445271828927496892121066700816485833955377359191369501531620189088874842107987068991148046692706509407620465027725286507289053285485614331608126930056937854178610969692025388650345771831766868859236814884752764984688219497397297077371871884004143231276365048145311228509900207424092558592529261030210673681543470152523487863516439762358604191941296976904052648323470099111542426012734380220893310966863678986949779940012601642276092608234930411806438291383473546797253992623387915829984864592717340592256207491053085315371829116816372193951887009577881815868504645076993439409874335144316263303172477474868979182092394808331439708406730840795893581089665647758599055637695252326536144247802308268118310377358870892406130313364773710116282146146616794040905186152603600925219472188909181073358719641421444786548995285823439470500798303885388608310357193060027711945580219119428999227223534587075662469261776631788551443502182870266856106650035310502163182060176092179846849368631612937279518730789726373537171502563787335797718081848784588665043358243770041477104149349274384575871071597315594394264125702709651251081155482479394035976811881172824721582501094960966253933953809221955919181885526780621499231727631632183398969380756168559117529984501320671293924041445938623988093812404521914848316462101473891825101090967738690664041589736104764365000680771056567184862814963711188321924456639458144914861655004956769826903089111856879869294705135
248160917432430153836847072928989828460222373014526556798986277679680914697983782687643115988321090437156112997665215396354644208691975673700057387649784376862876817924974694384274652563163230055513041742273416464551278127845777724575203865437542828256714128858345444351325620544642410110379554641905811686230596447695870540721419852121067343324107567675758184569906930460475227701670056845439692340417110898889934163505851578873534308155208117720718803791040469830695786854739376564336319797868036718730796939242363214484503547763156702553900654231179201534649779290662415083288583952905426376876689688050333172278001858850697362324038947004718976193473443084374437599250341788079722358591342458131440498477017323616947197657153531977549971627856631190469126091825912498903676541769799036237552865263757337635269693443544004730671988689019681474287677908669796885225016369498567302175231325292653758964151714795595387842784998664563028788319620998304945198743963690706827626574858104391122326187940599415540632701319898957037611053236062986748037791537675115830432084987209202809297526498125691634250005229088726469252846661046653921714820801305022980526378364269597337070539227891535105688839381132497570713310295044303467159894487868471164383280506925077662745001220035262037094660234146489983902525888301486781621967751945831677187627572005054397944124599007711520515461993050983869825428464072555409274031325716326407929341833421470904125425335232480219322770753555467958716383587501815933871742360615511710131235256334858203651461418700492057043720182617331947157008675785393360786227395581857975872587441025420771054753612940474601000940954449596628814869159038990718659805636171376922272907641977551777201042764969496110562205925024202177042696221549587264539892276976603105249808557594716310758701332088614632664125911486338812202844406941694882615295776253250198703598706743804698219420563812558334364219492322759372212890564209430823525440841108645453694049692714940033197828613181861888111184082578659287574263844500599442295685864604810330153889114994869354360302218109434667640000223625505736312946262960961987605642599639461386923308371962659547392346241345977957485246478379807956931986508159776753505539189911513352522987361127791827485420086895396583594219633315028695611920122988898870060799927954111882690230789131076036176347794894320321027733594169086500719328040171638406449878717537567811853213284082165711075495282949749362146082155832056872321855740651610962748743750980922302116099826330339154694946444910045152809250897450748967603240907689836529406579201983152654106581368237919840906457124689484702093577611931399802468134052003947819498662026240089021501661638135383815150377350229660746279529103840686855690701575166241929872444827194293310048548244545807188976330032325258215812803274679620028147624318286221710543528983482082734516801861317195933247110746622285087106661177034653528395776259977446721857158161264111432717943478859908928084866949141390977167369002777585026866465405659503948678411107901161040085727445629384254941675946054871172359464291058509099502149587931121961359083158826206823321561530868337308381732793281969838750870834838804638847844188400318471269745437093732983624028751979208023218787448828728437273780178270080587824107493575148899789117397461293203510814327032514090304874622629423443275712600866425083331876886507564292716055252895449215376517514921963671810494353178583834538652556566406572513635750643532365089367904317025978781771903148679638408288102094614900797151377170990619549696400
708676671023300486726314755105372317571143223174114116806228642063889062101923552235467116621374996932693217370431059872250394565749246169782609702533594750209138366737728944386964000281103440260847128990007468077648440887113413525033678773167977093727786821661178653442317322646378476978751443320953400016506921305464768909850502030150448808342618452087305309731894929164253229336124315143065782640702838984098416029503092418971209716016492656134134334222988279099217860426798124572853458013382609958771781131021673402565627440072968340661984806766158050216918337236803990279316064204368120799003162644491461902194582296909921227885539487835383056468648816555622943156731282743908264506116289428035016613366978240517701552196265227254558507386405852998303791803504328767038092521679075712040612375963276856748450791511473134400018325703449209097124358094479004624943134550289006806487042935340374360326258205357901183956490893543451013429696175452495739606214902887289327925206965353863964432253883275224996059869747598823299162635459733244451637553343774929289905811757863555556269374269109471170021654117182197505198317871371060510637955585889055688528879890847509157646390746936198815078146852621332524738376511929901561091897779220087057933964638274906806987691681974923656242260871541761004306089043779766785196618914041449252704808819714988015420577870065215940092897776013307568479669929554336561398477380603943688958876460549838714789684828053847017308711177611596635050399793438693391197898871091565417091330826076474063057114110988393880954814378284745288383680794188843426662220704387228874139478010177213922819119923654055163958934742639538248296090369002883593277458550608013179884071624465639979482757836501955142215513392819782269842786383916797150912624105487257009240700454884856929504481107380879965474815689139353809434745569721289198271770207666136024895814681191336141212587838955773571949863172108443989014239484966592517313881716026632619310653665350414730708044149391693632623737677770958503132559900957627319573086480424677012123270205337426670531424482081681303063973787366424836725398374876909806021827857862165127385635132901489035098832706172589325753639939790557291751600976154590447716922658063151110280384360173747421524760851520990161585823125715907334217365762671423904782795872815050956330928026684589376496497702329736413190609827406335310897924642421345837409011693919642504591288134034988106354008875968200544083643865166178805576089568967275315380819420773325979172784376256611843198910250074918290864751497940031607038455494653859460274524474668123146879434416109933389089926384118474252570445725174593257389895651857165759614812660203107976282541655905060424791140169579003383565748692528007430256234194982864679144763227740055294609039401775363356554719310001754300475047191448998410400158679461792416100164547165513370740739502604427695385538343975505488710997852054011751697475813449260794336895437832211724506873442319898788441285420647428097356258070669831069799352606933921356858813912148073547284632277849080870024677763036055512323866562951788537196730346347012229395816067925091532174890308408865160611190114984434123501246469280288059961342835118847154497712784733617662850621697787177438243625657117794500644777183702219991066950216567576440449979407650379999548450027106659878136038023141268369057831904607927652972776940436130230517870805465115424693952651271010529270703066730244471259739399505146284047674313637399782591845411764133279064606365841529270190302760173394748669603486949765417524293060
[… vendored testdata omitted: this hunk only adds a data file made up of tens of thousands of raw decimal digits (a compression test corpus), with nothing reviewable in it …]
60954957189577217915597300588693646845576676092450906088202212235719254536715191834872587423919410890444115959932760044506556206461164655665487594247369252336955993030355095817626176231849561906494839673002037763874369343999829430209147073618947932692762445186560239559053705128978163455423320114975994896278424327483788032701418676952621180975006405149755889650293004867605208010491537885413909424531691719987628941277221129464568294860281493181560249677887949813777216229359437811004448060797672429276249510784153446429150842764520002042769470698041775832209097020291657347251582904630910359037842977572651720877244740952267166306005469716387943171196873484688738186656751279298575016363411314627530499019135646823804329970695770150789337728658035712790913767420805655493624646 diff --git a/vendor/github.com/klauspost/compress/zip/example_test.go b/vendor/github.com/klauspost/compress/zip/example_test.go new file mode 100644 index 0000000..8527b8e --- /dev/null +++ b/vendor/github.com/klauspost/compress/zip/example_test.go @@ -0,0 +1,105 @@ +// Copyright 2012 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package zip_test + +import ( + "bytes" + "fmt" + "io" + "log" + "os" + + "github.com/klauspost/compress/flate" + "github.com/klauspost/compress/zip" +) + +func ExampleWriter() { + // Create a buffer to write our archive to. + buf := new(bytes.Buffer) + + // Create a new zip archive. + w := zip.NewWriter(buf) + + // Add some files to the archive. + var files = []struct { + Name, Body string + }{ + {"readme.txt", "This archive contains some text files."}, + {"gopher.txt", "Gopher names:\nGeorge\nGeoffrey\nGonzo"}, + {"todo.txt", "Get animal handling licence.\nWrite more examples."}, + } + for _, file := range files { + f, err := w.Create(file.Name) + if err != nil { + log.Fatal(err) + } + _, err = f.Write([]byte(file.Body)) + if err != nil { + log.Fatal(err) + } + } + + // Make sure to check the error on Close. + err := w.Close() + if err != nil { + log.Fatal(err) + } +} + +func ExampleReader() { + // Open a zip archive for reading. + r, err := zip.OpenReader("testdata/readme.zip") + if err != nil { + log.Fatal(err) + } + defer r.Close() + + // Iterate through the files in the archive, + // printing some of their contents. + for _, f := range r.File { + fmt.Printf("Contents of %s:\n", f.Name) + rc, err := f.Open() + if err != nil { + log.Fatal(err) + } + _, err = io.CopyN(os.Stdout, rc, 68) + if err != nil { + log.Fatal(err) + } + rc.Close() + fmt.Println() + } + // Output: + // Contents of README: + // This is the source code repository for the Go programming language. +} + +func ExampleWriter_RegisterCompressor() { + // Override the default Deflate compressor with a higher compression + // level. + + // Create a buffer to write our archive to. + buf := new(bytes.Buffer) + + // Create a new zip archive. + w := zip.NewWriter(buf) + + var fw *flate.Writer + + // Register the deflator. + w.RegisterCompressor(zip.Deflate, func(out io.Writer) (io.WriteCloser, error) { + var err error + if fw == nil { + // Creating a flate compressor for every file is + // expensive, create one and reuse it. + fw, err = flate.NewWriter(out, flate.BestCompression) + } else { + fw.Reset(out) + } + return fw, err + }) + + // Proceed to add files to w. 
+} diff --git a/vendor/github.com/klauspost/compress/zip/reader.go b/vendor/github.com/klauspost/compress/zip/reader.go new file mode 100644 index 0000000..27199dd --- /dev/null +++ b/vendor/github.com/klauspost/compress/zip/reader.go @@ -0,0 +1,521 @@ +// Copyright 2010 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package zip + +import ( + "bufio" + "encoding/binary" + "errors" + "fmt" + "hash" + "io" + "os" + + "github.com/klauspost/crc32" +) + +var ( + ErrFormat = errors.New("zip: not a valid zip file") + ErrAlgorithm = errors.New("zip: unsupported compression algorithm") + ErrChecksum = errors.New("zip: checksum error") +) + +type Reader struct { + r io.ReaderAt + File []*File + Comment string + decompressors map[uint16]Decompressor +} + +type ReadCloser struct { + f *os.File + Reader +} + +type File struct { + FileHeader + zip *Reader + zipr io.ReaderAt + zipsize int64 + headerOffset int64 +} + +func (f *File) hasDataDescriptor() bool { + return f.Flags&0x8 != 0 +} + +// OpenReader will open the Zip file specified by name and return a ReadCloser. +func OpenReader(name string) (*ReadCloser, error) { + f, err := os.Open(name) + if err != nil { + return nil, err + } + fi, err := f.Stat() + if err != nil { + f.Close() + return nil, err + } + r := new(ReadCloser) + if err := r.init(f, fi.Size()); err != nil { + f.Close() + return nil, err + } + r.f = f + return r, nil +} + +// NewReader returns a new Reader reading from r, which is assumed to +// have the given size in bytes. +func NewReader(r io.ReaderAt, size int64) (*Reader, error) { + zr := new(Reader) + if err := zr.init(r, size); err != nil { + return nil, err + } + return zr, nil +} + +func (z *Reader) init(r io.ReaderAt, size int64) error { + end, err := readDirectoryEnd(r, size) + if err != nil { + return err + } + if end.directoryRecords > uint64(size)/fileHeaderLen { + return fmt.Errorf("archive/zip: TOC declares impossible %d files in %d byte zip", end.directoryRecords, size) + } + z.r = r + z.File = make([]*File, 0, end.directoryRecords) + z.Comment = end.comment + rs := io.NewSectionReader(r, 0, size) + if _, err = rs.Seek(int64(end.directoryOffset), os.SEEK_SET); err != nil { + return err + } + buf := bufio.NewReader(rs) + + // The count of files inside a zip is truncated to fit in a uint16. + // Gloss over this by reading headers until we encounter + // a bad one, and then only report a ErrFormat or UnexpectedEOF if + // the file count modulo 65536 is incorrect. + for { + f := &File{zip: z, zipr: r, zipsize: size} + err = readDirectoryHeader(f, buf) + if err == ErrFormat || err == io.ErrUnexpectedEOF { + break + } + if err != nil { + return err + } + z.File = append(z.File, f) + } + if uint16(len(z.File)) != uint16(end.directoryRecords) { // only compare 16 bits here + // Return the readDirectoryHeader error if we read + // the wrong number of directory entries. + return err + } + return nil +} + +// RegisterDecompressor registers or overrides a custom decompressor for a +// specific method ID. If a decompressor for a given method is not found, +// Reader will default to looking up the decompressor at the package level. +// +// Must not be called concurrently with Open on any Files in the Reader. 
+func (z *Reader) RegisterDecompressor(method uint16, dcomp Decompressor) { + if z.decompressors == nil { + z.decompressors = make(map[uint16]Decompressor) + } + z.decompressors[method] = dcomp +} + +func (z *Reader) decompressor(method uint16) Decompressor { + dcomp := z.decompressors[method] + if dcomp == nil { + dcomp = decompressor(method) + } + return dcomp +} + +// Close closes the Zip file, rendering it unusable for I/O. +func (rc *ReadCloser) Close() error { + return rc.f.Close() +} + +// DataOffset returns the offset of the file's possibly-compressed +// data, relative to the beginning of the zip file. +// +// Most callers should instead use Open, which transparently +// decompresses data and verifies checksums. +func (f *File) DataOffset() (offset int64, err error) { + bodyOffset, err := f.findBodyOffset() + if err != nil { + return + } + return f.headerOffset + bodyOffset, nil +} + +// Open returns a ReadCloser that provides access to the File's contents. +// Multiple files may be read concurrently. +func (f *File) Open() (rc io.ReadCloser, err error) { + bodyOffset, err := f.findBodyOffset() + if err != nil { + return + } + size := int64(f.CompressedSize64) + r := io.NewSectionReader(f.zipr, f.headerOffset+bodyOffset, size) + dcomp := f.zip.decompressor(f.Method) + if dcomp == nil { + err = ErrAlgorithm + return + } + rc = dcomp(r) + var desr io.Reader + if f.hasDataDescriptor() { + desr = io.NewSectionReader(f.zipr, f.headerOffset+bodyOffset+size, dataDescriptorLen) + } + rc = &checksumReader{ + rc: rc, + hash: crc32.NewIEEE(), + f: f, + desr: desr, + } + return +} + +type checksumReader struct { + rc io.ReadCloser + hash hash.Hash32 + nread uint64 // number of bytes read so far + f *File + desr io.Reader // if non-nil, where to read the data descriptor + err error // sticky error +} + +func (r *checksumReader) Read(b []byte) (n int, err error) { + if r.err != nil { + return 0, r.err + } + n, err = r.rc.Read(b) + r.hash.Write(b[:n]) + r.nread += uint64(n) + if err == nil { + return + } + if err == io.EOF { + if r.nread != r.f.UncompressedSize64 { + return 0, io.ErrUnexpectedEOF + } + if r.desr != nil { + if err1 := readDataDescriptor(r.desr, r.f); err1 != nil { + if err1 == io.EOF { + err = io.ErrUnexpectedEOF + } else { + err = err1 + } + } else if r.hash.Sum32() != r.f.CRC32 { + err = ErrChecksum + } + } else { + // If there's not a data descriptor, we still compare + // the CRC32 of what we've read against the file header + // or TOC's CRC32, if it seems like it was set. + if r.f.CRC32 != 0 && r.hash.Sum32() != r.f.CRC32 { + err = ErrChecksum + } + } + } + r.err = err + return +} + +func (r *checksumReader) Close() error { return r.rc.Close() } + +// findBodyOffset does the minimum work to verify the file has a header +// and returns the file body offset. +func (f *File) findBodyOffset() (int64, error) { + var buf [fileHeaderLen]byte + if _, err := f.zipr.ReadAt(buf[:], f.headerOffset); err != nil { + return 0, err + } + b := readBuf(buf[:]) + if sig := b.uint32(); sig != fileHeaderSignature { + return 0, ErrFormat + } + b = b[22:] // skip over most of the header + filenameLen := int(b.uint16()) + extraLen := int(b.uint16()) + return int64(fileHeaderLen + filenameLen + extraLen), nil +} + +// readDirectoryHeader attempts to read a directory header from r. +// It returns io.ErrUnexpectedEOF if it cannot read a complete header, +// and ErrFormat if it doesn't find a valid header signature. 
+func readDirectoryHeader(f *File, r io.Reader) error { + var buf [directoryHeaderLen]byte + if _, err := io.ReadFull(r, buf[:]); err != nil { + return err + } + b := readBuf(buf[:]) + if sig := b.uint32(); sig != directoryHeaderSignature { + return ErrFormat + } + f.CreatorVersion = b.uint16() + f.ReaderVersion = b.uint16() + f.Flags = b.uint16() + f.Method = b.uint16() + f.ModifiedTime = b.uint16() + f.ModifiedDate = b.uint16() + f.CRC32 = b.uint32() + f.CompressedSize = b.uint32() + f.UncompressedSize = b.uint32() + f.CompressedSize64 = uint64(f.CompressedSize) + f.UncompressedSize64 = uint64(f.UncompressedSize) + filenameLen := int(b.uint16()) + extraLen := int(b.uint16()) + commentLen := int(b.uint16()) + b = b[4:] // skipped start disk number and internal attributes (2x uint16) + f.ExternalAttrs = b.uint32() + f.headerOffset = int64(b.uint32()) + d := make([]byte, filenameLen+extraLen+commentLen) + if _, err := io.ReadFull(r, d); err != nil { + return err + } + f.Name = string(d[:filenameLen]) + f.Extra = d[filenameLen : filenameLen+extraLen] + f.Comment = string(d[filenameLen+extraLen:]) + + needUSize := f.UncompressedSize == ^uint32(0) + needCSize := f.CompressedSize == ^uint32(0) + needHeaderOffset := f.headerOffset == int64(^uint32(0)) + + if len(f.Extra) > 0 { + // Best effort to find what we need. + // Other zip authors might not even follow the basic format, + // and we'll just ignore the Extra content in that case. + b := readBuf(f.Extra) + for len(b) >= 4 { // need at least tag and size + tag := b.uint16() + size := b.uint16() + if int(size) > len(b) { + break + } + if tag == zip64ExtraId { + // update directory values from the zip64 extra block. + // They should only be consulted if the sizes read earlier + // are maxed out. + // See golang.org/issue/13367. + eb := readBuf(b[:size]) + + if needUSize { + needUSize = false + if len(eb) < 8 { + return ErrFormat + } + f.UncompressedSize64 = eb.uint64() + } + if needCSize { + needCSize = false + if len(eb) < 8 { + return ErrFormat + } + f.CompressedSize64 = eb.uint64() + } + if needHeaderOffset { + needHeaderOffset = false + if len(eb) < 8 { + return ErrFormat + } + f.headerOffset = int64(eb.uint64()) + } + break + } + b = b[size:] + } + } + + if needUSize || needCSize || needHeaderOffset { + return ErrFormat + } + + return nil +} + +func readDataDescriptor(r io.Reader, f *File) error { + var buf [dataDescriptorLen]byte + + // The spec says: "Although not originally assigned a + // signature, the value 0x08074b50 has commonly been adopted + // as a signature value for the data descriptor record. + // Implementers should be aware that ZIP files may be + // encountered with or without this signature marking data + // descriptors and should account for either case when reading + // ZIP files to ensure compatibility." + // + // dataDescriptorLen includes the size of the signature but + // first read just those 4 bytes to see if it exists. + if _, err := io.ReadFull(r, buf[:4]); err != nil { + return err + } + off := 0 + maybeSig := readBuf(buf[:4]) + if maybeSig.uint32() != dataDescriptorSignature { + // No data descriptor signature. Keep these four + // bytes. + off += 4 + } + if _, err := io.ReadFull(r, buf[off:12]); err != nil { + return err + } + b := readBuf(buf[:12]) + if b.uint32() != f.CRC32 { + return ErrChecksum + } + + // The two sizes that follow here can be either 32 bits or 64 bits + // but the spec is not very clear on this and different + // interpretations has been made causing incompatibilities. 
We + // already have the sizes from the central directory so we can + // just ignore these. + + return nil +} + +func readDirectoryEnd(r io.ReaderAt, size int64) (dir *directoryEnd, err error) { + // look for directoryEndSignature in the last 1k, then in the last 65k + var buf []byte + var directoryEndOffset int64 + for i, bLen := range []int64{1024, 65 * 1024} { + if bLen > size { + bLen = size + } + buf = make([]byte, int(bLen)) + if _, err := r.ReadAt(buf, size-bLen); err != nil && err != io.EOF { + return nil, err + } + if p := findSignatureInBlock(buf); p >= 0 { + buf = buf[p:] + directoryEndOffset = size - bLen + int64(p) + break + } + if i == 1 || bLen == size { + return nil, ErrFormat + } + } + + // read header into struct + b := readBuf(buf[4:]) // skip signature + d := &directoryEnd{ + diskNbr: uint32(b.uint16()), + dirDiskNbr: uint32(b.uint16()), + dirRecordsThisDisk: uint64(b.uint16()), + directoryRecords: uint64(b.uint16()), + directorySize: uint64(b.uint32()), + directoryOffset: uint64(b.uint32()), + commentLen: b.uint16(), + } + l := int(d.commentLen) + if l > len(b) { + return nil, errors.New("zip: invalid comment length") + } + d.comment = string(b[:l]) + + // These values mean that the file can be a zip64 file + if d.directoryRecords == 0xffff || d.directorySize == 0xffff || d.directoryOffset == 0xffffffff { + p, err := findDirectory64End(r, directoryEndOffset) + if err == nil && p >= 0 { + err = readDirectory64End(r, p, d) + } + if err != nil { + return nil, err + } + } + // Make sure directoryOffset points to somewhere in our file. + if o := int64(d.directoryOffset); o < 0 || o >= size { + return nil, ErrFormat + } + return d, nil +} + +// findDirectory64End tries to read the zip64 locator just before the +// directory end and returns the offset of the zip64 directory end if +// found. +func findDirectory64End(r io.ReaderAt, directoryEndOffset int64) (int64, error) { + locOffset := directoryEndOffset - directory64LocLen + if locOffset < 0 { + return -1, nil // no need to look for a header outside the file + } + buf := make([]byte, directory64LocLen) + if _, err := r.ReadAt(buf, locOffset); err != nil { + return -1, err + } + b := readBuf(buf) + if sig := b.uint32(); sig != directory64LocSignature { + return -1, nil + } + if b.uint32() != 0 { // number of the disk with the start of the zip64 end of central directory + return -1, nil // the file is not a valid zip64-file + } + p := b.uint64() // relative offset of the zip64 end of central directory record + if b.uint32() != 1 { // total number of disks + return -1, nil // the file is not a valid zip64-file + } + return int64(p), nil +} + +// readDirectory64End reads the zip64 directory end and updates the +// directory end with the zip64 directory end values. 
+func readDirectory64End(r io.ReaderAt, offset int64, d *directoryEnd) (err error) { + buf := make([]byte, directory64EndLen) + if _, err := r.ReadAt(buf, offset); err != nil { + return err + } + + b := readBuf(buf) + if sig := b.uint32(); sig != directory64EndSignature { + return ErrFormat + } + + b = b[12:] // skip dir size, version and version needed (uint64 + 2x uint16) + d.diskNbr = b.uint32() // number of this disk + d.dirDiskNbr = b.uint32() // number of the disk with the start of the central directory + d.dirRecordsThisDisk = b.uint64() // total number of entries in the central directory on this disk + d.directoryRecords = b.uint64() // total number of entries in the central directory + d.directorySize = b.uint64() // size of the central directory + d.directoryOffset = b.uint64() // offset of start of central directory with respect to the starting disk number + + return nil +} + +func findSignatureInBlock(b []byte) int { + for i := len(b) - directoryEndLen; i >= 0; i-- { + // defined from directoryEndSignature in struct.go + if b[i] == 'P' && b[i+1] == 'K' && b[i+2] == 0x05 && b[i+3] == 0x06 { + // n is length of comment + n := int(b[i+directoryEndLen-2]) | int(b[i+directoryEndLen-1])<<8 + if n+directoryEndLen+i <= len(b) { + return i + } + } + } + return -1 +} + +type readBuf []byte + +func (b *readBuf) uint16() uint16 { + v := binary.LittleEndian.Uint16(*b) + *b = (*b)[2:] + return v +} + +func (b *readBuf) uint32() uint32 { + v := binary.LittleEndian.Uint32(*b) + *b = (*b)[4:] + return v +} + +func (b *readBuf) uint64() uint64 { + v := binary.LittleEndian.Uint64(*b) + *b = (*b)[8:] + return v +} diff --git a/vendor/github.com/klauspost/compress/zip/reader_test.go b/vendor/github.com/klauspost/compress/zip/reader_test.go new file mode 100644 index 0000000..8f7e8bf --- /dev/null +++ b/vendor/github.com/klauspost/compress/zip/reader_test.go @@ -0,0 +1,644 @@ +// Copyright 2010 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package zip + +import ( + "bytes" + "encoding/binary" + "encoding/hex" + "io" + "io/ioutil" + "os" + "path/filepath" + "regexp" + "strings" + "testing" + "time" +) + +type ZipTest struct { + Name string + Source func() (r io.ReaderAt, size int64) // if non-nil, used instead of testdata/ file + Comment string + File []ZipTestFile + Error error // the error that Opening this file should return +} + +type ZipTestFile struct { + Name string + Content []byte // if blank, will attempt to compare against File + ContentErr error + File string // name of file to compare to (relative to testdata/) + Mtime string // modified time in format "mm-dd-yy hh:mm:ss" + Mode os.FileMode +} + +// Caution: The Mtime values found for the test files should correspond to +// the values listed with unzip -l . However, the values +// listed by unzip appear to be off by some hours. When creating +// fresh test files and testing them, this issue is not present. +// The test files were created in Sydney, so there might be a time +// zone issue. The time zone information does have to be encoded +// somewhere, because otherwise unzip -l could not provide a different +// time from what the archive/zip package provides, but there appears +// to be no documentation about this. 
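[Editorial note, not part of the vendored file: the caution above concerns the MS-DOS date/time fields stored in FileHeader, which carry no time zone and only 2-second resolution; struct.go later in this diff decodes them as UTC, which is likely why a local-time tool such as `unzip -l` can show a different value. A minimal round-trip sketch, assuming the vendored import path:

	package main

	import (
		"fmt"
		"time"

		"github.com/klauspost/compress/zip"
	)

	func main() {
		// Round-trip a timestamp through the MS-DOS fields; the result comes
		// back as UTC with 2-second resolution.
		fh := new(zip.FileHeader)
		fh.SetModTime(time.Date(2009, time.November, 10, 23, 45, 58, 0, time.UTC))
		fmt.Println(fh.ModTime()) // 2009-11-10 23:45:58 +0000 UTC
	}
]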
+ +var tests = []ZipTest{ + { + Name: "test.zip", + Comment: "This is a zipfile comment.", + File: []ZipTestFile{ + { + Name: "test.txt", + Content: []byte("This is a test text file.\n"), + Mtime: "09-05-10 12:12:02", + Mode: 0644, + }, + { + Name: "gophercolor16x16.png", + File: "gophercolor16x16.png", + Mtime: "09-05-10 15:52:58", + Mode: 0644, + }, + }, + }, + { + Name: "test-trailing-junk.zip", + Comment: "This is a zipfile comment.", + File: []ZipTestFile{ + { + Name: "test.txt", + Content: []byte("This is a test text file.\n"), + Mtime: "09-05-10 12:12:02", + Mode: 0644, + }, + { + Name: "gophercolor16x16.png", + File: "gophercolor16x16.png", + Mtime: "09-05-10 15:52:58", + Mode: 0644, + }, + }, + }, + { + Name: "r.zip", + Source: returnRecursiveZip, + File: []ZipTestFile{ + { + Name: "r/r.zip", + Content: rZipBytes(), + Mtime: "03-04-10 00:24:16", + Mode: 0666, + }, + }, + }, + { + Name: "symlink.zip", + File: []ZipTestFile{ + { + Name: "symlink", + Content: []byte("../target"), + Mode: 0777 | os.ModeSymlink, + }, + }, + }, + { + Name: "readme.zip", + }, + { + Name: "readme.notzip", + Error: ErrFormat, + }, + { + Name: "dd.zip", + File: []ZipTestFile{ + { + Name: "filename", + Content: []byte("This is a test textfile.\n"), + Mtime: "02-02-11 13:06:20", + Mode: 0666, + }, + }, + }, + { + // created in windows XP file manager. + Name: "winxp.zip", + File: crossPlatform, + }, + { + // created by Zip 3.0 under Linux + Name: "unix.zip", + File: crossPlatform, + }, + { + // created by Go, before we wrote the "optional" data + // descriptor signatures (which are required by OS X) + Name: "go-no-datadesc-sig.zip", + File: []ZipTestFile{ + { + Name: "foo.txt", + Content: []byte("foo\n"), + Mtime: "03-08-12 16:59:10", + Mode: 0644, + }, + { + Name: "bar.txt", + Content: []byte("bar\n"), + Mtime: "03-08-12 16:59:12", + Mode: 0644, + }, + }, + }, + { + // created by Go, after we wrote the "optional" data + // descriptor signatures (which are required by OS X) + Name: "go-with-datadesc-sig.zip", + File: []ZipTestFile{ + { + Name: "foo.txt", + Content: []byte("foo\n"), + Mode: 0666, + }, + { + Name: "bar.txt", + Content: []byte("bar\n"), + Mode: 0666, + }, + }, + }, + { + Name: "Bad-CRC32-in-data-descriptor", + Source: returnCorruptCRC32Zip, + File: []ZipTestFile{ + { + Name: "foo.txt", + Content: []byte("foo\n"), + Mode: 0666, + ContentErr: ErrChecksum, + }, + { + Name: "bar.txt", + Content: []byte("bar\n"), + Mode: 0666, + }, + }, + }, + // Tests that we verify (and accept valid) crc32s on files + // with crc32s in their file header (not in data descriptors) + { + Name: "crc32-not-streamed.zip", + File: []ZipTestFile{ + { + Name: "foo.txt", + Content: []byte("foo\n"), + Mtime: "03-08-12 16:59:10", + Mode: 0644, + }, + { + Name: "bar.txt", + Content: []byte("bar\n"), + Mtime: "03-08-12 16:59:12", + Mode: 0644, + }, + }, + }, + // Tests that we verify (and reject invalid) crc32s on files + // with crc32s in their file header (not in data descriptors) + { + Name: "crc32-not-streamed.zip", + Source: returnCorruptNotStreamedZip, + File: []ZipTestFile{ + { + Name: "foo.txt", + Content: []byte("foo\n"), + Mtime: "03-08-12 16:59:10", + Mode: 0644, + ContentErr: ErrChecksum, + }, + { + Name: "bar.txt", + Content: []byte("bar\n"), + Mtime: "03-08-12 16:59:12", + Mode: 0644, + }, + }, + }, + { + Name: "zip64.zip", + File: []ZipTestFile{ + { + Name: "README", + Content: []byte("This small file is in ZIP64 format.\n"), + Mtime: "08-10-12 14:33:32", + Mode: 0644, + }, + }, + }, + // Another zip64 file with 
different Extras fields. (golang.org/issue/7069) + { + Name: "zip64-2.zip", + File: []ZipTestFile{ + { + Name: "README", + Content: []byte("This small file is in ZIP64 format.\n"), + Mtime: "08-10-12 14:33:32", + Mode: 0644, + }, + }, + }, +} + +var crossPlatform = []ZipTestFile{ + { + Name: "hello", + Content: []byte("world \r\n"), + Mode: 0666, + }, + { + Name: "dir/bar", + Content: []byte("foo \r\n"), + Mode: 0666, + }, + { + Name: "dir/empty/", + Content: []byte{}, + Mode: os.ModeDir | 0777, + }, + { + Name: "readonly", + Content: []byte("important \r\n"), + Mode: 0444, + }, +} + +func TestReader(t *testing.T) { + for _, zt := range tests { + readTestZip(t, zt) + } +} + +func readTestZip(t *testing.T, zt ZipTest) { + var z *Reader + var err error + if zt.Source != nil { + rat, size := zt.Source() + z, err = NewReader(rat, size) + } else { + var rc *ReadCloser + rc, err = OpenReader(filepath.Join("testdata", zt.Name)) + if err == nil { + defer rc.Close() + z = &rc.Reader + } + } + if err != zt.Error { + t.Errorf("%s: error=%v, want %v", zt.Name, err, zt.Error) + return + } + + // bail if file is not zip + if err == ErrFormat { + return + } + + // bail here if no Files expected to be tested + // (there may actually be files in the zip, but we don't care) + if zt.File == nil { + return + } + + if z.Comment != zt.Comment { + t.Errorf("%s: comment=%q, want %q", zt.Name, z.Comment, zt.Comment) + } + if len(z.File) != len(zt.File) { + t.Fatalf("%s: file count=%d, want %d", zt.Name, len(z.File), len(zt.File)) + } + + // test read of each file + for i, ft := range zt.File { + readTestFile(t, zt, ft, z.File[i]) + } + + // test simultaneous reads + n := 0 + done := make(chan bool) + for i := 0; i < 5; i++ { + for j, ft := range zt.File { + go func(j int, ft ZipTestFile) { + readTestFile(t, zt, ft, z.File[j]) + done <- true + }(j, ft) + n++ + } + } + for ; n > 0; n-- { + <-done + } +} + +func readTestFile(t *testing.T, zt ZipTest, ft ZipTestFile, f *File) { + if f.Name != ft.Name { + t.Errorf("%s: name=%q, want %q", zt.Name, f.Name, ft.Name) + } + + if ft.Mtime != "" { + mtime, err := time.Parse("01-02-06 15:04:05", ft.Mtime) + if err != nil { + t.Error(err) + return + } + if ft := f.ModTime(); !ft.Equal(mtime) { + t.Errorf("%s: %s: mtime=%s, want %s", zt.Name, f.Name, ft, mtime) + } + } + + testFileMode(t, zt.Name, f, ft.Mode) + + var b bytes.Buffer + r, err := f.Open() + if err != nil { + t.Errorf("%s: %v", zt.Name, err) + return + } + + _, err = io.Copy(&b, r) + if err != ft.ContentErr { + t.Errorf("%s: copying contents: %v (want %v)", zt.Name, err, ft.ContentErr) + } + if err != nil { + return + } + r.Close() + + size := uint64(f.UncompressedSize) + if size == uint32max { + size = f.UncompressedSize64 + } + if g := uint64(b.Len()); g != size { + t.Errorf("%v: read %v bytes but f.UncompressedSize == %v", f.Name, g, size) + } + + var c []byte + if ft.Content != nil { + c = ft.Content + } else if c, err = ioutil.ReadFile("testdata/" + ft.File); err != nil { + t.Error(err) + return + } + + if b.Len() != len(c) { + t.Errorf("%s: len=%d, want %d", f.Name, b.Len(), len(c)) + return + } + + for i, b := range b.Bytes() { + if b != c[i] { + t.Errorf("%s: content[%d]=%q want %q", f.Name, i, b, c[i]) + return + } + } +} + +func testFileMode(t *testing.T, zipName string, f *File, want os.FileMode) { + mode := f.Mode() + if want == 0 { + t.Errorf("%s: %s mode: got %v, want none", zipName, f.Name, mode) + } else if mode != want { + t.Errorf("%s: %s mode: want %v, got %v", zipName, f.Name, want, mode) + } +} 
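[Editorial note, not part of the vendored file: readTestZip above exercises the guarantee documented on File.Open in reader.go that multiple files in one archive may be read concurrently, by opening every entry from several goroutines at once. A stand-alone sketch of the same pattern, assuming the vendored import path and one of the testdata archives added in this diff:

	package main

	import (
		"io"
		"io/ioutil"
		"log"
		"sync"

		"github.com/klauspost/compress/zip"
	)

	func main() {
		r, err := zip.OpenReader("testdata/test.zip") // archive name is illustrative
		if err != nil {
			log.Fatal(err)
		}
		defer r.Close()

		var wg sync.WaitGroup
		for _, f := range r.File {
			wg.Add(1)
			go func(f *zip.File) {
				defer wg.Done()
				rc, err := f.Open() // safe to call concurrently per its doc comment
				if err != nil {
					log.Println(err)
					return
				}
				defer rc.Close()
				io.Copy(ioutil.Discard, rc) // drain the entry; copy errors ignored for brevity
			}(f)
		}
		wg.Wait()
	}
]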
+ +func TestInvalidFiles(t *testing.T) { + const size = 1024 * 70 // 70kb + b := make([]byte, size) + + // zeroes + _, err := NewReader(bytes.NewReader(b), size) + if err != ErrFormat { + t.Errorf("zeroes: error=%v, want %v", err, ErrFormat) + } + + // repeated directoryEndSignatures + sig := make([]byte, 4) + binary.LittleEndian.PutUint32(sig, directoryEndSignature) + for i := 0; i < size-4; i += 4 { + copy(b[i:i+4], sig) + } + _, err = NewReader(bytes.NewReader(b), size) + if err != ErrFormat { + t.Errorf("sigs: error=%v, want %v", err, ErrFormat) + } +} + +func messWith(fileName string, corrupter func(b []byte)) (r io.ReaderAt, size int64) { + data, err := ioutil.ReadFile(filepath.Join("testdata", fileName)) + if err != nil { + panic("Error reading " + fileName + ": " + err.Error()) + } + corrupter(data) + return bytes.NewReader(data), int64(len(data)) +} + +func returnCorruptCRC32Zip() (r io.ReaderAt, size int64) { + return messWith("go-with-datadesc-sig.zip", func(b []byte) { + // Corrupt one of the CRC32s in the data descriptor: + b[0x2d]++ + }) +} + +func returnCorruptNotStreamedZip() (r io.ReaderAt, size int64) { + return messWith("crc32-not-streamed.zip", func(b []byte) { + // Corrupt foo.txt's final crc32 byte, in both + // the file header and TOC. (0x7e -> 0x7f) + b[0x11]++ + b[0x9d]++ + + // TODO(bradfitz): add a new test that only corrupts + // one of these values, and verify that that's also an + // error. Currently, the reader code doesn't verify the + // fileheader and TOC's crc32 match if they're both + // non-zero and only the second line above, the TOC, + // is what matters. + }) +} + +// rZipBytes returns the bytes of a recursive zip file, without +// putting it on disk and triggering certain virus scanners. +func rZipBytes() []byte { + s := ` +0000000 50 4b 03 04 14 00 00 00 08 00 08 03 64 3c f9 f4 +0000010 89 64 48 01 00 00 b8 01 00 00 07 00 00 00 72 2f +0000020 72 2e 7a 69 70 00 25 00 da ff 50 4b 03 04 14 00 +0000030 00 00 08 00 08 03 64 3c f9 f4 89 64 48 01 00 00 +0000040 b8 01 00 00 07 00 00 00 72 2f 72 2e 7a 69 70 00 +0000050 2f 00 d0 ff 00 25 00 da ff 50 4b 03 04 14 00 00 +0000060 00 08 00 08 03 64 3c f9 f4 89 64 48 01 00 00 b8 +0000070 01 00 00 07 00 00 00 72 2f 72 2e 7a 69 70 00 2f +0000080 00 d0 ff c2 54 8e 57 39 00 05 00 fa ff c2 54 8e +0000090 57 39 00 05 00 fa ff 00 05 00 fa ff 00 14 00 eb +00000a0 ff c2 54 8e 57 39 00 05 00 fa ff 00 05 00 fa ff +00000b0 00 14 00 eb ff 42 88 21 c4 00 00 14 00 eb ff 42 +00000c0 88 21 c4 00 00 14 00 eb ff 42 88 21 c4 00 00 14 +00000d0 00 eb ff 42 88 21 c4 00 00 14 00 eb ff 42 88 21 +00000e0 c4 00 00 00 00 ff ff 00 00 00 ff ff 00 34 00 cb +00000f0 ff 42 88 21 c4 00 00 00 00 ff ff 00 00 00 ff ff +0000100 00 34 00 cb ff 42 e8 21 5e 0f 00 00 00 ff ff 0a +0000110 f0 66 64 12 61 c0 15 dc e8 a0 48 bf 48 af 2a b3 +0000120 20 c0 9b 95 0d c4 67 04 42 53 06 06 06 40 00 06 +0000130 00 f9 ff 6d 01 00 00 00 00 42 e8 21 5e 0f 00 00 +0000140 00 ff ff 0a f0 66 64 12 61 c0 15 dc e8 a0 48 bf +0000150 48 af 2a b3 20 c0 9b 95 0d c4 67 04 42 53 06 06 +0000160 06 40 00 06 00 f9 ff 6d 01 00 00 00 00 50 4b 01 +0000170 02 14 00 14 00 00 00 08 00 08 03 64 3c f9 f4 89 +0000180 64 48 01 00 00 b8 01 00 00 07 00 00 00 00 00 00 +0000190 00 00 00 00 00 00 00 00 00 00 00 72 2f 72 2e 7a +00001a0 69 70 50 4b 05 06 00 00 00 00 01 00 01 00 35 00 +00001b0 00 00 6d 01 00 00 00 00` + s = regexp.MustCompile(`[0-9a-f]{7}`).ReplaceAllString(s, "") + s = regexp.MustCompile(`\s+`).ReplaceAllString(s, "") + b, err := hex.DecodeString(s) + if err != nil { + panic(err) + } 
+ return b +} + +func returnRecursiveZip() (r io.ReaderAt, size int64) { + b := rZipBytes() + return bytes.NewReader(b), int64(len(b)) +} + +func TestIssue8186(t *testing.T) { + // Directory headers & data found in the TOC of a JAR file. + dirEnts := []string{ + "PK\x01\x02\n\x00\n\x00\x00\b\x00\x004\x9d3?\xaa\x1b\x06\xf0\x81\x02\x00\x00\x81\x02\x00\x00-\x00\x05\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00res/drawable-xhdpi-v4/ic_actionbar_accept.png\xfe\xca\x00\x00\x00", + "PK\x01\x02\n\x00\n\x00\x00\b\x00\x004\x9d3?\x90K\x89\xc7t\n\x00\x00t\n\x00\x00\x0e\x00\x03\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\xd1\x02\x00\x00resources.arsc\x00\x00\x00", + "PK\x01\x02\x14\x00\x14\x00\b\b\b\x004\x9d3?\xff$\x18\xed3\x03\x00\x00\xb4\b\x00\x00\x13\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00t\r\x00\x00AndroidManifest.xml", + "PK\x01\x02\x14\x00\x14\x00\b\b\b\x004\x9d3?\x14\xc5K\xab\x192\x02\x00\xc8\xcd\x04\x00\v\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\xe8\x10\x00\x00classes.dex", + "PK\x01\x02\x14\x00\x14\x00\b\b\b\x004\x9d3?E\x96\nD\xac\x01\x00\x00P\x03\x00\x00&\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00:C\x02\x00res/layout/actionbar_set_wallpaper.xml", + "PK\x01\x02\x14\x00\x14\x00\b\b\b\x004\x9d3?Ļ\x14\xe3\xd8\x01\x00\x00\xd8\x03\x00\x00 \x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00:E\x02\x00res/layout/wallpaper_cropper.xml", + "PK\x01\x02\x14\x00\x14\x00\b\b\b\x004\x9d3?}\xc1\x15\x9eZ\x01\x00\x00!\x02\x00\x00\x14\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00`G\x02\x00META-INF/MANIFEST.MF", + "PK\x01\x02\x14\x00\x14\x00\b\b\b\x004\x9d3?\xe6\x98Ьo\x01\x00\x00\x84\x02\x00\x00\x10\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\xfcH\x02\x00META-INF/CERT.SF", + "PK\x01\x02\x14\x00\x14\x00\b\b\b\x004\x9d3?\xbfP\x96b\x86\x04\x00\x00\xb2\x06\x00\x00\x11\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\xa9J\x02\x00META-INF/CERT.RSA", + } + for i, s := range dirEnts { + var f File + err := readDirectoryHeader(&f, strings.NewReader(s)) + if err != nil { + t.Errorf("error reading #%d: %v", i, err) + } + } +} + +// Verify we return ErrUnexpectedEOF when length is short. +func TestIssue10957(t *testing.T) { + data := []byte("PK\x03\x040000000PK\x01\x0200000" + + "0000000000000000000\x00" + + "\x00\x00\x00\x00\x00000000000000PK\x01" + + "\x020000000000000000000" + + "00000\v\x00\x00\x00\x00\x00000000000" + + "00000000000000PK\x01\x0200" + + "00000000000000000000" + + "00\v\x00\x00\x00\x00\x00000000000000" + + "00000000000PK\x01\x020000<" + + "0\x00\x0000000000000000\v\x00\v" + + "\x00\x00\x00\x00\x0000000000\x00\x00\x00\x00000" + + "00000000PK\x01\x0200000000" + + "0000000000000000\v\x00\x00\x00" + + "\x00\x0000PK\x05\x06000000\x05\x000000" + + "\v\x00\x00\x00\x00\x00") + z, err := NewReader(bytes.NewReader(data), int64(len(data))) + if err != nil { + t.Fatal(err) + } + for i, f := range z.File { + r, err := f.Open() + if err != nil { + continue + } + if f.UncompressedSize64 < 1e6 { + n, err := io.Copy(ioutil.Discard, r) + if i == 3 && err != io.ErrUnexpectedEOF { + t.Errorf("File[3] error = %v; want io.ErrUnexpectedEOF", err) + } + if err == nil && uint64(n) != f.UncompressedSize64 { + t.Errorf("file %d: bad size: copied=%d; want=%d", i, n, f.UncompressedSize64) + } + } + r.Close() + } +} + +// Verify the number of files is sane. 
+func TestIssue10956(t *testing.T) { + data := []byte("PK\x06\x06PK\x06\a0000\x00\x00\x00\x00\x00\x00\x00\x00" + + "0000PK\x05\x06000000000000" + + "0000\v\x00000\x00\x00\x00\x00\x00\x00\x000") + _, err := NewReader(bytes.NewReader(data), int64(len(data))) + const want = "TOC declares impossible 3472328296227680304 files in 57 byte" + if err == nil && !strings.Contains(err.Error(), want) { + t.Errorf("error = %v; want %q", err, want) + } +} + +// Verify we return ErrUnexpectedEOF when reading truncated data descriptor. +func TestIssue11146(t *testing.T) { + data := []byte("PK\x03\x040000000000000000" + + "000000\x01\x00\x00\x000\x01\x00\x00\xff\xff0000" + + "0000000000000000PK\x01\x02" + + "0000\b0\b\x00000000000000" + + "\x00\x00\x00\x00\x00\x00\x00\x00\x00\x000000PK\x05\x06\x00\x00" + + "\x00\x0000\x01\x0000008\x00\x00\x00\x00\x00") + z, err := NewReader(bytes.NewReader(data), int64(len(data))) + if err != nil { + t.Fatal(err) + } + r, err := z.File[0].Open() + if err != nil { + t.Fatal(err) + } + _, err = ioutil.ReadAll(r) + if err != io.ErrUnexpectedEOF { + t.Errorf("File[0] error = %v; want io.ErrUnexpectedEOF", err) + } + r.Close() +} + +// Verify we do not treat non-zip64 archives as zip64 +func TestIssue12449(t *testing.T) { + data := []byte{ + 0x50, 0x4b, 0x03, 0x04, 0x14, 0x00, 0x08, 0x00, + 0x00, 0x00, 0x6b, 0xb4, 0xba, 0x46, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x03, 0x00, 0x18, 0x00, 0xca, 0x64, + 0x55, 0x75, 0x78, 0x0b, 0x00, 0x50, 0x4b, 0x05, + 0x06, 0x00, 0x00, 0x00, 0x00, 0x01, 0x00, 0x01, + 0x00, 0x49, 0x00, 0x00, 0x00, 0x44, 0x00, 0x00, + 0x00, 0x31, 0x31, 0x31, 0x32, 0x32, 0x32, 0x0a, + 0x50, 0x4b, 0x07, 0x08, 0x1d, 0x88, 0x77, 0xb0, + 0x07, 0x00, 0x00, 0x00, 0x07, 0x00, 0x00, 0x00, + 0x50, 0x4b, 0x01, 0x02, 0x14, 0x03, 0x14, 0x00, + 0x08, 0x00, 0x00, 0x00, 0x6b, 0xb4, 0xba, 0x46, + 0x1d, 0x88, 0x77, 0xb0, 0x07, 0x00, 0x00, 0x00, + 0x07, 0x00, 0x00, 0x00, 0x03, 0x00, 0x18, 0x00, + 0x20, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0xa0, 0x81, 0x00, 0x00, 0x00, 0x00, 0xca, 0x64, + 0x55, 0x75, 0x78, 0x0b, 0x00, 0x50, 0x4b, 0x05, + 0x06, 0x00, 0x00, 0x00, 0x00, 0x01, 0x00, 0x01, + 0x00, 0x49, 0x00, 0x00, 0x00, 0x44, 0x00, 0x00, + 0x00, 0x97, 0x2b, 0x49, 0x23, 0x05, 0xc5, 0x0b, + 0xa7, 0xd1, 0x52, 0xa2, 0x9c, 0x50, 0x4b, 0x06, + 0x07, 0xc8, 0x19, 0xc1, 0xaf, 0x94, 0x9c, 0x61, + 0x44, 0xbe, 0x94, 0x19, 0x42, 0x58, 0x12, 0xc6, + 0x5b, 0x50, 0x4b, 0x05, 0x06, 0x00, 0x00, 0x00, + 0x00, 0x01, 0x00, 0x01, 0x00, 0x69, 0x00, 0x00, + 0x00, 0x50, 0x00, 0x00, 0x00, 0x00, 0x00, + } + // Read in the archive. + _, err := NewReader(bytes.NewReader([]byte(data)), int64(len(data))) + if err != nil { + t.Errorf("Error reading the archive: %v", err) + } +} diff --git a/vendor/github.com/klauspost/compress/zip/register.go b/vendor/github.com/klauspost/compress/zip/register.go new file mode 100644 index 0000000..90b582d --- /dev/null +++ b/vendor/github.com/klauspost/compress/zip/register.go @@ -0,0 +1,111 @@ +// Copyright 2010 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package zip + +import ( + "errors" + "io" + "io/ioutil" + "sync" + + "github.com/klauspost/compress/flate" +) + +// A Compressor returns a compressing writer, writing to the +// provided writer. On Close, any pending data should be flushed. +type Compressor func(io.Writer) (io.WriteCloser, error) + +// Decompressor is a function that wraps a Reader with a decompressing Reader. 
+// The decompressed ReadCloser is returned to callers who open files from +// within the archive. These callers are responsible for closing this reader +// when they're finished reading. +type Decompressor func(io.Reader) io.ReadCloser + +var flateWriterPool sync.Pool + +func newFlateWriter(w io.Writer) io.WriteCloser { + fw, ok := flateWriterPool.Get().(*flate.Writer) + if ok { + fw.Reset(w) + } else { + fw, _ = flate.NewWriter(w, 5) + } + return &pooledFlateWriter{fw: fw} +} + +type pooledFlateWriter struct { + mu sync.Mutex // guards Close and Write + fw *flate.Writer +} + +func (w *pooledFlateWriter) Write(p []byte) (n int, err error) { + w.mu.Lock() + defer w.mu.Unlock() + if w.fw == nil { + return 0, errors.New("Write after Close") + } + return w.fw.Write(p) +} + +func (w *pooledFlateWriter) Close() error { + w.mu.Lock() + defer w.mu.Unlock() + var err error + if w.fw != nil { + err = w.fw.Close() + flateWriterPool.Put(w.fw) + w.fw = nil + } + return err +} + +var ( + mu sync.RWMutex // guards compressor and decompressor maps + + compressors = map[uint16]Compressor{ + Store: func(w io.Writer) (io.WriteCloser, error) { return &nopCloser{w}, nil }, + Deflate: func(w io.Writer) (io.WriteCloser, error) { return newFlateWriter(w), nil }, + } + + decompressors = map[uint16]Decompressor{ + Store: ioutil.NopCloser, + Deflate: flate.NewReader, + } +) + +// RegisterDecompressor allows custom decompressors for a specified method ID. +func RegisterDecompressor(method uint16, d Decompressor) { + mu.Lock() + defer mu.Unlock() + + if _, ok := decompressors[method]; ok { + panic("decompressor already registered") + } + decompressors[method] = d +} + +// RegisterCompressor registers custom compressors for a specified method ID. +// The common methods Store and Deflate are built in. +func RegisterCompressor(method uint16, comp Compressor) { + mu.Lock() + defer mu.Unlock() + + if _, ok := compressors[method]; ok { + panic("compressor already registered") + } + compressors[method] = comp +} + +func compressor(method uint16) Compressor { + mu.RLock() + defer mu.RUnlock() + return compressors[method] +} + +func decompressor(method uint16) Decompressor { + mu.RLock() + defer mu.RUnlock() + return decompressors[method] +} diff --git a/vendor/github.com/klauspost/compress/zip/struct.go b/vendor/github.com/klauspost/compress/zip/struct.go new file mode 100644 index 0000000..5ee4f88 --- /dev/null +++ b/vendor/github.com/klauspost/compress/zip/struct.go @@ -0,0 +1,313 @@ +// Copyright 2010 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +/* +Package zip provides support for reading and writing ZIP archives. + +See: http://www.pkware.com/documents/casestudies/APPNOTE.TXT + +This package does not support disk spanning. + +A note about ZIP64: + +To be backwards compatible the FileHeader has both 32 and 64 bit Size +fields. The 64 bit fields will always contain the correct value and +for normal archives both fields will be the same. For files requiring +the ZIP64 format the 32 bit fields will be 0xffffffff and the 64 bit +fields must be used instead. +*/ +package zip + +import ( + "os" + "path" + "time" +) + +// Compression methods. 
+const ( + Store uint16 = 0 + Deflate uint16 = 8 +) + +const ( + fileHeaderSignature = 0x04034b50 + directoryHeaderSignature = 0x02014b50 + directoryEndSignature = 0x06054b50 + directory64LocSignature = 0x07064b50 + directory64EndSignature = 0x06064b50 + dataDescriptorSignature = 0x08074b50 // de-facto standard; required by OS X Finder + fileHeaderLen = 30 // + filename + extra + directoryHeaderLen = 46 // + filename + extra + comment + directoryEndLen = 22 // + comment + dataDescriptorLen = 16 // four uint32: descriptor signature, crc32, compressed size, size + dataDescriptor64Len = 24 // descriptor with 8 byte sizes + directory64LocLen = 20 // + directory64EndLen = 56 // + extra + + // Constants for the first byte in CreatorVersion + creatorFAT = 0 + creatorUnix = 3 + creatorNTFS = 11 + creatorVFAT = 14 + creatorMacOSX = 19 + + // version numbers + zipVersion20 = 20 // 2.0 + zipVersion45 = 45 // 4.5 (reads and writes zip64 archives) + + // limits for non zip64 files + uint16max = (1 << 16) - 1 + uint32max = (1 << 32) - 1 + + // extra header id's + zip64ExtraId = 0x0001 // zip64 Extended Information Extra Field +) + +// FileHeader describes a file within a zip file. +// See the zip spec for details. +type FileHeader struct { + // Name is the name of the file. + // It must be a relative path: it must not start with a drive + // letter (e.g. C:) or leading slash, and only forward slashes + // are allowed. + Name string + + CreatorVersion uint16 + ReaderVersion uint16 + Flags uint16 + Method uint16 + ModifiedTime uint16 // MS-DOS time + ModifiedDate uint16 // MS-DOS date + CRC32 uint32 + CompressedSize uint32 // Deprecated: Use CompressedSize64 instead. + UncompressedSize uint32 // Deprecated: Use UncompressedSize64 instead. + CompressedSize64 uint64 + UncompressedSize64 uint64 + Extra []byte + ExternalAttrs uint32 // Meaning depends on CreatorVersion + Comment string +} + +// FileInfo returns an os.FileInfo for the FileHeader. +func (h *FileHeader) FileInfo() os.FileInfo { + return headerFileInfo{h} +} + +// headerFileInfo implements os.FileInfo. +type headerFileInfo struct { + fh *FileHeader +} + +func (fi headerFileInfo) Name() string { return path.Base(fi.fh.Name) } +func (fi headerFileInfo) Size() int64 { + if fi.fh.UncompressedSize64 > 0 { + return int64(fi.fh.UncompressedSize64) + } + return int64(fi.fh.UncompressedSize) +} +func (fi headerFileInfo) IsDir() bool { return fi.Mode().IsDir() } +func (fi headerFileInfo) ModTime() time.Time { return fi.fh.ModTime() } +func (fi headerFileInfo) Mode() os.FileMode { return fi.fh.Mode() } +func (fi headerFileInfo) Sys() interface{} { return fi.fh } + +// FileInfoHeader creates a partially-populated FileHeader from an +// os.FileInfo. +// Because os.FileInfo's Name method returns only the base name of +// the file it describes, it may be necessary to modify the Name field +// of the returned header to provide the full path name of the file. 
+func FileInfoHeader(fi os.FileInfo) (*FileHeader, error) { + size := fi.Size() + fh := &FileHeader{ + Name: fi.Name(), + UncompressedSize64: uint64(size), + } + fh.SetModTime(fi.ModTime()) + fh.SetMode(fi.Mode()) + if fh.UncompressedSize64 > uint32max { + fh.UncompressedSize = uint32max + } else { + fh.UncompressedSize = uint32(fh.UncompressedSize64) + } + return fh, nil +} + +type directoryEnd struct { + diskNbr uint32 // unused + dirDiskNbr uint32 // unused + dirRecordsThisDisk uint64 // unused + directoryRecords uint64 + directorySize uint64 + directoryOffset uint64 // relative to file + commentLen uint16 + comment string +} + +// msDosTimeToTime converts an MS-DOS date and time into a time.Time. +// The resolution is 2s. +// See: http://msdn.microsoft.com/en-us/library/ms724247(v=VS.85).aspx +func msDosTimeToTime(dosDate, dosTime uint16) time.Time { + return time.Date( + // date bits 0-4: day of month; 5-8: month; 9-15: years since 1980 + int(dosDate>>9+1980), + time.Month(dosDate>>5&0xf), + int(dosDate&0x1f), + + // time bits 0-4: second/2; 5-10: minute; 11-15: hour + int(dosTime>>11), + int(dosTime>>5&0x3f), + int(dosTime&0x1f*2), + 0, // nanoseconds + + time.UTC, + ) +} + +// timeToMsDosTime converts a time.Time to an MS-DOS date and time. +// The resolution is 2s. +// See: http://msdn.microsoft.com/en-us/library/ms724274(v=VS.85).aspx +func timeToMsDosTime(t time.Time) (fDate uint16, fTime uint16) { + t = t.In(time.UTC) + fDate = uint16(t.Day() + int(t.Month())<<5 + (t.Year()-1980)<<9) + fTime = uint16(t.Second()/2 + t.Minute()<<5 + t.Hour()<<11) + return +} + +// ModTime returns the modification time in UTC. +// The resolution is 2s. +func (h *FileHeader) ModTime() time.Time { + return msDosTimeToTime(h.ModifiedDate, h.ModifiedTime) +} + +// SetModTime sets the ModifiedTime and ModifiedDate fields to the given time in UTC. +// The resolution is 2s. +func (h *FileHeader) SetModTime(t time.Time) { + h.ModifiedDate, h.ModifiedTime = timeToMsDosTime(t) +} + +const ( + // Unix constants. The specification doesn't mention them, + // but these seem to be the values agreed on by tools. + s_IFMT = 0xf000 + s_IFSOCK = 0xc000 + s_IFLNK = 0xa000 + s_IFREG = 0x8000 + s_IFBLK = 0x6000 + s_IFDIR = 0x4000 + s_IFCHR = 0x2000 + s_IFIFO = 0x1000 + s_ISUID = 0x800 + s_ISGID = 0x400 + s_ISVTX = 0x200 + + msdosDir = 0x10 + msdosReadOnly = 0x01 +) + +// Mode returns the permission and mode bits for the FileHeader. +func (h *FileHeader) Mode() (mode os.FileMode) { + switch h.CreatorVersion >> 8 { + case creatorUnix, creatorMacOSX: + mode = unixModeToFileMode(h.ExternalAttrs >> 16) + case creatorNTFS, creatorVFAT, creatorFAT: + mode = msdosModeToFileMode(h.ExternalAttrs) + } + if len(h.Name) > 0 && h.Name[len(h.Name)-1] == '/' { + mode |= os.ModeDir + } + return mode +} + +// SetMode changes the permission and mode bits for the FileHeader. +func (h *FileHeader) SetMode(mode os.FileMode) { + h.CreatorVersion = h.CreatorVersion&0xff | creatorUnix<<8 + h.ExternalAttrs = fileModeToUnixMode(mode) << 16 + + // set MSDOS attributes too, as the original zip does. 
+ if mode&os.ModeDir != 0 { + h.ExternalAttrs |= msdosDir + } + if mode&0200 == 0 { + h.ExternalAttrs |= msdosReadOnly + } +} + +// isZip64 reports whether the file size exceeds the 32 bit limit +func (fh *FileHeader) isZip64() bool { + return fh.CompressedSize64 >= uint32max || fh.UncompressedSize64 >= uint32max +} + +func msdosModeToFileMode(m uint32) (mode os.FileMode) { + if m&msdosDir != 0 { + mode = os.ModeDir | 0777 + } else { + mode = 0666 + } + if m&msdosReadOnly != 0 { + mode &^= 0222 + } + return mode +} + +func fileModeToUnixMode(mode os.FileMode) uint32 { + var m uint32 + switch mode & os.ModeType { + default: + m = s_IFREG + case os.ModeDir: + m = s_IFDIR + case os.ModeSymlink: + m = s_IFLNK + case os.ModeNamedPipe: + m = s_IFIFO + case os.ModeSocket: + m = s_IFSOCK + case os.ModeDevice: + if mode&os.ModeCharDevice != 0 { + m = s_IFCHR + } else { + m = s_IFBLK + } + } + if mode&os.ModeSetuid != 0 { + m |= s_ISUID + } + if mode&os.ModeSetgid != 0 { + m |= s_ISGID + } + if mode&os.ModeSticky != 0 { + m |= s_ISVTX + } + return m | uint32(mode&0777) +} + +func unixModeToFileMode(m uint32) os.FileMode { + mode := os.FileMode(m & 0777) + switch m & s_IFMT { + case s_IFBLK: + mode |= os.ModeDevice + case s_IFCHR: + mode |= os.ModeDevice | os.ModeCharDevice + case s_IFDIR: + mode |= os.ModeDir + case s_IFIFO: + mode |= os.ModeNamedPipe + case s_IFLNK: + mode |= os.ModeSymlink + case s_IFREG: + // nothing to do + case s_IFSOCK: + mode |= os.ModeSocket + } + if m&s_ISGID != 0 { + mode |= os.ModeSetgid + } + if m&s_ISUID != 0 { + mode |= os.ModeSetuid + } + if m&s_ISVTX != 0 { + mode |= os.ModeSticky + } + return mode +} diff --git a/vendor/github.com/klauspost/compress/zip/testdata/crc32-not-streamed.zip b/vendor/github.com/klauspost/compress/zip/testdata/crc32-not-streamed.zip new file mode 100644 index 0000000..f268d88 Binary files /dev/null and b/vendor/github.com/klauspost/compress/zip/testdata/crc32-not-streamed.zip differ diff --git a/vendor/github.com/klauspost/compress/zip/testdata/dd.zip b/vendor/github.com/klauspost/compress/zip/testdata/dd.zip new file mode 100644 index 0000000..e53378b Binary files /dev/null and b/vendor/github.com/klauspost/compress/zip/testdata/dd.zip differ diff --git a/vendor/github.com/klauspost/compress/zip/testdata/go-no-datadesc-sig.zip b/vendor/github.com/klauspost/compress/zip/testdata/go-no-datadesc-sig.zip new file mode 100644 index 0000000..c3d593f Binary files /dev/null and b/vendor/github.com/klauspost/compress/zip/testdata/go-no-datadesc-sig.zip differ diff --git a/vendor/github.com/klauspost/compress/zip/testdata/go-with-datadesc-sig.zip b/vendor/github.com/klauspost/compress/zip/testdata/go-with-datadesc-sig.zip new file mode 100644 index 0000000..bcfe121 Binary files /dev/null and b/vendor/github.com/klauspost/compress/zip/testdata/go-with-datadesc-sig.zip differ diff --git a/vendor/github.com/klauspost/compress/zip/testdata/gophercolor16x16.png b/vendor/github.com/klauspost/compress/zip/testdata/gophercolor16x16.png new file mode 100644 index 0000000..48854ff Binary files /dev/null and b/vendor/github.com/klauspost/compress/zip/testdata/gophercolor16x16.png differ diff --git a/vendor/github.com/klauspost/compress/zip/testdata/readme.notzip b/vendor/github.com/klauspost/compress/zip/testdata/readme.notzip new file mode 100644 index 0000000..06668c4 Binary files /dev/null and b/vendor/github.com/klauspost/compress/zip/testdata/readme.notzip differ diff --git a/vendor/github.com/klauspost/compress/zip/testdata/readme.zip 
b/vendor/github.com/klauspost/compress/zip/testdata/readme.zip new file mode 100644 index 0000000..db3bb90 Binary files /dev/null and b/vendor/github.com/klauspost/compress/zip/testdata/readme.zip differ diff --git a/vendor/github.com/klauspost/compress/zip/testdata/symlink.zip b/vendor/github.com/klauspost/compress/zip/testdata/symlink.zip new file mode 100644 index 0000000..af84693 Binary files /dev/null and b/vendor/github.com/klauspost/compress/zip/testdata/symlink.zip differ diff --git a/vendor/github.com/klauspost/compress/zip/testdata/test-trailing-junk.zip b/vendor/github.com/klauspost/compress/zip/testdata/test-trailing-junk.zip new file mode 100644 index 0000000..42281b4 Binary files /dev/null and b/vendor/github.com/klauspost/compress/zip/testdata/test-trailing-junk.zip differ diff --git a/vendor/github.com/klauspost/compress/zip/testdata/test.zip b/vendor/github.com/klauspost/compress/zip/testdata/test.zip new file mode 100644 index 0000000..03890c0 Binary files /dev/null and b/vendor/github.com/klauspost/compress/zip/testdata/test.zip differ diff --git a/vendor/github.com/klauspost/compress/zip/testdata/unix.zip b/vendor/github.com/klauspost/compress/zip/testdata/unix.zip new file mode 100644 index 0000000..ce1a981 Binary files /dev/null and b/vendor/github.com/klauspost/compress/zip/testdata/unix.zip differ diff --git a/vendor/github.com/klauspost/compress/zip/testdata/winxp.zip b/vendor/github.com/klauspost/compress/zip/testdata/winxp.zip new file mode 100644 index 0000000..3919322 Binary files /dev/null and b/vendor/github.com/klauspost/compress/zip/testdata/winxp.zip differ diff --git a/vendor/github.com/klauspost/compress/zip/testdata/zip64-2.zip b/vendor/github.com/klauspost/compress/zip/testdata/zip64-2.zip new file mode 100644 index 0000000..f844e35 Binary files /dev/null and b/vendor/github.com/klauspost/compress/zip/testdata/zip64-2.zip differ diff --git a/vendor/github.com/klauspost/compress/zip/testdata/zip64.zip b/vendor/github.com/klauspost/compress/zip/testdata/zip64.zip new file mode 100644 index 0000000..a2ee1fa Binary files /dev/null and b/vendor/github.com/klauspost/compress/zip/testdata/zip64.zip differ diff --git a/vendor/github.com/klauspost/compress/zip/writer.go b/vendor/github.com/klauspost/compress/zip/writer.go new file mode 100644 index 0000000..5843958 --- /dev/null +++ b/vendor/github.com/klauspost/compress/zip/writer.go @@ -0,0 +1,393 @@ +// Copyright 2011 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package zip + +import ( + "bufio" + "encoding/binary" + "errors" + "hash" + "io" + + "github.com/klauspost/crc32" +) + +// TODO(adg): support zip file comments + +// Writer implements a zip file writer. +type Writer struct { + cw *countWriter + dir []*header + last *fileWriter + closed bool + compressors map[uint16]Compressor +} + +type header struct { + *FileHeader + offset uint64 +} + +// NewWriter returns a new Writer writing a zip file to w. +func NewWriter(w io.Writer) *Writer { + return &Writer{cw: &countWriter{w: bufio.NewWriter(w)}} +} + +// SetOffset sets the offset of the beginning of the zip data within the +// underlying writer. It should be used when the zip data is appended to an +// existing file, such as a binary executable. +// It must be called before any data is written. 
+func (w *Writer) SetOffset(n int64) { + if w.cw.count != 0 { + panic("zip: SetOffset called after data was written") + } + w.cw.count = n +} + +// Flush flushes any buffered data to the underlying writer. +// Calling Flush is not normally necessary; calling Close is sufficient. +func (w *Writer) Flush() error { + return w.cw.w.(*bufio.Writer).Flush() +} + +// Close finishes writing the zip file by writing the central directory. +// It does not (and can not) close the underlying writer. +func (w *Writer) Close() error { + if w.last != nil && !w.last.closed { + if err := w.last.close(); err != nil { + return err + } + w.last = nil + } + if w.closed { + return errors.New("zip: writer closed twice") + } + w.closed = true + + // write central directory + start := w.cw.count + for _, h := range w.dir { + var buf [directoryHeaderLen]byte + b := writeBuf(buf[:]) + b.uint32(uint32(directoryHeaderSignature)) + b.uint16(h.CreatorVersion) + b.uint16(h.ReaderVersion) + b.uint16(h.Flags) + b.uint16(h.Method) + b.uint16(h.ModifiedTime) + b.uint16(h.ModifiedDate) + b.uint32(h.CRC32) + if h.isZip64() || h.offset >= uint32max { + // the file needs a zip64 header. store maxint in both + // 32 bit size fields (and offset later) to signal that the + // zip64 extra header should be used. + b.uint32(uint32max) // compressed size + b.uint32(uint32max) // uncompressed size + + // append a zip64 extra block to Extra + var buf [28]byte // 2x uint16 + 3x uint64 + eb := writeBuf(buf[:]) + eb.uint16(zip64ExtraId) + eb.uint16(24) // size = 3x uint64 + eb.uint64(h.UncompressedSize64) + eb.uint64(h.CompressedSize64) + eb.uint64(h.offset) + h.Extra = append(h.Extra, buf[:]...) + } else { + b.uint32(h.CompressedSize) + b.uint32(h.UncompressedSize) + } + b.uint16(uint16(len(h.Name))) + b.uint16(uint16(len(h.Extra))) + b.uint16(uint16(len(h.Comment))) + b = b[4:] // skip disk number start and internal file attr (2x uint16) + b.uint32(h.ExternalAttrs) + if h.offset > uint32max { + b.uint32(uint32max) + } else { + b.uint32(uint32(h.offset)) + } + if _, err := w.cw.Write(buf[:]); err != nil { + return err + } + if _, err := io.WriteString(w.cw, h.Name); err != nil { + return err + } + if _, err := w.cw.Write(h.Extra); err != nil { + return err + } + if _, err := io.WriteString(w.cw, h.Comment); err != nil { + return err + } + } + end := w.cw.count + + records := uint64(len(w.dir)) + size := uint64(end - start) + offset := uint64(start) + + if records > uint16max || size > uint32max || offset > uint32max { + var buf [directory64EndLen + directory64LocLen]byte + b := writeBuf(buf[:]) + + // zip64 end of central directory record + b.uint32(directory64EndSignature) + b.uint64(directory64EndLen - 12) // length minus signature (uint32) and length fields (uint64) + b.uint16(zipVersion45) // version made by + b.uint16(zipVersion45) // version needed to extract + b.uint32(0) // number of this disk + b.uint32(0) // number of the disk with the start of the central directory + b.uint64(records) // total number of entries in the central directory on this disk + b.uint64(records) // total number of entries in the central directory + b.uint64(size) // size of the central directory + b.uint64(offset) // offset of start of central directory with respect to the starting disk number + + // zip64 end of central directory locator + b.uint32(directory64LocSignature) + b.uint32(0) // number of the disk with the start of the zip64 end of central directory + b.uint64(uint64(end)) // relative offset of the zip64 end of central directory record + 
b.uint32(1) // total number of disks + + if _, err := w.cw.Write(buf[:]); err != nil { + return err + } + + // store max values in the regular end record to signal that + // that the zip64 values should be used instead + records = uint16max + size = uint32max + offset = uint32max + } + + // write end record + var buf [directoryEndLen]byte + b := writeBuf(buf[:]) + b.uint32(uint32(directoryEndSignature)) + b = b[4:] // skip over disk number and first disk number (2x uint16) + b.uint16(uint16(records)) // number of entries this disk + b.uint16(uint16(records)) // number of entries total + b.uint32(uint32(size)) // size of directory + b.uint32(uint32(offset)) // start of directory + // skipped size of comment (always zero) + if _, err := w.cw.Write(buf[:]); err != nil { + return err + } + + return w.cw.w.(*bufio.Writer).Flush() +} + +// Create adds a file to the zip file using the provided name. +// It returns a Writer to which the file contents should be written. +// The name must be a relative path: it must not start with a drive +// letter (e.g. C:) or leading slash, and only forward slashes are +// allowed. +// The file's contents must be written to the io.Writer before the next +// call to Create, CreateHeader, or Close. +func (w *Writer) Create(name string) (io.Writer, error) { + header := &FileHeader{ + Name: name, + Method: Deflate, + } + return w.CreateHeader(header) +} + +// CreateHeader adds a file to the zip file using the provided FileHeader +// for the file metadata. +// It returns a Writer to which the file contents should be written. +// +// The file's contents must be written to the io.Writer before the next +// call to Create, CreateHeader, or Close. The provided FileHeader fh +// must not be modified after a call to CreateHeader. +func (w *Writer) CreateHeader(fh *FileHeader) (io.Writer, error) { + if w.last != nil && !w.last.closed { + if err := w.last.close(); err != nil { + return nil, err + } + } + if len(w.dir) > 0 && w.dir[len(w.dir)-1].FileHeader == fh { + // See https://golang.org/issue/11144 confusion. 
+ return nil, errors.New("archive/zip: invalid duplicate FileHeader") + } + + fh.Flags |= 0x8 // we will write a data descriptor + + fh.CreatorVersion = fh.CreatorVersion&0xff00 | zipVersion20 // preserve compatibility byte + fh.ReaderVersion = zipVersion20 + + fw := &fileWriter{ + zipw: w.cw, + compCount: &countWriter{w: w.cw}, + crc32: crc32.NewIEEE(), + } + comp := w.compressor(fh.Method) + if comp == nil { + return nil, ErrAlgorithm + } + var err error + fw.comp, err = comp(fw.compCount) + if err != nil { + return nil, err + } + fw.rawCount = &countWriter{w: fw.comp} + + h := &header{ + FileHeader: fh, + offset: uint64(w.cw.count), + } + w.dir = append(w.dir, h) + fw.header = h + + if err := writeHeader(w.cw, fh); err != nil { + return nil, err + } + + w.last = fw + return fw, nil +} + +func writeHeader(w io.Writer, h *FileHeader) error { + var buf [fileHeaderLen]byte + b := writeBuf(buf[:]) + b.uint32(uint32(fileHeaderSignature)) + b.uint16(h.ReaderVersion) + b.uint16(h.Flags) + b.uint16(h.Method) + b.uint16(h.ModifiedTime) + b.uint16(h.ModifiedDate) + b.uint32(0) // since we are writing a data descriptor crc32, + b.uint32(0) // compressed size, + b.uint32(0) // and uncompressed size should be zero + b.uint16(uint16(len(h.Name))) + b.uint16(uint16(len(h.Extra))) + if _, err := w.Write(buf[:]); err != nil { + return err + } + if _, err := io.WriteString(w, h.Name); err != nil { + return err + } + _, err := w.Write(h.Extra) + return err +} + +// RegisterCompressor registers or overrides a custom compressor for a specific +// method ID. If a compressor for a given method is not found, Writer will +// default to looking up the compressor at the package level. +func (w *Writer) RegisterCompressor(method uint16, comp Compressor) { + if w.compressors == nil { + w.compressors = make(map[uint16]Compressor) + } + w.compressors[method] = comp +} + +func (w *Writer) compressor(method uint16) Compressor { + comp := w.compressors[method] + if comp == nil { + comp = compressor(method) + } + return comp +} + +type fileWriter struct { + *header + zipw io.Writer + rawCount *countWriter + comp io.WriteCloser + compCount *countWriter + crc32 hash.Hash32 + closed bool +} + +func (w *fileWriter) Write(p []byte) (int, error) { + if w.closed { + return 0, errors.New("zip: write to closed file") + } + w.crc32.Write(p) + return w.rawCount.Write(p) +} + +func (w *fileWriter) close() error { + if w.closed { + return errors.New("zip: file closed twice") + } + w.closed = true + if err := w.comp.Close(); err != nil { + return err + } + + // update FileHeader + fh := w.header.FileHeader + fh.CRC32 = w.crc32.Sum32() + fh.CompressedSize64 = uint64(w.compCount.count) + fh.UncompressedSize64 = uint64(w.rawCount.count) + + if fh.isZip64() { + fh.CompressedSize = uint32max + fh.UncompressedSize = uint32max + fh.ReaderVersion = zipVersion45 // requires 4.5 - File uses ZIP64 format extensions + } else { + fh.CompressedSize = uint32(fh.CompressedSize64) + fh.UncompressedSize = uint32(fh.UncompressedSize64) + } + + // Write data descriptor. This is more complicated than one would + // think, see e.g. comments in zipfile.c:putextended() and + // http://bugs.sun.com/bugdatabase/view_bug.do?bug_id=7073588. + // The approach here is to write 8 byte sizes if needed without + // adding a zip64 extra in the local header (too late anyway). 
+ var buf []byte + if fh.isZip64() { + buf = make([]byte, dataDescriptor64Len) + } else { + buf = make([]byte, dataDescriptorLen) + } + b := writeBuf(buf) + b.uint32(dataDescriptorSignature) // de-facto standard, required by OS X + b.uint32(fh.CRC32) + if fh.isZip64() { + b.uint64(fh.CompressedSize64) + b.uint64(fh.UncompressedSize64) + } else { + b.uint32(fh.CompressedSize) + b.uint32(fh.UncompressedSize) + } + _, err := w.zipw.Write(buf) + return err +} + +type countWriter struct { + w io.Writer + count int64 +} + +func (w *countWriter) Write(p []byte) (int, error) { + n, err := w.w.Write(p) + w.count += int64(n) + return n, err +} + +type nopCloser struct { + io.Writer +} + +func (w nopCloser) Close() error { + return nil +} + +type writeBuf []byte + +func (b *writeBuf) uint16(v uint16) { + binary.LittleEndian.PutUint16(*b, v) + *b = (*b)[2:] +} + +func (b *writeBuf) uint32(v uint32) { + binary.LittleEndian.PutUint32(*b, v) + *b = (*b)[4:] +} + +func (b *writeBuf) uint64(v uint64) { + binary.LittleEndian.PutUint64(*b, v) + *b = (*b)[8:] +} diff --git a/vendor/github.com/klauspost/compress/zip/writer_test.go b/vendor/github.com/klauspost/compress/zip/writer_test.go new file mode 100644 index 0000000..01b63f2 --- /dev/null +++ b/vendor/github.com/klauspost/compress/zip/writer_test.go @@ -0,0 +1,199 @@ +// Copyright 2011 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package zip + +import ( + "bytes" + "io" + "io/ioutil" + "math/rand" + "os" + "testing" +) + +// TODO(adg): a more sophisticated test suite + +type WriteTest struct { + Name string + Data []byte + Method uint16 + Mode os.FileMode +} + +var writeTests = []WriteTest{ + { + Name: "foo", + Data: []byte("Rabbits, guinea pigs, gophers, marsupial rats, and quolls."), + Method: Store, + Mode: 0666, + }, + { + Name: "bar", + Data: nil, // large data set in the test + Method: Deflate, + Mode: 0644, + }, + { + Name: "setuid", + Data: []byte("setuid file"), + Method: Deflate, + Mode: 0755 | os.ModeSetuid, + }, + { + Name: "setgid", + Data: []byte("setgid file"), + Method: Deflate, + Mode: 0755 | os.ModeSetgid, + }, + { + Name: "symlink", + Data: []byte("../link/target"), + Method: Deflate, + Mode: 0755 | os.ModeSymlink, + }, +} + +func TestWriter(t *testing.T) { + largeData := make([]byte, 1<<17) + for i := range largeData { + largeData[i] = byte(rand.Int()) + } + writeTests[1].Data = largeData + defer func() { + writeTests[1].Data = nil + }() + + // write a zip file + buf := new(bytes.Buffer) + w := NewWriter(buf) + + for _, wt := range writeTests { + testCreate(t, w, &wt) + } + + if err := w.Close(); err != nil { + t.Fatal(err) + } + + // read it back + r, err := NewReader(bytes.NewReader(buf.Bytes()), int64(buf.Len())) + if err != nil { + t.Fatal(err) + } + for i, wt := range writeTests { + testReadFile(t, r.File[i], &wt) + } +} + +func TestWriterOffset(t *testing.T) { + largeData := make([]byte, 1<<17) + for i := range largeData { + largeData[i] = byte(rand.Int()) + } + writeTests[1].Data = largeData + defer func() { + writeTests[1].Data = nil + }() + + // write a zip file + buf := new(bytes.Buffer) + existingData := []byte{1, 2, 3, 1, 2, 3, 1, 2, 3} + n, _ := buf.Write(existingData) + w := NewWriter(buf) + w.SetOffset(int64(n)) + + for _, wt := range writeTests { + testCreate(t, w, &wt) + } + + if err := w.Close(); err != nil { + t.Fatal(err) + } + + // read it back + r, err := NewReader(bytes.NewReader(buf.Bytes()), int64(buf.Len())) + 
if err != nil { + t.Fatal(err) + } + for i, wt := range writeTests { + testReadFile(t, r.File[i], &wt) + } +} + +func TestWriterFlush(t *testing.T) { + var buf bytes.Buffer + w := NewWriter(struct{ io.Writer }{&buf}) + _, err := w.Create("foo") + if err != nil { + t.Fatal(err) + } + if buf.Len() > 0 { + t.Fatalf("Unexpected %d bytes already in buffer", buf.Len()) + } + if err := w.Flush(); err != nil { + t.Fatal(err) + } + if buf.Len() == 0 { + t.Fatal("No bytes written after Flush") + } +} + +func testCreate(t *testing.T, w *Writer, wt *WriteTest) { + header := &FileHeader{ + Name: wt.Name, + Method: wt.Method, + } + if wt.Mode != 0 { + header.SetMode(wt.Mode) + } + f, err := w.CreateHeader(header) + if err != nil { + t.Fatal(err) + } + _, err = f.Write(wt.Data) + if err != nil { + t.Fatal(err) + } +} + +func testReadFile(t *testing.T, f *File, wt *WriteTest) { + if f.Name != wt.Name { + t.Fatalf("File name: got %q, want %q", f.Name, wt.Name) + } + testFileMode(t, wt.Name, f, wt.Mode) + rc, err := f.Open() + if err != nil { + t.Fatal("opening:", err) + } + b, err := ioutil.ReadAll(rc) + if err != nil { + t.Fatal("reading:", err) + } + err = rc.Close() + if err != nil { + t.Fatal("closing:", err) + } + if !bytes.Equal(b, wt.Data) { + t.Errorf("File contents %q, want %q", b, wt.Data) + } +} + +func BenchmarkCompressedZipGarbage(b *testing.B) { + b.ReportAllocs() + var buf bytes.Buffer + bigBuf := bytes.Repeat([]byte("a"), 1<<20) + for i := 0; i < b.N; i++ { + buf.Reset() + zw := NewWriter(&buf) + for j := 0; j < 3; j++ { + w, _ := zw.CreateHeader(&FileHeader{ + Name: "foo", + Method: Deflate, + }) + w.Write(bigBuf) + } + zw.Close() + } +} diff --git a/vendor/github.com/klauspost/compress/zip/zip_test.go b/vendor/github.com/klauspost/compress/zip/zip_test.go new file mode 100644 index 0000000..681e42c --- /dev/null +++ b/vendor/github.com/klauspost/compress/zip/zip_test.go @@ -0,0 +1,467 @@ +// Copyright 2011 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Tests that involve both reading and writing. 
+ +package zip + +import ( + "bytes" + "fmt" + "hash" + "io" + "io/ioutil" + "sort" + "strings" + "testing" + "time" +) + +func TestOver65kFiles(t *testing.T) { + buf := new(bytes.Buffer) + w := NewWriter(buf) + const nFiles = (1 << 16) + 42 + for i := 0; i < nFiles; i++ { + _, err := w.CreateHeader(&FileHeader{ + Name: fmt.Sprintf("%d.dat", i), + Method: Store, // avoid Issue 6136 and Issue 6138 + }) + if err != nil { + t.Fatalf("creating file %d: %v", i, err) + } + } + if err := w.Close(); err != nil { + t.Fatalf("Writer.Close: %v", err) + } + s := buf.String() + zr, err := NewReader(strings.NewReader(s), int64(len(s))) + if err != nil { + t.Fatalf("NewReader: %v", err) + } + if got := len(zr.File); got != nFiles { + t.Fatalf("File contains %d files, want %d", got, nFiles) + } + for i := 0; i < nFiles; i++ { + want := fmt.Sprintf("%d.dat", i) + if zr.File[i].Name != want { + t.Fatalf("File(%d) = %q, want %q", i, zr.File[i].Name, want) + } + } +} + +func TestModTime(t *testing.T) { + var testTime = time.Date(2009, time.November, 10, 23, 45, 58, 0, time.UTC) + fh := new(FileHeader) + fh.SetModTime(testTime) + outTime := fh.ModTime() + if !outTime.Equal(testTime) { + t.Errorf("times don't match: got %s, want %s", outTime, testTime) + } +} + +func testHeaderRoundTrip(fh *FileHeader, wantUncompressedSize uint32, wantUncompressedSize64 uint64, t *testing.T) { + fi := fh.FileInfo() + fh2, err := FileInfoHeader(fi) + if err != nil { + t.Fatal(err) + } + if got, want := fh2.Name, fh.Name; got != want { + t.Errorf("Name: got %s, want %s\n", got, want) + } + if got, want := fh2.UncompressedSize, wantUncompressedSize; got != want { + t.Errorf("UncompressedSize: got %d, want %d\n", got, want) + } + if got, want := fh2.UncompressedSize64, wantUncompressedSize64; got != want { + t.Errorf("UncompressedSize64: got %d, want %d\n", got, want) + } + if got, want := fh2.ModifiedTime, fh.ModifiedTime; got != want { + t.Errorf("ModifiedTime: got %d, want %d\n", got, want) + } + if got, want := fh2.ModifiedDate, fh.ModifiedDate; got != want { + t.Errorf("ModifiedDate: got %d, want %d\n", got, want) + } + + if sysfh, ok := fi.Sys().(*FileHeader); !ok && sysfh != fh { + t.Errorf("Sys didn't return original *FileHeader") + } +} + +func TestFileHeaderRoundTrip(t *testing.T) { + fh := &FileHeader{ + Name: "foo.txt", + UncompressedSize: 987654321, + ModifiedTime: 1234, + ModifiedDate: 5678, + } + testHeaderRoundTrip(fh, fh.UncompressedSize, uint64(fh.UncompressedSize), t) +} + +func TestFileHeaderRoundTrip64(t *testing.T) { + fh := &FileHeader{ + Name: "foo.txt", + UncompressedSize64: 9876543210, + ModifiedTime: 1234, + ModifiedDate: 5678, + } + testHeaderRoundTrip(fh, uint32max, fh.UncompressedSize64, t) +} + +type repeatedByte struct { + off int64 + b byte + n int64 +} + +// rleBuffer is a run-length-encoded byte buffer. +// It's an io.Writer (like a bytes.Buffer) and also an io.ReaderAt, +// allowing random-access reads. +type rleBuffer struct { + buf []repeatedByte +} + +func (r *rleBuffer) Size() int64 { + if len(r.buf) == 0 { + return 0 + } + last := &r.buf[len(r.buf)-1] + return last.off + last.n +} + +func (r *rleBuffer) Write(p []byte) (n int, err error) { + var rp *repeatedByte + if len(r.buf) > 0 { + rp = &r.buf[len(r.buf)-1] + // Fast path, if p is entirely the same byte repeated. 
+ if lastByte := rp.b; len(p) > 0 && p[0] == lastByte { + all := true + for _, b := range p { + if b != lastByte { + all = false + break + } + } + if all { + rp.n += int64(len(p)) + return len(p), nil + } + } + } + + for _, b := range p { + if rp == nil || rp.b != b { + r.buf = append(r.buf, repeatedByte{r.Size(), b, 1}) + rp = &r.buf[len(r.buf)-1] + } else { + rp.n++ + } + } + return len(p), nil +} + +func (r *rleBuffer) ReadAt(p []byte, off int64) (n int, err error) { + if len(p) == 0 { + return + } + skipParts := sort.Search(len(r.buf), func(i int) bool { + part := &r.buf[i] + return part.off+part.n > off + }) + parts := r.buf[skipParts:] + if len(parts) > 0 { + skipBytes := off - parts[0].off + for len(parts) > 0 { + part := parts[0] + for i := skipBytes; i < part.n; i++ { + if n == len(p) { + return + } + p[n] = part.b + n++ + } + parts = parts[1:] + skipBytes = 0 + } + } + if n != len(p) { + err = io.ErrUnexpectedEOF + } + return +} + +// Just testing the rleBuffer used in the Zip64 test above. Not used by the zip code. +func TestRLEBuffer(t *testing.T) { + b := new(rleBuffer) + var all []byte + writes := []string{"abcdeee", "eeeeeee", "eeeefghaaiii"} + for _, w := range writes { + b.Write([]byte(w)) + all = append(all, w...) + } + if len(b.buf) != 10 { + t.Fatalf("len(b.buf) = %d; want 10", len(b.buf)) + } + + for i := 0; i < len(all); i++ { + for j := 0; j < len(all)-i; j++ { + buf := make([]byte, j) + n, err := b.ReadAt(buf, int64(i)) + if err != nil || n != len(buf) { + t.Errorf("ReadAt(%d, %d) = %d, %v; want %d, nil", i, j, n, err, len(buf)) + } + if !bytes.Equal(buf, all[i:i+j]) { + t.Errorf("ReadAt(%d, %d) = %q; want %q", i, j, buf, all[i:i+j]) + } + } + } +} + +// fakeHash32 is a dummy Hash32 that always returns 0. +type fakeHash32 struct { + hash.Hash32 +} + +func (fakeHash32) Write(p []byte) (int, error) { return len(p), nil } +func (fakeHash32) Sum32() uint32 { return 0 } + +func TestZip64(t *testing.T) { + if testing.Short() { + t.Skip("slow test; skipping") + } + const size = 1 << 32 // before the "END\n" part + buf := testZip64(t, size) + testZip64DirectoryRecordLength(buf, t) +} + +func TestZip64EdgeCase(t *testing.T) { + if testing.Short() { + t.Skip("slow test; skipping") + } + // Test a zip file with uncompressed size 0xFFFFFFFF. + // That's the magic marker for a 64-bit file, so even though + // it fits in a 32-bit field we must use the 64-bit field. + // Go 1.5 and earlier got this wrong, + // writing an invalid zip file. + const size = 1<<32 - 1 - int64(len("END\n")) // before the "END\n" part + buf := testZip64(t, size) + testZip64DirectoryRecordLength(buf, t) +} + +func testZip64(t testing.TB, size int64) *rleBuffer { + const chunkSize = 1024 + chunks := int(size / chunkSize) + // write size bytes plus "END\n" to a zip file + buf := new(rleBuffer) + w := NewWriter(buf) + f, err := w.CreateHeader(&FileHeader{ + Name: "huge.txt", + Method: Store, + }) + if err != nil { + t.Fatal(err) + } + f.(*fileWriter).crc32 = fakeHash32{} + chunk := make([]byte, chunkSize) + for i := range chunk { + chunk[i] = '.' 
+ } + for i := 0; i < chunks; i++ { + _, err := f.Write(chunk) + if err != nil { + t.Fatal("write chunk:", err) + } + } + if frag := int(size % chunkSize); frag > 0 { + _, err := f.Write(chunk[:frag]) + if err != nil { + t.Fatal("write chunk:", err) + } + } + end := []byte("END\n") + _, err = f.Write(end) + if err != nil { + t.Fatal("write end:", err) + } + if err := w.Close(); err != nil { + t.Fatal(err) + } + + // read back zip file and check that we get to the end of it + r, err := NewReader(buf, int64(buf.Size())) + if err != nil { + t.Fatal("reader:", err) + } + f0 := r.File[0] + rc, err := f0.Open() + if err != nil { + t.Fatal("opening:", err) + } + rc.(*checksumReader).hash = fakeHash32{} + for i := 0; i < chunks; i++ { + _, err := io.ReadFull(rc, chunk) + if err != nil { + t.Fatal("read:", err) + } + } + if frag := int(size % chunkSize); frag > 0 { + _, err := io.ReadFull(rc, chunk[:frag]) + if err != nil { + t.Fatal("read:", err) + } + } + gotEnd, err := ioutil.ReadAll(rc) + if err != nil { + t.Fatal("read end:", err) + } + if !bytes.Equal(gotEnd, end) { + t.Errorf("End of zip64 archive %q, want %q", gotEnd, end) + } + err = rc.Close() + if err != nil { + t.Fatal("closing:", err) + } + if size+int64(len("END\n")) >= 1<<32-1 { + if got, want := f0.UncompressedSize, uint32(uint32max); got != want { + t.Errorf("UncompressedSize %#x, want %#x", got, want) + } + } + + if got, want := f0.UncompressedSize64, uint64(size)+uint64(len(end)); got != want { + t.Errorf("UncompressedSize64 %#x, want %#x", got, want) + } + + return buf +} + +// Issue 9857 +func testZip64DirectoryRecordLength(buf *rleBuffer, t *testing.T) { + d := make([]byte, 1024) + if _, err := buf.ReadAt(d, buf.Size()-int64(len(d))); err != nil { + t.Fatal("read:", err) + } + + sigOff := findSignatureInBlock(d) + dirOff, err := findDirectory64End(buf, buf.Size()-int64(len(d))+int64(sigOff)) + if err != nil { + t.Fatal("findDirectory64End:", err) + } + + d = make([]byte, directory64EndLen) + if _, err := buf.ReadAt(d, dirOff); err != nil { + t.Fatal("read:", err) + } + + b := readBuf(d) + if sig := b.uint32(); sig != directory64EndSignature { + t.Fatalf("Expected directory64EndSignature (%d), got %d", directory64EndSignature, sig) + } + + size := b.uint64() + if size != directory64EndLen-12 { + t.Fatalf("Expected length of %d, got %d", directory64EndLen-12, size) + } +} + +func testInvalidHeader(h *FileHeader, t *testing.T) { + var buf bytes.Buffer + z := NewWriter(&buf) + + f, err := z.CreateHeader(h) + if err != nil { + t.Fatalf("error creating header: %v", err) + } + if _, err := f.Write([]byte("hi")); err != nil { + t.Fatalf("error writing content: %v", err) + } + if err := z.Close(); err != nil { + t.Fatalf("error closing zip writer: %v", err) + } + + b := buf.Bytes() + if _, err = NewReader(bytes.NewReader(b), int64(len(b))); err != ErrFormat { + t.Fatalf("got %v, expected ErrFormat", err) + } +} + +func testValidHeader(h *FileHeader, t *testing.T) { + var buf bytes.Buffer + z := NewWriter(&buf) + + f, err := z.CreateHeader(h) + if err != nil { + t.Fatalf("error creating header: %v", err) + } + if _, err := f.Write([]byte("hi")); err != nil { + t.Fatalf("error writing content: %v", err) + } + if err := z.Close(); err != nil { + t.Fatalf("error closing zip writer: %v", err) + } + + b := buf.Bytes() + zf, err := NewReader(bytes.NewReader(b), int64(len(b))) + if err != nil { + t.Fatalf("got %v, expected nil", err) + } + zh := zf.File[0].FileHeader + if zh.Name != h.Name || zh.Method != h.Method || zh.UncompressedSize64 != 
uint64(len("hi")) { + t.Fatalf("got %q/%d/%d expected %q/%d/%d", zh.Name, zh.Method, zh.UncompressedSize64, h.Name, h.Method, len("hi")) + } +} + +// Issue 4302. +func TestHeaderInvalidTagAndSize(t *testing.T) { + const timeFormat = "20060102T150405.000.txt" + + ts := time.Now() + filename := ts.Format(timeFormat) + + h := FileHeader{ + Name: filename, + Method: Deflate, + Extra: []byte(ts.Format(time.RFC3339Nano)), // missing tag and len, but Extra is best-effort parsing + } + h.SetModTime(ts) + + testValidHeader(&h, t) +} + +func TestHeaderTooShort(t *testing.T) { + h := FileHeader{ + Name: "foo.txt", + Method: Deflate, + Extra: []byte{zip64ExtraId}, // missing size and second half of tag, but Extra is best-effort parsing + } + testValidHeader(&h, t) +} + +func TestHeaderIgnoredSize(t *testing.T) { + h := FileHeader{ + Name: "foo.txt", + Method: Deflate, + Extra: []byte{zip64ExtraId & 0xFF, zip64ExtraId >> 8, 24, 0, 1, 2, 3, 4, 5, 6, 7, 8, 1, 2, 3, 4, 5, 6, 7, 8, 1, 2, 3, 4, 5, 6, 7, 8, 1, 2, 3, 4, 5, 6, 7, 8}, // bad size but shouldn't be consulted + } + testValidHeader(&h, t) +} + +// Issue 4393. It is valid to have an extra data header +// which contains no body. +func TestZeroLengthHeader(t *testing.T) { + h := FileHeader{ + Name: "extadata.txt", + Method: Deflate, + Extra: []byte{ + 85, 84, 5, 0, 3, 154, 144, 195, 77, // tag 21589 size 5 + 85, 120, 0, 0, // tag 30805 size 0 + }, + } + testValidHeader(&h, t) +} + +// Just benchmarking how fast the Zip64 test above is. Not related to +// our zip performance, since the test above disabled CRC32 and flate. +func BenchmarkZip64Test(b *testing.B) { + for i := 0; i < b.N; i++ { + testZip64(b, 1<<26) + } +} diff --git a/vendor/github.com/klauspost/compress/zlib/example_test.go b/vendor/github.com/klauspost/compress/zlib/example_test.go new file mode 100644 index 0000000..7040889 --- /dev/null +++ b/vendor/github.com/klauspost/compress/zlib/example_test.go @@ -0,0 +1,37 @@ +// Copyright 2012 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package zlib_test + +import ( + "bytes" + "compress/zlib" + "fmt" + "io" + "os" +) + +func ExampleNewWriter() { + var b bytes.Buffer + + w := zlib.NewWriter(&b) + w.Write([]byte("hello, world\n")) + w.Close() + fmt.Println(b.Bytes()) + // Output: [120 156 202 72 205 201 201 215 81 40 207 47 202 73 225 2 4 0 0 255 255 33 231 4 147] +} + +func ExampleNewReader() { + buff := []byte{120, 156, 202, 72, 205, 201, 201, 215, 81, 40, 207, + 47, 202, 73, 225, 2, 4, 0, 0, 255, 255, 33, 231, 4, 147} + b := bytes.NewReader(buff) + + r, err := zlib.NewReader(b) + if err != nil { + panic(err) + } + io.Copy(os.Stdout, r) + // Output: hello, world + r.Close() +} diff --git a/vendor/github.com/klauspost/compress/zlib/reader.go b/vendor/github.com/klauspost/compress/zlib/reader.go new file mode 100644 index 0000000..b78a2c1 --- /dev/null +++ b/vendor/github.com/klauspost/compress/zlib/reader.go @@ -0,0 +1,178 @@ +// Copyright 2009 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +/* +Package zlib implements reading and writing of zlib format compressed data, +as specified in RFC 1950. + +The implementation provides filters that uncompress during reading +and compress during writing. 
For example, to write compressed data +to a buffer: + + var b bytes.Buffer + w := zlib.NewWriter(&b) + w.Write([]byte("hello, world\n")) + w.Close() + +and to read that data back: + + r, err := zlib.NewReader(&b) + io.Copy(os.Stdout, r) + r.Close() +*/ +package zlib + +import ( + "bufio" + "errors" + "hash" + "hash/adler32" + "io" + + "github.com/klauspost/compress/flate" +) + +const zlibDeflate = 8 + +var ( + // ErrChecksum is returned when reading ZLIB data that has an invalid checksum. + ErrChecksum = errors.New("zlib: invalid checksum") + // ErrDictionary is returned when reading ZLIB data that has an invalid dictionary. + ErrDictionary = errors.New("zlib: invalid dictionary") + // ErrHeader is returned when reading ZLIB data that has an invalid header. + ErrHeader = errors.New("zlib: invalid header") +) + +type reader struct { + r flate.Reader + decompressor io.ReadCloser + digest hash.Hash32 + err error + scratch [4]byte +} + +// Resetter resets a ReadCloser returned by NewReader or NewReaderDict to +// to switch to a new underlying Reader. This permits reusing a ReadCloser +// instead of allocating a new one. +type Resetter interface { + // Reset discards any buffered data and resets the Resetter as if it was + // newly initialized with the given reader. + Reset(r io.Reader, dict []byte) error +} + +// NewReader creates a new ReadCloser. +// Reads from the returned ReadCloser read and decompress data from r. +// If r does not implement io.ByteReader, the decompressor may read more +// data than necessary from r. +// It is the caller's responsibility to call Close on the ReadCloser when done. +// +// The ReadCloser returned by NewReader also implements Resetter. +func NewReader(r io.Reader) (io.ReadCloser, error) { + return NewReaderDict(r, nil) +} + +// NewReaderDict is like NewReader but uses a preset dictionary. +// NewReaderDict ignores the dictionary if the compressed data does not refer to it. +// If the compressed data refers to a different dictionary, NewReaderDict returns ErrDictionary. +// +// The ReadCloser returned by NewReaderDict also implements Resetter. +func NewReaderDict(r io.Reader, dict []byte) (io.ReadCloser, error) { + z := new(reader) + err := z.Reset(r, dict) + if err != nil { + return nil, err + } + return z, nil +} + +func (z *reader) Read(p []byte) (int, error) { + if z.err != nil { + return 0, z.err + } + + var n int + n, z.err = z.decompressor.Read(p) + z.digest.Write(p[0:n]) + if z.err != io.EOF { + // In the normal case we return here. + return n, z.err + } + + // Finished file; check checksum. + if _, err := io.ReadFull(z.r, z.scratch[0:4]); err != nil { + if err == io.EOF { + err = io.ErrUnexpectedEOF + } + z.err = err + return n, z.err + } + // ZLIB (RFC 1950) is big-endian, unlike GZIP (RFC 1952). + checksum := uint32(z.scratch[0])<<24 | uint32(z.scratch[1])<<16 | uint32(z.scratch[2])<<8 | uint32(z.scratch[3]) + if checksum != z.digest.Sum32() { + z.err = ErrChecksum + return n, z.err + } + return n, io.EOF +} + +// Calling Close does not close the wrapped io.Reader originally passed to NewReader. +// In order for the ZLIB checksum to be verified, the reader must be +// fully consumed until the io.EOF. 
+func (z *reader) Close() error { + if z.err != nil && z.err != io.EOF { + return z.err + } + z.err = z.decompressor.Close() + return z.err +} + +func (z *reader) Reset(r io.Reader, dict []byte) error { + *z = reader{decompressor: z.decompressor} + if fr, ok := r.(flate.Reader); ok { + z.r = fr + } else { + z.r = bufio.NewReader(r) + } + + // Read the header (RFC 1950 section 2.2.). + _, z.err = io.ReadFull(z.r, z.scratch[0:2]) + if z.err != nil { + if z.err == io.EOF { + z.err = io.ErrUnexpectedEOF + } + return z.err + } + h := uint(z.scratch[0])<<8 | uint(z.scratch[1]) + if (z.scratch[0]&0x0f != zlibDeflate) || (h%31 != 0) { + z.err = ErrHeader + return z.err + } + haveDict := z.scratch[1]&0x20 != 0 + if haveDict { + _, z.err = io.ReadFull(z.r, z.scratch[0:4]) + if z.err != nil { + if z.err == io.EOF { + z.err = io.ErrUnexpectedEOF + } + return z.err + } + checksum := uint32(z.scratch[0])<<24 | uint32(z.scratch[1])<<16 | uint32(z.scratch[2])<<8 | uint32(z.scratch[3]) + if checksum != adler32.Checksum(dict) { + z.err = ErrDictionary + return z.err + } + } + + if z.decompressor == nil { + if haveDict { + z.decompressor = flate.NewReaderDict(z.r, dict) + } else { + z.decompressor = flate.NewReader(z.r) + } + } else { + z.decompressor.(flate.Resetter).Reset(z.r, dict) + } + z.digest = adler32.New() + return nil +} diff --git a/vendor/github.com/klauspost/compress/zlib/reader_test.go b/vendor/github.com/klauspost/compress/zlib/reader_test.go new file mode 100644 index 0000000..7e27aec --- /dev/null +++ b/vendor/github.com/klauspost/compress/zlib/reader_test.go @@ -0,0 +1,179 @@ +// Copyright 2009 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package zlib + +import ( + "bytes" + "io" + "testing" +) + +type zlibTest struct { + desc string + raw string + compressed []byte + dict []byte + err error +} + +// Compare-to-golden test data was generated by the ZLIB example program at +// http://www.zlib.net/zpipe.c + +var zlibTests = []zlibTest{ + { + "truncated empty", + "", + []byte{}, + nil, + io.ErrUnexpectedEOF, + }, + { + "truncated dict", + "", + []byte{0x78, 0xbb}, + []byte{0x00}, + io.ErrUnexpectedEOF, + }, + { + "truncated checksum", + "", + []byte{0x78, 0xbb, 0x00, 0x01, 0x00, 0x01, 0xca, 0x48, + 0xcd, 0xc9, 0xc9, 0xd7, 0x51, 0x28, 0xcf, 0x2f, + 0xca, 0x49, 0x01, 0x04, 0x00, 0x00, 0xff, 0xff, + }, + []byte{0x00}, + io.ErrUnexpectedEOF, + }, + { + "empty", + "", + []byte{0x78, 0x9c, 0x03, 0x00, 0x00, 0x00, 0x00, 0x01}, + nil, + nil, + }, + { + "goodbye", + "goodbye, world", + []byte{ + 0x78, 0x9c, 0x4b, 0xcf, 0xcf, 0x4f, 0x49, 0xaa, + 0x4c, 0xd5, 0x51, 0x28, 0xcf, 0x2f, 0xca, 0x49, + 0x01, 0x00, 0x28, 0xa5, 0x05, 0x5e, + }, + nil, + nil, + }, + { + "bad header", + "", + []byte{0x78, 0x9f, 0x03, 0x00, 0x00, 0x00, 0x00, 0x01}, + nil, + ErrHeader, + }, + { + "bad checksum", + "", + []byte{0x78, 0x9c, 0x03, 0x00, 0x00, 0x00, 0x00, 0xff}, + nil, + ErrChecksum, + }, + { + "not enough data", + "", + []byte{0x78, 0x9c, 0x03, 0x00, 0x00, 0x00}, + nil, + io.ErrUnexpectedEOF, + }, + { + "excess data is silently ignored", + "", + []byte{ + 0x78, 0x9c, 0x03, 0x00, 0x00, 0x00, 0x00, 0x01, + 0x78, 0x9c, 0xff, + }, + nil, + nil, + }, + { + "dictionary", + "Hello, World!\n", + []byte{ + 0x78, 0xbb, 0x1c, 0x32, 0x04, 0x27, 0xf3, 0x00, + 0xb1, 0x75, 0x20, 0x1c, 0x45, 0x2e, 0x00, 0x24, + 0x12, 0x04, 0x74, + }, + []byte{ + 0x48, 0x65, 0x6c, 0x6c, 0x6f, 0x20, 0x57, 0x6f, 0x72, 0x6c, 0x64, 0x0a, + }, + nil, + }, + { 
+ "wrong dictionary", + "", + []byte{ + 0x78, 0xbb, 0x1c, 0x32, 0x04, 0x27, 0xf3, 0x00, + 0xb1, 0x75, 0x20, 0x1c, 0x45, 0x2e, 0x00, 0x24, + 0x12, 0x04, 0x74, + }, + []byte{ + 0x48, 0x65, 0x6c, 0x6c, + }, + ErrDictionary, + }, + { + "truncated zlib stream amid raw-block", + "hello", + []byte{ + 0x78, 0x9c, 0x00, 0x0c, 0x00, 0xf3, 0xff, 0x68, 0x65, 0x6c, 0x6c, 0x6f, + }, + nil, + io.ErrUnexpectedEOF, + }, + { + "truncated zlib stream amid fixed-block", + "He", + []byte{ + 0x78, 0x9c, 0xf2, 0x48, 0xcd, + }, + nil, + io.ErrUnexpectedEOF, + }, +} + +func TestDecompressor(t *testing.T) { + b := new(bytes.Buffer) + for _, tt := range zlibTests { + in := bytes.NewReader(tt.compressed) + zr, err := NewReaderDict(in, tt.dict) + if err != nil { + if err != tt.err { + t.Errorf("%s: NewReader: %s", tt.desc, err) + } + continue + } + defer zr.Close() + + // Read and verify correctness of data. + b.Reset() + n, err := io.Copy(b, zr) + if err != nil { + if err != tt.err { + t.Errorf("%s: io.Copy: %v want %v", tt.desc, err, tt.err) + } + continue + } + s := b.String() + if s != tt.raw { + t.Errorf("%s: got %d-byte %q want %d-byte %q", tt.desc, n, s, len(tt.raw), tt.raw) + } + + // Check for sticky errors. + if n, err := zr.Read([]byte{0}); n != 0 || err != io.EOF { + t.Errorf("%s: Read() = (%d, %v), want (0, io.EOF)", tt.desc, n, err) + } + if err := zr.Close(); err != nil { + t.Errorf("%s: Close() = %v, want nil", tt.desc, err) + } + } +} diff --git a/vendor/github.com/klauspost/compress/zlib/writer.go b/vendor/github.com/klauspost/compress/zlib/writer.go new file mode 100644 index 0000000..605816b --- /dev/null +++ b/vendor/github.com/klauspost/compress/zlib/writer.go @@ -0,0 +1,201 @@ +// Copyright 2009 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package zlib + +import ( + "fmt" + "hash" + "hash/adler32" + "io" + + "github.com/klauspost/compress/flate" +) + +// These constants are copied from the flate package, so that code that imports +// "compress/zlib" does not also have to import "compress/flate". +const ( + NoCompression = flate.NoCompression + BestSpeed = flate.BestSpeed + BestCompression = flate.BestCompression + DefaultCompression = flate.DefaultCompression + ConstantCompression = flate.ConstantCompression + HuffmanOnly = flate.HuffmanOnly +) + +// A Writer takes data written to it and writes the compressed +// form of that data to an underlying writer (see NewWriter). +type Writer struct { + w io.Writer + level int + dict []byte + compressor *flate.Writer + digest hash.Hash32 + err error + scratch [4]byte + wroteHeader bool +} + +// NewWriter creates a new Writer. +// Writes to the returned Writer are compressed and written to w. +// +// It is the caller's responsibility to call Close on the WriteCloser when done. +// Writes may be buffered and not flushed until Close. +func NewWriter(w io.Writer) *Writer { + z, _ := NewWriterLevelDict(w, DefaultCompression, nil) + return z +} + +// NewWriterLevel is like NewWriter but specifies the compression level instead +// of assuming DefaultCompression. +// +// The compression level can be DefaultCompression, NoCompression, HuffmanOnly +// or any integer value between BestSpeed and BestCompression inclusive. +// The error returned will be nil if the level is valid. 
+func NewWriterLevel(w io.Writer, level int) (*Writer, error) { + return NewWriterLevelDict(w, level, nil) +} + +// NewWriterLevelDict is like NewWriterLevel but specifies a dictionary to +// compress with. +// +// The dictionary may be nil. If not, its contents should not be modified until +// the Writer is closed. +func NewWriterLevelDict(w io.Writer, level int, dict []byte) (*Writer, error) { + if level < HuffmanOnly || level > BestCompression { + return nil, fmt.Errorf("zlib: invalid compression level: %d", level) + } + return &Writer{ + w: w, + level: level, + dict: dict, + }, nil +} + +// Reset clears the state of the Writer z such that it is equivalent to its +// initial state from NewWriterLevel or NewWriterLevelDict, but instead writing +// to w. +func (z *Writer) Reset(w io.Writer) { + z.w = w + // z.level and z.dict left unchanged. + if z.compressor != nil { + z.compressor.Reset(w) + } + if z.digest != nil { + z.digest.Reset() + } + z.err = nil + z.scratch = [4]byte{} + z.wroteHeader = false +} + +// writeHeader writes the ZLIB header. +func (z *Writer) writeHeader() (err error) { + z.wroteHeader = true + // ZLIB has a two-byte header (as documented in RFC 1950). + // The first four bits is the CINFO (compression info), which is 7 for the default deflate window size. + // The next four bits is the CM (compression method), which is 8 for deflate. + z.scratch[0] = 0x78 + // The next two bits is the FLEVEL (compression level). The four values are: + // 0=fastest, 1=fast, 2=default, 3=best. + // The next bit, FDICT, is set if a dictionary is given. + // The final five FCHECK bits form a mod-31 checksum. + switch z.level { + case -2, 0, 1: + z.scratch[1] = 0 << 6 + case 2, 3, 4, 5: + z.scratch[1] = 1 << 6 + case 6, -1: + z.scratch[1] = 2 << 6 + case 7, 8, 9: + z.scratch[1] = 3 << 6 + default: + panic("unreachable") + } + if z.dict != nil { + z.scratch[1] |= 1 << 5 + } + z.scratch[1] += uint8(31 - (uint16(z.scratch[0])<<8+uint16(z.scratch[1]))%31) + if _, err = z.w.Write(z.scratch[0:2]); err != nil { + return err + } + if z.dict != nil { + // The next four bytes are the Adler-32 checksum of the dictionary. + checksum := adler32.Checksum(z.dict) + z.scratch[0] = uint8(checksum >> 24) + z.scratch[1] = uint8(checksum >> 16) + z.scratch[2] = uint8(checksum >> 8) + z.scratch[3] = uint8(checksum >> 0) + if _, err = z.w.Write(z.scratch[0:4]); err != nil { + return err + } + } + if z.compressor == nil { + // Initialize deflater unless the Writer is being reused + // after a Reset call. + z.compressor, err = flate.NewWriterDict(z.w, z.level, z.dict) + if err != nil { + return err + } + z.digest = adler32.New() + } + return nil +} + +// Write writes a compressed form of p to the underlying io.Writer. The +// compressed bytes are not necessarily flushed until the Writer is closed or +// explicitly flushed. +func (z *Writer) Write(p []byte) (n int, err error) { + if !z.wroteHeader { + z.err = z.writeHeader() + } + if z.err != nil { + return 0, z.err + } + if len(p) == 0 { + return 0, nil + } + n, err = z.compressor.Write(p) + if err != nil { + z.err = err + return + } + z.digest.Write(p) + return +} + +// Flush flushes the Writer to its underlying io.Writer. +func (z *Writer) Flush() error { + if !z.wroteHeader { + z.err = z.writeHeader() + } + if z.err != nil { + return z.err + } + z.err = z.compressor.Flush() + return z.err +} + +// Close closes the Writer, flushing any unwritten data to the underlying +// io.Writer, but does not close the underlying io.Writer. 
+func (z *Writer) Close() error { + if !z.wroteHeader { + z.err = z.writeHeader() + } + if z.err != nil { + return z.err + } + z.err = z.compressor.Close() + if z.err != nil { + return z.err + } + checksum := z.digest.Sum32() + // ZLIB (RFC 1950) is big-endian, unlike GZIP (RFC 1952). + z.scratch[0] = uint8(checksum >> 24) + z.scratch[1] = uint8(checksum >> 16) + z.scratch[2] = uint8(checksum >> 8) + z.scratch[3] = uint8(checksum >> 0) + _, z.err = z.w.Write(z.scratch[0:4]) + return z.err +} diff --git a/vendor/github.com/klauspost/compress/zlib/writer_test.go b/vendor/github.com/klauspost/compress/zlib/writer_test.go new file mode 100644 index 0000000..5f4af64 --- /dev/null +++ b/vendor/github.com/klauspost/compress/zlib/writer_test.go @@ -0,0 +1,212 @@ +// Copyright 2009 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package zlib + +import ( + "bytes" + "fmt" + "io" + "io/ioutil" + "os" + "testing" +) + +var filenames = []string{ + "../testdata/gettysburg.txt", + "../testdata/e.txt", + "../testdata/pi.txt", +} + +var data = []string{ + "test a reasonable sized string that can be compressed", +} + +// Tests that compressing and then decompressing the given file at the given compression level and dictionary +// yields equivalent bytes to the original file. +func testFileLevelDict(t *testing.T, fn string, level int, d string) { + // Read the file, as golden output. + golden, err := os.Open(fn) + if err != nil { + t.Errorf("%s (level=%d, dict=%q): %v", fn, level, d, err) + return + } + defer golden.Close() + b0, err0 := ioutil.ReadAll(golden) + if err0 != nil { + t.Errorf("%s (level=%d, dict=%q): %v", fn, level, d, err0) + return + } + testLevelDict(t, fn, b0, level, d) +} + +func testLevelDict(t *testing.T, fn string, b0 []byte, level int, d string) { + // Make dictionary, if given. + var dict []byte + if d != "" { + dict = []byte(d) + } + + // Push data through a pipe that compresses at the write end, and decompresses at the read end. + piper, pipew := io.Pipe() + defer piper.Close() + go func() { + defer pipew.Close() + zlibw, err := NewWriterLevelDict(pipew, level, dict) + if err != nil { + t.Errorf("%s (level=%d, dict=%q): %v", fn, level, d, err) + return + } + defer zlibw.Close() + _, err = zlibw.Write(b0) + if err != nil { + t.Errorf("%s (level=%d, dict=%q): %v", fn, level, d, err) + return + } + }() + zlibr, err := NewReaderDict(piper, dict) + if err != nil { + t.Errorf("%s (level=%d, dict=%q): %v", fn, level, d, err) + return + } + defer zlibr.Close() + + // Compare the decompressed data. + b1, err1 := ioutil.ReadAll(zlibr) + if err1 != nil { + t.Errorf("%s (level=%d, dict=%q): %v", fn, level, d, err1) + return + } + if len(b0) != len(b1) { + t.Errorf("%s (level=%d, dict=%q): length mismatch %d versus %d", fn, level, d, len(b0), len(b1)) + return + } + for i := 0; i < len(b0); i++ { + if b0[i] != b1[i] { + t.Errorf("%s (level=%d, dict=%q): mismatch at %d, 0x%02x versus 0x%02x\n", fn, level, d, i, b0[i], b1[i]) + return + } + } +} + +func testFileLevelDictReset(t *testing.T, fn string, level int, dict []byte) { + var b0 []byte + var err error + if fn != "" { + b0, err = ioutil.ReadFile(fn) + if err != nil { + t.Errorf("%s (level=%d): %v", fn, level, err) + return + } + } + + // Compress once. 
+ buf := new(bytes.Buffer) + var zlibw *Writer + if dict == nil { + zlibw, err = NewWriterLevel(buf, level) + } else { + zlibw, err = NewWriterLevelDict(buf, level, dict) + } + if err == nil { + _, err = zlibw.Write(b0) + } + if err == nil { + err = zlibw.Close() + } + if err != nil { + t.Errorf("%s (level=%d): %v", fn, level, err) + return + } + out := buf.String() + + // Reset and compress again. + buf2 := new(bytes.Buffer) + zlibw.Reset(buf2) + _, err = zlibw.Write(b0) + if err == nil { + err = zlibw.Close() + } + if err != nil { + t.Errorf("%s (level=%d): %v", fn, level, err) + return + } + out2 := buf2.String() + + if out2 != out { + t.Errorf("%s (level=%d): different output after reset (got %d bytes, expected %d", + fn, level, len(out2), len(out)) + } +} + +func TestWriter(t *testing.T) { + for i, s := range data { + b := []byte(s) + tag := fmt.Sprintf("#%d", i) + testLevelDict(t, tag, b, DefaultCompression, "") + testLevelDict(t, tag, b, NoCompression, "") + testLevelDict(t, tag, b, HuffmanOnly, "") + for level := BestSpeed; level <= BestCompression; level++ { + testLevelDict(t, tag, b, level, "") + } + } +} + +func TestWriterBig(t *testing.T) { + for _, fn := range filenames { + testFileLevelDict(t, fn, DefaultCompression, "") + testFileLevelDict(t, fn, NoCompression, "") + testFileLevelDict(t, fn, HuffmanOnly, "") + for level := BestSpeed; level <= BestCompression; level++ { + testFileLevelDict(t, fn, level, "") + } + } +} + +func TestWriterDict(t *testing.T) { + const dictionary = "0123456789." + for _, fn := range filenames { + testFileLevelDict(t, fn, DefaultCompression, dictionary) + testFileLevelDict(t, fn, NoCompression, dictionary) + testFileLevelDict(t, fn, HuffmanOnly, dictionary) + for level := BestSpeed; level <= BestCompression; level++ { + testFileLevelDict(t, fn, level, dictionary) + } + } +} + +func TestWriterReset(t *testing.T) { + const dictionary = "0123456789." + for _, fn := range filenames { + testFileLevelDictReset(t, fn, NoCompression, nil) + testFileLevelDictReset(t, fn, DefaultCompression, nil) + testFileLevelDictReset(t, fn, HuffmanOnly, nil) + testFileLevelDictReset(t, fn, NoCompression, []byte(dictionary)) + testFileLevelDictReset(t, fn, DefaultCompression, []byte(dictionary)) + testFileLevelDictReset(t, fn, HuffmanOnly, []byte(dictionary)) + if testing.Short() { + break + } + for level := BestSpeed; level <= BestCompression; level++ { + testFileLevelDictReset(t, fn, level, nil) + } + } +} + +func TestWriterDictIsUsed(t *testing.T) { + var input = []byte("Lorem ipsum dolor sit amet, consectetur adipisicing elit, sed do eiusmod tempor incididunt ut labore et dolore magna aliqua.") + var buf bytes.Buffer + compressor, err := NewWriterLevelDict(&buf, BestCompression, input) + if err != nil { + t.Errorf("error in NewWriterLevelDict: %s", err) + return + } + compressor.Write(input) + compressor.Close() + const expectedMaxSize = 25 + output := buf.Bytes() + if len(output) > expectedMaxSize { + t.Errorf("result too large (got %d, want <= %d bytes). 
Is the dictionary being used?", len(output), expectedMaxSize) + } +} diff --git a/vendor/github.com/klauspost/cpuid/.gitignore b/vendor/github.com/klauspost/cpuid/.gitignore new file mode 100644 index 0000000..daf913b --- /dev/null +++ b/vendor/github.com/klauspost/cpuid/.gitignore @@ -0,0 +1,24 @@ +# Compiled Object files, Static and Dynamic libs (Shared Objects) +*.o +*.a +*.so + +# Folders +_obj +_test + +# Architecture specific extensions/prefixes +*.[568vq] +[568vq].out + +*.cgo1.go +*.cgo2.c +_cgo_defun.c +_cgo_gotypes.go +_cgo_export.* + +_testmain.go + +*.exe +*.test +*.prof diff --git a/vendor/github.com/klauspost/cpuid/.travis.yml b/vendor/github.com/klauspost/cpuid/.travis.yml new file mode 100644 index 0000000..2332ea6 --- /dev/null +++ b/vendor/github.com/klauspost/cpuid/.travis.yml @@ -0,0 +1,24 @@ +language: go + +sudo: false + +os: + - linux + - osx +go: + - 1.5.x + - 1.6.x + - 1.7.x + - 1.8.x + - master + +script: + - go vet ./... + - go test -v ./... + - go test -race ./... + - diff <(gofmt -d .) <("") + +matrix: + allow_failures: + - go: 'master' + fast_finish: true diff --git a/vendor/github.com/klauspost/cpuid/LICENSE b/vendor/github.com/klauspost/cpuid/LICENSE new file mode 100644 index 0000000..5cec7ee --- /dev/null +++ b/vendor/github.com/klauspost/cpuid/LICENSE @@ -0,0 +1,22 @@ +The MIT License (MIT) + +Copyright (c) 2015 Klaus Post + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. + diff --git a/vendor/github.com/klauspost/cpuid/README.md b/vendor/github.com/klauspost/cpuid/README.md new file mode 100644 index 0000000..b2b6bee --- /dev/null +++ b/vendor/github.com/klauspost/cpuid/README.md @@ -0,0 +1,145 @@ +# cpuid +Package cpuid provides information about the CPU running the current program. + +CPU features are detected on startup, and kept for fast access through the life of the application. +Currently x86 / x64 (AMD64) is supported, and no external C (cgo) code is used, which should make the library very easy to use. + +You can access the CPU information by accessing the shared CPU variable of the cpuid library. 
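+
+As a minimal sketch (using only the shared CPU variable and the feature
+methods documented further down in this README), a runtime feature check can
+pick a code path; the fuller example below prints the detected information:
+
+```Go
+package main
+
+import (
+	"fmt"
+
+	"github.com/klauspost/cpuid"
+)
+
+func main() {
+	// CPU is populated once at startup, so these checks are cheap.
+	switch {
+	case cpuid.CPU.AVX2():
+		fmt.Println("using the AVX2 code path")
+	case cpuid.CPU.SSE42():
+		fmt.Println("falling back to SSE4.2")
+	default:
+		fmt.Println("using the generic code path")
+	}
+}
+```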
+ +Package home: https://github.com/klauspost/cpuid + +[![GoDoc][1]][2] [![Build Status][3]][4] + +[1]: https://godoc.org/github.com/klauspost/cpuid?status.svg +[2]: https://godoc.org/github.com/klauspost/cpuid +[3]: https://travis-ci.org/klauspost/cpuid.svg +[4]: https://travis-ci.org/klauspost/cpuid + +# features +## CPU Instructions +* **CMOV** (i686 CMOV) +* **NX** (NX (No-Execute) bit) +* **AMD3DNOW** (AMD 3DNOW) +* **AMD3DNOWEXT** (AMD 3DNowExt) +* **MMX** (standard MMX) +* **MMXEXT** (SSE integer functions or AMD MMX ext) +* **SSE** (SSE functions) +* **SSE2** (P4 SSE functions) +* **SSE3** (Prescott SSE3 functions) +* **SSSE3** (Conroe SSSE3 functions) +* **SSE4** (Penryn SSE4.1 functions) +* **SSE4A** (AMD Barcelona microarchitecture SSE4a instructions) +* **SSE42** (Nehalem SSE4.2 functions) +* **AVX** (AVX functions) +* **AVX2** (AVX2 functions) +* **FMA3** (Intel FMA 3) +* **FMA4** (Bulldozer FMA4 functions) +* **XOP** (Bulldozer XOP functions) +* **F16C** (Half-precision floating-point conversion) +* **BMI1** (Bit Manipulation Instruction Set 1) +* **BMI2** (Bit Manipulation Instruction Set 2) +* **TBM** (AMD Trailing Bit Manipulation) +* **LZCNT** (LZCNT instruction) +* **POPCNT** (POPCNT instruction) +* **AESNI** (Advanced Encryption Standard New Instructions) +* **CLMUL** (Carry-less Multiplication) +* **HTT** (Hyperthreading (enabled)) +* **HLE** (Hardware Lock Elision) +* **RTM** (Restricted Transactional Memory) +* **RDRAND** (RDRAND instruction is available) +* **RDSEED** (RDSEED instruction is available) +* **ADX** (Intel ADX (Multi-Precision Add-Carry Instruction Extensions)) +* **SHA** (Intel SHA Extensions) +* **AVX512F** (AVX-512 Foundation) +* **AVX512DQ** (AVX-512 Doubleword and Quadword Instructions) +* **AVX512IFMA** (AVX-512 Integer Fused Multiply-Add Instructions) +* **AVX512PF** (AVX-512 Prefetch Instructions) +* **AVX512ER** (AVX-512 Exponential and Reciprocal Instructions) +* **AVX512CD** (AVX-512 Conflict Detection Instructions) +* **AVX512BW** (AVX-512 Byte and Word Instructions) +* **AVX512VL** (AVX-512 Vector Length Extensions) +* **AVX512VBMI** (AVX-512 Vector Bit Manipulation Instructions) +* **MPX** (Intel MPX (Memory Protection Extensions)) +* **ERMS** (Enhanced REP MOVSB/STOSB) +* **RDTSCP** (RDTSCP Instruction) +* **CX16** (CMPXCHG16B Instruction) +* **SGX** (Software Guard Extensions, with activation details) + +## Performance +* **RDTSCP()** Returns current cycle count. Can be used for benchmarking. +* **SSE2SLOW** (SSE2 is supported, but usually not faster) +* **SSE3SLOW** (SSE3 is supported, but usually not faster) +* **ATOM** (Atom processor, some SSSE3 instructions are slower) +* **Cache line** (Probable size of a cache line). +* **L1, L2, L3 Cache size** on newer Intel/AMD CPUs. 
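+
+The counters and cache fields listed above can be read straight from the
+shared CPU variable; a minimal sketch (RTCounter returns 0 when RDTSCP is
+unavailable, and the cache fields are 0 or -1 when they cannot be detected):
+
+```Go
+package main
+
+import (
+	"fmt"
+
+	"github.com/klauspost/cpuid"
+)
+
+func main() {
+	// Cache geometry as detected at startup.
+	fmt.Println("Cache line:", cpuid.CPU.CacheLine, "bytes")
+	fmt.Println("L2 cache:", cpuid.CPU.Cache.L2, "bytes")
+
+	// RTCounter uses RDTSCP, so guard it with the feature check.
+	if cpuid.CPU.RDTSCP() {
+		start := cpuid.CPU.RTCounter()
+		// ... code being measured goes here ...
+		fmt.Println("cycles elapsed:", cpuid.CPU.RTCounter()-start)
+	}
+}
+```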
+ +## Cpu Vendor/VM +* **Intel** +* **AMD** +* **VIA** +* **Transmeta** +* **NSC** +* **KVM** (Kernel-based Virtual Machine) +* **MSVM** (Microsoft Hyper-V or Windows Virtual PC) +* **VMware** +* **XenHVM** + +# installing + +```go get github.com/klauspost/cpuid``` + +# example + +```Go +package main + +import ( + "fmt" + "github.com/klauspost/cpuid" +) + +func main() { + // Print basic CPU information: + fmt.Println("Name:", cpuid.CPU.BrandName) + fmt.Println("PhysicalCores:", cpuid.CPU.PhysicalCores) + fmt.Println("ThreadsPerCore:", cpuid.CPU.ThreadsPerCore) + fmt.Println("LogicalCores:", cpuid.CPU.LogicalCores) + fmt.Println("Family", cpuid.CPU.Family, "Model:", cpuid.CPU.Model) + fmt.Println("Features:", cpuid.CPU.Features) + fmt.Println("Cacheline bytes:", cpuid.CPU.CacheLine) + fmt.Println("L1 Data Cache:", cpuid.CPU.Cache.L1D, "bytes") + fmt.Println("L1 Instruction Cache:", cpuid.CPU.Cache.L1D, "bytes") + fmt.Println("L2 Cache:", cpuid.CPU.Cache.L2, "bytes") + fmt.Println("L3 Cache:", cpuid.CPU.Cache.L3, "bytes") + + // Test if we have a specific feature: + if cpuid.CPU.SSE() { + fmt.Println("We have Streaming SIMD Extensions") + } +} +``` + +Sample output: +``` +>go run main.go +Name: Intel(R) Core(TM) i5-2540M CPU @ 2.60GHz +PhysicalCores: 2 +ThreadsPerCore: 2 +LogicalCores: 4 +Family 6 Model: 42 +Features: CMOV,MMX,MMXEXT,SSE,SSE2,SSE3,SSSE3,SSE4.1,SSE4.2,AVX,AESNI,CLMUL +Cacheline bytes: 64 +We have Streaming SIMD Extensions +``` + +# private package + +In the "private" folder you can find an autogenerated version of the library you can include in your own packages. + +For this purpose all exports are removed, and functions and constants are lowercased. + +This is not a recommended way of using the library, but provided for convenience, if it is difficult for you to use external packages. + +# license + +This code is published under an MIT license. See LICENSE file for more information. diff --git a/vendor/github.com/klauspost/cpuid/cpuid.go b/vendor/github.com/klauspost/cpuid/cpuid.go new file mode 100644 index 0000000..44e5094 --- /dev/null +++ b/vendor/github.com/klauspost/cpuid/cpuid.go @@ -0,0 +1,1030 @@ +// Copyright (c) 2015 Klaus Post, released under MIT License. See LICENSE file. + +// Package cpuid provides information about the CPU running the current program. +// +// CPU features are detected on startup, and kept for fast access through the life of the application. +// Currently x86 / x64 (AMD64) is supported. +// +// You can access the CPU information by accessing the shared CPU variable of the cpuid library. +// +// Package home: https://github.com/klauspost/cpuid +package cpuid + +import "strings" + +// Vendor is a representation of a CPU vendor. 
+type Vendor int + +const ( + Other Vendor = iota + Intel + AMD + VIA + Transmeta + NSC + KVM // Kernel-based Virtual Machine + MSVM // Microsoft Hyper-V or Windows Virtual PC + VMware + XenHVM +) + +const ( + CMOV = 1 << iota // i686 CMOV + NX // NX (No-Execute) bit + AMD3DNOW // AMD 3DNOW + AMD3DNOWEXT // AMD 3DNowExt + MMX // standard MMX + MMXEXT // SSE integer functions or AMD MMX ext + SSE // SSE functions + SSE2 // P4 SSE functions + SSE3 // Prescott SSE3 functions + SSSE3 // Conroe SSSE3 functions + SSE4 // Penryn SSE4.1 functions + SSE4A // AMD Barcelona microarchitecture SSE4a instructions + SSE42 // Nehalem SSE4.2 functions + AVX // AVX functions + AVX2 // AVX2 functions + FMA3 // Intel FMA 3 + FMA4 // Bulldozer FMA4 functions + XOP // Bulldozer XOP functions + F16C // Half-precision floating-point conversion + BMI1 // Bit Manipulation Instruction Set 1 + BMI2 // Bit Manipulation Instruction Set 2 + TBM // AMD Trailing Bit Manipulation + LZCNT // LZCNT instruction + POPCNT // POPCNT instruction + AESNI // Advanced Encryption Standard New Instructions + CLMUL // Carry-less Multiplication + HTT // Hyperthreading (enabled) + HLE // Hardware Lock Elision + RTM // Restricted Transactional Memory + RDRAND // RDRAND instruction is available + RDSEED // RDSEED instruction is available + ADX // Intel ADX (Multi-Precision Add-Carry Instruction Extensions) + SHA // Intel SHA Extensions + AVX512F // AVX-512 Foundation + AVX512DQ // AVX-512 Doubleword and Quadword Instructions + AVX512IFMA // AVX-512 Integer Fused Multiply-Add Instructions + AVX512PF // AVX-512 Prefetch Instructions + AVX512ER // AVX-512 Exponential and Reciprocal Instructions + AVX512CD // AVX-512 Conflict Detection Instructions + AVX512BW // AVX-512 Byte and Word Instructions + AVX512VL // AVX-512 Vector Length Extensions + AVX512VBMI // AVX-512 Vector Bit Manipulation Instructions + MPX // Intel MPX (Memory Protection Extensions) + ERMS // Enhanced REP MOVSB/STOSB + RDTSCP // RDTSCP Instruction + CX16 // CMPXCHG16B Instruction + SGX // Software Guard Extensions + + // Performance indicators + SSE2SLOW // SSE2 is supported, but usually not faster + SSE3SLOW // SSE3 is supported, but usually not faster + ATOM // Atom processor, some SSSE3 instructions are slower +) + +var flagNames = map[Flags]string{ + CMOV: "CMOV", // i686 CMOV + NX: "NX", // NX (No-Execute) bit + AMD3DNOW: "AMD3DNOW", // AMD 3DNOW + AMD3DNOWEXT: "AMD3DNOWEXT", // AMD 3DNowExt + MMX: "MMX", // Standard MMX + MMXEXT: "MMXEXT", // SSE integer functions or AMD MMX ext + SSE: "SSE", // SSE functions + SSE2: "SSE2", // P4 SSE2 functions + SSE3: "SSE3", // Prescott SSE3 functions + SSSE3: "SSSE3", // Conroe SSSE3 functions + SSE4: "SSE4.1", // Penryn SSE4.1 functions + SSE4A: "SSE4A", // AMD Barcelona microarchitecture SSE4a instructions + SSE42: "SSE4.2", // Nehalem SSE4.2 functions + AVX: "AVX", // AVX functions + AVX2: "AVX2", // AVX functions + FMA3: "FMA3", // Intel FMA 3 + FMA4: "FMA4", // Bulldozer FMA4 functions + XOP: "XOP", // Bulldozer XOP functions + F16C: "F16C", // Half-precision floating-point conversion + BMI1: "BMI1", // Bit Manipulation Instruction Set 1 + BMI2: "BMI2", // Bit Manipulation Instruction Set 2 + TBM: "TBM", // AMD Trailing Bit Manipulation + LZCNT: "LZCNT", // LZCNT instruction + POPCNT: "POPCNT", // POPCNT instruction + AESNI: "AESNI", // Advanced Encryption Standard New Instructions + CLMUL: "CLMUL", // Carry-less Multiplication + HTT: "HTT", // Hyperthreading (enabled) + HLE: "HLE", // Hardware Lock Elision + RTM: "RTM", // 
Restricted Transactional Memory + RDRAND: "RDRAND", // RDRAND instruction is available + RDSEED: "RDSEED", // RDSEED instruction is available + ADX: "ADX", // Intel ADX (Multi-Precision Add-Carry Instruction Extensions) + SHA: "SHA", // Intel SHA Extensions + AVX512F: "AVX512F", // AVX-512 Foundation + AVX512DQ: "AVX512DQ", // AVX-512 Doubleword and Quadword Instructions + AVX512IFMA: "AVX512IFMA", // AVX-512 Integer Fused Multiply-Add Instructions + AVX512PF: "AVX512PF", // AVX-512 Prefetch Instructions + AVX512ER: "AVX512ER", // AVX-512 Exponential and Reciprocal Instructions + AVX512CD: "AVX512CD", // AVX-512 Conflict Detection Instructions + AVX512BW: "AVX512BW", // AVX-512 Byte and Word Instructions + AVX512VL: "AVX512VL", // AVX-512 Vector Length Extensions + AVX512VBMI: "AVX512VBMI", // AVX-512 Vector Bit Manipulation Instructions + MPX: "MPX", // Intel MPX (Memory Protection Extensions) + ERMS: "ERMS", // Enhanced REP MOVSB/STOSB + RDTSCP: "RDTSCP", // RDTSCP Instruction + CX16: "CX16", // CMPXCHG16B Instruction + SGX: "SGX", // Software Guard Extensions + + // Performance indicators + SSE2SLOW: "SSE2SLOW", // SSE2 supported, but usually not faster + SSE3SLOW: "SSE3SLOW", // SSE3 supported, but usually not faster + ATOM: "ATOM", // Atom processor, some SSSE3 instructions are slower + +} + +// CPUInfo contains information about the detected system CPU. +type CPUInfo struct { + BrandName string // Brand name reported by the CPU + VendorID Vendor // Comparable CPU vendor ID + Features Flags // Features of the CPU + PhysicalCores int // Number of physical processor cores in your CPU. Will be 0 if undetectable. + ThreadsPerCore int // Number of threads per physical core. Will be 1 if undetectable. + LogicalCores int // Number of physical cores times threads that can run on each core through the use of hyperthreading. Will be 0 if undetectable. + Family int // CPU family number + Model int // CPU model number + CacheLine int // Cache line size in bytes. Will be 0 if undetectable. + Cache struct { + L1I int // L1 Instruction Cache (per core or shared). Will be -1 if undetected + L1D int // L1 Data Cache (per core or shared). Will be -1 if undetected + L2 int // L2 Cache (per core or shared). Will be -1 if undetected + L3 int // L3 Instruction Cache (per core or shared). Will be -1 if undetected + } + SGX SGXSupport + maxFunc uint32 + maxExFunc uint32 +} + +var cpuid func(op uint32) (eax, ebx, ecx, edx uint32) +var cpuidex func(op, op2 uint32) (eax, ebx, ecx, edx uint32) +var xgetbv func(index uint32) (eax, edx uint32) +var rdtscpAsm func() (eax, ebx, ecx, edx uint32) + +// CPU contains information about the CPU as detected on startup, +// or when Detect last was called. +// +// Use this as the primary entry point to you data, +// this way queries are +var CPU CPUInfo + +func init() { + initCPU() + Detect() +} + +// Detect will re-detect current CPU info. +// This will replace the content of the exported CPU variable. +// +// Unless you expect the CPU to change while you are running your program +// you should not need to call this function. +// If you call this, you must ensure that no other goroutine is accessing the +// exported CPU variable. 
+func Detect() { + CPU.maxFunc = maxFunctionID() + CPU.maxExFunc = maxExtendedFunction() + CPU.BrandName = brandName() + CPU.CacheLine = cacheLine() + CPU.Family, CPU.Model = familyModel() + CPU.Features = support() + CPU.SGX = hasSGX(CPU.Features&SGX != 0) + CPU.ThreadsPerCore = threadsPerCore() + CPU.LogicalCores = logicalCores() + CPU.PhysicalCores = physicalCores() + CPU.VendorID = vendorID() + CPU.cacheSize() +} + +// Generated here: http://play.golang.org/p/BxFH2Gdc0G + +// Cmov indicates support of CMOV instructions +func (c CPUInfo) Cmov() bool { + return c.Features&CMOV != 0 +} + +// Amd3dnow indicates support of AMD 3DNOW! instructions +func (c CPUInfo) Amd3dnow() bool { + return c.Features&AMD3DNOW != 0 +} + +// Amd3dnowExt indicates support of AMD 3DNOW! Extended instructions +func (c CPUInfo) Amd3dnowExt() bool { + return c.Features&AMD3DNOWEXT != 0 +} + +// MMX indicates support of MMX instructions +func (c CPUInfo) MMX() bool { + return c.Features&MMX != 0 +} + +// MMXExt indicates support of MMXEXT instructions +// (SSE integer functions or AMD MMX ext) +func (c CPUInfo) MMXExt() bool { + return c.Features&MMXEXT != 0 +} + +// SSE indicates support of SSE instructions +func (c CPUInfo) SSE() bool { + return c.Features&SSE != 0 +} + +// SSE2 indicates support of SSE 2 instructions +func (c CPUInfo) SSE2() bool { + return c.Features&SSE2 != 0 +} + +// SSE3 indicates support of SSE 3 instructions +func (c CPUInfo) SSE3() bool { + return c.Features&SSE3 != 0 +} + +// SSSE3 indicates support of SSSE 3 instructions +func (c CPUInfo) SSSE3() bool { + return c.Features&SSSE3 != 0 +} + +// SSE4 indicates support of SSE 4 (also called SSE 4.1) instructions +func (c CPUInfo) SSE4() bool { + return c.Features&SSE4 != 0 +} + +// SSE42 indicates support of SSE4.2 instructions +func (c CPUInfo) SSE42() bool { + return c.Features&SSE42 != 0 +} + +// AVX indicates support of AVX instructions +// and operating system support of AVX instructions +func (c CPUInfo) AVX() bool { + return c.Features&AVX != 0 +} + +// AVX2 indicates support of AVX2 instructions +func (c CPUInfo) AVX2() bool { + return c.Features&AVX2 != 0 +} + +// FMA3 indicates support of FMA3 instructions +func (c CPUInfo) FMA3() bool { + return c.Features&FMA3 != 0 +} + +// FMA4 indicates support of FMA4 instructions +func (c CPUInfo) FMA4() bool { + return c.Features&FMA4 != 0 +} + +// XOP indicates support of XOP instructions +func (c CPUInfo) XOP() bool { + return c.Features&XOP != 0 +} + +// F16C indicates support of F16C instructions +func (c CPUInfo) F16C() bool { + return c.Features&F16C != 0 +} + +// BMI1 indicates support of BMI1 instructions +func (c CPUInfo) BMI1() bool { + return c.Features&BMI1 != 0 +} + +// BMI2 indicates support of BMI2 instructions +func (c CPUInfo) BMI2() bool { + return c.Features&BMI2 != 0 +} + +// TBM indicates support of TBM instructions +// (AMD Trailing Bit Manipulation) +func (c CPUInfo) TBM() bool { + return c.Features&TBM != 0 +} + +// Lzcnt indicates support of LZCNT instruction +func (c CPUInfo) Lzcnt() bool { + return c.Features&LZCNT != 0 +} + +// Popcnt indicates support of POPCNT instruction +func (c CPUInfo) Popcnt() bool { + return c.Features&POPCNT != 0 +} + +// HTT indicates the processor has Hyperthreading enabled +func (c CPUInfo) HTT() bool { + return c.Features&HTT != 0 +} + +// SSE2Slow indicates that SSE2 may be slow on this processor +func (c CPUInfo) SSE2Slow() bool { + return c.Features&SSE2SLOW != 0 +} + +// SSE3Slow indicates that SSE3 may be slow on this processor 
+func (c CPUInfo) SSE3Slow() bool { + return c.Features&SSE3SLOW != 0 +} + +// AesNi indicates support of AES-NI instructions +// (Advanced Encryption Standard New Instructions) +func (c CPUInfo) AesNi() bool { + return c.Features&AESNI != 0 +} + +// Clmul indicates support of CLMUL instructions +// (Carry-less Multiplication) +func (c CPUInfo) Clmul() bool { + return c.Features&CLMUL != 0 +} + +// NX indicates support of NX (No-Execute) bit +func (c CPUInfo) NX() bool { + return c.Features&NX != 0 +} + +// SSE4A indicates support of AMD Barcelona microarchitecture SSE4a instructions +func (c CPUInfo) SSE4A() bool { + return c.Features&SSE4A != 0 +} + +// HLE indicates support of Hardware Lock Elision +func (c CPUInfo) HLE() bool { + return c.Features&HLE != 0 +} + +// RTM indicates support of Restricted Transactional Memory +func (c CPUInfo) RTM() bool { + return c.Features&RTM != 0 +} + +// Rdrand indicates support of RDRAND instruction is available +func (c CPUInfo) Rdrand() bool { + return c.Features&RDRAND != 0 +} + +// Rdseed indicates support of RDSEED instruction is available +func (c CPUInfo) Rdseed() bool { + return c.Features&RDSEED != 0 +} + +// ADX indicates support of Intel ADX (Multi-Precision Add-Carry Instruction Extensions) +func (c CPUInfo) ADX() bool { + return c.Features&ADX != 0 +} + +// SHA indicates support of Intel SHA Extensions +func (c CPUInfo) SHA() bool { + return c.Features&SHA != 0 +} + +// AVX512F indicates support of AVX-512 Foundation +func (c CPUInfo) AVX512F() bool { + return c.Features&AVX512F != 0 +} + +// AVX512DQ indicates support of AVX-512 Doubleword and Quadword Instructions +func (c CPUInfo) AVX512DQ() bool { + return c.Features&AVX512DQ != 0 +} + +// AVX512IFMA indicates support of AVX-512 Integer Fused Multiply-Add Instructions +func (c CPUInfo) AVX512IFMA() bool { + return c.Features&AVX512IFMA != 0 +} + +// AVX512PF indicates support of AVX-512 Prefetch Instructions +func (c CPUInfo) AVX512PF() bool { + return c.Features&AVX512PF != 0 +} + +// AVX512ER indicates support of AVX-512 Exponential and Reciprocal Instructions +func (c CPUInfo) AVX512ER() bool { + return c.Features&AVX512ER != 0 +} + +// AVX512CD indicates support of AVX-512 Conflict Detection Instructions +func (c CPUInfo) AVX512CD() bool { + return c.Features&AVX512CD != 0 +} + +// AVX512BW indicates support of AVX-512 Byte and Word Instructions +func (c CPUInfo) AVX512BW() bool { + return c.Features&AVX512BW != 0 +} + +// AVX512VL indicates support of AVX-512 Vector Length Extensions +func (c CPUInfo) AVX512VL() bool { + return c.Features&AVX512VL != 0 +} + +// AVX512VBMI indicates support of AVX-512 Vector Bit Manipulation Instructions +func (c CPUInfo) AVX512VBMI() bool { + return c.Features&AVX512VBMI != 0 +} + +// MPX indicates support of Intel MPX (Memory Protection Extensions) +func (c CPUInfo) MPX() bool { + return c.Features&MPX != 0 +} + +// ERMS indicates support of Enhanced REP MOVSB/STOSB +func (c CPUInfo) ERMS() bool { + return c.Features&ERMS != 0 +} + +// RDTSCP Instruction is available. +func (c CPUInfo) RDTSCP() bool { + return c.Features&RDTSCP != 0 +} + +// CX16 indicates if CMPXCHG16B instruction is available. +func (c CPUInfo) CX16() bool { + return c.Features&CX16 != 0 +} + +// TSX is split into HLE (Hardware Lock Elision) and RTM (Restricted Transactional Memory) detection. +// So TSX simply checks that. 
+func (c CPUInfo) TSX() bool { + return c.Features&(MPX|RTM) == MPX|RTM +} + +// Atom indicates an Atom processor +func (c CPUInfo) Atom() bool { + return c.Features&ATOM != 0 +} + +// Intel returns true if vendor is recognized as Intel +func (c CPUInfo) Intel() bool { + return c.VendorID == Intel +} + +// AMD returns true if vendor is recognized as AMD +func (c CPUInfo) AMD() bool { + return c.VendorID == AMD +} + +// Transmeta returns true if vendor is recognized as Transmeta +func (c CPUInfo) Transmeta() bool { + return c.VendorID == Transmeta +} + +// NSC returns true if vendor is recognized as National Semiconductor +func (c CPUInfo) NSC() bool { + return c.VendorID == NSC +} + +// VIA returns true if vendor is recognized as VIA +func (c CPUInfo) VIA() bool { + return c.VendorID == VIA +} + +// RTCounter returns the 64-bit time-stamp counter +// Uses the RDTSCP instruction. The value 0 is returned +// if the CPU does not support the instruction. +func (c CPUInfo) RTCounter() uint64 { + if !c.RDTSCP() { + return 0 + } + a, _, _, d := rdtscpAsm() + return uint64(a) | (uint64(d) << 32) +} + +// Ia32TscAux returns the IA32_TSC_AUX part of the RDTSCP. +// This variable is OS dependent, but on Linux contains information +// about the current cpu/core the code is running on. +// If the RDTSCP instruction isn't supported on the CPU, the value 0 is returned. +func (c CPUInfo) Ia32TscAux() uint32 { + if !c.RDTSCP() { + return 0 + } + _, _, ecx, _ := rdtscpAsm() + return ecx +} + +// LogicalCPU will return the Logical CPU the code is currently executing on. +// This is likely to change when the OS re-schedules the running thread +// to another CPU. +// If the current core cannot be detected, -1 will be returned. +func (c CPUInfo) LogicalCPU() int { + if c.maxFunc < 1 { + return -1 + } + _, ebx, _, _ := cpuid(1) + return int(ebx >> 24) +} + +// VM Will return true if the cpu id indicates we are in +// a virtual machine. This is only a hint, and will very likely +// have many false negatives. +func (c CPUInfo) VM() bool { + switch c.VendorID { + case MSVM, KVM, VMware, XenHVM: + return true + } + return false +} + +// Flags contains detected cpu features and caracteristics +type Flags uint64 + +// String returns a string representation of the detected +// CPU features. +func (f Flags) String() string { + return strings.Join(f.Strings(), ",") +} + +// Strings returns and array of the detected features. 
+func (f Flags) Strings() []string { + s := support() + r := make([]string, 0, 20) + for i := uint(0); i < 64; i++ { + key := Flags(1 << i) + val := flagNames[key] + if s&key != 0 { + r = append(r, val) + } + } + return r +} + +func maxExtendedFunction() uint32 { + eax, _, _, _ := cpuid(0x80000000) + return eax +} + +func maxFunctionID() uint32 { + a, _, _, _ := cpuid(0) + return a +} + +func brandName() string { + if maxExtendedFunction() >= 0x80000004 { + v := make([]uint32, 0, 48) + for i := uint32(0); i < 3; i++ { + a, b, c, d := cpuid(0x80000002 + i) + v = append(v, a, b, c, d) + } + return strings.Trim(string(valAsString(v...)), " ") + } + return "unknown" +} + +func threadsPerCore() int { + mfi := maxFunctionID() + if mfi < 0x4 || vendorID() != Intel { + return 1 + } + + if mfi < 0xb { + _, b, _, d := cpuid(1) + if (d & (1 << 28)) != 0 { + // v will contain logical core count + v := (b >> 16) & 255 + if v > 1 { + a4, _, _, _ := cpuid(4) + // physical cores + v2 := (a4 >> 26) + 1 + if v2 > 0 { + return int(v) / int(v2) + } + } + } + return 1 + } + _, b, _, _ := cpuidex(0xb, 0) + if b&0xffff == 0 { + return 1 + } + return int(b & 0xffff) +} + +func logicalCores() int { + mfi := maxFunctionID() + switch vendorID() { + case Intel: + // Use this on old Intel processors + if mfi < 0xb { + if mfi < 1 { + return 0 + } + // CPUID.1:EBX[23:16] represents the maximum number of addressable IDs (initial APIC ID) + // that can be assigned to logical processors in a physical package. + // The value may not be the same as the number of logical processors that are present in the hardware of a physical package. + _, ebx, _, _ := cpuid(1) + logical := (ebx >> 16) & 0xff + return int(logical) + } + _, b, _, _ := cpuidex(0xb, 1) + return int(b & 0xffff) + case AMD: + _, b, _, _ := cpuid(1) + return int((b >> 16) & 0xff) + default: + return 0 + } +} + +func familyModel() (int, int) { + if maxFunctionID() < 0x1 { + return 0, 0 + } + eax, _, _, _ := cpuid(1) + family := ((eax >> 8) & 0xf) + ((eax >> 20) & 0xff) + model := ((eax >> 4) & 0xf) + ((eax >> 12) & 0xf0) + return int(family), int(model) +} + +func physicalCores() int { + switch vendorID() { + case Intel: + return logicalCores() / threadsPerCore() + case AMD: + if maxExtendedFunction() >= 0x80000008 { + _, _, c, _ := cpuid(0x80000008) + return int(c&0xff) + 1 + } + } + return 0 +} + +// Except from http://en.wikipedia.org/wiki/CPUID#EAX.3D0:_Get_vendor_ID +var vendorMapping = map[string]Vendor{ + "AMDisbetter!": AMD, + "AuthenticAMD": AMD, + "CentaurHauls": VIA, + "GenuineIntel": Intel, + "TransmetaCPU": Transmeta, + "GenuineTMx86": Transmeta, + "Geode by NSC": NSC, + "VIA VIA VIA ": VIA, + "KVMKVMKVMKVM": KVM, + "Microsoft Hv": MSVM, + "VMwareVMware": VMware, + "XenVMMXenVMM": XenHVM, +} + +func vendorID() Vendor { + _, b, c, d := cpuid(0) + v := valAsString(b, d, c) + vend, ok := vendorMapping[string(v)] + if !ok { + return Other + } + return vend +} + +func cacheLine() int { + if maxFunctionID() < 0x1 { + return 0 + } + + _, ebx, _, _ := cpuid(1) + cache := (ebx & 0xff00) >> 5 // cflush size + if cache == 0 && maxExtendedFunction() >= 0x80000006 { + _, _, ecx, _ := cpuid(0x80000006) + cache = ecx & 0xff // cacheline size + } + // TODO: Read from Cache and TLB Information + return int(cache) +} + +func (c *CPUInfo) cacheSize() { + c.Cache.L1D = -1 + c.Cache.L1I = -1 + c.Cache.L2 = -1 + c.Cache.L3 = -1 + vendor := vendorID() + switch vendor { + case Intel: + if maxFunctionID() < 4 { + return + } + for i := uint32(0); ; i++ { + eax, ebx, ecx, _ := 
cpuidex(4, i) + cacheType := eax & 15 + if cacheType == 0 { + break + } + cacheLevel := (eax >> 5) & 7 + coherency := int(ebx&0xfff) + 1 + partitions := int((ebx>>12)&0x3ff) + 1 + associativity := int((ebx>>22)&0x3ff) + 1 + sets := int(ecx) + 1 + size := associativity * partitions * coherency * sets + switch cacheLevel { + case 1: + if cacheType == 1 { + // 1 = Data Cache + c.Cache.L1D = size + } else if cacheType == 2 { + // 2 = Instruction Cache + c.Cache.L1I = size + } else { + if c.Cache.L1D < 0 { + c.Cache.L1I = size + } + if c.Cache.L1I < 0 { + c.Cache.L1I = size + } + } + case 2: + c.Cache.L2 = size + case 3: + c.Cache.L3 = size + } + } + case AMD: + // Untested. + if maxExtendedFunction() < 0x80000005 { + return + } + _, _, ecx, edx := cpuid(0x80000005) + c.Cache.L1D = int(((ecx >> 24) & 0xFF) * 1024) + c.Cache.L1I = int(((edx >> 24) & 0xFF) * 1024) + + if maxExtendedFunction() < 0x80000006 { + return + } + _, _, ecx, _ = cpuid(0x80000006) + c.Cache.L2 = int(((ecx >> 16) & 0xFFFF) * 1024) + } + + return +} + +type SGXSupport struct { + Available bool + SGX1Supported bool + SGX2Supported bool + MaxEnclaveSizeNot64 int64 + MaxEnclaveSize64 int64 +} + +func hasSGX(available bool) (rval SGXSupport) { + rval.Available = available + + if !available { + return + } + + a, _, _, d := cpuidex(0x12, 0) + rval.SGX1Supported = a&0x01 != 0 + rval.SGX2Supported = a&0x02 != 0 + rval.MaxEnclaveSizeNot64 = 1 << (d & 0xFF) // pow 2 + rval.MaxEnclaveSize64 = 1 << ((d >> 8) & 0xFF) // pow 2 + + return +} + +func support() Flags { + mfi := maxFunctionID() + vend := vendorID() + if mfi < 0x1 { + return 0 + } + rval := uint64(0) + _, _, c, d := cpuid(1) + if (d & (1 << 15)) != 0 { + rval |= CMOV + } + if (d & (1 << 23)) != 0 { + rval |= MMX + } + if (d & (1 << 25)) != 0 { + rval |= MMXEXT + } + if (d & (1 << 25)) != 0 { + rval |= SSE + } + if (d & (1 << 26)) != 0 { + rval |= SSE2 + } + if (c & 1) != 0 { + rval |= SSE3 + } + if (c & 0x00000200) != 0 { + rval |= SSSE3 + } + if (c & 0x00080000) != 0 { + rval |= SSE4 + } + if (c & 0x00100000) != 0 { + rval |= SSE42 + } + if (c & (1 << 25)) != 0 { + rval |= AESNI + } + if (c & (1 << 1)) != 0 { + rval |= CLMUL + } + if c&(1<<23) != 0 { + rval |= POPCNT + } + if c&(1<<30) != 0 { + rval |= RDRAND + } + if c&(1<<29) != 0 { + rval |= F16C + } + if c&(1<<13) != 0 { + rval |= CX16 + } + if vend == Intel && (d&(1<<28)) != 0 && mfi >= 4 { + if threadsPerCore() > 1 { + rval |= HTT + } + } + + // Check XGETBV, OXSAVE and AVX bits + if c&(1<<26) != 0 && c&(1<<27) != 0 && c&(1<<28) != 0 { + // Check for OS support + eax, _ := xgetbv(0) + if (eax & 0x6) == 0x6 { + rval |= AVX + if (c & 0x00001000) != 0 { + rval |= FMA3 + } + } + } + + // Check AVX2, AVX2 requires OS support, but BMI1/2 don't. 
+ if mfi >= 7 { + _, ebx, ecx, _ := cpuidex(7, 0) + if (rval&AVX) != 0 && (ebx&0x00000020) != 0 { + rval |= AVX2 + } + if (ebx & 0x00000008) != 0 { + rval |= BMI1 + if (ebx & 0x00000100) != 0 { + rval |= BMI2 + } + } + if ebx&(1<<2) != 0 { + rval |= SGX + } + if ebx&(1<<4) != 0 { + rval |= HLE + } + if ebx&(1<<9) != 0 { + rval |= ERMS + } + if ebx&(1<<11) != 0 { + rval |= RTM + } + if ebx&(1<<14) != 0 { + rval |= MPX + } + if ebx&(1<<18) != 0 { + rval |= RDSEED + } + if ebx&(1<<19) != 0 { + rval |= ADX + } + if ebx&(1<<29) != 0 { + rval |= SHA + } + + // Only detect AVX-512 features if XGETBV is supported + if c&((1<<26)|(1<<27)) == (1<<26)|(1<<27) { + // Check for OS support + eax, _ := xgetbv(0) + + // Verify that XCR0[7:5] = ‘111b’ (OPMASK state, upper 256-bit of ZMM0-ZMM15 and + // ZMM16-ZMM31 state are enabled by OS) + /// and that XCR0[2:1] = ‘11b’ (XMM state and YMM state are enabled by OS). + if (eax>>5)&7 == 7 && (eax>>1)&3 == 3 { + if ebx&(1<<16) != 0 { + rval |= AVX512F + } + if ebx&(1<<17) != 0 { + rval |= AVX512DQ + } + if ebx&(1<<21) != 0 { + rval |= AVX512IFMA + } + if ebx&(1<<26) != 0 { + rval |= AVX512PF + } + if ebx&(1<<27) != 0 { + rval |= AVX512ER + } + if ebx&(1<<28) != 0 { + rval |= AVX512CD + } + if ebx&(1<<30) != 0 { + rval |= AVX512BW + } + if ebx&(1<<31) != 0 { + rval |= AVX512VL + } + // ecx + if ecx&(1<<1) != 0 { + rval |= AVX512VBMI + } + } + } + } + + if maxExtendedFunction() >= 0x80000001 { + _, _, c, d := cpuid(0x80000001) + if (c & (1 << 5)) != 0 { + rval |= LZCNT + rval |= POPCNT + } + if (d & (1 << 31)) != 0 { + rval |= AMD3DNOW + } + if (d & (1 << 30)) != 0 { + rval |= AMD3DNOWEXT + } + if (d & (1 << 23)) != 0 { + rval |= MMX + } + if (d & (1 << 22)) != 0 { + rval |= MMXEXT + } + if (c & (1 << 6)) != 0 { + rval |= SSE4A + } + if d&(1<<20) != 0 { + rval |= NX + } + if d&(1<<27) != 0 { + rval |= RDTSCP + } + + /* Allow for selectively disabling SSE2 functions on AMD processors + with SSE2 support but not SSE4a. This includes Athlon64, some + Opteron, and some Sempron processors. MMX, SSE, or 3DNow! are faster + than SSE2 often enough to utilize this special-case flag. + AV_CPU_FLAG_SSE2 and AV_CPU_FLAG_SSE2SLOW are both set in this case + so that SSE2 is used unless explicitly disabled by checking + AV_CPU_FLAG_SSE2SLOW. */ + if vendorID() != Intel && + rval&SSE2 != 0 && (c&0x00000040) == 0 { + rval |= SSE2SLOW + } + + /* XOP and FMA4 use the AVX instruction coding scheme, so they can't be + * used unless the OS has AVX support. */ + if (rval & AVX) != 0 { + if (c & 0x00000800) != 0 { + rval |= XOP + } + if (c & 0x00010000) != 0 { + rval |= FMA4 + } + } + + if vendorID() == Intel { + family, model := familyModel() + if family == 6 && (model == 9 || model == 13 || model == 14) { + /* 6/9 (pentium-m "banias"), 6/13 (pentium-m "dothan"), and + * 6/14 (core1 "yonah") theoretically support sse2, but it's + * usually slower than mmx. */ + if (rval & SSE2) != 0 { + rval |= SSE2SLOW + } + if (rval & SSE3) != 0 { + rval |= SSE3SLOW + } + } + /* The Atom processor has SSSE3 support, which is useful in many cases, + * but sometimes the SSSE3 version is slower than the SSE2 equivalent + * on the Atom, but is generally faster on other processors supporting + * SSSE3. This flag allows for selectively disabling certain SSSE3 + * functions on the Atom. 
*/ + if family == 6 && model == 28 { + rval |= ATOM + } + } + } + return Flags(rval) +} + +func valAsString(values ...uint32) []byte { + r := make([]byte, 4*len(values)) + for i, v := range values { + dst := r[i*4:] + dst[0] = byte(v & 0xff) + dst[1] = byte((v >> 8) & 0xff) + dst[2] = byte((v >> 16) & 0xff) + dst[3] = byte((v >> 24) & 0xff) + switch { + case dst[0] == 0: + return r[:i*4] + case dst[1] == 0: + return r[:i*4+1] + case dst[2] == 0: + return r[:i*4+2] + case dst[3] == 0: + return r[:i*4+3] + } + } + return r +} diff --git a/vendor/github.com/klauspost/cpuid/cpuid_386.s b/vendor/github.com/klauspost/cpuid/cpuid_386.s new file mode 100644 index 0000000..4d73171 --- /dev/null +++ b/vendor/github.com/klauspost/cpuid/cpuid_386.s @@ -0,0 +1,42 @@ +// Copyright (c) 2015 Klaus Post, released under MIT License. See LICENSE file. + +// +build 386,!gccgo + +// func asmCpuid(op uint32) (eax, ebx, ecx, edx uint32) +TEXT ·asmCpuid(SB), 7, $0 + XORL CX, CX + MOVL op+0(FP), AX + CPUID + MOVL AX, eax+4(FP) + MOVL BX, ebx+8(FP) + MOVL CX, ecx+12(FP) + MOVL DX, edx+16(FP) + RET + +// func asmCpuidex(op, op2 uint32) (eax, ebx, ecx, edx uint32) +TEXT ·asmCpuidex(SB), 7, $0 + MOVL op+0(FP), AX + MOVL op2+4(FP), CX + CPUID + MOVL AX, eax+8(FP) + MOVL BX, ebx+12(FP) + MOVL CX, ecx+16(FP) + MOVL DX, edx+20(FP) + RET + +// func xgetbv(index uint32) (eax, edx uint32) +TEXT ·asmXgetbv(SB), 7, $0 + MOVL index+0(FP), CX + BYTE $0x0f; BYTE $0x01; BYTE $0xd0 // XGETBV + MOVL AX, eax+4(FP) + MOVL DX, edx+8(FP) + RET + +// func asmRdtscpAsm() (eax, ebx, ecx, edx uint32) +TEXT ·asmRdtscpAsm(SB), 7, $0 + BYTE $0x0F; BYTE $0x01; BYTE $0xF9 // RDTSCP + MOVL AX, eax+0(FP) + MOVL BX, ebx+4(FP) + MOVL CX, ecx+8(FP) + MOVL DX, edx+12(FP) + RET diff --git a/vendor/github.com/klauspost/cpuid/cpuid_amd64.s b/vendor/github.com/klauspost/cpuid/cpuid_amd64.s new file mode 100644 index 0000000..3c1d60e --- /dev/null +++ b/vendor/github.com/klauspost/cpuid/cpuid_amd64.s @@ -0,0 +1,42 @@ +// Copyright (c) 2015 Klaus Post, released under MIT License. See LICENSE file. + +//+build amd64,!gccgo + +// func asmCpuid(op uint32) (eax, ebx, ecx, edx uint32) +TEXT ·asmCpuid(SB), 7, $0 + XORQ CX, CX + MOVL op+0(FP), AX + CPUID + MOVL AX, eax+8(FP) + MOVL BX, ebx+12(FP) + MOVL CX, ecx+16(FP) + MOVL DX, edx+20(FP) + RET + +// func asmCpuidex(op, op2 uint32) (eax, ebx, ecx, edx uint32) +TEXT ·asmCpuidex(SB), 7, $0 + MOVL op+0(FP), AX + MOVL op2+4(FP), CX + CPUID + MOVL AX, eax+8(FP) + MOVL BX, ebx+12(FP) + MOVL CX, ecx+16(FP) + MOVL DX, edx+20(FP) + RET + +// func asmXgetbv(index uint32) (eax, edx uint32) +TEXT ·asmXgetbv(SB), 7, $0 + MOVL index+0(FP), CX + BYTE $0x0f; BYTE $0x01; BYTE $0xd0 // XGETBV + MOVL AX, eax+8(FP) + MOVL DX, edx+12(FP) + RET + +// func asmRdtscpAsm() (eax, ebx, ecx, edx uint32) +TEXT ·asmRdtscpAsm(SB), 7, $0 + BYTE $0x0F; BYTE $0x01; BYTE $0xF9 // RDTSCP + MOVL AX, eax+0(FP) + MOVL BX, ebx+4(FP) + MOVL CX, ecx+8(FP) + MOVL DX, edx+12(FP) + RET diff --git a/vendor/github.com/klauspost/cpuid/cpuid_test.go b/vendor/github.com/klauspost/cpuid/cpuid_test.go new file mode 100644 index 0000000..45401d9 --- /dev/null +++ b/vendor/github.com/klauspost/cpuid/cpuid_test.go @@ -0,0 +1,737 @@ +// Copyright (c) 2015 Klaus Post, released under MIT License. See LICENSE file. + +package cpuid + +import ( + "fmt" + "testing" +) + +// There is no real way to test a CPU identifier, since results will +// obviously differ on each machine. 
+func TestCPUID(t *testing.T) { + n := maxFunctionID() + t.Logf("Max Function:0x%x\n", n) + n = maxExtendedFunction() + t.Logf("Max Extended Function:0x%x\n", n) + t.Log("Name:", CPU.BrandName) + t.Log("PhysicalCores:", CPU.PhysicalCores) + t.Log("ThreadsPerCore:", CPU.ThreadsPerCore) + t.Log("LogicalCores:", CPU.LogicalCores) + t.Log("Family", CPU.Family, "Model:", CPU.Model) + t.Log("Features:", CPU.Features) + t.Log("Cacheline bytes:", CPU.CacheLine) + t.Log("L1 Instruction Cache:", CPU.Cache.L1I, "bytes") + t.Log("L1 Data Cache:", CPU.Cache.L1D, "bytes") + t.Log("L2 Cache:", CPU.Cache.L2, "bytes") + t.Log("L3 Cache:", CPU.Cache.L3, "bytes") + + if CPU.SSE2() { + t.Log("We have SSE2") + } +} + +func TestDumpCPUID(t *testing.T) { + n := int(maxFunctionID()) + for i := 0; i <= n; i++ { + a, b, c, d := cpuidex(uint32(i), 0) + t.Logf("CPUID %08x: %08x-%08x-%08x-%08x", i, a, b, c, d) + ex := uint32(1) + for { + a2, b2, c2, d2 := cpuidex(uint32(i), ex) + if a2 == a && b2 == b && d2 == d || ex > 50 || a2 == 0 { + break + } + t.Logf("CPUID %08x: %08x-%08x-%08x-%08x", i, a2, b2, c2, d2) + a, b, c, d = a2, b2, c2, d2 + ex++ + } + } + n2 := maxExtendedFunction() + for i := uint32(0x80000000); i <= n2; i++ { + a, b, c, d := cpuid(i) + t.Logf("CPUID %08x: %08x-%08x-%08x-%08x", i, a, b, c, d) + } +} + +func Example() { + // Print basic CPU information: + fmt.Println("Name:", CPU.BrandName) + fmt.Println("PhysicalCores:", CPU.PhysicalCores) + fmt.Println("ThreadsPerCore:", CPU.ThreadsPerCore) + fmt.Println("LogicalCores:", CPU.LogicalCores) + fmt.Println("Family", CPU.Family, "Model:", CPU.Model) + fmt.Println("Features:", CPU.Features) + fmt.Println("Cacheline bytes:", CPU.CacheLine) + + // Test if we have a specific feature: + if CPU.SSE() { + fmt.Println("We have Streaming SIMD Extensions") + } +} + +func TestBrandNameZero(t *testing.T) { + if len(CPU.BrandName) > 0 { + // Cut out last byte + last := []byte(CPU.BrandName[len(CPU.BrandName)-1:]) + if last[0] == 0 { + t.Fatal("last byte was zero") + } else if last[0] == 32 { + t.Fatal("whitespace wasn't trimmed") + } + } +} + +// Generated here: http://play.golang.org/p/mko-0tFt0Q + +// TestCmov tests Cmov() function +func TestCmov(t *testing.T) { + got := CPU.Cmov() + expected := CPU.Features&CMOV == CMOV + if got != expected { + t.Fatalf("Cmov: expected %v, got %v", expected, got) + } + t.Log("CMOV Support:", got) +} + +// TestAmd3dnow tests Amd3dnow() function +func TestAmd3dnow(t *testing.T) { + got := CPU.Amd3dnow() + expected := CPU.Features&AMD3DNOW == AMD3DNOW + if got != expected { + t.Fatalf("Amd3dnow: expected %v, got %v", expected, got) + } + t.Log("AMD3DNOW Support:", got) +} + +// TestAmd3dnowExt tests Amd3dnowExt() function +func TestAmd3dnowExt(t *testing.T) { + got := CPU.Amd3dnowExt() + expected := CPU.Features&AMD3DNOWEXT == AMD3DNOWEXT + if got != expected { + t.Fatalf("Amd3dnowExt: expected %v, got %v", expected, got) + } + t.Log("AMD3DNOWEXT Support:", got) +} + +// TestMMX tests MMX() function +func TestMMX(t *testing.T) { + got := CPU.MMX() + expected := CPU.Features&MMX == MMX + if got != expected { + t.Fatalf("MMX: expected %v, got %v", expected, got) + } + t.Log("MMX Support:", got) +} + +// TestMMXext tests MMXext() function +func TestMMXext(t *testing.T) { + got := CPU.MMXExt() + expected := CPU.Features&MMXEXT == MMXEXT + if got != expected { + t.Fatalf("MMXExt: expected %v, got %v", expected, got) + } + t.Log("MMXEXT Support:", got) +} + +// TestSSE tests SSE() function +func TestSSE(t *testing.T) { + got := CPU.SSE() + 
expected := CPU.Features&SSE == SSE + if got != expected { + t.Fatalf("SSE: expected %v, got %v", expected, got) + } + t.Log("SSE Support:", got) +} + +// TestSSE2 tests SSE2() function +func TestSSE2(t *testing.T) { + got := CPU.SSE2() + expected := CPU.Features&SSE2 == SSE2 + if got != expected { + t.Fatalf("SSE2: expected %v, got %v", expected, got) + } + t.Log("SSE2 Support:", got) +} + +// TestSSE3 tests SSE3() function +func TestSSE3(t *testing.T) { + got := CPU.SSE3() + expected := CPU.Features&SSE3 == SSE3 + if got != expected { + t.Fatalf("SSE3: expected %v, got %v", expected, got) + } + t.Log("SSE3 Support:", got) +} + +// TestSSSE3 tests SSSE3() function +func TestSSSE3(t *testing.T) { + got := CPU.SSSE3() + expected := CPU.Features&SSSE3 == SSSE3 + if got != expected { + t.Fatalf("SSSE3: expected %v, got %v", expected, got) + } + t.Log("SSSE3 Support:", got) +} + +// TestSSE4 tests SSE4() function +func TestSSE4(t *testing.T) { + got := CPU.SSE4() + expected := CPU.Features&SSE4 == SSE4 + if got != expected { + t.Fatalf("SSE4: expected %v, got %v", expected, got) + } + t.Log("SSE4 Support:", got) +} + +// TestSSE42 tests SSE42() function +func TestSSE42(t *testing.T) { + got := CPU.SSE42() + expected := CPU.Features&SSE42 == SSE42 + if got != expected { + t.Fatalf("SSE42: expected %v, got %v", expected, got) + } + t.Log("SSE42 Support:", got) +} + +// TestAVX tests AVX() function +func TestAVX(t *testing.T) { + got := CPU.AVX() + expected := CPU.Features&AVX == AVX + if got != expected { + t.Fatalf("AVX: expected %v, got %v", expected, got) + } + t.Log("AVX Support:", got) +} + +// TestAVX2 tests AVX2() function +func TestAVX2(t *testing.T) { + got := CPU.AVX2() + expected := CPU.Features&AVX2 == AVX2 + if got != expected { + t.Fatalf("AVX2: expected %v, got %v", expected, got) + } + t.Log("AVX2 Support:", got) +} + +// TestFMA3 tests FMA3() function +func TestFMA3(t *testing.T) { + got := CPU.FMA3() + expected := CPU.Features&FMA3 == FMA3 + if got != expected { + t.Fatalf("FMA3: expected %v, got %v", expected, got) + } + t.Log("FMA3 Support:", got) +} + +// TestFMA4 tests FMA4() function +func TestFMA4(t *testing.T) { + got := CPU.FMA4() + expected := CPU.Features&FMA4 == FMA4 + if got != expected { + t.Fatalf("FMA4: expected %v, got %v", expected, got) + } + t.Log("FMA4 Support:", got) +} + +// TestXOP tests XOP() function +func TestXOP(t *testing.T) { + got := CPU.XOP() + expected := CPU.Features&XOP == XOP + if got != expected { + t.Fatalf("XOP: expected %v, got %v", expected, got) + } + t.Log("XOP Support:", got) +} + +// TestF16C tests F16C() function +func TestF16C(t *testing.T) { + got := CPU.F16C() + expected := CPU.Features&F16C == F16C + if got != expected { + t.Fatalf("F16C: expected %v, got %v", expected, got) + } + t.Log("F16C Support:", got) +} + +// TestCX16 tests CX16() function +func TestCX16(t *testing.T) { + got := CPU.CX16() + expected := CPU.Features&CX16 == CX16 + if got != expected { + t.Fatalf("CX16: expected %v, got %v", expected, got) + } + t.Log("CX16 Support:", got) +} + +// TestSGX tests SGX() function +func TestSGX(t *testing.T) { + got := CPU.SGX.Available + expected := CPU.Features&SGX == SGX + if got != expected { + t.Fatalf("SGX: expected %v, got %v", expected, got) + } + t.Log("SGX Support:", got) +} + +// TestBMI1 tests BMI1() function +func TestBMI1(t *testing.T) { + got := CPU.BMI1() + expected := CPU.Features&BMI1 == BMI1 + if got != expected { + t.Fatalf("BMI1: expected %v, got %v", expected, got) + } + t.Log("BMI1 Support:", got) +} + 
+// TestBMI2 tests BMI2() function +func TestBMI2(t *testing.T) { + got := CPU.BMI2() + expected := CPU.Features&BMI2 == BMI2 + if got != expected { + t.Fatalf("BMI2: expected %v, got %v", expected, got) + } + t.Log("BMI2 Support:", got) +} + +// TestTBM tests TBM() function +func TestTBM(t *testing.T) { + got := CPU.TBM() + expected := CPU.Features&TBM == TBM + if got != expected { + t.Fatalf("TBM: expected %v, got %v", expected, got) + } + t.Log("TBM Support:", got) +} + +// TestLzcnt tests Lzcnt() function +func TestLzcnt(t *testing.T) { + got := CPU.Lzcnt() + expected := CPU.Features&LZCNT == LZCNT + if got != expected { + t.Fatalf("Lzcnt: expected %v, got %v", expected, got) + } + t.Log("LZCNT Support:", got) +} + +// TestLzcnt tests Lzcnt() function +func TestPopcnt(t *testing.T) { + got := CPU.Popcnt() + expected := CPU.Features&POPCNT == POPCNT + if got != expected { + t.Fatalf("Popcnt: expected %v, got %v", expected, got) + } + t.Log("POPCNT Support:", got) +} + +// TestAesNi tests AesNi() function +func TestAesNi(t *testing.T) { + got := CPU.AesNi() + expected := CPU.Features&AESNI == AESNI + if got != expected { + t.Fatalf("AesNi: expected %v, got %v", expected, got) + } + t.Log("AESNI Support:", got) +} + +// TestHTT tests HTT() function +func TestHTT(t *testing.T) { + got := CPU.HTT() + expected := CPU.Features&HTT == HTT + if got != expected { + t.Fatalf("HTT: expected %v, got %v", expected, got) + } + t.Log("HTT Support:", got) +} + +// TestClmul tests Clmul() function +func TestClmul(t *testing.T) { + got := CPU.Clmul() + expected := CPU.Features&CLMUL == CLMUL + if got != expected { + t.Fatalf("Clmul: expected %v, got %v", expected, got) + } + t.Log("CLMUL Support:", got) +} + +// TestSSE2Slow tests SSE2Slow() function +func TestSSE2Slow(t *testing.T) { + got := CPU.SSE2Slow() + expected := CPU.Features&SSE2SLOW == SSE2SLOW + if got != expected { + t.Fatalf("SSE2Slow: expected %v, got %v", expected, got) + } + t.Log("SSE2SLOW Support:", got) +} + +// TestSSE3Slow tests SSE3slow() function +func TestSSE3Slow(t *testing.T) { + got := CPU.SSE3Slow() + expected := CPU.Features&SSE3SLOW == SSE3SLOW + if got != expected { + t.Fatalf("SSE3slow: expected %v, got %v", expected, got) + } + t.Log("SSE3SLOW Support:", got) +} + +// TestAtom tests Atom() function +func TestAtom(t *testing.T) { + got := CPU.Atom() + expected := CPU.Features&ATOM == ATOM + if got != expected { + t.Fatalf("Atom: expected %v, got %v", expected, got) + } + t.Log("ATOM Support:", got) +} + +// TestNX tests NX() function (NX (No-Execute) bit) +func TestNX(t *testing.T) { + got := CPU.NX() + expected := CPU.Features&NX == NX + if got != expected { + t.Fatalf("NX: expected %v, got %v", expected, got) + } + t.Log("NX Support:", got) +} + +// TestSSE4A tests SSE4A() function (AMD Barcelona microarchitecture SSE4a instructions) +func TestSSE4A(t *testing.T) { + got := CPU.SSE4A() + expected := CPU.Features&SSE4A == SSE4A + if got != expected { + t.Fatalf("SSE4A: expected %v, got %v", expected, got) + } + t.Log("SSE4A Support:", got) +} + +// TestHLE tests HLE() function (Hardware Lock Elision) +func TestHLE(t *testing.T) { + got := CPU.HLE() + expected := CPU.Features&HLE == HLE + if got != expected { + t.Fatalf("HLE: expected %v, got %v", expected, got) + } + t.Log("HLE Support:", got) +} + +// TestRTM tests RTM() function (Restricted Transactional Memory) +func TestRTM(t *testing.T) { + got := CPU.RTM() + expected := CPU.Features&RTM == RTM + if got != expected { + t.Fatalf("RTM: expected %v, got %v", expected, 
got) + } + t.Log("RTM Support:", got) +} + +// TestRdrand tests RDRAND() function (RDRAND instruction is available) +func TestRdrand(t *testing.T) { + got := CPU.Rdrand() + expected := CPU.Features&RDRAND == RDRAND + if got != expected { + t.Fatalf("Rdrand: expected %v, got %v", expected, got) + } + t.Log("Rdrand Support:", got) +} + +// TestRdseed tests RDSEED() function (RDSEED instruction is available) +func TestRdseed(t *testing.T) { + got := CPU.Rdseed() + expected := CPU.Features&RDSEED == RDSEED + if got != expected { + t.Fatalf("Rdseed: expected %v, got %v", expected, got) + } + t.Log("Rdseed Support:", got) +} + +// TestADX tests ADX() function (Intel ADX (Multi-Precision Add-Carry Instruction Extensions)) +func TestADX(t *testing.T) { + got := CPU.ADX() + expected := CPU.Features&ADX == ADX + if got != expected { + t.Fatalf("ADX: expected %v, got %v", expected, got) + } + t.Log("ADX Support:", got) +} + +// TestSHA tests SHA() function (Intel SHA Extensions) +func TestSHA(t *testing.T) { + got := CPU.SHA() + expected := CPU.Features&SHA == SHA + if got != expected { + t.Fatalf("SHA: expected %v, got %v", expected, got) + } + t.Log("SHA Support:", got) +} + +// TestAVX512F tests AVX512F() function (AVX-512 Foundation) +func TestAVX512F(t *testing.T) { + got := CPU.AVX512F() + expected := CPU.Features&AVX512F == AVX512F + if got != expected { + t.Fatalf("AVX512F: expected %v, got %v", expected, got) + } + t.Log("AVX512F Support:", got) +} + +// TestAVX512DQ tests AVX512DQ() function (AVX-512 Doubleword and Quadword Instructions) +func TestAVX512DQ(t *testing.T) { + got := CPU.AVX512DQ() + expected := CPU.Features&AVX512DQ == AVX512DQ + if got != expected { + t.Fatalf("AVX512DQ: expected %v, got %v", expected, got) + } + t.Log("AVX512DQ Support:", got) +} + +// TestAVX512IFMA tests AVX512IFMA() function (AVX-512 Integer Fused Multiply-Add Instructions) +func TestAVX512IFMA(t *testing.T) { + got := CPU.AVX512IFMA() + expected := CPU.Features&AVX512IFMA == AVX512IFMA + if got != expected { + t.Fatalf("AVX512IFMA: expected %v, got %v", expected, got) + } + t.Log("AVX512IFMA Support:", got) +} + +// TestAVX512PF tests AVX512PF() function (AVX-512 Prefetch Instructions) +func TestAVX512PF(t *testing.T) { + got := CPU.AVX512PF() + expected := CPU.Features&AVX512PF == AVX512PF + if got != expected { + t.Fatalf("AVX512PF: expected %v, got %v", expected, got) + } + t.Log("AVX512PF Support:", got) +} + +// TestAVX512ER tests AVX512ER() function (AVX-512 Exponential and Reciprocal Instructions) +func TestAVX512ER(t *testing.T) { + got := CPU.AVX512ER() + expected := CPU.Features&AVX512ER == AVX512ER + if got != expected { + t.Fatalf("AVX512ER: expected %v, got %v", expected, got) + } + t.Log("AVX512ER Support:", got) +} + +// TestAVX512CD tests AVX512CD() function (AVX-512 Conflict Detection Instructions) +func TestAVX512CD(t *testing.T) { + got := CPU.AVX512CD() + expected := CPU.Features&AVX512CD == AVX512CD + if got != expected { + t.Fatalf("AVX512CD: expected %v, got %v", expected, got) + } + t.Log("AVX512CD Support:", got) +} + +// TestAVX512BW tests AVX512BW() function (AVX-512 Byte and Word Instructions) +func TestAVX512BW(t *testing.T) { + got := CPU.AVX512BW() + expected := CPU.Features&AVX512BW == AVX512BW + if got != expected { + t.Fatalf("AVX512BW: expected %v, got %v", expected, got) + } + t.Log("AVX512BW Support:", got) +} + +// TestAVX512VL tests AVX512VL() function (AVX-512 Vector Length Extensions) +func TestAVX512VL(t *testing.T) { + got := CPU.AVX512VL() + expected := 
CPU.Features&AVX512VL == AVX512VL + if got != expected { + t.Fatalf("AVX512VL: expected %v, got %v", expected, got) + } + t.Log("AVX512VL Support:", got) +} + +// TestAVX512VL tests AVX512VBMI() function (AVX-512 Vector Bit Manipulation Instructions) +func TestAVX512VBMI(t *testing.T) { + got := CPU.AVX512VBMI() + expected := CPU.Features&AVX512VBMI == AVX512VBMI + if got != expected { + t.Fatalf("AVX512VBMI: expected %v, got %v", expected, got) + } + t.Log("AVX512VBMI Support:", got) +} + +// TestMPX tests MPX() function (Intel MPX (Memory Protection Extensions)) +func TestMPX(t *testing.T) { + got := CPU.MPX() + expected := CPU.Features&MPX == MPX + if got != expected { + t.Fatalf("MPX: expected %v, got %v", expected, got) + } + t.Log("MPX Support:", got) +} + +// TestERMS tests ERMS() function (Enhanced REP MOVSB/STOSB) +func TestERMS(t *testing.T) { + got := CPU.ERMS() + expected := CPU.Features&ERMS == ERMS + if got != expected { + t.Fatalf("ERMS: expected %v, got %v", expected, got) + } + t.Log("ERMS Support:", got) +} + +// TestVendor writes the detected vendor. Will be 0 if unknown +func TestVendor(t *testing.T) { + t.Log("Vendor ID:", CPU.VendorID) +} + +// Intel returns true if vendor is recognized as Intel +func TestIntel(t *testing.T) { + got := CPU.Intel() + expected := CPU.VendorID == Intel + if got != expected { + t.Fatalf("TestIntel: expected %v, got %v", expected, got) + } + t.Log("TestIntel:", got) +} + +// AMD returns true if vendor is recognized as AMD +func TestAMD(t *testing.T) { + got := CPU.AMD() + expected := CPU.VendorID == AMD + if got != expected { + t.Fatalf("TestAMD: expected %v, got %v", expected, got) + } + t.Log("TestAMD:", got) +} + +// Transmeta returns true if vendor is recognized as Transmeta +func TestTransmeta(t *testing.T) { + got := CPU.Transmeta() + expected := CPU.VendorID == Transmeta + if got != expected { + t.Fatalf("TestTransmeta: expected %v, got %v", expected, got) + } + t.Log("TestTransmeta:", got) +} + +// NSC returns true if vendor is recognized as National Semiconductor +func TestNSC(t *testing.T) { + got := CPU.NSC() + expected := CPU.VendorID == NSC + if got != expected { + t.Fatalf("TestNSC: expected %v, got %v", expected, got) + } + t.Log("TestNSC:", got) +} + +// VIA returns true if vendor is recognized as VIA +func TestVIA(t *testing.T) { + got := CPU.VIA() + expected := CPU.VendorID == VIA + if got != expected { + t.Fatalf("TestVIA: expected %v, got %v", expected, got) + } + t.Log("TestVIA:", got) +} + +// Test VM function +func TestVM(t *testing.T) { + t.Log("Vendor ID:", CPU.VM()) +} + +// NSC returns true if vendor is recognized as National Semiconductor +func TestCPUInfo_TSX(t *testing.T) { + got := CPU.TSX() + expected := CPU.HLE() && CPU.RTM() + if got != expected { + t.Fatalf("TestNSC: expected %v, got %v", expected, got) + } + t.Log("TestNSC:", got) +} + +// Test RTCounter function +func TestRtCounter(t *testing.T) { + a := CPU.RTCounter() + b := CPU.RTCounter() + t.Log("CPU Counter:", a, b, b-a) +} + +// Prints the value of Ia32TscAux() +func TestIa32TscAux(t *testing.T) { + ecx := CPU.Ia32TscAux() + t.Logf("Ia32TscAux:0x%x\n", ecx) + if ecx != 0 { + chip := (ecx & 0xFFF000) >> 12 + core := ecx & 0xFFF + t.Log("Likely chip, core:", chip, core) + } +} + +func TestThreadsPerCoreNZ(t *testing.T) { + if CPU.ThreadsPerCore == 0 { + t.Fatal("threads per core is zero") + } +} + +// Prints the value of LogicalCPU() +func TestLogicalCPU(t *testing.T) { + t.Log("Currently executing on cpu:", CPU.LogicalCPU()) +} + +func 
TestMaxFunction(t *testing.T) { + expect := maxFunctionID() + if CPU.maxFunc != expect { + t.Fatal("Max function does not match, expected", expect, "but got", CPU.maxFunc) + } + expect = maxExtendedFunction() + if CPU.maxExFunc != expect { + t.Fatal("Max Extended function does not match, expected", expect, "but got", CPU.maxFunc) + } +} + +// This example will calculate the chip/core number on Linux +// Linux encodes numa id (<<12) and core id (8bit) into TSC_AUX. +func ExampleCPUInfo_Ia32TscAux() { + ecx := CPU.Ia32TscAux() + if ecx == 0 { + fmt.Println("Unknown CPU ID") + return + } + chip := (ecx & 0xFFF000) >> 12 + core := ecx & 0xFFF + fmt.Println("Chip, Core:", chip, core) +} + +/* +func TestPhysical(t *testing.T) { + var test16 = "CPUID 00000000: 0000000d-756e6547-6c65746e-49656e69 \nCPUID 00000001: 000206d7-03200800-1fbee3ff-bfebfbff \nCPUID 00000002: 76035a01-00f0b2ff-00000000-00ca0000 \nCPUID 00000003: 00000000-00000000-00000000-00000000 \nCPUID 00000004: 3c004121-01c0003f-0000003f-00000000 \nCPUID 00000004: 3c004122-01c0003f-0000003f-00000000 \nCPUID 00000004: 3c004143-01c0003f-000001ff-00000000 \nCPUID 00000004: 3c07c163-04c0003f-00003fff-00000006 \nCPUID 00000005: 00000040-00000040-00000003-00021120 \nCPUID 00000006: 00000075-00000002-00000009-00000000 \nCPUID 00000007: 00000000-00000000-00000000-00000000 \nCPUID 00000008: 00000000-00000000-00000000-00000000 \nCPUID 00000009: 00000001-00000000-00000000-00000000 \nCPUID 0000000a: 07300403-00000000-00000000-00000603 \nCPUID 0000000b: 00000000-00000000-00000003-00000003 \nCPUID 0000000b: 00000005-00000010-00000201-00000003 \nCPUID 0000000c: 00000000-00000000-00000000-00000000 \nCPUID 0000000d: 00000007-00000340-00000340-00000000 \nCPUID 0000000d: 00000001-00000000-00000000-00000000 \nCPUID 0000000d: 00000100-00000240-00000000-00000000 \nCPUID 80000000: 80000008-00000000-00000000-00000000 \nCPUID 80000001: 00000000-00000000-00000001-2c100800 \nCPUID 80000002: 20202020-49202020-6c65746e-20295228 \nCPUID 80000003: 6e6f6558-20295228-20555043-322d3545 \nCPUID 80000004: 20303636-20402030-30322e32-007a4847 \nCPUID 80000005: 00000000-00000000-00000000-00000000 \nCPUID 80000006: 00000000-00000000-01006040-00000000 \nCPUID 80000007: 00000000-00000000-00000000-00000100 \nCPUID 80000008: 0000302e-00000000-00000000-00000000" + restore := mockCPU([]byte(test16)) + Detect() + t.Log("Name:", CPU.BrandName) + n := maxFunctionID() + t.Logf("Max Function:0x%x\n", n) + n = maxExtendedFunction() + t.Logf("Max Extended Function:0x%x\n", n) + t.Log("PhysicalCores:", CPU.PhysicalCores) + t.Log("ThreadsPerCore:", CPU.ThreadsPerCore) + t.Log("LogicalCores:", CPU.LogicalCores) + t.Log("Family", CPU.Family, "Model:", CPU.Model) + t.Log("Features:", CPU.Features) + t.Log("Cacheline bytes:", CPU.CacheLine) + t.Log("L1 Instruction Cache:", CPU.Cache.L1I, "bytes") + t.Log("L1 Data Cache:", CPU.Cache.L1D, "bytes") + t.Log("L2 Cache:", CPU.Cache.L2, "bytes") + t.Log("L3 Cache:", CPU.Cache.L3, "bytes") + if CPU.LogicalCores > 0 && CPU.PhysicalCores > 0 { + if CPU.LogicalCores != CPU.PhysicalCores*CPU.ThreadsPerCore { + t.Fatalf("Core count mismatch, LogicalCores (%d) != PhysicalCores (%d) * CPU.ThreadsPerCore (%d)", + CPU.LogicalCores, CPU.PhysicalCores, CPU.ThreadsPerCore) + } + } + + if CPU.ThreadsPerCore > 1 && !CPU.HTT() { + t.Fatalf("Hyperthreading not detected") + } + if CPU.ThreadsPerCore == 1 && CPU.HTT() { + t.Fatalf("Hyperthreading detected, but only 1 Thread per core") + } + restore() + Detect() + TestCPUID(t) +} +*/ diff --git 
a/vendor/github.com/klauspost/cpuid/detect_intel.go b/vendor/github.com/klauspost/cpuid/detect_intel.go new file mode 100644 index 0000000..a5f04dd --- /dev/null +++ b/vendor/github.com/klauspost/cpuid/detect_intel.go @@ -0,0 +1,17 @@ +// Copyright (c) 2015 Klaus Post, released under MIT License. See LICENSE file. + +// +build 386,!gccgo amd64,!gccgo + +package cpuid + +func asmCpuid(op uint32) (eax, ebx, ecx, edx uint32) +func asmCpuidex(op, op2 uint32) (eax, ebx, ecx, edx uint32) +func asmXgetbv(index uint32) (eax, edx uint32) +func asmRdtscpAsm() (eax, ebx, ecx, edx uint32) + +func initCPU() { + cpuid = asmCpuid + cpuidex = asmCpuidex + xgetbv = asmXgetbv + rdtscpAsm = asmRdtscpAsm +} diff --git a/vendor/github.com/klauspost/cpuid/detect_ref.go b/vendor/github.com/klauspost/cpuid/detect_ref.go new file mode 100644 index 0000000..909c5d9 --- /dev/null +++ b/vendor/github.com/klauspost/cpuid/detect_ref.go @@ -0,0 +1,23 @@ +// Copyright (c) 2015 Klaus Post, released under MIT License. See LICENSE file. + +// +build !amd64,!386 gccgo + +package cpuid + +func initCPU() { + cpuid = func(op uint32) (eax, ebx, ecx, edx uint32) { + return 0, 0, 0, 0 + } + + cpuidex = func(op, op2 uint32) (eax, ebx, ecx, edx uint32) { + return 0, 0, 0, 0 + } + + xgetbv = func(index uint32) (eax, edx uint32) { + return 0, 0 + } + + rdtscpAsm = func() (eax, ebx, ecx, edx uint32) { + return 0, 0, 0, 0 + } +} diff --git a/vendor/github.com/klauspost/cpuid/generate.go b/vendor/github.com/klauspost/cpuid/generate.go new file mode 100644 index 0000000..90e7a98 --- /dev/null +++ b/vendor/github.com/klauspost/cpuid/generate.go @@ -0,0 +1,4 @@ +package cpuid + +//go:generate go run private-gen.go +//go:generate gofmt -w ./private diff --git a/vendor/github.com/klauspost/cpuid/mockcpu_test.go b/vendor/github.com/klauspost/cpuid/mockcpu_test.go new file mode 100644 index 0000000..f15173f --- /dev/null +++ b/vendor/github.com/klauspost/cpuid/mockcpu_test.go @@ -0,0 +1,209 @@ +package cpuid + +import ( + "archive/zip" + "fmt" + "io/ioutil" + "sort" + "strings" + "testing" +) + +type fakecpuid map[uint32][][]uint32 + +type idfuncs struct { + cpuid func(op uint32) (eax, ebx, ecx, edx uint32) + cpuidex func(op, op2 uint32) (eax, ebx, ecx, edx uint32) + xgetbv func(index uint32) (eax, edx uint32) +} + +func (f fakecpuid) String() string { + var out = make([]string, 0, len(f)) + for key, val := range f { + for _, v := range val { + out = append(out, fmt.Sprintf("CPUID %08x: [%08x, %08x, %08x, %08x]", key, v[0], v[1], v[2], v[3])) + } + } + sorter := sort.StringSlice(out) + sort.Sort(&sorter) + return strings.Join(sorter, "\n") +} + +func mockCPU(def []byte) func() { + lines := strings.Split(string(def), "\n") + anyfound := false + fakeID := make(fakecpuid) + for _, line := range lines { + line = strings.Trim(line, "\r\t ") + if !strings.HasPrefix(line, "CPUID") { + continue + } + // Only collect for first cpu + if strings.HasPrefix(line, "CPUID 00000000") { + if anyfound { + break + } + } + if !strings.Contains(line, "-") { + //continue + } + items := strings.Split(line, ":") + if len(items) < 2 { + if len(line) == 51 || len(line) == 50 { + items = []string{line[0:14], line[15:]} + } else { + items = strings.Split(line, "\t") + if len(items) != 2 { + //fmt.Println("not found:", line, "len:", len(line)) + continue + } + } + } + items = items[0:2] + vals := strings.Trim(items[1], "\r\n ") + + var idV uint32 + n, err := fmt.Sscanf(items[0], "CPUID %x", &idV) + if err != nil || n != 1 { + continue + } + existing, ok := fakeID[idV] + if 
!ok { + existing = make([][]uint32, 0) + } + + values := make([]uint32, 4) + n, err = fmt.Sscanf(vals, "%x-%x-%x-%x", &values[0], &values[1], &values[2], &values[3]) + if n != 4 || err != nil { + n, err = fmt.Sscanf(vals, "%x %x %x %x", &values[0], &values[1], &values[2], &values[3]) + if n != 4 || err != nil { + //fmt.Println("scanned", vals, "got", n, "Err:", err) + continue + } + } + + existing = append(existing, values) + fakeID[idV] = existing + anyfound = true + } + + restorer := func(f idfuncs) func() { + return func() { + cpuid = f.cpuid + cpuidex = f.cpuidex + xgetbv = f.xgetbv + } + }(idfuncs{cpuid: cpuid, cpuidex: cpuidex, xgetbv: xgetbv}) + + cpuid = func(op uint32) (eax, ebx, ecx, edx uint32) { + if op == 0x80000000 || op == 0 { + var ok bool + _, ok = fakeID[op] + if !ok { + return 0, 0, 0, 0 + } + } + first, ok := fakeID[op] + if !ok { + if op > maxFunctionID() { + panic(fmt.Sprintf("Base not found: %v, request:%#v\n", fakeID, op)) + } else { + // we have some entries missing + return 0, 0, 0, 0 + } + } + theid := first[0] + return theid[0], theid[1], theid[2], theid[3] + } + cpuidex = func(op, op2 uint32) (eax, ebx, ecx, edx uint32) { + if op == 0x80000000 { + var ok bool + _, ok = fakeID[op] + if !ok { + return 0, 0, 0, 0 + } + } + first, ok := fakeID[op] + if !ok { + if op > maxExtendedFunction() { + panic(fmt.Sprintf("Extended not found Info: %v, request:%#v, %#v\n", fakeID, op, op2)) + } else { + // we have some entries missing + return 0, 0, 0, 0 + } + } + if int(op2) >= len(first) { + //fmt.Printf("Extended not found Info: %v, request:%#v, %#v\n", fakeID, op, op2) + return 0, 0, 0, 0 + } + theid := first[op2] + return theid[0], theid[1], theid[2], theid[3] + } + xgetbv = func(index uint32) (eax, edx uint32) { + first, ok := fakeID[1] + if !ok { + panic(fmt.Sprintf("XGETBV not supported %v", fakeID)) + } + second := first[0] + // ECX bit 26 must be set + if (second[2] & 1 << 26) == 0 { + panic(fmt.Sprintf("XGETBV not supported %v", fakeID)) + } + // We don't have any data to return, unfortunately + return 0, 0 + } + return restorer +} + +func TestMocks(t *testing.T) { + zr, err := zip.OpenReader("testdata/cpuid_data.zip") + if err != nil { + t.Skip("No testdata:", err) + } + defer zr.Close() + for _, f := range zr.File { + rc, err := f.Open() + if err != nil { + t.Fatal(err) + } + content, err := ioutil.ReadAll(rc) + if err != nil { + t.Fatal(err) + } + rc.Close() + t.Log("Opening", f.FileInfo().Name()) + restore := mockCPU(content) + Detect() + t.Log("Name:", CPU.BrandName) + n := maxFunctionID() + t.Logf("Max Function:0x%x\n", n) + n = maxExtendedFunction() + t.Logf("Max Extended Function:0x%x\n", n) + t.Log("PhysicalCores:", CPU.PhysicalCores) + t.Log("ThreadsPerCore:", CPU.ThreadsPerCore) + t.Log("LogicalCores:", CPU.LogicalCores) + t.Log("Family", CPU.Family, "Model:", CPU.Model) + t.Log("Features:", CPU.Features) + t.Log("Cacheline bytes:", CPU.CacheLine) + t.Log("L1 Instruction Cache:", CPU.Cache.L1I, "bytes") + t.Log("L1 Data Cache:", CPU.Cache.L1D, "bytes") + t.Log("L2 Cache:", CPU.Cache.L2, "bytes") + t.Log("L3 Cache:", CPU.Cache.L3, "bytes") + if CPU.LogicalCores > 0 && CPU.PhysicalCores > 0 { + if CPU.LogicalCores != CPU.PhysicalCores*CPU.ThreadsPerCore { + t.Fatalf("Core count mismatch, LogicalCores (%d) != PhysicalCores (%d) * CPU.ThreadsPerCore (%d)", + CPU.LogicalCores, CPU.PhysicalCores, CPU.ThreadsPerCore) + } + } + + if CPU.ThreadsPerCore > 1 && !CPU.HTT() { + t.Fatalf("Hyperthreading not detected") + } + if CPU.ThreadsPerCore == 1 && CPU.HTT() { + 
t.Fatalf("Hyperthreading detected, but only 1 Thread per core") + } + restore() + } + Detect() + +} diff --git a/vendor/github.com/klauspost/cpuid/private-gen.go b/vendor/github.com/klauspost/cpuid/private-gen.go new file mode 100644 index 0000000..437333d --- /dev/null +++ b/vendor/github.com/klauspost/cpuid/private-gen.go @@ -0,0 +1,476 @@ +// +build ignore + +package main + +import ( + "bytes" + "fmt" + "go/ast" + "go/parser" + "go/printer" + "go/token" + "io" + "io/ioutil" + "log" + "os" + "reflect" + "strings" + "unicode" + "unicode/utf8" +) + +var inFiles = []string{"cpuid.go", "cpuid_test.go"} +var copyFiles = []string{"cpuid_amd64.s", "cpuid_386.s", "detect_ref.go", "detect_intel.go"} +var fileSet = token.NewFileSet() +var reWrites = []rewrite{ + initRewrite("CPUInfo -> cpuInfo"), + initRewrite("Vendor -> vendor"), + initRewrite("Flags -> flags"), + initRewrite("Detect -> detect"), + initRewrite("CPU -> cpu"), +} +var excludeNames = map[string]bool{"string": true, "join": true, "trim": true, + // cpuid_test.go + "t": true, "println": true, "logf": true, "log": true, "fatalf": true, "fatal": true, +} + +var excludePrefixes = []string{"test", "benchmark"} + +func main() { + Package := "private" + parserMode := parser.ParseComments + exported := make(map[string]rewrite) + for _, file := range inFiles { + in, err := os.Open(file) + if err != nil { + log.Fatalf("opening input", err) + } + + src, err := ioutil.ReadAll(in) + if err != nil { + log.Fatalf("reading input", err) + } + + astfile, err := parser.ParseFile(fileSet, file, src, parserMode) + if err != nil { + log.Fatalf("parsing input", err) + } + + for _, rw := range reWrites { + astfile = rw(astfile) + } + + // Inspect the AST and print all identifiers and literals. + var startDecl token.Pos + var endDecl token.Pos + ast.Inspect(astfile, func(n ast.Node) bool { + var s string + switch x := n.(type) { + case *ast.Ident: + if x.IsExported() { + t := strings.ToLower(x.Name) + for _, pre := range excludePrefixes { + if strings.HasPrefix(t, pre) { + return true + } + } + if excludeNames[t] != true { + //if x.Pos() > startDecl && x.Pos() < endDecl { + exported[x.Name] = initRewrite(x.Name + " -> " + t) + } + } + + case *ast.GenDecl: + if x.Tok == token.CONST && x.Lparen > 0 { + startDecl = x.Lparen + endDecl = x.Rparen + // fmt.Printf("Decl:%s -> %s\n", fileSet.Position(startDecl), fileSet.Position(endDecl)) + } + } + if s != "" { + fmt.Printf("%s:\t%s\n", fileSet.Position(n.Pos()), s) + } + return true + }) + + for _, rw := range exported { + astfile = rw(astfile) + } + + var buf bytes.Buffer + + printer.Fprint(&buf, fileSet, astfile) + + // Remove package documentation and insert information + s := buf.String() + ind := strings.Index(buf.String(), "\npackage cpuid") + s = s[ind:] + s = "// Generated, DO NOT EDIT,\n" + + "// but copy it to your own project and rename the package.\n" + + "// See more at http://github.com/klauspost/cpuid\n" + + s + + outputName := Package + string(os.PathSeparator) + file + + err = ioutil.WriteFile(outputName, []byte(s), 0644) + if err != nil { + log.Fatalf("writing output: %s", err) + } + log.Println("Generated", outputName) + } + + for _, file := range copyFiles { + dst := "" + if strings.HasPrefix(file, "cpuid") { + dst = Package + string(os.PathSeparator) + file + } else { + dst = Package + string(os.PathSeparator) + "cpuid_" + file + } + err := copyFile(file, dst) + if err != nil { + log.Fatalf("copying file: %s", err) + } + log.Println("Copied", dst) + } +} + +// CopyFile copies a file from src to 
dst. If src and dst files exist, and are +// the same, then return success. Copy the file contents from src to dst. +func copyFile(src, dst string) (err error) { + sfi, err := os.Stat(src) + if err != nil { + return + } + if !sfi.Mode().IsRegular() { + // cannot copy non-regular files (e.g., directories, + // symlinks, devices, etc.) + return fmt.Errorf("CopyFile: non-regular source file %s (%q)", sfi.Name(), sfi.Mode().String()) + } + dfi, err := os.Stat(dst) + if err != nil { + if !os.IsNotExist(err) { + return + } + } else { + if !(dfi.Mode().IsRegular()) { + return fmt.Errorf("CopyFile: non-regular destination file %s (%q)", dfi.Name(), dfi.Mode().String()) + } + if os.SameFile(sfi, dfi) { + return + } + } + err = copyFileContents(src, dst) + return +} + +// copyFileContents copies the contents of the file named src to the file named +// by dst. The file will be created if it does not already exist. If the +// destination file exists, all it's contents will be replaced by the contents +// of the source file. +func copyFileContents(src, dst string) (err error) { + in, err := os.Open(src) + if err != nil { + return + } + defer in.Close() + out, err := os.Create(dst) + if err != nil { + return + } + defer func() { + cerr := out.Close() + if err == nil { + err = cerr + } + }() + if _, err = io.Copy(out, in); err != nil { + return + } + err = out.Sync() + return +} + +type rewrite func(*ast.File) *ast.File + +// Mostly copied from gofmt +func initRewrite(rewriteRule string) rewrite { + f := strings.Split(rewriteRule, "->") + if len(f) != 2 { + fmt.Fprintf(os.Stderr, "rewrite rule must be of the form 'pattern -> replacement'\n") + os.Exit(2) + } + pattern := parseExpr(f[0], "pattern") + replace := parseExpr(f[1], "replacement") + return func(p *ast.File) *ast.File { return rewriteFile(pattern, replace, p) } +} + +// parseExpr parses s as an expression. +// It might make sense to expand this to allow statement patterns, +// but there are problems with preserving formatting and also +// with what a wildcard for a statement looks like. +func parseExpr(s, what string) ast.Expr { + x, err := parser.ParseExpr(s) + if err != nil { + fmt.Fprintf(os.Stderr, "parsing %s %s at %s\n", what, s, err) + os.Exit(2) + } + return x +} + +// Keep this function for debugging. +/* +func dump(msg string, val reflect.Value) { + fmt.Printf("%s:\n", msg) + ast.Print(fileSet, val.Interface()) + fmt.Println() +} +*/ + +// rewriteFile applies the rewrite rule 'pattern -> replace' to an entire file. +func rewriteFile(pattern, replace ast.Expr, p *ast.File) *ast.File { + cmap := ast.NewCommentMap(fileSet, p, p.Comments) + m := make(map[string]reflect.Value) + pat := reflect.ValueOf(pattern) + repl := reflect.ValueOf(replace) + + var rewriteVal func(val reflect.Value) reflect.Value + rewriteVal = func(val reflect.Value) reflect.Value { + // don't bother if val is invalid to start with + if !val.IsValid() { + return reflect.Value{} + } + for k := range m { + delete(m, k) + } + val = apply(rewriteVal, val) + if match(m, pat, val) { + val = subst(m, repl, reflect.ValueOf(val.Interface().(ast.Node).Pos())) + } + return val + } + + r := apply(rewriteVal, reflect.ValueOf(p)).Interface().(*ast.File) + r.Comments = cmap.Filter(r).Comments() // recreate comments list + return r +} + +// set is a wrapper for x.Set(y); it protects the caller from panics if x cannot be changed to y. 
+func set(x, y reflect.Value) { + // don't bother if x cannot be set or y is invalid + if !x.CanSet() || !y.IsValid() { + return + } + defer func() { + if x := recover(); x != nil { + if s, ok := x.(string); ok && + (strings.Contains(s, "type mismatch") || strings.Contains(s, "not assignable")) { + // x cannot be set to y - ignore this rewrite + return + } + panic(x) + } + }() + x.Set(y) +} + +// Values/types for special cases. +var ( + objectPtrNil = reflect.ValueOf((*ast.Object)(nil)) + scopePtrNil = reflect.ValueOf((*ast.Scope)(nil)) + + identType = reflect.TypeOf((*ast.Ident)(nil)) + objectPtrType = reflect.TypeOf((*ast.Object)(nil)) + positionType = reflect.TypeOf(token.NoPos) + callExprType = reflect.TypeOf((*ast.CallExpr)(nil)) + scopePtrType = reflect.TypeOf((*ast.Scope)(nil)) +) + +// apply replaces each AST field x in val with f(x), returning val. +// To avoid extra conversions, f operates on the reflect.Value form. +func apply(f func(reflect.Value) reflect.Value, val reflect.Value) reflect.Value { + if !val.IsValid() { + return reflect.Value{} + } + + // *ast.Objects introduce cycles and are likely incorrect after + // rewrite; don't follow them but replace with nil instead + if val.Type() == objectPtrType { + return objectPtrNil + } + + // similarly for scopes: they are likely incorrect after a rewrite; + // replace them with nil + if val.Type() == scopePtrType { + return scopePtrNil + } + + switch v := reflect.Indirect(val); v.Kind() { + case reflect.Slice: + for i := 0; i < v.Len(); i++ { + e := v.Index(i) + set(e, f(e)) + } + case reflect.Struct: + for i := 0; i < v.NumField(); i++ { + e := v.Field(i) + set(e, f(e)) + } + case reflect.Interface: + e := v.Elem() + set(v, f(e)) + } + return val +} + +func isWildcard(s string) bool { + rune, size := utf8.DecodeRuneInString(s) + return size == len(s) && unicode.IsLower(rune) +} + +// match returns true if pattern matches val, +// recording wildcard submatches in m. +// If m == nil, match checks whether pattern == val. +func match(m map[string]reflect.Value, pattern, val reflect.Value) bool { + // Wildcard matches any expression. If it appears multiple + // times in the pattern, it must match the same expression + // each time. + if m != nil && pattern.IsValid() && pattern.Type() == identType { + name := pattern.Interface().(*ast.Ident).Name + if isWildcard(name) && val.IsValid() { + // wildcards only match valid (non-nil) expressions. + if _, ok := val.Interface().(ast.Expr); ok && !val.IsNil() { + if old, ok := m[name]; ok { + return match(nil, old, val) + } + m[name] = val + return true + } + } + } + + // Otherwise, pattern and val must match recursively. + if !pattern.IsValid() || !val.IsValid() { + return !pattern.IsValid() && !val.IsValid() + } + if pattern.Type() != val.Type() { + return false + } + + // Special cases. + switch pattern.Type() { + case identType: + // For identifiers, only the names need to match + // (and none of the other *ast.Object information). + // This is a common case, handle it all here instead + // of recursing down any further via reflection. + p := pattern.Interface().(*ast.Ident) + v := val.Interface().(*ast.Ident) + return p == nil && v == nil || p != nil && v != nil && p.Name == v.Name + case objectPtrType, positionType: + // object pointers and token positions always match + return true + case callExprType: + // For calls, the Ellipsis fields (token.Position) must + // match since that is how f(x) and f(x...) are different. + // Check them here but fall through for the remaining fields. 
+ p := pattern.Interface().(*ast.CallExpr) + v := val.Interface().(*ast.CallExpr) + if p.Ellipsis.IsValid() != v.Ellipsis.IsValid() { + return false + } + } + + p := reflect.Indirect(pattern) + v := reflect.Indirect(val) + if !p.IsValid() || !v.IsValid() { + return !p.IsValid() && !v.IsValid() + } + + switch p.Kind() { + case reflect.Slice: + if p.Len() != v.Len() { + return false + } + for i := 0; i < p.Len(); i++ { + if !match(m, p.Index(i), v.Index(i)) { + return false + } + } + return true + + case reflect.Struct: + for i := 0; i < p.NumField(); i++ { + if !match(m, p.Field(i), v.Field(i)) { + return false + } + } + return true + + case reflect.Interface: + return match(m, p.Elem(), v.Elem()) + } + + // Handle token integers, etc. + return p.Interface() == v.Interface() +} + +// subst returns a copy of pattern with values from m substituted in place +// of wildcards and pos used as the position of tokens from the pattern. +// if m == nil, subst returns a copy of pattern and doesn't change the line +// number information. +func subst(m map[string]reflect.Value, pattern reflect.Value, pos reflect.Value) reflect.Value { + if !pattern.IsValid() { + return reflect.Value{} + } + + // Wildcard gets replaced with map value. + if m != nil && pattern.Type() == identType { + name := pattern.Interface().(*ast.Ident).Name + if isWildcard(name) { + if old, ok := m[name]; ok { + return subst(nil, old, reflect.Value{}) + } + } + } + + if pos.IsValid() && pattern.Type() == positionType { + // use new position only if old position was valid in the first place + if old := pattern.Interface().(token.Pos); !old.IsValid() { + return pattern + } + return pos + } + + // Otherwise copy. + switch p := pattern; p.Kind() { + case reflect.Slice: + v := reflect.MakeSlice(p.Type(), p.Len(), p.Len()) + for i := 0; i < p.Len(); i++ { + v.Index(i).Set(subst(m, p.Index(i), pos)) + } + return v + + case reflect.Struct: + v := reflect.New(p.Type()).Elem() + for i := 0; i < p.NumField(); i++ { + v.Field(i).Set(subst(m, p.Field(i), pos)) + } + return v + + case reflect.Ptr: + v := reflect.New(p.Type()).Elem() + if elem := p.Elem(); elem.IsValid() { + v.Set(subst(m, elem, pos).Addr()) + } + return v + + case reflect.Interface: + v := reflect.New(p.Type()).Elem() + if elem := p.Elem(); elem.IsValid() { + v.Set(subst(m, elem, pos)) + } + return v + } + + return pattern +} diff --git a/vendor/github.com/klauspost/cpuid/private/README.md b/vendor/github.com/klauspost/cpuid/private/README.md new file mode 100644 index 0000000..57a68f8 --- /dev/null +++ b/vendor/github.com/klauspost/cpuid/private/README.md @@ -0,0 +1,6 @@ +# cpuid private + +This is a specially converted of the cpuid package, so it can be included in +a package without exporting anything. + +Package home: https://github.com/klauspost/cpuid diff --git a/vendor/github.com/klauspost/cpuid/private/cpuid.go b/vendor/github.com/klauspost/cpuid/private/cpuid.go new file mode 100644 index 0000000..2171214 --- /dev/null +++ b/vendor/github.com/klauspost/cpuid/private/cpuid.go @@ -0,0 +1,1024 @@ +// Generated, DO NOT EDIT, +// but copy it to your own project and rename the package. +// See more at http://github.com/klauspost/cpuid + +package cpuid + +import "strings" + +// Vendor is a representation of a CPU vendor. 
+type vendor int + +const ( + other vendor = iota + intel + amd + via + transmeta + nsc + kvm // Kernel-based Virtual Machine + msvm // Microsoft Hyper-V or Windows Virtual PC + vmware + xenhvm +) + +const ( + cmov = 1 << iota // i686 CMOV + nx // NX (No-Execute) bit + amd3dnow // AMD 3DNOW + amd3dnowext // AMD 3DNowExt + mmx // standard MMX + mmxext // SSE integer functions or AMD MMX ext + sse // SSE functions + sse2 // P4 SSE functions + sse3 // Prescott SSE3 functions + ssse3 // Conroe SSSE3 functions + sse4 // Penryn SSE4.1 functions + sse4a // AMD Barcelona microarchitecture SSE4a instructions + sse42 // Nehalem SSE4.2 functions + avx // AVX functions + avx2 // AVX2 functions + fma3 // Intel FMA 3 + fma4 // Bulldozer FMA4 functions + xop // Bulldozer XOP functions + f16c // Half-precision floating-point conversion + bmi1 // Bit Manipulation Instruction Set 1 + bmi2 // Bit Manipulation Instruction Set 2 + tbm // AMD Trailing Bit Manipulation + lzcnt // LZCNT instruction + popcnt // POPCNT instruction + aesni // Advanced Encryption Standard New Instructions + clmul // Carry-less Multiplication + htt // Hyperthreading (enabled) + hle // Hardware Lock Elision + rtm // Restricted Transactional Memory + rdrand // RDRAND instruction is available + rdseed // RDSEED instruction is available + adx // Intel ADX (Multi-Precision Add-Carry Instruction Extensions) + sha // Intel SHA Extensions + avx512f // AVX-512 Foundation + avx512dq // AVX-512 Doubleword and Quadword Instructions + avx512ifma // AVX-512 Integer Fused Multiply-Add Instructions + avx512pf // AVX-512 Prefetch Instructions + avx512er // AVX-512 Exponential and Reciprocal Instructions + avx512cd // AVX-512 Conflict Detection Instructions + avx512bw // AVX-512 Byte and Word Instructions + avx512vl // AVX-512 Vector Length Extensions + avx512vbmi // AVX-512 Vector Bit Manipulation Instructions + mpx // Intel MPX (Memory Protection Extensions) + erms // Enhanced REP MOVSB/STOSB + rdtscp // RDTSCP Instruction + cx16 // CMPXCHG16B Instruction + sgx // Software Guard Extensions + + // Performance indicators + sse2slow // SSE2 is supported, but usually not faster + sse3slow // SSE3 is supported, but usually not faster + atom // Atom processor, some SSSE3 instructions are slower +) + +var flagNames = map[flags]string{ + cmov: "CMOV", // i686 CMOV + nx: "NX", // NX (No-Execute) bit + amd3dnow: "AMD3DNOW", // AMD 3DNOW + amd3dnowext: "AMD3DNOWEXT", // AMD 3DNowExt + mmx: "MMX", // Standard MMX + mmxext: "MMXEXT", // SSE integer functions or AMD MMX ext + sse: "SSE", // SSE functions + sse2: "SSE2", // P4 SSE2 functions + sse3: "SSE3", // Prescott SSE3 functions + ssse3: "SSSE3", // Conroe SSSE3 functions + sse4: "SSE4.1", // Penryn SSE4.1 functions + sse4a: "SSE4A", // AMD Barcelona microarchitecture SSE4a instructions + sse42: "SSE4.2", // Nehalem SSE4.2 functions + avx: "AVX", // AVX functions + avx2: "AVX2", // AVX functions + fma3: "FMA3", // Intel FMA 3 + fma4: "FMA4", // Bulldozer FMA4 functions + xop: "XOP", // Bulldozer XOP functions + f16c: "F16C", // Half-precision floating-point conversion + bmi1: "BMI1", // Bit Manipulation Instruction Set 1 + bmi2: "BMI2", // Bit Manipulation Instruction Set 2 + tbm: "TBM", // AMD Trailing Bit Manipulation + lzcnt: "LZCNT", // LZCNT instruction + popcnt: "POPCNT", // POPCNT instruction + aesni: "AESNI", // Advanced Encryption Standard New Instructions + clmul: "CLMUL", // Carry-less Multiplication + htt: "HTT", // Hyperthreading (enabled) + hle: "HLE", // Hardware Lock Elision + rtm: "RTM", // 
Restricted Transactional Memory + rdrand: "RDRAND", // RDRAND instruction is available + rdseed: "RDSEED", // RDSEED instruction is available + adx: "ADX", // Intel ADX (Multi-Precision Add-Carry Instruction Extensions) + sha: "SHA", // Intel SHA Extensions + avx512f: "AVX512F", // AVX-512 Foundation + avx512dq: "AVX512DQ", // AVX-512 Doubleword and Quadword Instructions + avx512ifma: "AVX512IFMA", // AVX-512 Integer Fused Multiply-Add Instructions + avx512pf: "AVX512PF", // AVX-512 Prefetch Instructions + avx512er: "AVX512ER", // AVX-512 Exponential and Reciprocal Instructions + avx512cd: "AVX512CD", // AVX-512 Conflict Detection Instructions + avx512bw: "AVX512BW", // AVX-512 Byte and Word Instructions + avx512vl: "AVX512VL", // AVX-512 Vector Length Extensions + avx512vbmi: "AVX512VBMI", // AVX-512 Vector Bit Manipulation Instructions + mpx: "MPX", // Intel MPX (Memory Protection Extensions) + erms: "ERMS", // Enhanced REP MOVSB/STOSB + rdtscp: "RDTSCP", // RDTSCP Instruction + cx16: "CX16", // CMPXCHG16B Instruction + sgx: "SGX", // Software Guard Extensions + + // Performance indicators + sse2slow: "SSE2SLOW", // SSE2 supported, but usually not faster + sse3slow: "SSE3SLOW", // SSE3 supported, but usually not faster + atom: "ATOM", // Atom processor, some SSSE3 instructions are slower + +} + +// CPUInfo contains information about the detected system CPU. +type cpuInfo struct { + brandname string // Brand name reported by the CPU + vendorid vendor // Comparable CPU vendor ID + features flags // Features of the CPU + physicalcores int // Number of physical processor cores in your CPU. Will be 0 if undetectable. + threadspercore int // Number of threads per physical core. Will be 1 if undetectable. + logicalcores int // Number of physical cores times threads that can run on each core through the use of hyperthreading. Will be 0 if undetectable. + family int // CPU family number + model int // CPU model number + cacheline int // Cache line size in bytes. Will be 0 if undetectable. + cache struct { + l1i int // L1 Instruction Cache (per core or shared). Will be -1 if undetected + l1d int // L1 Data Cache (per core or shared). Will be -1 if undetected + l2 int // L2 Cache (per core or shared). Will be -1 if undetected + l3 int // L3 Instruction Cache (per core or shared). Will be -1 if undetected + } + sgx sgxsupport + maxFunc uint32 + maxExFunc uint32 +} + +var cpuid func(op uint32) (eax, ebx, ecx, edx uint32) +var cpuidex func(op, op2 uint32) (eax, ebx, ecx, edx uint32) +var xgetbv func(index uint32) (eax, edx uint32) +var rdtscpAsm func() (eax, ebx, ecx, edx uint32) + +// CPU contains information about the CPU as detected on startup, +// or when Detect last was called. +// +// Use this as the primary entry point to you data, +// this way queries are +var cpu cpuInfo + +func init() { + initCPU() + detect() +} + +// Detect will re-detect current CPU info. +// This will replace the content of the exported CPU variable. +// +// Unless you expect the CPU to change while you are running your program +// you should not need to call this function. +// If you call this, you must ensure that no other goroutine is accessing the +// exported CPU variable. 
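+//
+// A minimal usage sketch from inside this package (using the unexported
+// names defined in this file):
+//
+//	detect()          // refresh the package-level cpu value
+//	if cpu.avx2() {
+//		// take an AVX2-optimized code path
+//	}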
+func detect() { + cpu.maxFunc = maxFunctionID() + cpu.maxExFunc = maxExtendedFunction() + cpu.brandname = brandName() + cpu.cacheline = cacheLine() + cpu.family, cpu.model = familyModel() + cpu.features = support() + cpu.sgx = hasSGX(cpu.features&sgx != 0) + cpu.threadspercore = threadsPerCore() + cpu.logicalcores = logicalCores() + cpu.physicalcores = physicalCores() + cpu.vendorid = vendorID() + cpu.cacheSize() +} + +// Generated here: http://play.golang.org/p/BxFH2Gdc0G + +// Cmov indicates support of CMOV instructions +func (c cpuInfo) cmov() bool { + return c.features&cmov != 0 +} + +// Amd3dnow indicates support of AMD 3DNOW! instructions +func (c cpuInfo) amd3dnow() bool { + return c.features&amd3dnow != 0 +} + +// Amd3dnowExt indicates support of AMD 3DNOW! Extended instructions +func (c cpuInfo) amd3dnowext() bool { + return c.features&amd3dnowext != 0 +} + +// MMX indicates support of MMX instructions +func (c cpuInfo) mmx() bool { + return c.features&mmx != 0 +} + +// MMXExt indicates support of MMXEXT instructions +// (SSE integer functions or AMD MMX ext) +func (c cpuInfo) mmxext() bool { + return c.features&mmxext != 0 +} + +// SSE indicates support of SSE instructions +func (c cpuInfo) sse() bool { + return c.features&sse != 0 +} + +// SSE2 indicates support of SSE 2 instructions +func (c cpuInfo) sse2() bool { + return c.features&sse2 != 0 +} + +// SSE3 indicates support of SSE 3 instructions +func (c cpuInfo) sse3() bool { + return c.features&sse3 != 0 +} + +// SSSE3 indicates support of SSSE 3 instructions +func (c cpuInfo) ssse3() bool { + return c.features&ssse3 != 0 +} + +// SSE4 indicates support of SSE 4 (also called SSE 4.1) instructions +func (c cpuInfo) sse4() bool { + return c.features&sse4 != 0 +} + +// SSE42 indicates support of SSE4.2 instructions +func (c cpuInfo) sse42() bool { + return c.features&sse42 != 0 +} + +// AVX indicates support of AVX instructions +// and operating system support of AVX instructions +func (c cpuInfo) avx() bool { + return c.features&avx != 0 +} + +// AVX2 indicates support of AVX2 instructions +func (c cpuInfo) avx2() bool { + return c.features&avx2 != 0 +} + +// FMA3 indicates support of FMA3 instructions +func (c cpuInfo) fma3() bool { + return c.features&fma3 != 0 +} + +// FMA4 indicates support of FMA4 instructions +func (c cpuInfo) fma4() bool { + return c.features&fma4 != 0 +} + +// XOP indicates support of XOP instructions +func (c cpuInfo) xop() bool { + return c.features&xop != 0 +} + +// F16C indicates support of F16C instructions +func (c cpuInfo) f16c() bool { + return c.features&f16c != 0 +} + +// BMI1 indicates support of BMI1 instructions +func (c cpuInfo) bmi1() bool { + return c.features&bmi1 != 0 +} + +// BMI2 indicates support of BMI2 instructions +func (c cpuInfo) bmi2() bool { + return c.features&bmi2 != 0 +} + +// TBM indicates support of TBM instructions +// (AMD Trailing Bit Manipulation) +func (c cpuInfo) tbm() bool { + return c.features&tbm != 0 +} + +// Lzcnt indicates support of LZCNT instruction +func (c cpuInfo) lzcnt() bool { + return c.features&lzcnt != 0 +} + +// Popcnt indicates support of POPCNT instruction +func (c cpuInfo) popcnt() bool { + return c.features&popcnt != 0 +} + +// HTT indicates the processor has Hyperthreading enabled +func (c cpuInfo) htt() bool { + return c.features&htt != 0 +} + +// SSE2Slow indicates that SSE2 may be slow on this processor +func (c cpuInfo) sse2slow() bool { + return c.features&sse2slow != 0 +} + +// SSE3Slow indicates that SSE3 may be slow on this processor 
+func (c cpuInfo) sse3slow() bool { + return c.features&sse3slow != 0 +} + +// AesNi indicates support of AES-NI instructions +// (Advanced Encryption Standard New Instructions) +func (c cpuInfo) aesni() bool { + return c.features&aesni != 0 +} + +// Clmul indicates support of CLMUL instructions +// (Carry-less Multiplication) +func (c cpuInfo) clmul() bool { + return c.features&clmul != 0 +} + +// NX indicates support of NX (No-Execute) bit +func (c cpuInfo) nx() bool { + return c.features&nx != 0 +} + +// SSE4A indicates support of AMD Barcelona microarchitecture SSE4a instructions +func (c cpuInfo) sse4a() bool { + return c.features&sse4a != 0 +} + +// HLE indicates support of Hardware Lock Elision +func (c cpuInfo) hle() bool { + return c.features&hle != 0 +} + +// RTM indicates support of Restricted Transactional Memory +func (c cpuInfo) rtm() bool { + return c.features&rtm != 0 +} + +// Rdrand indicates support of RDRAND instruction is available +func (c cpuInfo) rdrand() bool { + return c.features&rdrand != 0 +} + +// Rdseed indicates support of RDSEED instruction is available +func (c cpuInfo) rdseed() bool { + return c.features&rdseed != 0 +} + +// ADX indicates support of Intel ADX (Multi-Precision Add-Carry Instruction Extensions) +func (c cpuInfo) adx() bool { + return c.features&adx != 0 +} + +// SHA indicates support of Intel SHA Extensions +func (c cpuInfo) sha() bool { + return c.features&sha != 0 +} + +// AVX512F indicates support of AVX-512 Foundation +func (c cpuInfo) avx512f() bool { + return c.features&avx512f != 0 +} + +// AVX512DQ indicates support of AVX-512 Doubleword and Quadword Instructions +func (c cpuInfo) avx512dq() bool { + return c.features&avx512dq != 0 +} + +// AVX512IFMA indicates support of AVX-512 Integer Fused Multiply-Add Instructions +func (c cpuInfo) avx512ifma() bool { + return c.features&avx512ifma != 0 +} + +// AVX512PF indicates support of AVX-512 Prefetch Instructions +func (c cpuInfo) avx512pf() bool { + return c.features&avx512pf != 0 +} + +// AVX512ER indicates support of AVX-512 Exponential and Reciprocal Instructions +func (c cpuInfo) avx512er() bool { + return c.features&avx512er != 0 +} + +// AVX512CD indicates support of AVX-512 Conflict Detection Instructions +func (c cpuInfo) avx512cd() bool { + return c.features&avx512cd != 0 +} + +// AVX512BW indicates support of AVX-512 Byte and Word Instructions +func (c cpuInfo) avx512bw() bool { + return c.features&avx512bw != 0 +} + +// AVX512VL indicates support of AVX-512 Vector Length Extensions +func (c cpuInfo) avx512vl() bool { + return c.features&avx512vl != 0 +} + +// AVX512VBMI indicates support of AVX-512 Vector Bit Manipulation Instructions +func (c cpuInfo) avx512vbmi() bool { + return c.features&avx512vbmi != 0 +} + +// MPX indicates support of Intel MPX (Memory Protection Extensions) +func (c cpuInfo) mpx() bool { + return c.features&mpx != 0 +} + +// ERMS indicates support of Enhanced REP MOVSB/STOSB +func (c cpuInfo) erms() bool { + return c.features&erms != 0 +} + +// RDTSCP Instruction is available. +func (c cpuInfo) rdtscp() bool { + return c.features&rdtscp != 0 +} + +// CX16 indicates if CMPXCHG16B instruction is available. +func (c cpuInfo) cx16() bool { + return c.features&cx16 != 0 +} + +// TSX is split into HLE (Hardware Lock Elision) and RTM (Restricted Transactional Memory) detection. +// So TSX simply checks that. 
+func (c cpuInfo) tsx() bool { + return c.features&(mpx|rtm) == mpx|rtm +} + +// Atom indicates an Atom processor +func (c cpuInfo) atom() bool { + return c.features&atom != 0 +} + +// Intel returns true if vendor is recognized as Intel +func (c cpuInfo) intel() bool { + return c.vendorid == intel +} + +// AMD returns true if vendor is recognized as AMD +func (c cpuInfo) amd() bool { + return c.vendorid == amd +} + +// Transmeta returns true if vendor is recognized as Transmeta +func (c cpuInfo) transmeta() bool { + return c.vendorid == transmeta +} + +// NSC returns true if vendor is recognized as National Semiconductor +func (c cpuInfo) nsc() bool { + return c.vendorid == nsc +} + +// VIA returns true if vendor is recognized as VIA +func (c cpuInfo) via() bool { + return c.vendorid == via +} + +// RTCounter returns the 64-bit time-stamp counter +// Uses the RDTSCP instruction. The value 0 is returned +// if the CPU does not support the instruction. +func (c cpuInfo) rtcounter() uint64 { + if !c.rdtscp() { + return 0 + } + a, _, _, d := rdtscpAsm() + return uint64(a) | (uint64(d) << 32) +} + +// Ia32TscAux returns the IA32_TSC_AUX part of the RDTSCP. +// This variable is OS dependent, but on Linux contains information +// about the current cpu/core the code is running on. +// If the RDTSCP instruction isn't supported on the CPU, the value 0 is returned. +func (c cpuInfo) ia32tscaux() uint32 { + if !c.rdtscp() { + return 0 + } + _, _, ecx, _ := rdtscpAsm() + return ecx +} + +// LogicalCPU will return the Logical CPU the code is currently executing on. +// This is likely to change when the OS re-schedules the running thread +// to another CPU. +// If the current core cannot be detected, -1 will be returned. +func (c cpuInfo) logicalcpu() int { + if c.maxFunc < 1 { + return -1 + } + _, ebx, _, _ := cpuid(1) + return int(ebx >> 24) +} + +// VM Will return true if the cpu id indicates we are in +// a virtual machine. This is only a hint, and will very likely +// have many false negatives. +func (c cpuInfo) vm() bool { + switch c.vendorid { + case msvm, kvm, vmware, xenhvm: + return true + } + return false +} + +// Flags contains detected cpu features and caracteristics +type flags uint64 + +// String returns a string representation of the detected +// CPU features. +func (f flags) String() string { + return strings.Join(f.strings(), ",") +} + +// Strings returns and array of the detected features. 
+func (f flags) strings() []string { + s := support() + r := make([]string, 0, 20) + for i := uint(0); i < 64; i++ { + key := flags(1 << i) + val := flagNames[key] + if s&key != 0 { + r = append(r, val) + } + } + return r +} + +func maxExtendedFunction() uint32 { + eax, _, _, _ := cpuid(0x80000000) + return eax +} + +func maxFunctionID() uint32 { + a, _, _, _ := cpuid(0) + return a +} + +func brandName() string { + if maxExtendedFunction() >= 0x80000004 { + v := make([]uint32, 0, 48) + for i := uint32(0); i < 3; i++ { + a, b, c, d := cpuid(0x80000002 + i) + v = append(v, a, b, c, d) + } + return strings.Trim(string(valAsString(v...)), " ") + } + return "unknown" +} + +func threadsPerCore() int { + mfi := maxFunctionID() + if mfi < 0x4 || vendorID() != intel { + return 1 + } + + if mfi < 0xb { + _, b, _, d := cpuid(1) + if (d & (1 << 28)) != 0 { + // v will contain logical core count + v := (b >> 16) & 255 + if v > 1 { + a4, _, _, _ := cpuid(4) + // physical cores + v2 := (a4 >> 26) + 1 + if v2 > 0 { + return int(v) / int(v2) + } + } + } + return 1 + } + _, b, _, _ := cpuidex(0xb, 0) + if b&0xffff == 0 { + return 1 + } + return int(b & 0xffff) +} + +func logicalCores() int { + mfi := maxFunctionID() + switch vendorID() { + case intel: + // Use this on old Intel processors + if mfi < 0xb { + if mfi < 1 { + return 0 + } + // CPUID.1:EBX[23:16] represents the maximum number of addressable IDs (initial APIC ID) + // that can be assigned to logical processors in a physical package. + // The value may not be the same as the number of logical processors that are present in the hardware of a physical package. + _, ebx, _, _ := cpuid(1) + logical := (ebx >> 16) & 0xff + return int(logical) + } + _, b, _, _ := cpuidex(0xb, 1) + return int(b & 0xffff) + case amd: + _, b, _, _ := cpuid(1) + return int((b >> 16) & 0xff) + default: + return 0 + } +} + +func familyModel() (int, int) { + if maxFunctionID() < 0x1 { + return 0, 0 + } + eax, _, _, _ := cpuid(1) + family := ((eax >> 8) & 0xf) + ((eax >> 20) & 0xff) + model := ((eax >> 4) & 0xf) + ((eax >> 12) & 0xf0) + return int(family), int(model) +} + +func physicalCores() int { + switch vendorID() { + case intel: + return logicalCores() / threadsPerCore() + case amd: + if maxExtendedFunction() >= 0x80000008 { + _, _, c, _ := cpuid(0x80000008) + return int(c&0xff) + 1 + } + } + return 0 +} + +// Except from http://en.wikipedia.org/wiki/CPUID#EAX.3D0:_Get_vendor_ID +var vendorMapping = map[string]vendor{ + "AMDisbetter!": amd, + "AuthenticAMD": amd, + "CentaurHauls": via, + "GenuineIntel": intel, + "TransmetaCPU": transmeta, + "GenuineTMx86": transmeta, + "Geode by NSC": nsc, + "VIA VIA VIA ": via, + "KVMKVMKVMKVM": kvm, + "Microsoft Hv": msvm, + "VMwareVMware": vmware, + "XenVMMXenVMM": xenhvm, +} + +func vendorID() vendor { + _, b, c, d := cpuid(0) + v := valAsString(b, d, c) + vend, ok := vendorMapping[string(v)] + if !ok { + return other + } + return vend +} + +func cacheLine() int { + if maxFunctionID() < 0x1 { + return 0 + } + + _, ebx, _, _ := cpuid(1) + cache := (ebx & 0xff00) >> 5 // cflush size + if cache == 0 && maxExtendedFunction() >= 0x80000006 { + _, _, ecx, _ := cpuid(0x80000006) + cache = ecx & 0xff // cacheline size + } + // TODO: Read from Cache and TLB Information + return int(cache) +} + +func (c *cpuInfo) cacheSize() { + c.cache.l1d = -1 + c.cache.l1i = -1 + c.cache.l2 = -1 + c.cache.l3 = -1 + vendor := vendorID() + switch vendor { + case intel: + if maxFunctionID() < 4 { + return + } + for i := uint32(0); ; i++ { + eax, ebx, ecx, _ := 
cpuidex(4, i) + cacheType := eax & 15 + if cacheType == 0 { + break + } + cacheLevel := (eax >> 5) & 7 + coherency := int(ebx&0xfff) + 1 + partitions := int((ebx>>12)&0x3ff) + 1 + associativity := int((ebx>>22)&0x3ff) + 1 + sets := int(ecx) + 1 + size := associativity * partitions * coherency * sets + switch cacheLevel { + case 1: + if cacheType == 1 { + // 1 = Data Cache + c.cache.l1d = size + } else if cacheType == 2 { + // 2 = Instruction Cache + c.cache.l1i = size + } else { + if c.cache.l1d < 0 { + c.cache.l1i = size + } + if c.cache.l1i < 0 { + c.cache.l1i = size + } + } + case 2: + c.cache.l2 = size + case 3: + c.cache.l3 = size + } + } + case amd: + // Untested. + if maxExtendedFunction() < 0x80000005 { + return + } + _, _, ecx, edx := cpuid(0x80000005) + c.cache.l1d = int(((ecx >> 24) & 0xFF) * 1024) + c.cache.l1i = int(((edx >> 24) & 0xFF) * 1024) + + if maxExtendedFunction() < 0x80000006 { + return + } + _, _, ecx, _ = cpuid(0x80000006) + c.cache.l2 = int(((ecx >> 16) & 0xFFFF) * 1024) + } + + return +} + +type sgxsupport struct { + available bool + sgx1supported bool + sgx2supported bool + maxenclavesizenot64 int64 + maxenclavesize64 int64 +} + +func hasSGX(available bool) (rval sgxsupport) { + rval.available = available + + if !available { + return + } + + a, _, _, d := cpuidex(0x12, 0) + rval.sgx1supported = a&0x01 != 0 + rval.sgx2supported = a&0x02 != 0 + rval.maxenclavesizenot64 = 1 << (d & 0xFF) // pow 2 + rval.maxenclavesize64 = 1 << ((d >> 8) & 0xFF) // pow 2 + + return +} + +func support() flags { + mfi := maxFunctionID() + vend := vendorID() + if mfi < 0x1 { + return 0 + } + rval := uint64(0) + _, _, c, d := cpuid(1) + if (d & (1 << 15)) != 0 { + rval |= cmov + } + if (d & (1 << 23)) != 0 { + rval |= mmx + } + if (d & (1 << 25)) != 0 { + rval |= mmxext + } + if (d & (1 << 25)) != 0 { + rval |= sse + } + if (d & (1 << 26)) != 0 { + rval |= sse2 + } + if (c & 1) != 0 { + rval |= sse3 + } + if (c & 0x00000200) != 0 { + rval |= ssse3 + } + if (c & 0x00080000) != 0 { + rval |= sse4 + } + if (c & 0x00100000) != 0 { + rval |= sse42 + } + if (c & (1 << 25)) != 0 { + rval |= aesni + } + if (c & (1 << 1)) != 0 { + rval |= clmul + } + if c&(1<<23) != 0 { + rval |= popcnt + } + if c&(1<<30) != 0 { + rval |= rdrand + } + if c&(1<<29) != 0 { + rval |= f16c + } + if c&(1<<13) != 0 { + rval |= cx16 + } + if vend == intel && (d&(1<<28)) != 0 && mfi >= 4 { + if threadsPerCore() > 1 { + rval |= htt + } + } + + // Check XGETBV, OXSAVE and AVX bits + if c&(1<<26) != 0 && c&(1<<27) != 0 && c&(1<<28) != 0 { + // Check for OS support + eax, _ := xgetbv(0) + if (eax & 0x6) == 0x6 { + rval |= avx + if (c & 0x00001000) != 0 { + rval |= fma3 + } + } + } + + // Check AVX2, AVX2 requires OS support, but BMI1/2 don't. 
+ if mfi >= 7 { + _, ebx, ecx, _ := cpuidex(7, 0) + if (rval&avx) != 0 && (ebx&0x00000020) != 0 { + rval |= avx2 + } + if (ebx & 0x00000008) != 0 { + rval |= bmi1 + if (ebx & 0x00000100) != 0 { + rval |= bmi2 + } + } + if ebx&(1<<2) != 0 { + rval |= sgx + } + if ebx&(1<<4) != 0 { + rval |= hle + } + if ebx&(1<<9) != 0 { + rval |= erms + } + if ebx&(1<<11) != 0 { + rval |= rtm + } + if ebx&(1<<14) != 0 { + rval |= mpx + } + if ebx&(1<<18) != 0 { + rval |= rdseed + } + if ebx&(1<<19) != 0 { + rval |= adx + } + if ebx&(1<<29) != 0 { + rval |= sha + } + + // Only detect AVX-512 features if XGETBV is supported + if c&((1<<26)|(1<<27)) == (1<<26)|(1<<27) { + // Check for OS support + eax, _ := xgetbv(0) + + // Verify that XCR0[7:5] = ‘111b’ (OPMASK state, upper 256-bit of ZMM0-ZMM15 and + // ZMM16-ZMM31 state are enabled by OS) + /// and that XCR0[2:1] = ‘11b’ (XMM state and YMM state are enabled by OS). + if (eax>>5)&7 == 7 && (eax>>1)&3 == 3 { + if ebx&(1<<16) != 0 { + rval |= avx512f + } + if ebx&(1<<17) != 0 { + rval |= avx512dq + } + if ebx&(1<<21) != 0 { + rval |= avx512ifma + } + if ebx&(1<<26) != 0 { + rval |= avx512pf + } + if ebx&(1<<27) != 0 { + rval |= avx512er + } + if ebx&(1<<28) != 0 { + rval |= avx512cd + } + if ebx&(1<<30) != 0 { + rval |= avx512bw + } + if ebx&(1<<31) != 0 { + rval |= avx512vl + } + // ecx + if ecx&(1<<1) != 0 { + rval |= avx512vbmi + } + } + } + } + + if maxExtendedFunction() >= 0x80000001 { + _, _, c, d := cpuid(0x80000001) + if (c & (1 << 5)) != 0 { + rval |= lzcnt + rval |= popcnt + } + if (d & (1 << 31)) != 0 { + rval |= amd3dnow + } + if (d & (1 << 30)) != 0 { + rval |= amd3dnowext + } + if (d & (1 << 23)) != 0 { + rval |= mmx + } + if (d & (1 << 22)) != 0 { + rval |= mmxext + } + if (c & (1 << 6)) != 0 { + rval |= sse4a + } + if d&(1<<20) != 0 { + rval |= nx + } + if d&(1<<27) != 0 { + rval |= rdtscp + } + + /* Allow for selectively disabling SSE2 functions on AMD processors + with SSE2 support but not SSE4a. This includes Athlon64, some + Opteron, and some Sempron processors. MMX, SSE, or 3DNow! are faster + than SSE2 often enough to utilize this special-case flag. + AV_CPU_FLAG_SSE2 and AV_CPU_FLAG_SSE2SLOW are both set in this case + so that SSE2 is used unless explicitly disabled by checking + AV_CPU_FLAG_SSE2SLOW. */ + if vendorID() != intel && + rval&sse2 != 0 && (c&0x00000040) == 0 { + rval |= sse2slow + } + + /* XOP and FMA4 use the AVX instruction coding scheme, so they can't be + * used unless the OS has AVX support. */ + if (rval & avx) != 0 { + if (c & 0x00000800) != 0 { + rval |= xop + } + if (c & 0x00010000) != 0 { + rval |= fma4 + } + } + + if vendorID() == intel { + family, model := familyModel() + if family == 6 && (model == 9 || model == 13 || model == 14) { + /* 6/9 (pentium-m "banias"), 6/13 (pentium-m "dothan"), and + * 6/14 (core1 "yonah") theoretically support sse2, but it's + * usually slower than mmx. */ + if (rval & sse2) != 0 { + rval |= sse2slow + } + if (rval & sse3) != 0 { + rval |= sse3slow + } + } + /* The Atom processor has SSSE3 support, which is useful in many cases, + * but sometimes the SSSE3 version is slower than the SSE2 equivalent + * on the Atom, but is generally faster on other processors supporting + * SSSE3. This flag allows for selectively disabling certain SSSE3 + * functions on the Atom. 
*/ + if family == 6 && model == 28 { + rval |= atom + } + } + } + return flags(rval) +} + +func valAsString(values ...uint32) []byte { + r := make([]byte, 4*len(values)) + for i, v := range values { + dst := r[i*4:] + dst[0] = byte(v & 0xff) + dst[1] = byte((v >> 8) & 0xff) + dst[2] = byte((v >> 16) & 0xff) + dst[3] = byte((v >> 24) & 0xff) + switch { + case dst[0] == 0: + return r[:i*4] + case dst[1] == 0: + return r[:i*4+1] + case dst[2] == 0: + return r[:i*4+2] + case dst[3] == 0: + return r[:i*4+3] + } + } + return r +} diff --git a/vendor/github.com/klauspost/cpuid/private/cpuid_386.s b/vendor/github.com/klauspost/cpuid/private/cpuid_386.s new file mode 100644 index 0000000..4d73171 --- /dev/null +++ b/vendor/github.com/klauspost/cpuid/private/cpuid_386.s @@ -0,0 +1,42 @@ +// Copyright (c) 2015 Klaus Post, released under MIT License. See LICENSE file. + +// +build 386,!gccgo + +// func asmCpuid(op uint32) (eax, ebx, ecx, edx uint32) +TEXT ·asmCpuid(SB), 7, $0 + XORL CX, CX + MOVL op+0(FP), AX + CPUID + MOVL AX, eax+4(FP) + MOVL BX, ebx+8(FP) + MOVL CX, ecx+12(FP) + MOVL DX, edx+16(FP) + RET + +// func asmCpuidex(op, op2 uint32) (eax, ebx, ecx, edx uint32) +TEXT ·asmCpuidex(SB), 7, $0 + MOVL op+0(FP), AX + MOVL op2+4(FP), CX + CPUID + MOVL AX, eax+8(FP) + MOVL BX, ebx+12(FP) + MOVL CX, ecx+16(FP) + MOVL DX, edx+20(FP) + RET + +// func xgetbv(index uint32) (eax, edx uint32) +TEXT ·asmXgetbv(SB), 7, $0 + MOVL index+0(FP), CX + BYTE $0x0f; BYTE $0x01; BYTE $0xd0 // XGETBV + MOVL AX, eax+4(FP) + MOVL DX, edx+8(FP) + RET + +// func asmRdtscpAsm() (eax, ebx, ecx, edx uint32) +TEXT ·asmRdtscpAsm(SB), 7, $0 + BYTE $0x0F; BYTE $0x01; BYTE $0xF9 // RDTSCP + MOVL AX, eax+0(FP) + MOVL BX, ebx+4(FP) + MOVL CX, ecx+8(FP) + MOVL DX, edx+12(FP) + RET diff --git a/vendor/github.com/klauspost/cpuid/private/cpuid_amd64.s b/vendor/github.com/klauspost/cpuid/private/cpuid_amd64.s new file mode 100644 index 0000000..3c1d60e --- /dev/null +++ b/vendor/github.com/klauspost/cpuid/private/cpuid_amd64.s @@ -0,0 +1,42 @@ +// Copyright (c) 2015 Klaus Post, released under MIT License. See LICENSE file. + +//+build amd64,!gccgo + +// func asmCpuid(op uint32) (eax, ebx, ecx, edx uint32) +TEXT ·asmCpuid(SB), 7, $0 + XORQ CX, CX + MOVL op+0(FP), AX + CPUID + MOVL AX, eax+8(FP) + MOVL BX, ebx+12(FP) + MOVL CX, ecx+16(FP) + MOVL DX, edx+20(FP) + RET + +// func asmCpuidex(op, op2 uint32) (eax, ebx, ecx, edx uint32) +TEXT ·asmCpuidex(SB), 7, $0 + MOVL op+0(FP), AX + MOVL op2+4(FP), CX + CPUID + MOVL AX, eax+8(FP) + MOVL BX, ebx+12(FP) + MOVL CX, ecx+16(FP) + MOVL DX, edx+20(FP) + RET + +// func asmXgetbv(index uint32) (eax, edx uint32) +TEXT ·asmXgetbv(SB), 7, $0 + MOVL index+0(FP), CX + BYTE $0x0f; BYTE $0x01; BYTE $0xd0 // XGETBV + MOVL AX, eax+8(FP) + MOVL DX, edx+12(FP) + RET + +// func asmRdtscpAsm() (eax, ebx, ecx, edx uint32) +TEXT ·asmRdtscpAsm(SB), 7, $0 + BYTE $0x0F; BYTE $0x01; BYTE $0xF9 // RDTSCP + MOVL AX, eax+0(FP) + MOVL BX, ebx+4(FP) + MOVL CX, ecx+8(FP) + MOVL DX, edx+12(FP) + RET diff --git a/vendor/github.com/klauspost/cpuid/private/cpuid_detect_intel.go b/vendor/github.com/klauspost/cpuid/private/cpuid_detect_intel.go new file mode 100644 index 0000000..a5f04dd --- /dev/null +++ b/vendor/github.com/klauspost/cpuid/private/cpuid_detect_intel.go @@ -0,0 +1,17 @@ +// Copyright (c) 2015 Klaus Post, released under MIT License. See LICENSE file. 
+ +// +build 386,!gccgo amd64,!gccgo + +package cpuid + +func asmCpuid(op uint32) (eax, ebx, ecx, edx uint32) +func asmCpuidex(op, op2 uint32) (eax, ebx, ecx, edx uint32) +func asmXgetbv(index uint32) (eax, edx uint32) +func asmRdtscpAsm() (eax, ebx, ecx, edx uint32) + +func initCPU() { + cpuid = asmCpuid + cpuidex = asmCpuidex + xgetbv = asmXgetbv + rdtscpAsm = asmRdtscpAsm +} diff --git a/vendor/github.com/klauspost/cpuid/private/cpuid_detect_ref.go b/vendor/github.com/klauspost/cpuid/private/cpuid_detect_ref.go new file mode 100644 index 0000000..909c5d9 --- /dev/null +++ b/vendor/github.com/klauspost/cpuid/private/cpuid_detect_ref.go @@ -0,0 +1,23 @@ +// Copyright (c) 2015 Klaus Post, released under MIT License. See LICENSE file. + +// +build !amd64,!386 gccgo + +package cpuid + +func initCPU() { + cpuid = func(op uint32) (eax, ebx, ecx, edx uint32) { + return 0, 0, 0, 0 + } + + cpuidex = func(op, op2 uint32) (eax, ebx, ecx, edx uint32) { + return 0, 0, 0, 0 + } + + xgetbv = func(index uint32) (eax, edx uint32) { + return 0, 0 + } + + rdtscpAsm = func() (eax, ebx, ecx, edx uint32) { + return 0, 0, 0, 0 + } +} diff --git a/vendor/github.com/klauspost/cpuid/private/cpuid_test.go b/vendor/github.com/klauspost/cpuid/private/cpuid_test.go new file mode 100644 index 0000000..31b7f3f --- /dev/null +++ b/vendor/github.com/klauspost/cpuid/private/cpuid_test.go @@ -0,0 +1,739 @@ +// Generated, DO NOT EDIT, +// but copy it to your own project and rename the package. +// See more at http://github.com/klauspost/cpuid + +package cpuid + +import ( + "fmt" + "testing" +) + +// There is no real way to test a CPU identifier, since results will +// obviously differ on each machine. +func TestCPUID(t *testing.T) { + n := maxFunctionID() + t.Logf("Max Function:0x%x\n", n) + n = maxExtendedFunction() + t.Logf("Max Extended Function:0x%x\n", n) + t.Log("Name:", cpu.brandname) + t.Log("PhysicalCores:", cpu.physicalcores) + t.Log("ThreadsPerCore:", cpu.threadspercore) + t.Log("LogicalCores:", cpu.logicalcores) + t.Log("Family", cpu.family, "Model:", cpu.model) + t.Log("Features:", cpu.features) + t.Log("Cacheline bytes:", cpu.cacheline) + t.Log("L1 Instruction Cache:", cpu.cache.l1i, "bytes") + t.Log("L1 Data Cache:", cpu.cache.l1d, "bytes") + t.Log("L2 Cache:", cpu.cache.l2, "bytes") + t.Log("L3 Cache:", cpu.cache.l3, "bytes") + + if cpu.sse2() { + t.Log("We have SSE2") + } +} + +func TestDumpCPUID(t *testing.T) { + n := int(maxFunctionID()) + for i := 0; i <= n; i++ { + a, b, c, d := cpuidex(uint32(i), 0) + t.Logf("CPUID %08x: %08x-%08x-%08x-%08x", i, a, b, c, d) + ex := uint32(1) + for { + a2, b2, c2, d2 := cpuidex(uint32(i), ex) + if a2 == a && b2 == b && d2 == d || ex > 50 || a2 == 0 { + break + } + t.Logf("CPUID %08x: %08x-%08x-%08x-%08x", i, a2, b2, c2, d2) + a, b, c, d = a2, b2, c2, d2 + ex++ + } + } + n2 := maxExtendedFunction() + for i := uint32(0x80000000); i <= n2; i++ { + a, b, c, d := cpuid(i) + t.Logf("CPUID %08x: %08x-%08x-%08x-%08x", i, a, b, c, d) + } +} + +func example() { + // Print basic CPU information: + fmt.Println("Name:", cpu.brandname) + fmt.Println("PhysicalCores:", cpu.physicalcores) + fmt.Println("ThreadsPerCore:", cpu.threadspercore) + fmt.Println("LogicalCores:", cpu.logicalcores) + fmt.Println("Family", cpu.family, "Model:", cpu.model) + fmt.Println("Features:", cpu.features) + fmt.Println("Cacheline bytes:", cpu.cacheline) + + // Test if we have a specific feature: + if cpu.sse() { + fmt.Println("We have Streaming SIMD Extensions") + } +} + +func TestBrandNameZero(t 
*testing.T) { + if len(cpu.brandname) > 0 { + // Cut out last byte + last := []byte(cpu.brandname[len(cpu.brandname)-1:]) + if last[0] == 0 { + t.Fatal("last byte was zero") + } else if last[0] == 32 { + t.Fatal("whitespace wasn't trimmed") + } + } +} + +// Generated here: http://play.golang.org/p/mko-0tFt0Q + +// TestCmov tests Cmov() function +func TestCmov(t *testing.T) { + got := cpu.cmov() + expected := cpu.features&cmov == cmov + if got != expected { + t.Fatalf("Cmov: expected %v, got %v", expected, got) + } + t.Log("CMOV Support:", got) +} + +// TestAmd3dnow tests Amd3dnow() function +func TestAmd3dnow(t *testing.T) { + got := cpu.amd3dnow() + expected := cpu.features&amd3dnow == amd3dnow + if got != expected { + t.Fatalf("Amd3dnow: expected %v, got %v", expected, got) + } + t.Log("AMD3DNOW Support:", got) +} + +// TestAmd3dnowExt tests Amd3dnowExt() function +func TestAmd3dnowExt(t *testing.T) { + got := cpu.amd3dnowext() + expected := cpu.features&amd3dnowext == amd3dnowext + if got != expected { + t.Fatalf("Amd3dnowExt: expected %v, got %v", expected, got) + } + t.Log("AMD3DNOWEXT Support:", got) +} + +// TestMMX tests MMX() function +func TestMMX(t *testing.T) { + got := cpu.mmx() + expected := cpu.features&mmx == mmx + if got != expected { + t.Fatalf("MMX: expected %v, got %v", expected, got) + } + t.Log("MMX Support:", got) +} + +// TestMMXext tests MMXext() function +func TestMMXext(t *testing.T) { + got := cpu.mmxext() + expected := cpu.features&mmxext == mmxext + if got != expected { + t.Fatalf("MMXExt: expected %v, got %v", expected, got) + } + t.Log("MMXEXT Support:", got) +} + +// TestSSE tests SSE() function +func TestSSE(t *testing.T) { + got := cpu.sse() + expected := cpu.features&sse == sse + if got != expected { + t.Fatalf("SSE: expected %v, got %v", expected, got) + } + t.Log("SSE Support:", got) +} + +// TestSSE2 tests SSE2() function +func TestSSE2(t *testing.T) { + got := cpu.sse2() + expected := cpu.features&sse2 == sse2 + if got != expected { + t.Fatalf("SSE2: expected %v, got %v", expected, got) + } + t.Log("SSE2 Support:", got) +} + +// TestSSE3 tests SSE3() function +func TestSSE3(t *testing.T) { + got := cpu.sse3() + expected := cpu.features&sse3 == sse3 + if got != expected { + t.Fatalf("SSE3: expected %v, got %v", expected, got) + } + t.Log("SSE3 Support:", got) +} + +// TestSSSE3 tests SSSE3() function +func TestSSSE3(t *testing.T) { + got := cpu.ssse3() + expected := cpu.features&ssse3 == ssse3 + if got != expected { + t.Fatalf("SSSE3: expected %v, got %v", expected, got) + } + t.Log("SSSE3 Support:", got) +} + +// TestSSE4 tests SSE4() function +func TestSSE4(t *testing.T) { + got := cpu.sse4() + expected := cpu.features&sse4 == sse4 + if got != expected { + t.Fatalf("SSE4: expected %v, got %v", expected, got) + } + t.Log("SSE4 Support:", got) +} + +// TestSSE42 tests SSE42() function +func TestSSE42(t *testing.T) { + got := cpu.sse42() + expected := cpu.features&sse42 == sse42 + if got != expected { + t.Fatalf("SSE42: expected %v, got %v", expected, got) + } + t.Log("SSE42 Support:", got) +} + +// TestAVX tests AVX() function +func TestAVX(t *testing.T) { + got := cpu.avx() + expected := cpu.features&avx == avx + if got != expected { + t.Fatalf("AVX: expected %v, got %v", expected, got) + } + t.Log("AVX Support:", got) +} + +// TestAVX2 tests AVX2() function +func TestAVX2(t *testing.T) { + got := cpu.avx2() + expected := cpu.features&avx2 == avx2 + if got != expected { + t.Fatalf("AVX2: expected %v, got %v", expected, got) + } + t.Log("AVX2 
Support:", got) +} + +// TestFMA3 tests FMA3() function +func TestFMA3(t *testing.T) { + got := cpu.fma3() + expected := cpu.features&fma3 == fma3 + if got != expected { + t.Fatalf("FMA3: expected %v, got %v", expected, got) + } + t.Log("FMA3 Support:", got) +} + +// TestFMA4 tests FMA4() function +func TestFMA4(t *testing.T) { + got := cpu.fma4() + expected := cpu.features&fma4 == fma4 + if got != expected { + t.Fatalf("FMA4: expected %v, got %v", expected, got) + } + t.Log("FMA4 Support:", got) +} + +// TestXOP tests XOP() function +func TestXOP(t *testing.T) { + got := cpu.xop() + expected := cpu.features&xop == xop + if got != expected { + t.Fatalf("XOP: expected %v, got %v", expected, got) + } + t.Log("XOP Support:", got) +} + +// TestF16C tests F16C() function +func TestF16C(t *testing.T) { + got := cpu.f16c() + expected := cpu.features&f16c == f16c + if got != expected { + t.Fatalf("F16C: expected %v, got %v", expected, got) + } + t.Log("F16C Support:", got) +} + +// TestCX16 tests CX16() function +func TestCX16(t *testing.T) { + got := cpu.cx16() + expected := cpu.features&cx16 == cx16 + if got != expected { + t.Fatalf("CX16: expected %v, got %v", expected, got) + } + t.Log("CX16 Support:", got) +} + +// TestSGX tests SGX() function +func TestSGX(t *testing.T) { + got := cpu.sgx.available + expected := cpu.features&sgx == sgx + if got != expected { + t.Fatalf("SGX: expected %v, got %v", expected, got) + } + t.Log("SGX Support:", got) +} + +// TestBMI1 tests BMI1() function +func TestBMI1(t *testing.T) { + got := cpu.bmi1() + expected := cpu.features&bmi1 == bmi1 + if got != expected { + t.Fatalf("BMI1: expected %v, got %v", expected, got) + } + t.Log("BMI1 Support:", got) +} + +// TestBMI2 tests BMI2() function +func TestBMI2(t *testing.T) { + got := cpu.bmi2() + expected := cpu.features&bmi2 == bmi2 + if got != expected { + t.Fatalf("BMI2: expected %v, got %v", expected, got) + } + t.Log("BMI2 Support:", got) +} + +// TestTBM tests TBM() function +func TestTBM(t *testing.T) { + got := cpu.tbm() + expected := cpu.features&tbm == tbm + if got != expected { + t.Fatalf("TBM: expected %v, got %v", expected, got) + } + t.Log("TBM Support:", got) +} + +// TestLzcnt tests Lzcnt() function +func TestLzcnt(t *testing.T) { + got := cpu.lzcnt() + expected := cpu.features&lzcnt == lzcnt + if got != expected { + t.Fatalf("Lzcnt: expected %v, got %v", expected, got) + } + t.Log("LZCNT Support:", got) +} + +// TestLzcnt tests Lzcnt() function +func TestPopcnt(t *testing.T) { + got := cpu.popcnt() + expected := cpu.features&popcnt == popcnt + if got != expected { + t.Fatalf("Popcnt: expected %v, got %v", expected, got) + } + t.Log("POPCNT Support:", got) +} + +// TestAesNi tests AesNi() function +func TestAesNi(t *testing.T) { + got := cpu.aesni() + expected := cpu.features&aesni == aesni + if got != expected { + t.Fatalf("AesNi: expected %v, got %v", expected, got) + } + t.Log("AESNI Support:", got) +} + +// TestHTT tests HTT() function +func TestHTT(t *testing.T) { + got := cpu.htt() + expected := cpu.features&htt == htt + if got != expected { + t.Fatalf("HTT: expected %v, got %v", expected, got) + } + t.Log("HTT Support:", got) +} + +// TestClmul tests Clmul() function +func TestClmul(t *testing.T) { + got := cpu.clmul() + expected := cpu.features&clmul == clmul + if got != expected { + t.Fatalf("Clmul: expected %v, got %v", expected, got) + } + t.Log("CLMUL Support:", got) +} + +// TestSSE2Slow tests SSE2Slow() function +func TestSSE2Slow(t *testing.T) { + got := cpu.sse2slow() + expected := 
cpu.features&sse2slow == sse2slow + if got != expected { + t.Fatalf("SSE2Slow: expected %v, got %v", expected, got) + } + t.Log("SSE2SLOW Support:", got) +} + +// TestSSE3Slow tests SSE3slow() function +func TestSSE3Slow(t *testing.T) { + got := cpu.sse3slow() + expected := cpu.features&sse3slow == sse3slow + if got != expected { + t.Fatalf("SSE3slow: expected %v, got %v", expected, got) + } + t.Log("SSE3SLOW Support:", got) +} + +// TestAtom tests Atom() function +func TestAtom(t *testing.T) { + got := cpu.atom() + expected := cpu.features&atom == atom + if got != expected { + t.Fatalf("Atom: expected %v, got %v", expected, got) + } + t.Log("ATOM Support:", got) +} + +// TestNX tests NX() function (NX (No-Execute) bit) +func TestNX(t *testing.T) { + got := cpu.nx() + expected := cpu.features&nx == nx + if got != expected { + t.Fatalf("NX: expected %v, got %v", expected, got) + } + t.Log("NX Support:", got) +} + +// TestSSE4A tests SSE4A() function (AMD Barcelona microarchitecture SSE4a instructions) +func TestSSE4A(t *testing.T) { + got := cpu.sse4a() + expected := cpu.features&sse4a == sse4a + if got != expected { + t.Fatalf("SSE4A: expected %v, got %v", expected, got) + } + t.Log("SSE4A Support:", got) +} + +// TestHLE tests HLE() function (Hardware Lock Elision) +func TestHLE(t *testing.T) { + got := cpu.hle() + expected := cpu.features&hle == hle + if got != expected { + t.Fatalf("HLE: expected %v, got %v", expected, got) + } + t.Log("HLE Support:", got) +} + +// TestRTM tests RTM() function (Restricted Transactional Memory) +func TestRTM(t *testing.T) { + got := cpu.rtm() + expected := cpu.features&rtm == rtm + if got != expected { + t.Fatalf("RTM: expected %v, got %v", expected, got) + } + t.Log("RTM Support:", got) +} + +// TestRdrand tests RDRAND() function (RDRAND instruction is available) +func TestRdrand(t *testing.T) { + got := cpu.rdrand() + expected := cpu.features&rdrand == rdrand + if got != expected { + t.Fatalf("Rdrand: expected %v, got %v", expected, got) + } + t.Log("Rdrand Support:", got) +} + +// TestRdseed tests RDSEED() function (RDSEED instruction is available) +func TestRdseed(t *testing.T) { + got := cpu.rdseed() + expected := cpu.features&rdseed == rdseed + if got != expected { + t.Fatalf("Rdseed: expected %v, got %v", expected, got) + } + t.Log("Rdseed Support:", got) +} + +// TestADX tests ADX() function (Intel ADX (Multi-Precision Add-Carry Instruction Extensions)) +func TestADX(t *testing.T) { + got := cpu.adx() + expected := cpu.features&adx == adx + if got != expected { + t.Fatalf("ADX: expected %v, got %v", expected, got) + } + t.Log("ADX Support:", got) +} + +// TestSHA tests SHA() function (Intel SHA Extensions) +func TestSHA(t *testing.T) { + got := cpu.sha() + expected := cpu.features&sha == sha + if got != expected { + t.Fatalf("SHA: expected %v, got %v", expected, got) + } + t.Log("SHA Support:", got) +} + +// TestAVX512F tests AVX512F() function (AVX-512 Foundation) +func TestAVX512F(t *testing.T) { + got := cpu.avx512f() + expected := cpu.features&avx512f == avx512f + if got != expected { + t.Fatalf("AVX512F: expected %v, got %v", expected, got) + } + t.Log("AVX512F Support:", got) +} + +// TestAVX512DQ tests AVX512DQ() function (AVX-512 Doubleword and Quadword Instructions) +func TestAVX512DQ(t *testing.T) { + got := cpu.avx512dq() + expected := cpu.features&avx512dq == avx512dq + if got != expected { + t.Fatalf("AVX512DQ: expected %v, got %v", expected, got) + } + t.Log("AVX512DQ Support:", got) +} + +// TestAVX512IFMA tests AVX512IFMA() 
function (AVX-512 Integer Fused Multiply-Add Instructions) +func TestAVX512IFMA(t *testing.T) { + got := cpu.avx512ifma() + expected := cpu.features&avx512ifma == avx512ifma + if got != expected { + t.Fatalf("AVX512IFMA: expected %v, got %v", expected, got) + } + t.Log("AVX512IFMA Support:", got) +} + +// TestAVX512PF tests AVX512PF() function (AVX-512 Prefetch Instructions) +func TestAVX512PF(t *testing.T) { + got := cpu.avx512pf() + expected := cpu.features&avx512pf == avx512pf + if got != expected { + t.Fatalf("AVX512PF: expected %v, got %v", expected, got) + } + t.Log("AVX512PF Support:", got) +} + +// TestAVX512ER tests AVX512ER() function (AVX-512 Exponential and Reciprocal Instructions) +func TestAVX512ER(t *testing.T) { + got := cpu.avx512er() + expected := cpu.features&avx512er == avx512er + if got != expected { + t.Fatalf("AVX512ER: expected %v, got %v", expected, got) + } + t.Log("AVX512ER Support:", got) +} + +// TestAVX512CD tests AVX512CD() function (AVX-512 Conflict Detection Instructions) +func TestAVX512CD(t *testing.T) { + got := cpu.avx512cd() + expected := cpu.features&avx512cd == avx512cd + if got != expected { + t.Fatalf("AVX512CD: expected %v, got %v", expected, got) + } + t.Log("AVX512CD Support:", got) +} + +// TestAVX512BW tests AVX512BW() function (AVX-512 Byte and Word Instructions) +func TestAVX512BW(t *testing.T) { + got := cpu.avx512bw() + expected := cpu.features&avx512bw == avx512bw + if got != expected { + t.Fatalf("AVX512BW: expected %v, got %v", expected, got) + } + t.Log("AVX512BW Support:", got) +} + +// TestAVX512VL tests AVX512VL() function (AVX-512 Vector Length Extensions) +func TestAVX512VL(t *testing.T) { + got := cpu.avx512vl() + expected := cpu.features&avx512vl == avx512vl + if got != expected { + t.Fatalf("AVX512VL: expected %v, got %v", expected, got) + } + t.Log("AVX512VL Support:", got) +} + +// TestAVX512VL tests AVX512VBMI() function (AVX-512 Vector Bit Manipulation Instructions) +func TestAVX512VBMI(t *testing.T) { + got := cpu.avx512vbmi() + expected := cpu.features&avx512vbmi == avx512vbmi + if got != expected { + t.Fatalf("AVX512VBMI: expected %v, got %v", expected, got) + } + t.Log("AVX512VBMI Support:", got) +} + +// TestMPX tests MPX() function (Intel MPX (Memory Protection Extensions)) +func TestMPX(t *testing.T) { + got := cpu.mpx() + expected := cpu.features&mpx == mpx + if got != expected { + t.Fatalf("MPX: expected %v, got %v", expected, got) + } + t.Log("MPX Support:", got) +} + +// TestERMS tests ERMS() function (Enhanced REP MOVSB/STOSB) +func TestERMS(t *testing.T) { + got := cpu.erms() + expected := cpu.features&erms == erms + if got != expected { + t.Fatalf("ERMS: expected %v, got %v", expected, got) + } + t.Log("ERMS Support:", got) +} + +// TestVendor writes the detected vendor. 
Will be 0 if unknown +func TestVendor(t *testing.T) { + t.Log("Vendor ID:", cpu.vendorid) +} + +// Intel returns true if vendor is recognized as Intel +func TestIntel(t *testing.T) { + got := cpu.intel() + expected := cpu.vendorid == intel + if got != expected { + t.Fatalf("TestIntel: expected %v, got %v", expected, got) + } + t.Log("TestIntel:", got) +} + +// AMD returns true if vendor is recognized as AMD +func TestAMD(t *testing.T) { + got := cpu.amd() + expected := cpu.vendorid == amd + if got != expected { + t.Fatalf("TestAMD: expected %v, got %v", expected, got) + } + t.Log("TestAMD:", got) +} + +// Transmeta returns true if vendor is recognized as Transmeta +func TestTransmeta(t *testing.T) { + got := cpu.transmeta() + expected := cpu.vendorid == transmeta + if got != expected { + t.Fatalf("TestTransmeta: expected %v, got %v", expected, got) + } + t.Log("TestTransmeta:", got) +} + +// NSC returns true if vendor is recognized as National Semiconductor +func TestNSC(t *testing.T) { + got := cpu.nsc() + expected := cpu.vendorid == nsc + if got != expected { + t.Fatalf("TestNSC: expected %v, got %v", expected, got) + } + t.Log("TestNSC:", got) +} + +// VIA returns true if vendor is recognized as VIA +func TestVIA(t *testing.T) { + got := cpu.via() + expected := cpu.vendorid == via + if got != expected { + t.Fatalf("TestVIA: expected %v, got %v", expected, got) + } + t.Log("TestVIA:", got) +} + +// Test VM function +func TestVM(t *testing.T) { + t.Log("Vendor ID:", cpu.vm()) +} + +// NSC returns true if vendor is recognized as National Semiconductor +func TestCPUInfo_TSX(t *testing.T) { + got := cpu.tsx() + expected := cpu.hle() && cpu.rtm() + if got != expected { + t.Fatalf("TestNSC: expected %v, got %v", expected, got) + } + t.Log("TestNSC:", got) +} + +// Test RTCounter function +func TestRtCounter(t *testing.T) { + a := cpu.rtcounter() + b := cpu.rtcounter() + t.Log("CPU Counter:", a, b, b-a) +} + +// Prints the value of Ia32TscAux() +func TestIa32TscAux(t *testing.T) { + ecx := cpu.ia32tscaux() + t.Logf("Ia32TscAux:0x%x\n", ecx) + if ecx != 0 { + chip := (ecx & 0xFFF000) >> 12 + core := ecx & 0xFFF + t.Log("Likely chip, core:", chip, core) + } +} + +func TestThreadsPerCoreNZ(t *testing.T) { + if cpu.threadspercore == 0 { + t.Fatal("threads per core is zero") + } +} + +// Prints the value of LogicalCPU() +func TestLogicalCPU(t *testing.T) { + t.Log("Currently executing on cpu:", cpu.logicalcpu()) +} + +func TestMaxFunction(t *testing.T) { + expect := maxFunctionID() + if cpu.maxFunc != expect { + t.Fatal("Max function does not match, expected", expect, "but got", cpu.maxFunc) + } + expect = maxExtendedFunction() + if cpu.maxExFunc != expect { + t.Fatal("Max Extended function does not match, expected", expect, "but got", cpu.maxFunc) + } +} + +// This example will calculate the chip/core number on Linux +// Linux encodes numa id (<<12) and core id (8bit) into TSC_AUX. 
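+//
+// The decoding used below corresponds to this layout of IA32_TSC_AUX:
+//
+//	bits 23..12: chip (NUMA node) id
+//	bits 11..0:  core id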
+func examplecpuinfo_ia32tscaux() { + ecx := cpu.ia32tscaux() + if ecx == 0 { + fmt.Println("Unknown CPU ID") + return + } + chip := (ecx & 0xFFF000) >> 12 + core := ecx & 0xFFF + fmt.Println("Chip, Core:", chip, core) +} + +/* +func TestPhysical(t *testing.T) { + var test16 = "CPUID 00000000: 0000000d-756e6547-6c65746e-49656e69 \nCPUID 00000001: 000206d7-03200800-1fbee3ff-bfebfbff \nCPUID 00000002: 76035a01-00f0b2ff-00000000-00ca0000 \nCPUID 00000003: 00000000-00000000-00000000-00000000 \nCPUID 00000004: 3c004121-01c0003f-0000003f-00000000 \nCPUID 00000004: 3c004122-01c0003f-0000003f-00000000 \nCPUID 00000004: 3c004143-01c0003f-000001ff-00000000 \nCPUID 00000004: 3c07c163-04c0003f-00003fff-00000006 \nCPUID 00000005: 00000040-00000040-00000003-00021120 \nCPUID 00000006: 00000075-00000002-00000009-00000000 \nCPUID 00000007: 00000000-00000000-00000000-00000000 \nCPUID 00000008: 00000000-00000000-00000000-00000000 \nCPUID 00000009: 00000001-00000000-00000000-00000000 \nCPUID 0000000a: 07300403-00000000-00000000-00000603 \nCPUID 0000000b: 00000000-00000000-00000003-00000003 \nCPUID 0000000b: 00000005-00000010-00000201-00000003 \nCPUID 0000000c: 00000000-00000000-00000000-00000000 \nCPUID 0000000d: 00000007-00000340-00000340-00000000 \nCPUID 0000000d: 00000001-00000000-00000000-00000000 \nCPUID 0000000d: 00000100-00000240-00000000-00000000 \nCPUID 80000000: 80000008-00000000-00000000-00000000 \nCPUID 80000001: 00000000-00000000-00000001-2c100800 \nCPUID 80000002: 20202020-49202020-6c65746e-20295228 \nCPUID 80000003: 6e6f6558-20295228-20555043-322d3545 \nCPUID 80000004: 20303636-20402030-30322e32-007a4847 \nCPUID 80000005: 00000000-00000000-00000000-00000000 \nCPUID 80000006: 00000000-00000000-01006040-00000000 \nCPUID 80000007: 00000000-00000000-00000000-00000100 \nCPUID 80000008: 0000302e-00000000-00000000-00000000" + restore := mockCPU([]byte(test16)) + Detect() + t.Log("Name:", CPU.BrandName) + n := maxFunctionID() + t.Logf("Max Function:0x%x\n", n) + n = maxExtendedFunction() + t.Logf("Max Extended Function:0x%x\n", n) + t.Log("PhysicalCores:", CPU.PhysicalCores) + t.Log("ThreadsPerCore:", CPU.ThreadsPerCore) + t.Log("LogicalCores:", CPU.LogicalCores) + t.Log("Family", CPU.Family, "Model:", CPU.Model) + t.Log("Features:", CPU.Features) + t.Log("Cacheline bytes:", CPU.CacheLine) + t.Log("L1 Instruction Cache:", CPU.Cache.L1I, "bytes") + t.Log("L1 Data Cache:", CPU.Cache.L1D, "bytes") + t.Log("L2 Cache:", CPU.Cache.L2, "bytes") + t.Log("L3 Cache:", CPU.Cache.L3, "bytes") + if CPU.LogicalCores > 0 && CPU.PhysicalCores > 0 { + if CPU.LogicalCores != CPU.PhysicalCores*CPU.ThreadsPerCore { + t.Fatalf("Core count mismatch, LogicalCores (%d) != PhysicalCores (%d) * CPU.ThreadsPerCore (%d)", + CPU.LogicalCores, CPU.PhysicalCores, CPU.ThreadsPerCore) + } + } + + if CPU.ThreadsPerCore > 1 && !CPU.HTT() { + t.Fatalf("Hyperthreading not detected") + } + if CPU.ThreadsPerCore == 1 && CPU.HTT() { + t.Fatalf("Hyperthreading detected, but only 1 Thread per core") + } + restore() + Detect() + TestCPUID(t) +} +*/ diff --git a/vendor/github.com/klauspost/cpuid/testdata/cpuid_data.zip b/vendor/github.com/klauspost/cpuid/testdata/cpuid_data.zip new file mode 100644 index 0000000..885925f Binary files /dev/null and b/vendor/github.com/klauspost/cpuid/testdata/cpuid_data.zip differ diff --git a/vendor/github.com/klauspost/cpuid/testdata/getall.go b/vendor/github.com/klauspost/cpuid/testdata/getall.go new file mode 100644 index 0000000..9b51c7a --- /dev/null +++ 
b/vendor/github.com/klauspost/cpuid/testdata/getall.go @@ -0,0 +1,77 @@ +package main + +import ( + "archive/zip" + _ "bytes" + "fmt" + "golang.org/x/net/html" + "io" + "net/http" + "os" + "strings" +) + +// Download all CPUID dumps from http://users.atw.hu/instlatx64/ +func main() { + resp, err := http.Get("http://users.atw.hu/instlatx64/?") + if err != nil { + panic(err) + } + + node, err := html.Parse(resp.Body) + if err != nil { + panic(err) + } + + file, err := os.Create("cpuid_data.zip") + if err != nil { + panic(err) + } + defer file.Close() + gw := zip.NewWriter(file) + + var f func(*html.Node) + f = func(n *html.Node) { + if n.Type == html.ElementNode && n.Data == "a" { + for _, a := range n.Attr { + if a.Key == "href" { + err := ParseURL(a.Val, gw) + if err != nil { + panic(err) + } + break + } + } + } + for c := n.FirstChild; c != nil; c = c.NextSibling { + f(c) + } + } + + f(node) + err = gw.Close() + if err != nil { + panic(err) + } +} + +func ParseURL(s string, gw *zip.Writer) error { + if strings.Contains(s, "CPUID.txt") { + fmt.Println("Adding", "http://users.atw.hu/instlatx64/"+s) + resp, err := http.Get("http://users.atw.hu/instlatx64/" + s) + if err != nil { + fmt.Println("Error getting ", s, ":", err) + } + defer resp.Body.Close() + w, err := gw.Create(s) + if err != nil { + return err + } + + _, err = io.Copy(w, resp.Body) + if err != nil { + return err + } + } + return nil +} diff --git a/vendor/github.com/klauspost/crc32/.gitignore b/vendor/github.com/klauspost/crc32/.gitignore new file mode 100644 index 0000000..daf913b --- /dev/null +++ b/vendor/github.com/klauspost/crc32/.gitignore @@ -0,0 +1,24 @@ +# Compiled Object files, Static and Dynamic libs (Shared Objects) +*.o +*.a +*.so + +# Folders +_obj +_test + +# Architecture specific extensions/prefixes +*.[568vq] +[568vq].out + +*.cgo1.go +*.cgo2.c +_cgo_defun.c +_cgo_gotypes.go +_cgo_export.* + +_testmain.go + +*.exe +*.test +*.prof diff --git a/vendor/github.com/klauspost/crc32/.travis.yml b/vendor/github.com/klauspost/crc32/.travis.yml new file mode 100644 index 0000000..de64ae4 --- /dev/null +++ b/vendor/github.com/klauspost/crc32/.travis.yml @@ -0,0 +1,13 @@ +language: go + +go: + - 1.3 + - 1.4 + - 1.5 + - 1.6 + - 1.7 + - tip + +script: + - go test -v . + - go test -v -race . diff --git a/vendor/github.com/klauspost/crc32/LICENSE b/vendor/github.com/klauspost/crc32/LICENSE new file mode 100644 index 0000000..4fd5963 --- /dev/null +++ b/vendor/github.com/klauspost/crc32/LICENSE @@ -0,0 +1,28 @@ +Copyright (c) 2012 The Go Authors. All rights reserved. +Copyright (c) 2015 Klaus Post + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with the +distribution. + * Neither the name of Google Inc. nor the names of its +contributors may be used to endorse or promote products derived from +this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/vendor/github.com/klauspost/crc32/README.md b/vendor/github.com/klauspost/crc32/README.md new file mode 100644 index 0000000..029625d --- /dev/null +++ b/vendor/github.com/klauspost/crc32/README.md @@ -0,0 +1,87 @@ +# crc32 +CRC32 hash with x64 optimizations + +This package is a drop-in replacement for the standard library `hash/crc32` package, that features SSE 4.2 optimizations on x64 platforms, for a 10x speedup. + +[![Build Status](https://travis-ci.org/klauspost/crc32.svg?branch=master)](https://travis-ci.org/klauspost/crc32) + +# usage + +Install using `go get github.com/klauspost/crc32`. This library is based on Go 1.5 code and requires Go 1.3 or newer. + +Replace `import "hash/crc32"` with `import "github.com/klauspost/crc32"` and you are good to go. + +# changes +* Oct 20, 2016: Changes have been merged to upstream Go. Package updated to match. +* Dec 4, 2015: Uses the "slice-by-8" trick more extensively, which gives a 1.5 to 2.5x speedup if assembler is unavailable. + + +# performance + +For *Go 1.7* performance is equivalent to the standard library. So if you use this package for Go 1.7 you can switch back. + + +For IEEE tables (the most common), there is approximately a factor 10 speedup with "CLMUL" (Carryless multiplication) instruction: +``` +benchmark old ns/op new ns/op delta +BenchmarkCrc32KB 99955 10258 -89.74% + +benchmark old MB/s new MB/s speedup +BenchmarkCrc32KB 327.83 3194.20 9.74x +``` + +For other tables and "CLMUL" capable machines the performance is the same as the standard library. + +Here are some detailed benchmarks, comparing to go 1.5 standard library with and without assembler enabled. + +``` +Std: Standard Go 1.5 library +Crc: Indicates IEEE type CRC. +40B: Size of each slice encoded. +NoAsm: Assembler was disabled (ie. not an AMD64 or SSE 4.2+ capable machine). +Castagnoli: Castagnoli CRC type. 
+ +BenchmarkStdCrc40B-4 10000000 158 ns/op 252.88 MB/s +BenchmarkCrc40BNoAsm-4 20000000 105 ns/op 377.38 MB/s (slice8) +BenchmarkCrc40B-4 20000000 105 ns/op 378.77 MB/s (slice8) + +BenchmarkStdCrc1KB-4 500000 3604 ns/op 284.10 MB/s +BenchmarkCrc1KBNoAsm-4 1000000 1463 ns/op 699.79 MB/s (slice8) +BenchmarkCrc1KB-4 3000000 396 ns/op 2583.69 MB/s (asm) + +BenchmarkStdCrc8KB-4 200000 11417 ns/op 717.48 MB/s (slice8) +BenchmarkCrc8KBNoAsm-4 200000 11317 ns/op 723.85 MB/s (slice8) +BenchmarkCrc8KB-4 500000 2919 ns/op 2805.73 MB/s (asm) + +BenchmarkStdCrc32KB-4 30000 45749 ns/op 716.24 MB/s (slice8) +BenchmarkCrc32KBNoAsm-4 30000 45109 ns/op 726.42 MB/s (slice8) +BenchmarkCrc32KB-4 100000 11497 ns/op 2850.09 MB/s (asm) + +BenchmarkStdNoAsmCastagnol40B-4 10000000 161 ns/op 246.94 MB/s +BenchmarkStdCastagnoli40B-4 50000000 28.4 ns/op 1410.69 MB/s (asm) +BenchmarkCastagnoli40BNoAsm-4 20000000 100 ns/op 398.01 MB/s (slice8) +BenchmarkCastagnoli40B-4 50000000 28.2 ns/op 1419.54 MB/s (asm) + +BenchmarkStdNoAsmCastagnoli1KB-4 500000 3622 ns/op 282.67 MB/s +BenchmarkStdCastagnoli1KB-4 10000000 144 ns/op 7099.78 MB/s (asm) +BenchmarkCastagnoli1KBNoAsm-4 1000000 1475 ns/op 694.14 MB/s (slice8) +BenchmarkCastagnoli1KB-4 10000000 146 ns/op 6993.35 MB/s (asm) + +BenchmarkStdNoAsmCastagnoli8KB-4 50000 28781 ns/op 284.63 MB/s +BenchmarkStdCastagnoli8KB-4 1000000 1029 ns/op 7957.89 MB/s (asm) +BenchmarkCastagnoli8KBNoAsm-4 200000 11410 ns/op 717.94 MB/s (slice8) +BenchmarkCastagnoli8KB-4 1000000 1000 ns/op 8188.71 MB/s (asm) + +BenchmarkStdNoAsmCastagnoli32KB-4 10000 115426 ns/op 283.89 MB/s +BenchmarkStdCastagnoli32KB-4 300000 4065 ns/op 8059.13 MB/s (asm) +BenchmarkCastagnoli32KBNoAsm-4 30000 45171 ns/op 725.41 MB/s (slice8) +BenchmarkCastagnoli32KB-4 500000 4077 ns/op 8035.89 MB/s (asm) +``` + +The IEEE assembler optimizations has been submitted and will be part of the Go 1.6 standard library. + +However, the improved use of slice-by-8 has not, but will probably be submitted for Go 1.7. + +# license + +Standard Go license. Changes are Copyright (c) 2015 Klaus Post under same conditions. diff --git a/vendor/github.com/klauspost/crc32/crc32.go b/vendor/github.com/klauspost/crc32/crc32.go new file mode 100644 index 0000000..8aa91b1 --- /dev/null +++ b/vendor/github.com/klauspost/crc32/crc32.go @@ -0,0 +1,207 @@ +// Copyright 2009 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package crc32 implements the 32-bit cyclic redundancy check, or CRC-32, +// checksum. See http://en.wikipedia.org/wiki/Cyclic_redundancy_check for +// information. +// +// Polynomials are represented in LSB-first form also known as reversed representation. +// +// See http://en.wikipedia.org/wiki/Mathematics_of_cyclic_redundancy_checks#Reversed_representations_and_reciprocal_polynomials +// for information. +package crc32 + +import ( + "hash" + "sync" +) + +// The size of a CRC-32 checksum in bytes. +const Size = 4 + +// Predefined polynomials. +const ( + // IEEE is by far and away the most common CRC-32 polynomial. + // Used by ethernet (IEEE 802.3), v.42, fddi, gzip, zip, png, ... + IEEE = 0xedb88320 + + // Castagnoli's polynomial, used in iSCSI. + // Has better error detection characteristics than IEEE. + // http://dx.doi.org/10.1109/26.231911 + Castagnoli = 0x82f63b78 + + // Koopman's polynomial. + // Also has better error detection characteristics than IEEE. 
+ // http://dx.doi.org/10.1109/DSN.2002.1028931 + Koopman = 0xeb31d82e +) + +// Table is a 256-word table representing the polynomial for efficient processing. +type Table [256]uint32 + +// This file makes use of functions implemented in architecture-specific files. +// The interface that they implement is as follows: +// +// // archAvailableIEEE reports whether an architecture-specific CRC32-IEEE +// // algorithm is available. +// archAvailableIEEE() bool +// +// // archInitIEEE initializes the architecture-specific CRC3-IEEE algorithm. +// // It can only be called if archAvailableIEEE() returns true. +// archInitIEEE() +// +// // archUpdateIEEE updates the given CRC32-IEEE. It can only be called if +// // archInitIEEE() was previously called. +// archUpdateIEEE(crc uint32, p []byte) uint32 +// +// // archAvailableCastagnoli reports whether an architecture-specific +// // CRC32-C algorithm is available. +// archAvailableCastagnoli() bool +// +// // archInitCastagnoli initializes the architecture-specific CRC32-C +// // algorithm. It can only be called if archAvailableCastagnoli() returns +// // true. +// archInitCastagnoli() +// +// // archUpdateCastagnoli updates the given CRC32-C. It can only be called +// // if archInitCastagnoli() was previously called. +// archUpdateCastagnoli(crc uint32, p []byte) uint32 + +// castagnoliTable points to a lazily initialized Table for the Castagnoli +// polynomial. MakeTable will always return this value when asked to make a +// Castagnoli table so we can compare against it to find when the caller is +// using this polynomial. +var castagnoliTable *Table +var castagnoliTable8 *slicing8Table +var castagnoliArchImpl bool +var updateCastagnoli func(crc uint32, p []byte) uint32 +var castagnoliOnce sync.Once + +func castagnoliInit() { + castagnoliTable = simpleMakeTable(Castagnoli) + castagnoliArchImpl = archAvailableCastagnoli() + + if castagnoliArchImpl { + archInitCastagnoli() + updateCastagnoli = archUpdateCastagnoli + } else { + // Initialize the slicing-by-8 table. + castagnoliTable8 = slicingMakeTable(Castagnoli) + updateCastagnoli = func(crc uint32, p []byte) uint32 { + return slicingUpdate(crc, castagnoliTable8, p) + } + } +} + +// IEEETable is the table for the IEEE polynomial. +var IEEETable = simpleMakeTable(IEEE) + +// ieeeTable8 is the slicing8Table for IEEE +var ieeeTable8 *slicing8Table +var ieeeArchImpl bool +var updateIEEE func(crc uint32, p []byte) uint32 +var ieeeOnce sync.Once + +func ieeeInit() { + ieeeArchImpl = archAvailableIEEE() + + if ieeeArchImpl { + archInitIEEE() + updateIEEE = archUpdateIEEE + } else { + // Initialize the slicing-by-8 table. + ieeeTable8 = slicingMakeTable(IEEE) + updateIEEE = func(crc uint32, p []byte) uint32 { + return slicingUpdate(crc, ieeeTable8, p) + } + } +} + +// MakeTable returns a Table constructed from the specified polynomial. +// The contents of this Table must not be modified. +func MakeTable(poly uint32) *Table { + switch poly { + case IEEE: + ieeeOnce.Do(ieeeInit) + return IEEETable + case Castagnoli: + castagnoliOnce.Do(castagnoliInit) + return castagnoliTable + } + return simpleMakeTable(poly) +} + +// digest represents the partial evaluation of a checksum. +type digest struct { + crc uint32 + tab *Table +} + +// New creates a new hash.Hash32 computing the CRC-32 checksum +// using the polynomial represented by the Table. +// Its Sum method will lay the value out in big-endian byte order. 
+func New(tab *Table) hash.Hash32 { + if tab == IEEETable { + ieeeOnce.Do(ieeeInit) + } + return &digest{0, tab} +} + +// NewIEEE creates a new hash.Hash32 computing the CRC-32 checksum +// using the IEEE polynomial. +// Its Sum method will lay the value out in big-endian byte order. +func NewIEEE() hash.Hash32 { return New(IEEETable) } + +func (d *digest) Size() int { return Size } + +func (d *digest) BlockSize() int { return 1 } + +func (d *digest) Reset() { d.crc = 0 } + +// Update returns the result of adding the bytes in p to the crc. +func Update(crc uint32, tab *Table, p []byte) uint32 { + switch tab { + case castagnoliTable: + return updateCastagnoli(crc, p) + case IEEETable: + // Unfortunately, because IEEETable is exported, IEEE may be used without a + // call to MakeTable. We have to make sure it gets initialized in that case. + ieeeOnce.Do(ieeeInit) + return updateIEEE(crc, p) + default: + return simpleUpdate(crc, tab, p) + } +} + +func (d *digest) Write(p []byte) (n int, err error) { + switch d.tab { + case castagnoliTable: + d.crc = updateCastagnoli(d.crc, p) + case IEEETable: + // We only create digest objects through New() which takes care of + // initialization in this case. + d.crc = updateIEEE(d.crc, p) + default: + d.crc = simpleUpdate(d.crc, d.tab, p) + } + return len(p), nil +} + +func (d *digest) Sum32() uint32 { return d.crc } + +func (d *digest) Sum(in []byte) []byte { + s := d.Sum32() + return append(in, byte(s>>24), byte(s>>16), byte(s>>8), byte(s)) +} + +// Checksum returns the CRC-32 checksum of data +// using the polynomial represented by the Table. +func Checksum(data []byte, tab *Table) uint32 { return Update(0, tab, data) } + +// ChecksumIEEE returns the CRC-32 checksum of data +// using the IEEE polynomial. +func ChecksumIEEE(data []byte) uint32 { + ieeeOnce.Do(ieeeInit) + return updateIEEE(0, data) +} diff --git a/vendor/github.com/klauspost/crc32/crc32_amd64.go b/vendor/github.com/klauspost/crc32/crc32_amd64.go new file mode 100644 index 0000000..af2a0b8 --- /dev/null +++ b/vendor/github.com/klauspost/crc32/crc32_amd64.go @@ -0,0 +1,230 @@ +// Copyright 2011 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build !appengine,!gccgo + +// AMD64-specific hardware-assisted CRC32 algorithms. See crc32.go for a +// description of the interface that each architecture-specific file +// implements. + +package crc32 + +import "unsafe" + +// This file contains the code to call the SSE 4.2 version of the Castagnoli +// and IEEE CRC. + +// haveSSE41/haveSSE42/haveCLMUL are defined in crc_amd64.s and use +// CPUID to test for SSE 4.1, 4.2 and CLMUL support. +func haveSSE41() bool +func haveSSE42() bool +func haveCLMUL() bool + +// castagnoliSSE42 is defined in crc32_amd64.s and uses the SSE4.2 CRC32 +// instruction. +//go:noescape +func castagnoliSSE42(crc uint32, p []byte) uint32 + +// castagnoliSSE42Triple is defined in crc32_amd64.s and uses the SSE4.2 CRC32 +// instruction. +//go:noescape +func castagnoliSSE42Triple( + crcA, crcB, crcC uint32, + a, b, c []byte, + rounds uint32, +) (retA uint32, retB uint32, retC uint32) + +// ieeeCLMUL is defined in crc_amd64.s and uses the PCLMULQDQ +// instruction as well as SSE 4.1. 
+//go:noescape +func ieeeCLMUL(crc uint32, p []byte) uint32 + +var sse42 = haveSSE42() +var useFastIEEE = haveCLMUL() && haveSSE41() + +const castagnoliK1 = 168 +const castagnoliK2 = 1344 + +type sse42Table [4]Table + +var castagnoliSSE42TableK1 *sse42Table +var castagnoliSSE42TableK2 *sse42Table + +func archAvailableCastagnoli() bool { + return sse42 +} + +func archInitCastagnoli() { + if !sse42 { + panic("arch-specific Castagnoli not available") + } + castagnoliSSE42TableK1 = new(sse42Table) + castagnoliSSE42TableK2 = new(sse42Table) + // See description in updateCastagnoli. + // t[0][i] = CRC(i000, O) + // t[1][i] = CRC(0i00, O) + // t[2][i] = CRC(00i0, O) + // t[3][i] = CRC(000i, O) + // where O is a sequence of K zeros. + var tmp [castagnoliK2]byte + for b := 0; b < 4; b++ { + for i := 0; i < 256; i++ { + val := uint32(i) << uint32(b*8) + castagnoliSSE42TableK1[b][i] = castagnoliSSE42(val, tmp[:castagnoliK1]) + castagnoliSSE42TableK2[b][i] = castagnoliSSE42(val, tmp[:]) + } + } +} + +// castagnoliShift computes the CRC32-C of K1 or K2 zeroes (depending on the +// table given) with the given initial crc value. This corresponds to +// CRC(crc, O) in the description in updateCastagnoli. +func castagnoliShift(table *sse42Table, crc uint32) uint32 { + return table[3][crc>>24] ^ + table[2][(crc>>16)&0xFF] ^ + table[1][(crc>>8)&0xFF] ^ + table[0][crc&0xFF] +} + +func archUpdateCastagnoli(crc uint32, p []byte) uint32 { + if !sse42 { + panic("not available") + } + + // This method is inspired from the algorithm in Intel's white paper: + // "Fast CRC Computation for iSCSI Polynomial Using CRC32 Instruction" + // The same strategy of splitting the buffer in three is used but the + // combining calculation is different; the complete derivation is explained + // below. + // + // -- The basic idea -- + // + // The CRC32 instruction (available in SSE4.2) can process 8 bytes at a + // time. In recent Intel architectures the instruction takes 3 cycles; + // however the processor can pipeline up to three instructions if they + // don't depend on each other. + // + // Roughly this means that we can process three buffers in about the same + // time we can process one buffer. + // + // The idea is then to split the buffer in three, CRC the three pieces + // separately and then combine the results. + // + // Combining the results requires precomputed tables, so we must choose a + // fixed buffer length to optimize. The longer the length, the faster; but + // only buffers longer than this length will use the optimization. We choose + // two cutoffs and compute tables for both: + // - one around 512: 168*3=504 + // - one around 4KB: 1344*3=4032 + // + // -- The nitty gritty -- + // + // Let CRC(I, X) be the non-inverted CRC32-C of the sequence X (with + // initial non-inverted CRC I). This function has the following properties: + // (a) CRC(I, AB) = CRC(CRC(I, A), B) + // (b) CRC(I, A xor B) = CRC(I, A) xor CRC(0, B) + // + // Say we want to compute CRC(I, ABC) where A, B, C are three sequences of + // K bytes each, where K is a fixed constant. Let O be the sequence of K zero + // bytes. + // + // CRC(I, ABC) = CRC(I, ABO xor C) + // = CRC(I, ABO) xor CRC(0, C) + // = CRC(CRC(I, AB), O) xor CRC(0, C) + // = CRC(CRC(I, AO xor B), O) xor CRC(0, C) + // = CRC(CRC(I, AO) xor CRC(0, B), O) xor CRC(0, C) + // = CRC(CRC(CRC(I, A), O) xor CRC(0, B), O) xor CRC(0, C) + // + // The castagnoliSSE42Triple function can compute CRC(I, A), CRC(0, B), + // and CRC(0, C) efficiently. 
We just need to find a way to quickly compute + // CRC(uvwx, O) given a 4-byte initial value uvwx. We can precompute these + // values; since we can't have a 32-bit table, we break it up into four + // 8-bit tables: + // + // CRC(uvwx, O) = CRC(u000, O) xor + // CRC(0v00, O) xor + // CRC(00w0, O) xor + // CRC(000x, O) + // + // We can compute tables corresponding to the four terms for all 8-bit + // values. + + crc = ^crc + + // If a buffer is long enough to use the optimization, process the first few + // bytes to align the buffer to an 8 byte boundary (if necessary). + if len(p) >= castagnoliK1*3 { + delta := int(uintptr(unsafe.Pointer(&p[0])) & 7) + if delta != 0 { + delta = 8 - delta + crc = castagnoliSSE42(crc, p[:delta]) + p = p[delta:] + } + } + + // Process 3*K2 at a time. + for len(p) >= castagnoliK2*3 { + // Compute CRC(I, A), CRC(0, B), and CRC(0, C). + crcA, crcB, crcC := castagnoliSSE42Triple( + crc, 0, 0, + p, p[castagnoliK2:], p[castagnoliK2*2:], + castagnoliK2/24) + + // CRC(I, AB) = CRC(CRC(I, A), O) xor CRC(0, B) + crcAB := castagnoliShift(castagnoliSSE42TableK2, crcA) ^ crcB + // CRC(I, ABC) = CRC(CRC(I, AB), O) xor CRC(0, C) + crc = castagnoliShift(castagnoliSSE42TableK2, crcAB) ^ crcC + p = p[castagnoliK2*3:] + } + + // Process 3*K1 at a time. + for len(p) >= castagnoliK1*3 { + // Compute CRC(I, A), CRC(0, B), and CRC(0, C). + crcA, crcB, crcC := castagnoliSSE42Triple( + crc, 0, 0, + p, p[castagnoliK1:], p[castagnoliK1*2:], + castagnoliK1/24) + + // CRC(I, AB) = CRC(CRC(I, A), O) xor CRC(0, B) + crcAB := castagnoliShift(castagnoliSSE42TableK1, crcA) ^ crcB + // CRC(I, ABC) = CRC(CRC(I, AB), O) xor CRC(0, C) + crc = castagnoliShift(castagnoliSSE42TableK1, crcAB) ^ crcC + p = p[castagnoliK1*3:] + } + + // Use the simple implementation for what's left. + crc = castagnoliSSE42(crc, p) + return ^crc +} + +func archAvailableIEEE() bool { + return useFastIEEE +} + +var archIeeeTable8 *slicing8Table + +func archInitIEEE() { + if !useFastIEEE { + panic("not available") + } + // We still use slicing-by-8 for small buffers. + archIeeeTable8 = slicingMakeTable(IEEE) +} + +func archUpdateIEEE(crc uint32, p []byte) uint32 { + if !useFastIEEE { + panic("not available") + } + + if len(p) >= 64 { + left := len(p) & 15 + do := len(p) - left + crc = ^ieeeCLMUL(^crc, p[:do]) + p = p[do:] + } + if len(p) == 0 { + return crc + } + return slicingUpdate(crc, archIeeeTable8, p) +} diff --git a/vendor/github.com/klauspost/crc32/crc32_amd64.s b/vendor/github.com/klauspost/crc32/crc32_amd64.s new file mode 100644 index 0000000..e8a7941 --- /dev/null +++ b/vendor/github.com/klauspost/crc32/crc32_amd64.s @@ -0,0 +1,319 @@ +// Copyright 2011 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build gc + +#define NOSPLIT 4 +#define RODATA 8 + +// castagnoliSSE42 updates the (non-inverted) crc with the given buffer. +// +// func castagnoliSSE42(crc uint32, p []byte) uint32 +TEXT ·castagnoliSSE42(SB), NOSPLIT, $0 + MOVL crc+0(FP), AX // CRC value + MOVQ p+8(FP), SI // data pointer + MOVQ p_len+16(FP), CX // len(p) + + // If there are fewer than 8 bytes to process, skip alignment. + CMPQ CX, $8 + JL less_than_8 + + MOVQ SI, BX + ANDQ $7, BX + JZ aligned + + // Process the first few bytes to 8-byte align the input. + + // BX = 8 - BX. We need to process this many bytes to align. 
+ SUBQ $1, BX + XORQ $7, BX + + BTQ $0, BX + JNC align_2 + + CRC32B (SI), AX + DECQ CX + INCQ SI + +align_2: + BTQ $1, BX + JNC align_4 + + // CRC32W (SI), AX + BYTE $0x66; BYTE $0xf2; BYTE $0x0f; BYTE $0x38; BYTE $0xf1; BYTE $0x06 + + SUBQ $2, CX + ADDQ $2, SI + +align_4: + BTQ $2, BX + JNC aligned + + // CRC32L (SI), AX + BYTE $0xf2; BYTE $0x0f; BYTE $0x38; BYTE $0xf1; BYTE $0x06 + + SUBQ $4, CX + ADDQ $4, SI + +aligned: + // The input is now 8-byte aligned and we can process 8-byte chunks. + CMPQ CX, $8 + JL less_than_8 + + CRC32Q (SI), AX + ADDQ $8, SI + SUBQ $8, CX + JMP aligned + +less_than_8: + // We may have some bytes left over; process 4 bytes, then 2, then 1. + BTQ $2, CX + JNC less_than_4 + + // CRC32L (SI), AX + BYTE $0xf2; BYTE $0x0f; BYTE $0x38; BYTE $0xf1; BYTE $0x06 + ADDQ $4, SI + +less_than_4: + BTQ $1, CX + JNC less_than_2 + + // CRC32W (SI), AX + BYTE $0x66; BYTE $0xf2; BYTE $0x0f; BYTE $0x38; BYTE $0xf1; BYTE $0x06 + ADDQ $2, SI + +less_than_2: + BTQ $0, CX + JNC done + + CRC32B (SI), AX + +done: + MOVL AX, ret+32(FP) + RET + +// castagnoliSSE42Triple updates three (non-inverted) crcs with (24*rounds) +// bytes from each buffer. +// +// func castagnoliSSE42Triple( +// crc1, crc2, crc3 uint32, +// a, b, c []byte, +// rounds uint32, +// ) (retA uint32, retB uint32, retC uint32) +TEXT ·castagnoliSSE42Triple(SB), NOSPLIT, $0 + MOVL crcA+0(FP), AX + MOVL crcB+4(FP), CX + MOVL crcC+8(FP), DX + + MOVQ a+16(FP), R8 // data pointer + MOVQ b+40(FP), R9 // data pointer + MOVQ c+64(FP), R10 // data pointer + + MOVL rounds+88(FP), R11 + +loop: + CRC32Q (R8), AX + CRC32Q (R9), CX + CRC32Q (R10), DX + + CRC32Q 8(R8), AX + CRC32Q 8(R9), CX + CRC32Q 8(R10), DX + + CRC32Q 16(R8), AX + CRC32Q 16(R9), CX + CRC32Q 16(R10), DX + + ADDQ $24, R8 + ADDQ $24, R9 + ADDQ $24, R10 + + DECQ R11 + JNZ loop + + MOVL AX, retA+96(FP) + MOVL CX, retB+100(FP) + MOVL DX, retC+104(FP) + RET + +// func haveSSE42() bool +TEXT ·haveSSE42(SB), NOSPLIT, $0 + XORQ AX, AX + INCL AX + CPUID + SHRQ $20, CX + ANDQ $1, CX + MOVB CX, ret+0(FP) + RET + +// func haveCLMUL() bool +TEXT ·haveCLMUL(SB), NOSPLIT, $0 + XORQ AX, AX + INCL AX + CPUID + SHRQ $1, CX + ANDQ $1, CX + MOVB CX, ret+0(FP) + RET + +// func haveSSE41() bool +TEXT ·haveSSE41(SB), NOSPLIT, $0 + XORQ AX, AX + INCL AX + CPUID + SHRQ $19, CX + ANDQ $1, CX + MOVB CX, ret+0(FP) + RET + +// CRC32 polynomial data +// +// These constants are lifted from the +// Linux kernel, since they avoid the costly +// PSHUFB 16 byte reversal proposed in the +// original Intel paper. +DATA r2r1kp<>+0(SB)/8, $0x154442bd4 +DATA r2r1kp<>+8(SB)/8, $0x1c6e41596 +DATA r4r3kp<>+0(SB)/8, $0x1751997d0 +DATA r4r3kp<>+8(SB)/8, $0x0ccaa009e +DATA rupolykp<>+0(SB)/8, $0x1db710641 +DATA rupolykp<>+8(SB)/8, $0x1f7011641 +DATA r5kp<>+0(SB)/8, $0x163cd6124 + +GLOBL r2r1kp<>(SB), RODATA, $16 +GLOBL r4r3kp<>(SB), RODATA, $16 +GLOBL rupolykp<>(SB), RODATA, $16 +GLOBL r5kp<>(SB), RODATA, $8 + +// Based on http://www.intel.com/content/dam/www/public/us/en/documents/white-papers/fast-crc-computation-generic-polynomials-pclmulqdq-paper.pdf +// len(p) must be at least 64, and must be a multiple of 16. 
+ +// func ieeeCLMUL(crc uint32, p []byte) uint32 +TEXT ·ieeeCLMUL(SB), NOSPLIT, $0 + MOVL crc+0(FP), X0 // Initial CRC value + MOVQ p+8(FP), SI // data pointer + MOVQ p_len+16(FP), CX // len(p) + + MOVOU (SI), X1 + MOVOU 16(SI), X2 + MOVOU 32(SI), X3 + MOVOU 48(SI), X4 + PXOR X0, X1 + ADDQ $64, SI // buf+=64 + SUBQ $64, CX // len-=64 + CMPQ CX, $64 // Less than 64 bytes left + JB remain64 + + MOVOA r2r1kp<>+0(SB), X0 + +loopback64: + MOVOA X1, X5 + MOVOA X2, X6 + MOVOA X3, X7 + MOVOA X4, X8 + + PCLMULQDQ $0, X0, X1 + PCLMULQDQ $0, X0, X2 + PCLMULQDQ $0, X0, X3 + PCLMULQDQ $0, X0, X4 + + // Load next early + MOVOU (SI), X11 + MOVOU 16(SI), X12 + MOVOU 32(SI), X13 + MOVOU 48(SI), X14 + + PCLMULQDQ $0x11, X0, X5 + PCLMULQDQ $0x11, X0, X6 + PCLMULQDQ $0x11, X0, X7 + PCLMULQDQ $0x11, X0, X8 + + PXOR X5, X1 + PXOR X6, X2 + PXOR X7, X3 + PXOR X8, X4 + + PXOR X11, X1 + PXOR X12, X2 + PXOR X13, X3 + PXOR X14, X4 + + ADDQ $0x40, DI + ADDQ $64, SI // buf+=64 + SUBQ $64, CX // len-=64 + CMPQ CX, $64 // Less than 64 bytes left? + JGE loopback64 + + // Fold result into a single register (X1) +remain64: + MOVOA r4r3kp<>+0(SB), X0 + + MOVOA X1, X5 + PCLMULQDQ $0, X0, X1 + PCLMULQDQ $0x11, X0, X5 + PXOR X5, X1 + PXOR X2, X1 + + MOVOA X1, X5 + PCLMULQDQ $0, X0, X1 + PCLMULQDQ $0x11, X0, X5 + PXOR X5, X1 + PXOR X3, X1 + + MOVOA X1, X5 + PCLMULQDQ $0, X0, X1 + PCLMULQDQ $0x11, X0, X5 + PXOR X5, X1 + PXOR X4, X1 + + // If there is less than 16 bytes left we are done + CMPQ CX, $16 + JB finish + + // Encode 16 bytes +remain16: + MOVOU (SI), X10 + MOVOA X1, X5 + PCLMULQDQ $0, X0, X1 + PCLMULQDQ $0x11, X0, X5 + PXOR X5, X1 + PXOR X10, X1 + SUBQ $16, CX + ADDQ $16, SI + CMPQ CX, $16 + JGE remain16 + +finish: + // Fold final result into 32 bits and return it + PCMPEQB X3, X3 + PCLMULQDQ $1, X1, X0 + PSRLDQ $8, X1 + PXOR X0, X1 + + MOVOA X1, X2 + MOVQ r5kp<>+0(SB), X0 + + // Creates 32 bit mask. Note that we don't care about upper half. + PSRLQ $32, X3 + + PSRLDQ $4, X2 + PAND X3, X1 + PCLMULQDQ $0, X0, X1 + PXOR X2, X1 + + MOVOA rupolykp<>+0(SB), X0 + + MOVOA X1, X2 + PAND X3, X1 + PCLMULQDQ $0x10, X0, X1 + PAND X3, X1 + PCLMULQDQ $0, X0, X1 + PXOR X2, X1 + + // PEXTRD $1, X1, AX (SSE 4.1) + BYTE $0x66; BYTE $0x0f; BYTE $0x3a + BYTE $0x16; BYTE $0xc8; BYTE $0x01 + MOVL AX, ret+32(FP) + + RET diff --git a/vendor/github.com/klauspost/crc32/crc32_amd64p32.go b/vendor/github.com/klauspost/crc32/crc32_amd64p32.go new file mode 100644 index 0000000..3222b06 --- /dev/null +++ b/vendor/github.com/klauspost/crc32/crc32_amd64p32.go @@ -0,0 +1,43 @@ +// Copyright 2011 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build !appengine,!gccgo + +package crc32 + +// This file contains the code to call the SSE 4.2 version of the Castagnoli +// CRC. + +// haveSSE42 is defined in crc32_amd64p32.s and uses CPUID to test for SSE 4.2 +// support. +func haveSSE42() bool + +// castagnoliSSE42 is defined in crc32_amd64p32.s and uses the SSE4.2 CRC32 +// instruction. +//go:noescape +func castagnoliSSE42(crc uint32, p []byte) uint32 + +var sse42 = haveSSE42() + +func archAvailableCastagnoli() bool { + return sse42 +} + +func archInitCastagnoli() { + if !sse42 { + panic("not available") + } + // No initialization necessary. 
+} + +func archUpdateCastagnoli(crc uint32, p []byte) uint32 { + if !sse42 { + panic("not available") + } + return castagnoliSSE42(crc, p) +} + +func archAvailableIEEE() bool { return false } +func archInitIEEE() { panic("not available") } +func archUpdateIEEE(crc uint32, p []byte) uint32 { panic("not available") } diff --git a/vendor/github.com/klauspost/crc32/crc32_amd64p32.s b/vendor/github.com/klauspost/crc32/crc32_amd64p32.s new file mode 100644 index 0000000..a578d68 --- /dev/null +++ b/vendor/github.com/klauspost/crc32/crc32_amd64p32.s @@ -0,0 +1,67 @@ +// Copyright 2011 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build gc + +#define NOSPLIT 4 +#define RODATA 8 + +// func castagnoliSSE42(crc uint32, p []byte) uint32 +TEXT ·castagnoliSSE42(SB), NOSPLIT, $0 + MOVL crc+0(FP), AX // CRC value + MOVL p+4(FP), SI // data pointer + MOVL p_len+8(FP), CX // len(p) + + NOTL AX + + // If there's less than 8 bytes to process, we do it byte-by-byte. + CMPQ CX, $8 + JL cleanup + + // Process individual bytes until the input is 8-byte aligned. +startup: + MOVQ SI, BX + ANDQ $7, BX + JZ aligned + + CRC32B (SI), AX + DECQ CX + INCQ SI + JMP startup + +aligned: + // The input is now 8-byte aligned and we can process 8-byte chunks. + CMPQ CX, $8 + JL cleanup + + CRC32Q (SI), AX + ADDQ $8, SI + SUBQ $8, CX + JMP aligned + +cleanup: + // We may have some bytes left over that we process one at a time. + CMPQ CX, $0 + JE done + + CRC32B (SI), AX + INCQ SI + DECQ CX + JMP cleanup + +done: + NOTL AX + MOVL AX, ret+16(FP) + RET + +// func haveSSE42() bool +TEXT ·haveSSE42(SB), NOSPLIT, $0 + XORQ AX, AX + INCL AX + CPUID + SHRQ $20, CX + ANDQ $1, CX + MOVB CX, ret+0(FP) + RET + diff --git a/vendor/github.com/klauspost/crc32/crc32_generic.go b/vendor/github.com/klauspost/crc32/crc32_generic.go new file mode 100644 index 0000000..abacbb6 --- /dev/null +++ b/vendor/github.com/klauspost/crc32/crc32_generic.go @@ -0,0 +1,89 @@ +// Copyright 2011 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// This file contains CRC32 algorithms that are not specific to any architecture +// and don't use hardware acceleration. +// +// The simple (and slow) CRC32 implementation only uses a 256*4 bytes table. +// +// The slicing-by-8 algorithm is a faster implementation that uses a bigger +// table (8*256*4 bytes). + +package crc32 + +// simpleMakeTable allocates and constructs a Table for the specified +// polynomial. The table is suitable for use with the simple algorithm +// (simpleUpdate). +func simpleMakeTable(poly uint32) *Table { + t := new(Table) + simplePopulateTable(poly, t) + return t +} + +// simplePopulateTable constructs a Table for the specified polynomial, suitable +// for use with simpleUpdate. +func simplePopulateTable(poly uint32, t *Table) { + for i := 0; i < 256; i++ { + crc := uint32(i) + for j := 0; j < 8; j++ { + if crc&1 == 1 { + crc = (crc >> 1) ^ poly + } else { + crc >>= 1 + } + } + t[i] = crc + } +} + +// simpleUpdate uses the simple algorithm to update the CRC, given a table that +// was previously computed using simpleMakeTable. +func simpleUpdate(crc uint32, tab *Table, p []byte) uint32 { + crc = ^crc + for _, v := range p { + crc = tab[byte(crc)^v] ^ (crc >> 8) + } + return ^crc +} + +// Use slicing-by-8 when payload >= this value. 
+const slicing8Cutoff = 16 + +// slicing8Table is array of 8 Tables, used by the slicing-by-8 algorithm. +type slicing8Table [8]Table + +// slicingMakeTable constructs a slicing8Table for the specified polynomial. The +// table is suitable for use with the slicing-by-8 algorithm (slicingUpdate). +func slicingMakeTable(poly uint32) *slicing8Table { + t := new(slicing8Table) + simplePopulateTable(poly, &t[0]) + for i := 0; i < 256; i++ { + crc := t[0][i] + for j := 1; j < 8; j++ { + crc = t[0][crc&0xFF] ^ (crc >> 8) + t[j][i] = crc + } + } + return t +} + +// slicingUpdate uses the slicing-by-8 algorithm to update the CRC, given a +// table that was previously computed using slicingMakeTable. +func slicingUpdate(crc uint32, tab *slicing8Table, p []byte) uint32 { + if len(p) >= slicing8Cutoff { + crc = ^crc + for len(p) > 8 { + crc ^= uint32(p[0]) | uint32(p[1])<<8 | uint32(p[2])<<16 | uint32(p[3])<<24 + crc = tab[0][p[7]] ^ tab[1][p[6]] ^ tab[2][p[5]] ^ tab[3][p[4]] ^ + tab[4][crc>>24] ^ tab[5][(crc>>16)&0xFF] ^ + tab[6][(crc>>8)&0xFF] ^ tab[7][crc&0xFF] + p = p[8:] + } + crc = ^crc + } + if len(p) == 0 { + return crc + } + return simpleUpdate(crc, &tab[0], p) +} diff --git a/vendor/github.com/klauspost/crc32/crc32_otherarch.go b/vendor/github.com/klauspost/crc32/crc32_otherarch.go new file mode 100644 index 0000000..cc96076 --- /dev/null +++ b/vendor/github.com/klauspost/crc32/crc32_otherarch.go @@ -0,0 +1,15 @@ +// Copyright 2011 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build !amd64,!amd64p32,!s390x + +package crc32 + +func archAvailableIEEE() bool { return false } +func archInitIEEE() { panic("not available") } +func archUpdateIEEE(crc uint32, p []byte) uint32 { panic("not available") } + +func archAvailableCastagnoli() bool { return false } +func archInitCastagnoli() { panic("not available") } +func archUpdateCastagnoli(crc uint32, p []byte) uint32 { panic("not available") } diff --git a/vendor/github.com/klauspost/crc32/crc32_s390x.go b/vendor/github.com/klauspost/crc32/crc32_s390x.go new file mode 100644 index 0000000..ce96f03 --- /dev/null +++ b/vendor/github.com/klauspost/crc32/crc32_s390x.go @@ -0,0 +1,91 @@ +// Copyright 2016 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build s390x + +package crc32 + +const ( + vxMinLen = 64 + vxAlignMask = 15 // align to 16 bytes +) + +// hasVectorFacility reports whether the machine has the z/Architecture +// vector facility installed and enabled. +func hasVectorFacility() bool + +var hasVX = hasVectorFacility() + +// vectorizedCastagnoli implements CRC32 using vector instructions. +// It is defined in crc32_s390x.s. +//go:noescape +func vectorizedCastagnoli(crc uint32, p []byte) uint32 + +// vectorizedIEEE implements CRC32 using vector instructions. +// It is defined in crc32_s390x.s. +//go:noescape +func vectorizedIEEE(crc uint32, p []byte) uint32 + +func archAvailableCastagnoli() bool { + return hasVX +} + +var archCastagnoliTable8 *slicing8Table + +func archInitCastagnoli() { + if !hasVX { + panic("not available") + } + // We still use slicing-by-8 for small buffers. + archCastagnoliTable8 = slicingMakeTable(Castagnoli) +} + +// archUpdateCastagnoli calculates the checksum of p using +// vectorizedCastagnoli. 
+func archUpdateCastagnoli(crc uint32, p []byte) uint32 { + if !hasVX { + panic("not available") + } + // Use vectorized function if data length is above threshold. + if len(p) >= vxMinLen { + aligned := len(p) & ^vxAlignMask + crc = vectorizedCastagnoli(crc, p[:aligned]) + p = p[aligned:] + } + if len(p) == 0 { + return crc + } + return slicingUpdate(crc, archCastagnoliTable8, p) +} + +func archAvailableIEEE() bool { + return hasVX +} + +var archIeeeTable8 *slicing8Table + +func archInitIEEE() { + if !hasVX { + panic("not available") + } + // We still use slicing-by-8 for small buffers. + archIeeeTable8 = slicingMakeTable(IEEE) +} + +// archUpdateIEEE calculates the checksum of p using vectorizedIEEE. +func archUpdateIEEE(crc uint32, p []byte) uint32 { + if !hasVX { + panic("not available") + } + // Use vectorized function if data length is above threshold. + if len(p) >= vxMinLen { + aligned := len(p) & ^vxAlignMask + crc = vectorizedIEEE(crc, p[:aligned]) + p = p[aligned:] + } + if len(p) == 0 { + return crc + } + return slicingUpdate(crc, archIeeeTable8, p) +} diff --git a/vendor/github.com/klauspost/crc32/crc32_s390x.s b/vendor/github.com/klauspost/crc32/crc32_s390x.s new file mode 100644 index 0000000..e980ca2 --- /dev/null +++ b/vendor/github.com/klauspost/crc32/crc32_s390x.s @@ -0,0 +1,249 @@ +// Copyright 2016 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build s390x + +#include "textflag.h" + +// Vector register range containing CRC-32 constants + +#define CONST_PERM_LE2BE V9 +#define CONST_R2R1 V10 +#define CONST_R4R3 V11 +#define CONST_R5 V12 +#define CONST_RU_POLY V13 +#define CONST_CRC_POLY V14 + +// The CRC-32 constant block contains reduction constants to fold and +// process particular chunks of the input data stream in parallel. +// +// Note that the constant definitions below are extended in order to compute +// intermediate results with a single VECTOR GALOIS FIELD MULTIPLY instruction. +// The rightmost doubleword can be 0 to prevent contribution to the result or +// can be multiplied by 1 to perform an XOR without the need for a separate +// VECTOR EXCLUSIVE OR instruction. 
+// +// The polynomials used are bit-reflected: +// +// IEEE: P'(x) = 0x0edb88320 +// Castagnoli: P'(x) = 0x082f63b78 + +// IEEE polynomial constants +DATA ·crcleconskp+0(SB)/8, $0x0F0E0D0C0B0A0908 // LE-to-BE mask +DATA ·crcleconskp+8(SB)/8, $0x0706050403020100 +DATA ·crcleconskp+16(SB)/8, $0x00000001c6e41596 // R2 +DATA ·crcleconskp+24(SB)/8, $0x0000000154442bd4 // R1 +DATA ·crcleconskp+32(SB)/8, $0x00000000ccaa009e // R4 +DATA ·crcleconskp+40(SB)/8, $0x00000001751997d0 // R3 +DATA ·crcleconskp+48(SB)/8, $0x0000000000000000 +DATA ·crcleconskp+56(SB)/8, $0x0000000163cd6124 // R5 +DATA ·crcleconskp+64(SB)/8, $0x0000000000000000 +DATA ·crcleconskp+72(SB)/8, $0x00000001F7011641 // u' +DATA ·crcleconskp+80(SB)/8, $0x0000000000000000 +DATA ·crcleconskp+88(SB)/8, $0x00000001DB710641 // P'(x) << 1 + +GLOBL ·crcleconskp(SB), RODATA, $144 + +// Castagonli Polynomial constants +DATA ·crccleconskp+0(SB)/8, $0x0F0E0D0C0B0A0908 // LE-to-BE mask +DATA ·crccleconskp+8(SB)/8, $0x0706050403020100 +DATA ·crccleconskp+16(SB)/8, $0x000000009e4addf8 // R2 +DATA ·crccleconskp+24(SB)/8, $0x00000000740eef02 // R1 +DATA ·crccleconskp+32(SB)/8, $0x000000014cd00bd6 // R4 +DATA ·crccleconskp+40(SB)/8, $0x00000000f20c0dfe // R3 +DATA ·crccleconskp+48(SB)/8, $0x0000000000000000 +DATA ·crccleconskp+56(SB)/8, $0x00000000dd45aab8 // R5 +DATA ·crccleconskp+64(SB)/8, $0x0000000000000000 +DATA ·crccleconskp+72(SB)/8, $0x00000000dea713f1 // u' +DATA ·crccleconskp+80(SB)/8, $0x0000000000000000 +DATA ·crccleconskp+88(SB)/8, $0x0000000105ec76f0 // P'(x) << 1 + +GLOBL ·crccleconskp(SB), RODATA, $144 + +// func hasVectorFacility() bool +TEXT ·hasVectorFacility(SB), NOSPLIT, $24-1 + MOVD $x-24(SP), R1 + XC $24, 0(R1), 0(R1) // clear the storage + MOVD $2, R0 // R0 is the number of double words stored -1 + WORD $0xB2B01000 // STFLE 0(R1) + XOR R0, R0 // reset the value of R0 + MOVBZ z-8(SP), R1 + AND $0x40, R1 + BEQ novector + +vectorinstalled: + // check if the vector instruction has been enabled + VLEIB $0, $0xF, V16 + VLGVB $0, V16, R1 + CMPBNE R1, $0xF, novector + MOVB $1, ret+0(FP) // have vx + RET + +novector: + MOVB $0, ret+0(FP) // no vx + RET + +// The CRC-32 function(s) use these calling conventions: +// +// Parameters: +// +// R2: Initial CRC value, typically ~0; and final CRC (return) value. +// R3: Input buffer pointer, performance might be improved if the +// buffer is on a doubleword boundary. +// R4: Length of the buffer, must be 64 bytes or greater. +// +// Register usage: +// +// R5: CRC-32 constant pool base pointer. +// V0: Initial CRC value and intermediate constants and results. +// V1..V4: Data for CRC computation. +// V5..V8: Next data chunks that are fetched from the input buffer. +// +// V9..V14: CRC-32 constants. 
+ +// func vectorizedIEEE(crc uint32, p []byte) uint32 +TEXT ·vectorizedIEEE(SB), NOSPLIT, $0 + MOVWZ crc+0(FP), R2 // R2 stores the CRC value + MOVD p+8(FP), R3 // data pointer + MOVD p_len+16(FP), R4 // len(p) + + MOVD $·crcleconskp(SB), R5 + BR vectorizedBody<>(SB) + +// func vectorizedCastagnoli(crc uint32, p []byte) uint32 +TEXT ·vectorizedCastagnoli(SB), NOSPLIT, $0 + MOVWZ crc+0(FP), R2 // R2 stores the CRC value + MOVD p+8(FP), R3 // data pointer + MOVD p_len+16(FP), R4 // len(p) + + // R5: crc-32 constant pool base pointer, constant is used to reduce crc + MOVD $·crccleconskp(SB), R5 + BR vectorizedBody<>(SB) + +TEXT vectorizedBody<>(SB), NOSPLIT, $0 + XOR $0xffffffff, R2 // NOTW R2 + VLM 0(R5), CONST_PERM_LE2BE, CONST_CRC_POLY + + // Load the initial CRC value into the rightmost word of V0 + VZERO V0 + VLVGF $3, R2, V0 + + // Crash if the input size is less than 64-bytes. + CMP R4, $64 + BLT crash + + // Load a 64-byte data chunk and XOR with CRC + VLM 0(R3), V1, V4 // 64-bytes into V1..V4 + + // Reflect the data if the CRC operation is in the bit-reflected domain + VPERM V1, V1, CONST_PERM_LE2BE, V1 + VPERM V2, V2, CONST_PERM_LE2BE, V2 + VPERM V3, V3, CONST_PERM_LE2BE, V3 + VPERM V4, V4, CONST_PERM_LE2BE, V4 + + VX V0, V1, V1 // V1 ^= CRC + ADD $64, R3 // BUF = BUF + 64 + ADD $(-64), R4 + + // Check remaining buffer size and jump to proper folding method + CMP R4, $64 + BLT less_than_64bytes + +fold_64bytes_loop: + // Load the next 64-byte data chunk into V5 to V8 + VLM 0(R3), V5, V8 + VPERM V5, V5, CONST_PERM_LE2BE, V5 + VPERM V6, V6, CONST_PERM_LE2BE, V6 + VPERM V7, V7, CONST_PERM_LE2BE, V7 + VPERM V8, V8, CONST_PERM_LE2BE, V8 + + // Perform a GF(2) multiplication of the doublewords in V1 with + // the reduction constants in V0. The intermediate result is + // then folded (accumulated) with the next data chunk in V5 and + // stored in V1. Repeat this step for the register contents + // in V2, V3, and V4 respectively. + + VGFMAG CONST_R2R1, V1, V5, V1 + VGFMAG CONST_R2R1, V2, V6, V2 + VGFMAG CONST_R2R1, V3, V7, V3 + VGFMAG CONST_R2R1, V4, V8, V4 + + // Adjust buffer pointer and length for next loop + ADD $64, R3 // BUF = BUF + 64 + ADD $(-64), R4 // LEN = LEN - 64 + + CMP R4, $64 + BGE fold_64bytes_loop + +less_than_64bytes: + // Fold V1 to V4 into a single 128-bit value in V1 + VGFMAG CONST_R4R3, V1, V2, V1 + VGFMAG CONST_R4R3, V1, V3, V1 + VGFMAG CONST_R4R3, V1, V4, V1 + + // Check whether to continue with 64-bit folding + CMP R4, $16 + BLT final_fold + +fold_16bytes_loop: + VL 0(R3), V2 // Load next data chunk + VPERM V2, V2, CONST_PERM_LE2BE, V2 + + VGFMAG CONST_R4R3, V1, V2, V1 // Fold next data chunk + + // Adjust buffer pointer and size for folding next data chunk + ADD $16, R3 + ADD $-16, R4 + + // Process remaining data chunks + CMP R4, $16 + BGE fold_16bytes_loop + +final_fold: + VLEIB $7, $0x40, V9 + VSRLB V9, CONST_R4R3, V0 + VLEIG $0, $1, V0 + + VGFMG V0, V1, V1 + + VLEIB $7, $0x20, V9 // Shift by words + VSRLB V9, V1, V2 // Store remaining bits in V2 + VUPLLF V1, V1 // Split rightmost doubleword + VGFMAG CONST_R5, V1, V2, V1 // V1 = (V1 * R5) XOR V2 + + // The input values to the Barret reduction are the degree-63 polynomial + // in V1 (R(x)), degree-32 generator polynomial, and the reduction + // constant u. The Barret reduction result is the CRC value of R(x) mod + // P(x). + // + // The Barret reduction algorithm is defined as: + // + // 1. T1(x) = floor( R(x) / x^32 ) GF2MUL u + // 2. T2(x) = floor( T1(x) / x^32 ) GF2MUL P(x) + // 3. 
C(x) = R(x) XOR T2(x) mod x^32 + // + // Note: To compensate the division by x^32, use the vector unpack + // instruction to move the leftmost word into the leftmost doubleword + // of the vector register. The rightmost doubleword is multiplied + // with zero to not contribute to the intermedate results. + + // T1(x) = floor( R(x) / x^32 ) GF2MUL u + VUPLLF V1, V2 + VGFMG CONST_RU_POLY, V2, V2 + + // Compute the GF(2) product of the CRC polynomial in VO with T1(x) in + // V2 and XOR the intermediate result, T2(x), with the value in V1. + // The final result is in the rightmost word of V2. + + VUPLLF V2, V2 + VGFMAG CONST_CRC_POLY, V2, V1, V2 + +done: + VLGVF $2, V2, R2 + XOR $0xffffffff, R2 // NOTW R2 + MOVWZ R2, ret + 32(FP) + RET + +crash: + MOVD $0, (R0) // input size is less than 64-bytes diff --git a/vendor/github.com/klauspost/crc32/crc32_test.go b/vendor/github.com/klauspost/crc32/crc32_test.go new file mode 100644 index 0000000..0399436 --- /dev/null +++ b/vendor/github.com/klauspost/crc32/crc32_test.go @@ -0,0 +1,284 @@ +// Copyright 2009 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package crc32 + +import ( + crand "crypto/rand" + "hash" + mrand "math/rand" + "testing" +) + +type test struct { + ieee, castagnoli uint32 + in string +} + +var golden = []test{ + {0x0, 0x0, ""}, + {0xe8b7be43, 0xc1d04330, "a"}, + {0x9e83486d, 0xe2a22936, "ab"}, + {0x352441c2, 0x364b3fb7, "abc"}, + {0xed82cd11, 0x92c80a31, "abcd"}, + {0x8587d865, 0xc450d697, "abcde"}, + {0x4b8e39ef, 0x53bceff1, "abcdef"}, + {0x312a6aa6, 0xe627f441, "abcdefg"}, + {0xaeef2a50, 0xa9421b7, "abcdefgh"}, + {0x8da988af, 0x2ddc99fc, "abcdefghi"}, + {0x3981703a, 0xe6599437, "abcdefghij"}, + {0x6b9cdfe7, 0xb2cc01fe, "Discard medicine more than two years old."}, + {0xc90ef73f, 0xe28207f, "He who has a shady past knows that nice guys finish last."}, + {0xb902341f, 0xbe93f964, "I wouldn't marry him with a ten foot pole."}, + {0x42080e8, 0x9e3be0c3, "Free! Free!/A trip/to Mars/for 900/empty jars/Burma Shave"}, + {0x154c6d11, 0xf505ef04, "The days of the digital watch are numbered. -Tom Stoppard"}, + {0x4c418325, 0x85d3dc82, "Nepal premier won't resign."}, + {0x33955150, 0xc5142380, "For every action there is an equal and opposite government program."}, + {0x26216a4b, 0x75eb77dd, "His money is twice tainted: 'taint yours and 'taint mine."}, + {0x1abbe45e, 0x91ebe9f7, "There is no reason for any individual to have a computer in their home. -Ken Olsen, 1977"}, + {0xc89a94f7, 0xf0b1168e, "It's a tiny change to the code and not completely disgusting. - Bob Manchek"}, + {0xab3abe14, 0x572b74e2, "size: a.out: bad magic"}, + {0xbab102b6, 0x8a58a6d5, "The major problem is with sendmail. -Mark Horton"}, + {0x999149d7, 0x9c426c50, "Give me a rock, paper and scissors and I will move the world. CCFestoon"}, + {0x6d52a33c, 0x735400a4, "If the enemy is within range, then so are you."}, + {0x90631e8d, 0xbec49c95, "It's well we cannot hear the screams/That we create in others' dreams."}, + {0x78309130, 0xa95a2079, "You remind me of a TV show, but that's all right: I watch it anyway."}, + {0x7d0a377f, 0xde2e65c5, "C is as portable as Stonehedge!!"}, + {0x8c79fd79, 0x297a88ed, "Even if I could be Shakespeare, I think I should still choose to be Faraday. - A. Huxley"}, + {0xa20b7167, 0x66ed1d8b, "The fugacity of a constituent in a mixture of gases at a given temperature is proportional to its mole fraction. 
Lewis-Randall Rule"}, + {0x8e0bb443, 0xdcded527, "How can you write a big system without C++? -Paul Glick"}, +} + +// testGoldenIEEE verifies that the given function returns +// correct IEEE checksums. +func testGoldenIEEE(t *testing.T, crcFunc func(b []byte) uint32) { + for _, g := range golden { + if crc := crcFunc([]byte(g.in)); crc != g.ieee { + t.Errorf("IEEE(%s) = 0x%x want 0x%x", g.in, crc, g.ieee) + } + } +} + +// testGoldenCastagnoli verifies that the given function returns +// correct IEEE checksums. +func testGoldenCastagnoli(t *testing.T, crcFunc func(b []byte) uint32) { + for _, g := range golden { + if crc := crcFunc([]byte(g.in)); crc != g.castagnoli { + t.Errorf("Castagnoli(%s) = 0x%x want 0x%x", g.in, crc, g.castagnoli) + } + } +} + +// testCrossCheck generates random buffers of various lengths and verifies that +// the two "update" functions return the same result. +func testCrossCheck(t *testing.T, crcFunc1, crcFunc2 func(crc uint32, b []byte) uint32) { + // The AMD64 implementation has some cutoffs at lengths 168*3=504 and + // 1344*3=4032. We should make sure lengths around these values are in the + // list. + lengths := []int{0, 1, 2, 3, 4, 5, 10, 16, 50, 100, 128, + 500, 501, 502, 503, 504, 505, 512, 1000, 1024, 2000, + 4030, 4031, 4032, 4033, 4036, 4040, 4048, 4096, 5000, 10000} + for _, length := range lengths { + p := make([]byte, length) + _, _ = crand.Read(p) + crcInit := uint32(mrand.Int63()) + crc1 := crcFunc1(crcInit, p) + crc2 := crcFunc2(crcInit, p) + if crc1 != crc2 { + t.Errorf("mismatch: 0x%x vs 0x%x (buffer length %d)", crc1, crc2, length) + } + } +} + +// TestSimple tests the simple generic algorithm. +func TestSimple(t *testing.T) { + tab := simpleMakeTable(IEEE) + testGoldenIEEE(t, func(b []byte) uint32 { + return simpleUpdate(0, tab, b) + }) + + tab = simpleMakeTable(Castagnoli) + testGoldenCastagnoli(t, func(b []byte) uint32 { + return simpleUpdate(0, tab, b) + }) +} + +// TestSimple tests the slicing-by-8 algorithm. +func TestSlicing(t *testing.T) { + tab := slicingMakeTable(IEEE) + testGoldenIEEE(t, func(b []byte) uint32 { + return slicingUpdate(0, tab, b) + }) + + tab = slicingMakeTable(Castagnoli) + testGoldenCastagnoli(t, func(b []byte) uint32 { + return slicingUpdate(0, tab, b) + }) + + // Cross-check various polys against the simple algorithm. + for _, poly := range []uint32{IEEE, Castagnoli, Koopman, 0xD5828281} { + t1 := simpleMakeTable(poly) + f1 := func(crc uint32, b []byte) uint32 { + return simpleUpdate(crc, t1, b) + } + t2 := slicingMakeTable(poly) + f2 := func(crc uint32, b []byte) uint32 { + return slicingUpdate(crc, t2, b) + } + testCrossCheck(t, f1, f2) + } +} + +func TestArchIEEE(t *testing.T) { + if !archAvailableIEEE() { + t.Skip("Arch-specific IEEE not available.") + } + archInitIEEE() + slicingTable := slicingMakeTable(IEEE) + testCrossCheck(t, archUpdateIEEE, func(crc uint32, b []byte) uint32 { + return slicingUpdate(crc, slicingTable, b) + }) +} + +func TestArchCastagnoli(t *testing.T) { + if !archAvailableCastagnoli() { + t.Skip("Arch-specific Castagnoli not available.") + } + archInitCastagnoli() + slicingTable := slicingMakeTable(Castagnoli) + testCrossCheck(t, archUpdateCastagnoli, func(crc uint32, b []byte) uint32 { + return slicingUpdate(crc, slicingTable, b) + }) +} + +func TestGolden(t *testing.T) { + testGoldenIEEE(t, ChecksumIEEE) + + // Some implementations have special code to deal with misaligned + // data; test that as well. 
+ for delta := 1; delta <= 7; delta++ { + testGoldenIEEE(t, func(b []byte) uint32 { + ieee := NewIEEE() + d := delta + if d >= len(b) { + d = len(b) + } + ieee.Write(b[:d]) + ieee.Write(b[d:]) + return ieee.Sum32() + }) + } + + castagnoliTab := MakeTable(Castagnoli) + if castagnoliTab == nil { + t.Errorf("nil Castagnoli Table") + } + + testGoldenCastagnoli(t, func(b []byte) uint32 { + castagnoli := New(castagnoliTab) + castagnoli.Write(b) + return castagnoli.Sum32() + }) + + // Some implementations have special code to deal with misaligned + // data; test that as well. + for delta := 1; delta <= 7; delta++ { + testGoldenCastagnoli(t, func(b []byte) uint32 { + castagnoli := New(castagnoliTab) + d := delta + if d >= len(b) { + d = len(b) + } + castagnoli.Write(b[:d]) + castagnoli.Write(b[d:]) + return castagnoli.Sum32() + }) + } +} + +func BenchmarkIEEECrc40B(b *testing.B) { + benchmark(b, NewIEEE(), 40, 0) +} + +func BenchmarkIEEECrc1KB(b *testing.B) { + benchmark(b, NewIEEE(), 1<<10, 0) +} + +func BenchmarkIEEECrc4KB(b *testing.B) { + benchmark(b, NewIEEE(), 4<<10, 0) +} + +func BenchmarkIEEECrc32KB(b *testing.B) { + benchmark(b, NewIEEE(), 32<<10, 0) +} + +func BenchmarkCastagnoliCrc15B(b *testing.B) { + benchmark(b, New(MakeTable(Castagnoli)), 15, 0) +} + +func BenchmarkCastagnoliCrc15BMisaligned(b *testing.B) { + benchmark(b, New(MakeTable(Castagnoli)), 15, 1) +} + +func BenchmarkCastagnoliCrc40B(b *testing.B) { + benchmark(b, New(MakeTable(Castagnoli)), 40, 0) +} + +func BenchmarkCastagnoliCrc40BMisaligned(b *testing.B) { + benchmark(b, New(MakeTable(Castagnoli)), 40, 1) +} + +func BenchmarkCastagnoliCrc512(b *testing.B) { + benchmark(b, New(MakeTable(Castagnoli)), 512, 0) +} + +func BenchmarkCastagnoliCrc512Misaligned(b *testing.B) { + benchmark(b, New(MakeTable(Castagnoli)), 512, 1) +} + +func BenchmarkCastagnoliCrc1KB(b *testing.B) { + benchmark(b, New(MakeTable(Castagnoli)), 1<<10, 0) +} + +func BenchmarkCastagnoliCrc1KBMisaligned(b *testing.B) { + benchmark(b, New(MakeTable(Castagnoli)), 1<<10, 1) +} + +func BenchmarkCastagnoliCrc4KB(b *testing.B) { + benchmark(b, New(MakeTable(Castagnoli)), 4<<10, 0) +} + +func BenchmarkCastagnoliCrc4KBMisaligned(b *testing.B) { + benchmark(b, New(MakeTable(Castagnoli)), 4<<10, 1) +} + +func BenchmarkCastagnoliCrc32KB(b *testing.B) { + benchmark(b, New(MakeTable(Castagnoli)), 32<<10, 0) +} + +func BenchmarkCastagnoliCrc32KBMisaligned(b *testing.B) { + benchmark(b, New(MakeTable(Castagnoli)), 32<<10, 1) +} + +func benchmark(b *testing.B, h hash.Hash32, n, alignment int64) { + b.SetBytes(n) + data := make([]byte, n+alignment) + data = data[alignment:] + for i := range data { + data[i] = byte(i) + } + in := make([]byte, 0, h.Size()) + + // Warm up + h.Reset() + h.Write(data) + h.Sum(in) + + b.ResetTimer() + for i := 0; i < b.N; i++ { + h.Reset() + h.Write(data) + h.Sum(in) + } +} diff --git a/vendor/github.com/klauspost/crc32/example_test.go b/vendor/github.com/klauspost/crc32/example_test.go new file mode 100644 index 0000000..621bf83 --- /dev/null +++ b/vendor/github.com/klauspost/crc32/example_test.go @@ -0,0 +1,28 @@ +// Copyright 2015 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package crc32_test + +import ( + "fmt" + "hash/crc32" +) + +func ExampleMakeTable() { + // In this package, the CRC polynomial is represented in reversed notation, + // or LSB-first representation. 
+ // + // LSB-first representation is a hexadecimal number with n bits, in which the + // most significant bit represents the coefficient of x⁰ and the least significant + // bit represents the coefficient of xⁿ⁻¹ (the coefficient for xⁿ is implicit). + // + // For example, CRC32-Q, as defined by the following polynomial, + // x³²+ x³¹+ x²⁴+ x²²+ x¹⁶+ x¹⁴+ x⁸+ x⁷+ x⁵+ x³+ x¹+ x⁰ + // has the reversed notation 0b11010101100000101000001010000001, so the value + // that should be passed to MakeTable is 0xD5828281. + crc32q := crc32.MakeTable(0xD5828281) + fmt.Printf("%08x\n", crc32.Checksum([]byte("Hello world"), crc32q)) + // Output: + // 2964d064 +} diff --git a/vendor/github.com/thehowl/fasthttprouter/.gitignore b/vendor/github.com/thehowl/fasthttprouter/.gitignore new file mode 100644 index 0000000..a2153bf --- /dev/null +++ b/vendor/github.com/thehowl/fasthttprouter/.gitignore @@ -0,0 +1,6 @@ +*.swp + +coverage.out +examples/basic/basic +examples/hosts/hosts +examples/auth/auth diff --git a/vendor/github.com/thehowl/fasthttprouter/.travis.yml b/vendor/github.com/thehowl/fasthttprouter/.travis.yml new file mode 100644 index 0000000..9892e41 --- /dev/null +++ b/vendor/github.com/thehowl/fasthttprouter/.travis.yml @@ -0,0 +1,20 @@ +sudo: false +language: go + +go: + - 1.7 + - 1.8 + - 1.9 + - tip + +before_install: + - go get golang.org/x/tools/cmd/cover + - go get github.com/mattn/goveralls + - go get github.com/golang/lint/golint + +script: + - go test -v -covermode=count -coverprofile=coverage.out + - go vet ./... + - test -z "$(gofmt -d -s . | tee /dev/stderr)" + - test -z "$(golint ./... | tee /dev/stderr)" + - $HOME/gopath/bin/goveralls -coverprofile=coverage.out -service=travis-ci diff --git a/vendor/github.com/thehowl/fasthttprouter/HttpRouterLicense b/vendor/github.com/thehowl/fasthttprouter/HttpRouterLicense new file mode 100644 index 0000000..b829abc --- /dev/null +++ b/vendor/github.com/thehowl/fasthttprouter/HttpRouterLicense @@ -0,0 +1,24 @@ +Copyright (c) 2013 Julien Schmidt. All rights reserved. + + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + * Redistributions of source code must retain the above copyright + notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above copyright + notice, this list of conditions and the following disclaimer in the + documentation and/or other materials provided with the distribution. + * The names of the contributors may not be used to endorse or promote + products derived from this software without specific prior written + permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND +ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +DISCLAIMED. IN NO EVENT SHALL JULIEN SCHMIDT BE LIABLE FOR ANY +DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES +(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; +LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND +ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS +SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
\ No newline at end of file diff --git a/vendor/github.com/thehowl/fasthttprouter/LICENSE b/vendor/github.com/thehowl/fasthttprouter/LICENSE new file mode 100644 index 0000000..639f781 --- /dev/null +++ b/vendor/github.com/thehowl/fasthttprouter/LICENSE @@ -0,0 +1,29 @@ +Copyright (c) 2017 Morgan Bazalgette +Copyright (c) 2015-2016, 招牌疯子 +All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + +* Redistributions of source code must retain the above copyright notice, this + list of conditions and the following disclaimer. + +* Redistributions in binary form must reproduce the above copyright notice, + this list of conditions and the following disclaimer in the documentation + and/or other materials provided with the distribution. + +* Neither the name of uq nor the names of its + contributors may be used to endorse or promote products derived from + this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE +FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + diff --git a/vendor/github.com/thehowl/fasthttprouter/README.md b/vendor/github.com/thehowl/fasthttprouter/README.md new file mode 100644 index 0000000..a09fe9c --- /dev/null +++ b/vendor/github.com/thehowl/fasthttprouter/README.md @@ -0,0 +1,220 @@ +# FastHttpRouter +[![Build Status](https://travis-ci.org/thehowl/fasthttprouter.svg?branch=master)](https://travis-ci.org/thehowl/fasthttprouter) +[![Coverage Status](https://coveralls.io/repos/thehowl/fasthttprouter/badge.svg?branch=master&service=github)](https://coveralls.io/github/thehowl/fasthttprouter?branch=master) +[![Go Report Card](https://goreportcard.com/badge/github.com/thehowl/fasthttprouter)](https://goreportcard.com/report/github.com/thehowl/fasthttprouter) +[![GoDoc](http://godoc.org/github.com/thehowl/fasthttprouter?status.svg)](http://godoc.org/github.com/thehowl/fasthttprouter) +[![GitHub release](https://img.shields.io/github/release/thehowl/fasthttprouter.svg)](https://github.com/thehowl/fasthttprouter/releases) + +**thehowl/fasthttprouter** is a fork from the original +[fasthttprouter](https://github.com/buaazp/fasthttprouter), which in turn is a +fork from [httprouter](https://github.com/julienschmidt/httprouter). The only +advantage that this fork brings compared to buaazp's code is that instead of +using the original fasthttp repo, we're using +[@erikdubbelboer's fork](https://github.com/erikdubbelboer/fasthttp), since it +is more updated than the main fork and includes essential patches for the code +that for some reason valyala is not merging. 
+ +FastHttpRouter is forked from [httprouter](https://github.com/julienschmidt/httprouter) which is a lightweight high performance HTTP request router +(also called *multiplexer* or just *mux* for short) for [fasthttp](https://github.com/erikdubbelboer/fasthttp). + +This router is optimized for high performance and a small memory footprint. It scales well even with very long paths and a large number of routes. A compressing dynamic trie (radix tree) structure is used for efficient matching. + +#### License Related + +Following the original project, the original license of julienschmidt/httprouter +is kept at [HttpRouterLicense](HttpRouterLicense), and buaazp placed his license +on top of that inside of [LICENSE](LICENSE). I simply added a copyright clause +on top of it. + +tl;dr: BSD 3 clause, three times + +#### Releases + +- [2016.10.24] [v0.1.0](https://github.com/thehowl/fasthttprouter/releases/tag/v0.1.0) The first release version of `fasthttprouter`. + +## Features + +**Best Performance:** FastHttpRouter is **one of the fastest** go web frameworks in the [go-web-framework-benchmark](https://github.com/smallnest/go-web-framework-benchmark). Even faster than httprouter itself. + +- Basic Test: The first test case is to mock 0 ms, 10 ms, 100 ms, 500 ms processing time in handlers. The concurrency clients are 5000. + +![](http://ww3.sinaimg.cn/large/4c422e03jw1f2p6nyqh9ij20mm0aktbj.jpg) + +- Concurrency Test: In 30 ms processing time, the tets result for 100, 1000, 5000 clients is: + +![](http://ww4.sinaimg.cn/large/4c422e03jw1f2p6o1cdbij20lk09sack.jpg) + +See below for technical details of the implementation. + +**Only explicit matches:** With other routers, like [http.ServeMux](http://golang.org/pkg/net/http/#ServeMux), +a requested URL path could match multiple patterns. Therefore they have some +awkward pattern priority rules, like *longest match* or *first registered, +first matched*. By design of this router, a request can only match exactly one +or no route. As a result, there are also no unintended matches, which makes it +great for SEO and improves the user experience. + +**Stop caring about trailing slashes:** Choose the URL style you like, the +router automatically redirects the client if a trailing slash is missing or if +there is one extra. Of course it only does so, if the new path has a handler. +If you don't like it, you can [turn off this behavior](http://godoc.org/github.com/thehowl/fasthttprouter#Router.RedirectTrailingSlash). + +**Path auto-correction:** Besides detecting the missing or additional trailing +slash at no extra cost, the router can also fix wrong cases and remove +superfluous path elements (like `../` or `//`). +Is [CAPTAIN CAPS LOCK](http://www.urbandictionary.com/define.php?term=Captain+Caps+Lock) one of your users? +FastHttpRouter can help him by making a case-insensitive look-up and redirecting him +to the correct URL. + +**Parameters in your routing pattern:** Stop parsing the requested URL path, +just give the path segment a name and the router delivers the dynamic value to +you. Because of the design of the router, path parameters are very cheap. + +**Zero Garbage:** The matching and dispatching process generates zero bytes of +garbage. In fact, the only heap allocations that are made, is by building the +slice of the key-value pairs for path parameters. If the request path contains +no parameters, not a single heap allocation is necessary. 
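+As a quick illustration of the behaviour toggles described above (trailing-slash redirects, path auto-correction, cheap named parameters), here is a minimal sketch. The field names follow the httprouter lineage this router is forked from and the godoc links in this document; treat the exact names and defaults as assumptions rather than a verified API reference:
+
+```go
+package main
+
+import (
+	"log"
+
+	"github.com/erikdubbelboer/fasthttp"
+	"github.com/thehowl/fasthttprouter"
+)
+
+func main() {
+	router := fasthttprouter.New()
+
+	// Assumed fields, mirroring httprouter: turn off automatic
+	// trailing-slash redirects and keep case/path auto-correction on.
+	router.RedirectTrailingSlash = false
+	router.RedirectFixedPath = true
+
+	router.GET("/ping", func(ctx *fasthttp.RequestCtx) {
+		ctx.WriteString("pong\n")
+	})
+
+	log.Fatal(fasthttp.ListenAndServe(":8080", router.Handler))
+}
+```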
+ +**No more server crashes:** You can set a [Panic handler](http://godoc.org/github.com/thehowl/fasthttprouter#Router.PanicHandler) to deal with panics +occurring while handling an HTTP request. The router then recovers and lets the +PanicHandler log what happened and deliver a nice error page. + +**Perfect for APIs:** The router design encourages building sensible, hierarchical +RESTful APIs. Moreover, it has built-in native support for [OPTIONS requests](http://zacstewart.com/2012/04/14/http-options-method.html) +and `405 Method Not Allowed` replies. + +Of course you can also set **custom [NotFound](http://godoc.org/github.com/thehowl/fasthttprouter#Router.NotFound) and [MethodNotAllowed](http://godoc.org/github.com/thehowl/fasthttprouter#Router.MethodNotAllowed) handlers** and [**serve static files**](http://godoc.org/github.com/thehowl/fasthttprouter#Router.ServeFiles). + +## Usage + +This is just a quick introduction, view the [GoDoc](http://godoc.org/github.com/thehowl/fasthttprouter) for details: + +Let's start with a trivial example: + +```go +package main + +import ( + "fmt" + "log" + + "github.com/thehowl/fasthttprouter" + "github.com/erikdubbelboer/fasthttp" +) + +func Index(ctx *fasthttp.RequestCtx) { + fmt.Fprint(ctx, "Welcome!\n") +} + +func Hello(ctx *fasthttp.RequestCtx) { + fmt.Fprintf(ctx, "hello, %s!\n", ctx.UserValue("name")) +} + +func main() { + router := fasthttprouter.New() + router.GET("/", Index) + router.GET("/hello/:name", Hello) + + log.Fatal(fasthttp.ListenAndServe(":8080", router.Handler)) +} +``` + +### Named parameters + +As you can see, `:name` is a *named parameter*. The values are accessible via `RequestCtx.UserValue`. You can get the value of a parameter by calling `ctx.UserValue("name")`. + +Named parameters only match a single path segment: + +``` +Pattern: /user/:user + + /user/gordon match + /user/you match + /user/gordon/profile no match + /user/ no match +``` + +**Note:** Since this router has only explicit matches, you cannot register static routes and parameters for the same path segment. For example, you cannot register the patterns `/user/new` and `/user/:user` for the same request method at the same time. The routing of different request methods is independent of each other. + +### Catch-All parameters + +The second type are *catch-all* parameters, which have the form `*name`. +As the name suggests, they match everything. +Therefore they must always be at the **end** of the pattern: + +``` +Pattern: /src/*filepath + + /src/ match + /src/somefile.go match + /src/subdir/somefile.go match +``` + +## How does it work? + +The router relies on a tree structure which makes heavy use of *common prefixes*; it is basically a *compact* [*prefix tree*](https://en.wikipedia.org/wiki/Trie) (or just [*Radix tree*](https://en.wikipedia.org/wiki/Radix_tree)). Nodes with a common prefix also share a common parent. Here is a short example of what the routing tree for the `GET` request method could look like: + +``` +Priority Path Handle +9 \ *<1> +3 ├s nil +2 |├earch\ *<2> +1 |└upport\ *<3> +2 ├blog\ *<4> +1 | └:post nil +1 | └\ *<5> +2 ├about-us\ *<6> +1 | └team\ *<7> +1 └contact\ *<8> +``` + +Every `*` represents the memory address of a handler function (a pointer). If you follow a path through the tree from the root to the leaf, you get the complete route path, e.g. `\blog\:post\`, where `:post` is just a placeholder ([*parameter*](#named-parameters)) for an actual post name.
Unlike hash-maps, a tree structure also allows us to use dynamic parts like the `:post` parameter, since we actually match against the routing patterns instead of just comparing hashes. [As benchmarks show][benchmark], this works very well and efficiently. + +Since URL paths have a hierarchical structure and make use only of a limited set of characters (byte values), it is very likely that there are a lot of common prefixes. This allows us to easily reduce the routing into ever smaller problems. Moreover, the router manages a separate tree for every request method. For one thing it is more space efficient than holding a method->handle map in every single node, for another thing it also allows us to greatly reduce the routing problem before even starting the look-up in the prefix-tree. + +For even better scalability, the child nodes on each tree level are ordered by priority, where the priority is just the number of handles registered in sub nodes (children, grandchildren, and so on). This helps in two ways: + +1. Nodes which are part of the most routing paths are evaluated first. This helps make as many routes as possible reachable as fast as possible. +2. It is some sort of cost compensation. The longest reachable path (highest cost) can always be evaluated first. The following scheme visualizes the tree structure. Nodes are evaluated from top to bottom and from left to right. + +``` +├------------ +├--------- +├----- +├---- +├-- +├-- +└- +``` + +## Why doesn't this work with `http.Handler`? + +Because fasthttp doesn't provide http.Handler. See this [description](https://github.com/erikdubbelboer/fasthttp#switching-from-nethttp-to-fasthttp). + +Fasthttp works with [RequestHandler](https://godoc.org/github.com/erikdubbelboer/fasthttp#RequestHandler) functions instead of objects implementing the Handler interface. So FastHttpRouter provides a [Handler](https://godoc.org/github.com/thehowl/fasthttprouter#Router.Handler) method that can be passed directly to fasthttp.ListenAndServe. + +Just try it out for yourself; the usage of FastHttpRouter is very straightforward. The package is compact and minimalistic, but also probably one of the easiest routers to set up. + +## Where can I find Middleware *X*? + +This package just provides a very efficient request router with a few extra features. The router is just a [`fasthttp.RequestHandler`](https://godoc.org/github.com/erikdubbelboer/fasthttp#RequestHandler); you can chain any `fasthttp.RequestHandler` compatible middleware before the router. Or you could [just write your own](https://justinas.org/writing-http-middleware-in-go/), it's very easy! + +Have a look at these midware examples: + +- [Auth Midware](examples/auth) +- [Multi Hosts Midware](examples/hosts) + +## Chaining with the NotFound handler + +**NOTE: It might be required to set [Router.HandleMethodNotAllowed](http://godoc.org/github.com/thehowl/fasthttprouter#Router.HandleMethodNotAllowed) to `false` to avoid problems.** + +You can use another [http.Handler](http://golang.org/pkg/net/http/#Handler), for example another router, to handle requests which could not be matched by this router by using the [Router.NotFound](http://godoc.org/github.com/thehowl/fasthttprouter#Router.NotFound) handler. This allows chaining; a minimal sketch follows below.
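To make the chaining pattern concrete, here is a minimal sketch under the same assumptions as the examples above (routes, port, and handler bodies are illustrative only). It wires a second router into the first router's `NotFound` hook, the same pattern exercised by `TestRouterChaining` in router_test.go further down in this diff.

```go
package main

import (
	"fmt"
	"log"

	"github.com/erikdubbelboer/fasthttp"
	"github.com/thehowl/fasthttprouter"
)

func main() {
	// Fallback router: handles whatever the primary router cannot match.
	fallback := fasthttprouter.New()
	fallback.GET("/legacy", func(ctx *fasthttp.RequestCtx) {
		fmt.Fprint(ctx, "legacy endpoint\n")
	})

	// Primary router.
	primary := fasthttprouter.New()
	primary.GET("/api/ping", func(ctx *fasthttp.RequestCtx) {
		fmt.Fprint(ctx, "pong\n")
	})

	// Chain the routers: unmatched requests fall through to the fallback.
	// Disabling HandleMethodNotAllowed keeps a 405 reply from short-circuiting
	// the fallback, per the note above.
	primary.HandleMethodNotAllowed = false
	primary.NotFound = fallback.Handler

	log.Fatal(fasthttp.ListenAndServe(":8080", primary.Handler))
}
```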
+ +### Static files +The `NotFound` handler can for example be used to serve static files from the root path `/` (like an index.html file along with other assets): + +```go +// Serve static files from the ./public directory +router.NotFound = fasthttp.FSHandler("./public", 0) +``` + +But this approach sidesteps the strict core rules of this router to avoid routing problems. A cleaner approach is to use a distinct sub-path for serving files, like `/static/*filepath` or `/files/*filepath`. \ No newline at end of file diff --git a/vendor/github.com/thehowl/fasthttprouter/examples/auth/README.md b/vendor/github.com/thehowl/fasthttprouter/examples/auth/README.md new file mode 100644 index 0000000..40b2990 --- /dev/null +++ b/vendor/github.com/thehowl/fasthttprouter/examples/auth/README.md @@ -0,0 +1,89 @@ +# Example of FastHttpRouter + +These examples show you the usage of `fasthttprouter`. You can easily build a web application with it. Or you can make your own midwares such as custom logger, metrics, or any one you want. + +### Basic Authentication + +Basic Authentication (RFC 2617) for handles: + +```go +package main + +import ( + "encoding/base64" + "fmt" + "log" + "strings" + + "github.com/thehowl/fasthttprouter" + "github.com/erikdubbelboer/fasthttp" +) + +// basicAuth returns the username and password provided in the request's +// Authorization header, if the request uses HTTP Basic Authentication. +// See RFC 2617, Section 2. +func basicAuth(ctx *fasthttp.RequestCtx) (username, password string, ok bool) { + auth := ctx.Request.Header.Peek("Authorization") + if auth == nil { + return + } + return parseBasicAuth(string(auth)) +} + +// parseBasicAuth parses an HTTP Basic Authentication string. +// "Basic QWxhZGRpbjpvcGVuIHNlc2FtZQ==" returns ("Aladdin", "open sesame", true). +func parseBasicAuth(auth string) (username, password string, ok bool) { + const prefix = "Basic " + if !strings.HasPrefix(auth, prefix) { + return + } + c, err := base64.StdEncoding.DecodeString(auth[len(prefix):]) + if err != nil { + return + } + cs := string(c) + s := strings.IndexByte(cs, ':') + if s < 0 { + return + } + return cs[:s], cs[s+1:], true +} + +// BasicAuth is the basic auth handler +func BasicAuth(h fasthttp.RequestHandler, requiredUser, requiredPassword string) fasthttp.RequestHandler { + return fasthttp.RequestHandler(func(ctx *fasthttp.RequestCtx) { + // Get the Basic Authentication credentials + user, password, hasAuth := basicAuth(ctx) + + if hasAuth && user == requiredUser && password == requiredPassword { + // Delegate request to the given handle + h(ctx) + return + } + // Request Basic Authentication otherwise + ctx.Error(fasthttp.StatusMessage(fasthttp.StatusUnauthorized), fasthttp.StatusUnauthorized) + ctx.Response.Header.Set("WWW-Authenticate", "Basic realm=Restricted") + }) +} + +// Index is the index handler +func Index(ctx *fasthttp.RequestCtx) { + fmt.Fprint(ctx, "Not protected!\n") +} + +// Protected is the Protected handler +func Protected(ctx *fasthttp.RequestCtx) { + fmt.Fprint(ctx, "Protected!\n") +} + +func main() { + user := "gordon" + pass := "secret!" 
+ + router := fasthttprouter.New() + router.GET("/", Index) + router.GET("/protected/", BasicAuth(Protected, user, pass)) + + log.Fatal(fasthttp.ListenAndServe(":8080", router.Handler)) +} +``` diff --git a/vendor/github.com/thehowl/fasthttprouter/examples/auth/auth.go b/vendor/github.com/thehowl/fasthttprouter/examples/auth/auth.go new file mode 100644 index 0000000..332cb83 --- /dev/null +++ b/vendor/github.com/thehowl/fasthttprouter/examples/auth/auth.go @@ -0,0 +1,79 @@ +package main + +import ( + "encoding/base64" + "fmt" + "log" + "strings" + + "github.com/erikdubbelboer/fasthttp" + "github.com/thehowl/fasthttprouter" +) + +// basicAuth returns the username and password provided in the request's +// Authorization header, if the request uses HTTP Basic Authentication. +// See RFC 2617, Section 2. +func basicAuth(ctx *fasthttp.RequestCtx) (username, password string, ok bool) { + auth := ctx.Request.Header.Peek("Authorization") + if auth == nil { + return + } + return parseBasicAuth(string(auth)) +} + +// parseBasicAuth parses an HTTP Basic Authentication string. +// "Basic QWxhZGRpbjpvcGVuIHNlc2FtZQ==" returns ("Aladdin", "open sesame", true). +func parseBasicAuth(auth string) (username, password string, ok bool) { + const prefix = "Basic " + if !strings.HasPrefix(auth, prefix) { + return + } + c, err := base64.StdEncoding.DecodeString(auth[len(prefix):]) + if err != nil { + return + } + cs := string(c) + s := strings.IndexByte(cs, ':') + if s < 0 { + return + } + return cs[:s], cs[s+1:], true +} + +// BasicAuth is the basic auth handler +func BasicAuth(h fasthttp.RequestHandler, requiredUser, requiredPassword string) fasthttp.RequestHandler { + return fasthttp.RequestHandler(func(ctx *fasthttp.RequestCtx) { + // Get the Basic Authentication credentials + user, password, hasAuth := basicAuth(ctx) + + if hasAuth && user == requiredUser && password == requiredPassword { + // Delegate request to the given handle + h(ctx) + return + } + // Request Basic Authentication otherwise + ctx.Error(fasthttp.StatusMessage(fasthttp.StatusUnauthorized), fasthttp.StatusUnauthorized) + ctx.Response.Header.Set("WWW-Authenticate", "Basic realm=Restricted") + }) +} + +// Index is the index handler +func Index(ctx *fasthttp.RequestCtx) { + fmt.Fprint(ctx, "Not protected!\n") +} + +// Protected is the Protected handler +func Protected(ctx *fasthttp.RequestCtx) { + fmt.Fprint(ctx, "Protected!\n") +} + +func main() { + user := "gordon" + pass := "secret!" + + router := fasthttprouter.New() + router.GET("/", Index) + router.GET("/protected/", BasicAuth(Protected, user, pass)) + + log.Fatal(fasthttp.ListenAndServe(":8080", router.Handler)) +} diff --git a/vendor/github.com/thehowl/fasthttprouter/examples/basic/README.md b/vendor/github.com/thehowl/fasthttprouter/examples/basic/README.md new file mode 100644 index 0000000..accb9cb --- /dev/null +++ b/vendor/github.com/thehowl/fasthttprouter/examples/basic/README.md @@ -0,0 +1,54 @@ +# Example of FastHttpRouter + +These examples show you the usage of `fasthttprouter`. You can easily build a web application with it. Or you can make your own midwares such as custom logger, metrics, or any one you want. + +### Basic example + +This is just a quick introduction, view the [GoDoc](http://godoc.org/github.com/thehowl/fasthttprouter) for details. 
+ +Let's start with a trivial example: + +```go +package main + +import ( + "fmt" + "log" + + "github.com/thehowl/fasthttprouter" + "github.com/erikdubbelboer/fasthttp" +) + +// Index is the index handler +func Index(ctx *fasthttp.RequestCtx) { + fmt.Fprint(ctx, "Welcome!\n") +} + +// Hello is the Hello handler +func Hello(ctx *fasthttp.RequestCtx) { + fmt.Fprintf(ctx, "hello, %s!\n", ctx.UserValue("name")) +} + +// MultiParams is the multi params handler +func MultiParams(ctx *fasthttp.RequestCtx) { + fmt.Fprintf(ctx, "hi, %s, %s!\n", ctx.UserValue("name"), ctx.UserValue("word")) +} + +// QueryArgs is used for uri query args test #11: +// if the req uri is /ping?name=foo, output: Pong! foo +// if the req uri is /piNg?name=foo, redirect to /ping, output: Pong! +func QueryArgs(ctx *fasthttp.RequestCtx) { + name := ctx.QueryArgs().Peek("name") + fmt.Fprintf(ctx, "Pong! %s\n", string(name)) +} + +func main() { + router := fasthttprouter.New() + router.GET("/", Index) + router.GET("/hello/:name", Hello) + router.GET("/multi/:name/:word", MultiParams) + router.GET("/ping", QueryArgs) + + log.Fatal(fasthttp.ListenAndServe(":8080", router.Handler)) +} +``` diff --git a/vendor/github.com/thehowl/fasthttprouter/examples/basic/basic.go b/vendor/github.com/thehowl/fasthttprouter/examples/basic/basic.go new file mode 100644 index 0000000..ce61782 --- /dev/null +++ b/vendor/github.com/thehowl/fasthttprouter/examples/basic/basic.go @@ -0,0 +1,42 @@ +package main + +import ( + "fmt" + "log" + + "github.com/erikdubbelboer/fasthttp" + "github.com/thehowl/fasthttprouter" +) + +// Index is the index handler +func Index(ctx *fasthttp.RequestCtx) { + fmt.Fprint(ctx, "Welcome!\n") +} + +// Hello is the Hello handler +func Hello(ctx *fasthttp.RequestCtx) { + fmt.Fprintf(ctx, "hello, %s!\n", ctx.UserValue("name")) +} + +// MultiParams is the multi params handler +func MultiParams(ctx *fasthttp.RequestCtx) { + fmt.Fprintf(ctx, "hi, %s, %s!\n", ctx.UserValue("name"), ctx.UserValue("word")) +} + +// QueryArgs is used for uri query args test #11: +// if the req uri is /ping?name=foo, output: Pong! foo +// if the req uri is /piNg?name=foo, redirect to /ping, output: Pong! +func QueryArgs(ctx *fasthttp.RequestCtx) { + name := ctx.QueryArgs().Peek("name") + fmt.Fprintf(ctx, "Pong! %s\n", string(name)) +} + +func main() { + router := fasthttprouter.New() + router.GET("/", Index) + router.GET("/hello/:name", Hello) + router.GET("/multi/:name/:word", MultiParams) + router.GET("/ping", QueryArgs) + + log.Fatal(fasthttp.ListenAndServe(":8080", router.Handler)) +} diff --git a/vendor/github.com/thehowl/fasthttprouter/examples/hosts/README.md b/vendor/github.com/thehowl/fasthttprouter/examples/hosts/README.md new file mode 100644 index 0000000..f8f8db8 --- /dev/null +++ b/vendor/github.com/thehowl/fasthttprouter/examples/hosts/README.md @@ -0,0 +1,63 @@ +# Example of FastHttpRouter + +These examples show you the usage of `fasthttprouter`. You can easily build a web application with it. Or you can make your own midwares such as custom logger, metrics, or any one you want. + +### Multi-domain / Sub-domains + +Here is a quick example: Does your server serve multiple domains / hosts? +You want to use sub-domains? +Define a router per host! 
+ +```go +package main + +import ( + "fmt" + "log" + + "github.com/thehowl/fasthttprouter" + "github.com/erikdubbelboer/fasthttp" +) + +// Index is the index handler +func Index(ctx *fasthttp.RequestCtx) { + fmt.Fprint(ctx, "Welcome!\n") +} + +// Hello is the Hello handler +func Hello(ctx *fasthttp.RequestCtx) { + fmt.Fprintf(ctx, "hello, %s!\n", ctx.UserValue("name")) +} + +// HostSwitch is the host-handler map +// We need an object that implements the fasthttp.RequestHandler interface. +// We just use a map here, in which we map host names (with port) to fasthttp.RequestHandlers +type HostSwitch map[string]fasthttp.RequestHandler + +// CheckHost Implement a CheckHost method on our new type +func (hs HostSwitch) CheckHost(ctx *fasthttp.RequestCtx) { + // Check if a http.Handler is registered for the given host. + // If yes, use it to handle the request. + if handler := hs[string(ctx.Host())]; handler != nil { + handler(ctx) + } else { + // Handle host names for wich no handler is registered + ctx.Error("Forbidden", 403) // Or Redirect? + } +} + +func main() { + // Initialize a router as usual + router := fasthttprouter.New() + router.GET("/", Index) + router.GET("/hello/:name", Hello) + + // Make a new HostSwitch and insert the router (our http handler) + // for example.com and port 12345 + hs := make(HostSwitch) + hs["example.com:12345"] = router.Handler + + // Use the HostSwitch to listen and serve on port 12345 + log.Fatal(fasthttp.ListenAndServe(":12345", hs.CheckHost)) +} +``` diff --git a/vendor/github.com/thehowl/fasthttprouter/examples/hosts/hosts.go b/vendor/github.com/thehowl/fasthttprouter/examples/hosts/hosts.go new file mode 100644 index 0000000..a3aed52 --- /dev/null +++ b/vendor/github.com/thehowl/fasthttprouter/examples/hosts/hosts.go @@ -0,0 +1,51 @@ +package main + +import ( + "fmt" + "log" + + "github.com/erikdubbelboer/fasthttp" + "github.com/thehowl/fasthttprouter" +) + +// Index is the index handler +func Index(ctx *fasthttp.RequestCtx) { + fmt.Fprint(ctx, "Welcome!\n") +} + +// Hello is the Hello handler +func Hello(ctx *fasthttp.RequestCtx) { + fmt.Fprintf(ctx, "hello, %s!\n", ctx.UserValue("name")) +} + +// HostSwitch is the host-handler map +// We need an object that implements the fasthttp.RequestHandler interface. +// We just use a map here, in which we map host names (with port) to fasthttp.RequestHandlers +type HostSwitch map[string]fasthttp.RequestHandler + +// CheckHost Implement a CheckHost method on our new type +func (hs HostSwitch) CheckHost(ctx *fasthttp.RequestCtx) { + // Check if a http.Handler is registered for the given host. + // If yes, use it to handle the request. + if handler := hs[string(ctx.Host())]; handler != nil { + handler(ctx) + } else { + // Handle host names for wich no handler is registered + ctx.Error("Forbidden", 403) // Or Redirect? 
+ } +} + +func main() { + // Initialize a router as usual + router := fasthttprouter.New() + router.GET("/", Index) + router.GET("/hello/:name", Hello) + + // Make a new HostSwitch and insert the router (our http handler) + // for example.com and port 12345 + hs := make(HostSwitch) + hs["example.com:12345"] = router.Handler + + // Use the HostSwitch to listen and serve on port 12345 + log.Fatal(fasthttp.ListenAndServe(":12345", hs.CheckHost)) +} diff --git a/vendor/github.com/thehowl/fasthttprouter/path.go b/vendor/github.com/thehowl/fasthttprouter/path.go new file mode 100644 index 0000000..77f6064 --- /dev/null +++ b/vendor/github.com/thehowl/fasthttprouter/path.go @@ -0,0 +1,123 @@ +// Copyright 2013 Julien Schmidt. All rights reserved. +// Based on the path package, Copyright 2009 The Go Authors. +// Use of this source code is governed by a BSD-style license that can be found +// in the LICENSE file. + +package fasthttprouter + +// CleanPath is the URL version of path.Clean, it returns a canonical URL path +// for p, eliminating . and .. elements. +// +// The following rules are applied iteratively until no further processing can +// be done: +// 1. Replace multiple slashes with a single slash. +// 2. Eliminate each . path name element (the current directory). +// 3. Eliminate each inner .. path name element (the parent directory) +// along with the non-.. element that precedes it. +// 4. Eliminate .. elements that begin a rooted path: +// that is, replace "/.." by "/" at the beginning of a path. +// +// If the result of this process is an empty string, "/" is returned +func CleanPath(p string) string { + // Turn empty string into "/" + if p == "" { + return "/" + } + + n := len(p) + var buf []byte + + // Invariants: + // reading from path; r is index of next byte to process. + // writing to buf; w is index of next byte to write. + + // path must start with '/' + r := 1 + w := 1 + + if p[0] != '/' { + r = 0 + buf = make([]byte, n+1) + buf[0] = '/' + } + + trailing := n > 2 && p[n-1] == '/' + + // A bit more clunky without a 'lazybuf' like the path package, but the loop + // gets completely inlined (bufApp). So in contrast to the path package this + // loop has no expensive function calls (except 1x make) + + for r < n { + switch { + case p[r] == '/': + // empty path element, trailing slash is added after the end + r++ + + case p[r] == '.' && r+1 == n: + trailing = true + r++ + + case p[r] == '.' && p[r+1] == '/': + // . element + r++ + + case p[r] == '.' && p[r+1] == '.' && (r+2 == n || p[r+2] == '/'): + // .. element: remove to last / + r += 2 + + if w > 1 { + // can backtrack + w-- + + if buf == nil { + for w > 1 && p[w] != '/' { + w-- + } + } else { + for w > 1 && buf[w] != '/' { + w-- + } + } + } + + default: + // real path element. 
+ // add slash if needed + if w > 1 { + bufApp(&buf, p, w, '/') + w++ + } + + // copy element + for r < n && p[r] != '/' { + bufApp(&buf, p, w, p[r]) + w++ + r++ + } + } + } + + // re-append trailing slash + if trailing && w > 1 { + bufApp(&buf, p, w, '/') + w++ + } + + if buf == nil { + return p[:w] + } + return string(buf[:w]) +} + +// internal helper to lazily create a buffer if necessary +func bufApp(buf *[]byte, s string, w int, c byte) { + if *buf == nil { + if s[w] == c { + return + } + + *buf = make([]byte, len(s)) + copy(*buf, s[:w]) + } + (*buf)[w] = c +} diff --git a/vendor/github.com/thehowl/fasthttprouter/path_test.go b/vendor/github.com/thehowl/fasthttprouter/path_test.go new file mode 100644 index 0000000..fc280de --- /dev/null +++ b/vendor/github.com/thehowl/fasthttprouter/path_test.go @@ -0,0 +1,92 @@ +// Copyright 2013 Julien Schmidt. All rights reserved. +// Based on the path package, Copyright 2009 The Go Authors. +// Use of this source code is governed by a BSD-style license that can be found +// in the LICENSE file. + +package fasthttprouter + +import ( + "runtime" + "testing" +) + +var cleanTests = []struct { + path, result string +}{ + // Already clean + {"/", "/"}, + {"/abc", "/abc"}, + {"/a/b/c", "/a/b/c"}, + {"/abc/", "/abc/"}, + {"/a/b/c/", "/a/b/c/"}, + + // missing root + {"", "/"}, + {"abc", "/abc"}, + {"abc/def", "/abc/def"}, + {"a/b/c", "/a/b/c"}, + + // Remove doubled slash + {"//", "/"}, + {"/abc//", "/abc/"}, + {"/abc/def//", "/abc/def/"}, + {"/a/b/c//", "/a/b/c/"}, + {"/abc//def//ghi", "/abc/def/ghi"}, + {"//abc", "/abc"}, + {"///abc", "/abc"}, + {"//abc//", "/abc/"}, + + // Remove . elements + {".", "/"}, + {"./", "/"}, + {"/abc/./def", "/abc/def"}, + {"/./abc/def", "/abc/def"}, + {"/abc/.", "/abc/"}, + + // Remove .. elements + {"..", "/"}, + {"../", "/"}, + {"../../", "/"}, + {"../..", "/"}, + {"../../abc", "/abc"}, + {"/abc/def/ghi/../jkl", "/abc/def/jkl"}, + {"/abc/def/../ghi/../jkl", "/abc/jkl"}, + {"/abc/def/..", "/abc"}, + {"/abc/def/../..", "/"}, + {"/abc/def/../../..", "/"}, + {"/abc/def/../../..", "/"}, + {"/abc/def/../../../ghi/jkl/../../../mno", "/mno"}, + + // Combinations + {"abc/./../def", "/def"}, + {"abc//./../def", "/def"}, + {"abc/../../././../def", "/def"}, +} + +func TestPathClean(t *testing.T) { + for _, test := range cleanTests { + if s := CleanPath(test.path); s != test.result { + t.Errorf("CleanPath(%q) = %q, want %q", test.path, s, test.result) + } + if s := CleanPath(test.result); s != test.result { + t.Errorf("CleanPath(%q) = %q, want %q", test.result, s, test.result) + } + } +} + +func TestPathCleanMallocs(t *testing.T) { + if testing.Short() { + t.Skip("skipping malloc count in short mode") + } + if runtime.GOMAXPROCS(0) > 1 { + t.Log("skipping AllocsPerRun checks; GOMAXPROCS>1") + return + } + + for _, test := range cleanTests { + allocs := testing.AllocsPerRun(100, func() { CleanPath(test.result) }) + if allocs > 0 { + t.Errorf("CleanPath(%q): %v allocs, want zero", test.result, allocs) + } + } +} diff --git a/vendor/github.com/thehowl/fasthttprouter/router.go b/vendor/github.com/thehowl/fasthttprouter/router.go new file mode 100644 index 0000000..e3e9074 --- /dev/null +++ b/vendor/github.com/thehowl/fasthttprouter/router.go @@ -0,0 +1,389 @@ +// Copyright 2013 Julien Schmidt. All rights reserved. +// Use of this source code is governed by a BSD-style license that can be found +// in the LICENSE file. + +// Package fasthttprouter is a trie based high performance HTTP request router. 
+// +// A trivial example is: +// +// package main + +// import ( +// "fmt" +// "log" +// +// "github.com/thehowl/fasthttprouter" +// "github.com/erikdubbelboer/fasthttp" +// ) + +// func Index(ctx *fasthttp.RequestCtx) { +// fmt.Fprint(ctx, "Welcome!\n") +// } + +// func Hello(ctx *fasthttp.RequestCtx) { +// fmt.Fprintf(ctx, "hello, %s!\n", ctx.UserValue("name")) +// } + +// func main() { +// router := fasthttprouter.New() +// router.GET("/", Index) +// router.GET("/hello/:name", Hello) + +// log.Fatal(fasthttp.ListenAndServe(":8080", router.Handler)) +// } +// +// The router matches incoming requests by the request method and the path. +// If a handle is registered for this path and method, the router delegates the +// request to that function. +// For the methods GET, POST, PUT, PATCH and DELETE shortcut functions exist to +// register handles, for all other methods router.Handle can be used. +// +// The registered path, against which the router matches incoming requests, can +// contain two types of parameters: +// Syntax Type +// :name named parameter +// *name catch-all parameter +// +// Named parameters are dynamic path segments. They match anything until the +// next '/' or the path end: +// Path: /blog/:category/:post +// +// Requests: +// /blog/go/request-routers match: category="go", post="request-routers" +// /blog/go/request-routers/ no match, but the router would redirect +// /blog/go/ no match +// /blog/go/request-routers/comments no match +// +// Catch-all parameters match anything until the path end, including the +// directory index (the '/' before the catch-all). Since they match anything +// until the end, catch-all parameters must always be the final path element. +// Path: /files/*filepath +// +// Requests: +// /files/ match: filepath="/" +// /files/LICENSE match: filepath="/LICENSE" +// /files/templates/article.html match: filepath="/templates/article.html" +// /files no match, but the router would redirect +// +// The value of parameters is inside ctx.UserValue +// To retrieve the value of a parameter: +// // use the name of the parameter +// user := ps.UserValue("user") +// + +package fasthttprouter + +import ( + "strings" + "unsafe" + + "github.com/erikdubbelboer/fasthttp" +) + +var ( + defaultContentType = []byte("text/plain; charset=utf-8") + questionMark = []byte("?") +) + +// Router is a http.Handler which can be used to dispatch requests to different +// handler functions via configurable routes +type Router struct { + trees map[string]*node + + // Enables automatic redirection if the current route can't be matched but a + // handler for the path with (without) the trailing slash exists. + // For example if /foo/ is requested but a route only exists for /foo, the + // client is redirected to /foo with http status code 301 for GET requests + // and 307 for all other request methods. + RedirectTrailingSlash bool + + // If enabled, the router tries to fix the current request path, if no + // handle is registered for it. + // First superfluous path elements like ../ or // are removed. + // Afterwards the router does a case-insensitive lookup of the cleaned path. + // If a handle can be found for this route, the router makes a redirection + // to the corrected path with status code 301 for GET requests and 307 for + // all other request methods. + // For example /FOO and /..//Foo could be redirected to /foo. + // RedirectTrailingSlash is independent of this option. 
+ RedirectFixedPath bool + + // If enabled, the router checks if another method is allowed for the + // current route, if the current request can not be routed. + // If this is the case, the request is answered with 'Method Not Allowed' + // and HTTP status code 405. + // If no other Method is allowed, the request is delegated to the NotFound + // handler. + HandleMethodNotAllowed bool + + // If enabled, the router automatically replies to OPTIONS requests. + // Custom OPTIONS handlers take priority over automatic replies. + HandleOPTIONS bool + + // Configurable http.Handler which is called when no matching route is + // found. If it is not set, http.NotFound is used. + NotFound fasthttp.RequestHandler + + // Configurable http.Handler which is called when a request + // cannot be routed and HandleMethodNotAllowed is true. + // If it is not set, http.Error with http.StatusMethodNotAllowed is used. + // The "Allow" header with allowed request methods is set before the handler + // is called. + MethodNotAllowed fasthttp.RequestHandler + + // Function to handle panics recovered from http handlers. + // It should be used to generate a error page and return the http error code + // 500 (Internal Server Error). + // The handler can be used to keep your server from crashing because of + // unrecovered panics. + PanicHandler func(*fasthttp.RequestCtx, interface{}) +} + +// New returns a new initialized Router. +// Path auto-correction, including trailing slashes, is enabled by default. +func New() *Router { + return &Router{ + RedirectTrailingSlash: true, + RedirectFixedPath: true, + HandleMethodNotAllowed: true, + HandleOPTIONS: true, + } +} + +// GET is a shortcut for router.Handle("GET", path, handle) +func (r *Router) GET(path string, handle fasthttp.RequestHandler) { + r.Handle("GET", path, handle) +} + +// HEAD is a shortcut for router.Handle("HEAD", path, handle) +func (r *Router) HEAD(path string, handle fasthttp.RequestHandler) { + r.Handle("HEAD", path, handle) +} + +// OPTIONS is a shortcut for router.Handle("OPTIONS", path, handle) +func (r *Router) OPTIONS(path string, handle fasthttp.RequestHandler) { + r.Handle("OPTIONS", path, handle) +} + +// POST is a shortcut for router.Handle("POST", path, handle) +func (r *Router) POST(path string, handle fasthttp.RequestHandler) { + r.Handle("POST", path, handle) +} + +// PUT is a shortcut for router.Handle("PUT", path, handle) +func (r *Router) PUT(path string, handle fasthttp.RequestHandler) { + r.Handle("PUT", path, handle) +} + +// PATCH is a shortcut for router.Handle("PATCH", path, handle) +func (r *Router) PATCH(path string, handle fasthttp.RequestHandler) { + r.Handle("PATCH", path, handle) +} + +// DELETE is a shortcut for router.Handle("DELETE", path, handle) +func (r *Router) DELETE(path string, handle fasthttp.RequestHandler) { + r.Handle("DELETE", path, handle) +} + +// Handle registers a new request handle with the given path and method. +// +// For GET, POST, PUT, PATCH and DELETE requests the respective shortcut +// functions can be used. +// +// This function is intended for bulk loading and to allow the usage of less +// frequently used, non-standardized or custom methods (e.g. for internal +// communication with a proxy). 
+func (r *Router) Handle(method, path string, handle fasthttp.RequestHandler) { + if path[0] != '/' { + panic("path must begin with '/' in path '" + path + "'") + } + + if r.trees == nil { + r.trees = make(map[string]*node) + } + + root := r.trees[method] + if root == nil { + root = new(node) + r.trees[method] = root + } + + root.addRoute(path, handle) +} + +// ServeFiles serves files from the given file system root. +// The path must end with "/*filepath", files are then served from the local +// path /defined/root/dir/*filepath. +// For example if root is "/etc" and *filepath is "passwd", the local file +// "/etc/passwd" would be served. +// Internally a http.FileServer is used, therefore http.NotFound is used instead +// of the Router's NotFound handler. +// router.ServeFiles("/src/*filepath", "/var/www") +func (r *Router) ServeFiles(path string, rootPath string) { + if len(path) < 10 || path[len(path)-10:] != "/*filepath" { + panic("path must end with /*filepath in path '" + path + "'") + } + prefix := path[:len(path)-10] + + fileHandler := fasthttp.FSHandler(rootPath, strings.Count(prefix, "/")) + + r.GET(path, func(ctx *fasthttp.RequestCtx) { + fileHandler(ctx) + }) +} + +func (r *Router) recv(ctx *fasthttp.RequestCtx) { + if rcv := recover(); rcv != nil { + r.PanicHandler(ctx, rcv) + } +} + +// Lookup allows the manual lookup of a method + path combo. +// This is e.g. useful to build a framework around this router. +// If the path was found, it returns the handle function and the path parameter +// values. Otherwise the third return value indicates whether a redirection to +// the same path with an extra / without the trailing slash should be performed. +func (r *Router) Lookup(method, path string, ctx *fasthttp.RequestCtx) (fasthttp.RequestHandler, bool) { + if root := r.trees[method]; root != nil { + return root.getValue(path, ctx) + } + return nil, false +} + +func (r *Router) allowed(path, reqMethod string) (allow string) { + if path == "*" || path == "/*" { // server-wide + for method := range r.trees { + if method == "OPTIONS" { + continue + } + + // add request method to list of allowed methods + if len(allow) == 0 { + allow = method + } else { + allow += ", " + method + } + } + } else { // specific path + for method := range r.trees { + // Skip the requested method - we already tried this one + if method == reqMethod || method == "OPTIONS" { + continue + } + + handle, _ := r.trees[method].getValue(path, nil) + if handle != nil { + // add request method to list of allowed methods + if len(allow) == 0 { + allow = method + } else { + allow += ", " + method + } + } + } + } + if len(allow) > 0 { + allow += ", OPTIONS" + } + return +} + +// Handler makes the router implement the fasthttp.ListenAndServe interface. +func (r *Router) Handler(ctx *fasthttp.RequestCtx) { + if r.PanicHandler != nil { + defer r.recv(ctx) + } + + path := b2s(ctx.Path()) + method := b2s(ctx.Method()) + if root := r.trees[method]; root != nil { + if f, tsr := root.getValue(path, ctx); f != nil { + f(ctx) + return + } else if method != "CONNECT" && path != "/" { + code := 301 // Permanent redirect, request with GET method + if method != "GET" { + // Temporary redirect, request with same method + // As of Go 1.3, Go does not support status code 308. + code = 307 + } + + if tsr && r.RedirectTrailingSlash { + var uri string + if len(path) > 1 && path[len(path)-1] == '/' { + uri = path[:len(path)-1] + } else { + uri = path + "/" + } + + if len(ctx.URI().QueryString()) > 0 { + uri += "?" 
+ string(ctx.QueryArgs().QueryString()) + } + + ctx.Redirect(uri, code) + return + } + + // Try to fix the request path + if r.RedirectFixedPath { + fixedPath, found := root.findCaseInsensitivePath( + CleanPath(path), + r.RedirectTrailingSlash, + ) + + if found { + queryBuf := ctx.URI().QueryString() + if len(queryBuf) > 0 { + fixedPath = append(fixedPath, questionMark...) + fixedPath = append(fixedPath, queryBuf...) + } + uri := string(fixedPath) + ctx.Redirect(uri, code) + return + } + } + } + } + + if method == "OPTIONS" { + // Handle OPTIONS requests + if r.HandleOPTIONS { + if allow := r.allowed(path, method); len(allow) > 0 { + ctx.Response.Header.Set("Allow", allow) + return + } + } + } else { + // Handle 405 + if r.HandleMethodNotAllowed { + if allow := r.allowed(path, method); len(allow) > 0 { + ctx.Response.Header.Set("Allow", allow) + if r.MethodNotAllowed != nil { + r.MethodNotAllowed(ctx) + } else { + ctx.SetStatusCode(fasthttp.StatusMethodNotAllowed) + ctx.SetContentTypeBytes(defaultContentType) + ctx.SetBodyString(fasthttp.StatusMessage(fasthttp.StatusMethodNotAllowed)) + } + return + } + } + } + + // Handle 404 + if r.NotFound != nil { + r.NotFound(ctx) + } else { + ctx.Error(fasthttp.StatusMessage(fasthttp.StatusNotFound), + fasthttp.StatusNotFound) + } +} + +// b2s converts byte slice to a string without memory allocation. +// See https://groups.google.com/forum/#!msg/Golang-Nuts/ENgbUzYvCuU/90yGx7GUAgAJ . +// +// Note it may break if string and/or slice header will change +// in the future go versions. +func b2s(b []byte) string { + return *(*string)(unsafe.Pointer(&b)) +} diff --git a/vendor/github.com/thehowl/fasthttprouter/router_test.go b/vendor/github.com/thehowl/fasthttprouter/router_test.go new file mode 100644 index 0000000..497eaac --- /dev/null +++ b/vendor/github.com/thehowl/fasthttprouter/router_test.go @@ -0,0 +1,916 @@ +// Copyright 2013 Julien Schmidt. All rights reserved. +// Use of this source code is governed by a BSD-style license that can be found +// in the LICENSE file. 
+ +package fasthttprouter + +import ( + "bufio" + "bytes" + "errors" + "fmt" + "io/ioutil" + "net" + "net/http" + "os" + "testing" + "time" + + "github.com/erikdubbelboer/fasthttp" +) + +func TestRouter(t *testing.T) { + router := New() + + routed := false + router.Handle("GET", "/user/:name", func(ctx *fasthttp.RequestCtx) { + routed = true + want := map[string]string{"name": "gopher"} + + if ctx.UserValue("name") != want["name"] { + t.Fatalf("wrong wildcard values: want %v, got %v", want["name"], ctx.UserValue("name")) + } + ctx.Success("foo/bar", []byte("success")) + }) + + s := &fasthttp.Server{ + Handler: router.Handler, + } + + rw := &readWriter{} + rw.r.WriteString("GET /user/gopher?baz HTTP/1.1\r\n\r\n") + + ch := make(chan error) + go func() { + ch <- s.ServeConn(rw) + }() + + select { + case err := <-ch: + if err != nil { + t.Fatalf("return error %s", err) + } + case <-time.After(100 * time.Millisecond): + t.Fatalf("timeout") + } + + if !routed { + t.Fatal("routing failed") + } +} + +type handlerStruct struct { + handled *bool +} + +func (h handlerStruct) ServeHTTP(w http.ResponseWriter, r *http.Request) { + *h.handled = true +} + +func TestRouterAPI(t *testing.T) { + var get, head, options, post, put, patch, deleted bool + + router := New() + router.GET("/GET", func(ctx *fasthttp.RequestCtx) { + get = true + }) + router.HEAD("/GET", func(ctx *fasthttp.RequestCtx) { + head = true + }) + router.OPTIONS("/GET", func(ctx *fasthttp.RequestCtx) { + options = true + }) + router.POST("/POST", func(ctx *fasthttp.RequestCtx) { + post = true + }) + router.PUT("/PUT", func(ctx *fasthttp.RequestCtx) { + put = true + }) + router.PATCH("/PATCH", func(ctx *fasthttp.RequestCtx) { + patch = true + }) + router.DELETE("/DELETE", func(ctx *fasthttp.RequestCtx) { + deleted = true + }) + + s := &fasthttp.Server{ + Handler: router.Handler, + } + + rw := &readWriter{} + ch := make(chan error) + + rw.r.WriteString("GET /GET HTTP/1.1\r\n\r\n") + go func() { + ch <- s.ServeConn(rw) + }() + select { + case err := <-ch: + if err != nil { + t.Fatalf("return error %s", err) + } + case <-time.After(100 * time.Millisecond): + t.Fatalf("timeout") + } + if !get { + t.Error("routing GET failed") + } + + rw.r.WriteString("HEAD /GET HTTP/1.1\r\n\r\n") + go func() { + ch <- s.ServeConn(rw) + }() + select { + case err := <-ch: + if err != nil { + t.Fatalf("return error %s", err) + } + case <-time.After(100 * time.Millisecond): + t.Fatalf("timeout") + } + if !head { + t.Error("routing HEAD failed") + } + + rw.r.WriteString("OPTIONS /GET HTTP/1.1\r\n\r\n") + go func() { + ch <- s.ServeConn(rw) + }() + select { + case err := <-ch: + if err != nil { + t.Fatalf("return error %s", err) + } + case <-time.After(100 * time.Millisecond): + t.Fatalf("timeout") + } + if !options { + t.Error("routing OPTIONS failed") + } + + rw.r.WriteString("POST /POST HTTP/1.1\r\n\r\n") + go func() { + ch <- s.ServeConn(rw) + }() + select { + case err := <-ch: + if err != nil { + t.Fatalf("return error %s", err) + } + case <-time.After(100 * time.Millisecond): + t.Fatalf("timeout") + } + if !post { + t.Error("routing POST failed") + } + + rw.r.WriteString("PUT /PUT HTTP/1.1\r\n\r\n") + go func() { + ch <- s.ServeConn(rw) + }() + select { + case err := <-ch: + if err != nil { + t.Fatalf("return error %s", err) + } + case <-time.After(100 * time.Millisecond): + t.Fatalf("timeout") + } + if !put { + t.Error("routing PUT failed") + } + + rw.r.WriteString("PATCH /PATCH HTTP/1.1\r\n\r\n") + go func() { + ch <- s.ServeConn(rw) + }() + select { + case 
err := <-ch: + if err != nil { + t.Fatalf("return error %s", err) + } + case <-time.After(100 * time.Millisecond): + t.Fatalf("timeout") + } + if !patch { + t.Error("routing PATCH failed") + } + + rw.r.WriteString("DELETE /DELETE HTTP/1.1\r\n\r\n") + go func() { + ch <- s.ServeConn(rw) + }() + select { + case err := <-ch: + if err != nil { + t.Fatalf("return error %s", err) + } + case <-time.After(100 * time.Millisecond): + t.Fatalf("timeout") + } + if !deleted { + t.Error("routing DELETE failed") + } +} + +func TestRouterRoot(t *testing.T) { + router := New() + recv := catchPanic(func() { + router.GET("noSlashRoot", nil) + }) + if recv == nil { + t.Fatal("registering path not beginning with '/' did not panic") + } +} + +func TestRouterChaining(t *testing.T) { + router1 := New() + router2 := New() + router1.NotFound = router2.Handler + + fooHit := false + router1.POST("/foo", func(ctx *fasthttp.RequestCtx) { + fooHit = true + ctx.SetStatusCode(fasthttp.StatusOK) + }) + + barHit := false + router2.POST("/bar", func(ctx *fasthttp.RequestCtx) { + barHit = true + ctx.SetStatusCode(fasthttp.StatusOK) + }) + + s := &fasthttp.Server{ + Handler: router1.Handler, + } + + rw := &readWriter{} + ch := make(chan error) + + rw.r.WriteString("POST /foo HTTP/1.1\r\n\r\n") + go func() { + ch <- s.ServeConn(rw) + }() + select { + case err := <-ch: + if err != nil { + t.Fatalf("return error %s", err) + } + case <-time.After(100 * time.Millisecond): + t.Fatalf("timeout") + } + br := bufio.NewReader(&rw.w) + var resp fasthttp.Response + if err := resp.Read(br); err != nil { + t.Fatalf("Unexpected error when reading response: %s", err) + } + if !(resp.Header.StatusCode() == fasthttp.StatusOK && fooHit) { + t.Errorf("Regular routing failed with router chaining.") + t.FailNow() + } + + rw.r.WriteString("POST /bar HTTP/1.1\r\n\r\n") + go func() { + ch <- s.ServeConn(rw) + }() + select { + case err := <-ch: + if err != nil { + t.Fatalf("return error %s", err) + } + case <-time.After(100 * time.Millisecond): + t.Fatalf("timeout") + } + if err := resp.Read(br); err != nil { + t.Fatalf("Unexpected error when reading response: %s", err) + } + if !(resp.Header.StatusCode() == fasthttp.StatusOK && barHit) { + t.Errorf("Chained routing failed with router chaining.") + t.FailNow() + } + + rw.r.WriteString("POST /qax HTTP/1.1\r\n\r\n") + go func() { + ch <- s.ServeConn(rw) + }() + select { + case err := <-ch: + if err != nil { + t.Fatalf("return error %s", err) + } + case <-time.After(100 * time.Millisecond): + t.Fatalf("timeout") + } + if err := resp.Read(br); err != nil { + t.Fatalf("Unexpected error when reading response: %s", err) + } + if !(resp.Header.StatusCode() == fasthttp.StatusNotFound) { + t.Errorf("NotFound behavior failed with router chaining.") + t.FailNow() + } +} + +func TestRouterOPTIONS(t *testing.T) { + // TODO: because fasthttp is not support OPTIONS method now, + // these test cases will be used in the future. 
+ handlerFunc := func(_ *fasthttp.RequestCtx) {} + + router := New() + router.POST("/path", handlerFunc) + + // test not allowed + // * (server) + s := &fasthttp.Server{ + Handler: router.Handler, + } + + rw := &readWriter{} + ch := make(chan error) + + rw.r.WriteString("OPTIONS * HTTP/1.1\r\nHost:\r\n\r\n") + go func() { + ch <- s.ServeConn(rw) + }() + select { + case err := <-ch: + if err != nil { + t.Fatalf("return error %s", err) + } + case <-time.After(100 * time.Millisecond): + t.Fatalf("timeout") + } + br := bufio.NewReader(&rw.w) + var resp fasthttp.Response + if err := resp.Read(br); err != nil { + t.Fatalf("Unexpected error when reading response: %s", err) + } + if resp.Header.StatusCode() != fasthttp.StatusOK { + t.Errorf("OPTIONS handling failed: Code=%d, Header=%v", + resp.Header.StatusCode(), resp.Header.String()) + } else if allow := string(resp.Header.Peek("Allow")); allow != "POST, OPTIONS" { + t.Error("unexpected Allow header value: " + allow) + } + + // path + rw.r.WriteString("OPTIONS /path HTTP/1.1\r\n\r\n") + go func() { + ch <- s.ServeConn(rw) + }() + select { + case err := <-ch: + if err != nil { + t.Fatalf("return error %s", err) + } + case <-time.After(100 * time.Millisecond): + t.Fatalf("timeout") + } + if err := resp.Read(br); err != nil { + t.Fatalf("Unexpected error when reading response: %s", err) + } + if resp.Header.StatusCode() != fasthttp.StatusOK { + t.Errorf("OPTIONS handling failed: Code=%d, Header=%v", + resp.Header.StatusCode(), resp.Header.String()) + } else if allow := string(resp.Header.Peek("Allow")); allow != "POST, OPTIONS" { + t.Error("unexpected Allow header value: " + allow) + } + + rw.r.WriteString("OPTIONS /doesnotexist HTTP/1.1\r\n\r\n") + go func() { + ch <- s.ServeConn(rw) + }() + select { + case err := <-ch: + if err != nil { + t.Fatalf("return error %s", err) + } + case <-time.After(100 * time.Millisecond): + t.Fatalf("timeout") + } + if err := resp.Read(br); err != nil { + t.Fatalf("Unexpected error when reading response: %s", err) + } + if !(resp.Header.StatusCode() == fasthttp.StatusNotFound) { + t.Errorf("OPTIONS handling failed: Code=%d, Header=%v", + resp.Header.StatusCode(), resp.Header.String()) + } + + // add another method + router.GET("/path", handlerFunc) + + // test again + // * (server) + rw.r.WriteString("OPTIONS * HTTP/1.1\r\n\r\n") + go func() { + ch <- s.ServeConn(rw) + }() + select { + case err := <-ch: + if err != nil { + t.Fatalf("return error %s", err) + } + case <-time.After(100 * time.Millisecond): + t.Fatalf("timeout") + } + if err := resp.Read(br); err != nil { + t.Fatalf("Unexpected error when reading response: %s", err) + } + if resp.Header.StatusCode() != fasthttp.StatusOK { + t.Errorf("OPTIONS handling failed: Code=%d, Header=%v", + resp.Header.StatusCode(), resp.Header.String()) + } else if allow := string(resp.Header.Peek("Allow")); allow != "POST, GET, OPTIONS" && allow != "GET, POST, OPTIONS" { + t.Error("unexpected Allow header value: " + allow) + } + + // path + rw.r.WriteString("OPTIONS /path HTTP/1.1\r\n\r\n") + go func() { + ch <- s.ServeConn(rw) + }() + select { + case err := <-ch: + if err != nil { + t.Fatalf("return error %s", err) + } + case <-time.After(100 * time.Millisecond): + t.Fatalf("timeout") + } + if err := resp.Read(br); err != nil { + t.Fatalf("Unexpected error when reading response: %s", err) + } + if resp.Header.StatusCode() != fasthttp.StatusOK { + t.Errorf("OPTIONS handling failed: Code=%d, Header=%v", + resp.Header.StatusCode(), resp.Header.String()) + } else if allow := 
string(resp.Header.Peek("Allow")); allow != "POST, GET, OPTIONS" && allow != "GET, POST, OPTIONS" { + t.Error("unexpected Allow header value: " + allow) + } + + // custom handler + var custom bool + router.OPTIONS("/path", func(_ *fasthttp.RequestCtx) { + custom = true + }) + + // test again + // * (server) + rw.r.WriteString("OPTIONS * HTTP/1.1\r\n\r\n") + go func() { + ch <- s.ServeConn(rw) + }() + select { + case err := <-ch: + if err != nil { + t.Fatalf("return error %s", err) + } + case <-time.After(100 * time.Millisecond): + t.Fatalf("timeout") + } + if err := resp.Read(br); err != nil { + t.Fatalf("Unexpected error when reading response: %s", err) + } + if resp.Header.StatusCode() != fasthttp.StatusOK { + t.Errorf("OPTIONS handling failed: Code=%d, Header=%v", + resp.Header.StatusCode(), resp.Header.String()) + } else if allow := string(resp.Header.Peek("Allow")); allow != "POST, GET, OPTIONS" && allow != "GET, POST, OPTIONS" { + t.Error("unexpected Allow header value: " + allow) + } + if custom { + t.Error("custom handler called on *") + } + + // path + rw.r.WriteString("OPTIONS /path HTTP/1.1\r\n\r\n") + go func() { + ch <- s.ServeConn(rw) + }() + select { + case err := <-ch: + if err != nil { + t.Fatalf("return error %s", err) + } + case <-time.After(100 * time.Millisecond): + t.Fatalf("timeout") + } + if err := resp.Read(br); err != nil { + t.Fatalf("Unexpected error when reading response: %s", err) + } + if resp.Header.StatusCode() != fasthttp.StatusOK { + t.Errorf("OPTIONS handling failed: Code=%d, Header=%v", + resp.Header.StatusCode(), resp.Header.String()) + } + if !custom { + t.Error("custom handler not called") + } +} + +func TestRouterNotAllowed(t *testing.T) { + handlerFunc := func(_ *fasthttp.RequestCtx) {} + + router := New() + router.POST("/path", handlerFunc) + + // Test not allowed + s := &fasthttp.Server{ + Handler: router.Handler, + } + + rw := &readWriter{} + ch := make(chan error) + + rw.r.WriteString("GET /path HTTP/1.1\r\n\r\n") + go func() { + ch <- s.ServeConn(rw) + }() + select { + case err := <-ch: + if err != nil { + t.Fatalf("return error %s", err) + } + case <-time.After(100 * time.Millisecond): + t.Fatalf("timeout") + } + br := bufio.NewReader(&rw.w) + var resp fasthttp.Response + if err := resp.Read(br); err != nil { + t.Fatalf("Unexpected error when reading response: %s", err) + } + if !(resp.Header.StatusCode() == fasthttp.StatusMethodNotAllowed) { + t.Errorf("NotAllowed handling failed: Code=%d", resp.Header.StatusCode()) + } else if allow := string(resp.Header.Peek("Allow")); allow != "POST, OPTIONS" { + t.Error("unexpected Allow header value: " + allow) + } + + // add another method + router.DELETE("/path", handlerFunc) + router.OPTIONS("/path", handlerFunc) // must be ignored + + // test again + rw.r.WriteString("GET /path HTTP/1.1\r\n\r\n") + go func() { + ch <- s.ServeConn(rw) + }() + select { + case err := <-ch: + if err != nil { + t.Fatalf("return error %s", err) + } + case <-time.After(100 * time.Millisecond): + t.Fatalf("timeout") + } + if err := resp.Read(br); err != nil { + t.Fatalf("Unexpected error when reading response: %s", err) + } + if !(resp.Header.StatusCode() == fasthttp.StatusMethodNotAllowed) { + t.Errorf("NotAllowed handling failed: Code=%d", resp.Header.StatusCode()) + } else if allow := string(resp.Header.Peek("Allow")); allow != "POST, DELETE, OPTIONS" && allow != "DELETE, POST, OPTIONS" { + t.Error("unexpected Allow header value: " + allow) + } + + responseText := "custom method" + router.MethodNotAllowed = 
fasthttp.RequestHandler(func(ctx *fasthttp.RequestCtx) { + ctx.SetStatusCode(fasthttp.StatusTeapot) + ctx.Write([]byte(responseText)) + }) + rw.r.WriteString("GET /path HTTP/1.1\r\n\r\n") + go func() { + ch <- s.ServeConn(rw) + }() + select { + case err := <-ch: + if err != nil { + t.Fatalf("return error %s", err) + } + case <-time.After(100 * time.Millisecond): + t.Fatalf("timeout") + } + if err := resp.Read(br); err != nil { + t.Fatalf("Unexpected error when reading response: %s", err) + } + if !bytes.Equal(resp.Body(), []byte(responseText)) { + t.Errorf("unexpected response got %q want %q", string(resp.Body()), responseText) + } + if resp.Header.StatusCode() != fasthttp.StatusTeapot { + t.Errorf("unexpected response code %d want %d", resp.Header.StatusCode(), fasthttp.StatusTeapot) + } + if allow := string(resp.Header.Peek("Allow")); allow != "POST, DELETE, OPTIONS" && allow != "DELETE, POST, OPTIONS" { + t.Error("unexpected Allow header value: " + allow) + } +} + +func TestRouterNotFound(t *testing.T) { + handlerFunc := func(_ *fasthttp.RequestCtx) {} + + router := New() + router.GET("/path", handlerFunc) + router.GET("/dir/", handlerFunc) + router.GET("/", handlerFunc) + + testRoutes := []struct { + route string + code int + location string + }{ + {"/path/", 301, "/path"}, // TSR -/ + {"/dir", 301, "/dir/"}, // TSR +/ + //{"", 301, "/"}, // TSR +/ unsupported by fasthttp + {"/PATH", 301, "/path"}, // Fixed Case + {"/DIR", 301, "/dir/"}, // Fixed Case + {"/PATH/", 301, "/path"}, // Fixed Case -/ + {"/DIR/", 301, "/dir/"}, // Fixed Case +/ + {"/paTh/?name=foo", 301, "/path?name=foo"}, // Fixed Case With Params +/ + {"/paTh?name=foo", 301, "/path?name=foo"}, // Fixed Case With Params +/ + {"/../path", 200, ""}, // CleanPath + {"/nope", 404, ""}, // NotFound + } + + s := &fasthttp.Server{ + Handler: router.Handler, + } + + rw := &readWriter{} + br := bufio.NewReader(&rw.w) + var resp fasthttp.Response + ch := make(chan error) + for _, tr := range testRoutes { + rw.r.WriteString(fmt.Sprintf("GET %s HTTP/1.1\r\n\r\n", tr.route)) + go func() { + ch <- s.ServeConn(rw) + }() + select { + case err := <-ch: + if err != nil { + t.Fatalf("return error %s", err) + } + case <-time.After(100 * time.Millisecond): + t.Fatalf("timeout") + } + if err := resp.Read(br); err != nil { + t.Fatalf("Unexpected error when reading response: %s", err) + } + loc := "http://" + tr.location + if tr.location == "" { + loc = "" + } + if !(resp.Header.StatusCode() == tr.code && + (tr.code == 404 || string(resp.Header.Peek("Location")) == loc)) { + t.Errorf("NotFound handling route %s failed: Code=%d Header=%s", + tr.route, resp.Header.StatusCode(), string(resp.Header.Peek("Location"))) + } + } + + // Test custom not found handler + var notFound bool + router.NotFound = fasthttp.RequestHandler(func(ctx *fasthttp.RequestCtx) { + ctx.SetStatusCode(404) + notFound = true + }) + rw.r.WriteString("GET /nope HTTP/1.1\r\n\r\n") + go func() { + ch <- s.ServeConn(rw) + }() + select { + case err := <-ch: + if err != nil { + t.Fatalf("return error %s", err) + } + case <-time.After(100 * time.Millisecond): + t.Fatalf("timeout") + } + if err := resp.Read(br); err != nil { + t.Fatalf("Unexpected error when reading response: %s", err) + } + if !(resp.Header.StatusCode() == 404 && notFound == true) { + t.Errorf("Custom NotFound handler failed: Code=%d, Header=%v", resp.Header.StatusCode(), string(resp.Header.Peek("Location"))) + } + + // Test other method than GET (want 307 instead of 301) + router.PATCH("/path", handlerFunc) + 
rw.r.WriteString("PATCH /path/ HTTP/1.1\r\n\r\n") + go func() { + ch <- s.ServeConn(rw) + }() + select { + case err := <-ch: + if err != nil { + t.Fatalf("return error %s", err) + } + case <-time.After(100 * time.Millisecond): + t.Fatalf("timeout") + } + if err := resp.Read(br); err != nil { + t.Fatalf("Unexpected error when reading response: %s", err) + } + if !(resp.Header.StatusCode() == 307) { + t.Errorf("Custom NotFound handler failed: Code=%d, Header=%v", resp.Header.StatusCode(), string(resp.Header.Peek("Location"))) + } + + // Test special case where no node for the prefix "/" exists + router = New() + router.GET("/a", handlerFunc) + s.Handler = router.Handler + rw.r.WriteString("GET / HTTP/1.1\r\n\r\n") + go func() { + ch <- s.ServeConn(rw) + }() + select { + case err := <-ch: + if err != nil { + t.Fatalf("return error %s", err) + } + case <-time.After(100 * time.Millisecond): + t.Fatalf("timeout") + } + if err := resp.Read(br); err != nil { + t.Fatalf("Unexpected error when reading response: %s", err) + } + if !(resp.Header.StatusCode() == 404) { + t.Errorf("NotFound handling route / failed: Code=%d", resp.Header.StatusCode()) + } +} + +func TestRouterPanicHandler(t *testing.T) { + router := New() + panicHandled := false + + router.PanicHandler = func(ctx *fasthttp.RequestCtx, p interface{}) { + panicHandled = true + } + + router.Handle("PUT", "/user/:name", func(_ *fasthttp.RequestCtx) { + panic("oops!") + }) + + defer func() { + if rcv := recover(); rcv != nil { + t.Fatal("handling panic failed") + } + }() + + s := &fasthttp.Server{ + Handler: router.Handler, + } + + rw := &readWriter{} + ch := make(chan error) + + rw.r.WriteString(string("PUT /user/gopher HTTP/1.1\r\n\r\n")) + go func() { + ch <- s.ServeConn(rw) + }() + select { + case err := <-ch: + if err != nil { + t.Fatalf("return error %s", err) + } + case <-time.After(100 * time.Millisecond): + t.Fatalf("timeout") + } + + if !panicHandled { + t.Fatal("simulating failed") + } +} + +func TestRouterLookup(t *testing.T) { + routed := false + wantHandle := func(_ *fasthttp.RequestCtx) { + routed = true + } + + router := New() + ctx := &fasthttp.RequestCtx{} + + // try empty router first + handle, tsr := router.Lookup("GET", "/nope", ctx) + if handle != nil { + t.Fatalf("Got handle for unregistered pattern: %v", handle) + } + if tsr { + t.Error("Got wrong TSR recommendation!") + } + + // insert route and try again + router.GET("/user/:name", wantHandle) + + handle, tsr = router.Lookup("GET", "/user/gopher", ctx) + if handle == nil { + t.Fatal("Got no handle!") + } else { + handle(nil) + if !routed { + t.Fatal("Routing failed!") + } + } + if ctx.UserValue("name") != "gopher" { + t.Error("Param not set!") + } + + handle, tsr = router.Lookup("GET", "/user/gopher/", ctx) + if handle != nil { + t.Fatalf("Got handle for unregistered pattern: %v", handle) + } + if !tsr { + t.Error("Got no TSR recommendation!") + } + + handle, tsr = router.Lookup("GET", "/nope", ctx) + if handle != nil { + t.Fatalf("Got handle for unregistered pattern: %v", handle) + } + if tsr { + t.Error("Got wrong TSR recommendation!") + } +} + +type mockFileSystem struct { + opened bool +} + +func (mfs *mockFileSystem) Open(name string) (http.File, error) { + mfs.opened = true + return nil, errors.New("this is just a mock") +} + +func TestRouterServeFiles(t *testing.T) { + router := New() + + recv := catchPanic(func() { + router.ServeFiles("/noFilepath", os.TempDir()) + }) + if recv == nil { + t.Fatal("registering path not ending with '*filepath' did not panic") + 
}
+	body := []byte("fake ico")
+	ioutil.WriteFile(os.TempDir()+"/favicon.ico", body, 0644)
+
+	router.ServeFiles("/*filepath", os.TempDir())
+
+	s := &fasthttp.Server{
+		Handler: router.Handler,
+	}
+
+	rw := &readWriter{}
+	ch := make(chan error)
+
+	rw.r.WriteString("GET /favicon.ico HTTP/1.1\r\n\r\n")
+	go func() {
+		ch <- s.ServeConn(rw)
+	}()
+	select {
+	case err := <-ch:
+		if err != nil {
+			t.Fatalf("return error %s", err)
+		}
+	case <-time.After(500 * time.Millisecond):
+		t.Fatalf("timeout")
+	}
+
+	br := bufio.NewReader(&rw.w)
+	var resp fasthttp.Response
+	if err := resp.Read(br); err != nil {
+		t.Fatalf("Unexpected error when reading response: %s", err)
+	}
+	if resp.Header.StatusCode() != 200 {
+		t.Fatalf("Unexpected status code %d. Expected %d", resp.Header.StatusCode(), 200)
+	}
+	if !bytes.Equal(resp.Body(), body) {
+		t.Fatalf("Unexpected body %q. Expected %q", resp.Body(), string(body))
+	}
+}
+
+type readWriter struct {
+	net.Conn
+	r bytes.Buffer
+	w bytes.Buffer
+}
+
+var zeroTCPAddr = &net.TCPAddr{
+	IP: net.IPv4zero,
+}
+
+func (rw *readWriter) Close() error {
+	return nil
+}
+
+func (rw *readWriter) Read(b []byte) (int, error) {
+	return rw.r.Read(b)
+}
+
+func (rw *readWriter) Write(b []byte) (int, error) {
+	return rw.w.Write(b)
+}
+
+func (rw *readWriter) RemoteAddr() net.Addr {
+	return zeroTCPAddr
+}
+
+func (rw *readWriter) LocalAddr() net.Addr {
+	return zeroTCPAddr
+}
+
+func (rw *readWriter) SetReadDeadline(t time.Time) error {
+	return nil
+}
+
+func (rw *readWriter) SetWriteDeadline(t time.Time) error {
+	return nil
+}
diff --git a/vendor/github.com/thehowl/fasthttprouter/tree.go b/vendor/github.com/thehowl/fasthttprouter/tree.go
new file mode 100644
index 0000000..179a49d
--- /dev/null
+++ b/vendor/github.com/thehowl/fasthttprouter/tree.go
@@ -0,0 +1,642 @@
+// Copyright 2013 Julien Schmidt. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be found
+// in the LICENSE file.
+
+package fasthttprouter
+
+import (
+	"strings"
+	"unicode"
+	"unicode/utf8"
+
+	"github.com/erikdubbelboer/fasthttp"
+)
+
+func min(a, b int) int {
+	if a <= b {
+		return a
+	}
+	return b
+}
+
+func countParams(path string) uint8 {
+	var n uint
+	for i := 0; i < len(path); i++ {
+		if path[i] != ':' && path[i] != '*' {
+			continue
+		}
+		n++
+	}
+	if n >= 255 {
+		return 255
+	}
+	return uint8(n)
+}
+
+type nodeType uint8
+
+const (
+	static nodeType = iota // default
+	root
+	param
+	catchAll
+)
+
+type node struct {
+	path      string
+	wildChild bool
+	nType     nodeType
+	maxParams uint8
+	indices   string
+	children  []*node
+	handle    fasthttp.RequestHandler
+	priority  uint32
+}
+
+// increments priority of the given child and reorders if necessary
+func (n *node) incrementChildPrio(pos int) int {
+	n.children[pos].priority++
+	prio := n.children[pos].priority
+
+	// adjust position (move to front)
+	newPos := pos
+	for newPos > 0 && n.children[newPos-1].priority < prio {
+		// swap node positions
+		n.children[newPos-1], n.children[newPos] = n.children[newPos], n.children[newPos-1]
+
+		newPos--
+	}
+
+	// build new index char string
+	if newPos != pos {
+		n.indices = n.indices[:newPos] + // unchanged prefix, might be empty
+			n.indices[pos:pos+1] + // the index char we move
+			n.indices[newPos:pos] + n.indices[pos+1:] // rest without char at 'pos'
+	}
+
+	return newPos
+}
+
+// addRoute adds a node with the given handle to the path.
+// Not concurrency-safe!
+func (n *node) addRoute(path string, handle fasthttp.RequestHandler) { + fullPath := path + n.priority++ + numParams := countParams(path) + + // non-empty tree + if len(n.path) > 0 || len(n.children) > 0 { + walk: + for { + // Update maxParams of the current node + if numParams > n.maxParams { + n.maxParams = numParams + } + + // Find the longest common prefix. + // This also implies that the common prefix contains no ':' or '*' + // since the existing key can't contain those chars. + i := 0 + max := min(len(path), len(n.path)) + for i < max && path[i] == n.path[i] { + i++ + } + + // Split edge + if i < len(n.path) { + child := node{ + path: n.path[i:], + wildChild: n.wildChild, + nType: static, + indices: n.indices, + children: n.children, + handle: n.handle, + priority: n.priority - 1, + } + + // Update maxParams (max of all children) + for i := range child.children { + if child.children[i].maxParams > child.maxParams { + child.maxParams = child.children[i].maxParams + } + } + + n.children = []*node{&child} + // []byte for proper unicode char conversion, see #65 + n.indices = string([]byte{n.path[i]}) + n.path = path[:i] + n.handle = nil + n.wildChild = false + } + + // Make new node a child of this node + if i < len(path) { + path = path[i:] + + if n.wildChild { + n = n.children[0] + n.priority++ + + // Update maxParams of the child node + if numParams > n.maxParams { + n.maxParams = numParams + } + numParams-- + + // Check if the wildcard matches + if len(path) >= len(n.path) && n.path == path[:len(n.path)] && + // Check for longer wildcard, e.g. :name and :names + (len(n.path) >= len(path) || path[len(n.path)] == '/') { + continue walk + } else { + // Wildcard conflict + pathSeg := strings.SplitN(path, "/", 2)[0] + prefix := fullPath[:strings.Index(fullPath, pathSeg)] + n.path + panic("'" + pathSeg + + "' in new path '" + fullPath + + "' conflicts with existing wildcard '" + n.path + + "' in existing prefix '" + prefix + + "'") + } + } + + c := path[0] + + // slash after param + if n.nType == param && c == '/' && len(n.children) == 1 { + n = n.children[0] + n.priority++ + continue walk + } + + // Check if a child with the next path byte exists + for i := 0; i < len(n.indices); i++ { + if c == n.indices[i] { + i = n.incrementChildPrio(i) + n = n.children[i] + continue walk + } + } + + // Otherwise insert it + if c != ':' && c != '*' { + // []byte for proper unicode char conversion, see #65 + n.indices += string([]byte{c}) + child := &node{ + maxParams: numParams, + } + n.children = append(n.children, child) + n.incrementChildPrio(len(n.indices) - 1) + n = child + } + n.insertChild(numParams, path, fullPath, handle) + return + + } else if i == len(path) { // Make node a (in-path) leaf + if n.handle != nil { + panic("a handle is already registered for path '" + fullPath + "'") + } + n.handle = handle + } + return + } + } else { // Empty tree + n.insertChild(numParams, path, fullPath, handle) + n.nType = root + } +} + +func (n *node) insertChild(numParams uint8, path, fullPath string, handle fasthttp.RequestHandler) { + var offset int // already handled bytes of the path + + // find prefix until first wildcard (beginning with ':'' or '*'') + for i, max := 0, len(path); numParams > 0; i++ { + c := path[i] + if c != ':' && c != '*' { + continue + } + + // find wildcard end (either '/' or path end) + end := i + 1 + for end < max && path[end] != '/' { + switch path[end] { + // the wildcard name must not contain ':' and '*' + case ':', '*': + panic("only one wildcard per path segment is 
allowed, has: '" + + path[i:] + "' in path '" + fullPath + "'") + default: + end++ + } + } + + // check if this Node existing children which would be + // unreachable if we insert the wildcard here + if len(n.children) > 0 { + panic("wildcard route '" + path[i:end] + + "' conflicts with existing children in path '" + fullPath + "'") + } + + // check if the wildcard has a name + if end-i < 2 { + panic("wildcards must be named with a non-empty name in path '" + fullPath + "'") + } + + if c == ':' { // param + // split path at the beginning of the wildcard + if i > 0 { + n.path = path[offset:i] + offset = i + } + + child := &node{ + nType: param, + maxParams: numParams, + } + n.children = []*node{child} + n.wildChild = true + n = child + n.priority++ + numParams-- + + // if the path doesn't end with the wildcard, then there + // will be another non-wildcard subpath starting with '/' + if end < max { + n.path = path[offset:end] + offset = end + + child := &node{ + maxParams: numParams, + priority: 1, + } + n.children = []*node{child} + n = child + } + + } else { // catchAll + if end != max || numParams > 1 { + panic("catch-all routes are only allowed at the end of the path in path '" + fullPath + "'") + } + + if len(n.path) > 0 && n.path[len(n.path)-1] == '/' { + panic("catch-all conflicts with existing handle for the path segment root in path '" + fullPath + "'") + } + + // currently fixed width 1 for '/' + i-- + if path[i] != '/' { + panic("no / before catch-all in path '" + fullPath + "'") + } + + n.path = path[offset:i] + + // first node: catchAll node with empty path + child := &node{ + wildChild: true, + nType: catchAll, + maxParams: 1, + } + n.children = []*node{child} + n.indices = string(path[i]) + n = child + n.priority++ + + // second node: node holding the variable + child = &node{ + path: path[i:], + nType: catchAll, + maxParams: 1, + handle: handle, + priority: 1, + } + n.children = []*node{child} + + return + } + } + + // insert remaining path part and handle to the leaf + n.path = path[offset:] + n.handle = handle +} + +// Returns the handle registered with the given path (key). The values of +// wildcards are saved to a map. +// If no handle can be found, a TSR (trailing slash redirect) recommendation is +// made if a handle exists with an extra (without the) trailing slash for the +// given path. +func (n *node) getValue(path string, ctx *fasthttp.RequestCtx) (handle fasthttp.RequestHandler, tsr bool) { +walk: // outer loop for walking the tree + for { + if len(path) > len(n.path) { + if path[:len(n.path)] == n.path { + path = path[len(n.path):] + // If this node does not have a wildcard (param or catchAll) + // child, we can just look up the next child node and continue + // to walk down the tree + if !n.wildChild { + c := path[0] + for i := 0; i < len(n.indices); i++ { + if c == n.indices[i] { + n = n.children[i] + continue walk + } + } + + // Nothing found. + // We can recommend to redirect to the same URL without a + // trailing slash if a leaf exists for that path. + tsr = (path == "/" && n.handle != nil) + return + + } + + // handle wildcard child + n = n.children[0] + switch n.nType { + case param: + // find param end (either '/' or path end) + end := 0 + for end < len(path) && path[end] != '/' { + end++ + } + + // handle calls to Router.allowed method with nil context + if ctx != nil { + ctx.SetUserValue(n.path[1:], path[:end]) + } + + // we need to go deeper! 
+ if end < len(path) { + if len(n.children) > 0 { + path = path[end:] + n = n.children[0] + continue walk + } + + // ... but we can't + tsr = (len(path) == end+1) + return + } + + if handle = n.handle; handle != nil { + return + } else if len(n.children) == 1 { + // No handle found. Check if a handle for this path + a + // trailing slash exists for TSR recommendation + n = n.children[0] + tsr = (n.path == "/" && n.handle != nil) + } + + return + + case catchAll: + if ctx != nil { + // save param value + ctx.SetUserValue(n.path[2:], path) + } + handle = n.handle + return + + default: + panic("invalid node type") + } + } + } else if path == n.path { + // We should have reached the node containing the handle. + // Check if this node has a handle registered. + if handle = n.handle; handle != nil { + return + } + + if path == "/" && n.wildChild && n.nType != root { + tsr = true + return + } + + // No handle found. Check if a handle for this path + a + // trailing slash exists for trailing slash recommendation + for i := 0; i < len(n.indices); i++ { + if n.indices[i] == '/' { + n = n.children[i] + tsr = (len(n.path) == 1 && n.handle != nil) || + (n.nType == catchAll && n.children[0].handle != nil) + return + } + } + + return + } + + // Nothing found. We can recommend to redirect to the same URL with an + // extra trailing slash if a leaf exists for that path + tsr = (path == "/") || + (len(n.path) == len(path)+1 && n.path[len(path)] == '/' && + path == n.path[:len(n.path)-1] && n.handle != nil) + return + } +} + +// Makes a case-insensitive lookup of the given path and tries to find a handler. +// It can optionally also fix trailing slashes. +// It returns the case-corrected path and a bool indicating whether the lookup +// was successful. +func (n *node) findCaseInsensitivePath(path string, fixTrailingSlash bool) ([]byte, bool) { + return n.findCaseInsensitivePathRec( + path, + strings.ToLower(path), + make([]byte, 0, len(path)+1), // preallocate enough memory for new path + [4]byte{}, // empty rune buffer + fixTrailingSlash, + ) +} + +// shift bytes in array by n bytes left +func shiftNRuneBytes(rb [4]byte, n int) [4]byte { + switch n { + case 0: + return rb + case 1: + return [4]byte{rb[1], rb[2], rb[3], 0} + case 2: + return [4]byte{rb[2], rb[3]} + case 3: + return [4]byte{rb[3]} + default: + return [4]byte{} + } +} + +// recursive case-insensitive lookup function used by n.findCaseInsensitivePath +func (n *node) findCaseInsensitivePathRec(path, loPath string, ciPath []byte, rb [4]byte, fixTrailingSlash bool) ([]byte, bool) { + loNPath := strings.ToLower(n.path) + +walk: // outer loop for walking the tree + for len(loPath) >= len(loNPath) && (len(loNPath) == 0 || loPath[1:len(loNPath)] == loNPath[1:]) { + // add common path to result + ciPath = append(ciPath, n.path...) 
+ + if path = path[len(n.path):]; len(path) > 0 { + loOld := loPath + loPath = loPath[len(loNPath):] + + // If this node does not have a wildcard (param or catchAll) child, + // we can just look up the next child node and continue to walk down + // the tree + if !n.wildChild { + // skip rune bytes already processed + rb = shiftNRuneBytes(rb, len(loNPath)) + + if rb[0] != 0 { + // old rune not finished + for i := 0; i < len(n.indices); i++ { + if n.indices[i] == rb[0] { + // continue with child node + n = n.children[i] + loNPath = strings.ToLower(n.path) + continue walk + } + } + } else { + // process a new rune + var rv rune + + // find rune start + // runes are up to 4 byte long, + // -4 would definitely be another rune + var off int + for max := min(len(loNPath), 3); off < max; off++ { + if i := len(loNPath) - off; utf8.RuneStart(loOld[i]) { + // read rune from cached lowercase path + rv, _ = utf8.DecodeRuneInString(loOld[i:]) + break + } + } + + // calculate lowercase bytes of current rune + utf8.EncodeRune(rb[:], rv) + // skipp already processed bytes + rb = shiftNRuneBytes(rb, off) + + for i := 0; i < len(n.indices); i++ { + // lowercase matches + if n.indices[i] == rb[0] { + // must use a recursive approach since both the + // uppercase byte and the lowercase byte might exist + // as an index + if out, found := n.children[i].findCaseInsensitivePathRec( + path, loPath, ciPath, rb, fixTrailingSlash, + ); found { + return out, true + } + break + } + } + + // same for uppercase rune, if it differs + if up := unicode.ToUpper(rv); up != rv { + utf8.EncodeRune(rb[:], up) + rb = shiftNRuneBytes(rb, off) + + for i := 0; i < len(n.indices); i++ { + // uppercase matches + if n.indices[i] == rb[0] { + // continue with child node + n = n.children[i] + loNPath = strings.ToLower(n.path) + continue walk + } + } + } + } + + // Nothing found. We can recommend to redirect to the same URL + // without a trailing slash if a leaf exists for that path + return ciPath, (fixTrailingSlash && path == "/" && n.handle != nil) + } + + n = n.children[0] + switch n.nType { + case param: + // find param end (either '/' or path end) + k := 0 + for k < len(path) && path[k] != '/' { + k++ + } + + // add param value to case insensitive path + ciPath = append(ciPath, path[:k]...) + + // we need to go deeper! + if k < len(path) { + if len(n.children) > 0 { + // continue with child node + n = n.children[0] + loNPath = strings.ToLower(n.path) + loPath = loPath[k:] + path = path[k:] + continue + } + + // ... but we can't + if fixTrailingSlash && len(path) == k+1 { + return ciPath, true + } + return ciPath, false + } + + if n.handle != nil { + return ciPath, true + } else if fixTrailingSlash && len(n.children) == 1 { + // No handle found. Check if a handle for this path + a + // trailing slash exists + n = n.children[0] + if n.path == "/" && n.handle != nil { + return append(ciPath, '/'), true + } + } + return ciPath, false + + case catchAll: + return append(ciPath, path...), true + + default: + panic("invalid node type") + } + } else { + // We should have reached the node containing the handle. + // Check if this node has a handle registered. + if n.handle != nil { + return ciPath, true + } + + // No handle found. 
+			// Try to fix the path by adding a trailing slash
+			if fixTrailingSlash {
+				for i := 0; i < len(n.indices); i++ {
+					if n.indices[i] == '/' {
+						n = n.children[i]
+						if (len(n.path) == 1 && n.handle != nil) ||
+							(n.nType == catchAll && n.children[0].handle != nil) {
+							return append(ciPath, '/'), true
+						}
+						return ciPath, false
+					}
+				}
+			}
+			return ciPath, false
+		}
+	}
+
+	// Nothing found.
+	// Try to fix the path by adding / removing a trailing slash
+	if fixTrailingSlash {
+		if path == "/" {
+			return ciPath, true
+		}
+		if len(loPath)+1 == len(loNPath) && loNPath[len(loPath)] == '/' &&
+			loPath[1:] == loNPath[1:len(loPath)] && n.handle != nil {
+			return append(ciPath, n.path...), true
+		}
+	}
+	return ciPath, false
+}
diff --git a/vendor/github.com/thehowl/fasthttprouter/tree_test.go b/vendor/github.com/thehowl/fasthttprouter/tree_test.go
new file mode 100644
index 0000000..8fcdd13
--- /dev/null
+++ b/vendor/github.com/thehowl/fasthttprouter/tree_test.go
@@ -0,0 +1,671 @@
+// Copyright 2013 Julien Schmidt. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be found
+// in the LICENSE file.
+
+package fasthttprouter
+
+import (
+	"fmt"
+	"strings"
+	"testing"
+
+	"github.com/erikdubbelboer/fasthttp"
+)
+
+func printChildren(n *node, prefix string) {
+	fmt.Printf(" %02d:%02d %s%s[%d] %v %t %d \r\n", n.priority, n.maxParams, prefix, n.path, len(n.children), n.handle, n.wildChild, n.nType)
+	for l := len(n.path); l > 0; l-- {
+		prefix += " "
+	}
+	for _, child := range n.children {
+		printChildren(child, prefix)
+	}
+}
+
+// Used as a workaround since we can't compare functions or their addresses
+var fakeHandlerValue string
+
+func fakeHandler(val string) fasthttp.RequestHandler {
+	return func(*fasthttp.RequestCtx) {
+		fakeHandlerValue = val
+	}
+}
+
+type testRequests []struct {
+	path       string
+	nilHandler bool
+	route      string
+	ps         map[string]string
+}
+
+func acquireRequestCtx(path string) *fasthttp.RequestCtx {
+	var requestCtx fasthttp.RequestCtx
+	var fastRequest fasthttp.Request
+	fastRequest.SetRequestURI(path)
+	requestCtx.Init(&fastRequest, nil, nil)
+	return &requestCtx
+}
+
+func checkRequests(t *testing.T, tree *node, requests testRequests) {
+	for _, request := range requests {
+		requestCtx := acquireRequestCtx(request.path)
+		handler, _ := tree.getValue(request.path, requestCtx)
+
+		if handler == nil {
+			if !request.nilHandler {
+				t.Errorf("handle mismatch for route '%s': Expected non-nil handle", request.path)
+			}
+		} else if request.nilHandler {
+			t.Errorf("handle mismatch for route '%s': Expected nil handle", request.path)
+		} else {
+			handler(nil)
+			if fakeHandlerValue != request.route {
+				t.Errorf("handle mismatch for route '%s': Wrong handle (%s != %s)", request.path, fakeHandlerValue, request.route)
+			}
+		}
+
+		for expectedKey, expectedVal := range request.ps {
+			if requestCtx.UserValue(expectedKey) != expectedVal {
+				t.Errorf("user value mismatch for route '%s'", request.path)
+			}
+		}
+	}
+}
+
+func checkPriorities(t *testing.T, n *node) uint32 {
+	var prio uint32
+	for i := range n.children {
+		prio += checkPriorities(t, n.children[i])
+	}
+
+	if n.handle != nil {
+		prio++
+	}
+
+	if n.priority != prio {
+		t.Errorf(
+			"priority mismatch for node '%s': is %d, should be %d",
+			n.path, n.priority, prio,
+		)
+	}
+
+	return prio
+}
+
+func checkMaxParams(t *testing.T, n *node) uint8 {
+	var maxParams uint8
+	for i := range n.children {
+		params := checkMaxParams(t, n.children[i])
+		if params > maxParams {
+			maxParams = params
+		}
+	}
+	if
n.nType > root && !n.wildChild { + maxParams++ + } + + if n.maxParams != maxParams { + t.Errorf( + "maxParams mismatch for node '%s': is %d, should be %d", + n.path, n.maxParams, maxParams, + ) + } + + return maxParams +} + +func TestCountParams(t *testing.T) { + if countParams("/path/:param1/static/*catch-all") != 2 { + t.Fail() + } + if countParams(strings.Repeat("/:param", 256)) != 255 { + t.Fail() + } +} + +func TestTreeAddAndGet(t *testing.T) { + tree := &node{} + + routes := [...]string{ + "/hi", + "/contact", + "/co", + "/c", + "/a", + "/ab", + "/doc/", + "/doc/go_faq.html", + "/doc/go1.html", + "/α", + "/β", + } + for _, route := range routes { + tree.addRoute(route, fakeHandler(route)) + } + + //printChildren(tree, "") + + checkRequests(t, tree, testRequests{ + {"/a", false, "/a", nil}, + {"/", true, "", nil}, + {"/hi", false, "/hi", nil}, + {"/contact", false, "/contact", nil}, + {"/co", false, "/co", nil}, + {"/con", true, "", nil}, // key mismatch + {"/cona", true, "", nil}, // key mismatch + {"/no", true, "", nil}, // no matching child + {"/ab", false, "/ab", nil}, + {"/α", false, "/α", nil}, + {"/β", false, "/β", nil}, + }) + + checkPriorities(t, tree) + checkMaxParams(t, tree) +} + +func TestTreeWildcard(t *testing.T) { + tree := &node{} + + routes := [...]string{ + "/", + "/cmd/:tool/:sub", + "/cmd/:tool/", + "/src/*filepath", + "/search/", + "/search/:query", + "/user_:name", + "/user_:name/about", + "/files/:dir/*filepath", + "/doc/", + "/doc/go_faq.html", + "/doc/go1.html", + "/info/:user/public", + "/info/:user/project/:project", + } + for _, route := range routes { + tree.addRoute(route, fakeHandler(route)) + } + + //printChildren(tree, "") + + checkRequests(t, tree, testRequests{ + {"/", false, "/", nil}, + {"/cmd/test/", false, "/cmd/:tool/", map[string]string{"tool": "test"}}, + {"/cmd/test", true, "", map[string]string{"tool": "test"}}, + {"/cmd/test/3", false, "/cmd/:tool/:sub", map[string]string{"tool": "test", "sub": "3"}}, + {"/src/", false, "/src/*filepath", map[string]string{"filepath": "/"}}, + {"/src/some/file.png", false, "/src/*filepath", map[string]string{"filepath": "/some/file.png"}}, + {"/search/", false, "/search/", nil}, + {"/search/someth!ng+in+ünìcodé", false, "/search/:query", map[string]string{"query": "someth!ng+in+ünìcodé"}}, + {"/search/someth!ng+in+ünìcodé/", true, "", map[string]string{"query": "someth!ng+in+ünìcodé"}}, + {"/user_gopher", false, "/user_:name", map[string]string{"name": "gopher"}}, + {"/user_gopher/about", false, "/user_:name/about", map[string]string{"name": "gopher"}}, + {"/files/js/inc/framework.js", false, "/files/:dir/*filepath", map[string]string{"dir": "js", "filepath": "/inc/framework.js"}}, + {"/info/gordon/public", false, "/info/:user/public", map[string]string{"user": "gordon"}}, + {"/info/gordon/project/go", false, "/info/:user/project/:project", map[string]string{"user": "gordon", "project": "go"}}, + }) + + checkPriorities(t, tree) + checkMaxParams(t, tree) +} + +func catchPanic(testFunc func()) (recv interface{}) { + defer func() { + recv = recover() + }() + + testFunc() + return +} + +type testRoute struct { + path string + conflict bool +} + +func testRoutes(t *testing.T, routes []testRoute) { + tree := &node{} + + for _, route := range routes { + recv := catchPanic(func() { + tree.addRoute(route.path, nil) + }) + + if route.conflict { + if recv == nil { + t.Errorf("no panic for conflicting route '%s'", route.path) + } + } else if recv != nil { + t.Errorf("unexpected panic for route '%s': %v", route.path, 
recv) + } + } + + //printChildren(tree, "") +} + +func TestTreeWildcardConflict(t *testing.T) { + routes := []testRoute{ + {"/cmd/:tool/:sub", false}, + {"/cmd/vet", true}, + {"/src/*filepath", false}, + {"/src/*filepathx", true}, + {"/src/", true}, + {"/src1/", false}, + {"/src1/*filepath", true}, + {"/src2*filepath", true}, + {"/search/:query", false}, + {"/search/invalid", true}, + {"/user_:name", false}, + {"/user_x", true}, + {"/user_:name", false}, + {"/id:id", false}, + {"/id/:id", true}, + } + testRoutes(t, routes) +} + +func TestTreeChildConflict(t *testing.T) { + routes := []testRoute{ + {"/cmd/vet", false}, + {"/cmd/:tool/:sub", true}, + {"/src/AUTHORS", false}, + {"/src/*filepath", true}, + {"/user_x", false}, + {"/user_:name", true}, + {"/id/:id", false}, + {"/id:id", true}, + {"/:id", true}, + {"/*filepath", true}, + } + testRoutes(t, routes) +} + +func TestTreeDupliatePath(t *testing.T) { + tree := &node{} + + routes := [...]string{ + "/", + "/doc/", + "/src/*filepath", + "/search/:query", + "/user_:name", + } + for _, route := range routes { + recv := catchPanic(func() { + tree.addRoute(route, fakeHandler(route)) + }) + if recv != nil { + t.Fatalf("panic inserting route '%s': %v", route, recv) + } + + // Add again + recv = catchPanic(func() { + tree.addRoute(route, nil) + }) + if recv == nil { + t.Fatalf("no panic while inserting duplicate route '%s", route) + } + } + + //printChildren(tree, "") + + checkRequests(t, tree, testRequests{ + {"/", false, "/", nil}, + {"/doc/", false, "/doc/", nil}, + {"/src/some/file.png", false, "/src/*filepath", map[string]string{"filepath": "/some/file.png"}}, + {"/search/someth!ng+in+ünìcodé", false, "/search/:query", map[string]string{"query": "someth!ng+in+ünìcodé"}}, + {"/user_gopher", false, "/user_:name", map[string]string{"name": "gopher"}}, + }) +} + +func TestEmptyWildcardName(t *testing.T) { + tree := &node{} + + routes := [...]string{ + "/user:", + "/user:/", + "/cmd/:/", + "/src/*", + } + for _, route := range routes { + recv := catchPanic(func() { + tree.addRoute(route, nil) + }) + if recv == nil { + t.Fatalf("no panic while inserting route with empty wildcard name '%s", route) + } + } +} + +func TestTreeCatchAllConflict(t *testing.T) { + routes := []testRoute{ + {"/src/*filepath/x", true}, + {"/src2/", false}, + {"/src2/*filepath/x", true}, + } + testRoutes(t, routes) +} + +func TestTreeCatchAllConflictRoot(t *testing.T) { + routes := []testRoute{ + {"/", false}, + {"/*filepath", true}, + } + testRoutes(t, routes) +} + +func TestTreeDoubleWildcard(t *testing.T) { + const panicMsg = "only one wildcard per path segment is allowed" + + routes := [...]string{ + "/:foo:bar", + "/:foo:bar/", + "/:foo*bar", + } + + for _, route := range routes { + tree := &node{} + recv := catchPanic(func() { + tree.addRoute(route, nil) + }) + + if rs, ok := recv.(string); !ok || !strings.HasPrefix(rs, panicMsg) { + t.Fatalf(`"Expected panic "%s" for route '%s', got "%v"`, panicMsg, route, recv) + } + } +} + +/*func TestTreeDuplicateWildcard(t *testing.T) { + tree := &node{} + + routes := [...]string{ + "/:id/:name/:id", + } + for _, route := range routes { + ... 
+ } +}*/ + +func TestTreeTrailingSlashRedirect(t *testing.T) { + tree := &node{} + ctx := &fasthttp.RequestCtx{} + + routes := [...]string{ + "/hi", + "/b/", + "/search/:query", + "/cmd/:tool/", + "/src/*filepath", + "/x", + "/x/y", + "/y/", + "/y/z", + "/0/:id", + "/0/:id/1", + "/1/:id/", + "/1/:id/2", + "/aa", + "/a/", + "/admin", + "/admin/:category", + "/admin/:category/:page", + "/doc", + "/doc/go_faq.html", + "/doc/go1.html", + "/no/a", + "/no/b", + "/api/hello/:name", + } + for _, route := range routes { + recv := catchPanic(func() { + tree.addRoute(route, fakeHandler(route)) + }) + if recv != nil { + t.Fatalf("panic inserting route '%s': %v", route, recv) + } + } + + //printChildren(tree, "") + + tsrRoutes := [...]string{ + "/hi/", + "/b", + "/search/gopher/", + "/cmd/vet", + "/src", + "/x/", + "/y", + "/0/go/", + "/1/go", + "/a", + "/admin/", + "/admin/config/", + "/admin/config/permissions/", + "/doc/", + } + for _, route := range tsrRoutes { + handler, tsr := tree.getValue(route, ctx) + if handler != nil { + t.Fatalf("non-nil handler for TSR route '%s", route) + } else if !tsr { + t.Errorf("expected TSR recommendation for route '%s'", route) + } + } + + noTsrRoutes := [...]string{ + "/", + "/no", + "/no/", + "/_", + "/_/", + "/api/world/abc", + } + for _, route := range noTsrRoutes { + handler, tsr := tree.getValue(route, ctx) + if handler != nil { + t.Fatalf("non-nil handler for No-TSR route '%s", route) + } else if tsr { + t.Errorf("expected no TSR recommendation for route '%s'", route) + } + } +} + +func TestTreeRootTrailingSlashRedirect(t *testing.T) { + tree := &node{} + + recv := catchPanic(func() { + tree.addRoute("/:test", fakeHandler("/:test")) + }) + if recv != nil { + t.Fatalf("panic inserting test route: %v", recv) + } + + handler, tsr := tree.getValue("/", nil) + if handler != nil { + t.Fatalf("non-nil handler") + } else if tsr { + t.Errorf("expected no TSR recommendation") + } +} + +func TestTreeFindCaseInsensitivePath(t *testing.T) { + tree := &node{} + + routes := [...]string{ + "/hi", + "/b/", + "/ABC/", + "/search/:query", + "/cmd/:tool/", + "/src/*filepath", + "/x", + "/x/y", + "/y/", + "/y/z", + "/0/:id", + "/0/:id/1", + "/1/:id/", + "/1/:id/2", + "/aa", + "/a/", + "/doc", + "/doc/go_faq.html", + "/doc/go1.html", + "/doc/go/away", + "/no/a", + "/no/b", + "/Π", + "/u/apfêl/", + "/u/äpfêl/", + "/u/öpfêl", + "/v/Äpfêl/", + "/v/Öpfêl", + "/w/♬", // 3 byte + "/w/♭/", // 3 byte, last byte differs + "/w/𠜎", // 4 byte + "/w/𠜏/", // 4 byte + } + + for _, route := range routes { + recv := catchPanic(func() { + tree.addRoute(route, fakeHandler(route)) + }) + if recv != nil { + t.Fatalf("panic inserting route '%s': %v", route, recv) + } + } + + // Check out == in for all registered routes + // With fixTrailingSlash = true + for _, route := range routes { + out, found := tree.findCaseInsensitivePath(route, true) + if !found { + t.Errorf("Route '%s' not found!", route) + } else if string(out) != route { + t.Errorf("Wrong result for route '%s': %s", route, string(out)) + } + } + // With fixTrailingSlash = false + for _, route := range routes { + out, found := tree.findCaseInsensitivePath(route, false) + if !found { + t.Errorf("Route '%s' not found!", route) + } else if string(out) != route { + t.Errorf("Wrong result for route '%s': %s", route, string(out)) + } + } + + tests := []struct { + in string + out string + found bool + slash bool + }{ + {"/HI", "/hi", true, false}, + {"/HI/", "/hi", true, true}, + {"/B", "/b/", true, true}, + {"/B/", "/b/", true, false}, + {"/abc", 
"/ABC/", true, true}, + {"/abc/", "/ABC/", true, false}, + {"/aBc", "/ABC/", true, true}, + {"/aBc/", "/ABC/", true, false}, + {"/abC", "/ABC/", true, true}, + {"/abC/", "/ABC/", true, false}, + {"/SEARCH/QUERY", "/search/QUERY", true, false}, + {"/SEARCH/QUERY/", "/search/QUERY", true, true}, + {"/CMD/TOOL/", "/cmd/TOOL/", true, false}, + {"/CMD/TOOL", "/cmd/TOOL/", true, true}, + {"/SRC/FILE/PATH", "/src/FILE/PATH", true, false}, + {"/x/Y", "/x/y", true, false}, + {"/x/Y/", "/x/y", true, true}, + {"/X/y", "/x/y", true, false}, + {"/X/y/", "/x/y", true, true}, + {"/X/Y", "/x/y", true, false}, + {"/X/Y/", "/x/y", true, true}, + {"/Y/", "/y/", true, false}, + {"/Y", "/y/", true, true}, + {"/Y/z", "/y/z", true, false}, + {"/Y/z/", "/y/z", true, true}, + {"/Y/Z", "/y/z", true, false}, + {"/Y/Z/", "/y/z", true, true}, + {"/y/Z", "/y/z", true, false}, + {"/y/Z/", "/y/z", true, true}, + {"/Aa", "/aa", true, false}, + {"/Aa/", "/aa", true, true}, + {"/AA", "/aa", true, false}, + {"/AA/", "/aa", true, true}, + {"/aA", "/aa", true, false}, + {"/aA/", "/aa", true, true}, + {"/A/", "/a/", true, false}, + {"/A", "/a/", true, true}, + {"/DOC", "/doc", true, false}, + {"/DOC/", "/doc", true, true}, + {"/NO", "", false, true}, + {"/DOC/GO", "", false, true}, + {"/π", "/Π", true, false}, + {"/π/", "/Π", true, true}, + {"/u/ÄPFÊL/", "/u/äpfêl/", true, false}, + {"/u/ÄPFÊL", "/u/äpfêl/", true, true}, + {"/u/ÖPFÊL/", "/u/öpfêl", true, true}, + {"/u/ÖPFÊL", "/u/öpfêl", true, false}, + {"/v/äpfêL/", "/v/Äpfêl/", true, false}, + {"/v/äpfêL", "/v/Äpfêl/", true, true}, + {"/v/öpfêL/", "/v/Öpfêl", true, true}, + {"/v/öpfêL", "/v/Öpfêl", true, false}, + {"/w/♬/", "/w/♬", true, true}, + {"/w/♭", "/w/♭/", true, true}, + {"/w/𠜎/", "/w/𠜎", true, true}, + {"/w/𠜏", "/w/𠜏/", true, true}, + } + // With fixTrailingSlash = true + for _, test := range tests { + out, found := tree.findCaseInsensitivePath(test.in, true) + if found != test.found || (found && (string(out) != test.out)) { + t.Errorf("Wrong result for '%s': got %s, %t; want %s, %t", + test.in, string(out), found, test.out, test.found) + return + } + } + // With fixTrailingSlash = false + for _, test := range tests { + out, found := tree.findCaseInsensitivePath(test.in, false) + if test.slash { + if found { // test needs a trailingSlash fix. It must not be found! 
+ t.Errorf("Found without fixTrailingSlash: %s; got %s", test.in, string(out)) + } + } else { + if found != test.found || (found && (string(out) != test.out)) { + t.Errorf("Wrong result for '%s': got %s, %t; want %s, %t", + test.in, string(out), found, test.out, test.found) + return + } + } + } +} + +func TestTreeInvalidNodeType(t *testing.T) { + const panicMsg = "invalid node type" + + tree := &node{} + tree.addRoute("/", fakeHandler("/")) + tree.addRoute("/:page", fakeHandler("/:page")) + + // set invalid node type + tree.children[0].nType = 42 + + // normal lookup + recv := catchPanic(func() { + tree.getValue("/test", nil) + }) + if rs, ok := recv.(string); !ok || rs != panicMsg { + t.Fatalf("Expected panic '"+panicMsg+"', got '%v'", recv) + } + + // case-insensitive lookup + recv = catchPanic(func() { + tree.findCaseInsensitivePath("/test", true) + }) + if rs, ok := recv.(string); !ok || rs != panicMsg { + t.Fatalf("Expected panic '"+panicMsg+"', got '%v'", recv) + } +} diff --git a/vendor/github.com/valyala/bytebufferpool/.travis.yml b/vendor/github.com/valyala/bytebufferpool/.travis.yml new file mode 100644 index 0000000..6a6ec2e --- /dev/null +++ b/vendor/github.com/valyala/bytebufferpool/.travis.yml @@ -0,0 +1,15 @@ +language: go + +go: + - 1.6 + +script: + # build test for supported platforms + - GOOS=linux go build + - GOOS=darwin go build + - GOOS=freebsd go build + - GOOS=windows go build + - GOARCH=386 go build + + # run tests on a standard platform + - go test -v ./... diff --git a/vendor/github.com/valyala/bytebufferpool/LICENSE b/vendor/github.com/valyala/bytebufferpool/LICENSE new file mode 100644 index 0000000..f7c935c --- /dev/null +++ b/vendor/github.com/valyala/bytebufferpool/LICENSE @@ -0,0 +1,22 @@ +The MIT License (MIT) + +Copyright (c) 2016 Aliaksandr Valialkin, VertaMedia + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. 
+ diff --git a/vendor/github.com/valyala/bytebufferpool/README.md b/vendor/github.com/valyala/bytebufferpool/README.md new file mode 100644 index 0000000..061357e --- /dev/null +++ b/vendor/github.com/valyala/bytebufferpool/README.md @@ -0,0 +1,21 @@ +[![Build Status](https://travis-ci.org/valyala/bytebufferpool.svg)](https://travis-ci.org/valyala/bytebufferpool) +[![GoDoc](https://godoc.org/github.com/valyala/bytebufferpool?status.svg)](http://godoc.org/github.com/valyala/bytebufferpool) +[![Go Report](http://goreportcard.com/badge/valyala/bytebufferpool)](http://goreportcard.com/report/valyala/bytebufferpool) + +# bytebufferpool + +An implementation of a pool of byte buffers with anti-memory-waste protection. + +The pool may waste limited amount of memory due to fragmentation. +This amount equals to the maximum total size of the byte buffers +in concurrent use. + +# Benchmark results +Currently bytebufferpool is fastest and most effective buffer pool written in Go. + +You can find results [here](https://omgnull.github.io/go-benchmark/buffer/). + +# bytebufferpool users + +* [fasthttp](https://github.com/valyala/fasthttp) +* [quicktemplate](https://github.com/valyala/quicktemplate) diff --git a/vendor/github.com/valyala/bytebufferpool/bytebuffer.go b/vendor/github.com/valyala/bytebufferpool/bytebuffer.go new file mode 100644 index 0000000..07a055a --- /dev/null +++ b/vendor/github.com/valyala/bytebufferpool/bytebuffer.go @@ -0,0 +1,111 @@ +package bytebufferpool + +import "io" + +// ByteBuffer provides byte buffer, which can be used for minimizing +// memory allocations. +// +// ByteBuffer may be used with functions appending data to the given []byte +// slice. See example code for details. +// +// Use Get for obtaining an empty byte buffer. +type ByteBuffer struct { + + // B is a byte buffer to use in append-like workloads. + // See example code for details. + B []byte +} + +// Len returns the size of the byte buffer. +func (b *ByteBuffer) Len() int { + return len(b.B) +} + +// ReadFrom implements io.ReaderFrom. +// +// The function appends all the data read from r to b. +func (b *ByteBuffer) ReadFrom(r io.Reader) (int64, error) { + p := b.B + nStart := int64(len(p)) + nMax := int64(cap(p)) + n := nStart + if nMax == 0 { + nMax = 64 + p = make([]byte, nMax) + } else { + p = p[:nMax] + } + for { + if n == nMax { + nMax *= 2 + bNew := make([]byte, nMax) + copy(bNew, p) + p = bNew + } + nn, err := r.Read(p[n:]) + n += int64(nn) + if err != nil { + b.B = p[:n] + n -= nStart + if err == io.EOF { + return n, nil + } + return n, err + } + } +} + +// WriteTo implements io.WriterTo. +func (b *ByteBuffer) WriteTo(w io.Writer) (int64, error) { + n, err := w.Write(b.B) + return int64(n), err +} + +// Bytes returns b.B, i.e. all the bytes accumulated in the buffer. +// +// The purpose of this function is bytes.Buffer compatibility. +func (b *ByteBuffer) Bytes() []byte { + return b.B +} + +// Write implements io.Writer - it appends p to ByteBuffer.B +func (b *ByteBuffer) Write(p []byte) (int, error) { + b.B = append(b.B, p...) + return len(p), nil +} + +// WriteByte appends the byte c to the buffer. +// +// The purpose of this function is bytes.Buffer compatibility. +// +// The function always returns nil. +func (b *ByteBuffer) WriteByte(c byte) error { + b.B = append(b.B, c) + return nil +} + +// WriteString appends s to ByteBuffer.B. +func (b *ByteBuffer) WriteString(s string) (int, error) { + b.B = append(b.B, s...) + return len(s), nil +} + +// Set sets ByteBuffer.B to p. 
+func (b *ByteBuffer) Set(p []byte) { + b.B = append(b.B[:0], p...) +} + +// SetString sets ByteBuffer.B to s. +func (b *ByteBuffer) SetString(s string) { + b.B = append(b.B[:0], s...) +} + +// String returns string representation of ByteBuffer.B. +func (b *ByteBuffer) String() string { + return string(b.B) +} + +// Reset makes ByteBuffer.B empty. +func (b *ByteBuffer) Reset() { + b.B = b.B[:0] +} diff --git a/vendor/github.com/valyala/bytebufferpool/bytebuffer_example_test.go b/vendor/github.com/valyala/bytebufferpool/bytebuffer_example_test.go new file mode 100644 index 0000000..1cbaaf5 --- /dev/null +++ b/vendor/github.com/valyala/bytebufferpool/bytebuffer_example_test.go @@ -0,0 +1,21 @@ +package bytebufferpool_test + +import ( + "fmt" + + "github.com/valyala/bytebufferpool" +) + +func ExampleByteBuffer() { + bb := bytebufferpool.Get() + + bb.WriteString("first line\n") + bb.Write([]byte("second line\n")) + bb.B = append(bb.B, "third line\n"...) + + fmt.Printf("bytebuffer contents=%q", bb.B) + + // It is safe to release byte buffer now, since it is + // no longer used. + bytebufferpool.Put(bb) +} diff --git a/vendor/github.com/valyala/bytebufferpool/bytebuffer_test.go b/vendor/github.com/valyala/bytebufferpool/bytebuffer_test.go new file mode 100644 index 0000000..7bb658f --- /dev/null +++ b/vendor/github.com/valyala/bytebufferpool/bytebuffer_test.go @@ -0,0 +1,138 @@ +package bytebufferpool + +import ( + "bytes" + "fmt" + "io" + "testing" + "time" +) + +func TestByteBufferReadFrom(t *testing.T) { + prefix := "foobar" + expectedS := "asadfsdafsadfasdfisdsdfa" + prefixLen := int64(len(prefix)) + expectedN := int64(len(expectedS)) + + var bb ByteBuffer + bb.WriteString(prefix) + + rf := (io.ReaderFrom)(&bb) + for i := 0; i < 20; i++ { + r := bytes.NewBufferString(expectedS) + n, err := rf.ReadFrom(r) + if n != expectedN { + t.Fatalf("unexpected n=%d. Expecting %d. iteration %d", n, expectedN, i) + } + if err != nil { + t.Fatalf("unexpected error: %s", err) + } + bbLen := int64(bb.Len()) + expectedLen := prefixLen + int64(i+1)*expectedN + if bbLen != expectedLen { + t.Fatalf("unexpected byteBuffer length: %d. Expecting %d", bbLen, expectedLen) + } + for j := 0; j < i; j++ { + start := prefixLen + int64(j)*expectedN + b := bb.B[start : start+expectedN] + if string(b) != expectedS { + t.Fatalf("unexpected byteBuffer contents: %q. Expecting %q", b, expectedS) + } + } + } +} + +func TestByteBufferWriteTo(t *testing.T) { + expectedS := "foobarbaz" + var bb ByteBuffer + bb.WriteString(expectedS[:3]) + bb.WriteString(expectedS[3:]) + + wt := (io.WriterTo)(&bb) + var w bytes.Buffer + for i := 0; i < 10; i++ { + n, err := wt.WriteTo(&w) + if n != int64(len(expectedS)) { + t.Fatalf("unexpected n returned from WriteTo: %d. Expecting %d", n, len(expectedS)) + } + if err != nil { + t.Fatalf("unexpected error: %s", err) + } + s := string(w.Bytes()) + if s != expectedS { + t.Fatalf("unexpected string written %q. 
Expecting %q", s, expectedS) + } + w.Reset() + } +} + +func TestByteBufferGetPutSerial(t *testing.T) { + testByteBufferGetPut(t) +} + +func TestByteBufferGetPutConcurrent(t *testing.T) { + concurrency := 10 + ch := make(chan struct{}, concurrency) + for i := 0; i < concurrency; i++ { + go func() { + testByteBufferGetPut(t) + ch <- struct{}{} + }() + } + + for i := 0; i < concurrency; i++ { + select { + case <-ch: + case <-time.After(time.Second): + t.Fatalf("timeout!") + } + } +} + +func testByteBufferGetPut(t *testing.T) { + for i := 0; i < 10; i++ { + expectedS := fmt.Sprintf("num %d", i) + b := Get() + b.B = append(b.B, "num "...) + b.B = append(b.B, fmt.Sprintf("%d", i)...) + if string(b.B) != expectedS { + t.Fatalf("unexpected result: %q. Expecting %q", b.B, expectedS) + } + Put(b) + } +} + +func testByteBufferGetString(t *testing.T) { + for i := 0; i < 10; i++ { + expectedS := fmt.Sprintf("num %d", i) + b := Get() + b.SetString(expectedS) + if b.String() != expectedS { + t.Fatalf("unexpected result: %q. Expecting %q", b.B, expectedS) + } + Put(b) + } +} + +func TestByteBufferGetStringSerial(t *testing.T) { + testByteBufferGetString(t) +} + +func TestByteBufferGetStringConcurrent(t *testing.T) { + concurrency := 10 + ch := make(chan struct{}, concurrency) + for i := 0; i < concurrency; i++ { + go func() { + testByteBufferGetString(t) + ch <- struct{}{} + }() + } + + for i := 0; i < concurrency; i++ { + select { + case <-ch: + case <-time.After(time.Second): + t.Fatalf("timeout!") + } + } +} diff --git a/vendor/github.com/valyala/bytebufferpool/bytebuffer_timing_test.go b/vendor/github.com/valyala/bytebufferpool/bytebuffer_timing_test.go new file mode 100644 index 0000000..29f92de --- /dev/null +++ b/vendor/github.com/valyala/bytebufferpool/bytebuffer_timing_test.go @@ -0,0 +1,32 @@ +package bytebufferpool + +import ( + "bytes" + "testing" +) + +func BenchmarkByteBufferWrite(b *testing.B) { + s := []byte("foobarbaz") + b.RunParallel(func(pb *testing.PB) { + var buf ByteBuffer + for pb.Next() { + for i := 0; i < 100; i++ { + buf.Write(s) + } + buf.Reset() + } + }) +} + +func BenchmarkBytesBufferWrite(b *testing.B) { + s := []byte("foobarbaz") + b.RunParallel(func(pb *testing.PB) { + var buf bytes.Buffer + for pb.Next() { + for i := 0; i < 100; i++ { + buf.Write(s) + } + buf.Reset() + } + }) +} diff --git a/vendor/github.com/valyala/bytebufferpool/doc.go b/vendor/github.com/valyala/bytebufferpool/doc.go new file mode 100644 index 0000000..e511b7c --- /dev/null +++ b/vendor/github.com/valyala/bytebufferpool/doc.go @@ -0,0 +1,7 @@ +// Package bytebufferpool implements a pool of byte buffers +// with anti-fragmentation protection. +// +// The pool may waste limited amount of memory due to fragmentation. +// This amount equals to the maximum total size of the byte buffers +// in concurrent use. +package bytebufferpool diff --git a/vendor/github.com/valyala/bytebufferpool/pool.go b/vendor/github.com/valyala/bytebufferpool/pool.go new file mode 100644 index 0000000..8bb4134 --- /dev/null +++ b/vendor/github.com/valyala/bytebufferpool/pool.go @@ -0,0 +1,151 @@ +package bytebufferpool + +import ( + "sort" + "sync" + "sync/atomic" +) + +const ( + minBitSize = 6 // 2**6=64 is a CPU cache line size + steps = 20 + + minSize = 1 << minBitSize + maxSize = 1 << (minBitSize + steps - 1) + + calibrateCallsThreshold = 42000 + maxPercentile = 0.95 +) + +// Pool represents byte buffer pool. +// +// Distinct pools may be used for distinct types of byte buffers. 
+// Properly determined byte buffer types with their own pools may help reducing +// memory waste. +type Pool struct { + calls [steps]uint64 + calibrating uint64 + + defaultSize uint64 + maxSize uint64 + + pool sync.Pool +} + +var defaultPool Pool + +// Get returns an empty byte buffer from the pool. +// +// Got byte buffer may be returned to the pool via Put call. +// This reduces the number of memory allocations required for byte buffer +// management. +func Get() *ByteBuffer { return defaultPool.Get() } + +// Get returns new byte buffer with zero length. +// +// The byte buffer may be returned to the pool via Put after the use +// in order to minimize GC overhead. +func (p *Pool) Get() *ByteBuffer { + v := p.pool.Get() + if v != nil { + return v.(*ByteBuffer) + } + return &ByteBuffer{ + B: make([]byte, 0, atomic.LoadUint64(&p.defaultSize)), + } +} + +// Put returns byte buffer to the pool. +// +// ByteBuffer.B mustn't be touched after returning it to the pool. +// Otherwise data races will occur. +func Put(b *ByteBuffer) { defaultPool.Put(b) } + +// Put releases byte buffer obtained via Get to the pool. +// +// The buffer mustn't be accessed after returning to the pool. +func (p *Pool) Put(b *ByteBuffer) { + idx := index(len(b.B)) + + if atomic.AddUint64(&p.calls[idx], 1) > calibrateCallsThreshold { + p.calibrate() + } + + maxSize := int(atomic.LoadUint64(&p.maxSize)) + if maxSize == 0 || cap(b.B) <= maxSize { + b.Reset() + p.pool.Put(b) + } +} + +func (p *Pool) calibrate() { + if !atomic.CompareAndSwapUint64(&p.calibrating, 0, 1) { + return + } + + a := make(callSizes, 0, steps) + var callsSum uint64 + for i := uint64(0); i < steps; i++ { + calls := atomic.SwapUint64(&p.calls[i], 0) + callsSum += calls + a = append(a, callSize{ + calls: calls, + size: minSize << i, + }) + } + sort.Sort(a) + + defaultSize := a[0].size + maxSize := defaultSize + + maxSum := uint64(float64(callsSum) * maxPercentile) + callsSum = 0 + for i := 0; i < steps; i++ { + if callsSum > maxSum { + break + } + callsSum += a[i].calls + size := a[i].size + if size > maxSize { + maxSize = size + } + } + + atomic.StoreUint64(&p.defaultSize, defaultSize) + atomic.StoreUint64(&p.maxSize, maxSize) + + atomic.StoreUint64(&p.calibrating, 0) +} + +type callSize struct { + calls uint64 + size uint64 +} + +type callSizes []callSize + +func (ci callSizes) Len() int { + return len(ci) +} + +func (ci callSizes) Less(i, j int) bool { + return ci[i].calls > ci[j].calls +} + +func (ci callSizes) Swap(i, j int) { + ci[i], ci[j] = ci[j], ci[i] +} + +func index(n int) int { + n-- + n >>= minBitSize + idx := 0 + for n > 0 { + n >>= 1 + idx++ + } + if idx >= steps { + idx = steps - 1 + } + return idx +} diff --git a/vendor/github.com/valyala/bytebufferpool/pool_test.go b/vendor/github.com/valyala/bytebufferpool/pool_test.go new file mode 100644 index 0000000..6d3bcb8 --- /dev/null +++ b/vendor/github.com/valyala/bytebufferpool/pool_test.go @@ -0,0 +1,94 @@ +package bytebufferpool + +import ( + "math/rand" + "testing" + "time" +) + +func TestIndex(t *testing.T) { + testIndex(t, 0, 0) + testIndex(t, 1, 0) + + testIndex(t, minSize-1, 0) + testIndex(t, minSize, 0) + testIndex(t, minSize+1, 1) + + testIndex(t, 2*minSize-1, 1) + testIndex(t, 2*minSize, 1) + testIndex(t, 2*minSize+1, 2) + + testIndex(t, maxSize-1, steps-1) + testIndex(t, maxSize, steps-1) + testIndex(t, maxSize+1, steps-1) +} + +func testIndex(t *testing.T, n, expectedIdx int) { + idx := index(n) + if idx != expectedIdx { + t.Fatalf("unexpected idx for n=%d: %d. 
Expecting %d", n, idx, expectedIdx) + } +} + +func TestPoolCalibrate(t *testing.T) { + for i := 0; i < steps*calibrateCallsThreshold; i++ { + n := 1004 + if i%15 == 0 { + n = rand.Intn(15234) + } + testGetPut(t, n) + } +} + +func TestPoolVariousSizesSerial(t *testing.T) { + testPoolVariousSizes(t) +} + +func TestPoolVariousSizesConcurrent(t *testing.T) { + concurrency := 5 + ch := make(chan struct{}) + for i := 0; i < concurrency; i++ { + go func() { + testPoolVariousSizes(t) + ch <- struct{}{} + }() + } + for i := 0; i < concurrency; i++ { + select { + case <-ch: + case <-time.After(3 * time.Second): + t.Fatalf("timeout") + } + } +} + +func testPoolVariousSizes(t *testing.T) { + for i := 0; i < steps+1; i++ { + n := (1 << uint32(i)) + + testGetPut(t, n) + testGetPut(t, n+1) + testGetPut(t, n-1) + + for j := 0; j < 10; j++ { + testGetPut(t, j+n) + } + } +} + +func testGetPut(t *testing.T, n int) { + bb := Get() + if len(bb.B) > 0 { + t.Fatalf("non-empty byte buffer returned from acquire") + } + bb.B = allocNBytes(bb.B, n) + Put(bb) +} + +func allocNBytes(dst []byte, n int) []byte { + diff := n - cap(dst) + if diff <= 0 { + return dst[:n] + } + return append(dst, make([]byte, diff)...) +}
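
For reference, a minimal sketch of how the vendored packages above fit together in a fasthttp service. The package layout, route, port and handler names are illustrative assumptions and not part of this change; only the calls used (fasthttprouter.New, Router.GET, Router.Handler, RequestCtx.UserValue, RequestCtx.SetBody, bytebufferpool.Get/Put and fasthttp.ListenAndServe) come from the vendored sources.

package main

import (
	"log"

	"github.com/erikdubbelboer/fasthttp"
	"github.com/thehowl/fasthttprouter"
	"github.com/valyala/bytebufferpool"
)

// hello renders the response body into a pooled buffer so that per-request
// allocations are avoided; the buffer is returned to the pool afterwards.
// The route "/hello/:name" and the port below are illustrative only.
func hello(ctx *fasthttp.RequestCtx) {
	bb := bytebufferpool.Get()
	defer bytebufferpool.Put(bb)

	bb.WriteString("hello, ")
	// ":name" was captured by the router and stored on the RequestCtx.
	bb.WriteString(ctx.UserValue("name").(string))

	// SetBody copies the bytes, so the pooled buffer can be reused safely.
	ctx.SetBody(bb.B)
}

func main() {
	router := fasthttprouter.New()
	router.GET("/hello/:name", hello)

	log.Fatal(fasthttp.ListenAndServe(":8080", router.Handler))
}

bytebufferpool is pulled in here because fasthttp depends on it; reusing it for handler-side body building, as sketched above, keeps per-request allocations down, although plain ctx.WriteString is simpler for small static responses.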