From 304768b4e1cbe36cbcbefd958ef13b757b44a290 Mon Sep 17 00:00:00 2001
From: Eng Zer Jun
Date: Sat, 18 Jan 2025 04:25:27 +0800
Subject: [PATCH] build(deps): upgrade github.com/dgraph-io/badger to v4

Despite the major version bump, there are no breaking changes that
affect our existing code. From the changelog of BadgerDB 4.0.0:

"This is a major release because we are making a switch to SemVer in
order to make it easier for the community to understand when breaking
API and data format changes are made."

Reference: https://github.com/dgraph-io/badger/blob/ec1a9f3ae8c41edb50703493f1e4991c0641e6a8/CHANGELOG.md

A minimal consumer-side sketch illustrating the unchanged API is
appended after the diff below.

Signed-off-by: Eng Zer Jun
---
 CHANGELOG.md | 4 +
 go.mod | 15 +-
 go.sum | 81 +-
 v1/storage/disk/config.go | 2 +-
 v1/storage/disk/config_test.go | 2 +-
 v1/storage/disk/disk.go | 2 +-
 v1/storage/disk/disk_test.go | 2 +-
 v1/storage/disk/txn.go | 2 +-
 v1/storage/disk/txn_test.go | 2 +-
 vendor/github.com/cespare/xxhash/LICENSE.txt | 22 -
 vendor/github.com/cespare/xxhash/README.md | 50 -
 vendor/github.com/cespare/xxhash/rotate.go | 14 -
 vendor/github.com/cespare/xxhash/rotate19.go | 14 -
 vendor/github.com/cespare/xxhash/xxhash.go | 168 -
 .../github.com/cespare/xxhash/xxhash_amd64.go | 12 -
 .../github.com/cespare/xxhash/xxhash_amd64.s | 233 -
 .../github.com/cespare/xxhash/xxhash_other.go | 75 -
 .../github.com/cespare/xxhash/xxhash_safe.go | 10 -
 .../cespare/xxhash/xxhash_unsafe.go | 30 -
 .../dgraph-io/badger/v3/.go-version | 1 -
 .../dgraph-io/badger/v3/.golangci.yml | 21 -
 .../dgraph-io/badger/v3/appveyor.yml | 49 -
 .../dgraph-io/badger/v3/pb/badgerpb3.pb.go | 2164 --
 .../github.com/dgraph-io/badger/v3/pb/gen.sh | 8 -
 .../dgraph-io/badger/v3/y/metrics.go | 169 -
 .../badger/{v3 => v4}/.deepsource.toml | 0
 .../dgraph-io/badger/{v3 => v4}/.gitignore | 0
 .../dgraph-io/badger/v4/.golangci.yml | 32 +
 .../dgraph-io/badger/{v3 => v4}/CHANGELOG.md | 236 +-
 .../badger/{v3 => v4}/CODE_OF_CONDUCT.md | 0
 .../badger/{v3 => v4}/CONTRIBUTING.md | 0
 .../dgraph-io/badger/{v3 => v4}/LICENSE | 0
 .../dgraph-io/badger/{v3 => v4}/Makefile | 1 -
 .../dgraph-io/badger/{v3 => v4}/README.md | 61 +-
 .../dgraph-io/badger/{v3 => v4}/VERSIONING.md | 2 +-
 .../dgraph-io/badger/{v3 => v4}/backup.go | 12 +-
 .../dgraph-io/badger/{v3 => v4}/batch.go | 10 +-
 .../dgraph-io/badger/{v3 => v4}/changes.sh | 0
 .../dgraph-io/badger/{v3 => v4}/compaction.go | 4 +-
 .../dgraph-io/badger/{v3 => v4}/db.go | 271 +-
 .../dgraph-io/badger/{v3 => v4}/dir_plan9.go | 8 +-
 .../dgraph-io/badger/{v3 => v4}/dir_unix.go | 7 +-
 .../badger/{v3 => v4}/dir_windows.go | 3 +-
 .../dgraph-io/badger/{v3 => v4}/discard.go | 12 +-
 .../dgraph-io/badger/{v3 => v4}/doc.go | 3 +-
 .../dgraph-io/badger/{v3 => v4}/errors.go | 57 +-
 .../badger/{v3 => v4}/fb/BlockOffset.go | 0
 .../badger/{v3 => v4}/fb/TableIndex.go | 0
 .../badger/{v3 => v4}/fb/flatbuffer.fbs | 0
 .../dgraph-io/badger/{v3 => v4}/fb/gen.sh | 0
 .../{v3 => v4}/fb/install_flatbuffers.sh | 0
 .../dgraph-io/badger/{v3 => v4}/histogram.go | 2 +-
 .../dgraph-io/badger/{v3 => v4}/iterator.go | 42 +-
 .../badger/{v3 => v4}/key_registry.go | 9 +-
 .../badger/{v3 => v4}/level_handler.go | 8 +-
 .../dgraph-io/badger/{v3 => v4}/levels.go | 105 +-
 .../dgraph-io/badger/{v3 => v4}/logger.go | 0
 .../dgraph-io/badger/{v3 => v4}/managed_db.go | 0
 .../dgraph-io/badger/{v3 => v4}/manifest.go | 67 +-
 .../dgraph-io/badger/{v3 => v4}/memtable.go | 39 +-
 .../dgraph-io/badger/{v3 => v4}/merge.go | 8 +-
 .../dgraph-io/badger/{v3 => v4}/options.go | 45 +-
 .../badger/{v3 => v4}/options/options.go | 0
 .../dgraph-io/badger/v4/pb/badgerpb4.pb.go | 870 +
 .../badgerpb3.proto => v4/pb/badgerpb4.proto} | 8 +-
 .../github.com/dgraph-io/badger/v4/pb/gen.sh | 7 +
 .../dgraph-io/badger/{v3 => v4}/publisher.go | 33 +-
 .../dgraph-io/badger/{v3 => v4}/skl/README.md | 0
 .../dgraph-io/badger/{v3 => v4}/skl/arena.go | 20 +-
 .../dgraph-io/badger/{v3 => v4}/skl/skl.go | 44 +-
 .../dgraph-io/badger/{v3 => v4}/stream.go | 50 +-
 .../badger/{v3 => v4}/stream_writer.go | 97 +-
 .../dgraph-io/badger/{v3 => v4}/structs.go | 5 +-
 .../badger/{v3 => v4}/table/README.md | 0
 .../badger/{v3 => v4}/table/builder.go | 48 +-
 .../badger/{v3 => v4}/table/iterator.go | 8 +-
 .../badger/{v3 => v4}/table/merge_iterator.go | 4 +-
 .../badger/{v3 => v4}/table/table.go | 94 +-
 .../dgraph-io/badger/{v3 => v4}/test.sh | 35 +-
 .../dgraph-io/badger/v4/test_extensions.go | 79 +
 .../dgraph-io/badger/{v3 => v4}/trie/trie.go | 7 +-
 .../dgraph-io/badger/{v3 => v4}/txn.go | 21 +-
 .../dgraph-io/badger/{v3 => v4}/util.go | 12 +-
 .../dgraph-io/badger/{v3 => v4}/value.go | 103 +-
 .../dgraph-io/badger/{v3 => v4}/y/bloom.go | 2 +-
 .../dgraph-io/badger/{v3 => v4}/y/checksum.go | 8 +-
 .../dgraph-io/badger/{v3 => v4}/y/encrypt.go | 0
 .../dgraph-io/badger/{v3 => v4}/y/error.go | 13 +
 .../badger/{v3 => v4}/y/event_log.go | 0
 .../badger/{v3 => v4}/y/file_dsync.go | 1 +
 .../badger/{v3 => v4}/y/file_nodsync.go | 1 +
 .../dgraph-io/badger/{v3 => v4}/y/iterator.go | 0
 .../dgraph-io/badger/v4/y/metrics.go | 223 +
 .../badger/{v3 => v4}/y/watermark.go | 27 +-
 .../dgraph-io/badger/{v3 => v4}/y/y.go | 24 +-
 .../dgraph-io/badger/{v3 => v4}/y/zstd.go | 0
 .../dgraph-io/ristretto/.deepsource.toml | 17 -
 .../dgraph-io/ristretto/.go-version | 1 -
 .../dgraph-io/ristretto/.golangci.yml | 23 -
 .../github.com/dgraph-io/ristretto/README.md | 220 -
 .../dgraph-io/ristretto/v2/.gitignore | 3 +
 .../dgraph-io/ristretto/v2/.golangci.yml | 30 +
 .../dgraph-io/ristretto/{ => v2}/CHANGELOG.md | 64 +-
 .../dgraph-io/ristretto/{ => v2}/LICENSE | 0
 .../dgraph-io/ristretto/v2/README.md | 154 +
 .../dgraph-io/ristretto/{ => v2}/cache.go | 260 +-
 .../dgraph-io/ristretto/{ => v2}/policy.go | 82 +-
 .../dgraph-io/ristretto/{ => v2}/ring.go | 0
 .../dgraph-io/ristretto/{ => v2}/sketch.go | 10 +-
 .../dgraph-io/ristretto/{ => v2}/store.go | 99 +-
 .../dgraph-io/ristretto/{ => v2}/ttl.go | 84 +-
 .../dgraph-io/ristretto/{ => v2}/z/LICENSE | 0
 .../dgraph-io/ristretto/{ => v2}/z/README.md | 32 +-
 .../ristretto/{ => v2}/z/allocator.go | 4 +-
 .../dgraph-io/ristretto/{ => v2}/z/bbloom.go | 7 +-
 .../dgraph-io/ristretto/{ => v2}/z/btree.go | 5 +-
 .../dgraph-io/ristretto/{ => v2}/z/buffer.go | 36 +-
 .../dgraph-io/ristretto/{ => v2}/z/calloc.go | 0
 .../ristretto/{ => v2}/z/calloc_32bit.go | 0
 .../ristretto/{ => v2}/z/calloc_64bit.go | 1 +
 .../ristretto/{ => v2}/z/calloc_jemalloc.go | 0
 .../ristretto/{ => v2}/z/calloc_nojemalloc.go | 3 +-
 .../dgraph-io/ristretto/{ => v2}/z/file.go | 4 +-
 .../ristretto/{ => v2}/z/file_default.go | 1 +
 .../ristretto/{ => v2}/z/file_linux.go | 0
 .../dgraph-io/ristretto/{ => v2}/z/flags.go | 22 +-
 .../ristretto/{ => v2}/z/histogram.go | 0
 .../dgraph-io/ristretto/{ => v2}/z/mmap.go | 0
 .../ristretto/{ => v2}/z/mmap_darwin.go | 0
 .../dgraph-io/ristretto/v2/z/mmap_js.go | 40 +
 .../ristretto/{ => v2}/z/mmap_linux.go | 3 +
 .../ristretto/{ => v2}/z/mmap_plan9.go | 0
 .../ristretto/{ => v2}/z/mmap_unix.go | 3 +-
 .../dgraph-io/ristretto/v2/z/mmap_wasip1.go | 40 +
 .../ristretto/{ => v2}/z/mmap_windows.go | 0
 .../z/mremap_nosize.go} | 5 +
 .../mremap_linux.go => v2/z/mremap_size.go} | 3 +-
 .../dgraph-io/ristretto/{ => v2}/z/rtutil.go | 3 +
 .../dgraph-io/ristretto/{ => v2}/z/rtutil.s | 0
 .../ristretto/{ => v2}/z/simd/baseline.go | 3 +
 .../ristretto/{ => v2}/z/simd/search.go | 3 +-
 .../ristretto/{ => v2}/z/simd/search_amd64.s | 0
 .../{ => v2}/z/simd/stub_search_amd64.go | 0
 .../dgraph-io/ristretto/{ => v2}/z/z.go | 27 +-
 .../github.com/dustin/go-humanize/.travis.yml | 16 +-
 .../dustin/go-humanize/README.markdown | 2 +-
 .../github.com/dustin/go-humanize/bigbytes.go | 20 +-
 .../github.com/dustin/go-humanize/commaf.go | 1 +
 vendor/github.com/dustin/go-humanize/ftoa.go | 3 +
 .../github.com/dustin/go-humanize/number.go | 2 +-
 vendor/github.com/dustin/go-humanize/si.go | 4 +
 vendor/github.com/gogo/protobuf/AUTHORS | 15 -
 vendor/github.com/gogo/protobuf/CONTRIBUTORS | 23 -
 .../github.com/gogo/protobuf/proto/Makefile | 43 -
 .../github.com/gogo/protobuf/proto/clone.go | 258 -
 .../gogo/protobuf/proto/custom_gogo.go | 39 -
 .../github.com/gogo/protobuf/proto/decode.go | 427 -
 .../gogo/protobuf/proto/deprecated.go | 63 -
 .../github.com/gogo/protobuf/proto/discard.go | 350 -
 .../gogo/protobuf/proto/duration.go | 100 -
 .../gogo/protobuf/proto/duration_gogo.go | 49 -
 .../github.com/gogo/protobuf/proto/encode.go | 205 -
 .../gogo/protobuf/proto/encode_gogo.go | 33 -
 .../github.com/gogo/protobuf/proto/equal.go | 300 -
 .../gogo/protobuf/proto/extensions.go | 605 -
 .../gogo/protobuf/proto/extensions_gogo.go | 389 -
 vendor/github.com/gogo/protobuf/proto/lib.go | 973 -
 .../gogo/protobuf/proto/lib_gogo.go | 50 -
 .../gogo/protobuf/proto/message_set.go | 181 -
 .../gogo/protobuf/proto/pointer_reflect.go | 357 -
 .../protobuf/proto/pointer_reflect_gogo.go | 59 -
 .../gogo/protobuf/proto/pointer_unsafe.go | 308 -
 .../protobuf/proto/pointer_unsafe_gogo.go | 56 -
 .../gogo/protobuf/proto/properties.go | 610 -
 .../gogo/protobuf/proto/properties_gogo.go | 36 -
 .../gogo/protobuf/proto/skip_gogo.go | 119 -
 .../gogo/protobuf/proto/table_marshal.go | 3009 ---
 .../gogo/protobuf/proto/table_marshal_gogo.go | 388 -
 .../gogo/protobuf/proto/table_merge.go | 676 -
 .../gogo/protobuf/proto/table_unmarshal.go | 2249 --
 .../protobuf/proto/table_unmarshal_gogo.go | 385 -
 vendor/github.com/gogo/protobuf/proto/text.go | 930 -
 .../gogo/protobuf/proto/text_gogo.go | 57 -
 .../gogo/protobuf/proto/text_parser.go | 1018 -
 .../gogo/protobuf/proto/timestamp.go | 113 -
 .../gogo/protobuf/proto/timestamp_gogo.go | 49 -
 .../gogo/protobuf/proto/wrappers.go | 1888 --
 .../gogo/protobuf/proto/wrappers_gogo.go | 113 -
 vendor/github.com/golang/glog/LICENSE | 191 -
 vendor/github.com/golang/glog/README.md | 36 -
 vendor/github.com/golang/glog/glog.go | 777 -
 vendor/github.com/golang/glog/glog_file.go | 418 -
 .../github.com/golang/glog/glog_file_linux.go | 39 -
 .../golang/glog/glog_file_nonwindows.go | 12 -
 .../github.com/golang/glog/glog_file_other.go | 30 -
 .../github.com/golang/glog/glog_file_posix.go | 53 -
 .../golang/glog/glog_file_windows.go | 30 -
 vendor/github.com/golang/glog/glog_flags.go | 398 -
 .../golang/glog/internal/logsink/logsink.go | 393 -
 .../glog/internal/logsink/logsink_fatal.go | 35 -
 .../glog/internal/stackdump/stackdump.go | 127 -
 vendor/github.com/golang/snappy/README | 107 -
 vendor/github.com/golang/snappy/decode.go | 264 -
 .../github.com/golang/snappy/decode_other.go | 115 -
 vendor/github.com/golang/snappy/encode.go | 289 -
 .../github.com/golang/snappy/encode_amd64.s | 730 -
 .../github.com/golang/snappy/encode_arm64.s | 722 -
 vendor/github.com/golang/snappy/encode_asm.go | 30 -
 .../github.com/golang/snappy/encode_other.go | 238 -
 .../flatbuffers/{LICENSE.txt => LICENSE} | 0
 .../google/flatbuffers/go/builder.go | 72 +-
 .../google/flatbuffers/go/encode.go | 2 +-
 .../github.com/google/flatbuffers/go/lib.go | 37 +
 .../klauspost/compress/.goreleaser.yml | 6 +-
 .../github.com/klauspost/compress/README.md | 29 +-
 .../klauspost/compress/fse/decompress.go | 2 +-
 .../klauspost/compress/huff0/decompress.go | 4 +-
 .../compress/internal/race/norace.go | 13 +
 .../klauspost/compress/internal/race/race.go | 26 +
 .../klauspost/compress/s2/.gitignore | 15 +
 .../compress/s2}/LICENSE | 17 +-
 .../klauspost/compress/s2/README.md | 1120 +
 .../klauspost/compress/s2/decode.go | 443 +
 .../compress/s2}/decode_amd64.s | 424 +-
 .../compress/s2}/decode_arm64.s | 426 +-
 .../compress/s2}/decode_asm.go | 8 +-
 .../klauspost/compress/s2/decode_other.go | 292 +
 .../github.com/klauspost/compress/s2/dict.go | 350 +
 .../klauspost/compress/s2/encode.go | 414 +
 .../klauspost/compress/s2/encode_all.go | 1068 +
 .../klauspost/compress/s2/encode_amd64.go | 317 +
 .../klauspost/compress/s2/encode_best.go | 796 +
 .../klauspost/compress/s2/encode_better.go | 1106 +
 .../klauspost/compress/s2/encode_go.go | 729 +
 .../compress/s2/encodeblock_amd64.go | 228 +
 .../klauspost/compress/s2/encodeblock_amd64.s | 21303 ++++++++++++++++
 .../github.com/klauspost/compress/s2/index.go | 602 +
 .../klauspost/compress/s2/lz4convert.go | 585 +
 .../klauspost/compress/s2/lz4sconvert.go | 467 +
 .../klauspost/compress/s2/reader.go | 1075 +
 vendor/github.com/klauspost/compress/s2/s2.go | 151 +
 .../klauspost/compress/s2/writer.go | 1064 +
 .../compress}/snappy/.gitignore | 0
 .../compress}/snappy/AUTHORS | 0
 .../compress}/snappy/CONTRIBUTORS | 0
 .../compress}/snappy/LICENSE | 0
 .../klauspost/compress/snappy/README.md | 17 +
 .../klauspost/compress/snappy/decode.go | 60 +
 .../klauspost/compress/snappy/encode.go | 59 +
 .../compress}/snappy/snappy.go | 54 +-
 .../klauspost/compress/zstd/blockdec.go | 4 +-
 .../klauspost/compress/zstd/enc_better.go | 32 +-
 .../klauspost/compress/zstd/enc_dfast.go | 16 +-
 .../klauspost/compress/zstd/encoder.go | 45 +-
 .../klauspost/compress/zstd/framedec.go | 4 +-
 .../klauspost/compress/zstd/seqdec_amd64.go | 4 +-
 .../klauspost/compress/zstd/seqdec_amd64.s | 8 +-
 .../klauspost/compress/zstd/zstd.go | 4 +
 vendor/modules.txt | 59 +-
 259 files changed, 36118 insertions(+), 26376 deletions(-)

 delete mode 100644 vendor/github.com/cespare/xxhash/LICENSE.txt
 delete mode 100644 vendor/github.com/cespare/xxhash/README.md
 delete mode 100644 vendor/github.com/cespare/xxhash/rotate.go
 delete mode 100644 vendor/github.com/cespare/xxhash/rotate19.go
 delete mode 100644 vendor/github.com/cespare/xxhash/xxhash.go
 delete mode 100644 vendor/github.com/cespare/xxhash/xxhash_amd64.go
 delete mode 100644 vendor/github.com/cespare/xxhash/xxhash_amd64.s
 delete mode 100644 vendor/github.com/cespare/xxhash/xxhash_other.go
 delete mode 100644 vendor/github.com/cespare/xxhash/xxhash_safe.go
 delete mode 100644 vendor/github.com/cespare/xxhash/xxhash_unsafe.go
 delete mode 100644 vendor/github.com/dgraph-io/badger/v3/.go-version
 delete mode 100644 vendor/github.com/dgraph-io/badger/v3/.golangci.yml
 delete mode 100644 vendor/github.com/dgraph-io/badger/v3/appveyor.yml
 delete mode 100644 vendor/github.com/dgraph-io/badger/v3/pb/badgerpb3.pb.go
 delete mode 100644 vendor/github.com/dgraph-io/badger/v3/pb/gen.sh
 delete mode 100644 vendor/github.com/dgraph-io/badger/v3/y/metrics.go
 rename vendor/github.com/dgraph-io/badger/{v3 => v4}/.deepsource.toml (100%)
 rename vendor/github.com/dgraph-io/badger/{v3 => v4}/.gitignore (100%)
 create mode 100644 vendor/github.com/dgraph-io/badger/v4/.golangci.yml
 rename vendor/github.com/dgraph-io/badger/{v3 => v4}/CHANGELOG.md (65%)
 rename vendor/github.com/dgraph-io/badger/{v3 => v4}/CODE_OF_CONDUCT.md (100%)
 rename vendor/github.com/dgraph-io/badger/{v3 => v4}/CONTRIBUTING.md (100%)
 rename vendor/github.com/dgraph-io/badger/{v3 => v4}/LICENSE (100%)
 rename vendor/github.com/dgraph-io/badger/{v3 => v4}/Makefile (98%)
 rename vendor/github.com/dgraph-io/badger/{v3 => v4}/README.md (83%)
 rename vendor/github.com/dgraph-io/badger/{v3 => v4}/VERSIONING.md (97%)
 rename vendor/github.com/dgraph-io/badger/{v3 => v4}/backup.go (97%)
 rename vendor/github.com/dgraph-io/badger/{v3 => v4}/batch.go (97%)
 rename vendor/github.com/dgraph-io/badger/{v3 => v4}/changes.sh (100%)
 rename vendor/github.com/dgraph-io/badger/{v3 => v4}/compaction.go (98%)
 rename vendor/github.com/dgraph-io/badger/{v3 => v4}/db.go (89%)
 rename vendor/github.com/dgraph-io/badger/{v3 => v4}/dir_plan9.go (96%)
 rename vendor/github.com/dgraph-io/badger/{v3 => v4}/dir_unix.go (95%)
 rename vendor/github.com/dgraph-io/badger/{v3 => v4}/dir_windows.go (98%)
 rename vendor/github.com/dgraph-io/badger/{v3 => v4}/discard.go (94%)
 rename vendor/github.com/dgraph-io/badger/{v3 => v4}/doc.go (99%)
 rename vendor/github.com/dgraph-io/badger/{v3 => v4}/errors.go (69%)
 rename vendor/github.com/dgraph-io/badger/{v3 => v4}/fb/BlockOffset.go (100%)
 rename vendor/github.com/dgraph-io/badger/{v3 => v4}/fb/TableIndex.go (100%)
 rename vendor/github.com/dgraph-io/badger/{v3 => v4}/fb/flatbuffer.fbs (100%)
 rename vendor/github.com/dgraph-io/badger/{v3 => v4}/fb/gen.sh (100%)
 rename vendor/github.com/dgraph-io/badger/{v3 => v4}/fb/install_flatbuffers.sh (100%)
 rename vendor/github.com/dgraph-io/badger/{v3 => v4}/histogram.go (99%)
 rename vendor/github.com/dgraph-io/badger/{v3 => v4}/iterator.go (95%)
 rename vendor/github.com/dgraph-io/badger/{v3 => v4}/key_registry.go (98%)
 rename vendor/github.com/dgraph-io/badger/{v3 => v4}/level_handler.go (98%)
 rename vendor/github.com/dgraph-io/badger/{v3 => v4}/levels.go (95%)
 rename vendor/github.com/dgraph-io/badger/{v3 => v4}/logger.go (100%)
 rename vendor/github.com/dgraph-io/badger/{v3 => v4}/managed_db.go (100%)
 rename vendor/github.com/dgraph-io/badger/{v3 => v4}/manifest.go (85%)
 rename vendor/github.com/dgraph-io/badger/{v3 => v4}/memtable.go (95%)
 rename vendor/github.com/dgraph-io/badger/{v3 => v4}/merge.go (97%)
 rename vendor/github.com/dgraph-io/badger/{v3 => v4}/options.go (95%)
 rename vendor/github.com/dgraph-io/badger/{v3 => v4}/options/options.go (100%)
 create mode 100644 vendor/github.com/dgraph-io/badger/v4/pb/badgerpb4.pb.go
 rename vendor/github.com/dgraph-io/badger/{v3/pb/badgerpb3.proto => v4/pb/badgerpb4.proto} (94%)
 create mode 100644 vendor/github.com/dgraph-io/badger/v4/pb/gen.sh
 rename vendor/github.com/dgraph-io/badger/{v3 => v4}/publisher.go (87%)
 rename vendor/github.com/dgraph-io/badger/{v3 => v4}/skl/README.md (100%)
 rename vendor/github.com/dgraph-io/badger/{v3 => v4}/skl/arena.go (93%)
 rename vendor/github.com/dgraph-io/badger/{v3 => v4}/skl/skl.go (95%)
 rename vendor/github.com/dgraph-io/badger/{v3 => v4}/stream.go (91%)
 rename vendor/github.com/dgraph-io/badger/{v3 => v4}/stream_writer.go (82%)
 rename vendor/github.com/dgraph-io/badger/{v3 => v4}/structs.go (98%)
 rename vendor/github.com/dgraph-io/badger/{v3 => v4}/table/README.md (100%)
 rename vendor/github.com/dgraph-io/badger/{v3 => v4}/table/builder.go (94%)
 rename vendor/github.com/dgraph-io/badger/{v3 => v4}/table/iterator.go (98%)
 rename vendor/github.com/dgraph-io/badger/{v3 => v4}/table/merge_iterator.go (98%)
 rename vendor/github.com/dgraph-io/badger/{v3 => v4}/table/table.go (93%)
 rename vendor/github.com/dgraph-io/badger/{v3 => v4}/test.sh (60%)
 create mode 100644 vendor/github.com/dgraph-io/badger/v4/test_extensions.go
 rename vendor/github.com/dgraph-io/badger/{v3 => v4}/trie/trie.go (98%)
 rename vendor/github.com/dgraph-io/badger/{v3 => v4}/txn.go (98%)
 rename vendor/github.com/dgraph-io/badger/{v3 => v4}/util.go (93%)
 rename vendor/github.com/dgraph-io/badger/{v3 => v4}/value.go (93%)
 rename vendor/github.com/dgraph-io/badger/{v3 => v4}/y/bloom.go (99%)
 rename vendor/github.com/dgraph-io/badger/{v3 => v4}/y/checksum.go (90%)
 rename vendor/github.com/dgraph-io/badger/{v3 => v4}/y/encrypt.go (100%)
 rename vendor/github.com/dgraph-io/badger/{v3 => v4}/y/error.go (90%)
 rename vendor/github.com/dgraph-io/badger/{v3 => v4}/y/event_log.go (100%)
 rename vendor/github.com/dgraph-io/badger/{v3 => v4}/y/file_dsync.go (93%)
 rename vendor/github.com/dgraph-io/badger/{v3 => v4}/y/file_nodsync.go (93%)
 rename vendor/github.com/dgraph-io/badger/{v3 => v4}/y/iterator.go (100%)
 create mode 100644 vendor/github.com/dgraph-io/badger/v4/y/metrics.go
 rename vendor/github.com/dgraph-io/badger/{v3 => v4}/y/watermark.go (92%)
 rename vendor/github.com/dgraph-io/badger/{v3 => v4}/y/y.go (96%)
 rename vendor/github.com/dgraph-io/badger/{v3 => v4}/y/zstd.go (100%)
 delete mode 100644 vendor/github.com/dgraph-io/ristretto/.deepsource.toml
 delete mode 100644 vendor/github.com/dgraph-io/ristretto/.go-version
 delete mode 100644 vendor/github.com/dgraph-io/ristretto/.golangci.yml
 delete mode 100644 vendor/github.com/dgraph-io/ristretto/README.md
 create mode 100644 vendor/github.com/dgraph-io/ristretto/v2/.gitignore
 create mode 100644 vendor/github.com/dgraph-io/ristretto/v2/.golangci.yml
 rename vendor/github.com/dgraph-io/ristretto/{ => v2}/CHANGELOG.md (73%)
 rename vendor/github.com/dgraph-io/ristretto/{ => v2}/LICENSE (100%)
 create mode 100644 vendor/github.com/dgraph-io/ristretto/v2/README.md
 rename vendor/github.com/dgraph-io/ristretto/{ => v2}/cache.go (70%)
 rename vendor/github.com/dgraph-io/ristretto/{ => v2}/policy.go (78%)
 rename vendor/github.com/dgraph-io/ristretto/{ => v2}/ring.go (100%)
 rename vendor/github.com/dgraph-io/ristretto/{ => v2}/sketch.go (86%)
 rename vendor/github.com/dgraph-io/ristretto/{ => v2}/store.go (67%)
 rename vendor/github.com/dgraph-io/ristretto/{ => v2}/ttl.go (56%)
 rename vendor/github.com/dgraph-io/ristretto/{ => v2}/z/LICENSE (100%)
 rename vendor/github.com/dgraph-io/ristretto/{ => v2}/z/README.md (91%)
 rename vendor/github.com/dgraph-io/ristretto/{ => v2}/z/allocator.go (98%)
 rename vendor/github.com/dgraph-io/ristretto/{ => v2}/z/bbloom.go (97%)
 rename vendor/github.com/dgraph-io/ristretto/{ => v2}/z/btree.go (99%)
 rename vendor/github.com/dgraph-io/ristretto/{ => v2}/z/buffer.go (95%)
 rename vendor/github.com/dgraph-io/ristretto/{ => v2}/z/calloc.go (100%)
 rename vendor/github.com/dgraph-io/ristretto/{ => v2}/z/calloc_32bit.go (100%)
 rename vendor/github.com/dgraph-io/ristretto/{ => v2}/z/calloc_64bit.go (81%)
 rename vendor/github.com/dgraph-io/ristretto/{ => v2}/z/calloc_jemalloc.go (100%)
 rename vendor/github.com/dgraph-io/ristretto/{ => v2}/z/calloc_nojemalloc.go (93%)
 rename vendor/github.com/dgraph-io/ristretto/{ => v2}/z/file.go (98%)
 rename vendor/github.com/dgraph-io/ristretto/{ => v2}/z/file_default.go (98%)
 rename vendor/github.com/dgraph-io/ristretto/{ => v2}/z/file_linux.go (100%)
 rename vendor/github.com/dgraph-io/ristretto/{ => v2}/z/flags.go (94%)
 rename vendor/github.com/dgraph-io/ristretto/{ => v2}/z/histogram.go (100%)
 rename vendor/github.com/dgraph-io/ristretto/{ => v2}/z/mmap.go (100%)
 rename vendor/github.com/dgraph-io/ristretto/{ => v2}/z/mmap_darwin.go (100%)
 create mode 100644 vendor/github.com/dgraph-io/ristretto/v2/z/mmap_js.go
 rename vendor/github.com/dgraph-io/ristretto/{ => v2}/z/mmap_linux.go (98%)
 rename vendor/github.com/dgraph-io/ristretto/{ => v2}/z/mmap_plan9.go (100%)
 rename vendor/github.com/dgraph-io/ristretto/{ => v2}/z/mmap_unix.go (92%)
 create mode 100644 vendor/github.com/dgraph-io/ristretto/v2/z/mmap_wasip1.go
 rename vendor/github.com/dgraph-io/ristretto/{ => v2}/z/mmap_windows.go (100%)
 rename vendor/github.com/dgraph-io/ristretto/{z/mremap_linux_arm64.go => v2/z/mremap_nosize.go} (94%)
 rename vendor/github.com/dgraph-io/ristretto/{z/mremap_linux.go => v2/z/mremap_size.go} (95%)
 rename vendor/github.com/dgraph-io/ristretto/{ => v2}/z/rtutil.go (99%)
 rename vendor/github.com/dgraph-io/ristretto/{ => v2}/z/rtutil.s (100%)
 rename vendor/github.com/dgraph-io/ristretto/{ => v2}/z/simd/baseline.go (97%)
 rename vendor/github.com/dgraph-io/ristretto/{ => v2}/z/simd/search.go (95%)
 rename vendor/github.com/dgraph-io/ristretto/{ => v2}/z/simd/search_amd64.s (100%)
 rename vendor/github.com/dgraph-io/ristretto/{ => v2}/z/simd/stub_search_amd64.go (100%)
 rename vendor/github.com/dgraph-io/ristretto/{ => v2}/z/z.go (85%)
 delete mode 100644 vendor/github.com/gogo/protobuf/AUTHORS
 delete mode 100644 vendor/github.com/gogo/protobuf/CONTRIBUTORS
 delete mode 100644 vendor/github.com/gogo/protobuf/proto/Makefile
 delete mode 100644 vendor/github.com/gogo/protobuf/proto/clone.go
 delete mode 100644 vendor/github.com/gogo/protobuf/proto/custom_gogo.go
 delete mode 100644 vendor/github.com/gogo/protobuf/proto/decode.go
 delete mode 100644 vendor/github.com/gogo/protobuf/proto/deprecated.go
 delete mode 100644 vendor/github.com/gogo/protobuf/proto/discard.go
 delete mode 100644 vendor/github.com/gogo/protobuf/proto/duration.go
 delete mode 100644 vendor/github.com/gogo/protobuf/proto/duration_gogo.go
 delete mode 100644 vendor/github.com/gogo/protobuf/proto/encode.go
 delete mode 100644 vendor/github.com/gogo/protobuf/proto/encode_gogo.go
 delete mode 100644 vendor/github.com/gogo/protobuf/proto/equal.go
 delete mode 100644 vendor/github.com/gogo/protobuf/proto/extensions.go
 delete mode 100644 vendor/github.com/gogo/protobuf/proto/extensions_gogo.go
 delete mode 100644 vendor/github.com/gogo/protobuf/proto/lib.go
 delete mode 100644 vendor/github.com/gogo/protobuf/proto/lib_gogo.go
 delete mode 100644 vendor/github.com/gogo/protobuf/proto/message_set.go
 delete mode 100644 vendor/github.com/gogo/protobuf/proto/pointer_reflect.go
 delete mode 100644 vendor/github.com/gogo/protobuf/proto/pointer_reflect_gogo.go
 delete mode 100644 vendor/github.com/gogo/protobuf/proto/pointer_unsafe.go
 delete mode 100644 vendor/github.com/gogo/protobuf/proto/pointer_unsafe_gogo.go
 delete mode 100644 vendor/github.com/gogo/protobuf/proto/properties.go
 delete mode 100644 vendor/github.com/gogo/protobuf/proto/properties_gogo.go
 delete mode 100644 vendor/github.com/gogo/protobuf/proto/skip_gogo.go
 delete mode 100644 vendor/github.com/gogo/protobuf/proto/table_marshal.go
 delete mode 100644 vendor/github.com/gogo/protobuf/proto/table_marshal_gogo.go
 delete mode 100644 vendor/github.com/gogo/protobuf/proto/table_merge.go
 delete mode 100644 vendor/github.com/gogo/protobuf/proto/table_unmarshal.go
 delete mode 100644 vendor/github.com/gogo/protobuf/proto/table_unmarshal_gogo.go
 delete mode 100644 vendor/github.com/gogo/protobuf/proto/text.go
 delete mode 100644 vendor/github.com/gogo/protobuf/proto/text_gogo.go
 delete mode 100644 vendor/github.com/gogo/protobuf/proto/text_parser.go
 delete mode 100644 vendor/github.com/gogo/protobuf/proto/timestamp.go
 delete mode 100644 vendor/github.com/gogo/protobuf/proto/timestamp_gogo.go
 delete mode 100644 vendor/github.com/gogo/protobuf/proto/wrappers.go
 delete mode 100644 vendor/github.com/gogo/protobuf/proto/wrappers_gogo.go
 delete mode 100644 vendor/github.com/golang/glog/LICENSE
 delete mode 100644 vendor/github.com/golang/glog/README.md
 delete mode 100644 vendor/github.com/golang/glog/glog.go
 delete mode 100644 vendor/github.com/golang/glog/glog_file.go
 delete mode 100644 vendor/github.com/golang/glog/glog_file_linux.go
 delete mode 100644 vendor/github.com/golang/glog/glog_file_nonwindows.go
 delete mode 100644 vendor/github.com/golang/glog/glog_file_other.go
 delete mode 100644 vendor/github.com/golang/glog/glog_file_posix.go
 delete mode 100644 vendor/github.com/golang/glog/glog_file_windows.go
 delete mode 100644 vendor/github.com/golang/glog/glog_flags.go
 delete mode 100644 vendor/github.com/golang/glog/internal/logsink/logsink.go
 delete mode 100644 vendor/github.com/golang/glog/internal/logsink/logsink_fatal.go
 delete mode 100644 vendor/github.com/golang/glog/internal/stackdump/stackdump.go
 delete mode 100644 vendor/github.com/golang/snappy/README
 delete mode 100644 vendor/github.com/golang/snappy/decode.go
 delete mode 100644 vendor/github.com/golang/snappy/decode_other.go
 delete mode 100644 vendor/github.com/golang/snappy/encode.go
 delete mode 100644 vendor/github.com/golang/snappy/encode_amd64.s
 delete mode 100644 vendor/github.com/golang/snappy/encode_arm64.s
 delete mode 100644 vendor/github.com/golang/snappy/encode_asm.go
 delete mode 100644 vendor/github.com/golang/snappy/encode_other.go
 rename vendor/github.com/google/flatbuffers/{LICENSE.txt => LICENSE} (100%)
 create mode 100644 vendor/github.com/klauspost/compress/internal/race/norace.go
 create mode 100644 vendor/github.com/klauspost/compress/internal/race/race.go
 create mode 100644 vendor/github.com/klauspost/compress/s2/.gitignore
 rename vendor/github.com/{gogo/protobuf => klauspost/compress/s2}/LICENSE (73%)
 create mode 100644 vendor/github.com/klauspost/compress/s2/README.md
 create mode 100644 vendor/github.com/klauspost/compress/s2/decode.go
 rename vendor/github.com/{golang/snappy => klauspost/compress/s2}/decode_amd64.s (60%)
 rename vendor/github.com/{golang/snappy => klauspost/compress/s2}/decode_arm64.s (59%)
 rename vendor/github.com/{golang/snappy => klauspost/compress/s2}/decode_asm.go (66%)
 create mode 100644 vendor/github.com/klauspost/compress/s2/decode_other.go
 create mode 100644 vendor/github.com/klauspost/compress/s2/dict.go
 create mode 100644 vendor/github.com/klauspost/compress/s2/encode.go
 create mode 100644 vendor/github.com/klauspost/compress/s2/encode_all.go
 create mode 100644 vendor/github.com/klauspost/compress/s2/encode_amd64.go
 create mode 100644 vendor/github.com/klauspost/compress/s2/encode_best.go
 create mode 100644 vendor/github.com/klauspost/compress/s2/encode_better.go
 create mode 100644 vendor/github.com/klauspost/compress/s2/encode_go.go
 create mode 100644 vendor/github.com/klauspost/compress/s2/encodeblock_amd64.go
 create mode 100644 vendor/github.com/klauspost/compress/s2/encodeblock_amd64.s
 create mode 100644 vendor/github.com/klauspost/compress/s2/index.go
 create mode 100644 vendor/github.com/klauspost/compress/s2/lz4convert.go
 create mode 100644 vendor/github.com/klauspost/compress/s2/lz4sconvert.go
 create mode 100644 vendor/github.com/klauspost/compress/s2/reader.go
 create mode 100644 vendor/github.com/klauspost/compress/s2/s2.go
 create mode 100644 vendor/github.com/klauspost/compress/s2/writer.go
 rename vendor/github.com/{golang => klauspost/compress}/snappy/.gitignore (100%)
 rename vendor/github.com/{golang => klauspost/compress}/snappy/AUTHORS (100%)
 rename vendor/github.com/{golang => klauspost/compress}/snappy/CONTRIBUTORS (100%)
 rename vendor/github.com/{golang => klauspost/compress}/snappy/LICENSE (100%)
 create mode 100644 vendor/github.com/klauspost/compress/snappy/README.md
 create mode 100644 vendor/github.com/klauspost/compress/snappy/decode.go
 create mode 100644 vendor/github.com/klauspost/compress/snappy/encode.go
 rename vendor/github.com/{golang => klauspost/compress}/snappy/snappy.go (60%)

diff --git a/CHANGELOG.md b/CHANGELOG.md
index 400b7fafef..8f30a6a531 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -5,6 +5,10 @@ project adheres to [Semantic Versioning](http://semver.org/).
 
 ## Unreleased
 
+### Dependency Updates
+
+- github.com/dgraph-io/badger: v3.2103.5 -> v4.5.0 by @Juneezee in [#7239](https://github.com/open-policy-agent/opa/pull/7239)
+
 ## 1.0.0
 
 > **_NOTES:_**
diff --git a/go.mod b/go.mod
index 4e4328038d..6a92046c9a 100644
--- a/go.mod
+++ b/go.mod
@@ -11,7 +11,7 @@ require (
 	github.com/bytecodealliance/wasmtime-go/v3 v3.0.2
 	github.com/containerd/containerd v1.7.25
 	github.com/containerd/errdefs v1.0.0
-	github.com/dgraph-io/badger/v3 v3.2103.5
+	github.com/dgraph-io/badger/v4 v4.5.0
 	github.com/fortytw2/leaktest v1.3.0
 	github.com/foxcpp/go-mockdns v1.1.0
 	github.com/fsnotify/fsnotify v1.8.0
@@ -57,25 +57,20 @@ require (
 	github.com/AdaLogics/go-fuzz-headers v0.0.0-20230811130428-ced1acdcaa24 // indirect
 	github.com/beorn7/perks v1.0.1 // indirect
 	github.com/cenkalti/backoff/v4 v4.3.0 // indirect
-	github.com/cespare/xxhash v1.1.0 // indirect
 	github.com/cespare/xxhash/v2 v2.3.0 // indirect
 	github.com/containerd/log v0.1.0 // indirect
 	github.com/containerd/platforms v0.2.1 // indirect
 	github.com/cpuguy83/go-md2man/v2 v2.0.4 // indirect
-	github.com/dgraph-io/ristretto v0.1.1 // indirect
-	github.com/dgryski/go-farm v0.0.0-20200201041132-a6ae2369ad13 // indirect
-	github.com/dustin/go-humanize v1.0.0 // indirect
+	github.com/dgraph-io/ristretto/v2 v2.0.0 // indirect
+	github.com/dustin/go-humanize v1.0.1 // indirect
 	github.com/felixge/httpsnoop v1.0.4 // indirect
 	github.com/go-logr/stdr v1.2.2 // indirect
-	github.com/gogo/protobuf v1.3.2 // indirect
-	github.com/golang/glog v1.2.2 // indirect
 	github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da // indirect
-	github.com/golang/snappy v0.0.4 // indirect
-	github.com/google/flatbuffers v1.12.1 // indirect
+	github.com/google/flatbuffers v24.3.25+incompatible // indirect
 	github.com/grpc-ecosystem/grpc-gateway/v2 v2.24.0 // indirect
 	github.com/hashicorp/hcl v1.0.0 // indirect
 	github.com/inconshreveable/mousetrap v1.1.0 // indirect
-	github.com/klauspost/compress v1.17.9 // indirect
+	github.com/klauspost/compress v1.17.11 // indirect
 	github.com/kr/pretty v0.3.1 // indirect
 	github.com/kr/text v0.2.0 // indirect
 	github.com/kylelemons/godebug v1.1.0 // indirect
diff --git a/go.sum b/go.sum
index a0065253b7..c4bffa951c 100644
--- a/go.sum
+++ b/go.sum
@@ -6,7 +6,6 @@ github.com/Microsoft/go-winio v0.6.2 h1:F2VQgta7ecxGYO8k3ZZz3RS8fVIXVxONVUPlNERo
 github.com/Microsoft/go-winio v0.6.2/go.mod h1:yd8OoFMLzJbo9gZq8j5qaps8bJ9aShtEA8Ipt1oGCvU=
 github.com/Microsoft/hcsshim v0.11.7 h1:vl/nj3Bar/CvJSYo7gIQPyRWc9f3c6IeSNavBTSZNZQ=
 github.com/Microsoft/hcsshim v0.11.7/go.mod h1:MV8xMfmECjl5HdO7U/3/hFVnkmSBjAjmA09d4bExKcU=
-github.com/OneOfOne/xxhash v1.2.2/go.mod h1:HSdplMjZKSmBqAxg5vPj2TmRDmfkzw+cTzAElWljhcU=
 github.com/OneOfOne/xxhash v1.2.8 h1:31czK/TI9sNkxIKfaUfGlU47BAxQ0ztGgd9vPyqimf8=
 github.com/OneOfOne/xxhash v1.2.8/go.mod h1:eZbhyaAYD41SGSSsnmcpxVoRiQ/MPUTjUdIIOT9Um7Q=
 github.com/agnivade/levenshtein v1.2.0 h1:U9L4IOT0Y3i0TIlUIDJ7rVUziKi/zPbrJGaFrtYH3SY=
@@ -15,7 +14,6 @@ github.com/andreyvit/diff v0.0.0-20170406064948-c7f18ee00883 h1:bvNMNQO63//z+xNg
 github.com/andreyvit/diff v0.0.0-20170406064948-c7f18ee00883/go.mod h1:rCTlJbsFo29Kk6CurOXKm700vrz8f0KW0JNfpkRJY/8=
 github.com/arbovm/levenshtein v0.0.0-20160628152529-48b4e1c0c4d0 h1:jfIu9sQUG6Ig+0+Ap1h4unLjW6YQJpKZVmUzxsD4E/Q=
 github.com/arbovm/levenshtein v0.0.0-20160628152529-48b4e1c0c4d0/go.mod h1:t2tdKJDJF9BV14lnkjHmOQgcvEKgtqs5a1N3LNdJhGE=
-github.com/armon/consul-api v0.0.0-20180202201655-eb2c6b5be1b6/go.mod h1:grANhF5doyWs3UAsr3K4I6qtAmlQcZDesFNEHPZAzj8=
 github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM=
 github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw=
 github.com/bytecodealliance/wasmtime-go/v3 v3.0.2 h1:3uZCA/BLTIu+DqCfguByNMJa2HVHpXvjfy0Dy7g6fuA=
@@ -23,9 +21,6 @@ github.com/bytecodealliance/wasmtime-go/v3 v3.0.2/go.mod h1:RnUjnIXxEJcL6BgCvNyz
 github.com/cenkalti/backoff/v4 v4.3.0 h1:MyRJ/UdXutAwSAT+s3wNd7MfTIcy71VQueUuFK343L8=
 github.com/cenkalti/backoff/v4 v4.3.0/go.mod h1:Y3VNntkOUPxTVeUxJ/G5vcM//AlwfmyYozVcomhLiZE=
 github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU=
-github.com/cespare/xxhash v1.1.0 h1:a6HrQnmkObjyL+Gs60czilIUGqrzKutQD6XZog3p+ko=
-github.com/cespare/xxhash v1.1.0/go.mod h1:XrSqR1VqqWfGrhpAt58auRo0WTKS1nRRg3ghfAqPWnc=
-github.com/cespare/xxhash/v2 v2.1.1/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs=
 github.com/cespare/xxhash/v2 v2.3.0 h1:UL815xU9SqsFlibzuggzjXhog7bL6oX9BbNZnL2UFvs=
 github.com/cespare/xxhash/v2 v2.3.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs=
 github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw=
@@ -42,10 +37,6 @@ github.com/containerd/log v0.1.0 h1:TCJt7ioM2cr/tfR8GPbGf9/VRAX8D2B4PjzCpfX540I=
 github.com/containerd/log v0.1.0/go.mod h1:VRRf09a7mHDIRezVKTRCrOq78v577GXq3bSa3EhrzVo=
 github.com/containerd/platforms v0.2.1 h1:zvwtM3rz2YHPQsF2CHYM8+KtB5dvhISiXh5ZpSBQv6A=
 github.com/containerd/platforms v0.2.1/go.mod h1:XHCb+2/hzowdiut9rkudds9bE5yJ7npe7dG/wG+uFPw=
-github.com/coreos/etcd v3.3.10+incompatible/go.mod h1:uF7uidLiAD3TWHmW31ZFd/JWoc32PjwdhPthX9715RE=
-github.com/coreos/go-etcd v2.0.0+incompatible/go.mod h1:Jez6KQU2B/sWsbdaef3ED8NzMklzPG4d5KIOhIy30Tk=
-github.com/coreos/go-semver v0.2.0/go.mod h1:nnelYz7RCh+5ahJtPPxZlU+153eP4D4r3EedlOD2RNk=
-github.com/cpuguy83/go-md2man v1.0.10/go.mod h1:SmD6nW6nTyfqj6ABTjUi3V3JVMnlJmwcJI5acqYI6dE=
 github.com/cpuguy83/go-md2man/v2 v2.0.4 h1:wfIWP927BUkWJb2NmU/kNDYIBTh/ziUX91+lVfRxZq4=
 github.com/cpuguy83/go-md2man/v2 v2.0.4/go.mod h1:tgQtvFlXSQOSOSIRvRPT7W67SCa46tRHOmNcaadrF8o=
 github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E=
@@ -53,17 +44,16 @@ github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSs
 github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
 github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc h1:U9qPSI2PIWSS1VwoXQT9A3Wy9MM3WgvqSxFWenqJduM=
 github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
-github.com/dgraph-io/badger/v3 v3.2103.5 h1:ylPa6qzbjYRQMU6jokoj4wzcaweHylt//CH0AKt0akg=
-github.com/dgraph-io/badger/v3 v3.2103.5/go.mod h1:4MPiseMeDQ3FNCYwRbbcBOGJLf5jsE0PPFzRiKjtcdw=
-github.com/dgraph-io/ristretto v0.1.1 h1:6CWw5tJNgpegArSHpNHJKldNeq03FQCwYvfMVWajOK8=
-github.com/dgraph-io/ristretto v0.1.1/go.mod h1:S1GPSBCYCIhmVNfcth17y2zZtQT6wzkzgwUve0VDWWA=
-github.com/dgryski/go-farm v0.0.0-20190423205320-6a90982ecee2/go.mod h1:SqUrOPUnsFjfmXRMNPybcSiG0BgUW2AuFH8PAnS2iTw=
+github.com/dgraph-io/badger/v4 v4.5.0 h1:TeJE3I1pIWLBjYhIYCA1+uxrjWEoJXImFBMEBVSm16g=
+github.com/dgraph-io/badger/v4 v4.5.0/go.mod h1:ysgYmIeG8dS/E8kwxT7xHyc7MkmwNYLRoYnFbr7387A=
+github.com/dgraph-io/ristretto/v2 v2.0.0 h1:l0yiSOtlJvc0otkqyMaDNysg8E9/F/TYZwMbxscNOAQ=
+github.com/dgraph-io/ristretto/v2 v2.0.0/go.mod h1:FVFokF2dRqXyPyeMnK1YDy8Fc6aTe0IKgbcd03CYeEk=
 github.com/dgryski/go-farm v0.0.0-20200201041132-a6ae2369ad13 h1:fAjc9m62+UWV/WAFKLNi6ZS0675eEUC9y3AlwSbQu1Y=
 github.com/dgryski/go-farm v0.0.0-20200201041132-a6ae2369ad13/go.mod h1:SqUrOPUnsFjfmXRMNPybcSiG0BgUW2AuFH8PAnS2iTw=
 github.com/dgryski/trifles v0.0.0-20230903005119-f50d829f2e54 h1:SG7nF6SRlWhcT7cNTs5R6Hk4V2lcmLz2NsG2VnInyNo=
 github.com/dgryski/trifles v0.0.0-20230903005119-f50d829f2e54/go.mod h1:if7Fbed8SFyPtHLHbg49SI7NAdJiC5WIA09pe59rfAA=
-github.com/dustin/go-humanize v1.0.0 h1:VSnTsYCnlFHaM2/igO1h6X3HA71jcobQuxemgkq4zYo=
-github.com/dustin/go-humanize v1.0.0/go.mod h1:HtrtbFcZ19U5GC7JDqmcUSB87Iq5E25KnS6fMYU6eOk=
+github.com/dustin/go-humanize v1.0.1 h1:GzkhY7T5VNhEkwH0PVJgjz+fX1rhBrR7pRT3mDkpeCY=
+github.com/dustin/go-humanize v1.0.1/go.mod h1:Mu1zIs6XwVuF/gI1OepvI0qD18qycQx+mFykh5fBlto=
 github.com/envoyproxy/go-control-plane v0.9.0/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4=
 github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4=
 github.com/envoyproxy/go-control-plane v0.9.4/go.mod h1:6rpuAdCZL397s3pYoYcLgu1mIlRU8Am5FuJP05cCM98=
@@ -76,7 +66,6 @@ github.com/foxcpp/go-mockdns v1.1.0 h1:jI0rD8M0wuYAxL7r/ynTrCQQq0BVqfB99Vgk7Dlme
 github.com/foxcpp/go-mockdns v1.1.0/go.mod h1:IhLeSFGed3mJIAXPH2aiRQB+kqz7oqu8ld2qVbOu7Wk=
 github.com/frankban/quicktest v1.14.6 h1:7Xjx+VpznH+oBnejlPUj8oUpdxnVs4f8XU8WnHkI4W8=
 github.com/frankban/quicktest v1.14.6/go.mod h1:4ptaffx2x8+WTWXmUCuVU6aPUX1/Mz7zb5vbUoiM6w0=
-github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo=
 github.com/fsnotify/fsnotify v1.8.0 h1:dAwr6QBTBZIkG8roQaJjGof0pp0EeF+tNV7YBP3F/8M=
 github.com/fsnotify/fsnotify v1.8.0/go.mod h1:8jBTzvmWwFyi3Pb8djgCCO5IBqzKJ/Jwo8TRcHyHii0=
 github.com/go-ini/ini v1.67.0 h1:z6ZrTEZqSWOTyH2FlglNbNgARyHG8oLW9gMELqKr06A=
@@ -91,15 +80,11 @@ github.com/gobwas/glob v0.2.3/go.mod h1:d3Ez4x06l9bZtSvzIay5+Yzi0fmZzPgnTbPcKjJA
 github.com/gogo/protobuf v1.3.2 h1:Ov1cvc58UF3b5XjBnZv7+opcTcQFZebYjWzi34vdm4Q=
 github.com/gogo/protobuf v1.3.2/go.mod h1:P1XiOD3dCwIKUDQYPy72D8LYyHL2YPYrpS2s69NZV8Q=
 github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q=
-github.com/golang/glog v1.2.2 h1:1+mZ9upx1Dh6FmUTFR1naJ77miKiXgALjWOZ3NVFPmY=
-github.com/golang/glog v1.2.2/go.mod h1:6AhwSGph0fcJtXVM/PEHPqZlFeoLxhs7/t5UDAwmO+w=
-github.com/golang/groupcache v0.0.0-20190702054246-869f871628b6/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=
 github.com/golang/groupcache v0.0.0-20200121045136-8c9f03a8e57e/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=
 github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da h1:oI5xCqsCo564l8iNU+DwB5epxmsaqB+rhGL0m5jtYqE=
 github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=
 github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A=
 github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
-github.com/golang/protobuf v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
 github.com/golang/protobuf v1.3.2/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
 github.com/golang/protobuf v1.4.0-rc.1/go.mod h1:ceaxUfeHdC40wWswd/P6IGgMaK3YpKi5j83Wpe3EHw8=
 github.com/golang/protobuf v1.4.0-rc.1.0.20200221234624-67d41d38c208/go.mod h1:xKAWHe0F5eneWXFV3EuXVDTCmh+JuBKY0li0aMyXATA=
@@ -110,18 +95,14 @@ github.com/golang/protobuf v1.4.1/go.mod h1:U8fpvMrcmy5pZrNK1lt4xCsGvpyWQ/VVv6QD
 github.com/golang/protobuf v1.4.3/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI=
 github.com/golang/protobuf v1.5.4 h1:i7eJL8qZTpSEXOPTxNKhASYpMn+8e5Q6AdndVa1dWek=
 github.com/golang/protobuf v1.5.4/go.mod h1:lnTiLA8Wa4RWRcIUkrtSVa5nRhsEGBg48fD6rSs7xps=
-github.com/golang/snappy v0.0.3/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q=
-github.com/golang/snappy v0.0.4 h1:yAGX7huGHXlcLOEtBnF4w7FQwA26wojNCwOYAEhLjQM=
-github.com/golang/snappy v0.0.4/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q=
-github.com/google/flatbuffers v1.12.1 h1:MVlul7pQNoDzWRLTw5imwYsl+usrS1TXG2H4jg6ImGw=
-github.com/google/flatbuffers v1.12.1/go.mod h1:1AeVuKshWv4vARoZatz6mlQ0JxURH0Kv5+zNeJKJCa8=
+github.com/google/flatbuffers v24.3.25+incompatible h1:CX395cjN9Kke9mmalRoL3d81AtFUxJM+yDthflgJGkI=
+github.com/google/flatbuffers v24.3.25+incompatible/go.mod h1:1AeVuKshWv4vARoZatz6mlQ0JxURH0Kv5+zNeJKJCa8=
 github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M=
 github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU=
 github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU=
 github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
 github.com/google/go-cmp v0.5.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
 github.com/google/go-cmp v0.5.3/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
-github.com/google/go-cmp v0.5.4/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
 github.com/google/go-cmp v0.5.9/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY=
 github.com/google/go-cmp v0.6.0 h1:ofyhxvXcZhMsU5ulbFiLKl/XBFqE1GSq7atu8tAmTRI=
 github.com/google/go-cmp v0.6.0/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY=
@@ -134,14 +115,10 @@ github.com/grpc-ecosystem/grpc-gateway/v2 v2.24.0 h1:TmHmbvxPmaegwhDubVz0lICL0J5
 github.com/grpc-ecosystem/grpc-gateway/v2 v2.24.0/go.mod h1:qztMSjm835F2bXf+5HKAPIS5qsmQDqZna/PgVt4rWtI=
 github.com/hashicorp/hcl v1.0.0 h1:0Anlzjpi4vEasTeNFn2mLJgTSwt0+6sfsiTG8qcWGx4=
 github.com/hashicorp/hcl v1.0.0/go.mod h1:E5yfLk+7swimpb2L/Alb/PJmXilQ/rhwaUYs4T20WEQ=
-github.com/inconshreveable/mousetrap v1.0.0/go.mod h1:PxqpIevigyE2G7u3NXJIT2ANytuPF1OarO4DADm73n8=
 github.com/inconshreveable/mousetrap v1.1.0 h1:wN+x4NVGpMsO7ErUn/mUI3vEoE6Jt13X2s0bqwp9tc8=
 github.com/inconshreveable/mousetrap v1.1.0/go.mod h1:vpF70FUmC8bwa3OWnCshd2FqLfsEA9PFc4w1p2J65bw=
-github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI2bnpBCr8=
-github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck=
-github.com/klauspost/compress v1.12.3/go.mod h1:8dP1Hq4DHOhN9w426knH3Rhby4rFm6D8eO+e+Dq5Gzg=
-github.com/klauspost/compress v1.17.9 h1:6KIumPrER1LHsvBVuDa0r5xaG0Es51mhhB9BQB2qeMA=
-github.com/klauspost/compress v1.17.9/go.mod h1:Di0epgTjJY877eYKx5yC51cX2A2Vl2ibi7bDH9ttBbw=
+github.com/klauspost/compress v1.17.11 h1:In6xLpyWOi1+C7tXUUWv2ot1QvBjxevKAaI6IXrJmUc=
+github.com/klauspost/compress v1.17.11/go.mod h1:pMDklpSncoRMuLFrf1W9Ss9KT+0rH90U12bZKk7uwG0=
 github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo=
 github.com/kr/pretty v0.2.1/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI=
 github.com/kr/pretty v0.3.1 h1:flRD4NNwYAUpkphVc1HcthR4KEIFJ65n8Mw5qdRn3LE=
@@ -152,7 +129,6 @@ github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY=
 github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE=
 github.com/kylelemons/godebug v1.1.0 h1:RPNrshWIDI6G2gRW9EHilWtl7Z6Sb1BR0xunSBf0SNc=
 github.com/kylelemons/godebug v1.1.0/go.mod h1:9/0rRGxNHcop5bhtWyNeEfOS8JIWk580+fNqagV/RAw=
-github.com/magiconair/properties v1.8.0/go.mod h1:PppfXfuXeibc/6YijjN8zIbojt8czPbwD3XqdrwzmxQ=
 github.com/magiconair/properties v1.8.7 h1:IeQXZAiQcpL9mgcAe1Nu6cX9LLw6ExEHKjN0VQdvPDY=
 github.com/magiconair/properties v1.8.7/go.mod h1:Dhd985XPs7jluiymwWYZ0G4Z61jb3vdS329zhj2hYo0=
 github.com/mattn/go-runewidth v0.0.3/go.mod h1:LwmH8dsx7+W8Uxz3IHJYH5QSwggIsqBzpuz5H//U1FU=
@@ -160,8 +136,6 @@ github.com/mattn/go-runewidth v0.0.9 h1:Lm995f3rfxdpd6TSmuVCHVb/QhupuXlYr8sCI/Qd
 github.com/mattn/go-runewidth v0.0.9/go.mod h1:H031xJmbD/WCDINGzjvQ9THkh0rPKHF+m2gUSrubnMI=
 github.com/miekg/dns v1.1.57 h1:Jzi7ApEIzwEPLHWRcafCN9LZSBbqQpxjt/wpgvg7wcM=
 github.com/miekg/dns v1.1.57/go.mod h1:uqRjCRUuEAA6qsOiJvDd+CFo/vW+y5WR6SNmHE55hZk=
-github.com/mitchellh/go-homedir v1.1.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0=
-github.com/mitchellh/mapstructure v1.1.2/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y=
 github.com/mitchellh/mapstructure v1.5.0 h1:jeMsZIYE/09sWLaz43PL7Gy6RuMjD2eJVyuac5Z2hdY=
 github.com/mitchellh/mapstructure v1.5.0/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo=
 github.com/moby/locker v1.0.1 h1:fOXqR41zeveg4fFODix+1Ch4mj/gT0NE1XJbp/epuBg=
@@ -178,7 +152,6 @@ github.com/opencontainers/go-digest v1.0.0 h1:apOUWs51W5PlhuyGyz9FCeeBIOUDA/6nW8
 github.com/opencontainers/go-digest v1.0.0/go.mod h1:0JzlMkj0TRzQZfJkVvzbP0HBR3IKzErnv2BNG4W4MAM=
 github.com/opencontainers/image-spec v1.1.0 h1:8SG7/vwALn54lVB/0yZ/MMwhFrPYtpEHQb2IpWsCzug=
 github.com/opencontainers/image-spec v1.1.0/go.mod h1:W4s4sFTMaBeK1BQLXbG4AdM2szdn85PY75RI83NrTrM=
-github.com/pelletier/go-toml v1.2.0/go.mod h1:5z9KED0ma1S8pY6P1sdut58dfprrGBbd/94hg7ilaic=
 github.com/pelletier/go-toml/v2 v2.1.0 h1:FnwAJ4oYMvbT/34k9zzHuZNrhlz48GB3/s6at6/MHO4=
 github.com/pelletier/go-toml/v2 v2.1.0/go.mod h1:tJU2Z3ZkXwnxa4DPO899bsyIoywizdUvyaeZurnPPDc=
 github.com/peterh/liner v1.2.2 h1:aJ4AOodmL+JxOZZEL2u9iJf8omNRpqHc/EbrK+3mAXw=
@@ -205,7 +178,6 @@ github.com/rcrowley/go-metrics v0.0.0-20200313005456-10cdbea86bc0/go.mod h1:bCqn
 github.com/rogpeppe/go-internal v1.9.0/go.mod h1:WtVeX8xhTBvf0smdhujwtBcq4Qrzq/fJaraNFVN+nFs=
 github.com/rogpeppe/go-internal v1.13.1 h1:KvO1DLK/DRN07sQ1LQKScxyZJuNnedQ5/wKSR38lUII=
 github.com/rogpeppe/go-internal v1.13.1/go.mod h1:uMEvuHeurkdAXX61udpOXGD/AzZDWNMNyH2VO9fmH0o=
-github.com/russross/blackfriday v1.5.2/go.mod h1:JO/DiYxRf+HjHt06OyowR9PTA263kcR/rfWxYHBV53g=
 github.com/russross/blackfriday/v2 v2.1.0 h1:JIOH55/0cWyOuilr9/qlrm0BSXldqnqwMsf35Ld67mk=
 github.com/russross/blackfriday/v2 v2.1.0/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM=
 github.com/sagikazarmark/locafero v0.4.0 h1:HApY1R9zGo4DBgr7dqsTH/JJxLTTsOt7u6keLGt6kNQ=
@@ -218,29 +190,19 @@ github.com/sirupsen/logrus v1.9.3 h1:dueUQJ1C2q9oE3F7wvmSGAaVtTmUizReu6fjN8uqzbQ
 github.com/sirupsen/logrus v1.9.3/go.mod h1:naHLuLoDiP4jHNo9R0sCBMtWGeIprob74mVsIT4qYEQ=
 github.com/sourcegraph/conc v0.3.0 h1:OQTbbt6P72L20UqAkXXuLOj79LfEanQ+YQFNpLA9ySo=
 github.com/sourcegraph/conc v0.3.0/go.mod h1:Sdozi7LEKbFPqYX2/J+iBAM6HpqSLTASQIKqDmF7Mt0=
-github.com/spaolacci/murmur3 v0.0.0-20180118202830-f09979ecbc72/go.mod h1:JwIasOWyU6f++ZhiEuf87xNszmSA2myDM2Kzu9HwQUA=
-github.com/spaolacci/murmur3 v1.1.0 h1:7c1g84S4BPRrfL5Xrdp6fOJ206sU9y293DDHaoy0bLI=
-github.com/spaolacci/murmur3 v1.1.0/go.mod h1:JwIasOWyU6f++ZhiEuf87xNszmSA2myDM2Kzu9HwQUA=
-github.com/spf13/afero v1.1.2/go.mod h1:j4pytiNVoe2o6bmDsKpLACNPDBIoEAkihy7loJ1B0CQ=
 github.com/spf13/afero v1.11.0 h1:WJQKhtpdm3v2IzqG8VMqrr6Rf3UYpEF239Jy9wNepM8=
 github.com/spf13/afero v1.11.0/go.mod h1:GH9Y3pIexgf1MTIWtNGyogA5MwRIDXGUr+hbWNoBjkY=
-github.com/spf13/cast v1.3.0/go.mod h1:Qx5cxh0v+4UWYiBimWS+eyWzqEqokIECu5etghLkUJE=
 github.com/spf13/cast v1.6.0 h1:GEiTHELF+vaR5dhz3VqZfFSzZjYbgeKDpBxQVS4GYJ0=
 github.com/spf13/cast v1.6.0/go.mod h1:ancEpBxwJDODSW/UG4rDrAqiKolqNNh2DX3mk86cAdo=
-github.com/spf13/cobra v0.0.5/go.mod h1:3K3wKZymM7VvHMDS9+Akkh4K60UwM26emMESw8tLCHU=
 github.com/spf13/cobra v1.8.1 h1:e5/vxKd/rZsfSJMUX1agtjeTDf+qv1/JdBF8gg5k9ZM=
 github.com/spf13/cobra v1.8.1/go.mod h1:wHxEcudfqmLYa8iTfL+OuZPbBZkmvliBWKIezN3kD9Y=
-github.com/spf13/jwalterweatherman v1.0.0/go.mod h1:cQK4TGJAtQXfYWX+Ddv3mKDzgVb68N+wFjFa4jdeBTo=
-github.com/spf13/pflag v1.0.3/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4=
 github.com/spf13/pflag v1.0.5 h1:iy+VFUOCP1a+8yFto/drg2CJ5u0yRoB7fZw3DKv/JXA=
 github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg=
-github.com/spf13/viper v1.3.2/go.mod h1:ZiWeW+zYFKm7srdB9IoDzzZXaJaI5eL9QjNiN/DMA2s=
 github.com/spf13/viper v1.18.2 h1:LUXCnvUvSM6FXAsj6nnfc8Q2tp1dIgUfY9Kc8GsSOiQ=
 github.com/spf13/viper v1.18.2/go.mod h1:EKmWIqdnk5lOcmR72yw6hS+8OPYcwD0jteitLMVB+yk=
 github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
 github.com/stretchr/objx v0.4.0/go.mod h1:YvHI0jy2hoMjB+UWwv71VJQ9isScKT/TqJzVSSt89Yw=
 github.com/stretchr/objx v0.5.0/go.mod h1:Yh+to48EsGEfYuaHDzXPcE3xhTkx73EhmCGUpEOglKo=
-github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs=
 github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI=
 github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4=
 github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
@@ -254,18 +216,13 @@ github.com/subosito/gotenv v1.6.0 h1:9NlTDc1FTs4qu0DDq7AEtTPNw6SVm7uBMsUCUjABIf8
 github.com/subosito/gotenv v1.6.0/go.mod h1:Dk4QP5c2W3ibzajGcXpNraDfq2IrhjMIvMSWPKKo0FU=
 github.com/tchap/go-patricia/v2 v2.3.2 h1:xTHFutuitO2zqKAQ5rCROYgUb7Or/+IC3fts9/Yc7nM=
 github.com/tchap/go-patricia/v2 v2.3.2/go.mod h1:VZRHKAb53DLaG+nA9EaYYiaEx6YztwDlLElMsnSHD4k=
-github.com/ugorji/go/codec v0.0.0-20181204163529-d75b2dcb6bc8/go.mod h1:VFNgLljTbGfSG7qAOspJ7OScBnGdDN/yBr0sguwnwf0=
 github.com/xeipuuv/gojsonpointer v0.0.0-20190905194746-02993c407bfb h1:zGWFAtiMcyryUHoUjUJX0/lt1H2+i2Ka2n+D3DImSNo=
 github.com/xeipuuv/gojsonpointer v0.0.0-20190905194746-02993c407bfb/go.mod h1:N2zxlSyiKSe5eX1tZViRH5QA0qijqEDrYZiPEAiq3wU=
 github.com/xeipuuv/gojsonreference v0.0.0-20180127040603-bd5ef7bd5415 h1:EzJWgHovont7NscjpAxXsDA8S8BMYve8Y5+7cuRE7R0=
 github.com/xeipuuv/gojsonreference v0.0.0-20180127040603-bd5ef7bd5415/go.mod h1:GwrjFmJcFw6At/Gs6z4yjiIwzuJ1/+UwLxMQDVQXShQ=
-github.com/xordataexchange/crypt v0.0.3-0.20170626215501-b2862e3d0a77/go.mod h1:aYKd//L2LvnjZzWKhF00oedf4jCCReLcmhLdhm1A27Q=
 github.com/yashtewari/glob-intersection v0.2.0 h1:8iuHdN88yYuCzCdjt0gDe+6bAhUwBeEWqThExu54RFg=
 github.com/yashtewari/glob-intersection v0.2.0/go.mod h1:LK7pIC3piUjovexikBbJ26Yml7g8xa5bsjfx2v1fwok=
-github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
-github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
 github.com/yuin/goldmark v1.4.13/go.mod h1:6yULJ656Px+3vBD8DxQVa3kxgyrAnzto9xy5taEt/CY=
-go.opencensus.io v0.22.5/go.mod h1:5pWMHQbX5EPX2/62yrJeAkowc+lfs/XD7Uxpq3pI6kk=
 go.opencensus.io v0.24.0 h1:y73uSU6J157QMP2kn2r30vwW1A2W2WFwSCGnAVxeaD0=
 go.opencensus.io v0.24.0/go.mod h1:vNK8G9p7aAivkbmorf4v+7Hgx+Zs0yY+0fOtgBfjQKo=
 go.opentelemetry.io/auto/sdk v1.1.0 h1:cH53jehLUN6UFLY71z+NDOiNJqDdPRaXzTel0sJySYA=
@@ -296,9 +253,7 @@ go.uber.org/goleak v1.3.0 h1:2K3zAYmnTNqV73imy9J1T3WC+gmCePx2hEGkimedGto=
 go.uber.org/goleak v1.3.0/go.mod h1:CoHD4mav9JJNrW/WLlf7HGZPjdw8EucARQHekz1X6bE=
 go.uber.org/multierr v1.9.0 h1:7fIwc/ZtS0q++VgcfqFDxSBZVv/Xo49/SYnDFupUwlI=
 go.uber.org/multierr v1.9.0/go.mod h1:X2jQV1h+kxSjClGpnseKVIxpmcjrj7MNnI0bnlfKTVQ=
-golang.org/x/crypto v0.0.0-20181203042331-505ab145d0a9/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4=
 golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
-golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
 golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
 golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc=
 golang.org/x/crypto v0.13.0/go.mod h1:y6Z2r+Rw4iayiXXAIxJIDAJ1zMW4yaTpebo8fPOliYc=
@@ -310,8 +265,6 @@ golang.org/x/exp v0.0.0-20230905200255-921286631fa9/go.mod h1:S2oDrQGGwySpoQPVqR
 golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE=
 golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvxsM5YxQ5yQlVC4a0KAMCusXpPoU=
 golang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc=
-golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
-golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
 golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4/go.mod h1:jJ57K6gSWd91VN4djpZkiMVwK6gcyfeH4XE8wZrZaV4=
 golang.org/x/mod v0.8.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs=
 golang.org/x/mod v0.12.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs=
@@ -324,8 +277,6 @@ golang.org/x/net v0.0.0-20190213061140-3a22650c66bd/go.mod h1:mL1N/T3taQHkDXs73r
 golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
 golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
 golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
-golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
-golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU=
 golang.org/x/net v0.0.0-20201110031124-69a78807bb2b/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU=
 golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg=
 golang.org/x/net v0.0.0-20220722155237-a158d28d115b/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c=
@@ -339,10 +290,7 @@ golang.org/x/net v0.34.0/go.mod h1:di0qlW3YNM5oh6GqDGQr92MyTozJPmybPK4Ev/Gm31k=
 golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U=
 golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
 golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
-golang.org/x/sync v0.0.0-20190227155943-e225da77a7e6/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
 golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
-golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
-golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
 golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
 golang.org/x/sync v0.1.0/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
 golang.org/x/sync v0.3.0/go.mod h1:FU7BRWz2tNW+3quACPkgCx/L+uEAv1htQ0V83Z9Rj+Y=
@@ -351,10 +299,8 @@ golang.org/x/sync v0.5.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk=
 golang.org/x/sync v0.10.0 h1:3NQrjDixjgGwUOCaF8w2+VYHv0Ve/vGYSbdkTa98gmQ=
 golang.org/x/sync v0.10.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk=
 golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
-golang.org/x/sys v0.0.0-20181205085412-a5c9d58dba9a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
 golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
 golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20190502145724-3ef323f4f1fd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
 golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
 golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
 golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
@@ -362,7 +308,6 @@ golang.org/x/sys v0.0.0-20211117180635-dee7805ff2e1/go.mod h1:oPkhp1MJrh7nUepCBc
 golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
 golang.org/x/sys v0.0.0-20220715151400-c0bba94af5f8/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
 golang.org/x/sys v0.0.0-20220722155257-8c9f86f7a55f/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
-golang.org/x/sys v0.0.0-20221010170243-090e33056c14/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
 golang.org/x/sys v0.5.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
 golang.org/x/sys v0.8.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
 golang.org/x/sys v0.12.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
@@ -394,8 +339,6 @@ golang.org/x/tools v0.0.0-20190226205152-f727befe758c/go.mod h1:9Yl7xja0Znq3iFh3
 golang.org/x/tools v0.0.0-20190311212946-11955173bddd/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs=
 golang.org/x/tools v0.0.0-20190524140312-2c0ae7006135/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q=
 golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
-golang.org/x/tools v0.0.0-20200619180055-7c47624df98f/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE=
-golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA=
 golang.org/x/tools v0.1.12/go.mod h1:hNGJHUnrk76NpqgfD5Aqm5Crs+Hm0VOH/i9J2+nxYbc=
 golang.org/x/tools v0.6.0/go.mod h1:Xwgl3UAJ/d3gWutnCtw505GrjyAbvKui8lOU390QaIU=
 golang.org/x/tools v0.13.0/go.mod h1:HvlwmtVNQAhOuCjW7xxvovg8wbNq7LwfXh/k7wXUl58=
@@ -403,13 +346,10 @@ golang.org/x/tools v0.15.0/go.mod h1:hpksKq4dtpQWS1uQ61JkdqWM3LscIS6Slf+VVkm+wQk
 golang.org/x/tools v0.22.0 h1:gqSGLZqv+AI9lIQzniJ0nZDRG5GBPsSi+DRNHWNz6yA=
 golang.org/x/tools v0.22.0/go.mod h1:aCwcsjqvq7Yqt6TNyX7QMU2enbQ/Gt0bo6krSeEri+c=
 golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
-golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
 golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
-golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
 google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM=
 google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4=
 google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc=
-google.golang.org/genproto v0.0.0-20190425155659-357c62f0e4bb/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE=
 google.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc=
 google.golang.org/genproto v0.0.0-20200526211855-cb27e3aa2013/go.mod h1:NbSheEEYHJ7i3ixzK3sjbqSGDJWnxyFXZblF3eUsNvo=
 google.golang.org/genproto/googleapis/api v0.0.0-20241209162323-e6fa225c2576 h1:CkkIfIt50+lT6NHAVoRYEyAvQGFM7xEwXUUywFvEb3Q=
@@ -417,7 +357,6 @@ google.golang.org/genproto/googleapis/api v0.0.0-20241209162323-e6fa225c2576/go.
google.golang.org/genproto/googleapis/rpc v0.0.0-20241209162323-e6fa225c2576 h1:8ZmaLZE4XWrtU3MyClkYqqtl6Oegr3235h7jxsDyqCY= google.golang.org/genproto/googleapis/rpc v0.0.0-20241209162323-e6fa225c2576/go.mod h1:5uTbfoYQed2U9p3KIj2/Zzm02PYhndfdmML0qC3q3FU= google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c= -google.golang.org/grpc v1.20.1/go.mod h1:10oTOabMzJvdu6/UiuZezV6QK5dSlG84ov/aaiqXj38= google.golang.org/grpc v1.23.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg= google.golang.org/grpc v1.25.1/go.mod h1:c3i+UQWmh7LiEpx4sFZnkU36qjEYZ0imhYfXVyQciAY= google.golang.org/grpc v1.27.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= diff --git a/v1/storage/disk/config.go b/v1/storage/disk/config.go index 5bb6f6046a..f871d1cafd 100644 --- a/v1/storage/disk/config.go +++ b/v1/storage/disk/config.go @@ -9,7 +9,7 @@ import ( "fmt" "os" - badger "github.com/dgraph-io/badger/v3" + badger "github.com/dgraph-io/badger/v4" "github.com/open-policy-agent/opa/v1/config" "github.com/open-policy-agent/opa/v1/storage" "github.com/open-policy-agent/opa/v1/util" diff --git a/v1/storage/disk/config_test.go b/v1/storage/disk/config_test.go index 9eb2364163..e773fd9deb 100644 --- a/v1/storage/disk/config_test.go +++ b/v1/storage/disk/config_test.go @@ -11,7 +11,7 @@ import ( "path/filepath" "testing" - "github.com/dgraph-io/badger/v3" + "github.com/dgraph-io/badger/v4" "github.com/open-policy-agent/opa/v1/logging" ) diff --git a/v1/storage/disk/disk.go b/v1/storage/disk/disk.go index 8b35e60dff..a252ba0264 100644 --- a/v1/storage/disk/disk.go +++ b/v1/storage/disk/disk.go @@ -71,7 +71,7 @@ import ( "sync/atomic" "time" - badger "github.com/dgraph-io/badger/v3" + badger "github.com/dgraph-io/badger/v4" "github.com/prometheus/client_golang/prometheus" "github.com/open-policy-agent/opa/v1/logging" diff --git a/v1/storage/disk/disk_test.go b/v1/storage/disk/disk_test.go index 711fc4e2d9..99c6179500 100644 --- a/v1/storage/disk/disk_test.go +++ b/v1/storage/disk/disk_test.go @@ -19,7 +19,7 @@ import ( "github.com/open-policy-agent/opa/v1/bundle" - badger "github.com/dgraph-io/badger/v3" + badger "github.com/dgraph-io/badger/v4" "github.com/open-policy-agent/opa/v1/logging" "github.com/open-policy-agent/opa/v1/storage" diff --git a/v1/storage/disk/txn.go b/v1/storage/disk/txn.go index 70dc13bc80..5da0845231 100644 --- a/v1/storage/disk/txn.go +++ b/v1/storage/disk/txn.go @@ -11,7 +11,7 @@ import ( "fmt" "strconv" - badger "github.com/dgraph-io/badger/v3" + badger "github.com/dgraph-io/badger/v4" "github.com/open-policy-agent/opa/v1/metrics" "github.com/open-policy-agent/opa/v1/storage" diff --git a/v1/storage/disk/txn_test.go b/v1/storage/disk/txn_test.go index 0eb64a062b..903a5601b2 100644 --- a/v1/storage/disk/txn_test.go +++ b/v1/storage/disk/txn_test.go @@ -11,7 +11,7 @@ import ( "math/rand" "testing" - "github.com/dgraph-io/badger/v3" + "github.com/dgraph-io/badger/v4" "github.com/open-policy-agent/opa/v1/logging" "github.com/open-policy-agent/opa/v1/storage" "github.com/open-policy-agent/opa/v1/util/test" diff --git a/vendor/github.com/cespare/xxhash/LICENSE.txt b/vendor/github.com/cespare/xxhash/LICENSE.txt deleted file mode 100644 index 24b53065f4..0000000000 --- a/vendor/github.com/cespare/xxhash/LICENSE.txt +++ /dev/null @@ -1,22 +0,0 @@ -Copyright (c) 2016 Caleb Spare - -MIT License - -Permission is hereby granted, free of charge, to any person obtaining -a copy of this software and associated documentation files (the -"Software"), to deal in the Software 
without restriction, including -without limitation the rights to use, copy, modify, merge, publish, -distribute, sublicense, and/or sell copies of the Software, and to -permit persons to whom the Software is furnished to do so, subject to -the following conditions: - -The above copyright notice and this permission notice shall be -included in all copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, -EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF -MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND -NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE -LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION -OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION -WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. diff --git a/vendor/github.com/cespare/xxhash/README.md b/vendor/github.com/cespare/xxhash/README.md deleted file mode 100644 index 0982fd25e5..0000000000 --- a/vendor/github.com/cespare/xxhash/README.md +++ /dev/null @@ -1,50 +0,0 @@ -# xxhash - -[![GoDoc](https://godoc.org/github.com/cespare/xxhash?status.svg)](https://godoc.org/github.com/cespare/xxhash) - -xxhash is a Go implementation of the 64-bit -[xxHash](http://cyan4973.github.io/xxHash/) algorithm, XXH64. This is a -high-quality hashing algorithm that is much faster than anything in the Go -standard library. - -The API is very small, taking its cue from the other hashing packages in the -standard library: - - $ go doc github.com/cespare/xxhash ! - package xxhash // import "github.com/cespare/xxhash" - - Package xxhash implements the 64-bit variant of xxHash (XXH64) as described - at http://cyan4973.github.io/xxHash/. - - func New() hash.Hash64 - func Sum64(b []byte) uint64 - func Sum64String(s string) uint64 - -This implementation provides a fast pure-Go implementation and an even faster -assembly implementation for amd64. - -## Benchmarks - -Here are some quick benchmarks comparing the pure-Go and assembly -implementations of Sum64 against another popular Go XXH64 implementation, -[github.com/OneOfOne/xxhash](https://github.com/OneOfOne/xxhash): - -| input size | OneOfOne | cespare (purego) | cespare | -| --- | --- | --- | --- | -| 5 B | 416 MB/s | 720 MB/s | 872 MB/s | -| 100 B | 3980 MB/s | 5013 MB/s | 5252 MB/s | -| 4 KB | 12727 MB/s | 12999 MB/s | 13026 MB/s | -| 10 MB | 9879 MB/s | 10775 MB/s | 10913 MB/s | - -These numbers were generated with: - -``` -$ go test -benchtime 10s -bench '/OneOfOne,' -$ go test -tags purego -benchtime 10s -bench '/xxhash,' -$ go test -benchtime 10s -bench '/xxhash,' -``` - -## Projects using this package - -- [InfluxDB](https://github.com/influxdata/influxdb) -- [Prometheus](https://github.com/prometheus/prometheus) diff --git a/vendor/github.com/cespare/xxhash/rotate.go b/vendor/github.com/cespare/xxhash/rotate.go deleted file mode 100644 index f3eac5ebc0..0000000000 --- a/vendor/github.com/cespare/xxhash/rotate.go +++ /dev/null @@ -1,14 +0,0 @@ -// +build !go1.9 - -package xxhash - -// TODO(caleb): After Go 1.10 comes out, remove this fallback code. 
- -func rol1(x uint64) uint64 { return (x << 1) | (x >> (64 - 1)) } -func rol7(x uint64) uint64 { return (x << 7) | (x >> (64 - 7)) } -func rol11(x uint64) uint64 { return (x << 11) | (x >> (64 - 11)) } -func rol12(x uint64) uint64 { return (x << 12) | (x >> (64 - 12)) } -func rol18(x uint64) uint64 { return (x << 18) | (x >> (64 - 18)) } -func rol23(x uint64) uint64 { return (x << 23) | (x >> (64 - 23)) } -func rol27(x uint64) uint64 { return (x << 27) | (x >> (64 - 27)) } -func rol31(x uint64) uint64 { return (x << 31) | (x >> (64 - 31)) } diff --git a/vendor/github.com/cespare/xxhash/rotate19.go b/vendor/github.com/cespare/xxhash/rotate19.go deleted file mode 100644 index b99612bab8..0000000000 --- a/vendor/github.com/cespare/xxhash/rotate19.go +++ /dev/null @@ -1,14 +0,0 @@ -// +build go1.9 - -package xxhash - -import "math/bits" - -func rol1(x uint64) uint64 { return bits.RotateLeft64(x, 1) } -func rol7(x uint64) uint64 { return bits.RotateLeft64(x, 7) } -func rol11(x uint64) uint64 { return bits.RotateLeft64(x, 11) } -func rol12(x uint64) uint64 { return bits.RotateLeft64(x, 12) } -func rol18(x uint64) uint64 { return bits.RotateLeft64(x, 18) } -func rol23(x uint64) uint64 { return bits.RotateLeft64(x, 23) } -func rol27(x uint64) uint64 { return bits.RotateLeft64(x, 27) } -func rol31(x uint64) uint64 { return bits.RotateLeft64(x, 31) } diff --git a/vendor/github.com/cespare/xxhash/xxhash.go b/vendor/github.com/cespare/xxhash/xxhash.go deleted file mode 100644 index f896bd28f0..0000000000 --- a/vendor/github.com/cespare/xxhash/xxhash.go +++ /dev/null @@ -1,168 +0,0 @@ -// Package xxhash implements the 64-bit variant of xxHash (XXH64) as described -// at http://cyan4973.github.io/xxHash/. -package xxhash - -import ( - "encoding/binary" - "hash" -) - -const ( - prime1 uint64 = 11400714785074694791 - prime2 uint64 = 14029467366897019727 - prime3 uint64 = 1609587929392839161 - prime4 uint64 = 9650029242287828579 - prime5 uint64 = 2870177450012600261 -) - -// NOTE(caleb): I'm using both consts and vars of the primes. Using consts where -// possible in the Go code is worth a small (but measurable) performance boost -// by avoiding some MOVQs. Vars are needed for the asm and also are useful for -// convenience in the Go code in a few places where we need to intentionally -// avoid constant arithmetic (e.g., v1 := prime1 + prime2 fails because the -// result overflows a uint64). -var ( - prime1v = prime1 - prime2v = prime2 - prime3v = prime3 - prime4v = prime4 - prime5v = prime5 -) - -type xxh struct { - v1 uint64 - v2 uint64 - v3 uint64 - v4 uint64 - total int - mem [32]byte - n int // how much of mem is used -} - -// New creates a new hash.Hash64 that implements the 64-bit xxHash algorithm. -func New() hash.Hash64 { - var x xxh - x.Reset() - return &x -} - -func (x *xxh) Reset() { - x.n = 0 - x.total = 0 - x.v1 = prime1v + prime2 - x.v2 = prime2 - x.v3 = 0 - x.v4 = -prime1v -} - -func (x *xxh) Size() int { return 8 } -func (x *xxh) BlockSize() int { return 32 } - -// Write adds more data to x. It always returns len(b), nil. -func (x *xxh) Write(b []byte) (n int, err error) { - n = len(b) - x.total += len(b) - - if x.n+len(b) < 32 { - // This new data doesn't even fill the current block. - copy(x.mem[x.n:], b) - x.n += len(b) - return - } - - if x.n > 0 { - // Finish off the partial block. 
- copy(x.mem[x.n:], b) - x.v1 = round(x.v1, u64(x.mem[0:8])) - x.v2 = round(x.v2, u64(x.mem[8:16])) - x.v3 = round(x.v3, u64(x.mem[16:24])) - x.v4 = round(x.v4, u64(x.mem[24:32])) - b = b[32-x.n:] - x.n = 0 - } - - if len(b) >= 32 { - // One or more full blocks left. - b = writeBlocks(x, b) - } - - // Store any remaining partial block. - copy(x.mem[:], b) - x.n = len(b) - - return -} - -func (x *xxh) Sum(b []byte) []byte { - s := x.Sum64() - return append( - b, - byte(s>>56), - byte(s>>48), - byte(s>>40), - byte(s>>32), - byte(s>>24), - byte(s>>16), - byte(s>>8), - byte(s), - ) -} - -func (x *xxh) Sum64() uint64 { - var h uint64 - - if x.total >= 32 { - v1, v2, v3, v4 := x.v1, x.v2, x.v3, x.v4 - h = rol1(v1) + rol7(v2) + rol12(v3) + rol18(v4) - h = mergeRound(h, v1) - h = mergeRound(h, v2) - h = mergeRound(h, v3) - h = mergeRound(h, v4) - } else { - h = x.v3 + prime5 - } - - h += uint64(x.total) - - i, end := 0, x.n - for ; i+8 <= end; i += 8 { - k1 := round(0, u64(x.mem[i:i+8])) - h ^= k1 - h = rol27(h)*prime1 + prime4 - } - if i+4 <= end { - h ^= uint64(u32(x.mem[i:i+4])) * prime1 - h = rol23(h)*prime2 + prime3 - i += 4 - } - for i < end { - h ^= uint64(x.mem[i]) * prime5 - h = rol11(h) * prime1 - i++ - } - - h ^= h >> 33 - h *= prime2 - h ^= h >> 29 - h *= prime3 - h ^= h >> 32 - - return h -} - -func u64(b []byte) uint64 { return binary.LittleEndian.Uint64(b) } -func u32(b []byte) uint32 { return binary.LittleEndian.Uint32(b) } - -func round(acc, input uint64) uint64 { - acc += input * prime2 - acc = rol31(acc) - acc *= prime1 - return acc -} - -func mergeRound(acc, val uint64) uint64 { - val = round(0, val) - acc ^= val - acc = acc*prime1 + prime4 - return acc -} diff --git a/vendor/github.com/cespare/xxhash/xxhash_amd64.go b/vendor/github.com/cespare/xxhash/xxhash_amd64.go deleted file mode 100644 index d617652680..0000000000 --- a/vendor/github.com/cespare/xxhash/xxhash_amd64.go +++ /dev/null @@ -1,12 +0,0 @@ -// +build !appengine -// +build gc -// +build !purego - -package xxhash - -// Sum64 computes the 64-bit xxHash digest of b. -// -//go:noescape -func Sum64(b []byte) uint64 - -func writeBlocks(x *xxh, b []byte) []byte diff --git a/vendor/github.com/cespare/xxhash/xxhash_amd64.s b/vendor/github.com/cespare/xxhash/xxhash_amd64.s deleted file mode 100644 index 757f2011f0..0000000000 --- a/vendor/github.com/cespare/xxhash/xxhash_amd64.s +++ /dev/null @@ -1,233 +0,0 @@ -// +build !appengine -// +build gc -// +build !purego - -#include "textflag.h" - -// Register allocation: -// AX h -// CX pointer to advance through b -// DX n -// BX loop end -// R8 v1, k1 -// R9 v2 -// R10 v3 -// R11 v4 -// R12 tmp -// R13 prime1v -// R14 prime2v -// R15 prime4v - -// round reads from and advances the buffer pointer in CX. -// It assumes that R13 has prime1v and R14 has prime2v. -#define round(r) \ - MOVQ (CX), R12 \ - ADDQ $8, CX \ - IMULQ R14, R12 \ - ADDQ R12, r \ - ROLQ $31, r \ - IMULQ R13, r - -// mergeRound applies a merge round on the two registers acc and val. -// It assumes that R13 has prime1v, R14 has prime2v, and R15 has prime4v. -#define mergeRound(acc, val) \ - IMULQ R14, val \ - ROLQ $31, val \ - IMULQ R13, val \ - XORQ val, acc \ - IMULQ R13, acc \ - ADDQ R15, acc - -// func Sum64(b []byte) uint64 -TEXT ·Sum64(SB), NOSPLIT, $0-32 - // Load fixed primes. - MOVQ ·prime1v(SB), R13 - MOVQ ·prime2v(SB), R14 - MOVQ ·prime4v(SB), R15 - - // Load slice. - MOVQ b_base+0(FP), CX - MOVQ b_len+8(FP), DX - LEAQ (CX)(DX*1), BX - - // The first loop limit will be len(b)-32. 
- SUBQ $32, BX - - // Check whether we have at least one block. - CMPQ DX, $32 - JLT noBlocks - - // Set up initial state (v1, v2, v3, v4). - MOVQ R13, R8 - ADDQ R14, R8 - MOVQ R14, R9 - XORQ R10, R10 - XORQ R11, R11 - SUBQ R13, R11 - - // Loop until CX > BX. -blockLoop: - round(R8) - round(R9) - round(R10) - round(R11) - - CMPQ CX, BX - JLE blockLoop - - MOVQ R8, AX - ROLQ $1, AX - MOVQ R9, R12 - ROLQ $7, R12 - ADDQ R12, AX - MOVQ R10, R12 - ROLQ $12, R12 - ADDQ R12, AX - MOVQ R11, R12 - ROLQ $18, R12 - ADDQ R12, AX - - mergeRound(AX, R8) - mergeRound(AX, R9) - mergeRound(AX, R10) - mergeRound(AX, R11) - - JMP afterBlocks - -noBlocks: - MOVQ ·prime5v(SB), AX - -afterBlocks: - ADDQ DX, AX - - // Right now BX has len(b)-32, and we want to loop until CX > len(b)-8. - ADDQ $24, BX - - CMPQ CX, BX - JG fourByte - -wordLoop: - // Calculate k1. - MOVQ (CX), R8 - ADDQ $8, CX - IMULQ R14, R8 - ROLQ $31, R8 - IMULQ R13, R8 - - XORQ R8, AX - ROLQ $27, AX - IMULQ R13, AX - ADDQ R15, AX - - CMPQ CX, BX - JLE wordLoop - -fourByte: - ADDQ $4, BX - CMPQ CX, BX - JG singles - - MOVL (CX), R8 - ADDQ $4, CX - IMULQ R13, R8 - XORQ R8, AX - - ROLQ $23, AX - IMULQ R14, AX - ADDQ ·prime3v(SB), AX - -singles: - ADDQ $4, BX - CMPQ CX, BX - JGE finalize - -singlesLoop: - MOVBQZX (CX), R12 - ADDQ $1, CX - IMULQ ·prime5v(SB), R12 - XORQ R12, AX - - ROLQ $11, AX - IMULQ R13, AX - - CMPQ CX, BX - JL singlesLoop - -finalize: - MOVQ AX, R12 - SHRQ $33, R12 - XORQ R12, AX - IMULQ R14, AX - MOVQ AX, R12 - SHRQ $29, R12 - XORQ R12, AX - IMULQ ·prime3v(SB), AX - MOVQ AX, R12 - SHRQ $32, R12 - XORQ R12, AX - - MOVQ AX, ret+24(FP) - RET - -// writeBlocks uses the same registers as above except that it uses AX to store -// the x pointer. - -// func writeBlocks(x *xxh, b []byte) []byte -TEXT ·writeBlocks(SB), NOSPLIT, $0-56 - // Load fixed primes needed for round. - MOVQ ·prime1v(SB), R13 - MOVQ ·prime2v(SB), R14 - - // Load slice. - MOVQ b_base+8(FP), CX - MOVQ CX, ret_base+32(FP) // initialize return base pointer; see NOTE below - MOVQ b_len+16(FP), DX - LEAQ (CX)(DX*1), BX - SUBQ $32, BX - - // Load vN from x. - MOVQ x+0(FP), AX - MOVQ 0(AX), R8 // v1 - MOVQ 8(AX), R9 // v2 - MOVQ 16(AX), R10 // v3 - MOVQ 24(AX), R11 // v4 - - // We don't need to check the loop condition here; this function is - // always called with at least one block of data to process. -blockLoop: - round(R8) - round(R9) - round(R10) - round(R11) - - CMPQ CX, BX - JLE blockLoop - - // Copy vN back to x. - MOVQ R8, 0(AX) - MOVQ R9, 8(AX) - MOVQ R10, 16(AX) - MOVQ R11, 24(AX) - - // Construct return slice. - // NOTE: It's important that we don't construct a slice that has a base - // pointer off the end of the original slice, as in Go 1.7+ this will - // cause runtime crashes. (See discussion in, for example, - // https://github.com/golang/go/issues/16772.) - // Therefore, we calculate the length/cap first, and if they're zero, we - // keep the old base. This is what the compiler does as well if you - // write code like - // b = b[len(b):] - - // New length is 32 - (CX - BX) -> BX+32 - CX. 
- ADDQ $32, BX - SUBQ CX, BX - JZ afterSetBase - - MOVQ CX, ret_base+32(FP) - -afterSetBase: - MOVQ BX, ret_len+40(FP) - MOVQ BX, ret_cap+48(FP) // set cap == len - - RET diff --git a/vendor/github.com/cespare/xxhash/xxhash_other.go b/vendor/github.com/cespare/xxhash/xxhash_other.go deleted file mode 100644 index c68d13f89e..0000000000 --- a/vendor/github.com/cespare/xxhash/xxhash_other.go +++ /dev/null @@ -1,75 +0,0 @@ -// +build !amd64 appengine !gc purego - -package xxhash - -// Sum64 computes the 64-bit xxHash digest of b. -func Sum64(b []byte) uint64 { - // A simpler version would be - // x := New() - // x.Write(b) - // return x.Sum64() - // but this is faster, particularly for small inputs. - - n := len(b) - var h uint64 - - if n >= 32 { - v1 := prime1v + prime2 - v2 := prime2 - v3 := uint64(0) - v4 := -prime1v - for len(b) >= 32 { - v1 = round(v1, u64(b[0:8:len(b)])) - v2 = round(v2, u64(b[8:16:len(b)])) - v3 = round(v3, u64(b[16:24:len(b)])) - v4 = round(v4, u64(b[24:32:len(b)])) - b = b[32:len(b):len(b)] - } - h = rol1(v1) + rol7(v2) + rol12(v3) + rol18(v4) - h = mergeRound(h, v1) - h = mergeRound(h, v2) - h = mergeRound(h, v3) - h = mergeRound(h, v4) - } else { - h = prime5 - } - - h += uint64(n) - - i, end := 0, len(b) - for ; i+8 <= end; i += 8 { - k1 := round(0, u64(b[i:i+8:len(b)])) - h ^= k1 - h = rol27(h)*prime1 + prime4 - } - if i+4 <= end { - h ^= uint64(u32(b[i:i+4:len(b)])) * prime1 - h = rol23(h)*prime2 + prime3 - i += 4 - } - for ; i < end; i++ { - h ^= uint64(b[i]) * prime5 - h = rol11(h) * prime1 - } - - h ^= h >> 33 - h *= prime2 - h ^= h >> 29 - h *= prime3 - h ^= h >> 32 - - return h -} - -func writeBlocks(x *xxh, b []byte) []byte { - v1, v2, v3, v4 := x.v1, x.v2, x.v3, x.v4 - for len(b) >= 32 { - v1 = round(v1, u64(b[0:8:len(b)])) - v2 = round(v2, u64(b[8:16:len(b)])) - v3 = round(v3, u64(b[16:24:len(b)])) - v4 = round(v4, u64(b[24:32:len(b)])) - b = b[32:len(b):len(b)] - } - x.v1, x.v2, x.v3, x.v4 = v1, v2, v3, v4 - return b -} diff --git a/vendor/github.com/cespare/xxhash/xxhash_safe.go b/vendor/github.com/cespare/xxhash/xxhash_safe.go deleted file mode 100644 index dfa15ab7e2..0000000000 --- a/vendor/github.com/cespare/xxhash/xxhash_safe.go +++ /dev/null @@ -1,10 +0,0 @@ -// +build appengine - -// This file contains the safe implementations of otherwise unsafe-using code. - -package xxhash - -// Sum64String computes the 64-bit xxHash digest of s. -func Sum64String(s string) uint64 { - return Sum64([]byte(s)) -} diff --git a/vendor/github.com/cespare/xxhash/xxhash_unsafe.go b/vendor/github.com/cespare/xxhash/xxhash_unsafe.go deleted file mode 100644 index d2b64e8bb0..0000000000 --- a/vendor/github.com/cespare/xxhash/xxhash_unsafe.go +++ /dev/null @@ -1,30 +0,0 @@ -// +build !appengine - -// This file encapsulates usage of unsafe. -// xxhash_safe.go contains the safe implementations. - -package xxhash - -import ( - "reflect" - "unsafe" -) - -// Sum64String computes the 64-bit xxHash digest of s. -// It may be faster than Sum64([]byte(s)) by avoiding a copy. -// -// TODO(caleb): Consider removing this if an optimization is ever added to make -// it unnecessary: https://golang.org/issue/2205. -// -// TODO(caleb): We still have a function call; we could instead write Go/asm -// copies of Sum64 for strings to squeeze out a bit more speed. -func Sum64String(s string) uint64 { - // See https://groups.google.com/d/msg/golang-nuts/dcjzJy-bSpw/tcZYBzQqAQAJ - // for some discussion about this unsafe conversion. 
- var b []byte - bh := (*reflect.SliceHeader)(unsafe.Pointer(&b)) - bh.Data = (*reflect.StringHeader)(unsafe.Pointer(&s)).Data - bh.Len = len(s) - bh.Cap = len(s) - return Sum64(b) -} diff --git a/vendor/github.com/dgraph-io/badger/v3/.go-version b/vendor/github.com/dgraph-io/badger/v3/.go-version deleted file mode 100644 index adc97d8e22..0000000000 --- a/vendor/github.com/dgraph-io/badger/v3/.go-version +++ /dev/null @@ -1 +0,0 @@ -1.18 diff --git a/vendor/github.com/dgraph-io/badger/v3/.golangci.yml b/vendor/github.com/dgraph-io/badger/v3/.golangci.yml deleted file mode 100644 index 81060fb1c1..0000000000 --- a/vendor/github.com/dgraph-io/badger/v3/.golangci.yml +++ /dev/null @@ -1,21 +0,0 @@ -run: - tests: false - -linters-settings: - lll: - line-length: 120 - -linters: - disable-all: true - enable: -# - errcheck -# - ineffassign -# - gas - - gofmt -# - golint - - gosimple - - govet - - lll -# - varcheck -# - unused -# - gosec diff --git a/vendor/github.com/dgraph-io/badger/v3/appveyor.yml b/vendor/github.com/dgraph-io/badger/v3/appveyor.yml deleted file mode 100644 index 1842b117bf..0000000000 --- a/vendor/github.com/dgraph-io/badger/v3/appveyor.yml +++ /dev/null @@ -1,49 +0,0 @@ -# version format -version: "{build}" - -# Operating system (build VM template) -os: Windows Server 2012 R2 - -# Platform. -platform: x64 - -clone_folder: c:\gopath\src\github.com\dgraph-io\badger - -# Environment variables -environment: - GOVERSION: 1.12 - GOPATH: c:\gopath - GO111MODULE: on - -# scripts that run after cloning repository -install: - - set PATH=%GOPATH%\bin;c:\go\bin;c:\msys64\mingw64\bin;%PATH% - - go version - - go env - - python --version - - gcc --version - -# To run your custom scripts instead of automatic MSBuild -build_script: - # We need to disable firewall - https://github.com/appveyor/ci/issues/1579#issuecomment-309830648 - - ps: Disable-NetFirewallRule -DisplayName 'File and Printer Sharing (SMB-Out)' - - cd c:\gopath\src\github.com\dgraph-io\badger - - git branch - - go get -t ./... - -# To run your custom scripts instead of automatic tests -test_script: - # Unit tests - - ps: Add-AppveyorTest "Unit Tests" -Outcome Running - - go test -v github.com/dgraph-io/badger/... - - ps: Update-AppveyorTest "Unit Tests" -Outcome Passed - -notifications: - - provider: Email - to: - - pawan@dgraph.io - on_build_failure: true - on_build_status_changed: true -# to disable deployment -deploy: off - diff --git a/vendor/github.com/dgraph-io/badger/v3/pb/badgerpb3.pb.go b/vendor/github.com/dgraph-io/badger/v3/pb/badgerpb3.pb.go deleted file mode 100644 index 927878ca03..0000000000 --- a/vendor/github.com/dgraph-io/badger/v3/pb/badgerpb3.pb.go +++ /dev/null @@ -1,2164 +0,0 @@ -// Code generated by protoc-gen-gogo. DO NOT EDIT. -// source: badgerpb3.proto - -package pb - -import ( - fmt "fmt" - proto "github.com/gogo/protobuf/proto" - io "io" - math "math" - math_bits "math/bits" -) - -// Reference imports to suppress errors if they are not otherwise used. -var _ = proto.Marshal -var _ = fmt.Errorf -var _ = math.Inf - -// This is a compile-time assertion to ensure that this generated file -// is compatible with the proto package it is being compiled against. -// A compilation error at this line likely means your copy of the -// proto package needs to be updated. 
-const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package - -type EncryptionAlgo int32 - -const ( - EncryptionAlgo_aes EncryptionAlgo = 0 -) - -var EncryptionAlgo_name = map[int32]string{ - 0: "aes", -} - -var EncryptionAlgo_value = map[string]int32{ - "aes": 0, -} - -func (x EncryptionAlgo) String() string { - return proto.EnumName(EncryptionAlgo_name, int32(x)) -} - -func (EncryptionAlgo) EnumDescriptor() ([]byte, []int) { - return fileDescriptor_6d729c99bbc38987, []int{0} -} - -type ManifestChange_Operation int32 - -const ( - ManifestChange_CREATE ManifestChange_Operation = 0 - ManifestChange_DELETE ManifestChange_Operation = 1 -) - -var ManifestChange_Operation_name = map[int32]string{ - 0: "CREATE", - 1: "DELETE", -} - -var ManifestChange_Operation_value = map[string]int32{ - "CREATE": 0, - "DELETE": 1, -} - -func (x ManifestChange_Operation) String() string { - return proto.EnumName(ManifestChange_Operation_name, int32(x)) -} - -func (ManifestChange_Operation) EnumDescriptor() ([]byte, []int) { - return fileDescriptor_6d729c99bbc38987, []int{3, 0} -} - -type Checksum_Algorithm int32 - -const ( - Checksum_CRC32C Checksum_Algorithm = 0 - Checksum_XXHash64 Checksum_Algorithm = 1 -) - -var Checksum_Algorithm_name = map[int32]string{ - 0: "CRC32C", - 1: "XXHash64", -} - -var Checksum_Algorithm_value = map[string]int32{ - "CRC32C": 0, - "XXHash64": 1, -} - -func (x Checksum_Algorithm) String() string { - return proto.EnumName(Checksum_Algorithm_name, int32(x)) -} - -func (Checksum_Algorithm) EnumDescriptor() ([]byte, []int) { - return fileDescriptor_6d729c99bbc38987, []int{4, 0} -} - -type KV struct { - Key []byte `protobuf:"bytes,1,opt,name=key,proto3" json:"key,omitempty"` - Value []byte `protobuf:"bytes,2,opt,name=value,proto3" json:"value,omitempty"` - UserMeta []byte `protobuf:"bytes,3,opt,name=user_meta,json=userMeta,proto3" json:"user_meta,omitempty"` - Version uint64 `protobuf:"varint,4,opt,name=version,proto3" json:"version,omitempty"` - ExpiresAt uint64 `protobuf:"varint,5,opt,name=expires_at,json=expiresAt,proto3" json:"expires_at,omitempty"` - Meta []byte `protobuf:"bytes,6,opt,name=meta,proto3" json:"meta,omitempty"` - // Stream id is used to identify which stream the KV came from. - StreamId uint32 `protobuf:"varint,10,opt,name=stream_id,json=streamId,proto3" json:"stream_id,omitempty"` - // Stream done is used to indicate end of stream. 
- StreamDone bool `protobuf:"varint,11,opt,name=stream_done,json=streamDone,proto3" json:"stream_done,omitempty"` -} - -func (m *KV) Reset() { *m = KV{} } -func (m *KV) String() string { return proto.CompactTextString(m) } -func (*KV) ProtoMessage() {} -func (*KV) Descriptor() ([]byte, []int) { - return fileDescriptor_6d729c99bbc38987, []int{0} -} -func (m *KV) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *KV) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_KV.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *KV) XXX_Merge(src proto.Message) { - xxx_messageInfo_KV.Merge(m, src) -} -func (m *KV) XXX_Size() int { - return m.Size() -} -func (m *KV) XXX_DiscardUnknown() { - xxx_messageInfo_KV.DiscardUnknown(m) -} - -var xxx_messageInfo_KV proto.InternalMessageInfo - -func (m *KV) GetKey() []byte { - if m != nil { - return m.Key - } - return nil -} - -func (m *KV) GetValue() []byte { - if m != nil { - return m.Value - } - return nil -} - -func (m *KV) GetUserMeta() []byte { - if m != nil { - return m.UserMeta - } - return nil -} - -func (m *KV) GetVersion() uint64 { - if m != nil { - return m.Version - } - return 0 -} - -func (m *KV) GetExpiresAt() uint64 { - if m != nil { - return m.ExpiresAt - } - return 0 -} - -func (m *KV) GetMeta() []byte { - if m != nil { - return m.Meta - } - return nil -} - -func (m *KV) GetStreamId() uint32 { - if m != nil { - return m.StreamId - } - return 0 -} - -func (m *KV) GetStreamDone() bool { - if m != nil { - return m.StreamDone - } - return false -} - -type KVList struct { - Kv []*KV `protobuf:"bytes,1,rep,name=kv,proto3" json:"kv,omitempty"` - // alloc_ref used internally for memory management. - AllocRef uint64 `protobuf:"varint,10,opt,name=alloc_ref,json=allocRef,proto3" json:"alloc_ref,omitempty"` -} - -func (m *KVList) Reset() { *m = KVList{} } -func (m *KVList) String() string { return proto.CompactTextString(m) } -func (*KVList) ProtoMessage() {} -func (*KVList) Descriptor() ([]byte, []int) { - return fileDescriptor_6d729c99bbc38987, []int{1} -} -func (m *KVList) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *KVList) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_KVList.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *KVList) XXX_Merge(src proto.Message) { - xxx_messageInfo_KVList.Merge(m, src) -} -func (m *KVList) XXX_Size() int { - return m.Size() -} -func (m *KVList) XXX_DiscardUnknown() { - xxx_messageInfo_KVList.DiscardUnknown(m) -} - -var xxx_messageInfo_KVList proto.InternalMessageInfo - -func (m *KVList) GetKv() []*KV { - if m != nil { - return m.Kv - } - return nil -} - -func (m *KVList) GetAllocRef() uint64 { - if m != nil { - return m.AllocRef - } - return 0 -} - -type ManifestChangeSet struct { - // A set of changes that are applied atomically. 
- Changes []*ManifestChange `protobuf:"bytes,1,rep,name=changes,proto3" json:"changes,omitempty"` -} - -func (m *ManifestChangeSet) Reset() { *m = ManifestChangeSet{} } -func (m *ManifestChangeSet) String() string { return proto.CompactTextString(m) } -func (*ManifestChangeSet) ProtoMessage() {} -func (*ManifestChangeSet) Descriptor() ([]byte, []int) { - return fileDescriptor_6d729c99bbc38987, []int{2} -} -func (m *ManifestChangeSet) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *ManifestChangeSet) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_ManifestChangeSet.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *ManifestChangeSet) XXX_Merge(src proto.Message) { - xxx_messageInfo_ManifestChangeSet.Merge(m, src) -} -func (m *ManifestChangeSet) XXX_Size() int { - return m.Size() -} -func (m *ManifestChangeSet) XXX_DiscardUnknown() { - xxx_messageInfo_ManifestChangeSet.DiscardUnknown(m) -} - -var xxx_messageInfo_ManifestChangeSet proto.InternalMessageInfo - -func (m *ManifestChangeSet) GetChanges() []*ManifestChange { - if m != nil { - return m.Changes - } - return nil -} - -type ManifestChange struct { - Id uint64 `protobuf:"varint,1,opt,name=Id,proto3" json:"Id,omitempty"` - Op ManifestChange_Operation `protobuf:"varint,2,opt,name=Op,proto3,enum=badgerpb3.ManifestChange_Operation" json:"Op,omitempty"` - Level uint32 `protobuf:"varint,3,opt,name=Level,proto3" json:"Level,omitempty"` - KeyId uint64 `protobuf:"varint,4,opt,name=key_id,json=keyId,proto3" json:"key_id,omitempty"` - EncryptionAlgo EncryptionAlgo `protobuf:"varint,5,opt,name=encryption_algo,json=encryptionAlgo,proto3,enum=badgerpb3.EncryptionAlgo" json:"encryption_algo,omitempty"` - Compression uint32 `protobuf:"varint,6,opt,name=compression,proto3" json:"compression,omitempty"` -} - -func (m *ManifestChange) Reset() { *m = ManifestChange{} } -func (m *ManifestChange) String() string { return proto.CompactTextString(m) } -func (*ManifestChange) ProtoMessage() {} -func (*ManifestChange) Descriptor() ([]byte, []int) { - return fileDescriptor_6d729c99bbc38987, []int{3} -} -func (m *ManifestChange) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *ManifestChange) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_ManifestChange.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *ManifestChange) XXX_Merge(src proto.Message) { - xxx_messageInfo_ManifestChange.Merge(m, src) -} -func (m *ManifestChange) XXX_Size() int { - return m.Size() -} -func (m *ManifestChange) XXX_DiscardUnknown() { - xxx_messageInfo_ManifestChange.DiscardUnknown(m) -} - -var xxx_messageInfo_ManifestChange proto.InternalMessageInfo - -func (m *ManifestChange) GetId() uint64 { - if m != nil { - return m.Id - } - return 0 -} - -func (m *ManifestChange) GetOp() ManifestChange_Operation { - if m != nil { - return m.Op - } - return ManifestChange_CREATE -} - -func (m *ManifestChange) GetLevel() uint32 { - if m != nil { - return m.Level - } - return 0 -} - -func (m *ManifestChange) GetKeyId() uint64 { - if m != nil { - return m.KeyId - } - return 0 -} - -func (m *ManifestChange) GetEncryptionAlgo() EncryptionAlgo { - if m != nil { - return m.EncryptionAlgo - } - return 
EncryptionAlgo_aes -} - -func (m *ManifestChange) GetCompression() uint32 { - if m != nil { - return m.Compression - } - return 0 -} - -type Checksum struct { - Algo Checksum_Algorithm `protobuf:"varint,1,opt,name=algo,proto3,enum=badgerpb3.Checksum_Algorithm" json:"algo,omitempty"` - Sum uint64 `protobuf:"varint,2,opt,name=sum,proto3" json:"sum,omitempty"` -} - -func (m *Checksum) Reset() { *m = Checksum{} } -func (m *Checksum) String() string { return proto.CompactTextString(m) } -func (*Checksum) ProtoMessage() {} -func (*Checksum) Descriptor() ([]byte, []int) { - return fileDescriptor_6d729c99bbc38987, []int{4} -} -func (m *Checksum) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *Checksum) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_Checksum.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *Checksum) XXX_Merge(src proto.Message) { - xxx_messageInfo_Checksum.Merge(m, src) -} -func (m *Checksum) XXX_Size() int { - return m.Size() -} -func (m *Checksum) XXX_DiscardUnknown() { - xxx_messageInfo_Checksum.DiscardUnknown(m) -} - -var xxx_messageInfo_Checksum proto.InternalMessageInfo - -func (m *Checksum) GetAlgo() Checksum_Algorithm { - if m != nil { - return m.Algo - } - return Checksum_CRC32C -} - -func (m *Checksum) GetSum() uint64 { - if m != nil { - return m.Sum - } - return 0 -} - -type DataKey struct { - KeyId uint64 `protobuf:"varint,1,opt,name=key_id,json=keyId,proto3" json:"key_id,omitempty"` - Data []byte `protobuf:"bytes,2,opt,name=data,proto3" json:"data,omitempty"` - Iv []byte `protobuf:"bytes,3,opt,name=iv,proto3" json:"iv,omitempty"` - CreatedAt int64 `protobuf:"varint,4,opt,name=created_at,json=createdAt,proto3" json:"created_at,omitempty"` -} - -func (m *DataKey) Reset() { *m = DataKey{} } -func (m *DataKey) String() string { return proto.CompactTextString(m) } -func (*DataKey) ProtoMessage() {} -func (*DataKey) Descriptor() ([]byte, []int) { - return fileDescriptor_6d729c99bbc38987, []int{5} -} -func (m *DataKey) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *DataKey) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_DataKey.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *DataKey) XXX_Merge(src proto.Message) { - xxx_messageInfo_DataKey.Merge(m, src) -} -func (m *DataKey) XXX_Size() int { - return m.Size() -} -func (m *DataKey) XXX_DiscardUnknown() { - xxx_messageInfo_DataKey.DiscardUnknown(m) -} - -var xxx_messageInfo_DataKey proto.InternalMessageInfo - -func (m *DataKey) GetKeyId() uint64 { - if m != nil { - return m.KeyId - } - return 0 -} - -func (m *DataKey) GetData() []byte { - if m != nil { - return m.Data - } - return nil -} - -func (m *DataKey) GetIv() []byte { - if m != nil { - return m.Iv - } - return nil -} - -func (m *DataKey) GetCreatedAt() int64 { - if m != nil { - return m.CreatedAt - } - return 0 -} - -type Match struct { - Prefix []byte `protobuf:"bytes,1,opt,name=prefix,proto3" json:"prefix,omitempty"` - IgnoreBytes string `protobuf:"bytes,2,opt,name=ignore_bytes,json=ignoreBytes,proto3" json:"ignore_bytes,omitempty"` -} - -func (m *Match) Reset() { *m = Match{} } -func (m *Match) String() string { return proto.CompactTextString(m) } -func (*Match) 
ProtoMessage() {} -func (*Match) Descriptor() ([]byte, []int) { - return fileDescriptor_6d729c99bbc38987, []int{6} -} -func (m *Match) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *Match) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_Match.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *Match) XXX_Merge(src proto.Message) { - xxx_messageInfo_Match.Merge(m, src) -} -func (m *Match) XXX_Size() int { - return m.Size() -} -func (m *Match) XXX_DiscardUnknown() { - xxx_messageInfo_Match.DiscardUnknown(m) -} - -var xxx_messageInfo_Match proto.InternalMessageInfo - -func (m *Match) GetPrefix() []byte { - if m != nil { - return m.Prefix - } - return nil -} - -func (m *Match) GetIgnoreBytes() string { - if m != nil { - return m.IgnoreBytes - } - return "" -} - -func init() { - proto.RegisterEnum("badgerpb3.EncryptionAlgo", EncryptionAlgo_name, EncryptionAlgo_value) - proto.RegisterEnum("badgerpb3.ManifestChange_Operation", ManifestChange_Operation_name, ManifestChange_Operation_value) - proto.RegisterEnum("badgerpb3.Checksum_Algorithm", Checksum_Algorithm_name, Checksum_Algorithm_value) - proto.RegisterType((*KV)(nil), "badgerpb3.KV") - proto.RegisterType((*KVList)(nil), "badgerpb3.KVList") - proto.RegisterType((*ManifestChangeSet)(nil), "badgerpb3.ManifestChangeSet") - proto.RegisterType((*ManifestChange)(nil), "badgerpb3.ManifestChange") - proto.RegisterType((*Checksum)(nil), "badgerpb3.Checksum") - proto.RegisterType((*DataKey)(nil), "badgerpb3.DataKey") - proto.RegisterType((*Match)(nil), "badgerpb3.Match") -} - -func init() { proto.RegisterFile("badgerpb3.proto", fileDescriptor_6d729c99bbc38987) } - -var fileDescriptor_6d729c99bbc38987 = []byte{ - // 651 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x74, 0x54, 0x4d, 0x6b, 0xdb, 0x40, - 0x10, 0xf5, 0xca, 0xf2, 0xd7, 0x38, 0x71, 0xdc, 0xa5, 0x2d, 0x0a, 0x25, 0xae, 0xa3, 0x50, 0x30, - 0x85, 0xda, 0x34, 0x2e, 0xbd, 0xf4, 0xe4, 0x2f, 0x88, 0x71, 0x42, 0x60, 0x1b, 0x42, 0xe8, 0xc5, - 0xac, 0xa5, 0xb1, 0x2d, 0x6c, 0x4b, 0x62, 0xb5, 0x16, 0xf1, 0x8f, 0x28, 0xf4, 0x67, 0xf5, 0x98, - 0x43, 0x0f, 0x3d, 0x96, 0xe4, 0x8f, 0x94, 0x5d, 0x29, 0xae, 0x7d, 0xe8, 0x6d, 0xe6, 0xcd, 0x68, - 0xde, 0xe8, 0xbd, 0x91, 0xe0, 0x68, 0xc2, 0xdd, 0x19, 0x8a, 0x70, 0xd2, 0x6e, 0x86, 0x22, 0x90, - 0x01, 0x2d, 0x6d, 0x01, 0xfb, 0x17, 0x01, 0x63, 0x74, 0x4b, 0xab, 0x90, 0x5d, 0xe0, 0xc6, 0x22, - 0x75, 0xd2, 0x38, 0x60, 0x2a, 0xa4, 0x2f, 0x21, 0x17, 0xf3, 0xe5, 0x1a, 0x2d, 0x43, 0x63, 0x49, - 0x42, 0xdf, 0x40, 0x69, 0x1d, 0xa1, 0x18, 0xaf, 0x50, 0x72, 0x2b, 0xab, 0x2b, 0x45, 0x05, 0x5c, - 0xa1, 0xe4, 0xd4, 0x82, 0x42, 0x8c, 0x22, 0xf2, 0x02, 0xdf, 0x32, 0xeb, 0xa4, 0x61, 0xb2, 0xe7, - 0x94, 0x9e, 0x00, 0xe0, 0x7d, 0xe8, 0x09, 0x8c, 0xc6, 0x5c, 0x5a, 0x39, 0x5d, 0x2c, 0xa5, 0x48, - 0x47, 0x52, 0x0a, 0xa6, 0x1e, 0x98, 0xd7, 0x03, 0x75, 0xac, 0x98, 0x22, 0x29, 0x90, 0xaf, 0xc6, - 0x9e, 0x6b, 0x41, 0x9d, 0x34, 0x0e, 0x59, 0x31, 0x01, 0x86, 0x2e, 0x7d, 0x0b, 0xe5, 0xb4, 0xe8, - 0x06, 0x3e, 0x5a, 0xe5, 0x3a, 0x69, 0x14, 0x19, 0x24, 0x50, 0x3f, 0xf0, 0xd1, 0xee, 0x43, 0x7e, - 0x74, 0x7b, 0xe9, 0x45, 0x92, 0x9e, 0x80, 0xb1, 0x88, 0x2d, 0x52, 0xcf, 0x36, 0xca, 0xe7, 0x87, - 0xcd, 0x7f, 0x4a, 0x8c, 0x6e, 0x99, 0xb1, 0x88, 0x15, 0x0d, 0x5f, 0x2e, 0x03, 0x67, 0x2c, 0x70, - 0xaa, 0x69, 0x4c, 0x56, 0xd4, 0x00, 0xc3, 0xa9, 0x7d, 0x01, 
0x2f, 0xae, 0xb8, 0xef, 0x4d, 0x31, - 0x92, 0xbd, 0x39, 0xf7, 0x67, 0xf8, 0x15, 0x25, 0x6d, 0x43, 0xc1, 0xd1, 0x49, 0x94, 0x4e, 0x3d, - 0xde, 0x99, 0xba, 0xdf, 0xce, 0x9e, 0x3b, 0xed, 0xef, 0x06, 0x54, 0xf6, 0x6b, 0xb4, 0x02, 0xc6, - 0xd0, 0xd5, 0x8a, 0x9b, 0xcc, 0x18, 0xba, 0xb4, 0x0d, 0xc6, 0x75, 0xa8, 0xd5, 0xae, 0x9c, 0x9f, - 0xfd, 0x77, 0x64, 0xf3, 0x3a, 0x44, 0xc1, 0xa5, 0x17, 0xf8, 0xcc, 0xb8, 0x0e, 0x95, 0x4b, 0x97, - 0x18, 0xe3, 0x52, 0x7b, 0x71, 0xc8, 0x92, 0x84, 0xbe, 0x82, 0xfc, 0x02, 0x37, 0x4a, 0xb8, 0xc4, - 0x87, 0xdc, 0x02, 0x37, 0x43, 0x97, 0x76, 0xe1, 0x08, 0x7d, 0x47, 0x6c, 0x42, 0xf5, 0xf8, 0x98, - 0x2f, 0x67, 0x81, 0xb6, 0xa2, 0xb2, 0xf7, 0x06, 0x83, 0x6d, 0x47, 0x67, 0x39, 0x0b, 0x58, 0x05, - 0xf7, 0x72, 0x5a, 0x87, 0xb2, 0x13, 0xac, 0x42, 0x81, 0x91, 0xf6, 0x39, 0xaf, 0x69, 0x77, 0x21, - 0xfb, 0x0c, 0x4a, 0xdb, 0x1d, 0x29, 0x40, 0xbe, 0xc7, 0x06, 0x9d, 0x9b, 0x41, 0x35, 0xa3, 0xe2, - 0xfe, 0xe0, 0x72, 0x70, 0x33, 0xa8, 0x12, 0x3b, 0x86, 0x62, 0x6f, 0x8e, 0xce, 0x22, 0x5a, 0xaf, - 0xe8, 0x47, 0x30, 0xf5, 0x2e, 0x44, 0xef, 0x72, 0xb2, 0xb3, 0xcb, 0x73, 0x4b, 0x53, 0x51, 0x0b, - 0x4f, 0xce, 0x57, 0x4c, 0xb7, 0xaa, 0x73, 0x8d, 0xd6, 0x2b, 0x2d, 0x96, 0xc9, 0x54, 0x68, 0xbf, - 0x83, 0xd2, 0xb6, 0x29, 0x61, 0xed, 0xb5, 0xcf, 0x7b, 0xd5, 0x0c, 0x3d, 0x80, 0xe2, 0xdd, 0xdd, - 0x05, 0x8f, 0xe6, 0x9f, 0x3f, 0x55, 0x89, 0xed, 0x40, 0xa1, 0xcf, 0x25, 0x1f, 0xe1, 0x66, 0x47, - 0x24, 0xb2, 0x2b, 0x12, 0x05, 0xd3, 0xe5, 0x92, 0xa7, 0x67, 0xaf, 0x63, 0x65, 0x95, 0x17, 0xa7, - 0xe7, 0x6e, 0x78, 0xb1, 0x3a, 0x67, 0x47, 0x20, 0x97, 0xe8, 0xaa, 0x73, 0x56, 0x1a, 0x67, 0x59, - 0x29, 0x45, 0x3a, 0xd2, 0xee, 0x42, 0xee, 0x8a, 0x4b, 0x67, 0x4e, 0x5f, 0x43, 0x3e, 0x14, 0x38, - 0xf5, 0xee, 0xd3, 0x0f, 0x2b, 0xcd, 0xe8, 0x29, 0x1c, 0x78, 0x33, 0x3f, 0x10, 0x38, 0x9e, 0x6c, - 0x24, 0x46, 0x9a, 0xab, 0xc4, 0xca, 0x09, 0xd6, 0x55, 0xd0, 0xfb, 0x63, 0xa8, 0xec, 0x3b, 0x41, - 0x0b, 0x90, 0xe5, 0x18, 0x55, 0x33, 0xdd, 0x2f, 0x3f, 0x1f, 0x6b, 0xe4, 0xe1, 0xb1, 0x46, 0xfe, - 0x3c, 0xd6, 0xc8, 0x8f, 0xa7, 0x5a, 0xe6, 0xe1, 0xa9, 0x96, 0xf9, 0xfd, 0x54, 0xcb, 0x7c, 0x3b, - 0x9d, 0x79, 0x72, 0xbe, 0x9e, 0x34, 0x9d, 0x60, 0xd5, 0x72, 0x67, 0x82, 0x87, 0xf3, 0x0f, 0x5e, - 0xd0, 0x4a, 0xf4, 0x6c, 0xc5, 0xed, 0x56, 0x38, 0x99, 0xe4, 0xf5, 0x1f, 0xa0, 0xfd, 0x37, 0x00, - 0x00, 0xff, 0xff, 0xa2, 0x8d, 0xa8, 0xf5, 0x14, 0x04, 0x00, 0x00, -} - -func (m *KV) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *KV) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *KV) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if m.StreamDone { - i-- - if m.StreamDone { - dAtA[i] = 1 - } else { - dAtA[i] = 0 - } - i-- - dAtA[i] = 0x58 - } - if m.StreamId != 0 { - i = encodeVarintBadgerpb3(dAtA, i, uint64(m.StreamId)) - i-- - dAtA[i] = 0x50 - } - if len(m.Meta) > 0 { - i -= len(m.Meta) - copy(dAtA[i:], m.Meta) - i = encodeVarintBadgerpb3(dAtA, i, uint64(len(m.Meta))) - i-- - dAtA[i] = 0x32 - } - if m.ExpiresAt != 0 { - i = encodeVarintBadgerpb3(dAtA, i, uint64(m.ExpiresAt)) - i-- - dAtA[i] = 0x28 - } - if m.Version != 0 { - i = encodeVarintBadgerpb3(dAtA, i, uint64(m.Version)) - i-- - dAtA[i] = 0x20 - } - if len(m.UserMeta) > 0 { - i -= len(m.UserMeta) - copy(dAtA[i:], m.UserMeta) - i = encodeVarintBadgerpb3(dAtA, i, uint64(len(m.UserMeta))) - i-- - dAtA[i] = 
0x1a - } - if len(m.Value) > 0 { - i -= len(m.Value) - copy(dAtA[i:], m.Value) - i = encodeVarintBadgerpb3(dAtA, i, uint64(len(m.Value))) - i-- - dAtA[i] = 0x12 - } - if len(m.Key) > 0 { - i -= len(m.Key) - copy(dAtA[i:], m.Key) - i = encodeVarintBadgerpb3(dAtA, i, uint64(len(m.Key))) - i-- - dAtA[i] = 0xa - } - return len(dAtA) - i, nil -} - -func (m *KVList) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *KVList) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *KVList) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if m.AllocRef != 0 { - i = encodeVarintBadgerpb3(dAtA, i, uint64(m.AllocRef)) - i-- - dAtA[i] = 0x50 - } - if len(m.Kv) > 0 { - for iNdEx := len(m.Kv) - 1; iNdEx >= 0; iNdEx-- { - { - size, err := m.Kv[iNdEx].MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintBadgerpb3(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0xa - } - } - return len(dAtA) - i, nil -} - -func (m *ManifestChangeSet) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *ManifestChangeSet) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *ManifestChangeSet) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if len(m.Changes) > 0 { - for iNdEx := len(m.Changes) - 1; iNdEx >= 0; iNdEx-- { - { - size, err := m.Changes[iNdEx].MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintBadgerpb3(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0xa - } - } - return len(dAtA) - i, nil -} - -func (m *ManifestChange) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *ManifestChange) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *ManifestChange) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if m.Compression != 0 { - i = encodeVarintBadgerpb3(dAtA, i, uint64(m.Compression)) - i-- - dAtA[i] = 0x30 - } - if m.EncryptionAlgo != 0 { - i = encodeVarintBadgerpb3(dAtA, i, uint64(m.EncryptionAlgo)) - i-- - dAtA[i] = 0x28 - } - if m.KeyId != 0 { - i = encodeVarintBadgerpb3(dAtA, i, uint64(m.KeyId)) - i-- - dAtA[i] = 0x20 - } - if m.Level != 0 { - i = encodeVarintBadgerpb3(dAtA, i, uint64(m.Level)) - i-- - dAtA[i] = 0x18 - } - if m.Op != 0 { - i = encodeVarintBadgerpb3(dAtA, i, uint64(m.Op)) - i-- - dAtA[i] = 0x10 - } - if m.Id != 0 { - i = encodeVarintBadgerpb3(dAtA, i, uint64(m.Id)) - i-- - dAtA[i] = 0x8 - } - return len(dAtA) - i, nil -} - -func (m *Checksum) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *Checksum) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *Checksum) MarshalToSizedBuffer(dAtA []byte) 
(int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if m.Sum != 0 { - i = encodeVarintBadgerpb3(dAtA, i, uint64(m.Sum)) - i-- - dAtA[i] = 0x10 - } - if m.Algo != 0 { - i = encodeVarintBadgerpb3(dAtA, i, uint64(m.Algo)) - i-- - dAtA[i] = 0x8 - } - return len(dAtA) - i, nil -} - -func (m *DataKey) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *DataKey) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *DataKey) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if m.CreatedAt != 0 { - i = encodeVarintBadgerpb3(dAtA, i, uint64(m.CreatedAt)) - i-- - dAtA[i] = 0x20 - } - if len(m.Iv) > 0 { - i -= len(m.Iv) - copy(dAtA[i:], m.Iv) - i = encodeVarintBadgerpb3(dAtA, i, uint64(len(m.Iv))) - i-- - dAtA[i] = 0x1a - } - if len(m.Data) > 0 { - i -= len(m.Data) - copy(dAtA[i:], m.Data) - i = encodeVarintBadgerpb3(dAtA, i, uint64(len(m.Data))) - i-- - dAtA[i] = 0x12 - } - if m.KeyId != 0 { - i = encodeVarintBadgerpb3(dAtA, i, uint64(m.KeyId)) - i-- - dAtA[i] = 0x8 - } - return len(dAtA) - i, nil -} - -func (m *Match) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *Match) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *Match) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if len(m.IgnoreBytes) > 0 { - i -= len(m.IgnoreBytes) - copy(dAtA[i:], m.IgnoreBytes) - i = encodeVarintBadgerpb3(dAtA, i, uint64(len(m.IgnoreBytes))) - i-- - dAtA[i] = 0x12 - } - if len(m.Prefix) > 0 { - i -= len(m.Prefix) - copy(dAtA[i:], m.Prefix) - i = encodeVarintBadgerpb3(dAtA, i, uint64(len(m.Prefix))) - i-- - dAtA[i] = 0xa - } - return len(dAtA) - i, nil -} - -func encodeVarintBadgerpb3(dAtA []byte, offset int, v uint64) int { - offset -= sovBadgerpb3(v) - base := offset - for v >= 1<<7 { - dAtA[offset] = uint8(v&0x7f | 0x80) - v >>= 7 - offset++ - } - dAtA[offset] = uint8(v) - return base -} -func (m *KV) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - l = len(m.Key) - if l > 0 { - n += 1 + l + sovBadgerpb3(uint64(l)) - } - l = len(m.Value) - if l > 0 { - n += 1 + l + sovBadgerpb3(uint64(l)) - } - l = len(m.UserMeta) - if l > 0 { - n += 1 + l + sovBadgerpb3(uint64(l)) - } - if m.Version != 0 { - n += 1 + sovBadgerpb3(uint64(m.Version)) - } - if m.ExpiresAt != 0 { - n += 1 + sovBadgerpb3(uint64(m.ExpiresAt)) - } - l = len(m.Meta) - if l > 0 { - n += 1 + l + sovBadgerpb3(uint64(l)) - } - if m.StreamId != 0 { - n += 1 + sovBadgerpb3(uint64(m.StreamId)) - } - if m.StreamDone { - n += 2 - } - return n -} - -func (m *KVList) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - if len(m.Kv) > 0 { - for _, e := range m.Kv { - l = e.Size() - n += 1 + l + sovBadgerpb3(uint64(l)) - } - } - if m.AllocRef != 0 { - n += 1 + sovBadgerpb3(uint64(m.AllocRef)) - } - return n -} - -func (m *ManifestChangeSet) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - if len(m.Changes) > 0 { - for _, e := range m.Changes { - l = e.Size() - n += 1 + l + sovBadgerpb3(uint64(l)) - } - } - return n -} - -func (m *ManifestChange) Size() (n int) { - if 
m == nil { - return 0 - } - var l int - _ = l - if m.Id != 0 { - n += 1 + sovBadgerpb3(uint64(m.Id)) - } - if m.Op != 0 { - n += 1 + sovBadgerpb3(uint64(m.Op)) - } - if m.Level != 0 { - n += 1 + sovBadgerpb3(uint64(m.Level)) - } - if m.KeyId != 0 { - n += 1 + sovBadgerpb3(uint64(m.KeyId)) - } - if m.EncryptionAlgo != 0 { - n += 1 + sovBadgerpb3(uint64(m.EncryptionAlgo)) - } - if m.Compression != 0 { - n += 1 + sovBadgerpb3(uint64(m.Compression)) - } - return n -} - -func (m *Checksum) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - if m.Algo != 0 { - n += 1 + sovBadgerpb3(uint64(m.Algo)) - } - if m.Sum != 0 { - n += 1 + sovBadgerpb3(uint64(m.Sum)) - } - return n -} - -func (m *DataKey) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - if m.KeyId != 0 { - n += 1 + sovBadgerpb3(uint64(m.KeyId)) - } - l = len(m.Data) - if l > 0 { - n += 1 + l + sovBadgerpb3(uint64(l)) - } - l = len(m.Iv) - if l > 0 { - n += 1 + l + sovBadgerpb3(uint64(l)) - } - if m.CreatedAt != 0 { - n += 1 + sovBadgerpb3(uint64(m.CreatedAt)) - } - return n -} - -func (m *Match) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - l = len(m.Prefix) - if l > 0 { - n += 1 + l + sovBadgerpb3(uint64(l)) - } - l = len(m.IgnoreBytes) - if l > 0 { - n += 1 + l + sovBadgerpb3(uint64(l)) - } - return n -} - -func sovBadgerpb3(x uint64) (n int) { - return (math_bits.Len64(x|1) + 6) / 7 -} -func sozBadgerpb3(x uint64) (n int) { - return sovBadgerpb3(uint64((x << 1) ^ uint64((int64(x) >> 63)))) -} -func (m *KV) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowBadgerpb3 - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: KV: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: KV: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Key", wireType) - } - var byteLen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowBadgerpb3 - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - byteLen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if byteLen < 0 { - return ErrInvalidLengthBadgerpb3 - } - postIndex := iNdEx + byteLen - if postIndex < 0 { - return ErrInvalidLengthBadgerpb3 - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Key = append(m.Key[:0], dAtA[iNdEx:postIndex]...) - if m.Key == nil { - m.Key = []byte{} - } - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Value", wireType) - } - var byteLen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowBadgerpb3 - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - byteLen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if byteLen < 0 { - return ErrInvalidLengthBadgerpb3 - } - postIndex := iNdEx + byteLen - if postIndex < 0 { - return ErrInvalidLengthBadgerpb3 - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Value = append(m.Value[:0], dAtA[iNdEx:postIndex]...) 
- if m.Value == nil { - m.Value = []byte{} - } - iNdEx = postIndex - case 3: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field UserMeta", wireType) - } - var byteLen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowBadgerpb3 - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - byteLen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if byteLen < 0 { - return ErrInvalidLengthBadgerpb3 - } - postIndex := iNdEx + byteLen - if postIndex < 0 { - return ErrInvalidLengthBadgerpb3 - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.UserMeta = append(m.UserMeta[:0], dAtA[iNdEx:postIndex]...) - if m.UserMeta == nil { - m.UserMeta = []byte{} - } - iNdEx = postIndex - case 4: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Version", wireType) - } - m.Version = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowBadgerpb3 - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.Version |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - case 5: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field ExpiresAt", wireType) - } - m.ExpiresAt = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowBadgerpb3 - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.ExpiresAt |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - case 6: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Meta", wireType) - } - var byteLen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowBadgerpb3 - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - byteLen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if byteLen < 0 { - return ErrInvalidLengthBadgerpb3 - } - postIndex := iNdEx + byteLen - if postIndex < 0 { - return ErrInvalidLengthBadgerpb3 - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Meta = append(m.Meta[:0], dAtA[iNdEx:postIndex]...) 
- if m.Meta == nil { - m.Meta = []byte{} - } - iNdEx = postIndex - case 10: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field StreamId", wireType) - } - m.StreamId = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowBadgerpb3 - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.StreamId |= uint32(b&0x7F) << shift - if b < 0x80 { - break - } - } - case 11: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field StreamDone", wireType) - } - var v int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowBadgerpb3 - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - v |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - m.StreamDone = bool(v != 0) - default: - iNdEx = preIndex - skippy, err := skipBadgerpb3(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthBadgerpb3 - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *KVList) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowBadgerpb3 - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: KVList: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: KVList: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Kv", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowBadgerpb3 - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthBadgerpb3 - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthBadgerpb3 - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Kv = append(m.Kv, &KV{}) - if err := m.Kv[len(m.Kv)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 10: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field AllocRef", wireType) - } - m.AllocRef = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowBadgerpb3 - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.AllocRef |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - default: - iNdEx = preIndex - skippy, err := skipBadgerpb3(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthBadgerpb3 - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *ManifestChangeSet) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowBadgerpb3 - } - if iNdEx >= l { - return 
io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: ManifestChangeSet: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: ManifestChangeSet: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Changes", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowBadgerpb3 - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthBadgerpb3 - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthBadgerpb3 - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Changes = append(m.Changes, &ManifestChange{}) - if err := m.Changes[len(m.Changes)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipBadgerpb3(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthBadgerpb3 - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *ManifestChange) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowBadgerpb3 - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: ManifestChange: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: ManifestChange: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Id", wireType) - } - m.Id = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowBadgerpb3 - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.Id |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - case 2: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Op", wireType) - } - m.Op = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowBadgerpb3 - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.Op |= ManifestChange_Operation(b&0x7F) << shift - if b < 0x80 { - break - } - } - case 3: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Level", wireType) - } - m.Level = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowBadgerpb3 - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.Level |= uint32(b&0x7F) << shift - if b < 0x80 { - break - } - } - case 4: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field KeyId", wireType) - } - m.KeyId = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowBadgerpb3 - } - if iNdEx >= l { - 
return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.KeyId |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - case 5: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field EncryptionAlgo", wireType) - } - m.EncryptionAlgo = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowBadgerpb3 - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.EncryptionAlgo |= EncryptionAlgo(b&0x7F) << shift - if b < 0x80 { - break - } - } - case 6: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Compression", wireType) - } - m.Compression = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowBadgerpb3 - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.Compression |= uint32(b&0x7F) << shift - if b < 0x80 { - break - } - } - default: - iNdEx = preIndex - skippy, err := skipBadgerpb3(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthBadgerpb3 - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *Checksum) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowBadgerpb3 - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: Checksum: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: Checksum: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Algo", wireType) - } - m.Algo = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowBadgerpb3 - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.Algo |= Checksum_Algorithm(b&0x7F) << shift - if b < 0x80 { - break - } - } - case 2: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Sum", wireType) - } - m.Sum = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowBadgerpb3 - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.Sum |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - default: - iNdEx = preIndex - skippy, err := skipBadgerpb3(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthBadgerpb3 - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *DataKey) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowBadgerpb3 - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: DataKey: wiretype end group for non-group") - 
} - if fieldNum <= 0 { - return fmt.Errorf("proto: DataKey: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field KeyId", wireType) - } - m.KeyId = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowBadgerpb3 - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.KeyId |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Data", wireType) - } - var byteLen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowBadgerpb3 - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - byteLen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if byteLen < 0 { - return ErrInvalidLengthBadgerpb3 - } - postIndex := iNdEx + byteLen - if postIndex < 0 { - return ErrInvalidLengthBadgerpb3 - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Data = append(m.Data[:0], dAtA[iNdEx:postIndex]...) - if m.Data == nil { - m.Data = []byte{} - } - iNdEx = postIndex - case 3: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Iv", wireType) - } - var byteLen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowBadgerpb3 - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - byteLen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if byteLen < 0 { - return ErrInvalidLengthBadgerpb3 - } - postIndex := iNdEx + byteLen - if postIndex < 0 { - return ErrInvalidLengthBadgerpb3 - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Iv = append(m.Iv[:0], dAtA[iNdEx:postIndex]...) 
- if m.Iv == nil { - m.Iv = []byte{} - } - iNdEx = postIndex - case 4: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field CreatedAt", wireType) - } - m.CreatedAt = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowBadgerpb3 - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.CreatedAt |= int64(b&0x7F) << shift - if b < 0x80 { - break - } - } - default: - iNdEx = preIndex - skippy, err := skipBadgerpb3(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthBadgerpb3 - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *Match) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowBadgerpb3 - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: Match: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: Match: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Prefix", wireType) - } - var byteLen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowBadgerpb3 - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - byteLen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if byteLen < 0 { - return ErrInvalidLengthBadgerpb3 - } - postIndex := iNdEx + byteLen - if postIndex < 0 { - return ErrInvalidLengthBadgerpb3 - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Prefix = append(m.Prefix[:0], dAtA[iNdEx:postIndex]...) 
- if m.Prefix == nil { - m.Prefix = []byte{} - } - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field IgnoreBytes", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowBadgerpb3 - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthBadgerpb3 - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthBadgerpb3 - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.IgnoreBytes = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipBadgerpb3(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthBadgerpb3 - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func skipBadgerpb3(dAtA []byte) (n int, err error) { - l := len(dAtA) - iNdEx := 0 - depth := 0 - for iNdEx < l { - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowBadgerpb3 - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - wireType := int(wire & 0x7) - switch wireType { - case 0: - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowBadgerpb3 - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - iNdEx++ - if dAtA[iNdEx-1] < 0x80 { - break - } - } - case 1: - iNdEx += 8 - case 2: - var length int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowBadgerpb3 - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - length |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if length < 0 { - return 0, ErrInvalidLengthBadgerpb3 - } - iNdEx += length - case 3: - depth++ - case 4: - if depth == 0 { - return 0, ErrUnexpectedEndOfGroupBadgerpb3 - } - depth-- - case 5: - iNdEx += 4 - default: - return 0, fmt.Errorf("proto: illegal wireType %d", wireType) - } - if iNdEx < 0 { - return 0, ErrInvalidLengthBadgerpb3 - } - if depth == 0 { - return iNdEx, nil - } - } - return 0, io.ErrUnexpectedEOF -} - -var ( - ErrInvalidLengthBadgerpb3 = fmt.Errorf("proto: negative length found during unmarshaling") - ErrIntOverflowBadgerpb3 = fmt.Errorf("proto: integer overflow") - ErrUnexpectedEndOfGroupBadgerpb3 = fmt.Errorf("proto: unexpected end of group") -) diff --git a/vendor/github.com/dgraph-io/badger/v3/pb/gen.sh b/vendor/github.com/dgraph-io/badger/v3/pb/gen.sh deleted file mode 100644 index e41208c9da..0000000000 --- a/vendor/github.com/dgraph-io/badger/v3/pb/gen.sh +++ /dev/null @@ -1,8 +0,0 @@ -#!/bin/bash - -# Run this script from its directory, so that badgerpb2.proto is where it's expected to -# be. - -# You might need to go get -v github.com/gogo/protobuf/... -go get -v github.com/gogo/protobuf/protoc-gen-gogofaster -protoc --gogofaster_out=. --gogofaster_opt=paths=source_relative -I=. 
badgerpb3.proto diff --git a/vendor/github.com/dgraph-io/badger/v3/y/metrics.go b/vendor/github.com/dgraph-io/badger/v3/y/metrics.go deleted file mode 100644 index cdae2a194a..0000000000 --- a/vendor/github.com/dgraph-io/badger/v3/y/metrics.go +++ /dev/null @@ -1,169 +0,0 @@ -/* - * Copyright (C) 2017 Dgraph Labs, Inc. and Contributors - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package y - -import ( - "expvar" -) - -var ( - // lsmSize has size of the LSM in bytes - lsmSize *expvar.Map - // vlogSize has size of the value log in bytes - vlogSize *expvar.Map - // pendingWrites tracks the number of pending writes. - pendingWrites *expvar.Map - - // These are cumulative - - // numReads has cumulative number of reads - numReads *expvar.Int - // numWrites has cumulative number of writes - numWrites *expvar.Int - // numBytesRead has cumulative number of bytes read - numBytesRead *expvar.Int - // numBytesWritten has cumulative number of bytes written - numBytesWritten *expvar.Int - // numLSMGets is number of LMS gets - numLSMGets *expvar.Map - // numLSMBloomHits is number of LMS bloom hits - numLSMBloomHits *expvar.Map - // numGets is number of gets - numGets *expvar.Int - // numPuts is number of puts - numPuts *expvar.Int - // numBlockedPuts is number of blocked puts - numBlockedPuts *expvar.Int - // numMemtableGets is number of memtable gets - numMemtableGets *expvar.Int - // numCompactionTables is the number of tables being compacted - numCompactionTables *expvar.Int -) - -// These variables are global and have cumulative values for all kv stores. 
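An upgrade note on the deletion above: the expvar metrics registered by this v3 file all carry a `badger_v3_` prefix, and the v4 replacement renames them (the version segment is dropped and several metrics are reworked upstream; see #1948 and #1982 in the changelog below), so dashboards and alerts keyed to `badger_v3_*` names will go dark after this bump. A minimal stdlib sketch for auditing which names the linked-in version actually registers:

```go
package main

import (
	"expvar"
	"fmt"
	"strings"

	// Blank import so Badger's init() registers its expvar metrics.
	_ "github.com/dgraph-io/badger/v4"
)

func main() {
	// Walk every registered expvar and print the Badger ones, making it
	// easy to diff metric names before and after the v3 -> v4 upgrade.
	expvar.Do(func(kv expvar.KeyValue) {
		if strings.HasPrefix(kv.Key, "badger") {
			fmt.Printf("%s = %s\n", kv.Key, kv.Value.String())
		}
	})
}
```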
-func init() { - numReads = expvar.NewInt("badger_v3_disk_reads_total") - numWrites = expvar.NewInt("badger_v3_disk_writes_total") - numBytesRead = expvar.NewInt("badger_v3_read_bytes") - numBytesWritten = expvar.NewInt("badger_v3_written_bytes") - numLSMGets = expvar.NewMap("badger_v3_lsm_level_gets_total") - numLSMBloomHits = expvar.NewMap("badger_v3_lsm_bloom_hits_total") - numGets = expvar.NewInt("badger_v3_gets_total") - numPuts = expvar.NewInt("badger_v3_puts_total") - numBlockedPuts = expvar.NewInt("badger_v3_blocked_puts_total") - numMemtableGets = expvar.NewInt("badger_v3_memtable_gets_total") - lsmSize = expvar.NewMap("badger_v3_lsm_size_bytes") - vlogSize = expvar.NewMap("badger_v3_vlog_size_bytes") - pendingWrites = expvar.NewMap("badger_v3_pending_writes_total") - numCompactionTables = expvar.NewInt("badger_v3_compactions_current") -} - -func NumReadsAdd(enabled bool, val int64) { - addInt(enabled, numReads, val) -} - -func NumWritesAdd(enabled bool, val int64) { - addInt(enabled, numWrites, val) -} - -func NumBytesReadAdd(enabled bool, val int64) { - addInt(enabled, numBytesRead, val) -} - -func NumBytesWrittenAdd(enabled bool, val int64) { - addInt(enabled, numBytesWritten, val) -} - -func NumGetsAdd(enabled bool, val int64) { - addInt(enabled, numGets, val) -} - -func NumPutsAdd(enabled bool, val int64) { - addInt(enabled, numPuts, val) -} - -func NumBlockedPutsAdd(enabled bool, val int64) { - addInt(enabled, numBlockedPuts, val) -} - -func NumMemtableGetsAdd(enabled bool, val int64) { - addInt(enabled, numMemtableGets, val) -} - -func NumCompactionTablesAdd(enabled bool, val int64) { - addInt(enabled, numCompactionTables, val) -} - -func LSMSizeSet(enabled bool, key string, val expvar.Var) { - storeToMap(enabled, lsmSize, key, val) -} - -func VlogSizeSet(enabled bool, key string, val expvar.Var) { - storeToMap(enabled, vlogSize, key, val) -} - -func PendingWritesSet(enabled bool, key string, val expvar.Var) { - storeToMap(enabled, pendingWrites, key, val) -} - -func NumLSMBloomHitsAdd(enabled bool, key string, val int64) { - addToMap(enabled, numLSMBloomHits, key, val) -} - -func NumLSMGetsAdd(enabled bool, key string, val int64) { - addToMap(enabled, numLSMGets, key, val) -} - -func LSMSizeGet(enabled bool, key string) expvar.Var { - return getFromMap(enabled, lsmSize, key) -} - -func VlogSizeGet(enabled bool, key string) expvar.Var { - return getFromMap(enabled, vlogSize, key) -} - -func addInt(enabled bool, metric *expvar.Int, val int64) { - if !enabled { - return - } - - metric.Add(val) -} - -func addToMap(enabled bool, metric *expvar.Map, key string, val int64) { - if !enabled { - return - } - - metric.Add(key, val) -} - -func storeToMap(enabled bool, metric *expvar.Map, key string, val expvar.Var) { - if !enabled { - return - } - - metric.Set(key, val) -} - -func getFromMap(enabled bool, metric *expvar.Map, key string) expvar.Var { - if !enabled { - return nil - } - - return metric.Get(key) -} \ No newline at end of file diff --git a/vendor/github.com/dgraph-io/badger/v3/.deepsource.toml b/vendor/github.com/dgraph-io/badger/v4/.deepsource.toml similarity index 100% rename from vendor/github.com/dgraph-io/badger/v3/.deepsource.toml rename to vendor/github.com/dgraph-io/badger/v4/.deepsource.toml diff --git a/vendor/github.com/dgraph-io/badger/v3/.gitignore b/vendor/github.com/dgraph-io/badger/v4/.gitignore similarity index 100% rename from vendor/github.com/dgraph-io/badger/v3/.gitignore rename to vendor/github.com/dgraph-io/badger/v4/.gitignore diff --git 
a/vendor/github.com/dgraph-io/badger/v4/.golangci.yml b/vendor/github.com/dgraph-io/badger/v4/.golangci.yml
new file mode 100644
index 0000000000..a338bab64a
--- /dev/null
+++ b/vendor/github.com/dgraph-io/badger/v4/.golangci.yml
@@ -0,0 +1,32 @@
+run:
+  skip-dirs:
+  skip-files:
+
+linters-settings:
+  lll:
+    line-length: 120
+  staticcheck:
+    checks:
+      - all
+      - '-SA1019' # it is okay to use math/rand at times.
+  gosec:
+    excludes: # these are not relevant for us right now
+      - G114
+      - G204
+      - G306
+      - G404
+
+linters:
+  disable-all: true
+  enable:
+    - errcheck
+    - gofmt
+    - goimports
+    - gosec
+    - gosimple
+    - govet
+    - ineffassign
+    - lll
+    - staticcheck
+    - unconvert
+    - unused
diff --git a/vendor/github.com/dgraph-io/badger/v3/CHANGELOG.md b/vendor/github.com/dgraph-io/badger/v4/CHANGELOG.md
similarity index 65%
rename from vendor/github.com/dgraph-io/badger/v3/CHANGELOG.md
rename to vendor/github.com/dgraph-io/badger/v4/CHANGELOG.md
index 28648bbea9..7fefac60df 100644
--- a/vendor/github.com/dgraph-io/badger/v3/CHANGELOG.md
+++ b/vendor/github.com/dgraph-io/badger/v4/CHANGELOG.md
@@ -3,6 +3,205 @@ All notable changes to this project will be documented in this file.
 
 The format is based on [Keep a Changelog](http://keepachangelog.com/en/1.0.0/).
 
+## [4.4.0] - 2024-10-26
+
+- retract v4.3.0 due to #2121 and #2113, upgrade to Go v1.23, use ristretto v2 in https://github.com/dgraph-io/badger/pull/2122
+- Allow stream custom maxsize per batch in https://github.com/dgraph-io/badger/pull/2063
+- chore(deps): bump github.com/klauspost/compress from 1.17.10 to 1.17.11 in the patch group in https://github.com/dgraph-io/badger/pull/2120
+- fix: sentinel errors should not have stack traces in https://github.com/dgraph-io/badger/pull/2042
+- chore(deps): bump the minor group with 2 updates in https://github.com/dgraph-io/badger/pull/2119
+
+**Full Changelog**: https://github.com/dgraph-io/badger/compare/v4.3.1...v4.4.0
+
+
+## [4.3.1] - 2024-10-06
+
+- chore: update docs links by @ryanfoxtyler in https://github.com/dgraph-io/badger/pull/2097
+- chore(deps): bump golang.org/x/sys from 0.24.0 to 0.25.0 in the minor group by @dependabot in https://github.com/dgraph-io/badger/pull/2100
+- chore(deps): bump golang.org/x/net from 0.28.0 to 0.29.0 in the minor group by @dependabot in https://github.com/dgraph-io/badger/pull/2106
+- fix: fix reverse iterator broken by seek by @harshil-goel in https://github.com/dgraph-io/badger/pull/2109
+- chore(deps): bump github.com/klauspost/compress from 1.17.9 to 1.17.10 in the patch group by @dependabot in https://github.com/dgraph-io/badger/pull/2114
+- chore(deps): bump github.com/dgraph-io/ristretto from 0.1.2-0.20240116140435-c67e07994f91 to 1.0.0 by @dependabot in https://github.com/dgraph-io/badger/pull/2112
+
+**Full Changelog**: https://github.com/dgraph-io/badger/compare/v4.3.0...v4.3.1
+
+
+## [4.3.0] - 2024-08-29
+
+> **Warning**
+> The tag v4.3.0 has been retracted due to an issue in go.sum.
+> Use v4.3.1 (see #2121 and #2113) + +### Fixes +- chore(changelog): add a missed entry in CHANGELOG for v4.2.0 by @mangalaman93 in https://github.com/dgraph-io/badger/pull/1988 +- update README with project KVS using badger by @tauraamui in https://github.com/dgraph-io/badger/pull/1989 +- fix edge case for watermark when index is zero by @mangalaman93 in https://github.com/dgraph-io/badger/pull/1999 +- upgrade spf13/cobra to version v1.7.0 by @mangalaman93 in https://github.com/dgraph-io/badger/pull/2001 +- chore: update readme by @joshua-goldstein in https://github.com/dgraph-io/badger/pull/2011 +- perf: upgrade compress package test and benchmark. by @siddhant2001 in https://github.com/dgraph-io/badger/pull/2009 +- fix(Transactions): Fix resource consumption on empty write transaction by @Zach-Johnson in https://github.com/dgraph-io/badger/pull/2018 +- chore(deps): bump golang.org/x/net from 0.7.0 to 0.17.0 by @dependabot in https://github.com/dgraph-io/badger/pull/2017 +- perf(compactor): optimize allocations: use buffer for priorities by @deff7 in https://github.com/dgraph-io/badger/pull/2006 +- fix(Transaction): discard empty transactions on CommitWith by @Wondertan in https://github.com/dgraph-io/badger/pull/2031 +- fix(levelHandler): use lock for levelHandler sort tables instead of rlock by @xgzlucario in https://github.com/dgraph-io/badger/pull/2034 +- Docs: update README with project LLS using badger by @Boc-chi-no in https://github.com/dgraph-io/badger/pull/2032 +- chore: MaxTableSize has been renamed to BaseTableSize by @mitar in https://github.com/dgraph-io/badger/pull/2038 +- Update CODEOWNERS by @ryanfoxtyler in https://github.com/dgraph-io/badger/pull/2043 +- Chore(): add Stale Action by @ryanfoxtyler in https://github.com/dgraph-io/badger/pull/2070 +- Update ristretto and refactor for use of generics by @paralin in https://github.com/dgraph-io/badger/pull/2047 +- chore: Remove obsolete comment by @mitar in https://github.com/dgraph-io/badger/pull/2039 +- chore(Docs): Update jQuery 3.2.1 to 3.7.1 by @kokizzu in https://github.com/dgraph-io/badger/pull/2023 +- chore(deps): bump the go_modules group with 3 updates by @dependabot in https://github.com/dgraph-io/badger/pull/2074 +- docs(): update docs path by @ryanfoxtyler in https://github.com/dgraph-io/badger/pull/2076 +- perf: fix operation in seek by @harshil-goel in https://github.com/dgraph-io/badger/pull/2077 +- Add lakeFS to README.md by @N-o-Z in https://github.com/dgraph-io/badger/pull/2078 +- chore(): add Dependabot by @ryanfoxtyler in https://github.com/dgraph-io/badger/pull/2080 +- chore(deps): bump golangci/golangci-lint-action from 4 to 6 by @dependabot in https://github.com/dgraph-io/badger/pull/2083 +- chore(deps): bump actions/upload-artifact from 3 to 4 by @dependabot in https://github.com/dgraph-io/badger/pull/2081 +- chore(deps): bump github/codeql-action from 2 to 3 by @dependabot in https://github.com/dgraph-io/badger/pull/2082 +- chore(deps): bump the minor group with 7 updates by @dependabot in https://github.com/dgraph-io/badger/pull/2089 +- Action Manager by @madhu72 in https://github.com/dgraph-io/badger/pull/2050 +- chore(deps): bump golang.org/x/sys from 0.23.0 to 0.24.0 in the minor group by @dependabot in https://github.com/dgraph-io/badger/pull/2091 +- chore(deps): bump github.com/golang/protobuf from 1.5.3 to 1.5.4 in the patch group by @dependabot in https://github.com/dgraph-io/badger/pull/2090 +- chore: fix some comments by @dufucun in https://github.com/dgraph-io/badger/pull/2092 +- 
chore(deps): bump github.com/google/flatbuffers from 1.12.1 to 24.3.25+incompatible by @dependabot in https://github.com/dgraph-io/badger/pull/2084 + +### CI +- ci: change cron frequency to fix ghost jobs by @joshua-goldstein in https://github.com/dgraph-io/badger/pull/2010 +- fix(CI): Update to pull_request trigger by @ryanfoxtyler in https://github.com/dgraph-io/badger/pull/2056 +- ci/cd optimization by @ryanfoxtyler in https://github.com/dgraph-io/badger/pull/2051 +- fix(cd): fixed cd pipeline by @harshil-goel in https://github.com/dgraph-io/badger/pull/2093 +- fix(cd): change name by @harshil-goel in https://github.com/dgraph-io/badger/pull/2094 +- fix(cd): added more debug things to cd by @harshil-goel in https://github.com/dgraph-io/badger/pull/2095 +- fix(cd): removing some debug items by @harshil-goel in https://github.com/dgraph-io/badger/pull/2096 + + +**Full Changelog**: https://github.com/dgraph-io/badger/compare/v4.2.0...v4.3.0 + +## [4.2.0] - 2023-08-03 + +### Breaking + +- feat(metrics): fix and update metrics in badger (#1948) +- fix(metrics): remove badger version in the metrics name (#1982) + +### Fixed + +- fix(db): avoid panic in parallel reads after closing DB (#1987) +- fix(logging): fix direct access to logger (#1980) +- fix(sec): bump google.golang.org/grpc from 1.20.1 to 1.53.0 (#1977) +- fix(sec): update gopkg.in/yaml.v2 package (#1969) +- fix(test): fix flakiness of TestPersistLFDiscardStats (#1963) +- fix(stream): setup oracle correctly in stream writer (#1968) (#1904) +- fix(stream): improve incremental stream writer (#1901) +- fix(test): improve the params in BenchmarkDbGrowth (#1967) +- fix(sync): sync active memtable and value log on Db.Sync (#1847) (#1953) +- fix(test): handle draining of closed channel, speed up test. (#1957) +- fix(test): fix table checksum test. Test on uncompressed. (#1952) +- fix(level): change split key range right key to use ts=0 (#1932) +- fix(test): the new test case PagebufferReader5 introduced an error. (#1936) +- fix(test): add missing unlock in TestPersistLFDiscardStats (#1924) +- fix(PageBufferReader): should conform to io.Reader interface (#1935) +- fix(publisher): publish updates after persistence in WAL (#1917) + +### CI + +- chore(ci): split off coverage workflow (#1944) +- chore(ci): adding trivy scanning workflow (#1925) + + +## [4.1.0] - 2023-03-30 + +This release adds support for incremental stream writer. We also do some cleanup in the docs +and resolve some CI issues for community PR's. We resolve high and medium CVE's and fix +[#1833](https://github.com/dgraph-io/badger/issues/1833). 
+ +### Features +- feat(stream): add support for incremental stream writer (#1722) (#1874) + +### Fixes +- chore: upgrade xxhash from v1.1.0 to v2.1.2 (#1910) (fixes [#1833](https://github.com/dgraph-io/badger/issues/1833)) + +### Security +- chore(deps): bump golang.org/x/net from 0.0.0-20201021035429-f5854403a974 to 0.7.0 (#1885) + +### CVE's + +- [CVE-2021-31525](https://github.com/dgraph-io/badger/security/dependabot/7) +- [CVE-2022-41723](https://github.com/dgraph-io/badger/security/dependabot/4) +- [CVE-2022-27664](https://github.com/dgraph-io/badger/security/dependabot/5) +- [CVE-2021-33194](https://github.com/dgraph-io/badger/security/dependabot/9) +- [CVE-2022-41723](https://github.com/dgraph-io/badger/security/dependabot/13) +- [CVE-2021-33194](https://github.com/dgraph-io/badger/security/dependabot/16) +- [CVE-2021-38561](https://github.com/dgraph-io/badger/security/dependabot/8) + +### Chores + +- fix(docs): update README (#1915) +- cleanup sstable file after tests (#1912) +- chore(ci): add dgraph regression tests (#1908) +- docs: fix the default value in docs (#1909) +- chore: update URL for unsupported manifest version error (#1905) +- docs(README): add raft-badger to projects using badger (#1902) +- sync the docs with README with projects using badger (#1903) +- fix: update code comments for WithNumCompactors (#1900) +- docs: add loggie to projects using badger (#1882) +- chore(memtable): refactor code for memtable flush (#1866) +- resolve coveralls issue for community PR's (#1892, #1894, #1896) + + +## [4.0.1] - 2023-02-28 + +We issue a follow up release in order to resolve a bug in subscriber. We also generate updated protobufs for Badger v4. + +### Fixed + - fix(pb): fix generated protos #1888 + - fix(publisher): initialize the atomic variable #1889 + +### Chores + - chore(cd): tag based deployments #1887 + - chore(ci): fail fast when testing #1890 + + +## [4.0.0] - 2023-02-27 + +> **Warning** +> The tag v4.0.0 has been retracted due to a bug in publisher. +> Use v4.0.1 (see #1889) + +This release fixes a bug in the maxHeaderSize parameter that could lead +to panics. We introduce an external magic number to keep track of external +dependencies. We bump up the minimum required Go version to 1.19. No changes +were made to the format of data on disk. This is a major release because +we are making a switch to SemVer in order to make it easier for the community +to understand when breaking API and data format changes are made. + +### Fixed +- fix: update maxHeaderSize #1877 +- feat(externalMagic): Introduce external magic number (#1745) #1852 +- fix(bench): bring in benchmark fixes from main #1863 + +### Chores +- upgrade go to 1.19 #1868 +- enable linters (gosimple, govet, lll, unused, staticcheck, errcheck, ineffassign, gofmt) #1871 #1870 #1876 +- remove dependency on io/ioutil #1879 +- various doc and comment fixes #1857 +- moving from CalVer to SemVer + + +## [3.2103.5] - 2022-12-15 + +We release Badger CLI tool binaries for amd64 and now arm64. This release does not involve any core code changes to Badger. We add a CD job for building Badger for arm64. + + +## [3.2103.4] - 2022-11-04 + +### Fixed + - fix(manifest): fix manifest corruption due to race condition in concurrent compactions (#1756) + +### Chores + - We bring the release branch to parity with main by updating the CI/CD jobs, Readme, Codeowners, PR and issue templates, etc. 
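The 4.1.0 entry above headlines the incremental stream writer. For readers tracking the API surface: the regular stream writer assumes it is populating an empty DB, while the incremental variant (#1722, #1874) lets you bulk-load on top of existing data. A hedged sketch of the intended call shape; `PrepareIncremental` is the entry point named in the upstream PR, and the buffer-building step is elided because it is unchanged from the regular stream writer:

```go
package example

import (
	badger "github.com/dgraph-io/badger/v4"
)

// bulkLoadIncremental sketches the incremental stream writer flow added in
// v4.1.0. Everything except PrepareIncremental is the pre-existing
// StreamWriter API.
func bulkLoadIncremental(db *badger.DB) error {
	sw := db.NewStreamWriter()
	// Prepare() is for an empty DB; PrepareIncremental() stacks the new
	// data on top of whatever the DB already holds.
	if err := sw.PrepareIncremental(); err != nil {
		return err
	}
	// Feed sorted data here via sw.Write(buf), where buf is a *z.Buffer of
	// length-prefixed, marshaled pb.KV entries (elided for brevity).
	return sw.Flush() // finishes the stream and makes the data visible
}
```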
+ ## [3.2103.3] - 2022-10-14 ### Remarks @@ -199,6 +398,29 @@ This release is not backward compatible with Badger v2.x.x - WithTruncate (#1555) - WithValueLogLoadingMode (#1555) +## [2.2007.4] - 2021-08-25 + +### Fixed + - Fix build on Plan 9 (#1451) (#1508) (#1738) + +### Features + - feat(zstd): backport replacement of DataDog's zstd with Klauspost's zstd (#1736) + +## [2.2007.3] - 2021-07-21 + +### Fixed + - fix(maxVersion): Use choosekey instead of KeyToList (#1532) #1533 + - fix(flatten): Add --num_versions flag (#1518) #1520 + - fix(build): Fix integer overflow on 32-bit architectures #1558 + - fix(pb): avoid protobuf warning due to common filename (#1519) + +### Features + - Add command to stream contents of DB into another DB. (#1486) + +### New APIs + - DB.StreamDB + - DB.MaxVersion + ## [2.2007.2] - 2020-08-31 ### Fixed @@ -363,17 +585,17 @@ This sections lists the changes which were reverted because of non-reproducible - Improve write stalling on level 0 and 1. (#1186) - Disable compression and set ZSTD Compression Level to 1. (#1191) -## [2.0.1] - 2020-01-02 +## [2.0.1] - 2020-01-02 ### New APIs - badger.Options - WithInMemory (f5b6321) - WithZSTDCompressionLevel (3eb4e72) - + - Badger.TableInfo - EstimatedSz (f46f8ea) - + ### Features - Introduce in-memory mode in badger. (#1113) @@ -386,12 +608,12 @@ This sections lists the changes which were reverted because of non-reproducible - Fix windows dataloss issue. (#1134) - Fix VerifyValueChecksum checks. (#1138) - Fix encryption in stream writer. (#1146) -- Fix segmentation fault in vlog.Read. (header.Decode) (#1150) +- Fix segmentation fault in vlog.Read. (header.Decode) (#1150) - Fix merge iterator duplicates issue. (#1157) ### Performance -- Set level 15 as default compression level in Zstd. (#1111) +- Set level 15 as default compression level in Zstd. (#1111) - Optimize createTable in stream_writer.go. (#1132) ## [2.0.0] - 2019-11-12 @@ -413,7 +635,7 @@ This sections lists the changes which were reverted because of non-reproducible - WithEncryptionKey (a425b0e) - WithEncryptionKeyRotationDuration (a425b0e) - WithChecksumVerificationMode (7b4083d) - + ### Features - Data cache to speed up lookups and iterations. 
(#1066) @@ -493,7 +715,7 @@ _Note_: The hashes in parentheses correspond to the commits that impacted the gi - NewEntry - WithMeta - WithDiscard - - WithTTL + - WithTTL - badger.Item - KeySize (fd59907) diff --git a/vendor/github.com/dgraph-io/badger/v3/CODE_OF_CONDUCT.md b/vendor/github.com/dgraph-io/badger/v4/CODE_OF_CONDUCT.md similarity index 100% rename from vendor/github.com/dgraph-io/badger/v3/CODE_OF_CONDUCT.md rename to vendor/github.com/dgraph-io/badger/v4/CODE_OF_CONDUCT.md diff --git a/vendor/github.com/dgraph-io/badger/v3/CONTRIBUTING.md b/vendor/github.com/dgraph-io/badger/v4/CONTRIBUTING.md similarity index 100% rename from vendor/github.com/dgraph-io/badger/v3/CONTRIBUTING.md rename to vendor/github.com/dgraph-io/badger/v4/CONTRIBUTING.md diff --git a/vendor/github.com/dgraph-io/badger/v3/LICENSE b/vendor/github.com/dgraph-io/badger/v4/LICENSE similarity index 100% rename from vendor/github.com/dgraph-io/badger/v3/LICENSE rename to vendor/github.com/dgraph-io/badger/v4/LICENSE diff --git a/vendor/github.com/dgraph-io/badger/v3/Makefile b/vendor/github.com/dgraph-io/badger/v4/Makefile similarity index 98% rename from vendor/github.com/dgraph-io/badger/v3/Makefile rename to vendor/github.com/dgraph-io/badger/v4/Makefile index b3e2ac1222..fa4227816d 100644 --- a/vendor/github.com/dgraph-io/badger/v3/Makefile +++ b/vendor/github.com/dgraph-io/badger/v4/Makefile @@ -50,7 +50,6 @@ jemalloc: dependency: @echo "Installing dependencies..." @sudo apt-get update - @sudo apt-get -y upgrade @sudo apt-get -y install \ ca-certificates \ curl \ diff --git a/vendor/github.com/dgraph-io/badger/v3/README.md b/vendor/github.com/dgraph-io/badger/v4/README.md similarity index 83% rename from vendor/github.com/dgraph-io/badger/v3/README.md rename to vendor/github.com/dgraph-io/badger/v4/README.md index e6a7cf4cbc..c0706e49b3 100644 --- a/vendor/github.com/dgraph-io/badger/v3/README.md +++ b/vendor/github.com/dgraph-io/badger/v4/README.md @@ -1,7 +1,7 @@ -# BadgerDB +# BadgerDB -[![Go Reference](https://pkg.go.dev/badge/github.com/dgraph-io/badger/v3.svg)](https://pkg.go.dev/github.com/dgraph-io/badger/v3) -[![Go Report Card](https://goreportcard.com/badge/github.com/dgraph-io/badger/v3)](https://goreportcard.com/report/github.com/dgraph-io/badger/v3) +[![Go Reference](https://pkg.go.dev/badge/github.com/dgraph-io/badger/v4.svg)](https://pkg.go.dev/github.com/dgraph-io/badger/v4) +[![Go Report Card](https://goreportcard.com/badge/github.com/dgraph-io/badger/v4)](https://goreportcard.com/report/github.com/dgraph-io/badger/v4) [![Sourcegraph](https://sourcegraph.com/github.com/dgraph-io/badger/-/badge.svg)](https://sourcegraph.com/github.com/dgraph-io/badger?badge) [![ci-badger-tests](https://github.com/dgraph-io/badger/actions/workflows/ci-badger-tests.yml/badge.svg)](https://github.com/dgraph-io/badger/actions/workflows/ci-badger-tests.yml) [![ci-badger-bank-tests](https://github.com/dgraph-io/badger/actions/workflows/ci-badger-bank-tests.yml/badge.svg)](https://github.com/dgraph-io/badger/actions/workflows/ci-badger-bank-tests.yml) @@ -39,43 +39,46 @@ Please consult the [Changelog] for more detailed information on releases. For more details on our version naming schema please read [Choosing a version](#choosing-a-version). 
-[Changelog]:https://github.com/dgraph-io/badger/blob/master/CHANGELOG.md +[Changelog]:https://github.com/dgraph-io/badger/blob/main/CHANGELOG.md ## Table of Contents - * [Getting Started](#getting-started) - + [Installing](#installing) +- [BadgerDB](#badgerdb) + - [Project Status](#project-status) + - [Table of Contents](#table-of-contents) + - [Getting Started](#getting-started) + - [Installing](#installing) - [Installing Badger Command Line Tool](#installing-badger-command-line-tool) - [Choosing a version](#choosing-a-version) - * [Badger Documentation](#badger-documentation) - * [Resources](#resources) - + [Blog Posts](#blog-posts) - * [Design](#design) - + [Comparisons](#comparisons) - + [Benchmarks](#benchmarks) - * [Projects Using Badger](#projects-using-badger) - * [Contributing](#contributing) - * [Contact](#contact) + - [Badger Documentation](#badger-documentation) + - [Resources](#resources) + - [Blog Posts](#blog-posts) + - [Design](#design) + - [Comparisons](#comparisons) + - [Benchmarks](#benchmarks) + - [Projects Using Badger](#projects-using-badger) + - [Contributing](#contributing) + - [Contact](#contact) ## Getting Started ### Installing -To start using Badger, install Go 1.12 or above. Badger v3 needs go modules. From your project, run the following command +To start using Badger, install Go 1.21 or above. Badger v3 and above needs go modules. From your project, run the following command ```sh -$ go get github.com/dgraph-io/badger/v3 +$ go get github.com/dgraph-io/badger/v4 ``` This will retrieve the library. #### Installing Badger Command Line Tool -Badger provides a CLI tool which can perform certain operations like offline backup/restore. To install the Badger CLI, +Badger provides a CLI tool which can perform certain operations like offline backup/restore. To install the Badger CLI, retrieve the repository and checkout the desired version. Then run ```sh $ cd badger $ go install . ``` -This will install the badger command line utility into your $GOBIN path. +This will install the badger command line utility into your $GOBIN path. #### Choosing a version @@ -176,7 +179,6 @@ Below is a list of known projects that use Badger: * [Sandglass](https://github.com/celrenheit/sandglass) - distributed, horizontally scalable, persistent, time sorted message queue. * [TalariaDB](https://github.com/grab/talaria) - Grab's Distributed, low latency time-series database. * [Sloop](https://github.com/salesforce/sloop) - Salesforce's Kubernetes History Visualization Project. -* [Immudb](https://github.com/codenotary/immudb) - Lightweight, high-speed immutable database for systems and applications. * [Usenet Express](https://usenetexpress.com/) - Serving over 300TB of data with Badger. * [gorush](https://github.com/appleboy/gorush) - A push notification server written in Go. * [0-stor](https://github.com/zero-os/0-stor) - Single device object store. @@ -196,11 +198,11 @@ Below is a list of known projects that use Badger: * [BadgerHold](https://github.com/timshannon/badgerhold) - An embeddable NoSQL store for querying Go types built on Badger * [Goblero](https://github.com/didil/goblero) - Pure Go embedded persistent job queue backed by BadgerDB * [Surfline](https://www.surfline.com) - Serving global wave and weather forecast data with Badger. -* [Cete](https://github.com/mosuka/cete) - Simple and highly available distributed key-value store built on Badger. Makes it easy bringing up a cluster of Badger with Raft consensus algorithm by hashicorp/raft. 
+* [Cete](https://github.com/mosuka/cete) - Simple and highly available distributed key-value store built on Badger. Makes it easy bringing up a cluster of Badger with Raft consensus algorithm by hashicorp/raft. * [Volument](https://volument.com/) - A new take on website analytics backed by Badger. * [KVdb](https://kvdb.io/) - Hosted key-value store and serverless platform built on top of Badger. * [Terminotes](https://gitlab.com/asad-awadia/terminotes) - Self hosted notes storage and search server - storage powered by BadgerDB -* [Pyroscope](https://github.com/pyroscope-io/pyroscope) - Open source confinuous profiling platform built with BadgerDB +* [Pyroscope](https://github.com/pyroscope-io/pyroscope) - Open source continuous profiling platform built with BadgerDB * [Veri](https://github.com/bgokden/veri) - A distributed feature store optimized for Search and Recommendation tasks. * [bIter](https://github.com/MikkelHJuul/bIter) - A library and Iterator interface for working with the `badger.Iterator`, simplifying from-to, and prefix mechanics. * [ld](https://github.com/MikkelHJuul/ld) - (Lean Database) A very simple gRPC-only key-value database, exposing BadgerDB with key-range scanning semantics. @@ -211,6 +213,15 @@ Below is a list of known projects that use Badger: * [vxdb](https://github.com/vitalvas/vxdb) - Simple schema-less Key-Value NoSQL database with simplest API interface. * [Opacity](https://github.com/opacity/storage-node) - Backend implementation for the Opacity storage project * [Vephar](https://github.com/vaccovecrana/vephar) - A minimal key/value store using hashicorp-raft for cluster coordination and Badger for data storage. +* [gowarcserver](https://github.com/nlnwa/gowarcserver) - Open-source server for warc files. Can be used in conjunction with pywb +* [flow-go](https://github.com/onflow/flow-go) - A fast, secure, and developer-friendly blockchain built to support the next generation of games, apps and the digital assets that power them. +* [Wrgl](https://www.wrgl.co) - A data version control system that works like Git but specialized to store and diff CSV. +* [Loggie](https://github.com/loggie-io/loggie) - A lightweight, cloud-native data transfer agent and aggregator. +* [raft-badger](https://github.com/rfyiamcool/raft-badger) - raft-badger implements LogStore and StableStore Interface of hashcorp/raft. it is used to store raft log and metadata of hashcorp/raft. +* [DVID](https://github.com/janelia-flyem/dvid) - A dataservice for branched versioning of a variety of data types. Originally created for large-scale brain reconstructions in Connectomics. +* [KVS](https://github.com/tauraamui/kvs) - A library for making it easy to persist, load and query full structs into BadgerDB, using an ownership hierarchy model. +* [LLS](https://github.com/Boc-chi-no/LLS) - LLS is an efficient URL Shortener that can be used to shorten links and track link usage. Support for BadgerDB and MongoDB. Improved performance by more than 30% when using BadgerDB +* [lakeFS](https://github.com/treeverse/lakeFS) - lakeFS is an open-source data version control that transforms your object storage to Git-like repositories. lakeFS uses BadgerDB for its underlying local metadata KV store implementation. If you are using Badger in a project please send a pull request to add it to the list. @@ -219,6 +230,6 @@ If you are using Badger in a project please send a pull request to add it to the If you're interested in contributing to Badger see [CONTRIBUTING](./CONTRIBUTING.md). 
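Since the Installing section above now points at the `/v4` module path, here is a minimal smoke test that the upgraded dependency resolves and runs. This is the standard public API, shown only to illustrate the new import path:

```go
package main

import (
	"fmt"
	"log"

	badger "github.com/dgraph-io/badger/v4" // note the /v4 module path
)

func main() {
	db, err := badger.Open(badger.DefaultOptions("/tmp/badger"))
	if err != nil {
		log.Fatal(err)
	}
	defer db.Close()

	// Write a key in one transaction...
	if err := db.Update(func(txn *badger.Txn) error {
		return txn.Set([]byte("answer"), []byte("42"))
	}); err != nil {
		log.Fatal(err)
	}

	// ...and read it back in another.
	if err := db.View(func(txn *badger.Txn) error {
		item, err := txn.Get([]byte("answer"))
		if err != nil {
			return err
		}
		return item.Value(func(val []byte) error {
			fmt.Printf("answer=%s\n", val)
			return nil
		})
	}); err != nil {
		log.Fatal(err)
	}
}
```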
## Contact -- Please use [discuss.dgraph.io](https://discuss.dgraph.io) for questions, feature requests and discussions. -- Please use [discuss.dgraph.io](https://discuss.dgraph.io) for filing bugs or feature requests. -- Follow us on Twitter [@dgraphlabs](https://twitter.com/dgraphlabs). \ No newline at end of file +- Please use [Github issues](https://github.com/dgraph-io/badger/issues) for filing bugs. +- Please use [discuss.dgraph.io](https://discuss.dgraph.io) for questions, discussions, and feature requests. +- Follow us on Twitter [@dgraphlabs](https://twitter.com/dgraphlabs). diff --git a/vendor/github.com/dgraph-io/badger/v3/VERSIONING.md b/vendor/github.com/dgraph-io/badger/v4/VERSIONING.md similarity index 97% rename from vendor/github.com/dgraph-io/badger/v3/VERSIONING.md rename to vendor/github.com/dgraph-io/badger/v4/VERSIONING.md index a890a36ffb..7741bd4045 100644 --- a/vendor/github.com/dgraph-io/badger/v3/VERSIONING.md +++ b/vendor/github.com/dgraph-io/badger/v4/VERSIONING.md @@ -43,5 +43,5 @@ For more background on our decision to adopt Serialization Versioning, read the [Semantic Versioning, Go Modules, and Databases][blog] and the original proposal on [this comment on Dgraph's Discuss forum][discuss]. -[blog]: https://blog.dgraph.io/post/serialization-versioning/ +[blog]: https://open.dgraph.io/post/serialization-versioning/ [discuss]: https://discuss.dgraph.io/t/go-modules-on-badger-and-dgraph/4662/7 \ No newline at end of file diff --git a/vendor/github.com/dgraph-io/badger/v3/backup.go b/vendor/github.com/dgraph-io/badger/v4/backup.go similarity index 97% rename from vendor/github.com/dgraph-io/badger/v3/backup.go rename to vendor/github.com/dgraph-io/badger/v4/backup.go index 42bdc1791e..e9811e1dbc 100644 --- a/vendor/github.com/dgraph-io/badger/v3/backup.go +++ b/vendor/github.com/dgraph-io/badger/v4/backup.go @@ -23,11 +23,12 @@ import ( "encoding/binary" "io" - "github.com/dgraph-io/badger/v3/pb" - "github.com/dgraph-io/badger/v3/y" - "github.com/dgraph-io/ristretto/z" - "github.com/golang/protobuf/proto" "github.com/pkg/errors" + "google.golang.org/protobuf/proto" + + "github.com/dgraph-io/badger/v4/pb" + "github.com/dgraph-io/badger/v4/y" + "github.com/dgraph-io/ristretto/v2/z" ) // flushThreshold determines when a buffer will be flushed. When performing a @@ -78,8 +79,7 @@ func (stream *Stream) Backup(w io.Writer, since uint64) (uint64, error) { var valCopy []byte if !item.IsDeletedOrExpired() { // No need to copy value, if item is deleted or expired. - var err error - err = item.Value(func(val []byte) error { + err := item.Value(func(val []byte) error { valCopy = a.Copy(val) return nil }) diff --git a/vendor/github.com/dgraph-io/badger/v3/batch.go b/vendor/github.com/dgraph-io/badger/v4/batch.go similarity index 97% rename from vendor/github.com/dgraph-io/badger/v3/batch.go rename to vendor/github.com/dgraph-io/badger/v4/batch.go index a57022ea78..ea7a81f1fd 100644 --- a/vendor/github.com/dgraph-io/badger/v3/batch.go +++ b/vendor/github.com/dgraph-io/badger/v4/batch.go @@ -20,10 +20,12 @@ import ( "sync" "sync/atomic" - "github.com/dgraph-io/badger/v3/pb" - "github.com/dgraph-io/badger/v3/y" - "github.com/dgraph-io/ristretto/z" "github.com/pkg/errors" + "google.golang.org/protobuf/proto" + + "github.com/dgraph-io/badger/v4/pb" + "github.com/dgraph-io/badger/v4/y" + "github.com/dgraph-io/ristretto/v2/z" ) // WriteBatch holds the necessary info to perform batched writes. 
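The import swap above is the one mechanical code change this upgrade forces on callers: the gogo-generated messages in v3 carried their own `Marshal`/`Unmarshal` methods, while the v4 protos are plain `google.golang.org/protobuf` messages driven by package-level functions. A small before/after sketch (the `data` argument is illustrative):

```go
package example

import (
	"google.golang.org/protobuf/proto"

	"github.com/dgraph-io/badger/v4/pb"
)

// roundTrip decodes and re-encodes a pb.KV using the v4-style API.
func roundTrip(data []byte) ([]byte, error) {
	kv := &pb.KV{}

	// Badger v3 (gogo/protobuf): methods hung off the message itself.
	//   err := kv.Unmarshal(data)
	//   out, err := kv.Marshal()

	// Badger v4 (google.golang.org/protobuf): package-level functions.
	if err := proto.Unmarshal(data, kv); err != nil {
		return nil, err
	}
	return proto.Marshal(kv)
}
```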
@@ -112,7 +114,7 @@ func (wb *WriteBatch) Write(buf *z.Buffer) error { err := buf.SliceIterate(func(s []byte) error { kv := &pb.KV{} - if err := kv.Unmarshal(s); err != nil { + if err := proto.Unmarshal(s, kv); err != nil { return err } return wb.writeKV(kv) diff --git a/vendor/github.com/dgraph-io/badger/v3/changes.sh b/vendor/github.com/dgraph-io/badger/v4/changes.sh similarity index 100% rename from vendor/github.com/dgraph-io/badger/v3/changes.sh rename to vendor/github.com/dgraph-io/badger/v4/changes.sh diff --git a/vendor/github.com/dgraph-io/badger/v3/compaction.go b/vendor/github.com/dgraph-io/badger/v4/compaction.go similarity index 98% rename from vendor/github.com/dgraph-io/badger/v3/compaction.go rename to vendor/github.com/dgraph-io/badger/v4/compaction.go index 1a0f228ef3..1f61ecfe6e 100644 --- a/vendor/github.com/dgraph-io/badger/v3/compaction.go +++ b/vendor/github.com/dgraph-io/badger/v4/compaction.go @@ -23,8 +23,8 @@ import ( "math" "sync" - "github.com/dgraph-io/badger/v3/table" - "github.com/dgraph-io/badger/v3/y" + "github.com/dgraph-io/badger/v4/table" + "github.com/dgraph-io/badger/v4/y" ) type keyRange struct { diff --git a/vendor/github.com/dgraph-io/badger/v3/db.go b/vendor/github.com/dgraph-io/badger/v4/db.go similarity index 89% rename from vendor/github.com/dgraph-io/badger/v3/db.go rename to vendor/github.com/dgraph-io/badger/v4/db.go index 3ac6a2d959..dbf5b7e7a8 100644 --- a/vendor/github.com/dgraph-io/badger/v3/db.go +++ b/vendor/github.com/dgraph-io/badger/v4/db.go @@ -20,6 +20,7 @@ import ( "bytes" "context" "encoding/binary" + stderrors "errors" "expvar" "fmt" "math" @@ -31,15 +32,17 @@ import ( "sync/atomic" "time" - "github.com/dgraph-io/badger/v3/options" - "github.com/dgraph-io/badger/v3/pb" - "github.com/dgraph-io/badger/v3/skl" - "github.com/dgraph-io/badger/v3/table" - "github.com/dgraph-io/badger/v3/y" - "github.com/dgraph-io/ristretto" - "github.com/dgraph-io/ristretto/z" humanize "github.com/dustin/go-humanize" "github.com/pkg/errors" + + "github.com/dgraph-io/badger/v4/fb" + "github.com/dgraph-io/badger/v4/options" + "github.com/dgraph-io/badger/v4/pb" + "github.com/dgraph-io/badger/v4/skl" + "github.com/dgraph-io/badger/v4/table" + "github.com/dgraph-io/badger/v4/y" + "github.com/dgraph-io/ristretto/v2" + "github.com/dgraph-io/ristretto/v2/z" ) var ( @@ -48,10 +51,6 @@ var ( bannedNsKey = []byte("!badger!banned") // For storing the banned namespaces. ) -const ( - maxNumSplits = 128 -) - type closers struct { updateSize *z.Closer compactors *z.Closer @@ -93,6 +92,8 @@ func (lk *lockedKeys) all() []uint64 { // DB provides the various functions required to interact with Badger. // DB is thread-safe. type DB struct { + testOnlyDBExtensions + lock sync.RWMutex // Guards list of inmemory tables, not individual reads and writes. dirLockGuard *directoryLockGuard @@ -112,11 +113,11 @@ type DB struct { lc *levelsController vlog valueLog writeCh chan *request - flushChan chan flushTask // For flushing memtables. + flushChan chan *memTable // For flushing memtables. closeOnce sync.Once // For closing DB only once. 
- blockWrites int32 - isClosed uint32 + blockWrites atomic.Int32 + isClosed atomic.Uint32 orc *oracle bannedNamespaces *lockedKeys @@ -124,8 +125,8 @@ type DB struct { pub *publisher registry *KeyRegistry - blockCache *ristretto.Cache - indexCache *ristretto.Cache + blockCache *ristretto.Cache[[]byte, *table.Block] + indexCache *ristretto.Cache[uint64, *fb.TableIndex] allocPool *z.AllocatorPool } @@ -163,10 +164,10 @@ func checkAndSetOptions(opt *Options) error { // the transaction APIs. Transaction batches entries into batches of size opt.maxBatchSize. if opt.ValueThreshold > opt.maxBatchSize { return errors.Errorf("Valuethreshold %d greater than max batch size of %d. Either "+ - "reduce opt.ValueThreshold or increase opt.MaxTableSize.", + "reduce opt.ValueThreshold or increase opt.BaseTableSize.", opt.ValueThreshold, opt.maxBatchSize) } - // ValueLogFileSize should be stricly LESS than 2<<30 otherwise we will + // ValueLogFileSize should be strictly LESS than 2<<30 otherwise we will // overflow the uint32 when we mmap it in OpenMemtable. if !(opt.ValueLogFileSize < 2<<30 && opt.ValueLogFileSize >= 1<<20) { return ErrValueLogSize @@ -243,7 +244,7 @@ func Open(opt Options) (*DB, error) { db := &DB{ imm: make([]*memTable, 0, opt.NumMemtables), - flushChan: make(chan flushTask, opt.NumMemtables), + flushChan: make(chan *memTable, opt.NumMemtables), writeCh: make(chan *request, kvWriteChCapacity), opt: opt, manifest: manifestFile, @@ -255,6 +256,9 @@ func Open(opt Options) (*DB, error) { bannedNamespaces: &lockedKeys{keys: make(map[uint64]struct{})}, threshold: initVlogThreshold(&opt), } + + db.syncChan = opt.syncChan + // Cleanup all the goroutines started by badger in case of an error. defer func() { if err != nil { @@ -272,14 +276,14 @@ func Open(opt Options) (*DB, error) { numInCache = 1 } - config := ristretto.Config{ + config := ristretto.Config[[]byte, *table.Block]{ NumCounters: numInCache * 8, MaxCost: opt.BlockCacheSize, BufferItems: 64, Metrics: true, OnExit: table.BlockEvictHandler, } - db.blockCache, err = ristretto.NewCache(&config) + db.blockCache, err = ristretto.NewCache[[]byte, *table.Block](&config) if err != nil { return nil, y.Wrap(err, "failed to create data cache") } @@ -295,7 +299,7 @@ func Open(opt Options) (*DB, error) { numInCache = 1 } - config := ristretto.Config{ + config := ristretto.Config[uint64, *fb.TableIndex]{ NumCounters: numInCache * 8, MaxCost: opt.IndexCacheSize, BufferItems: 64, @@ -354,11 +358,11 @@ func Open(opt Options) (*DB, error) { db.closers.memtable = z.NewCloser(1) go func() { - _ = db.flushMemtable(db.closers.memtable) // Need levels controller to be up. + db.flushMemtable(db.closers.memtable) // Need levels controller to be up. }() // Flush them to disk asap. for _, mt := range db.imm { - db.flushChan <- flushTask{mt: mt} + db.flushChan <- mt } } // We do increment nextTxnTs below. So, no need to do it here. @@ -400,7 +404,7 @@ func Open(opt Options) (*DB, error) { return db, nil } -// initBannedNamespaces retrieves the banned namepsaces from the DB and updates in-memory structure. +// initBannedNamespaces retrieves the banned namespaces from the DB and updates in-memory structure. func (db *DB) initBannedNamespaces() error { if db.opt.NamespaceOffset < 0 { return nil @@ -534,16 +538,17 @@ func (db *DB) Close() error { // IsClosed denotes if the badger DB is closed or not. A DB instance should not // be used after closing it. 
func (db *DB) IsClosed() bool { - return atomic.LoadUint32(&db.isClosed) == 1 + return db.isClosed.Load() == 1 } func (db *DB) close() (err error) { defer db.allocPool.Release() db.opt.Debugf("Closing database") - db.opt.Infof("Lifetime L0 stalled for: %s\n", time.Duration(atomic.LoadInt64(&db.lc.l0stallsMs))) + db.opt.Infof("Lifetime L0 stalled for: %s\n", time.Duration(db.lc.l0stallsMs.Load())) - atomic.StoreInt32(&db.blockWrites, 1) + db.blockWrites.Store(1) + db.isClosed.Store(1) if !db.opt.InMemory { // Stop value GC first. @@ -571,12 +576,12 @@ func (db *DB) close() (err error) { } else { db.opt.Debugf("Flushing memtable") for { - pushedFlushTask := func() bool { + pushedMemTable := func() bool { db.lock.Lock() defer db.lock.Unlock() y.AssertTrue(db.mt != nil) select { - case db.flushChan <- flushTask{mt: db.mt}: + case db.flushChan <- db.mt: db.imm = append(db.imm, db.mt) // Flusher will attempt to remove this from s.imm. db.mt = nil // Will segfault if we try writing! db.opt.Debugf("pushed to flush chan\n") @@ -589,7 +594,7 @@ func (db *DB) close() (err error) { } return false }() - if pushedFlushTask { + if pushedMemTable { break } time.Sleep(10 * time.Millisecond) @@ -629,7 +634,6 @@ func (db *DB) close() (err error) { db.blockCache.Close() db.indexCache.Close() - atomic.StoreUint32(&db.isClosed, 1) db.threshold.close() if db.opt.InMemory { @@ -679,7 +683,38 @@ const ( // Sync syncs database content to disk. This function provides // more control to user to sync data whenever required. func (db *DB) Sync() error { - return db.vlog.sync() + /** + Make an attempt to sync both the logs, the active memtable's WAL and the vLog (1847). + Cases: + - All_ok :: If both the logs sync successfully. + + - Entry_Lost :: If an entry with a value pointer was present in the active memtable's WAL, + :: and the WAL was synced but there was an error in syncing the vLog. + :: The entry will be considered lost and this case will need to be handled during recovery. + + - Entries_Lost :: If there were errors in syncing both the logs, multiple entries would be lost. + + - Entries_Lost :: If the active memtable's WAL is not synced but the vLog is synced, it will + :: result in entries being lost because recovery of the active memtable is done from its WAL. + :: Check `UpdateSkipList` in memtable.go. + + - Nothing_lost :: If an entry with its value was present in the active memtable's WAL, and the WAL was synced, + :: but there was an error in syncing the vLog. + :: Nothing is lost for this very specific entry because the entry is completely present in the memtable's WAL. + + - Partially_lost :: If entries were written partially in either of the logs, + :: the logs will be truncated during recovery. + :: As a result of truncation, some entries might be lost. + :: Assume that 4KB of data is to be synced and invoking `Sync` results only in syncing 3KB + :: of data and then the machine shuts down or the disk failure happens, + :: this will result in partial writes. [[This case needs verification]] + */ + db.lock.RLock() + memtableSyncError := db.mt.SyncWAL() + db.lock.RUnlock() + + vLogSyncError := db.vlog.sync() + return y.CombineErrors(memtableSyncError, vLogSyncError) } // getMemtables returns the current memtables and get references. @@ -742,6 +777,7 @@ func (db *DB) get(key []byte) (y.ValueStruct, error) { } // Found the required version of the key, return immediately. 
if vs.Version == version { + y.NumGetsWithResultsAdd(db.opt.MetricsEnabled, 1) return vs, nil } if maxVs.Version < vs.Version { @@ -819,8 +855,6 @@ func (db *DB) writeRequests(reqs []*request) error { return err } - db.opt.Debugf("Sending updates to subscribers") - db.pub.sendUpdates(reqs) db.opt.Debugf("Writing to memtable") var count int for _, b := range reqs { @@ -829,6 +863,7 @@ func (db *DB) writeRequests(reqs []*request) error { } count += len(b.Entries) var i uint64 + var err error for err = db.ensureRoomForWrite(); err == errNoRoom; err = db.ensureRoomForWrite() { i++ if i%100 == 0 { @@ -848,13 +883,17 @@ func (db *DB) writeRequests(reqs []*request) error { return y.Wrap(err, "writeRequests") } } + + db.opt.Debugf("Sending updates to subscribers") + db.pub.sendUpdates(reqs) + done(nil) db.opt.Debugf("%d entries written", count) return nil } func (db *DB) sendToWriteCh(entries []*Entry) (*request, error) { - if atomic.LoadInt32(&db.blockWrites) == 1 { + if db.blockWrites.Load() == 1 { return nil, ErrBlockedWrites } var count, size int64 @@ -862,11 +901,12 @@ func (db *DB) sendToWriteCh(entries []*Entry) (*request, error) { size += e.estimateSizeAndSetThreshold(db.valueThreshold()) count++ } + y.NumBytesWrittenUserAdd(db.opt.MetricsEnabled, size) if count >= db.opt.maxBatchCount || size >= db.opt.maxBatchSize { return nil, ErrTxnTooBig } - // We can only service one request because we need each txn to be stored in a contigous section. + // We can only service one request because we need each txn to be stored in a contiguous section. // Txns should not interleave among other txns or rewrites. req := requestPool.Get().(*request) req.reset() @@ -945,7 +985,8 @@ func (db *DB) doWrites(lc *z.Closer) { // batchSet applies a list of badger.Entry. If a request level error occurs it // will be returned. -// Check(kv.BatchSet(entries)) +// +// Check(kv.BatchSet(entries)) func (db *DB) batchSet(entries []*Entry) error { req, err := db.sendToWriteCh(entries) if err != nil { @@ -958,9 +999,10 @@ func (db *DB) batchSet(entries []*Entry) error { // batchSetAsync is the asynchronous version of batchSet. It accepts a callback // function which is called when all the sets are complete. If a request level // error occurs, it will be passed back via the callback. -// err := kv.BatchSetAsync(entries, func(err error)) { -// Check(err) -// } +// +// err := kv.BatchSetAsync(entries, func(err error)) { +// Check(err) +// } func (db *DB) batchSetAsync(entries []*Entry, f func(error)) error { req, err := db.sendToWriteCh(entries) if err != nil { @@ -974,7 +1016,7 @@ func (db *DB) batchSetAsync(entries []*Entry, f func(error)) error { return nil } -var errNoRoom = errors.New("No room for write") +var errNoRoom = stderrors.New("No room for write") // ensureRoomForWrite is always called serially. func (db *DB) ensureRoomForWrite() error { @@ -988,7 +1030,7 @@ func (db *DB) ensureRoomForWrite() error { } select { - case db.flushChan <- flushTask{mt: db.mt}: + case db.flushChan <- db.mt: db.opt.Debugf("Flushing memtable, mt.size=%d size of flushChan: %d\n", db.mt.sl.MemSize(), len(db.flushChan)) // We manage to push this task. Let's modify imm. @@ -1010,12 +1052,12 @@ func arenaSize(opt Options) int64 { } // buildL0Table builds a new table from the memtable. 
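 // In v4 it takes a y.Iterator and the prefixes to drop directly, replacing the
 // removed flushTask wrapper; handleMemTableFlush below hands it a UniIterator
 // over the memtable's skiplist.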
-func buildL0Table(ft flushTask, bopts table.Options) *table.Builder {
-	iter := ft.mt.sl.NewIterator()
+func buildL0Table(iter y.Iterator, dropPrefixes [][]byte, bopts table.Options) *table.Builder {
 	defer iter.Close()
+
 	b := table.NewTableBuilder(bopts)
-	for iter.SeekToFirst(); iter.Valid(); iter.Next() {
-		if len(ft.dropPrefixes) > 0 && hasAnyPrefixes(iter.Key(), ft.dropPrefixes) {
+	for iter.Rewind(); iter.Valid(); iter.Next() {
+		if len(dropPrefixes) > 0 && hasAnyPrefixes(iter.Key(), dropPrefixes) {
 			continue
 		}
 		vs := iter.Value()
@@ -1025,23 +1067,15 @@ func buildL0Table(ft flushTask, bopts table.Options) *table.Builder {
 		}
 		b.Add(iter.Key(), iter.Value(), vp.Len)
 	}
-	return b
-}
 
-type flushTask struct {
-	mt           *memTable
-	dropPrefixes [][]byte
+	return b
 }
 
-// handleFlushTask must be run serially.
-func (db *DB) handleFlushTask(ft flushTask) error {
-	// There can be a scenario, when empty memtable is flushed.
-	if ft.mt.sl.Empty() {
-		return nil
-	}
-
+// handleMemTableFlush must be run serially.
+func (db *DB) handleMemTableFlush(mt *memTable, dropPrefixes [][]byte) error {
 	bopts := buildTableOptions(db)
-	builder := buildL0Table(ft, bopts)
+	itr := mt.sl.NewUniIterator(false)
+	// Pass dropPrefixes through to the table builder; passing nil here would
+	// silently ignore the prefixes that DropPrefix asks us to skip while flushing.
+	builder := buildL0Table(itr, dropPrefixes, bopts)
 	defer builder.Close()
 
 	// buildL0Table can return nil if none of the items in the skiplist are
@@ -1070,39 +1104,39 @@ func (db *DB) handleFlushTask(ft flushTask) error {
 	return err
 }
 
-// flushMemtable must keep running until we send it an empty flushTask. If there
-// are errors during handling the flush task, we'll retry indefinitely.
-func (db *DB) flushMemtable(lc *z.Closer) error {
+// flushMemtable must keep running until we send it an empty memtable. If there
+// are errors during handling the memtable flush, we'll retry indefinitely.
+func (db *DB) flushMemtable(lc *z.Closer) {
 	defer lc.Done()
 
-	for ft := range db.flushChan {
-		if ft.mt == nil {
-			// We close db.flushChan now, instead of sending a nil ft.mt.
+	for mt := range db.flushChan {
+		if mt == nil {
 			continue
 		}
-		for {
-			err := db.handleFlushTask(ft)
-			if err == nil {
-				// Update s.imm. Need a lock.
-				db.lock.Lock()
-				// This is a single-threaded operation. ft.mt corresponds to the head of
-				// db.imm list. Once we flush it, we advance db.imm. The next ft.mt
-				// which would arrive here would match db.imm[0], because we acquire a
-				// lock over DB when pushing to flushChan.
-				// TODO: This logic is dirty AF. Any change and this could easily break.
-				y.AssertTrue(ft.mt == db.imm[0])
-				db.imm = db.imm[1:]
-				ft.mt.DecrRef() // Return memory.
-				db.lock.Unlock()
-				break
+		for {
+			if err := db.handleMemTableFlush(mt, nil); err != nil {
+				// Encountered error. Retry indefinitely.
+				db.opt.Errorf("error flushing memtable to disk: %v, retrying", err)
+				time.Sleep(time.Second)
+				continue
 			}
-			// Encountered error. Retry indefinitely.
-			db.opt.Errorf("Failure while flushing memtable to disk: %v. Retrying...\n", err)
-			time.Sleep(time.Second)
+
+			// Update s.imm. Need a lock.
+			db.lock.Lock()
+			// This is a single-threaded operation. mt corresponds to the head of
+			// db.imm list. Once we flush it, we advance db.imm. The next mt
+			// which would arrive here would match db.imm[0], because we acquire a
+			// lock over DB when pushing to flushChan.
+			// TODO: This logic is dirty AF. Any change and this could easily break.
+			y.AssertTrue(mt == db.imm[0])
+			db.imm = db.imm[1:]
+			mt.DecrRef() // Return memory.
+			// Done with the head of db.imm; unlock before the next memtable.
+			db.lock.Unlock()
+			break
 		}
 	}
-	return nil
 }
 
 func exists(path string) (bool, error) {
@@ -1522,10 +1556,10 @@ func (db *DB) startCompactions() {
 func (db *DB) startMemoryFlush() {
 	// Start memory flusher.
 	if db.closers.memtable != nil {
-		db.flushChan = make(chan flushTask, db.opt.NumMemtables)
+		db.flushChan = make(chan *memTable, db.opt.NumMemtables)
 		db.closers.memtable = z.NewCloser(1)
 		go func() {
-			_ = db.flushMemtable(db.closers.memtable)
+			db.flushMemtable(db.closers.memtable)
 		}()
 	}
 }
@@ -1585,7 +1619,7 @@ func (db *DB) Flatten(workers int) error {
 		}
 	}
 	if len(levels) <= 1 {
-		prios := db.lc.pickCompactLevels()
+		prios := db.lc.pickCompactLevels(nil)
 		if len(prios) == 0 || prios[0].score <= 1.0 {
 			db.opt.Infof("All tables consolidated into one level. Flattening done.\n")
 			return nil
@@ -1605,7 +1639,7 @@ func (db *DB) Flatten(workers int) error {
 
 func (db *DB) blockWrite() error {
 	// Stop accepting new writes.
-	if !atomic.CompareAndSwapInt32(&db.blockWrites, 0, 1) {
+	if !db.blockWrites.CompareAndSwap(0, 1) {
 		return ErrBlockedWrites
 	}
 
@@ -1620,7 +1654,7 @@ func (db *DB) unblockWrite() {
 	go db.doWrites(db.closers.writes)
 
 	// Resume writes.
-	atomic.StoreInt32(&db.blockWrites, 0)
+	db.blockWrites.Store(0)
 }
 
 func (db *DB) prepareToDrop() (func(), error) {
@@ -1628,10 +1662,10 @@ func (db *DB) prepareToDrop() (func(), error) {
 		panic("Attempting to drop data in read-only mode.")
 	}
 	// In order to prepare for a drop, we need to block the incoming writes and
-	// write it to db. Then, flush all the pending flushtask. So that, we
+	// write them to db. Then, flush all the pending memtables so that we
 	// don't miss any entries.
 	if err := db.blockWrite(); err != nil {
-		return nil, err
+		return func() {}, err
 	}
 	reqs := make([]*request, 0, 10)
 	for {
@@ -1677,7 +1711,7 @@ func (db *DB) dropAll() (func(), error) {
 	if err != nil {
 		return f, err
 	}
-	// prepareToDrop will stop all the incomming write and flushes any pending flush tasks.
+	// prepareToDrop will stop all the incoming writes and flush any pending memtables.
 	// Before we drop, we'll stop the compaction because all the data is going to
 	// be deleted anyway.
 	db.stopCompactions()
@@ -1710,7 +1744,7 @@ func (db *DB) dropAll() (func(), error) {
 	if err != nil {
 		return resume, err
 	}
-	db.lc.nextFileID = 1
+	db.lc.nextFileID.Store(1)
 	db.opt.Infof("Deleted %d value log files. DropAll done.\n", num)
 	db.blockCache.Clear()
 	db.indexCache.Clear()
 
 }
 
 // DropPrefix would drop all the keys with the provided prefix. It does this in the following way:
-// - Stop accepting new writes.
-// - Stop memtable flushes before acquiring lock. Because we're acquring lock here
-// and memtable flush stalls for lock, which leads to deadlock
-// - Flush out all memtables, skipping over keys with the given prefix, Kp.
-// - Write out the value log header to memtables when flushing, so we don't accidentally bring Kp
-// back after a restart.
-// - Stop compaction.
-// - Compact L0->L1, skipping over Kp.
-// - Compact rest of the levels, Li->Li, picking tables which have Kp.
-// - Resume memtable flushes, compactions and writes.
+//   - Stop accepting new writes.
+//   - Stop memtable flushes before acquiring the lock, because we acquire the
+//     lock here while memtable flushes also stall on it, which would deadlock.
+//   - Flush out all memtables, skipping over keys with the given prefix, Kp.
+// - Write out the value log header to memtables when flushing, so we don't accidentally bring Kp +// back after a restart. +// - Stop compaction. +// - Compact L0->L1, skipping over Kp. +// - Compact rest of the levels, Li->Li, picking tables which have Kp. +// - Resume memtable flushes, compactions and writes. func (db *DB) DropPrefix(prefixes ...[]byte) error { if len(prefixes) == 0 { return nil @@ -1759,13 +1793,8 @@ func (db *DB) DropPrefix(prefixes ...[]byte) error { memtable.DecrRef() continue } - task := flushTask{ - mt: memtable, - // Ensure that the head of value log gets persisted to disk. - dropPrefixes: filtered, - } db.opt.Debugf("Flushing memtable") - if err := db.handleFlushTask(task); err != nil { + if err := db.handleMemTableFlush(memtable, filtered); err != nil { db.opt.Errorf("While trying to flush memtable: %v", err) return err } @@ -1869,7 +1898,10 @@ func (db *DB) Subscribe(ctx context.Context, cb func(kv *KVList) error, matches } c := z.NewCloser(1) - s := db.pub.newSubscriber(c, matches) + s, err := db.pub.newSubscriber(c, matches) + if err != nil { + return y.Wrapf(err, "while creating a new subscriber") + } slurp := func(batch *pb.KVList) error { for { select { @@ -1887,7 +1919,11 @@ func (db *DB) Subscribe(ctx context.Context, cb func(kv *KVList) error, matches drain := func() { for { select { - case <-s.sendCh: + case _, ok := <-s.sendCh: + if !ok { + // Channel is closed. + return + } default: return } @@ -1904,7 +1940,7 @@ func (db *DB) Subscribe(ctx context.Context, cb func(kv *KVList) error, matches return err case <-ctx.Done(): c.Done() - atomic.StoreUint64(s.active, 0) + s.active.Store(0) drain() db.pub.deleteSubscriber(s.id) // Delete the subscriber to avoid further updates. @@ -1913,7 +1949,7 @@ func (db *DB) Subscribe(ctx context.Context, cb func(kv *KVList) error, matches err := slurp(batch) if err != nil { c.Done() - atomic.StoreUint64(s.active, 0) + s.active.Store(0) drain() // Delete the subscriber if there is an error by the callback. db.pub.deleteSubscriber(s.id) @@ -1923,11 +1959,6 @@ func (db *DB) Subscribe(ctx context.Context, cb func(kv *KVList) error, matches } } -// shouldEncrypt returns bool, which tells whether to encrypt or not. -func (db *DB) shouldEncrypt() bool { - return len(db.opt.EncryptionKey) > 0 -} - func (db *DB) syncDir(dir string) error { if db.opt.InMemory { return nil @@ -1968,7 +1999,7 @@ func (db *DB) StreamDB(outOptions Options) error { defer outDB.Close() writer := outDB.NewStreamWriter() if err := writer.Prepare(); err != nil { - y.Wrapf(err, "cannot create stream writer in out DB at %s", outDir) + return y.Wrapf(err, "cannot create stream writer in out DB at %s", outDir) } // Stream contents of DB to the output DB. diff --git a/vendor/github.com/dgraph-io/badger/v3/dir_plan9.go b/vendor/github.com/dgraph-io/badger/v4/dir_plan9.go similarity index 96% rename from vendor/github.com/dgraph-io/badger/v3/dir_plan9.go rename to vendor/github.com/dgraph-io/badger/v4/dir_plan9.go index 76e3fa2111..7f7f194bcd 100644 --- a/vendor/github.com/dgraph-io/badger/v3/dir_plan9.go +++ b/vendor/github.com/dgraph-io/badger/v4/dir_plan9.go @@ -22,7 +22,7 @@ import ( "path/filepath" "strings" - "github.com/dgraph-io/badger/v3/y" + "github.com/dgraph-io/badger/v4/y" ) // directoryLockGuard holds a lock on a directory and a pid file inside. The pid file isn't part @@ -123,9 +123,9 @@ func syncDir(dir string) error { // Opening an exclusive-use file returns an error. 
// The expected error strings are: // -// - "open/create -- file is locked" (cwfs, kfs) -// - "exclusive lock" (fossil) -// - "exclusive use file already open" (ramfs) +// - "open/create -- file is locked" (cwfs, kfs) +// - "exclusive lock" (fossil) +// - "exclusive use file already open" (ramfs) // // See https://github.com/golang/go/blob/go1.15rc1/src/cmd/go/internal/lockedfile/lockedfile_plan9.go#L16 var lockedErrStrings = [...]string{ diff --git a/vendor/github.com/dgraph-io/badger/v3/dir_unix.go b/vendor/github.com/dgraph-io/badger/v4/dir_unix.go similarity index 95% rename from vendor/github.com/dgraph-io/badger/v3/dir_unix.go rename to vendor/github.com/dgraph-io/badger/v4/dir_unix.go index 48addc976a..ecaa5ced78 100644 --- a/vendor/github.com/dgraph-io/badger/v3/dir_unix.go +++ b/vendor/github.com/dgraph-io/badger/v4/dir_unix.go @@ -1,3 +1,4 @@ +//go:build !windows && !plan9 // +build !windows,!plan9 /* @@ -20,12 +21,12 @@ package badger import ( "fmt" - "io/ioutil" "os" "path/filepath" - "github.com/dgraph-io/badger/v3/y" "golang.org/x/sys/unix" + + "github.com/dgraph-io/badger/v4/y" ) // directoryLockGuard holds a lock on a directory and a pid file inside. The pid file isn't part @@ -70,7 +71,7 @@ func acquireDirectoryLock(dirPath string, pidFileName string, readOnly bool) ( if !readOnly { // Yes, we happily overwrite a pre-existing pid file. We're the // only read-write badger process using this directory. - err = ioutil.WriteFile(absPidFilePath, []byte(fmt.Sprintf("%d\n", os.Getpid())), 0666) + err = os.WriteFile(absPidFilePath, []byte(fmt.Sprintf("%d\n", os.Getpid())), 0666) if err != nil { f.Close() return nil, y.Wrapf(err, diff --git a/vendor/github.com/dgraph-io/badger/v3/dir_windows.go b/vendor/github.com/dgraph-io/badger/v4/dir_windows.go similarity index 98% rename from vendor/github.com/dgraph-io/badger/v3/dir_windows.go rename to vendor/github.com/dgraph-io/badger/v4/dir_windows.go index 43b00e37c7..bd8d2f9d2f 100644 --- a/vendor/github.com/dgraph-io/badger/v3/dir_windows.go +++ b/vendor/github.com/dgraph-io/badger/v4/dir_windows.go @@ -1,3 +1,4 @@ +//go:build windows // +build windows /* @@ -24,7 +25,7 @@ import ( "path/filepath" "syscall" - "github.com/dgraph-io/badger/v3/y" + "github.com/dgraph-io/badger/v4/y" ) // FILE_ATTRIBUTE_TEMPORARY - A file that is being used for temporary storage. diff --git a/vendor/github.com/dgraph-io/badger/v3/discard.go b/vendor/github.com/dgraph-io/badger/v4/discard.go similarity index 94% rename from vendor/github.com/dgraph-io/badger/v3/discard.go rename to vendor/github.com/dgraph-io/badger/v4/discard.go index 050d5caa2e..409939efc0 100644 --- a/vendor/github.com/dgraph-io/badger/v3/discard.go +++ b/vendor/github.com/dgraph-io/badger/v4/discard.go @@ -23,8 +23,8 @@ import ( "sort" "sync" - "github.com/dgraph-io/badger/v3/y" - "github.com/dgraph-io/ristretto/z" + "github.com/dgraph-io/badger/v4/y" + "github.com/dgraph-io/ristretto/v2/z" ) // discardStats keeps track of the amount of data that could be discarded for @@ -42,14 +42,14 @@ const discardFname string = "DISCARD" func InitDiscardStats(opt Options) (*discardStats, error) { fname := filepath.Join(opt.ValueDir, discardFname) - // 1GB file can store 67M discard entries. Each entry is 16 bytes. + // 1MB file can store 65.536 discard entries. Each entry is 16 bytes. mf, err := z.OpenMmapFile(fname, os.O_CREATE|os.O_RDWR, 1<<20) lf := &discardStats{ MmapFile: mf, opt: opt, } if err == z.NewFile { - // We don't need to zero out the entire 1GB. 
+ // We don't need to zero out the entire 1MB. lf.zeroOut() } else if err != nil { @@ -131,7 +131,7 @@ func (lf *discardStats) Update(fidu uint32, discard int64) int64 { // Could not find the fid. Add the entry. idx = lf.nextEmptySlot - lf.set(idx*16, uint64(fid)) + lf.set(idx*16, fid) lf.set(idx*16+8, uint64(discard)) // Move to next slot. @@ -142,7 +142,7 @@ func (lf *discardStats) Update(fidu uint32, discard int64) int64 { lf.zeroOut() sort.Sort(lf) - return int64(discard) + return discard } func (lf *discardStats) Iterate(f func(fid, stats uint64)) { diff --git a/vendor/github.com/dgraph-io/badger/v3/doc.go b/vendor/github.com/dgraph-io/badger/v4/doc.go similarity index 99% rename from vendor/github.com/dgraph-io/badger/v3/doc.go rename to vendor/github.com/dgraph-io/badger/v4/doc.go index 83dc9a28ac..128f2fccb9 100644 --- a/vendor/github.com/dgraph-io/badger/v3/doc.go +++ b/vendor/github.com/dgraph-io/badger/v4/doc.go @@ -10,8 +10,7 @@ hence reducing both write amplification and the size of the LSM tree. This allows LSM tree to be served entirely from RAM, while the values are served from SSD. - -Usage +# Usage Badger has the following main types: DB, Txn, Item and Iterator. DB contains keys that are associated with values. It must be opened with the appropriate diff --git a/vendor/github.com/dgraph-io/badger/v3/errors.go b/vendor/github.com/dgraph-io/badger/v4/errors.go similarity index 69% rename from vendor/github.com/dgraph-io/badger/v3/errors.go rename to vendor/github.com/dgraph-io/badger/v4/errors.go index f5df6d5118..ffdac6a244 100644 --- a/vendor/github.com/dgraph-io/badger/v3/errors.go +++ b/vendor/github.com/dgraph-io/badger/v4/errors.go @@ -17,9 +17,8 @@ package badger import ( + stderrors "errors" "math" - - "github.com/pkg/errors" ) const ( @@ -30,97 +29,97 @@ const ( var ( // ErrValueLogSize is returned when opt.ValueLogFileSize option is not within the valid // range. - ErrValueLogSize = errors.New("Invalid ValueLogFileSize, must be in range [1MB, 2GB)") + ErrValueLogSize = stderrors.New("Invalid ValueLogFileSize, must be in range [1MB, 2GB)") // ErrKeyNotFound is returned when key isn't found on a txn.Get. - ErrKeyNotFound = errors.New("Key not found") + ErrKeyNotFound = stderrors.New("Key not found") // ErrTxnTooBig is returned if too many writes are fit into a single transaction. - ErrTxnTooBig = errors.New("Txn is too big to fit into one request") + ErrTxnTooBig = stderrors.New("Txn is too big to fit into one request") // ErrConflict is returned when a transaction conflicts with another transaction. This can // happen if the read rows had been updated concurrently by another transaction. - ErrConflict = errors.New("Transaction Conflict. Please retry") + ErrConflict = stderrors.New("Transaction Conflict. Please retry") // ErrReadOnlyTxn is returned if an update function is called on a read-only transaction. - ErrReadOnlyTxn = errors.New("No sets or deletes are allowed in a read-only transaction") + ErrReadOnlyTxn = stderrors.New("No sets or deletes are allowed in a read-only transaction") // ErrDiscardedTxn is returned if a previously discarded transaction is re-used. - ErrDiscardedTxn = errors.New("This transaction has been discarded. Create a new one") + ErrDiscardedTxn = stderrors.New("This transaction has been discarded. Create a new one") // ErrEmptyKey is returned if an empty key is passed on an update function. 
- ErrEmptyKey = errors.New("Key cannot be empty") + ErrEmptyKey = stderrors.New("Key cannot be empty") // ErrInvalidKey is returned if the key has a special !badger! prefix, // reserved for internal usage. - ErrInvalidKey = errors.New("Key is using a reserved !badger! prefix") + ErrInvalidKey = stderrors.New("Key is using a reserved !badger! prefix") // ErrBannedKey is returned if the read/write key belongs to any banned namespace. - ErrBannedKey = errors.New("Key is using the banned prefix") + ErrBannedKey = stderrors.New("Key is using the banned prefix") // ErrThresholdZero is returned if threshold is set to zero, and value log GC is called. // In such a case, GC can't be run. - ErrThresholdZero = errors.New( + ErrThresholdZero = stderrors.New( "Value log GC can't run because threshold is set to zero") // ErrNoRewrite is returned if a call for value log GC doesn't result in a log file rewrite. - ErrNoRewrite = errors.New( + ErrNoRewrite = stderrors.New( "Value log GC attempt didn't result in any cleanup") // ErrRejected is returned if a value log GC is called either while another GC is running, or // after DB::Close has been called. - ErrRejected = errors.New("Value log GC request rejected") + ErrRejected = stderrors.New("Value log GC request rejected") // ErrInvalidRequest is returned if the user request is invalid. - ErrInvalidRequest = errors.New("Invalid request") + ErrInvalidRequest = stderrors.New("Invalid request") // ErrManagedTxn is returned if the user tries to use an API which isn't // allowed due to external management of transactions, when using ManagedDB. - ErrManagedTxn = errors.New( + ErrManagedTxn = stderrors.New( "Invalid API request. Not allowed to perform this action using ManagedDB") // ErrNamespaceMode is returned if the user tries to use an API which is allowed only when // NamespaceOffset is non-negative. - ErrNamespaceMode = errors.New( + ErrNamespaceMode = stderrors.New( "Invalid API request. Not allowed to perform this action when NamespaceMode is not set.") // ErrInvalidDump if a data dump made previously cannot be loaded into the database. - ErrInvalidDump = errors.New("Data dump cannot be read") + ErrInvalidDump = stderrors.New("Data dump cannot be read") // ErrZeroBandwidth is returned if the user passes in zero bandwidth for sequence. - ErrZeroBandwidth = errors.New("Bandwidth must be greater than zero") + ErrZeroBandwidth = stderrors.New("Bandwidth must be greater than zero") // ErrWindowsNotSupported is returned when opt.ReadOnly is used on Windows - ErrWindowsNotSupported = errors.New("Read-only mode is not supported on Windows") + ErrWindowsNotSupported = stderrors.New("Read-only mode is not supported on Windows") // ErrPlan9NotSupported is returned when opt.ReadOnly is used on Plan 9 - ErrPlan9NotSupported = errors.New("Read-only mode is not supported on Plan 9") + ErrPlan9NotSupported = stderrors.New("Read-only mode is not supported on Plan 9") // ErrTruncateNeeded is returned when the value log gets corrupt, and requires truncation of // corrupt data to allow Badger to run properly. - ErrTruncateNeeded = errors.New( + ErrTruncateNeeded = stderrors.New( "Log truncate required to run DB. This might result in data loss") // ErrBlockedWrites is returned if the user called DropAll. During the process of dropping all // data from Badger, we stop accepting new writes, by returning this error. 
- ErrBlockedWrites = errors.New("Writes are blocked, possibly due to DropAll or Close") + ErrBlockedWrites = stderrors.New("Writes are blocked, possibly due to DropAll or Close") // ErrNilCallback is returned when subscriber's callback is nil. - ErrNilCallback = errors.New("Callback cannot be nil") + ErrNilCallback = stderrors.New("Callback cannot be nil") // ErrEncryptionKeyMismatch is returned when the storage key is not // matched with the key previously given. - ErrEncryptionKeyMismatch = errors.New("Encryption key mismatch") + ErrEncryptionKeyMismatch = stderrors.New("Encryption key mismatch") // ErrInvalidDataKeyID is returned if the datakey id is invalid. - ErrInvalidDataKeyID = errors.New("Invalid datakey id") + ErrInvalidDataKeyID = stderrors.New("Invalid datakey id") // ErrInvalidEncryptionKey is returned if length of encryption keys is invalid. - ErrInvalidEncryptionKey = errors.New("Encryption key's length should be" + + ErrInvalidEncryptionKey = stderrors.New("Encryption key's length should be" + "either 16, 24, or 32 bytes") // ErrGCInMemoryMode is returned when db.RunValueLogGC is called in in-memory mode. - ErrGCInMemoryMode = errors.New("Cannot run value log GC when DB is opened in InMemory mode") + ErrGCInMemoryMode = stderrors.New("Cannot run value log GC when DB is opened in InMemory mode") // ErrDBClosed is returned when a get operation is performed after closing the DB. - ErrDBClosed = errors.New("DB Closed") + ErrDBClosed = stderrors.New("DB Closed") ) diff --git a/vendor/github.com/dgraph-io/badger/v3/fb/BlockOffset.go b/vendor/github.com/dgraph-io/badger/v4/fb/BlockOffset.go similarity index 100% rename from vendor/github.com/dgraph-io/badger/v3/fb/BlockOffset.go rename to vendor/github.com/dgraph-io/badger/v4/fb/BlockOffset.go diff --git a/vendor/github.com/dgraph-io/badger/v3/fb/TableIndex.go b/vendor/github.com/dgraph-io/badger/v4/fb/TableIndex.go similarity index 100% rename from vendor/github.com/dgraph-io/badger/v3/fb/TableIndex.go rename to vendor/github.com/dgraph-io/badger/v4/fb/TableIndex.go diff --git a/vendor/github.com/dgraph-io/badger/v3/fb/flatbuffer.fbs b/vendor/github.com/dgraph-io/badger/v4/fb/flatbuffer.fbs similarity index 100% rename from vendor/github.com/dgraph-io/badger/v3/fb/flatbuffer.fbs rename to vendor/github.com/dgraph-io/badger/v4/fb/flatbuffer.fbs diff --git a/vendor/github.com/dgraph-io/badger/v3/fb/gen.sh b/vendor/github.com/dgraph-io/badger/v4/fb/gen.sh similarity index 100% rename from vendor/github.com/dgraph-io/badger/v3/fb/gen.sh rename to vendor/github.com/dgraph-io/badger/v4/fb/gen.sh diff --git a/vendor/github.com/dgraph-io/badger/v3/fb/install_flatbuffers.sh b/vendor/github.com/dgraph-io/badger/v4/fb/install_flatbuffers.sh similarity index 100% rename from vendor/github.com/dgraph-io/badger/v3/fb/install_flatbuffers.sh rename to vendor/github.com/dgraph-io/badger/v4/fb/install_flatbuffers.sh diff --git a/vendor/github.com/dgraph-io/badger/v3/histogram.go b/vendor/github.com/dgraph-io/badger/v4/histogram.go similarity index 99% rename from vendor/github.com/dgraph-io/badger/v3/histogram.go rename to vendor/github.com/dgraph-io/badger/v4/histogram.go index d8c94bb7ad..ad6002123d 100644 --- a/vendor/github.com/dgraph-io/badger/v3/histogram.go +++ b/vendor/github.com/dgraph-io/badger/v4/histogram.go @@ -106,7 +106,7 @@ func (histogram *histogramData) Update(value int64) { } // Check if the value should be added to the "index" bin - if value < int64(histogram.bins[index]) { + if value < histogram.bins[index] { 
histogram.countPerBin[index]++ break } diff --git a/vendor/github.com/dgraph-io/badger/v3/iterator.go b/vendor/github.com/dgraph-io/badger/v4/iterator.go similarity index 95% rename from vendor/github.com/dgraph-io/badger/v3/iterator.go rename to vendor/github.com/dgraph-io/badger/v4/iterator.go index 409e8618a6..54d2bf27dc 100644 --- a/vendor/github.com/dgraph-io/badger/v3/iterator.go +++ b/vendor/github.com/dgraph-io/badger/v4/iterator.go @@ -23,13 +23,11 @@ import ( "math" "sort" "sync" - "sync/atomic" "time" - "github.com/dgraph-io/badger/v3/table" - "github.com/dgraph-io/ristretto/z" - - "github.com/dgraph-io/badger/v3/y" + "github.com/dgraph-io/badger/v4/table" + "github.com/dgraph-io/badger/v4/y" + "github.com/dgraph-io/ristretto/v2/z" ) type prefetchStatus uint8 @@ -170,7 +168,7 @@ func (item *Item) yieldItemValue() ([]byte, func(), error) { db := item.txn.db result, cb, err := db.vlog.Read(vp, item.slice) if err != nil { - db.opt.Logger.Errorf("Unable to read: Key: %v, Version : %v, meta: %v, userMeta: %v"+ + db.opt.Errorf("Unable to read: Key: %v, Version : %v, meta: %v, userMeta: %v"+ " Error: %v", key, item.version, item.meta, item.userMeta, err) var txn *Txn if db.opt.managedTxns { @@ -193,7 +191,7 @@ func (item *Item) yieldItemValue() ([]byte, func(), error) { if item.meta&bitValuePointer > 0 { vp.Decode(item.vptr) } - db.opt.Logger.Errorf("Key: %v, Version : %v, meta: %v, userMeta: %v valuePointer: %+v", + db.opt.Errorf("Key: %v, Version : %v, meta: %v, userMeta: %v valuePointer: %+v", item.Key(), item.version, item.meta, item.userMeta, vp) } } @@ -464,24 +462,25 @@ type Iterator struct { // Using prefetch is recommended if you're doing a long running iteration, for performance. // // Multiple Iterators: -// For a read-only txn, multiple iterators can be running simultaneously. However, for a read-write +// For a read-only txn, multiple iterators can be running simultaneously. However, for a read-write // txn, iterators have the nuance of being a snapshot of the writes for the transaction at the time // iterator was created. If writes are performed after an iterator is created, then that iterator // will not be able to see those writes. Only writes performed before an iterator was created can be // viewed. func (txn *Txn) NewIterator(opt IteratorOptions) *Iterator { if txn.discarded { - panic("Transaction has already been discarded") + panic(ErrDiscardedTxn) } if txn.db.IsClosed() { - panic(ErrDBClosed.Error()) + panic(ErrDBClosed) } + y.NumIteratorsCreatedAdd(txn.db.opt.MetricsEnabled, 1) + // Keep track of the number of active iterators. - atomic.AddInt32(&txn.numIterators, 1) + txn.numIterators.Add(1) - // TODO: If Prefix is set, only pick those memtables which have keys with - // the prefix. + // TODO: If Prefix is set, only pick those memtables which have keys with the prefix. tables, decr := txn.db.getMemTables() defer decr() txn.db.vlog.incrIteratorCount() @@ -555,7 +554,7 @@ func (it *Iterator) Close() { } it.closed = true if it.iitr == nil { - atomic.AddInt32(&it.txn.numIterators, -1) + it.txn.numIterators.Add(-1) return } @@ -574,7 +573,7 @@ func (it *Iterator) Close() { // TODO: We could handle this error. _ = it.txn.db.vlog.decrIteratorCount() - atomic.AddInt32(&it.txn.numIterators, -1) + it.txn.numIterators.Add(-1) } // Next would advance the iterator by one. 
Always check it.Valid() after a Next() @@ -590,7 +589,7 @@ func (it *Iterator) Next() { // Set next item to current it.item = it.data.pop() - for it.iitr.Valid() { + for it.iitr.Valid() && hasPrefix(it) { if it.parseItem() { // parseItem calls one extra next. // This is used to deal with the complexity of reverse iteration. @@ -726,6 +725,15 @@ func (it *Iterator) fill(item *Item) { } } +func hasPrefix(it *Iterator) bool { + // We shouldn't check prefix in case the iterator is going in reverse. Since in reverse we expect + // people to append items to the end of prefix. + if !it.opt.Reverse && len(it.opt.Prefix) > 0 { + return bytes.HasPrefix(y.ParseKey(it.iitr.Key()), it.opt.Prefix) + } + return true +} + func (it *Iterator) prefetch() { prefetchSize := 2 if it.opt.PrefetchValues && it.opt.PrefetchSize > 1 { @@ -735,7 +743,7 @@ func (it *Iterator) prefetch() { i := it.iitr var count int it.item = nil - for i.Valid() { + for i.Valid() && hasPrefix(it) { if !it.parseItem() { continue } diff --git a/vendor/github.com/dgraph-io/badger/v3/key_registry.go b/vendor/github.com/dgraph-io/badger/v4/key_registry.go similarity index 98% rename from vendor/github.com/dgraph-io/badger/v3/key_registry.go rename to vendor/github.com/dgraph-io/badger/v4/key_registry.go index 306e9c1297..ddca6501ad 100644 --- a/vendor/github.com/dgraph-io/badger/v3/key_registry.go +++ b/vendor/github.com/dgraph-io/badger/v4/key_registry.go @@ -28,8 +28,9 @@ import ( "sync" "time" - "github.com/dgraph-io/badger/v3/pb" - "github.com/dgraph-io/badger/v3/y" + "github.com/dgraph-io/badger/v4/pb" + "github.com/dgraph-io/badger/v4/y" + "google.golang.org/protobuf/proto" ) const ( @@ -195,7 +196,7 @@ func (kri *keyRegistryIterator) next() (*pb.DataKey, error) { return nil, y.Wrapf(y.ErrChecksumMismatch, "Error while checking checksum for data key.") } dataKey := &pb.DataKey{} - if err = dataKey.Unmarshal(data); err != nil { + if err = proto.Unmarshal(data, dataKey); err != nil { return nil, y.Wrapf(err, "While unmarshal of datakey in keyRegistryIterator.next") } if len(kri.encryptionKey) > 0 { @@ -404,7 +405,7 @@ func storeDataKey(buf *bytes.Buffer, storageKey []byte, k *pb.DataKey) error { return y.Wrapf(err, "Error while encrypting datakey in storeDataKey") } var data []byte - if data, err = k.Marshal(); err != nil { + if data, err = proto.Marshal(k); err != nil { err = y.Wrapf(err, "Error while marshaling datakey in storeDataKey") var err2 error // decrypting the datakey back. diff --git a/vendor/github.com/dgraph-io/badger/v3/level_handler.go b/vendor/github.com/dgraph-io/badger/v4/level_handler.go similarity index 98% rename from vendor/github.com/dgraph-io/badger/v3/level_handler.go rename to vendor/github.com/dgraph-io/badger/v4/level_handler.go index 669c097f4f..fc81cc452c 100644 --- a/vendor/github.com/dgraph-io/badger/v3/level_handler.go +++ b/vendor/github.com/dgraph-io/badger/v4/level_handler.go @@ -21,8 +21,8 @@ import ( "sort" "sync" - "github.com/dgraph-io/badger/v3/table" - "github.com/dgraph-io/badger/v3/y" + "github.com/dgraph-io/badger/v4/table" + "github.com/dgraph-io/badger/v4/y" ) type levelHandler struct { @@ -165,8 +165,8 @@ func (s *levelHandler) addTable(t *table.Table) { // sortTables sorts tables of levelHandler based on table.Smallest. // Normally it should be called after all addTable calls. 
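 // (sort.Slice mutates s.tables in place, which is why the change below takes
 // the write lock: under the previous read lock, a concurrent reader could
 // observe a partially sorted slice, or two sorts could race each other.)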
func (s *levelHandler) sortTables() { - s.RLock() - defer s.RUnlock() + s.Lock() + defer s.Unlock() sort.Slice(s.tables, func(i, j int) bool { return y.CompareKeys(s.tables[i].Smallest(), s.tables[j].Smallest()) < 0 diff --git a/vendor/github.com/dgraph-io/badger/v3/levels.go b/vendor/github.com/dgraph-io/badger/v4/levels.go similarity index 95% rename from vendor/github.com/dgraph-io/badger/v3/levels.go rename to vendor/github.com/dgraph-io/badger/v4/levels.go index 1c625d1b34..7b32cffa9f 100644 --- a/vendor/github.com/dgraph-io/badger/v3/levels.go +++ b/vendor/github.com/dgraph-io/badger/v4/levels.go @@ -20,6 +20,7 @@ import ( "bytes" "context" "encoding/hex" + stderrors "errors" "fmt" "math" "math/rand" @@ -30,18 +31,18 @@ import ( "sync/atomic" "time" + "github.com/pkg/errors" otrace "go.opencensus.io/trace" - "github.com/dgraph-io/badger/v3/pb" - "github.com/dgraph-io/badger/v3/table" - "github.com/dgraph-io/badger/v3/y" - "github.com/dgraph-io/ristretto/z" - "github.com/pkg/errors" + "github.com/dgraph-io/badger/v4/pb" + "github.com/dgraph-io/badger/v4/table" + "github.com/dgraph-io/badger/v4/y" + "github.com/dgraph-io/ristretto/v2/z" ) type levelsController struct { - nextFileID uint64 // Atomic - l0stallsMs int64 // Atomic + nextFileID atomic.Uint64 + l0stallsMs atomic.Int64 // The following are initialized once and const. levels []*levelHandler @@ -107,7 +108,7 @@ func newLevelsController(db *DB, mf *Manifest) (*levelsController, error) { throttle := y.NewThrottle(3) start := time.Now() - var numOpened int32 + var numOpened atomic.Int32 tick := time.NewTicker(3 * time.Second) defer tick.Stop() @@ -115,7 +116,7 @@ func newLevelsController(db *DB, mf *Manifest) (*levelsController, error) { fname := table.NewFilename(fileID, db.opt.Dir) select { case <-tick.C: - db.opt.Infof("%d tables out of %d opened in %s\n", atomic.LoadInt32(&numOpened), + db.opt.Infof("%d tables out of %d opened in %s\n", numOpened.Load(), len(mf.Tables), time.Since(start).Round(time.Millisecond)) default: } @@ -130,7 +131,7 @@ func newLevelsController(db *DB, mf *Manifest) (*levelsController, error) { var rerr error defer func() { throttle.Done(rerr) - atomic.AddInt32(&numOpened, 1) + numOpened.Add(1) }() dk, err := db.registry.DataKey(tf.KeyID) if err != nil { @@ -168,9 +169,9 @@ func newLevelsController(db *DB, mf *Manifest) (*levelsController, error) { closeAllTables(tables) return nil, err } - db.opt.Infof("All %d tables opened in %s\n", atomic.LoadInt32(&numOpened), + db.opt.Infof("All %d tables opened in %s\n", numOpened.Load(), time.Since(start).Round(time.Millisecond)) - s.nextFileID = maxFileID + 1 + s.nextFileID.Store(maxFileID + 1) for i, tbls := range tables { s.levels[i].initTables(tbls) } @@ -473,8 +474,13 @@ func (s *levelsController) runCompactor(id int, lc *z.Closer) { } return false } + + var priosBuffer []compactionPriority runOnce := func() bool { - prios := s.pickCompactLevels() + prios := s.pickCompactLevels(priosBuffer) + defer func() { + priosBuffer = prios + }() if id == 0 { // Worker ID zero prefers to compact L0 always. prios = moveL0toFront(prios) @@ -536,7 +542,9 @@ func (s *levelsController) lastLevel() *levelHandler { // pickCompactLevel determines which level to compact. // Based on: https://github.com/facebook/rocksdb/wiki/Leveled-Compaction -func (s *levelsController) pickCompactLevels() (prios []compactionPriority) { +// It tries to reuse priosBuffer to reduce memory allocation, +// passing nil is acceptable, then new memory will be allocated. 
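+// A typical reuse pattern, as in runCompactor above (names are illustrative):
+//
+//	var buf []compactionPriority
+//	for {
+//		prios := s.pickCompactLevels(buf)
+//		// ... act on prios ...
+//		buf = prios // keep the backing array for the next call
+//	}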
+func (s *levelsController) pickCompactLevels(priosBuffer []compactionPriority) (prios []compactionPriority) {
 	t := s.levelTargets()
 	addPriority := func(level int, score float64) {
 		pri := compactionPriority{
@@ -548,6 +556,12 @@ func (s *levelsController) pickCompactLevels() (prios []compactionPriority) {
 		prios = append(prios, pri)
 	}
 
+	// Grow the buffer to fit all levels.
+	if cap(priosBuffer) < len(s.levels) {
+		priosBuffer = make([]compactionPriority, 0, len(s.levels))
+	}
+	prios = priosBuffer[:0]
+
 	// Add L0 priority based on the number of tables.
 	addPriority(0, float64(s.levels[0].numTables())/float64(s.kv.opt.NumLevelZeroTables))
 
@@ -998,20 +1012,14 @@ func containsPrefix(table *table.Table, prefix []byte) bool {
 		// In table iterator's Seek, we assume that key has version in last 8 bytes. We set
 		// version=0 (ts=math.MaxUint64), so that we don't skip the key prefixed with prefix.
 		ti.Seek(y.KeyWithTs(prefix, math.MaxUint64))
-		if bytes.HasPrefix(ti.Key(), prefix) {
-			return true
-		}
-		return false
+		return bytes.HasPrefix(ti.Key(), prefix)
 	}
 
 	if bytes.Compare(prefix, smallValue) > 0 && bytes.Compare(prefix, largeValue) < 0 {
 		// There may be a case when table contains [0x0000,...., 0xffff]. If we are searching for
 		// k=0x0011, we should not directly infer that k is present. It may not be present.
-		if !isPresent() {
-			return false
-		}
-		return true
+		return isPresent()
 	}
 
 	return false
@@ -1079,14 +1087,13 @@ func (s *levelsController) addSplits(cd *compactDef) {
 			return
 		}
 		if i%width == width-1 {
-			// Right should always have ts=maxUint64 otherwise we'll lose keys
-			// in subcompaction. Consider the following.
+			// Right is assigned ts=0. The encoded ts bytes store MaxUint64-ts, so for
+			// the same user key, a smaller ts compares as larger. Consider the following.
 			// Top table is [A1...C3(deleted)]
 			// bot table is [B1....C2]
-			// This will generate splits like [A1 ... C2] . Notice that we
-			// dropped the C3 which is the last key of the top table.
-			// See TestCompaction/with_split test.
-			right := y.KeyWithTs(y.ParseKey(t.Biggest()), math.MaxUint64)
+			// This will generate a split [A1 ... C0], which includes every version of key C.
+			right := y.KeyWithTs(y.ParseKey(t.Biggest()), 0)
 			addRange(right)
 		}
 	}
@@ -1426,8 +1433,8 @@ func (s *levelsController) runCompactDef(id, l int, cd compactDef) (err error) {
 		cd.splits = append(cd.splits, keyRange{})
 	}
 
-	// Table should never be moved directly between levels, always be rewritten to allow discarding
-	// invalid versions.
+	// Tables should never be moved directly between levels; they should always
+	// be rewritten to allow discarding invalid versions.
 
 	newTables, decr, err := s.compactBuildTables(l, cd)
 	if err != nil {
@@ -1446,6 +1453,22 @@ func (s *levelsController) runCompactDef(id, l int, cd compactDef) (err error) {
 		return err
 	}
 
+	getSizes := func(tables []*table.Table) int64 {
+		size := int64(0)
+		for _, i := range tables {
+			size += i.Size()
+		}
+		return size
+	}
+
+	sizeNewTables := int64(0)
+	sizeOldTables := int64(0)
+	if s.kv.opt.MetricsEnabled {
+		sizeNewTables = getSizes(newTables)
+		sizeOldTables = getSizes(cd.bot) + getSizes(cd.top)
+		y.NumBytesCompactionWrittenAdd(s.kv.opt.MetricsEnabled, nextLevel.strLevel, sizeNewTables)
+	}
+
 	// See comment earlier in this function about the ordering of these ops, and the order in which
 	// we access levels when reading.
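 	// (A side effect of the metrics block above: when MetricsEnabled is false,
 	// sizeOldTables and sizeNewTables stay zero, so the "deleted %d bytes"
 	// figure logged below reports 0 rather than the true reclaimed size.)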
 	if err := nextLevel.replaceTables(cd.bot, newTables); err != nil {
@@ -1466,16 +1489,16 @@ func (s *levelsController) runCompactDef(id, l int, cd compactDef) (err error) {
 			expensive = " [E]"
 		}
 		s.kv.opt.Infof("[%d]%s LOG Compact %d->%d (%d, %d -> %d tables with %d splits)."+
-			" [%s] -> [%s], took %v\n",
+			" [%s] -> [%s], took %v, deleted %d bytes\n",
 			id, expensive, thisLevel.level, nextLevel.level, len(cd.top), len(cd.bot),
 			len(newTables), len(cd.splits), strings.Join(from, " "), strings.Join(to, " "),
-			dur.Round(time.Millisecond))
+			dur.Round(time.Millisecond), sizeOldTables-sizeNewTables)
 	}
 
 	if cd.thisLevel.level != 0 && len(newTables) > 2*s.kv.opt.LevelSizeMultiplier {
-		s.kv.opt.Debugf("This Range (numTables: %d)\nLeft:\n%s\nRight:\n%s\n",
+		s.kv.opt.Infof("This Range (numTables: %d)\nLeft:\n%s\nRight:\n%s\n",
 			len(cd.top), hex.Dump(cd.thisRange.left), hex.Dump(cd.thisRange.right))
-		s.kv.opt.Debugf("Next Range (numTables: %d)\nLeft:\n%s\nRight:\n%s\n",
+		s.kv.opt.Infof("Next Range (numTables: %d)\nLeft:\n%s\nRight:\n%s\n",
 			len(cd.bot), hex.Dump(cd.nextRange.left), hex.Dump(cd.nextRange.right))
 	}
 	return nil
@@ -1490,7 +1513,7 @@ func tablesToString(tables []*table.Table) []string {
 	return res
 }
 
-var errFillTables = errors.New("Unable to fill tables")
+var errFillTables = stderrors.New("Unable to fill tables")
 
 // doCompact picks some table on level l and compacts it away to the next level.
 func (s *levelsController) doCompact(id int, p compactionPriority) error {
@@ -1568,7 +1591,7 @@ func (s *levelsController) addLevel0Table(t *table.Table) error {
 		if dur > time.Second {
 			s.kv.opt.Infof("L0 was stalled for %s\n", dur.Round(time.Millisecond))
 		}
-		atomic.AddInt64(&s.l0stallsMs, int64(dur.Round(time.Millisecond)))
+		s.l0stallsMs.Add(int64(dur.Round(time.Millisecond)))
 	}
 
 	return nil
@@ -1580,8 +1603,8 @@ func (s *levelsController) close() error {
 }
 
 // get searches for a given key in all the levels of the LSM tree. It returns
-// key version <= the expected version (maxVs). If not found, it returns an empty
-// y.ValueStruct.
+// key version <= the expected version (version in key). If not found,
+// it returns an empty y.ValueStruct.
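+//
+// For example, with versions 1..5 of a key stored, a lookup key carrying ts=3
+// (Badger keys embed the version in their last 8 bytes) returns the version-3
+// entry if it exists, and otherwise the newest version below 3. A caller
+// builds such a key with, e.g.:
+//
+//	lookup := y.KeyWithTs(userKey, 3) // read as of version 3; userKey is illustrative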
func (s *levelsController) get(key []byte, maxVs y.ValueStruct, startLevel int) ( y.ValueStruct, error) { if s.kv.IsClosed() { @@ -1605,6 +1628,7 @@ func (s *levelsController) get(key []byte, maxVs y.ValueStruct, startLevel int) if vs.Value == nil && vs.Meta == 0 { continue } + y.NumBytesReadsLSMAdd(s.kv.opt.MetricsEnabled, int64(len(vs.Value))) if vs.Version == version { return vs, nil } @@ -1612,6 +1636,9 @@ func (s *levelsController) get(key []byte, maxVs y.ValueStruct, startLevel int) maxVs = vs } } + if len(maxVs.Value) > 0 { + y.NumGetsWithResultsAdd(s.kv.opt.MetricsEnabled, 1) + } return maxVs, nil } @@ -1694,7 +1721,7 @@ type LevelInfo struct { func (s *levelsController) getLevelInfo() []LevelInfo { t := s.levelTargets() - prios := s.pickCompactLevels() + prios := s.pickCompactLevels(nil) result := make([]LevelInfo, len(s.levels)) for i, l := range s.levels { l.RLock() diff --git a/vendor/github.com/dgraph-io/badger/v3/logger.go b/vendor/github.com/dgraph-io/badger/v4/logger.go similarity index 100% rename from vendor/github.com/dgraph-io/badger/v3/logger.go rename to vendor/github.com/dgraph-io/badger/v4/logger.go diff --git a/vendor/github.com/dgraph-io/badger/v3/managed_db.go b/vendor/github.com/dgraph-io/badger/v4/managed_db.go similarity index 100% rename from vendor/github.com/dgraph-io/badger/v3/managed_db.go rename to vendor/github.com/dgraph-io/badger/v4/managed_db.go diff --git a/vendor/github.com/dgraph-io/badger/v3/manifest.go b/vendor/github.com/dgraph-io/badger/v4/manifest.go similarity index 85% rename from vendor/github.com/dgraph-io/badger/v3/manifest.go rename to vendor/github.com/dgraph-io/badger/v4/manifest.go index 7face8b476..ffe1455294 100644 --- a/vendor/github.com/dgraph-io/badger/v3/manifest.go +++ b/vendor/github.com/dgraph-io/badger/v4/manifest.go @@ -20,18 +20,21 @@ import ( "bufio" "bytes" "encoding/binary" + stderrors "errors" "fmt" "hash/crc32" "io" + "math" "os" "path/filepath" "sync" - "github.com/dgraph-io/badger/v3/options" - "github.com/dgraph-io/badger/v3/pb" - "github.com/dgraph-io/badger/v3/y" - "github.com/golang/protobuf/proto" "github.com/pkg/errors" + "google.golang.org/protobuf/proto" + + "github.com/dgraph-io/badger/v4/options" + "github.com/dgraph-io/badger/v4/pb" + "github.com/dgraph-io/badger/v4/y" ) // Manifest represents the contents of the MANIFEST file in a Badger store. @@ -79,6 +82,10 @@ type TableManifest struct { type manifestFile struct { fp *os.File directory string + + // The external magic number used by the application running badger. 
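+	// An application embedding badger can set this through WithExternalMagic,
+	// so that a directory written with an incompatible application-level
+	// format is rejected at open time; the check itself happens in
+	// ReplayManifestFile further down in this file.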
+	externalMagic uint16
+
 	// We make this configurable so that unit tests can hit rewrite() code quickly
 	deletionsRewriteThreshold int
@@ -124,11 +131,12 @@ func openOrCreateManifestFile(opt Options) (
 	if opt.InMemory {
 		return &manifestFile{inMemory: true}, Manifest{}, nil
 	}
-	return helpOpenOrCreateManifestFile(opt.Dir, opt.ReadOnly, manifestDeletionsRewriteThreshold)
+	return helpOpenOrCreateManifestFile(opt.Dir, opt.ReadOnly, opt.ExternalMagicVersion,
+		manifestDeletionsRewriteThreshold)
 }
 
-func helpOpenOrCreateManifestFile(dir string, readOnly bool, deletionsThreshold int) (
-	*manifestFile, Manifest, error) {
+func helpOpenOrCreateManifestFile(dir string, readOnly bool, extMagic uint16,
+	deletionsThreshold int) (*manifestFile, Manifest, error) {
 	path := filepath.Join(dir, ManifestFilename)
 	var flags y.Flags
@@ -144,7 +152,7 @@
 		return nil, Manifest{}, fmt.Errorf("no manifest found, required for read-only db")
 	}
 	m := createManifest()
-	fp, netCreations, err := helpRewrite(dir, &m)
+	fp, netCreations, err := helpRewrite(dir, &m, extMagic)
 	if err != nil {
 		return nil, Manifest{}, err
 	}
@@ -152,13 +160,14 @@
 	mf := &manifestFile{
 		fp:            fp,
 		directory:     dir,
+		externalMagic: extMagic,
 		manifest:      m.clone(),
 		deletionsRewriteThreshold: deletionsThreshold,
 	}
 	return mf, m, nil
 	}
 
-	manifest, truncOffset, err := ReplayManifestFile(fp)
+	manifest, truncOffset, err := ReplayManifestFile(fp, extMagic)
 	if err != nil {
 		_ = fp.Close()
 		return nil, Manifest{}, err
 	}
@@ -179,6 +188,7 @@
 	mf := &manifestFile{
 		fp:            fp,
 		directory:     dir,
+		externalMagic: extMagic,
 		manifest:      manifest.clone(),
 		deletionsRewriteThreshold: deletionsThreshold,
 	}
@@ -237,10 +247,10 @@ var syncFunc = func(f *os.File) error { return f.Sync() }
 // Has to be 4 bytes. The value can never change, ever, anyway.
 var magicText = [4]byte{'B', 'd', 'g', 'r'}
 
-// The magic version number.
-const magicVersion = 8
+// The magic version number. It is allocated 2 bytes, so its value must be <= math.MaxUint16.
+const badgerMagicVersion = 8
 
-func helpRewrite(dir string, m *Manifest) (*os.File, int, error) {
+func helpRewrite(dir string, m *Manifest, extMagic uint16) (*os.File, int, error) {
 	rewritePath := filepath.Join(dir, manifestRewriteFilename)
 	// We explicitly sync.
fp, err := y.OpenTruncFile(rewritePath, false) @@ -248,9 +258,16 @@ func helpRewrite(dir string, m *Manifest) (*os.File, int, error) { return nil, 0, err } + // magic bytes are structured as + // +---------------------+-------------------------+-----------------------+ + // | magicText (4 bytes) | externalMagic (2 bytes) | badgerMagic (2 bytes) | + // +---------------------+-------------------------+-----------------------+ + + y.AssertTrue(badgerMagicVersion <= math.MaxUint16) buf := make([]byte, 8) copy(buf[0:4], magicText[:]) - binary.BigEndian.PutUint32(buf[4:8], magicVersion) + binary.BigEndian.PutUint16(buf[4:6], extMagic) + binary.BigEndian.PutUint16(buf[6:8], badgerMagicVersion) netCreations := len(m.Tables) changes := m.asChanges() @@ -305,7 +322,7 @@ func (mf *manifestFile) rewrite() error { if err := mf.fp.Close(); err != nil { return err } - fp, netCreations, err := helpRewrite(mf.directory, &mf.manifest) + fp, netCreations, err := helpRewrite(mf.directory, &mf.manifest, mf.externalMagic) if err != nil { return err } @@ -336,8 +353,8 @@ func (r *countingReader) ReadByte() (b byte, err error) { } var ( - errBadMagic = errors.New("manifest has bad magic") - errBadChecksum = errors.New("manifest has checksum mismatch") + errBadMagic = stderrors.New("manifest has bad magic") + errBadChecksum = stderrors.New("manifest has checksum mismatch") ) // ReplayManifestFile reads the manifest file and constructs two manifest objects. (We need one @@ -345,7 +362,7 @@ var ( // Also, returns the last offset after a completely read manifest entry -- the file must be // truncated at that point before further appends are made (if there is a partial entry after // that). In normal conditions, truncOffset is the file size. -func ReplayManifestFile(fp *os.File) (Manifest, int64, error) { +func ReplayManifestFile(fp *os.File, extMagic uint16) (Manifest, int64, error) { r := countingReader{wrapped: bufio.NewReader(fp)} var magicBuf [8]byte @@ -355,14 +372,22 @@ func ReplayManifestFile(fp *os.File) (Manifest, int64, error) { if !bytes.Equal(magicBuf[0:4], magicText[:]) { return Manifest{}, 0, errBadMagic } - version := y.BytesToU32(magicBuf[4:8]) - if version != magicVersion { + + extVersion := y.BytesToU16(magicBuf[4:6]) + version := y.BytesToU16(magicBuf[6:8]) + + if version != badgerMagicVersion { return Manifest{}, 0, //nolint:lll fmt.Errorf("manifest has unsupported version: %d (we support %d).\n"+ - "Please see https://github.com/dgraph-io/badger/blob/master/README.md#i-see-manifest-has-unsupported-version-x-we-support-y-error"+ + "Please see https://dgraph.io/docs/badger/faq/#i-see-manifest-has-unsupported-version-x-we-support-y-error"+ " on how to fix this.", - version, magicVersion) + version, badgerMagicVersion) + } + if extVersion != extMagic { + return Manifest{}, 0, + fmt.Errorf("Cannot open DB because the external magic number doesn't match. 
"+ + "Expected: %d, version present in manifest: %d\n", extMagic, extVersion) } stat, err := fp.Stat() diff --git a/vendor/github.com/dgraph-io/badger/v3/memtable.go b/vendor/github.com/dgraph-io/badger/v4/memtable.go similarity index 95% rename from vendor/github.com/dgraph-io/badger/v3/memtable.go rename to vendor/github.com/dgraph-io/badger/v4/memtable.go index a200fa2fa7..f5950b419b 100644 --- a/vendor/github.com/dgraph-io/badger/v3/memtable.go +++ b/vendor/github.com/dgraph-io/badger/v4/memtable.go @@ -25,7 +25,6 @@ import ( "fmt" "hash/crc32" "io" - "io/ioutil" "os" "path/filepath" "sort" @@ -34,11 +33,12 @@ import ( "sync" "sync/atomic" - "github.com/dgraph-io/badger/v3/pb" - "github.com/dgraph-io/badger/v3/skl" - "github.com/dgraph-io/badger/v3/y" - "github.com/dgraph-io/ristretto/z" "github.com/pkg/errors" + + "github.com/dgraph-io/badger/v4/pb" + "github.com/dgraph-io/badger/v4/skl" + "github.com/dgraph-io/badger/v4/y" + "github.com/dgraph-io/ristretto/v2/z" ) // memTable structure stores a skiplist and a corresponding WAL. Writes to memTable are written @@ -58,7 +58,7 @@ func (db *DB) openMemTables(opt Options) error { if db.opt.InMemory { return nil } - files, err := ioutil.ReadDir(db.opt.Dir) + files, err := os.ReadDir(db.opt.Dir) if err != nil { return errFile(err, db.opt.Dir, "Unable to open mem dir.") } @@ -147,8 +147,6 @@ func (db *DB) openMemTable(fid, flags int) (*memTable, error) { return mt, y.Wrapf(err, "while updating skiplist") } -var errExpectingNewFile = errors.New("Expecting to create a new file, but found an existing file") - func (db *DB) newMemTable() (*memTable, error) { mt, err := db.openMemTable(db.nextMemFid, os.O_CREATE|os.O_RDWR) if err == z.NewFile { @@ -209,6 +207,7 @@ func (mt *memTable) Put(key []byte, value y.ValueStruct) error { if ts := y.ParseTs(entry.Key); ts > mt.maxVersion { mt.maxVersion = ts } + y.NumBytesWrittenToL0Add(mt.opt.MetricsEnabled, entry.estimateSizeAndSetThreshold(mt.opt.ValueThreshold)) return nil } @@ -220,8 +219,8 @@ func (mt *memTable) UpdateSkipList() error { if err != nil { return y.Wrapf(err, "while iterating wal: %s", mt.wal.Fd.Name()) } - if endOff < mt.wal.size && mt.opt.ReadOnly { - return y.Wrapf(ErrTruncateNeeded, "end offset: %d < size: %d", endOff, mt.wal.size) + if endOff < mt.wal.size.Load() && mt.opt.ReadOnly { + return y.Wrapf(ErrTruncateNeeded, "end offset: %d < size: %d", endOff, mt.wal.size.Load()) } return mt.wal.Truncate(int64(endOff)) } @@ -270,7 +269,7 @@ type logFile struct { // exclusive ownership to open/close the descriptor, unmap or remove the file. lock sync.RWMutex fid uint32 - size uint32 + size atomic.Uint32 dataKey *pb.DataKey baseIV []byte registry *KeyRegistry @@ -285,7 +284,7 @@ func (lf *logFile) Truncate(end int64) error { return nil } y.AssertTrue(!lf.opt.ReadOnly) - lf.size = uint32(end) + lf.size.Store(uint32(end)) return lf.MmapFile.Truncate(end) } @@ -390,14 +389,13 @@ func (lf *logFile) encryptionEnabled() bool { // Acquire lock on mmap/file if you are calling this func (lf *logFile) read(p valuePointer) (buf []byte, err error) { - var nbr int64 offset := p.Offset // Do not convert size to uint32, because the lf.Data can be of size // 4GB, which overflows the uint32 during conversion to make the size 0, // causing the read to fail with ErrEOF. See issue #585. size := int64(len(lf.Data)) valsz := p.Len - lfsz := atomic.LoadUint32(&lf.size) + lfsz := lf.size.Load() if int64(offset) >= size || int64(offset+valsz) > size || // Ensure that the read is within the file's actual size. 
It might be possible that // the offset+valsz length is beyond the file's actual size. This could happen when @@ -406,10 +404,7 @@ func (lf *logFile) read(p valuePointer) (buf []byte, err error) { err = y.ErrEOF } else { buf = lf.Data[offset : offset+valsz] - nbr = int64(valsz) } - y.NumReadsAdd(lf.opt.MetricsEnabled, 1) - y.NumBytesReadAdd(lf.opt.MetricsEnabled, nbr) return buf, err } @@ -489,7 +484,7 @@ loop: } var vp valuePointer - vp.Len = uint32(int(e.hlen) + len(e.Key) + len(e.Value) + crc32.Size) + vp.Len = uint32(e.hlen + len(e.Key) + len(e.Value) + crc32.Size) read.recordOffset += vp.Len vp.Offset = e.offset @@ -561,14 +556,14 @@ func (lf *logFile) open(path string, flags int, fsize int64) error { os.Remove(path) return err } - lf.size = vlogHeaderSize + lf.size.Store(vlogHeaderSize) } else if ferr != nil { return y.Wrapf(ferr, "while opening file: %s", path) } - lf.size = uint32(len(lf.Data)) + lf.size.Store(uint32(len(lf.Data))) - if lf.size < vlogHeaderSize { + if lf.size.Load() < vlogHeaderSize { // Every vlog file should have at least vlogHeaderSize. If it is less than vlogHeaderSize // then it must have been corrupted. But no need to handle here. log replayer will truncate // and bootstrap the logfile. So ignoring here. @@ -579,7 +574,7 @@ func (lf *logFile) open(path string, flags int, fsize int64) error { buf := make([]byte, vlogHeaderSize) y.AssertTruef(vlogHeaderSize == copy(buf, lf.Data), - "Unable to copy from %s, size %d", path, lf.size) + "Unable to copy from %s, size %d", path, lf.size.Load()) keyID := binary.BigEndian.Uint64(buf[:8]) // retrieve datakey. if dk, err := lf.registry.DataKey(keyID); err != nil { diff --git a/vendor/github.com/dgraph-io/badger/v3/merge.go b/vendor/github.com/dgraph-io/badger/v4/merge.go similarity index 97% rename from vendor/github.com/dgraph-io/badger/v3/merge.go rename to vendor/github.com/dgraph-io/badger/v4/merge.go index ac1a2b51e7..ec610e67ac 100644 --- a/vendor/github.com/dgraph-io/badger/v3/merge.go +++ b/vendor/github.com/dgraph-io/badger/v4/merge.go @@ -17,12 +17,12 @@ package badger import ( + stderrors "errors" "sync" "time" - "github.com/dgraph-io/badger/v3/y" - "github.com/dgraph-io/ristretto/z" - "github.com/pkg/errors" + "github.com/dgraph-io/badger/v4/y" + "github.com/dgraph-io/ristretto/v2/z" ) // MergeOperator represents a Badger merge operator. @@ -57,7 +57,7 @@ func (db *DB) GetMergeOperator(key []byte, return op } -var errNoMerge = errors.New("No need for merge") +var errNoMerge = stderrors.New("No need for merge") func (op *MergeOperator) iterateAndMerge() (newVal []byte, latest uint64, err error) { txn := op.db.NewTransaction(false) diff --git a/vendor/github.com/dgraph-io/badger/v3/options.go b/vendor/github.com/dgraph-io/badger/v4/options.go similarity index 95% rename from vendor/github.com/dgraph-io/badger/v3/options.go rename to vendor/github.com/dgraph-io/badger/v4/options.go index 2963355608..06ec2b6bec 100644 --- a/vendor/github.com/dgraph-io/badger/v3/options.go +++ b/vendor/github.com/dgraph-io/badger/v4/options.go @@ -24,12 +24,12 @@ import ( "strings" "time" - "github.com/dgraph-io/ristretto/z" "github.com/pkg/errors" - "github.com/dgraph-io/badger/v3/options" - "github.com/dgraph-io/badger/v3/table" - "github.com/dgraph-io/badger/v3/y" + "github.com/dgraph-io/badger/v4/options" + "github.com/dgraph-io/badger/v4/table" + "github.com/dgraph-io/badger/v4/y" + "github.com/dgraph-io/ristretto/v2/z" ) // Note: If you add a new option X make sure you also add a WithX method on Options. 
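 //
 // Taken together, the option changes in this file mean a v3 caller migrates
 // roughly as follows (a minimal sketch; the magic value 7 and the path are
 // hypothetical):
 //
 //	opt := badger.DefaultOptions("/data/badger").
 //		WithExternalMagic(7).         // new in v4: app-level manifest magic
 //		WithNumCompactors(4).         // v4 default, up from 2
 //		WithBlockCacheSize(256 << 20) // v4 default; the cache is used with compression/encryption
 //	db, err := badger.Open(opt)
 //	// A mismatched external magic in an existing directory surfaces as an
 //	// error from Open rather than as a misread manifest.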
@@ -42,6 +42,8 @@ import ( // // Each option X is documented on the WithX method. type Options struct { + testOnlyOptions + // Required options. Dir string @@ -112,6 +114,10 @@ type Options struct { // NamespaceOffset specifies the offset from where the next 8 bytes contains the namespace. NamespaceOffset int + // Magic version used by the application using badger to ensure that it doesn't open the DB + // with an incompatible data format. + ExternalMagicVersion uint16 + // Transaction start and commit timestamps are managed by end-user. // This is only useful for databases built on top of Badger (like Dgraph). // Not recommended for most users. @@ -167,8 +173,6 @@ func DefaultOptions(path string) Options { // Benchmark code can be found in table/builder_test.go file ZSTDCompressionLevel: 1, - // Nothing to read/write value log using standard File I/O - // MemoryMap to mmap() the value log files // (2^30 - 1)*2 when mmapping < 2^31 - 1, max int32. // -1 so 2*ValueLogFileSize won't overflow on 32-bit systems. ValueLogFileSize: 1<<30 - 1, @@ -328,7 +332,7 @@ func (opt Options) FromSuperFlag(superflag string) Options { case reflect.Int, reflect.Int64: field.SetInt(flags.GetInt64(name)) case reflect.Uint32, reflect.Uint64: - field.SetUint(uint64(flags.GetUint64(name))) + field.SetUint(flags.GetUint64(name)) case reflect.Float64: field.SetFloat(flags.GetFloat64(name)) case reflect.String: @@ -457,7 +461,7 @@ func (opt Options) WithLoggingLevel(val loggingLevel) Options { return opt } -// WithBaseTableSize returns a new Options value with MaxTableSize set to the given value. +// WithBaseTableSize returns a new Options value with BaseTableSize set to the given value. // // BaseTableSize sets the maximum size in bytes for LSM table or file in the base level. // @@ -472,7 +476,7 @@ func (opt Options) WithBaseTableSize(val int64) Options { // // LevelSizeMultiplier sets the ratio between the maximum sizes of contiguous levels in the LSM. // Once a level grows to be larger than this ratio allowed, the compaction process will be -// triggered. +// triggered. // // The default value of LevelSizeMultiplier is 10. func (opt Options) WithLevelSizeMultiplier(val int) Options { @@ -495,7 +499,8 @@ func (opt Options) WithMaxLevels(val int) Options { // ValueThreshold sets the threshold used to decide whether a value is stored directly in the LSM // tree or separately in the log value files. // -// The default value of ValueThreshold is 1 MB, but LSMOnlyOptions sets it to maxValueThreshold. +// The default value of ValueThreshold is 1 MB, and LSMOnlyOptions sets it to maxValueThreshold, +// which is also set to 1 MB. func (opt Options) WithValueThreshold(val int64) Options { opt.ValueThreshold = val return opt @@ -510,7 +515,7 @@ func (opt Options) WithValueThreshold(val int64) Options { // and only 1 percent in vlog. The value threshold will be dynamically updated within the range of // [ValueThreshold, Options.maxValueThreshold] // -// Say VLogPercentile with 1.0 means threshold will eventually set to Options.maxValueThreshold +// # Say VLogPercentile with 1.0 means the threshold will eventually be set to Options.maxValueThreshold // // The default value of VLogPercentile is 0.0. func (opt Options) WithVLogPercentile(t float64) Options { @@ -576,7 +581,7 @@ func (opt Options) WithNumLevelZeroTables(val int) Options { // WithNumLevelZeroTablesStall sets the number of Level 0 tables that once reached causes the DB to // stall until compaction succeeds. // -// The default value of NumLevelZeroTablesStall is 10.
+// The default value of NumLevelZeroTablesStall is 15. func (opt Options) WithNumLevelZeroTablesStall(val int) Options { opt.NumLevelZeroTablesStall = val return opt @@ -611,7 +616,7 @@ func (opt Options) WithValueLogMaxEntries(val uint32) Options { // WithNumCompactors sets the number of compaction workers to run concurrently. Setting this to // zero stops compactions, which could eventually cause writes to block forever. // -// The default value of NumCompactors is 2. One is dedicated just for L0 and L1. +// The default value of NumCompactors is 4. One is dedicated just for L0 and L1. func (opt Options) WithNumCompactors(val int) Options { opt.NumCompactors = val return opt @@ -639,6 +644,8 @@ func (opt Options) WithEncryptionKey(key []byte) Options { // // Key Registry will use this duration to create new keys. If the previously generated // key has exceeded the given duration, then the key registry will create a new key. + +// The default value is set to 10 days. func (opt Options) WithEncryptionKeyRotationDuration(d time.Duration) Options { opt.EncryptionKeyRotationDuration = d return opt @@ -648,8 +655,7 @@ func (opt Options) WithEncryptionKeyRotationDuration(d time.Duration) Options { // block will be compressed using the specified algorithm. This option doesn't affect existing // tables. Only the newly created tables will be compressed. // -// The default compression algorithm used is zstd when built with Cgo. Without Cgo, the default is -// snappy. Compression is enabled by default. +// The default compression algorithm used is snappy. Compression is enabled by default. func (opt Options) WithCompression(cType options.CompressionType) Options { opt.Compression = cType return opt @@ -685,7 +691,7 @@ func (opt Options) WithChecksumVerificationMode(cvMode options.ChecksumVerificat // unnecessary overhead which will affect the read performance. Setting size to // zero disables the cache altogether. // -// Default value of BlockCacheSize is zero. +// Default value of BlockCacheSize is 256 MB. func (opt Options) WithBlockCacheSize(size int64) Options { opt.BlockCacheSize = size return opt @@ -779,6 +785,13 @@ func (opt Options) WithNamespaceOffset(offset int) Options { return opt } +// WithExternalMagic returns a new Options value with ExternalMagicVersion set to the given value. +// The DB will fail to start if either the internal or the external magic number fails validation. +func (opt Options) WithExternalMagic(magic uint16) Options { + opt.ExternalMagicVersion = magic + return opt +} + func (opt Options) getFileFlags() int { var flags int // opt.SyncWrites would be using msync to sync. All writes go through mmap. diff --git a/vendor/github.com/dgraph-io/badger/v3/options/options.go b/vendor/github.com/dgraph-io/badger/v4/options/options.go similarity index 100% rename from vendor/github.com/dgraph-io/badger/v3/options/options.go rename to vendor/github.com/dgraph-io/badger/v4/options/options.go diff --git a/vendor/github.com/dgraph-io/badger/v4/pb/badgerpb4.pb.go b/vendor/github.com/dgraph-io/badger/v4/pb/badgerpb4.pb.go new file mode 100644 index 0000000000..17bbfc36e2 --- /dev/null +++ b/vendor/github.com/dgraph-io/badger/v4/pb/badgerpb4.pb.go @@ -0,0 +1,870 @@ +// +// Copyright (C) 2017 Dgraph Labs, Inc. and Contributors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Use protos/gen.sh to generate .pb.go files. + +// Code generated by protoc-gen-go. DO NOT EDIT. +// versions: +// protoc-gen-go v1.31.0 +// protoc v3.21.12 +// source: badgerpb4.proto + +package pb + +import ( + protoreflect "google.golang.org/protobuf/reflect/protoreflect" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" + reflect "reflect" + sync "sync" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +type EncryptionAlgo int32 + +const ( + EncryptionAlgo_aes EncryptionAlgo = 0 +) + +// Enum value maps for EncryptionAlgo. +var ( + EncryptionAlgo_name = map[int32]string{ + 0: "aes", + } + EncryptionAlgo_value = map[string]int32{ + "aes": 0, + } +) + +func (x EncryptionAlgo) Enum() *EncryptionAlgo { + p := new(EncryptionAlgo) + *p = x + return p +} + +func (x EncryptionAlgo) String() string { + return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x)) +} + +func (EncryptionAlgo) Descriptor() protoreflect.EnumDescriptor { + return file_badgerpb4_proto_enumTypes[0].Descriptor() +} + +func (EncryptionAlgo) Type() protoreflect.EnumType { + return &file_badgerpb4_proto_enumTypes[0] +} + +func (x EncryptionAlgo) Number() protoreflect.EnumNumber { + return protoreflect.EnumNumber(x) +} + +// Deprecated: Use EncryptionAlgo.Descriptor instead. +func (EncryptionAlgo) EnumDescriptor() ([]byte, []int) { + return file_badgerpb4_proto_rawDescGZIP(), []int{0} +} + +type ManifestChange_Operation int32 + +const ( + ManifestChange_CREATE ManifestChange_Operation = 0 + ManifestChange_DELETE ManifestChange_Operation = 1 +) + +// Enum value maps for ManifestChange_Operation. +var ( + ManifestChange_Operation_name = map[int32]string{ + 0: "CREATE", + 1: "DELETE", + } + ManifestChange_Operation_value = map[string]int32{ + "CREATE": 0, + "DELETE": 1, + } +) + +func (x ManifestChange_Operation) Enum() *ManifestChange_Operation { + p := new(ManifestChange_Operation) + *p = x + return p +} + +func (x ManifestChange_Operation) String() string { + return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x)) +} + +func (ManifestChange_Operation) Descriptor() protoreflect.EnumDescriptor { + return file_badgerpb4_proto_enumTypes[1].Descriptor() +} + +func (ManifestChange_Operation) Type() protoreflect.EnumType { + return &file_badgerpb4_proto_enumTypes[1] +} + +func (x ManifestChange_Operation) Number() protoreflect.EnumNumber { + return protoreflect.EnumNumber(x) +} + +// Deprecated: Use ManifestChange_Operation.Descriptor instead. +func (ManifestChange_Operation) EnumDescriptor() ([]byte, []int) { + return file_badgerpb4_proto_rawDescGZIP(), []int{3, 0} +} + +type Checksum_Algorithm int32 + +const ( + Checksum_CRC32C Checksum_Algorithm = 0 + Checksum_XXHash64 Checksum_Algorithm = 1 +) + +// Enum value maps for Checksum_Algorithm. 
+var ( + Checksum_Algorithm_name = map[int32]string{ + 0: "CRC32C", + 1: "XXHash64", + } + Checksum_Algorithm_value = map[string]int32{ + "CRC32C": 0, + "XXHash64": 1, + } +) + +func (x Checksum_Algorithm) Enum() *Checksum_Algorithm { + p := new(Checksum_Algorithm) + *p = x + return p +} + +func (x Checksum_Algorithm) String() string { + return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x)) +} + +func (Checksum_Algorithm) Descriptor() protoreflect.EnumDescriptor { + return file_badgerpb4_proto_enumTypes[2].Descriptor() +} + +func (Checksum_Algorithm) Type() protoreflect.EnumType { + return &file_badgerpb4_proto_enumTypes[2] +} + +func (x Checksum_Algorithm) Number() protoreflect.EnumNumber { + return protoreflect.EnumNumber(x) +} + +// Deprecated: Use Checksum_Algorithm.Descriptor instead. +func (Checksum_Algorithm) EnumDescriptor() ([]byte, []int) { + return file_badgerpb4_proto_rawDescGZIP(), []int{4, 0} +} + +type KV struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Key []byte `protobuf:"bytes,1,opt,name=key,proto3" json:"key,omitempty"` + Value []byte `protobuf:"bytes,2,opt,name=value,proto3" json:"value,omitempty"` + UserMeta []byte `protobuf:"bytes,3,opt,name=user_meta,json=userMeta,proto3" json:"user_meta,omitempty"` + Version uint64 `protobuf:"varint,4,opt,name=version,proto3" json:"version,omitempty"` + ExpiresAt uint64 `protobuf:"varint,5,opt,name=expires_at,json=expiresAt,proto3" json:"expires_at,omitempty"` + Meta []byte `protobuf:"bytes,6,opt,name=meta,proto3" json:"meta,omitempty"` + // Stream id is used to identify which stream the KV came from. + StreamId uint32 `protobuf:"varint,10,opt,name=stream_id,json=streamId,proto3" json:"stream_id,omitempty"` + // Stream done is used to indicate end of stream. + StreamDone bool `protobuf:"varint,11,opt,name=stream_done,json=streamDone,proto3" json:"stream_done,omitempty"` +} + +func (x *KV) Reset() { + *x = KV{} + if protoimpl.UnsafeEnabled { + mi := &file_badgerpb4_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *KV) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*KV) ProtoMessage() {} + +func (x *KV) ProtoReflect() protoreflect.Message { + mi := &file_badgerpb4_proto_msgTypes[0] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use KV.ProtoReflect.Descriptor instead. 
+func (*KV) Descriptor() ([]byte, []int) { + return file_badgerpb4_proto_rawDescGZIP(), []int{0} +} + +func (x *KV) GetKey() []byte { + if x != nil { + return x.Key + } + return nil +} + +func (x *KV) GetValue() []byte { + if x != nil { + return x.Value + } + return nil +} + +func (x *KV) GetUserMeta() []byte { + if x != nil { + return x.UserMeta + } + return nil +} + +func (x *KV) GetVersion() uint64 { + if x != nil { + return x.Version + } + return 0 +} + +func (x *KV) GetExpiresAt() uint64 { + if x != nil { + return x.ExpiresAt + } + return 0 +} + +func (x *KV) GetMeta() []byte { + if x != nil { + return x.Meta + } + return nil +} + +func (x *KV) GetStreamId() uint32 { + if x != nil { + return x.StreamId + } + return 0 +} + +func (x *KV) GetStreamDone() bool { + if x != nil { + return x.StreamDone + } + return false +} + +type KVList struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Kv []*KV `protobuf:"bytes,1,rep,name=kv,proto3" json:"kv,omitempty"` + // alloc_ref used internally for memory management. + AllocRef uint64 `protobuf:"varint,10,opt,name=alloc_ref,json=allocRef,proto3" json:"alloc_ref,omitempty"` +} + +func (x *KVList) Reset() { + *x = KVList{} + if protoimpl.UnsafeEnabled { + mi := &file_badgerpb4_proto_msgTypes[1] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *KVList) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*KVList) ProtoMessage() {} + +func (x *KVList) ProtoReflect() protoreflect.Message { + mi := &file_badgerpb4_proto_msgTypes[1] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use KVList.ProtoReflect.Descriptor instead. +func (*KVList) Descriptor() ([]byte, []int) { + return file_badgerpb4_proto_rawDescGZIP(), []int{1} +} + +func (x *KVList) GetKv() []*KV { + if x != nil { + return x.Kv + } + return nil +} + +func (x *KVList) GetAllocRef() uint64 { + if x != nil { + return x.AllocRef + } + return 0 +} + +type ManifestChangeSet struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // A set of changes that are applied atomically. + Changes []*ManifestChange `protobuf:"bytes,1,rep,name=changes,proto3" json:"changes,omitempty"` +} + +func (x *ManifestChangeSet) Reset() { + *x = ManifestChangeSet{} + if protoimpl.UnsafeEnabled { + mi := &file_badgerpb4_proto_msgTypes[2] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *ManifestChangeSet) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ManifestChangeSet) ProtoMessage() {} + +func (x *ManifestChangeSet) ProtoReflect() protoreflect.Message { + mi := &file_badgerpb4_proto_msgTypes[2] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ManifestChangeSet.ProtoReflect.Descriptor instead. 
+func (*ManifestChangeSet) Descriptor() ([]byte, []int) { + return file_badgerpb4_proto_rawDescGZIP(), []int{2} +} + +func (x *ManifestChangeSet) GetChanges() []*ManifestChange { + if x != nil { + return x.Changes + } + return nil +} + +type ManifestChange struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Id uint64 `protobuf:"varint,1,opt,name=Id,proto3" json:"Id,omitempty"` // Table ID. + Op ManifestChange_Operation `protobuf:"varint,2,opt,name=Op,proto3,enum=badgerpb4.ManifestChange_Operation" json:"Op,omitempty"` + Level uint32 `protobuf:"varint,3,opt,name=Level,proto3" json:"Level,omitempty"` // Only used for CREATE. + KeyId uint64 `protobuf:"varint,4,opt,name=key_id,json=keyId,proto3" json:"key_id,omitempty"` + EncryptionAlgo EncryptionAlgo `protobuf:"varint,5,opt,name=encryption_algo,json=encryptionAlgo,proto3,enum=badgerpb4.EncryptionAlgo" json:"encryption_algo,omitempty"` + Compression uint32 `protobuf:"varint,6,opt,name=compression,proto3" json:"compression,omitempty"` // Only used for CREATE Op. +} + +func (x *ManifestChange) Reset() { + *x = ManifestChange{} + if protoimpl.UnsafeEnabled { + mi := &file_badgerpb4_proto_msgTypes[3] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *ManifestChange) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ManifestChange) ProtoMessage() {} + +func (x *ManifestChange) ProtoReflect() protoreflect.Message { + mi := &file_badgerpb4_proto_msgTypes[3] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ManifestChange.ProtoReflect.Descriptor instead. 
+func (*ManifestChange) Descriptor() ([]byte, []int) { + return file_badgerpb4_proto_rawDescGZIP(), []int{3} +} + +func (x *ManifestChange) GetId() uint64 { + if x != nil { + return x.Id + } + return 0 +} + +func (x *ManifestChange) GetOp() ManifestChange_Operation { + if x != nil { + return x.Op + } + return ManifestChange_CREATE +} + +func (x *ManifestChange) GetLevel() uint32 { + if x != nil { + return x.Level + } + return 0 +} + +func (x *ManifestChange) GetKeyId() uint64 { + if x != nil { + return x.KeyId + } + return 0 +} + +func (x *ManifestChange) GetEncryptionAlgo() EncryptionAlgo { + if x != nil { + return x.EncryptionAlgo + } + return EncryptionAlgo_aes +} + +func (x *ManifestChange) GetCompression() uint32 { + if x != nil { + return x.Compression + } + return 0 +} + +type Checksum struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Algo Checksum_Algorithm `protobuf:"varint,1,opt,name=algo,proto3,enum=badgerpb4.Checksum_Algorithm" json:"algo,omitempty"` // For storing type of Checksum algorithm used + Sum uint64 `protobuf:"varint,2,opt,name=sum,proto3" json:"sum,omitempty"` +} + +func (x *Checksum) Reset() { + *x = Checksum{} + if protoimpl.UnsafeEnabled { + mi := &file_badgerpb4_proto_msgTypes[4] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *Checksum) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Checksum) ProtoMessage() {} + +func (x *Checksum) ProtoReflect() protoreflect.Message { + mi := &file_badgerpb4_proto_msgTypes[4] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use Checksum.ProtoReflect.Descriptor instead. +func (*Checksum) Descriptor() ([]byte, []int) { + return file_badgerpb4_proto_rawDescGZIP(), []int{4} +} + +func (x *Checksum) GetAlgo() Checksum_Algorithm { + if x != nil { + return x.Algo + } + return Checksum_CRC32C +} + +func (x *Checksum) GetSum() uint64 { + if x != nil { + return x.Sum + } + return 0 +} + +type DataKey struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + KeyId uint64 `protobuf:"varint,1,opt,name=key_id,json=keyId,proto3" json:"key_id,omitempty"` + Data []byte `protobuf:"bytes,2,opt,name=data,proto3" json:"data,omitempty"` + Iv []byte `protobuf:"bytes,3,opt,name=iv,proto3" json:"iv,omitempty"` + CreatedAt int64 `protobuf:"varint,4,opt,name=created_at,json=createdAt,proto3" json:"created_at,omitempty"` +} + +func (x *DataKey) Reset() { + *x = DataKey{} + if protoimpl.UnsafeEnabled { + mi := &file_badgerpb4_proto_msgTypes[5] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *DataKey) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*DataKey) ProtoMessage() {} + +func (x *DataKey) ProtoReflect() protoreflect.Message { + mi := &file_badgerpb4_proto_msgTypes[5] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use DataKey.ProtoReflect.Descriptor instead. 
+func (*DataKey) Descriptor() ([]byte, []int) { + return file_badgerpb4_proto_rawDescGZIP(), []int{5} +} + +func (x *DataKey) GetKeyId() uint64 { + if x != nil { + return x.KeyId + } + return 0 +} + +func (x *DataKey) GetData() []byte { + if x != nil { + return x.Data + } + return nil +} + +func (x *DataKey) GetIv() []byte { + if x != nil { + return x.Iv + } + return nil +} + +func (x *DataKey) GetCreatedAt() int64 { + if x != nil { + return x.CreatedAt + } + return 0 +} + +type Match struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Prefix []byte `protobuf:"bytes,1,opt,name=prefix,proto3" json:"prefix,omitempty"` + IgnoreBytes string `protobuf:"bytes,2,opt,name=ignore_bytes,json=ignoreBytes,proto3" json:"ignore_bytes,omitempty"` // Comma separated with dash to represent ranges "1, 2-3, 4-7, 9" +} + +func (x *Match) Reset() { + *x = Match{} + if protoimpl.UnsafeEnabled { + mi := &file_badgerpb4_proto_msgTypes[6] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *Match) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Match) ProtoMessage() {} + +func (x *Match) ProtoReflect() protoreflect.Message { + mi := &file_badgerpb4_proto_msgTypes[6] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use Match.ProtoReflect.Descriptor instead. +func (*Match) Descriptor() ([]byte, []int) { + return file_badgerpb4_proto_rawDescGZIP(), []int{6} +} + +func (x *Match) GetPrefix() []byte { + if x != nil { + return x.Prefix + } + return nil +} + +func (x *Match) GetIgnoreBytes() string { + if x != nil { + return x.IgnoreBytes + } + return "" +} + +var File_badgerpb4_proto protoreflect.FileDescriptor + +var file_badgerpb4_proto_rawDesc = []byte{ + 0x0a, 0x0f, 0x62, 0x61, 0x64, 0x67, 0x65, 0x72, 0x70, 0x62, 0x34, 0x2e, 0x70, 0x72, 0x6f, 0x74, + 0x6f, 0x12, 0x09, 0x62, 0x61, 0x64, 0x67, 0x65, 0x72, 0x70, 0x62, 0x34, 0x22, 0xd4, 0x01, 0x0a, + 0x02, 0x4b, 0x56, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0c, + 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, + 0x20, 0x01, 0x28, 0x0c, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x12, 0x1b, 0x0a, 0x09, 0x75, + 0x73, 0x65, 0x72, 0x5f, 0x6d, 0x65, 0x74, 0x61, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x08, + 0x75, 0x73, 0x65, 0x72, 0x4d, 0x65, 0x74, 0x61, 0x12, 0x18, 0x0a, 0x07, 0x76, 0x65, 0x72, 0x73, + 0x69, 0x6f, 0x6e, 0x18, 0x04, 0x20, 0x01, 0x28, 0x04, 0x52, 0x07, 0x76, 0x65, 0x72, 0x73, 0x69, + 0x6f, 0x6e, 0x12, 0x1d, 0x0a, 0x0a, 0x65, 0x78, 0x70, 0x69, 0x72, 0x65, 0x73, 0x5f, 0x61, 0x74, + 0x18, 0x05, 0x20, 0x01, 0x28, 0x04, 0x52, 0x09, 0x65, 0x78, 0x70, 0x69, 0x72, 0x65, 0x73, 0x41, + 0x74, 0x12, 0x12, 0x0a, 0x04, 0x6d, 0x65, 0x74, 0x61, 0x18, 0x06, 0x20, 0x01, 0x28, 0x0c, 0x52, + 0x04, 0x6d, 0x65, 0x74, 0x61, 0x12, 0x1b, 0x0a, 0x09, 0x73, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x5f, + 0x69, 0x64, 0x18, 0x0a, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x08, 0x73, 0x74, 0x72, 0x65, 0x61, 0x6d, + 0x49, 0x64, 0x12, 0x1f, 0x0a, 0x0b, 0x73, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x5f, 0x64, 0x6f, 0x6e, + 0x65, 0x18, 0x0b, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0a, 0x73, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x44, + 0x6f, 0x6e, 0x65, 0x22, 0x44, 0x0a, 0x06, 0x4b, 0x56, 0x4c, 0x69, 0x73, 0x74, 0x12, 0x1d, 0x0a, + 0x02, 0x6b, 
0x76, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x0d, 0x2e, 0x62, 0x61, 0x64, 0x67, + 0x65, 0x72, 0x70, 0x62, 0x34, 0x2e, 0x4b, 0x56, 0x52, 0x02, 0x6b, 0x76, 0x12, 0x1b, 0x0a, 0x09, + 0x61, 0x6c, 0x6c, 0x6f, 0x63, 0x5f, 0x72, 0x65, 0x66, 0x18, 0x0a, 0x20, 0x01, 0x28, 0x04, 0x52, + 0x08, 0x61, 0x6c, 0x6c, 0x6f, 0x63, 0x52, 0x65, 0x66, 0x22, 0x48, 0x0a, 0x11, 0x4d, 0x61, 0x6e, + 0x69, 0x66, 0x65, 0x73, 0x74, 0x43, 0x68, 0x61, 0x6e, 0x67, 0x65, 0x53, 0x65, 0x74, 0x12, 0x33, + 0x0a, 0x07, 0x63, 0x68, 0x61, 0x6e, 0x67, 0x65, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, + 0x19, 0x2e, 0x62, 0x61, 0x64, 0x67, 0x65, 0x72, 0x70, 0x62, 0x34, 0x2e, 0x4d, 0x61, 0x6e, 0x69, + 0x66, 0x65, 0x73, 0x74, 0x43, 0x68, 0x61, 0x6e, 0x67, 0x65, 0x52, 0x07, 0x63, 0x68, 0x61, 0x6e, + 0x67, 0x65, 0x73, 0x22, 0x8d, 0x02, 0x0a, 0x0e, 0x4d, 0x61, 0x6e, 0x69, 0x66, 0x65, 0x73, 0x74, + 0x43, 0x68, 0x61, 0x6e, 0x67, 0x65, 0x12, 0x0e, 0x0a, 0x02, 0x49, 0x64, 0x18, 0x01, 0x20, 0x01, + 0x28, 0x04, 0x52, 0x02, 0x49, 0x64, 0x12, 0x33, 0x0a, 0x02, 0x4f, 0x70, 0x18, 0x02, 0x20, 0x01, + 0x28, 0x0e, 0x32, 0x23, 0x2e, 0x62, 0x61, 0x64, 0x67, 0x65, 0x72, 0x70, 0x62, 0x34, 0x2e, 0x4d, + 0x61, 0x6e, 0x69, 0x66, 0x65, 0x73, 0x74, 0x43, 0x68, 0x61, 0x6e, 0x67, 0x65, 0x2e, 0x4f, 0x70, + 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x02, 0x4f, 0x70, 0x12, 0x14, 0x0a, 0x05, 0x4c, + 0x65, 0x76, 0x65, 0x6c, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x05, 0x4c, 0x65, 0x76, 0x65, + 0x6c, 0x12, 0x15, 0x0a, 0x06, 0x6b, 0x65, 0x79, 0x5f, 0x69, 0x64, 0x18, 0x04, 0x20, 0x01, 0x28, + 0x04, 0x52, 0x05, 0x6b, 0x65, 0x79, 0x49, 0x64, 0x12, 0x42, 0x0a, 0x0f, 0x65, 0x6e, 0x63, 0x72, + 0x79, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x61, 0x6c, 0x67, 0x6f, 0x18, 0x05, 0x20, 0x01, 0x28, + 0x0e, 0x32, 0x19, 0x2e, 0x62, 0x61, 0x64, 0x67, 0x65, 0x72, 0x70, 0x62, 0x34, 0x2e, 0x45, 0x6e, + 0x63, 0x72, 0x79, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x41, 0x6c, 0x67, 0x6f, 0x52, 0x0e, 0x65, 0x6e, + 0x63, 0x72, 0x79, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x41, 0x6c, 0x67, 0x6f, 0x12, 0x20, 0x0a, 0x0b, + 0x63, 0x6f, 0x6d, 0x70, 0x72, 0x65, 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x06, 0x20, 0x01, 0x28, + 0x0d, 0x52, 0x0b, 0x63, 0x6f, 0x6d, 0x70, 0x72, 0x65, 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x22, 0x23, + 0x0a, 0x09, 0x4f, 0x70, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x0a, 0x0a, 0x06, 0x43, + 0x52, 0x45, 0x41, 0x54, 0x45, 0x10, 0x00, 0x12, 0x0a, 0x0a, 0x06, 0x44, 0x45, 0x4c, 0x45, 0x54, + 0x45, 0x10, 0x01, 0x22, 0x76, 0x0a, 0x08, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x73, 0x75, 0x6d, 0x12, + 0x31, 0x0a, 0x04, 0x61, 0x6c, 0x67, 0x6f, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x1d, 0x2e, + 0x62, 0x61, 0x64, 0x67, 0x65, 0x72, 0x70, 0x62, 0x34, 0x2e, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x73, + 0x75, 0x6d, 0x2e, 0x41, 0x6c, 0x67, 0x6f, 0x72, 0x69, 0x74, 0x68, 0x6d, 0x52, 0x04, 0x61, 0x6c, + 0x67, 0x6f, 0x12, 0x10, 0x0a, 0x03, 0x73, 0x75, 0x6d, 0x18, 0x02, 0x20, 0x01, 0x28, 0x04, 0x52, + 0x03, 0x73, 0x75, 0x6d, 0x22, 0x25, 0x0a, 0x09, 0x41, 0x6c, 0x67, 0x6f, 0x72, 0x69, 0x74, 0x68, + 0x6d, 0x12, 0x0a, 0x0a, 0x06, 0x43, 0x52, 0x43, 0x33, 0x32, 0x43, 0x10, 0x00, 0x12, 0x0c, 0x0a, + 0x08, 0x58, 0x58, 0x48, 0x61, 0x73, 0x68, 0x36, 0x34, 0x10, 0x01, 0x22, 0x63, 0x0a, 0x07, 0x44, + 0x61, 0x74, 0x61, 0x4b, 0x65, 0x79, 0x12, 0x15, 0x0a, 0x06, 0x6b, 0x65, 0x79, 0x5f, 0x69, 0x64, + 0x18, 0x01, 0x20, 0x01, 0x28, 0x04, 0x52, 0x05, 0x6b, 0x65, 0x79, 0x49, 0x64, 0x12, 0x12, 0x0a, + 0x04, 0x64, 0x61, 0x74, 0x61, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x04, 0x64, 0x61, 0x74, + 0x61, 0x12, 0x0e, 0x0a, 0x02, 0x69, 
0x76, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x02, 0x69, + 0x76, 0x12, 0x1d, 0x0a, 0x0a, 0x63, 0x72, 0x65, 0x61, 0x74, 0x65, 0x64, 0x5f, 0x61, 0x74, 0x18, + 0x04, 0x20, 0x01, 0x28, 0x03, 0x52, 0x09, 0x63, 0x72, 0x65, 0x61, 0x74, 0x65, 0x64, 0x41, 0x74, + 0x22, 0x42, 0x0a, 0x05, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x12, 0x16, 0x0a, 0x06, 0x70, 0x72, 0x65, + 0x66, 0x69, 0x78, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x06, 0x70, 0x72, 0x65, 0x66, 0x69, + 0x78, 0x12, 0x21, 0x0a, 0x0c, 0x69, 0x67, 0x6e, 0x6f, 0x72, 0x65, 0x5f, 0x62, 0x79, 0x74, 0x65, + 0x73, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x69, 0x67, 0x6e, 0x6f, 0x72, 0x65, 0x42, + 0x79, 0x74, 0x65, 0x73, 0x2a, 0x19, 0x0a, 0x0e, 0x45, 0x6e, 0x63, 0x72, 0x79, 0x70, 0x74, 0x69, + 0x6f, 0x6e, 0x41, 0x6c, 0x67, 0x6f, 0x12, 0x07, 0x0a, 0x03, 0x61, 0x65, 0x73, 0x10, 0x00, 0x42, + 0x23, 0x5a, 0x21, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x64, 0x67, + 0x72, 0x61, 0x70, 0x68, 0x2d, 0x69, 0x6f, 0x2f, 0x62, 0x61, 0x64, 0x67, 0x65, 0x72, 0x2f, 0x76, + 0x34, 0x2f, 0x70, 0x62, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, +} + +var ( + file_badgerpb4_proto_rawDescOnce sync.Once + file_badgerpb4_proto_rawDescData = file_badgerpb4_proto_rawDesc +) + +func file_badgerpb4_proto_rawDescGZIP() []byte { + file_badgerpb4_proto_rawDescOnce.Do(func() { + file_badgerpb4_proto_rawDescData = protoimpl.X.CompressGZIP(file_badgerpb4_proto_rawDescData) + }) + return file_badgerpb4_proto_rawDescData +} + +var file_badgerpb4_proto_enumTypes = make([]protoimpl.EnumInfo, 3) +var file_badgerpb4_proto_msgTypes = make([]protoimpl.MessageInfo, 7) +var file_badgerpb4_proto_goTypes = []interface{}{ + (EncryptionAlgo)(0), // 0: badgerpb4.EncryptionAlgo + (ManifestChange_Operation)(0), // 1: badgerpb4.ManifestChange.Operation + (Checksum_Algorithm)(0), // 2: badgerpb4.Checksum.Algorithm + (*KV)(nil), // 3: badgerpb4.KV + (*KVList)(nil), // 4: badgerpb4.KVList + (*ManifestChangeSet)(nil), // 5: badgerpb4.ManifestChangeSet + (*ManifestChange)(nil), // 6: badgerpb4.ManifestChange + (*Checksum)(nil), // 7: badgerpb4.Checksum + (*DataKey)(nil), // 8: badgerpb4.DataKey + (*Match)(nil), // 9: badgerpb4.Match +} +var file_badgerpb4_proto_depIdxs = []int32{ + 3, // 0: badgerpb4.KVList.kv:type_name -> badgerpb4.KV + 6, // 1: badgerpb4.ManifestChangeSet.changes:type_name -> badgerpb4.ManifestChange + 1, // 2: badgerpb4.ManifestChange.Op:type_name -> badgerpb4.ManifestChange.Operation + 0, // 3: badgerpb4.ManifestChange.encryption_algo:type_name -> badgerpb4.EncryptionAlgo + 2, // 4: badgerpb4.Checksum.algo:type_name -> badgerpb4.Checksum.Algorithm + 5, // [5:5] is the sub-list for method output_type + 5, // [5:5] is the sub-list for method input_type + 5, // [5:5] is the sub-list for extension type_name + 5, // [5:5] is the sub-list for extension extendee + 0, // [0:5] is the sub-list for field type_name +} + +func init() { file_badgerpb4_proto_init() } +func file_badgerpb4_proto_init() { + if File_badgerpb4_proto != nil { + return + } + if !protoimpl.UnsafeEnabled { + file_badgerpb4_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*KV); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_badgerpb4_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*KVList); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + 
file_badgerpb4_proto_msgTypes[2].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*ManifestChangeSet); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_badgerpb4_proto_msgTypes[3].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*ManifestChange); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_badgerpb4_proto_msgTypes[4].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*Checksum); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_badgerpb4_proto_msgTypes[5].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*DataKey); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_badgerpb4_proto_msgTypes[6].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*Match); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + } + type x struct{} + out := protoimpl.TypeBuilder{ + File: protoimpl.DescBuilder{ + GoPackagePath: reflect.TypeOf(x{}).PkgPath(), + RawDescriptor: file_badgerpb4_proto_rawDesc, + NumEnums: 3, + NumMessages: 7, + NumExtensions: 0, + NumServices: 0, + }, + GoTypes: file_badgerpb4_proto_goTypes, + DependencyIndexes: file_badgerpb4_proto_depIdxs, + EnumInfos: file_badgerpb4_proto_enumTypes, + MessageInfos: file_badgerpb4_proto_msgTypes, + }.Build() + File_badgerpb4_proto = out.File + file_badgerpb4_proto_rawDesc = nil + file_badgerpb4_proto_goTypes = nil + file_badgerpb4_proto_depIdxs = nil +} diff --git a/vendor/github.com/dgraph-io/badger/v3/pb/badgerpb3.proto b/vendor/github.com/dgraph-io/badger/v4/pb/badgerpb4.proto similarity index 94% rename from vendor/github.com/dgraph-io/badger/v3/pb/badgerpb3.proto rename to vendor/github.com/dgraph-io/badger/v4/pb/badgerpb4.proto index d0df21fb33..62bfc90b62 100644 --- a/vendor/github.com/dgraph-io/badger/v3/pb/badgerpb3.proto +++ b/vendor/github.com/dgraph-io/badger/v4/pb/badgerpb4.proto @@ -17,9 +17,9 @@ // Use protos/gen.sh to generate .pb.go files. syntax = "proto3"; -package badgerpb3; +package badgerpb4; -option go_package = "github.com/dgraph-io/badger/v3/pb"; +option go_package = "github.com/dgraph-io/badger/v4/pb"; message KV { bytes key = 1; @@ -54,8 +54,8 @@ enum EncryptionAlgo { message ManifestChange { uint64 Id = 1; // Table ID. enum Operation { - CREATE = 0; - DELETE = 1; + CREATE = 0; + DELETE = 1; } Operation Op = 2; uint32 Level = 3; // Only used for CREATE. diff --git a/vendor/github.com/dgraph-io/badger/v4/pb/gen.sh b/vendor/github.com/dgraph-io/badger/v4/pb/gen.sh new file mode 100644 index 0000000000..f09cb24ee5 --- /dev/null +++ b/vendor/github.com/dgraph-io/badger/v4/pb/gen.sh @@ -0,0 +1,7 @@ +#!/bin/bash + +# Run this script from its directory, so that badgerpb4.proto is where it's expected to +# be. + +go install google.golang.org/protobuf/cmd/protoc-gen-go@v1.31.0 +protoc --go_out=. 
--go_opt=paths=source_relative badgerpb4.proto diff --git a/vendor/github.com/dgraph-io/badger/v3/publisher.go b/vendor/github.com/dgraph-io/badger/v4/publisher.go similarity index 87% rename from vendor/github.com/dgraph-io/badger/v3/publisher.go rename to vendor/github.com/dgraph-io/badger/v4/publisher.go index f4c31b2ea6..6b13a34564 100644 --- a/vendor/github.com/dgraph-io/badger/v3/publisher.go +++ b/vendor/github.com/dgraph-io/badger/v4/publisher.go @@ -20,20 +20,20 @@ import ( "sync" "sync/atomic" - "github.com/dgraph-io/badger/v3/pb" - "github.com/dgraph-io/badger/v3/trie" - "github.com/dgraph-io/badger/v3/y" - "github.com/dgraph-io/ristretto/z" + "github.com/dgraph-io/badger/v4/pb" + "github.com/dgraph-io/badger/v4/trie" + "github.com/dgraph-io/badger/v4/y" + "github.com/dgraph-io/ristretto/v2/z" ) type subscriber struct { - id uint64 + id uint64 matches []pb.Match sendCh chan *pb.KVList subCloser *z.Closer // this is an atomic pointer which will be used to // track whether the subscriber is active or not - active *uint64 + active *atomic.Uint64 } type publisher struct { @@ -111,32 +111,35 @@ func (p *publisher) publishUpdates(reqs requests) { } for id, kvs := range batchedUpdates { - if atomic.LoadUint64(p.subscribers[id].active) == 1 { + if p.subscribers[id].active.Load() == 1 { p.subscribers[id].sendCh <- kvs } } } -func (p *publisher) newSubscriber(c *z.Closer, matches []pb.Match) subscriber { +func (p *publisher) newSubscriber(c *z.Closer, matches []pb.Match) (subscriber, error) { p.Lock() defer p.Unlock() ch := make(chan *pb.KVList, 1000) id := p.nextID // Increment next ID. p.nextID++ - active := uint64(1) s := subscriber{ - active: &active, - id: id, + id: id, matches: matches, sendCh: ch, subCloser: c, + active: new(atomic.Uint64), } + s.active.Store(1) + p.subscribers[id] = s for _, m := range matches { - p.indexer.AddMatch(m, id) + if err := p.indexer.AddMatch(m, id); err != nil { + return subscriber{}, err + } } - return s + return s, nil } // cleanSubscribers stops all the subscribers. Ideally, it should be called while closing the DB. @@ -145,7 +148,7 @@ func (p *publisher) cleanSubscribers() { defer p.Unlock() for id, s := range p.subscribers { for _, m := range s.matches { - p.indexer.DeleteMatch(m, id) + _ = p.indexer.DeleteMatch(m, id) } delete(p.subscribers, id) s.subCloser.SignalAndWait() @@ -157,7 +160,7 @@ func (p *publisher) deleteSubscriber(id uint64) { defer p.Unlock() if s, ok := p.subscribers[id]; ok { for _, m := range s.matches { - p.indexer.DeleteMatch(m, id) + _ = p.indexer.DeleteMatch(m, id) } } delete(p.subscribers, id) diff --git a/vendor/github.com/dgraph-io/badger/v3/skl/README.md b/vendor/github.com/dgraph-io/badger/v4/skl/README.md similarity index 100% rename from vendor/github.com/dgraph-io/badger/v3/skl/README.md rename to vendor/github.com/dgraph-io/badger/v4/skl/README.md diff --git a/vendor/github.com/dgraph-io/badger/v3/skl/arena.go b/vendor/github.com/dgraph-io/badger/v4/skl/arena.go similarity index 93% rename from vendor/github.com/dgraph-io/badger/v3/skl/arena.go rename to vendor/github.com/dgraph-io/badger/v4/skl/arena.go index facf07325d..b471e8101c 100644 --- a/vendor/github.com/dgraph-io/badger/v3/skl/arena.go +++ b/vendor/github.com/dgraph-io/badger/v4/skl/arena.go @@ -20,7 +20,7 @@ import ( "sync/atomic" "unsafe" - "github.com/dgraph-io/badger/v3/y" + "github.com/dgraph-io/badger/v4/y" ) const ( @@ -35,7 +35,7 @@ const ( // Arena should be lock-free. 
type Arena struct { - n uint32 + n atomic.Uint32 buf []byte } @@ -43,15 +43,13 @@ type Arena struct { func newArena(n int64) *Arena { // Don't store data at position 0 in order to reserve offset=0 as a kind // of nil pointer. - out := &Arena{ - n: 1, - buf: make([]byte, n), - } + out := &Arena{buf: make([]byte, n)} + out.n.Store(1) return out } func (s *Arena) size() int64 { - return int64(atomic.LoadUint32(&s.n)) + return int64(s.n.Load()) } // putNode allocates a node in the arena. The node is aligned on a pointer-sized @@ -63,7 +61,7 @@ func (s *Arena) putNode(height int) uint32 { // Pad the allocation with enough bytes to ensure pointer alignment. l := uint32(MaxNodeSize - unusedSize + nodeAlign) - n := atomic.AddUint32(&s.n, l) + n := s.n.Add(l) y.AssertTruef(int(n) <= len(s.buf), "Arena too small, toWrite:%d newTotal:%d limit:%d", l, n, len(s.buf)) @@ -78,8 +76,8 @@ func (s *Arena) putNode(height int) uint32 { // size of val. We could also store this size inside arena but the encoding and // decoding will incur some overhead. func (s *Arena) putVal(v y.ValueStruct) uint32 { - l := uint32(v.EncodedSize()) - n := atomic.AddUint32(&s.n, l) + l := v.EncodedSize() + n := s.n.Add(l) y.AssertTruef(int(n) <= len(s.buf), "Arena too small, toWrite:%d newTotal:%d limit:%d", l, n, len(s.buf)) @@ -90,7 +88,7 @@ func (s *Arena) putVal(v y.ValueStruct) uint32 { func (s *Arena) putKey(key []byte) uint32 { l := uint32(len(key)) - n := atomic.AddUint32(&s.n, l) + n := s.n.Add(l) y.AssertTruef(int(n) <= len(s.buf), "Arena too small, toWrite:%d newTotal:%d limit:%d", l, n, len(s.buf)) diff --git a/vendor/github.com/dgraph-io/badger/v3/skl/skl.go b/vendor/github.com/dgraph-io/badger/v4/skl/skl.go similarity index 95% rename from vendor/github.com/dgraph-io/badger/v3/skl/skl.go rename to vendor/github.com/dgraph-io/badger/v4/skl/skl.go index a39d349210..7d122a8ac5 100644 --- a/vendor/github.com/dgraph-io/badger/v3/skl/skl.go +++ b/vendor/github.com/dgraph-io/badger/v4/skl/skl.go @@ -37,8 +37,8 @@ import ( "sync/atomic" "unsafe" - "github.com/dgraph-io/badger/v3/y" - "github.com/dgraph-io/ristretto/z" + "github.com/dgraph-io/badger/v4/y" + "github.com/dgraph-io/ristretto/v2/z" ) const ( @@ -54,7 +54,7 @@ type node struct { // can be atomically loaded and stored: // value offset: uint32 (bits 0-31) // value size : uint16 (bits 32-63) - value uint64 + value atomic.Uint64 // A byte slice is 24 bytes. We are trying to save space here. keyOffset uint32 // Immutable. No need to lock to access key. @@ -70,25 +70,25 @@ type node struct { // is deliberately truncated to not include unneeded tower elements. // // All accesses to elements should use CAS operations, with no need to lock. - tower [maxHeight]uint32 + tower [maxHeight]atomic.Uint32 } type Skiplist struct { - height int32 // Current height. 1 <= height <= kMaxHeight. CAS. + height atomic.Int32 // Current height. 1 <= height <= kMaxHeight. CAS. 
head *node - ref int32 + ref atomic.Int32 arena *Arena OnClose func() } // IncrRef increases the refcount func (s *Skiplist) IncrRef() { - atomic.AddInt32(&s.ref, 1) + s.ref.Add(1) } // DecrRef decrements the refcount, deallocating the Skiplist when done using it func (s *Skiplist) DecrRef() { - newRef := atomic.AddInt32(&s.ref, -1) + newRef := s.ref.Add(-1) if newRef > 0 { return } @@ -111,7 +111,7 @@ func newNode(arena *Arena, key []byte, v y.ValueStruct, height int) *node { node.keyOffset = arena.putKey(key) node.keySize = uint16(len(key)) node.height = uint16(height) - node.value = encodeValue(arena.putVal(v), v.EncodedSize()) + node.value.Store(encodeValue(arena.putVal(v), v.EncodedSize())) return node } @@ -129,16 +129,14 @@ func decodeValue(value uint64) (valOffset uint32, valSize uint32) { func NewSkiplist(arenaSize int64) *Skiplist { arena := newArena(arenaSize) head := newNode(arena, nil, y.ValueStruct{}, maxHeight) - return &Skiplist{ - height: 1, - head: head, - arena: arena, - ref: 1, - } + s := &Skiplist{head: head, arena: arena} + s.height.Store(1) + s.ref.Store(1) + return s } func (s *node) getValueOffset() (uint32, uint32) { - value := atomic.LoadUint64(&s.value) + value := s.value.Load() return decodeValue(value) } @@ -149,15 +147,15 @@ func (s *node) key(arena *Arena) []byte { func (s *node) setValue(arena *Arena, v y.ValueStruct) { valOffset := arena.putVal(v) value := encodeValue(valOffset, v.EncodedSize()) - atomic.StoreUint64(&s.value, value) + s.value.Store(value) } func (s *node) getNextOffset(h int) uint32 { - return atomic.LoadUint32(&s.tower[h]) + return s.tower[h].Load() } func (s *node) casNextOffset(h int, old, val uint32) bool { - return atomic.CompareAndSwapUint32(&s.tower[h], old, val) + return s.tower[h].CompareAndSwap(old, val) } // Returns true if key is strictly > n.key. @@ -279,7 +277,7 @@ func (s *Skiplist) findSpliceForLevel(key []byte, before *node, level int) (*nod } func (s *Skiplist) getHeight() int32 { - return atomic.LoadInt32(&s.height) + return s.height.Load() } // Put inserts the key-value pair. @@ -308,7 +306,7 @@ func (s *Skiplist) Put(key []byte, v y.ValueStruct) { // Try to increase s.height via CAS. listHeight = s.getHeight() for height > int(listHeight) { - if atomic.CompareAndSwapInt32(&s.height, listHeight, int32(height)) { + if s.height.CompareAndSwap(listHeight, int32(height)) { // Successfully increased skiplist.height. break } @@ -329,7 +327,7 @@ func (s *Skiplist) Put(key []byte, v y.ValueStruct) { y.AssertTrue(prev[i] != next[i]) } nextOffset := s.arena.getNodeOffset(next[i]) - x.tower[i] = nextOffset + x.tower[i].Store(nextOffset) if prev[i].casNextOffset(i, nextOffset, s.arena.getNodeOffset(x)) { // Managed to insert x between prev[i] and next[i]. Go to the next level. break @@ -431,7 +429,7 @@ func (s *Iterator) Value() y.ValueStruct { // ValueUint64 returns the uint64 value of the current node. func (s *Iterator) ValueUint64() uint64 { - return s.n.value + return s.n.value.Load() } // Next advances to the next position. 
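The skl/arena.go and skl/skl.go hunks above (like the memtable.go, publisher.go, and table/builder.go ones elsewhere in this patch) are one mechanical refactor repeated many times: a bare integer field driven through atomic.AddUint32/LoadUint32/CompareAndSwapUint32 becomes one of Go 1.19's typed atomics, whose methods make a racy plain read impossible to write by accident. A self-contained sketch of the before/after shape, using an invented counter type rather than Badger's own:

package main

import (
	"fmt"
	"sync"
	"sync/atomic"
)

// oldStyle mirrors the v3 layout: a plain field that is safe only as long
// as every access site remembers to go through the atomic.* free functions.
type oldStyle struct{ n uint32 }

func (c *oldStyle) inc() uint32  { return atomic.AddUint32(&c.n, 1) }
func (c *oldStyle) load() uint32 { return atomic.LoadUint32(&c.n) }

// newStyle mirrors the v4 layout: the type itself carries the guarantee,
// so c.n can no longer be read or written non-atomically by mistake.
type newStyle struct{ n atomic.Uint32 }

func (c *newStyle) inc() uint32  { return c.n.Add(1) }
func (c *newStyle) load() uint32 { return c.n.Load() }

func main() {
	var c newStyle
	var wg sync.WaitGroup
	for i := 0; i < 8; i++ {
		wg.Add(1)
		go func() { defer wg.Done(); c.inc() }()
	}
	wg.Wait()
	fmt.Println(c.load()) // always 8
}

One behavioural wrinkle of the new form is visible in the NewSkiplist hunk above: a typed atomic cannot be initialised in a struct literal, so constructors now Store their initial height and refcount after allocation.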
diff --git a/vendor/github.com/dgraph-io/badger/v3/stream.go b/vendor/github.com/dgraph-io/badger/v4/stream.go similarity index 91% rename from vendor/github.com/dgraph-io/badger/v3/stream.go rename to vendor/github.com/dgraph-io/badger/v4/stream.go index 0614f19aa5..934c8abcf9 100644 --- a/vendor/github.com/dgraph-io/badger/v3/stream.go +++ b/vendor/github.com/dgraph-io/badger/v4/stream.go @@ -24,10 +24,12 @@ import ( "sync/atomic" "time" - "github.com/dgraph-io/badger/v3/pb" - "github.com/dgraph-io/badger/v3/y" - "github.com/dgraph-io/ristretto/z" humanize "github.com/dustin/go-humanize" + "google.golang.org/protobuf/proto" + + "github.com/dgraph-io/badger/v4/pb" + "github.com/dgraph-io/badger/v4/y" + "github.com/dgraph-io/ristretto/v2/z" ) const batchSize = 16 << 20 // 16 MB @@ -61,6 +63,14 @@ type Stream struct { // Note: Calls to ChooseKey are concurrent. ChooseKey func(item *Item) bool + // MaxSize is the maximum allowed size of a stream batch. This is a soft limit, + // as a single list that is still over the limit will have to be sent as is, since it + // cannot be split further. This limit prevents the framework from creating batches + // so big that sending them causes issues (e.g. running into the maximum gRPC message size limit). + // If necessary, set it up before the Stream starts synchronisation. + // This is not a concurrency-safe setting. + MaxSize uint64 + // KeyToList, similar to ChooseKey, is only invoked on the highest version of the value. It // is up to the caller to iterate over the versions and generate zero, one or more KVs. It // is expected that the user would advance the iterator to go through the versions of the @@ -87,10 +97,10 @@ type Stream struct { db *DB rangeCh chan keyRange kvChan chan *z.Buffer - nextStreamId uint32 + nextStreamId atomic.Uint32 doneMarkers bool - scanned uint64 // used to estimate the ETA for data scan. - numProducers int32 + scanned atomic.Uint64 // used to estimate the ETA for data scan. + numProducers atomic.Int32 } // SendDoneMarkers when true would send out done markers on the stream. False by default. @@ -164,8 +174,8 @@ func (st *Stream) produceRanges(ctx context.Context) { // produceKVs picks up ranges from rangeCh, generates KV lists and sends them to kvChan. func (st *Stream) produceKVs(ctx context.Context, threadId int) error { - atomic.AddInt32(&st.numProducers, 1) - defer atomic.AddInt32(&st.numProducers, -1) + st.numProducers.Add(1) + defer st.numProducers.Add(-1) var txn *Txn if st.readTs > 0 { @@ -180,7 +190,7 @@ defer func() { // The outList variable changes. So, we need to evaluate the variable in the defer. DO NOT // call `defer outList.Release()`. - outList.Release() + _ = outList.Release() }() iterate := func(kr keyRange) error { @@ -197,14 +207,14 @@ defer itr.Alloc.Release() // This unique stream id is used to identify all the keys from this iteration. 
- streamId := atomic.AddUint32(&st.nextStreamId, 1) + streamId := st.nextStreamId.Add(1) var scanned int sendIt := func() error { select { case st.kvChan <- outList: outList = z.NewBuffer(2*batchSize, "Stream.ProduceKVs") - atomic.AddUint64(&st.scanned, uint64(itr.scanned-scanned)) + st.scanned.Add(uint64(itr.scanned - scanned)) scanned = itr.scanned case <-ctx.Done(): return ctx.Err() @@ -294,7 +304,7 @@ func (st *Stream) streamKVs(ctx context.Context) error { now := time.Now() sendBatch := func(batch *z.Buffer) error { - defer batch.Release() + defer func() { _ = batch.Release() }() sz := uint64(batch.LenNoPadding()) if sz == 0 { return nil @@ -314,7 +324,7 @@ func (st *Stream) streamKVs(ctx context.Context) error { // Send the batch immediately if it already exceeds the maximum allowed size. // If the size of the batch exceeds maxStreamSize, break from the loop to // avoid creating a batch that is so big that certain limits are reached. - if batch.LenNoPadding() > int(maxStreamSize) { + if uint64(batch.LenNoPadding()) > st.MaxSize { break loop } select { @@ -346,9 +356,9 @@ outer: // Instead of calculating speed over the entire lifetime, we average the speed over // ticker duration. writeRate.Capture(bytesSent) - scanned := atomic.LoadUint64(&st.scanned) + scanned := st.scanned.Load() scanRate.Capture(scanned) - numProducers := atomic.LoadInt32(&st.numProducers) + numProducers := st.numProducers.Load() st.db.opt.Infof("%s [%s] Scan (%d): ~%s/%s at %s/sec. Sent: %s at %s/sec."+ " jemalloc: %s\n", @@ -431,7 +441,7 @@ func (st *Stream) Orchestrate(ctx context.Context) error { defer func() { // If due to some error, we have buffers left in kvChan, we should release them. for buf := range st.kvChan { - buf.Release() + _ = buf.Release() } }() @@ -451,6 +461,7 @@ func (db *DB) newStream() *Stream { db: db, NumGo: db.opt.NumGoroutines, LogPrefix: "Badger.Stream", + MaxSize: maxStreamSize, } } @@ -476,7 +487,7 @@ func BufferToKVList(buf *z.Buffer) (*pb.KVList, error) { var list pb.KVList err := buf.SliceIterate(func(s []byte) error { kv := new(pb.KV) - if err := kv.Unmarshal(s); err != nil { + if err := proto.Unmarshal(s, kv); err != nil { return err } list.Kv = append(list.Kv, kv) @@ -486,6 +497,7 @@ func BufferToKVList(buf *z.Buffer) (*pb.KVList, error) { } func KVToBuffer(kv *pb.KV, buf *z.Buffer) { - out := buf.SliceAllocate(kv.Size()) - y.Check2(kv.MarshalToSizedBuffer(out)) + in := buf.SliceAllocate(proto.Size(kv))[:0] + _, err := proto.MarshalOptions{}.MarshalAppend(in, kv) + y.AssertTrue(err == nil) } diff --git a/vendor/github.com/dgraph-io/badger/v3/stream_writer.go b/vendor/github.com/dgraph-io/badger/v4/stream_writer.go similarity index 82% rename from vendor/github.com/dgraph-io/badger/v3/stream_writer.go rename to vendor/github.com/dgraph-io/badger/v4/stream_writer.go index 4613718cce..0059c1eae3 100644 --- a/vendor/github.com/dgraph-io/badger/v3/stream_writer.go +++ b/vendor/github.com/dgraph-io/badger/v4/stream_writer.go @@ -21,12 +21,14 @@ import ( "fmt" "sync" - "github.com/dgraph-io/badger/v3/pb" - "github.com/dgraph-io/badger/v3/table" - "github.com/dgraph-io/badger/v3/y" - "github.com/dgraph-io/ristretto/z" humanize "github.com/dustin/go-humanize" "github.com/pkg/errors" + "google.golang.org/protobuf/proto" + + "github.com/dgraph-io/badger/v4/pb" + "github.com/dgraph-io/badger/v4/table" + "github.com/dgraph-io/badger/v4/y" + "github.com/dgraph-io/ristretto/v2/z" ) // StreamWriter is used to write data coming from multiple streams. 
The streams must not have any @@ -47,6 +49,7 @@ type StreamWriter struct { throttle *y.Throttle maxVersion uint64 writers map[uint32]*sortedWriter + prevLevel int } // NewStreamWriter creates a StreamWriter. Right after creating StreamWriter, Prepare must be @@ -66,18 +69,72 @@ func (db *DB) NewStreamWriter() *StreamWriter { // Prepare should be called before writing any entry to StreamWriter. It deletes all data present in // existing DB, stops compactions and any writes being done by other means. Be very careful when // calling Prepare, because it could result in permanent data loss. Not calling Prepare would result -// in a corrupt Badger instance. +// in a corrupt Badger instance. Use PrepareIncremental to do incremental stream write. func (sw *StreamWriter) Prepare() error { sw.writeLock.Lock() defer sw.writeLock.Unlock() done, err := sw.db.dropAll() + // Ensure that done() is never called more than once. + var once sync.Once + sw.done = func() { once.Do(done) } + return err +} + +// PrepareIncremental should be called before writing any entry to StreamWriter incrementally. +// In incremental stream write, the tables are written at one level above the current base level. +func (sw *StreamWriter) PrepareIncremental() error { + sw.writeLock.Lock() + defer sw.writeLock.Unlock() // Ensure that done() is never called more than once. var once sync.Once + + // prepareToDrop will stop all the incoming writes and process any pending flush tasks. + // Before we start writing, we'll stop the compactions because no one else should be writing to + // the same level as the stream writer is writing to. + f, err := sw.db.prepareToDrop() + if err != nil { + sw.done = func() { once.Do(f) } + return err + } + sw.db.stopCompactions() + done := func() { + sw.db.startCompactions() + f() + } sw.done = func() { once.Do(done) } - return err + mts, decr := sw.db.getMemTables() + defer decr() + for _, m := range mts { + if !m.sl.Empty() { + return fmt.Errorf("Unable to do incremental writes because MemTable has data") + } + } + + isEmptyDB := true + for _, level := range sw.db.Levels() { + if level.NumTables > 0 { + sw.prevLevel = level.Level + isEmptyDB = false + break + } + } + if isEmptyDB { + // If DB is empty, we should allow doing incremental stream write. + return nil + } + if sw.prevLevel == 0 { + // It seems that data is present in all levels from Lmax to L0. If we call flatten + // on the tree, all the data will go to Lmax. All the levels above will be empty + // after flatten call. Now, we should be able to use incremental stream writer again. + if err := sw.db.Flatten(3); err != nil { + return errors.Wrapf(err, "error during flatten in StreamWriter") + } + sw.prevLevel = len(sw.db.Levels()) - 1 + } + return nil } // Write writes KVList to DB. Each KV within the list contains the stream id which StreamWriter @@ -96,7 +153,7 @@ func (sw *StreamWriter) Write(buf *z.Buffer) error { err := buf.SliceIterate(func(s []byte) error { var kv pb.KV - if err := kv.Unmarshal(s); err != nil { + if err := proto.Unmarshal(s, &kv); err != nil { return err } if kv.StreamDone { @@ -109,6 +166,18 @@ func (sw *StreamWriter) Write(buf *z.Buffer) error { panic(fmt.Sprintf("write performed on closed stream: %d", kv.StreamId)) } + sw.writeLock.Lock() + if sw.maxVersion < kv.Version { + sw.maxVersion = kv.Version + } + if sw.prevLevel == 0 { + // If prevLevel is 0, that means that we have not written anything yet. + // So, we can write to the maxLevel. 
newWriter writes to prevLevel - 1, + // so we can set prevLevel to len(levels). + sw.prevLevel = len(sw.db.lc.levels) + } + sw.writeLock.Unlock() + var meta, userMeta byte if len(kv.Meta) > 0 { meta = kv.Meta[0] @@ -116,9 +185,6 @@ func (sw *StreamWriter) Write(buf *z.Buffer) error { if len(kv.UserMeta) > 0 { userMeta = kv.UserMeta[0] } - if sw.maxVersion < kv.Version { - sw.maxVersion = kv.Version - } e := &Entry{ Key: y.KeyWithTs(kv.Key, kv.Version), Value: y.Copy(kv.Value), @@ -178,7 +244,7 @@ func (sw *StreamWriter) Write(buf *z.Buffer) error { for streamId := range closedStreams { writer, ok := sw.writers[streamId] if !ok { - sw.db.opt.Logger.Warningf("Trying to close stream: %d, but no sorted "+ + sw.db.opt.Warningf("Trying to close stream: %d, but no sorted "+ "writer found for it", streamId) continue } @@ -220,6 +286,11 @@ func (sw *StreamWriter) Flush() error { if sw.db.orc != nil { sw.db.orc.Stop() } + + if curMax := sw.db.orc.readTs(); curMax >= sw.maxVersion { + sw.maxVersion = curMax + } + sw.db.orc = newOracle(sw.db.opt) sw.db.orc.nextTxnTs = sw.maxVersion sw.db.orc.txnMark.Done(sw.maxVersion) @@ -284,6 +355,7 @@ type sortedWriter struct { builder *table.Builder lastKey []byte + level int streamID uint32 reqCh chan *request // Have separate closer for each writer, as it can be closed at any time. @@ -303,6 +375,7 @@ func (sw *StreamWriter) newWriter(streamID uint32) (*sortedWriter, error) { builder: table.NewTableBuilder(bopts), reqCh: make(chan *request, 3), closer: z.NewCloser(1), + level: sw.prevLevel - 1, // Write at the level just above the one we were writing to. } go w.handleRequests() @@ -434,7 +507,7 @@ func (w *sortedWriter) createTable(builder *table.Builder) error { } lc := w.db.lc - lhandler := lc.levels[len(lc.levels)-1] + lhandler := lc.levels[w.level] // Now that table can be opened successfully, let's add this to the MANIFEST. change := &pb.ManifestChange{ Id: tbl.ID(), diff --git a/vendor/github.com/dgraph-io/badger/v3/structs.go b/vendor/github.com/dgraph-io/badger/v4/structs.go similarity index 98% rename from vendor/github.com/dgraph-io/badger/v3/structs.go rename to vendor/github.com/dgraph-io/badger/v4/structs.go index c17f818cf7..7a06d4f13e 100644 --- a/vendor/github.com/dgraph-io/badger/v3/structs.go +++ b/vendor/github.com/dgraph-io/badger/v4/structs.go @@ -72,8 +72,8 @@ type header struct { const ( // Maximum possible size of the header. The maximum size of header struct will be 18 but the - // maximum size of varint encoded header will be 21. - maxHeaderSize = 21 + // maximum size of varint encoded header will be 22. + maxHeaderSize = 22 ) // Encode encodes the header into []byte. The provided []byte should be at least 5 bytes. 
The @@ -174,6 +174,7 @@ func (e *Entry) skipVlogAndSetThreshold(threshold int64) bool { return int64(len(e.Value)) < e.valThreshold } +//nolint:unused func (e Entry) print(prefix string) { fmt.Printf("%s Key: %s Meta: %d UserMeta: %d Offset: %d len(val)=%d", prefix, e.Key, e.meta, e.UserMeta, e.offset, len(e.Value)) diff --git a/vendor/github.com/dgraph-io/badger/v3/table/README.md b/vendor/github.com/dgraph-io/badger/v4/table/README.md similarity index 100% rename from vendor/github.com/dgraph-io/badger/v3/table/README.md rename to vendor/github.com/dgraph-io/badger/v4/table/README.md diff --git a/vendor/github.com/dgraph-io/badger/v3/table/builder.go b/vendor/github.com/dgraph-io/badger/v4/table/builder.go similarity index 94% rename from vendor/github.com/dgraph-io/badger/v3/table/builder.go rename to vendor/github.com/dgraph-io/badger/v4/table/builder.go index 5bab48a72f..7d439f338a 100644 --- a/vendor/github.com/dgraph-io/badger/v3/table/builder.go +++ b/vendor/github.com/dgraph-io/badger/v4/table/builder.go @@ -24,16 +24,16 @@ import ( "sync/atomic" "unsafe" - "github.com/dgraph-io/badger/v3/fb" - "github.com/golang/protobuf/proto" - "github.com/golang/snappy" fbs "github.com/google/flatbuffers/go" + "github.com/klauspost/compress/s2" "github.com/pkg/errors" + "google.golang.org/protobuf/proto" - "github.com/dgraph-io/badger/v3/options" - "github.com/dgraph-io/badger/v3/pb" - "github.com/dgraph-io/badger/v3/y" - "github.com/dgraph-io/ristretto/z" + "github.com/dgraph-io/badger/v4/fb" + "github.com/dgraph-io/badger/v4/options" + "github.com/dgraph-io/badger/v4/pb" + "github.com/dgraph-io/badger/v4/y" + "github.com/dgraph-io/ristretto/v2/z" ) const ( @@ -80,11 +80,10 @@ type Builder struct { // Typically tens or hundreds of meg. This is for one single file. alloc *z.Allocator curBlock *bblock - compressedSize uint32 - uncompressedSize uint32 + compressedSize atomic.Uint32 + uncompressedSize atomic.Uint32 lenOffsets uint32 - estimatedSize uint32 keyHashes []uint32 // Used for building the bloomfilter. opts *Options maxVersion uint64 @@ -157,6 +156,16 @@ func NewTableBuilder(opts Options) *Builder { return b } +func maxEncodedLen(ctype options.CompressionType, sz int) int { + switch ctype { + case options.Snappy: + return s2.MaxEncodedLen(sz) + case options.ZSTD: + return y.ZSTDCompressBound(sz) + } + return sz +} + func (b *Builder) handleBlock() { defer b.wg.Done() @@ -179,13 +188,13 @@ func (b *Builder) handleBlock() { // BlockBuf should always less than or equal to allocated space. If the blockBuf is greater // than allocated space that means the data from this block cannot be stored in its // existing location. - allocatedSpace := (item.end) + padding + 1 + allocatedSpace := maxEncodedLen(b.opts.Compression, (item.end)) + padding + 1 y.AssertTrue(len(blockBuf) <= allocatedSpace) // blockBuf was allocated on allocator. So, we don't need to copy it over. item.data = blockBuf item.end = len(blockBuf) - atomic.AddUint32(&b.compressedSize, uint32(len(blockBuf))) + b.compressedSize.Add(uint32(len(blockBuf))) } } @@ -276,7 +285,7 @@ func (b *Builder) finishBlock() { b.append(y.U32ToBytes(uint32(len(checksum)))) b.blockList = append(b.blockList, b.curBlock) - atomic.AddUint32(&b.uncompressedSize, uint32(b.curBlock.end)) + b.uncompressedSize.Add(uint32(b.curBlock.end)) // Add length of baseKey (rounded to next multiple of 4 because of alignment). 
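builder.go also swaps github.com/golang/snappy for klauspost's s2 package: s2.EncodeSnappy emits the Snappy block format, so blocks written by either encoder stay mutually decodable while s2 is generally faster, and maxEncodedLen above sizes block allocations with the same bound. A standalone round-trip sketch of that pattern:

```go
package main

import (
	"fmt"

	"github.com/klauspost/compress/s2"
	"github.com/klauspost/compress/snappy"
)

func main() {
	src := []byte("some table block contents")

	// Size the destination up front with MaxEncodedLen, mirroring how the
	// builder pre-allocates block space.
	dst := make([]byte, s2.MaxEncodedLen(len(src)))
	compressed := s2.EncodeSnappy(dst, src)

	// The output is Snappy-compatible, so a plain Snappy decoder reads it.
	out, err := snappy.Decode(nil, compressed)
	fmt.Println(string(out), err)
}
```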
// Add another 40 Bytes, these additional 40 bytes consists of @@ -291,7 +300,6 @@ func (b *Builder) finishBlock() { if b.blockChan != nil { b.blockChan <- b.curBlock } - return } func (b *Builder) shouldFinishBlock(key []byte, value y.ValueStruct) bool { @@ -308,7 +316,7 @@ func (b *Builder) shouldFinishBlock(key []byte, value y.ValueStruct) bool { 8 + // Sum64 in checksum proto 4) // checksum length estimatedSize := uint32(b.curBlock.end) + uint32(6 /*header size for entry*/) + - uint32(len(key)) + uint32(value.EncodedSize()) + entriesOffsetsSize + uint32(len(key)) + value.EncodedSize() + entriesOffsetsSize if b.shouldEncrypt() { // IV is added at the end of the block, while encrypting. @@ -360,9 +368,9 @@ func (b *Builder) addInternal(key []byte, value y.ValueStruct, valueLen uint32, // ReachedCapacity returns true if we... roughly (?) reached capacity? func (b *Builder) ReachedCapacity() bool { // If encryption/compression is enabled then use the compresssed size. - sumBlockSizes := atomic.LoadUint32(&b.compressedSize) + sumBlockSizes := b.compressedSize.Load() if b.opts.Compression == options.None && b.opts.DataKey == nil { - sumBlockSizes = b.uncompressedSize + sumBlockSizes = b.uncompressedSize.Load() } blocksSize := sumBlockSizes + // actual length of current buffer uint32(len(b.curBlock.entryOffsets)*4) + // all entry offsets size @@ -515,9 +523,9 @@ func (b *Builder) compressData(data []byte) ([]byte, error) { case options.None: return data, nil case options.Snappy: - sz := snappy.MaxEncodedLen(len(data)) + sz := s2.MaxEncodedLen(len(data)) dst := b.alloc.Allocate(sz) - return snappy.Encode(dst, data), nil + return s2.EncodeSnappy(dst, data), nil case options.ZSTD: sz := y.ZSTDCompressBound(len(data)) dst := b.alloc.Allocate(sz) @@ -549,7 +557,7 @@ func (b *Builder) buildIndex(bloom []byte) ([]byte, uint32) { fb.TableIndexAddOffsets(builder, boEnd) fb.TableIndexAddBloomFilter(builder, bfoff) fb.TableIndexAddMaxVersion(builder, b.maxVersion) - fb.TableIndexAddUncompressedSize(builder, b.uncompressedSize) + fb.TableIndexAddUncompressedSize(builder, b.uncompressedSize.Load()) fb.TableIndexAddKeyCount(builder, uint32(len(b.keyHashes))) fb.TableIndexAddOnDiskSize(builder, b.onDiskSize) fb.TableIndexAddStaleDataSize(builder, uint32(b.staleDataSize)) diff --git a/vendor/github.com/dgraph-io/badger/v3/table/iterator.go b/vendor/github.com/dgraph-io/badger/v4/table/iterator.go similarity index 98% rename from vendor/github.com/dgraph-io/badger/v3/table/iterator.go rename to vendor/github.com/dgraph-io/badger/v4/table/iterator.go index 26c9043c28..d99203e39c 100644 --- a/vendor/github.com/dgraph-io/badger/v3/table/iterator.go +++ b/vendor/github.com/dgraph-io/badger/v4/table/iterator.go @@ -22,8 +22,8 @@ import ( "io" "sort" - "github.com/dgraph-io/badger/v3/fb" - "github.com/dgraph-io/badger/v3/y" + "github.com/dgraph-io/badger/v4/fb" + "github.com/dgraph-io/badger/v4/y" ) type blockIterator struct { @@ -34,7 +34,7 @@ type blockIterator struct { key []byte val []byte entryOffsets []uint32 - block *block + block *Block tableID uint64 blockID int @@ -43,7 +43,7 @@ type blockIterator struct { prevOverlap uint16 } -func (itr *blockIterator) setBlock(b *block) { +func (itr *blockIterator) setBlock(b *Block) { // Decrement the ref for the old block. If the old block was compressed, we // might be able to reuse it. 
itr.block.decrRef() diff --git a/vendor/github.com/dgraph-io/badger/v3/table/merge_iterator.go b/vendor/github.com/dgraph-io/badger/v4/table/merge_iterator.go similarity index 98% rename from vendor/github.com/dgraph-io/badger/v3/table/merge_iterator.go rename to vendor/github.com/dgraph-io/badger/v4/table/merge_iterator.go index 789a24fd7b..6e89fbbc24 100644 --- a/vendor/github.com/dgraph-io/badger/v3/table/merge_iterator.go +++ b/vendor/github.com/dgraph-io/badger/v4/table/merge_iterator.go @@ -19,7 +19,7 @@ package table import ( "bytes" - "github.com/dgraph-io/badger/v3/y" + "github.com/dgraph-io/badger/v4/y" ) // MergeIterator merges multiple iterators. @@ -115,7 +115,7 @@ func (mi *MergeIterator) fix() { case cmp < 0: // Small is less than bigger(). if mi.reverse { mi.swapSmall() - } else { + } else { //nolint:staticcheck // we don't need to do anything. Small already points to the smallest. } return diff --git a/vendor/github.com/dgraph-io/badger/v3/table/table.go b/vendor/github.com/dgraph-io/badger/v4/table/table.go similarity index 93% rename from vendor/github.com/dgraph-io/badger/v3/table/table.go rename to vendor/github.com/dgraph-io/badger/v4/table/table.go index 1378f9a36b..1f90a73e52 100644 --- a/vendor/github.com/dgraph-io/badger/v3/table/table.go +++ b/vendor/github.com/dgraph-io/badger/v4/table/table.go @@ -31,16 +31,17 @@ import ( "time" "unsafe" - "github.com/golang/protobuf/proto" - "github.com/golang/snappy" + "github.com/klauspost/compress/snappy" + "github.com/klauspost/compress/zstd" "github.com/pkg/errors" - - "github.com/dgraph-io/badger/v3/fb" - "github.com/dgraph-io/badger/v3/options" - "github.com/dgraph-io/badger/v3/pb" - "github.com/dgraph-io/badger/v3/y" - "github.com/dgraph-io/ristretto" - "github.com/dgraph-io/ristretto/z" + "google.golang.org/protobuf/proto" + + "github.com/dgraph-io/badger/v4/fb" + "github.com/dgraph-io/badger/v4/options" + "github.com/dgraph-io/badger/v4/pb" + "github.com/dgraph-io/badger/v4/y" + "github.com/dgraph-io/ristretto/v2" + "github.com/dgraph-io/ristretto/v2/z" ) const fileSuffix = ".sst" @@ -76,8 +77,8 @@ type Options struct { Compression options.CompressionType // Block cache is used to cache decompressed and decrypted blocks. - BlockCache *ristretto.Cache - IndexCache *ristretto.Cache + BlockCache *ristretto.Cache[[]byte, *Block] + IndexCache *ristretto.Cache[uint64, *fb.TableIndex] AllocPool *z.AllocatorPool @@ -102,7 +103,7 @@ type Table struct { _index *fb.TableIndex // Nil if encryption is enabled. Use fetchIndex to access. _cheap *cheapIndex - ref int32 // For file garbage collection. Atomic. + ref atomic.Int32 // For file garbage collection // The following are initialized once and const. smallest, biggest []byte // Smallest and largest keys (with timestamps). @@ -155,12 +156,12 @@ func (t *Table) CompressionType() options.CompressionType { // IncrRef increments the refcount (having to do with whether the file should be deleted) func (t *Table) IncrRef() { - atomic.AddInt32(&t.ref, 1) + t.ref.Add(1) } // DecrRef decrements the refcount and possibly deletes the table func (t *Table) DecrRef() error { - newRef := atomic.AddInt32(&t.ref, -1) + newRef := t.ref.Add(-1) if newRef == 0 { // We can safely delete this file, because for all the current files, we always have // at least one reference pointing to them. @@ -177,13 +178,11 @@ func (t *Table) DecrRef() error { } // BlockEvictHandler is used to reuse the byte slice stored in the block on cache eviction. 
-func BlockEvictHandler(value interface{}) { - if b, ok := value.(*block); ok { - b.decrRef() - } +func BlockEvictHandler(b *Block) { + b.decrRef() } -type block struct { +type Block struct { offset int data []byte checksum []byte @@ -191,19 +190,19 @@ type block struct { entryOffsets []uint32 // used to binary search an entry in the block. chkLen int // checksum length. freeMe bool // used to determine if the blocked should be reused. - ref int32 + ref atomic.Int32 } -var NumBlocks int32 +var NumBlocks atomic.Int32 // incrRef increments the ref of a block and return a bool indicating if the // increment was successful. A true value indicates that the block can be used. -func (b *block) incrRef() bool { +func (b *Block) incrRef() bool { for { // We can't blindly add 1 to ref. We need to check whether it has // reached zero first, because if it did, then we should absolutely not // use this block. - ref := atomic.LoadInt32(&b.ref) + ref := b.ref.Load() // The ref would not be equal to 0 unless the existing // block get evicted before this line. If the ref is zero, it means that // the block is already added the the blockPool and cannot be used @@ -216,12 +215,12 @@ func (b *block) incrRef() bool { // Increment the ref only if it is not zero and has not changed between // the time we read it and we're updating it. // - if atomic.CompareAndSwapInt32(&b.ref, ref, ref+1) { + if b.ref.CompareAndSwap(ref, ref+1) { return true } } } -func (b *block) decrRef() { +func (b *Block) decrRef() { if b == nil { return } @@ -232,21 +231,21 @@ func (b *block) decrRef() { // In case of an uncompressed block, the []byte is a reference to the // table.mmap []byte slice. Any attempt to write data to the mmap []byte // will lead to SEGFAULT. - if atomic.AddInt32(&b.ref, -1) == 0 { + if b.ref.Add(-1) == 0 { if b.freeMe { z.Free(b.data) } - atomic.AddInt32(&NumBlocks, -1) + NumBlocks.Add(-1) // blockPool.Put(&b.data) } - y.AssertTrue(atomic.LoadInt32(&b.ref) >= 0) + y.AssertTrue(b.ref.Load() >= 0) } -func (b *block) size() int64 { +func (b *Block) size() int64 { return int64(3*intSize /* Size of the offset, entriesIndexStart and chkLen */ + cap(b.data) + cap(b.checksum) + cap(b.entryOffsets)*4) } -func (b block) verifyCheckSum() error { +func (b *Block) verifyCheckSum() error { cs := &pb.Checksum{} if err := proto.Unmarshal(b.checksum, cs); err != nil { return y.Wrapf(err, "unable to unmarshal checksum for block") @@ -297,13 +296,14 @@ func OpenTable(mf *z.MmapFile, opts Options) (*Table, error) { } t := &Table{ MmapFile: mf, - ref: 1, // Caller is given one reference. id: id, opt: &opts, IsInmemory: false, tableSize: int(fileInfo.Size()), CreatedAt: fileInfo.ModTime(), } + // Caller is given one reference. + t.ref.Store(1) if err := t.initBiggestAndSmallest(); err != nil { return nil, y.Wrapf(err, "failed to initialize table") @@ -328,12 +328,13 @@ func OpenInMemoryTable(data []byte, id uint64, opt *Options) (*Table, error) { } t := &Table{ MmapFile: mf, - ref: 1, // Caller is given one reference. opt: opt, tableSize: len(data), IsInmemory: true, id: id, // It is important that each table gets a unique ID. } + // Caller is given one reference. 
+ t.ref.Store(1) if err := t.initBiggestAndSmallest(); err != nil { return nil, err @@ -379,7 +380,7 @@ func (t *Table) initBiggestAndSmallest() error { checksum := &pb.Checksum{} readPos -= checksumLen buf = t.readNoFail(readPos, checksumLen) - proto.Unmarshal(buf, checksum) + _ = proto.Unmarshal(buf, checksum) fmt.Fprintf(&debugBuf, "checksum: %+v ", checksum) // Read index size from the footer. @@ -518,7 +519,7 @@ func (t *Table) fetchIndex() *fb.TableIndex { panic("Index Cache must be set for encrypted workloads") } if val, ok := t.opt.IndexCache.Get(t.indexKey()); ok && val != nil { - return val.(*fb.TableIndex) + return val } index, err := t.readTableIndex() @@ -534,7 +535,7 @@ func (t *Table) offsets(ko *fb.BlockOffset, i int) bool { // block function return a new block. Each block holds a ref and the byte // slice stored in the block will be reused when the ref becomes zero. The // caller should release the block by calling block.decrRef() on it. -func (t *Table) block(idx int, useCache bool) (*block, error) { +func (t *Table) block(idx int, useCache bool) (*Block, error) { y.AssertTruef(idx >= 0, "idx=%d", idx) if idx >= t.offsetsLength() { return nil, errors.New("block out of index") @@ -546,20 +547,18 @@ func (t *Table) block(idx int, useCache bool) (*block, error) { // Use the block only if the increment was successful. The block // could get evicted from the cache between the Get() call and the // incrRef() call. - if b := blk.(*block); b.incrRef() { - return b, nil + if blk.incrRef() { + return blk, nil } } } var ko fb.BlockOffset y.AssertTrue(t.offsets(&ko, idx)) - blk := &block{ - offset: int(ko.Offset()), - ref: 1, - } + blk := &Block{offset: int(ko.Offset())} + blk.ref.Store(1) defer blk.decrRef() // Deal with any errors, where blk would not be returned. - atomic.AddInt32(&NumBlocks, 1) + NumBlocks.Add(1) var err error if blk.data, err = t.read(blk.offset, int(ko.Len())); err != nil { @@ -794,7 +793,7 @@ func NewFilename(id uint64, dir string) string { } // decompress decompresses the data stored in a block. -func (t *Table) decompress(b *block) error { +func (t *Table) decompress(b *Block) error { var dst []byte var err error @@ -809,7 +808,7 @@ func (t *Table) decompress(b *block) error { if sz, err := snappy.DecodedLen(b.data); err == nil { dst = z.Calloc(sz, "Table.Decompress") } else { - dst = z.Calloc(len(b.data) * 4, "Table.Decompress") // Take a guess. + dst = z.Calloc(len(b.data)*4, "Table.Decompress") // Take a guess. } b.data, err = snappy.Decode(dst, b.data) if err != nil { @@ -818,6 +817,11 @@ func (t *Table) decompress(b *block) error { } case options.ZSTD: sz := int(float64(t.opt.BlockSize) * 1.2) + // Get frame content size from header. 
+ var hdr zstd.Header + if err := hdr.Decode(b.data); err == nil && hdr.HasFCS && hdr.FrameContentSize < uint64(t.opt.BlockSize*2) { + sz = int(hdr.FrameContentSize) + } dst = z.Calloc(sz, "Table.Decompress") b.data, err = y.ZSTDDecompress(dst, b.data) if err != nil { @@ -828,7 +832,7 @@ func (t *Table) decompress(b *block) error { return errors.New("Unsupported compression type") } - if b.freeMe == true { + if b.freeMe { z.Free(src) b.freeMe = false } diff --git a/vendor/github.com/dgraph-io/badger/v3/test.sh b/vendor/github.com/dgraph-io/badger/v4/test.sh similarity index 60% rename from vendor/github.com/dgraph-io/badger/v3/test.sh rename to vendor/github.com/dgraph-io/badger/v4/test.sh index 15122767e3..53b5477aa6 100644 --- a/vendor/github.com/dgraph-io/badger/v3/test.sh +++ b/vendor/github.com/dgraph-io/badger/v4/test.sh @@ -16,7 +16,7 @@ fi # Run `go list` BEFORE setting GOFLAGS so that the output is in the right # format for grep. # export packages because the test will run in a sub process. -export packages=$(go list ./... | grep "github.com/dgraph-io/badger/v3/") +export packages=$(go list ./... | grep "github.com/dgraph-io/badger/v4/") tags="-tags=jemalloc" @@ -32,7 +32,7 @@ manual() { set -e for pkg in $packages; do echo "===> Testing $pkg" - go test $tags -timeout=25m $covermode $coverprofile -race -parallel 16 $pkg && write_coverage + go test $tags -timeout=25m $covermode $coverprofile -failfast -race -parallel 16 $pkg && write_coverage || return 1 done echo "==> DONE package tests" @@ -40,10 +40,10 @@ manual() { # Run the special Truncate test. rm -rf p set -e - go test $tags $timeout $covermode $coverprofile -run='TestTruncateVlogNoClose$' --manual=true && write_coverage + go test $tags $timeout $covermode $coverprofile -run='TestTruncateVlogNoClose$' -failfast --manual=true && write_coverage || return 1 truncate --size=4096 p/000000.vlog - go test $tags $timeout $covermode $coverprofile -run='TestTruncateVlogNoClose2$' --manual=true && write_coverage - go test $tags $timeout $covermode $coverprofile -run='TestTruncateVlogNoClose3$' --manual=true && write_coverage + go test $tags $timeout $covermode $coverprofile -run='TestTruncateVlogNoClose2$' -failfast --manual=true && write_coverage || return 1 + go test $tags $timeout $covermode $coverprofile -run='TestTruncateVlogNoClose3$' -failfast --manual=true && write_coverage || return 1 rm -rf p # TODO(ibrahim): Let's make these tests have Manual prefix. 
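The decompress change above stops guessing the output size (BlockSize * 1.2) whenever the zstd frame header carries an explicit content size. In isolation the pattern looks like this (a sketch using klauspost/compress; EncodeAll records the content size in the frame header by default):

```go
package main

import (
	"fmt"

	"github.com/klauspost/compress/zstd"
)

func main() {
	enc, _ := zstd.NewWriter(nil)
	frame := enc.EncodeAll([]byte("block contents to compress"), nil)

	var hdr zstd.Header
	// When HasFCS is set, the exact decompressed size can be allocated
	// instead of an estimate.
	if err := hdr.Decode(frame); err == nil && hdr.HasFCS {
		fmt.Println("decompressed size:", hdr.FrameContentSize)
	}
}
```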
@@ -52,25 +52,24 @@ manual() { # TestValueGCManaged # TestDropPrefix # TestDropAllManaged - go test $tags $timeout $covermode $coverprofile -run='TestBigKeyValuePairs$' --manual=true && write_coverage - go test $tags $timeout $covermode $coverprofile -run='TestPushValueLogLimit' --manual=true && write_coverage - go test $tags $timeout $covermode $coverprofile -run='TestKeyCount' --manual=true && write_coverage - go test $tags $timeout $covermode $coverprofile -run='TestIteratePrefix' --manual=true && write_coverage - go test $tags $timeout $covermode $coverprofile -run='TestIterateParallel' --manual=true && write_coverage - go test $tags $timeout $covermode $coverprofile -run='TestBigStream' --manual=true && write_coverage - go test $tags $timeout $covermode $coverprofile -run='TestGoroutineLeak' --manual=true && write_coverage - go test $tags $timeout $covermode $coverprofile -run='TestGetMore' --manual=true && write_coverage + go test $tags $timeout $covermode $coverprofile -failfast -run='TestBigKeyValuePairs$' --manual=true && write_coverage || return 1 + go test $tags $timeout $covermode $coverprofile -failfast -run='TestPushValueLogLimit' --manual=true && write_coverage || return 1 + go test $tags $timeout $covermode $coverprofile -failfast -run='TestKeyCount' --manual=true && write_coverage || return 1 + go test $tags $timeout $covermode $coverprofile -failfast -run='TestIteratePrefix' --manual=true && write_coverage || return 1 + go test $tags $timeout $covermode $coverprofile -failfast -run='TestIterateParallel' --manual=true && write_coverage || return 1 + go test $tags $timeout $covermode $coverprofile -failfast -run='TestBigStream' --manual=true && write_coverage || return 1 + go test $tags $timeout $covermode $coverprofile -failfast -run='TestGoroutineLeak' --manual=true && write_coverage || return 1 + go test $tags $timeout $covermode $coverprofile -failfast -run='TestGetMore' --manual=true && write_coverage || return 1 echo "==> DONE manual tests" } root() { # Run the normal tests. - # go test -timeout=25m -v -race github.com/dgraph-io/badger/v3/... + # go test -timeout=25m -v -race github.com/dgraph-io/badger/v4/... echo "==> Running root level tests." - set -e - go test $tags -v -race -parallel=16 -timeout=25m $covermode $coverprofile . && write_coverage + go test $tags -v -race -parallel=16 -timeout=25m -failfast $covermode $coverprofile . && write_coverage || return 1 echo "==> DONE root level tests" } @@ -94,8 +93,8 @@ stream() { } write_coverage() { - if [ $CI = "true" ]; then - if [ -f cover_tmp.out ]; then + if [[ $CI = "true" ]]; then + if [[ -f cover_tmp.out ]]; then sed -i '1d' cover_tmp.out cat cover_tmp.out >> cover.out && rm cover_tmp.out fi diff --git a/vendor/github.com/dgraph-io/badger/v4/test_extensions.go b/vendor/github.com/dgraph-io/badger/v4/test_extensions.go new file mode 100644 index 0000000000..f9c025b43a --- /dev/null +++ b/vendor/github.com/dgraph-io/badger/v4/test_extensions.go @@ -0,0 +1,79 @@ +/* + * Copyright 2023 Dgraph Labs, Inc. and Contributors + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package badger
+
+// Important: Do NOT import the "testing" package, as otherwise, that
+// will pull in imports into the production code that we do not want.
+
+// TODO: Consider using this with specific compilation tags so that it only
+// shows up when performing testing (e.g., specify build tag=unit).
+// We are not yet ready to do that, as it may impact customer usage as
+// well as requiring us to update the CI build flags. Moreover, the
+// current model does not actually incur any significant cost.
+// If we do this, we will also want to introduce a parallel file that
+// overrides some of these structs and functions with empty contents.
+
+// String constants for messages to be pushed to syncChan.
+const (
+	updateDiscardStatsMsg = "updateDiscardStats iteration done"
+	endVLogInitMsg        = "End: vlog.init(db)"
+)
+
+// testOnlyOptions specifies an extension to the type Options that we want to
+// use only in the context of testing.
+type testOnlyOptions struct {
+	// syncChan is used to listen for specific messages related to activities
+	// that can occur in a DB instance. Currently, this is only used in
+	// testing activities.
+	syncChan chan string
+}
+
+// testOnlyDBExtensions specifies an extension to the type DB that we want to
+// use only in the context of testing.
+type testOnlyDBExtensions struct {
+	syncChan chan string
+
+	// onCloseDiscardCapture will be populated by a DB instance during the
+	// process of performing the Close operation. Currently, we only consider
+	// using this during testing.
+	onCloseDiscardCapture map[uint64]uint64
+}
+
+// logToSyncChan sends a message to the DB's syncChan. Note that we expect
+// that the DB never closes this channel; the responsibility for
+// allocating and closing the channel belongs to the test module.
+// If db.syncChan is nil or has never been initialized, this will be
+// silently ignored.
+func (db *DB) logToSyncChan(msg string) {
+	if db.syncChan != nil {
+		db.syncChan <- msg
+	}
+}
+
+// captureDiscardStats will copy the contents of the discardStats file
+// maintained by vlog to the onCloseDiscardCapture map specified by
+// db.opt. Of course, if db.opt.onCloseDiscardCapture is nil (as expected
+// for a production system as opposed to a test system), this is a no-op.
+func (db *DB) captureDiscardStats() { + if db.onCloseDiscardCapture != nil { + db.vlog.discardStats.Lock() + db.vlog.discardStats.Iterate(func(id, val uint64) { + db.onCloseDiscardCapture[id] = val + }) + db.vlog.discardStats.Unlock() + } +} diff --git a/vendor/github.com/dgraph-io/badger/v3/trie/trie.go b/vendor/github.com/dgraph-io/badger/v4/trie/trie.go similarity index 98% rename from vendor/github.com/dgraph-io/badger/v3/trie/trie.go rename to vendor/github.com/dgraph-io/badger/v4/trie/trie.go index eac974ab22..2c250c3b82 100644 --- a/vendor/github.com/dgraph-io/badger/v3/trie/trie.go +++ b/vendor/github.com/dgraph-io/badger/v4/trie/trie.go @@ -21,9 +21,10 @@ import ( "strconv" "strings" - "github.com/dgraph-io/badger/v3/pb" - "github.com/dgraph-io/badger/v3/y" "github.com/pkg/errors" + + "github.com/dgraph-io/badger/v4/pb" + "github.com/dgraph-io/badger/v4/y" ) type node struct { @@ -68,7 +69,7 @@ func parseIgnoreBytes(ig string) ([]bool, error) { if len(r) == 0 || len(r) > 2 { return out, fmt.Errorf("Invalid range: %s", each) } - start, end := -1, -1 + start, end := -1, -1 //nolint:ineffassign if len(r) == 2 { idx, err := strconv.Atoi(strings.TrimSpace(r[1])) if err != nil { diff --git a/vendor/github.com/dgraph-io/badger/v3/txn.go b/vendor/github.com/dgraph-io/badger/v4/txn.go similarity index 98% rename from vendor/github.com/dgraph-io/badger/v3/txn.go rename to vendor/github.com/dgraph-io/badger/v4/txn.go index 7ab42e60d5..a7d0528aa9 100644 --- a/vendor/github.com/dgraph-io/badger/v3/txn.go +++ b/vendor/github.com/dgraph-io/badger/v4/txn.go @@ -26,9 +26,10 @@ import ( "sync" "sync/atomic" - "github.com/dgraph-io/badger/v3/y" - "github.com/dgraph-io/ristretto/z" "github.com/pkg/errors" + + "github.com/dgraph-io/badger/v4/y" + "github.com/dgraph-io/ristretto/v2/z" ) type oracle struct { @@ -261,7 +262,7 @@ type Txn struct { pendingWrites map[string]*Entry // cache stores any writes done by txn. duplicateWrites []*Entry // Used in managed mode to store duplicate entries. - numIterators int32 + numIterators atomic.Int32 discarded bool doneRead bool update bool // update is used to conditionally keep track of reads. @@ -519,7 +520,7 @@ func (txn *Txn) Discard() { if txn.discarded { // Avoid a re-run. return } - if atomic.LoadInt32(&txn.numIterators) > 0 { + if txn.numIterators.Load() > 0 { panic("Unclosed iterator at time of Txn.Discard.") } txn.discarded = true @@ -660,7 +661,9 @@ func (txn *Txn) Commit() error { // txn.conflictKeys can be zero if conflict detection is turned off. So we // should check txn.pendingWrites. if len(txn.pendingWrites) == 0 { - return nil // Nothing to do. + // Discard the transaction so that the read is marked done. + txn.Discard() + return nil } // Precheck before discarding txn. if err := txn.commitPrecheck(); err != nil { @@ -715,6 +718,8 @@ func (txn *Txn) CommitWith(cb func(error)) { // callback might be acquiring the same locks. Instead run the callback // from another goroutine. go runTxnCallback(&txnCb{user: cb, err: nil}) + // Discard the transaction so that the read is marked done. + txn.Discard() return } @@ -757,9 +762,9 @@ func (txn *Txn) ReadTs() uint64 { // to. Commit API internally runs Discard, but running it twice wouldn't cause // any issues. // -// txn := db.NewTransaction(false) -// defer txn.Discard() -// // Call various APIs. +// txn := db.NewTransaction(false) +// defer txn.Discard() +// // Call various APIs. 
func (db *DB) NewTransaction(update bool) *Txn { return db.newTransaction(update, false) } diff --git a/vendor/github.com/dgraph-io/badger/v3/util.go b/vendor/github.com/dgraph-io/badger/v4/util.go similarity index 93% rename from vendor/github.com/dgraph-io/badger/v3/util.go rename to vendor/github.com/dgraph-io/badger/v4/util.go index 2e75a9ead6..8192594c27 100644 --- a/vendor/github.com/dgraph-io/badger/v3/util.go +++ b/vendor/github.com/dgraph-io/badger/v4/util.go @@ -18,14 +18,14 @@ package badger import ( "encoding/hex" - "io/ioutil" "math/rand" - "sync/atomic" + "os" "time" - "github.com/dgraph-io/badger/v3/table" - "github.com/dgraph-io/badger/v3/y" "github.com/pkg/errors" + + "github.com/dgraph-io/badger/v4/table" + "github.com/dgraph-io/badger/v4/y" ) func (s *levelsController) validate() error { @@ -91,12 +91,12 @@ func (s *levelHandler) validate() error { // reserveFileID reserves a unique file id. func (s *levelsController) reserveFileID() uint64 { - id := atomic.AddUint64(&s.nextFileID, 1) + id := s.nextFileID.Add(1) return id - 1 } func getIDMap(dir string) map[uint64]struct{} { - fileInfos, err := ioutil.ReadDir(dir) + fileInfos, err := os.ReadDir(dir) y.Check(err) idMap := make(map[uint64]struct{}) for _, info := range fileInfos { diff --git a/vendor/github.com/dgraph-io/badger/v3/value.go b/vendor/github.com/dgraph-io/badger/v4/value.go similarity index 93% rename from vendor/github.com/dgraph-io/badger/v3/value.go rename to vendor/github.com/dgraph-io/badger/v4/value.go index 6e8f9178e2..0b4e0aa7a5 100644 --- a/vendor/github.com/dgraph-io/badger/v3/value.go +++ b/vendor/github.com/dgraph-io/badger/v4/value.go @@ -19,11 +19,11 @@ package badger import ( "bytes" "context" + stderrors "errors" "fmt" "hash" "hash/crc32" "io" - "io/ioutil" "math" "os" "sort" @@ -32,10 +32,11 @@ import ( "sync" "sync/atomic" - "github.com/dgraph-io/badger/v3/y" - "github.com/dgraph-io/ristretto/z" "github.com/pkg/errors" otrace "go.opencensus.io/trace" + + "github.com/dgraph-io/badger/v4/y" + "github.com/dgraph-io/ristretto/v2/z" ) // maxVlogFileSize is the maximum size of the vlog file which can be created. Vlog Offset is of @@ -54,7 +55,7 @@ const ( bitTxn byte = 1 << 6 // Set if the entry is part of a txn. bitFinTxn byte = 1 << 7 // Set if the entry is to indicate end of txn in value log. - mi int64 = 1 << 20 + mi int64 = 1 << 20 //nolint:unused // size of vlog header. // +----------------+------------------+ @@ -63,9 +64,8 @@ const ( vlogHeaderSize = 20 ) -var errStop = errors.New("Stop iteration") -var errTruncate = errors.New("Do truncate") -var errDeleteVlogFile = errors.New("Delete vlog file") +var errStop = stderrors.New("Stop iteration") +var errTruncate = stderrors.New("Do truncate") type logEntry func(e Entry, vp valuePointer) error @@ -179,7 +179,7 @@ func (vlog *valueLog) rewrite(f *logFile) error { } } maxFid := vlog.maxFid - y.AssertTruef(uint32(f.fid) < maxFid, "fid to move: %d. Current max fid: %d", f.fid, maxFid) + y.AssertTruef(f.fid < maxFid, "fid to move: %d. Current max fid: %d", f.fid, maxFid) vlog.filesLock.RUnlock() vlog.opt.Infof("Rewriting fid: %d", f.fid) @@ -250,7 +250,7 @@ func (vlog *valueLog) rewrite(f *logFile) error { } wb = append(wb, ne) size += es - } else { + } else { //nolint:staticcheck // It might be possible that the entry read from LSM Tree points to // an older vlog file. This can happen in the following situation. 
// Assume DB is opened with @@ -364,15 +364,15 @@ func (vlog *valueLog) rewrite(f *logFile) error { } func (vlog *valueLog) incrIteratorCount() { - atomic.AddInt32(&vlog.numActiveIterators, 1) + vlog.numActiveIterators.Add(1) } func (vlog *valueLog) iteratorCount() int { - return int(atomic.LoadInt32(&vlog.numActiveIterators)) + return int(vlog.numActiveIterators.Load()) } func (vlog *valueLog) decrIteratorCount() error { - num := atomic.AddInt32(&vlog.numActiveIterators, -1) + num := vlog.numActiveIterators.Add(-1) if num != 0 { return nil } @@ -439,7 +439,7 @@ func (vlog *valueLog) dropAll() (int, error) { } func (db *DB) valueThreshold() int64 { - return atomic.LoadInt64(&db.threshold.valueThreshold) + return db.threshold.valueThreshold.Load() } type valueLog struct { @@ -451,10 +451,10 @@ type valueLog struct { maxFid uint32 filesToBeDeleted []uint32 // A refcount of iterators -- when this hits zero, we can delete the filesToBeDeleted. - numActiveIterators int32 + numActiveIterators atomic.Int32 db *DB - writableLogOffset uint32 // read by read, written by write. Must access via atomics. + writableLogOffset atomic.Uint32 // read by read, written by write numEntriesWritten uint32 opt Options @@ -473,7 +473,7 @@ func (vlog *valueLog) fpath(fid uint32) string { func (vlog *valueLog) populateFilesMap() error { vlog.filesMap = make(map[uint32]*logFile) - files, err := ioutil.ReadDir(vlog.dirPath) + files, err := os.ReadDir(vlog.dirPath) if err != nil { return errFile(err, vlog.dirPath, "Unable to open log dir.") } @@ -528,7 +528,7 @@ func (vlog *valueLog) createVlogFile() (*logFile, error) { // writableLogOffset is only written by write func, by read by Read func. // To avoid a race condition, all reads and updates to this variable must be // done via atomics. - atomic.StoreUint32(&vlog.writableLogOffset, vlogHeaderSize) + vlog.writableLogOffset.Store(vlogHeaderSize) vlog.numEntriesWritten = 0 vlog.filesLock.Unlock() @@ -555,6 +555,8 @@ func (vlog *valueLog) init(db *DB) { lf, err := InitDiscardStats(vlog.opt) y.Check(err) vlog.discardStats = lf + // See TestPersistLFDiscardStats for purpose of statement below. + db.logToSyncChan(endVLogInitMsg) } func (vlog *valueLog) open(db *DB) error { @@ -587,7 +589,7 @@ func (vlog *valueLog) open(db *DB) error { return y.Wrapf(err, "Open existing file: %q", lf.path) } // We shouldn't delete the maxFid file. 
-		if lf.size == vlogHeaderSize && fid != vlog.maxFid {
+		if lf.size.Load() == vlogHeaderSize && fid != vlog.maxFid {
 			vlog.opt.Infof("Deleting empty file: %s", lf.path)
 			if err := lf.Delete(); err != nil {
 				return y.Wrapf(err, "while trying to delete empty file: %s", lf.path)
 			}
@@ -641,6 +643,7 @@ func (vlog *valueLog) Close() error {
 		}
 	}
 	if vlog.discardStats != nil {
+		vlog.db.captureDiscardStats()
 		if terr := vlog.discardStats.Close(-1); terr != nil && err == nil {
 			err = terr
 		}
@@ -674,7 +677,7 @@ type request struct {
 	Ptrs []valuePointer
 	Wg   sync.WaitGroup
 	Err  error
-	ref  int32
+	ref  atomic.Int32
 }
 
 func (req *request) reset() {
@@ -682,15 +685,15 @@ func (req *request) reset() {
 	req.Ptrs = req.Ptrs[:0]
 	req.Wg = sync.WaitGroup{}
 	req.Err = nil
-	req.ref = 0
+	req.ref.Store(0)
 }
 
 func (req *request) IncrRef() {
-	atomic.AddInt32(&req.ref, 1)
+	req.ref.Add(1)
 }
 
 func (req *request) DecrRef() {
-	nRef := atomic.AddInt32(&req.ref, -1)
+	nRef := req.ref.Add(-1)
 	if nRef > 0 {
 		return
 	}
@@ -748,7 +751,7 @@ func (vlog *valueLog) sync() error {
 }
 
 func (vlog *valueLog) woffset() uint32 {
-	return atomic.LoadUint32(&vlog.writableLogOffset)
+	return vlog.writableLogOffset.Load()
 }
 
 // validateWrites will check whether the given requests can fit into 4GB vlog file.
@@ -816,16 +819,19 @@ func (vlog *valueLog) write(reqs []*request) error {
 		}
 
 		n := uint32(buf.Len())
-		endOffset := atomic.AddUint32(&vlog.writableLogOffset, n)
+		endOffset := vlog.writableLogOffset.Add(n)
 		// Increase the file size if we cannot accommodate this entry.
+		// [Aman] Should this be >= or just >? Doesn't make sense to extend the file if it is big enough already.
 		if int(endOffset) >= len(curlf.Data) {
-			curlf.Truncate(int64(endOffset))
+			if err := curlf.Truncate(int64(endOffset)); err != nil {
+				return err
+			}
 		}
 
 		start := int(endOffset - n)
 		y.AssertTrue(copy(curlf.Data[start:], buf.Bytes()) == int(n))
 
-		atomic.StoreUint32(&curlf.size, endOffset)
+		curlf.size.Store(endOffset)
 		return nil
 	}
 
@@ -888,8 +894,8 @@ func (vlog *valueLog) write(reqs []*request) error {
 		bytesWritten += buf.Len()
 		// No need to flush anything, we write to file directly via mmap.
} - y.NumWritesAdd(vlog.opt.MetricsEnabled, int64(written)) - y.NumBytesWrittenAdd(vlog.opt.MetricsEnabled, int64(bytesWritten)) + y.NumWritesVlogAdd(vlog.opt.MetricsEnabled, int64(written)) + y.NumBytesWrittenVlogAdd(vlog.opt.MetricsEnabled, int64(bytesWritten)) vlog.numEntriesWritten += uint32(written) vlog.db.threshold.update(valueSizes) @@ -964,7 +970,7 @@ func (vlog *valueLog) Read(vp valuePointer, _ *y.Slice) ([]byte, func(), error) } } if uint32(len(kv)) < h.klen+h.vlen { - vlog.db.opt.Logger.Errorf("Invalid read: vp: %+v", vp) + vlog.db.opt.Errorf("Invalid read: vp: %+v", vp) return nil, nil, errors.Errorf("Invalid read: Len: %d read at:[%d:%d]", len(kv), h.klen, h.klen+h.vlen) } @@ -989,6 +995,8 @@ func (vlog *valueLog) readValueBytes(vp valuePointer) ([]byte, *logFile, error) } buf, err := lf.read(vp) + y.NumReadsVlogAdd(vlog.db.opt.MetricsEnabled, 1) + y.NumBytesReadsVlogAdd(vlog.db.opt.MetricsEnabled, int64(len(buf))) return buf, lf, err } @@ -1024,8 +1032,7 @@ LOOP: discard, thr, fi.Name()) return nil } - maxFid := atomic.LoadUint32(&vlog.maxFid) - if fid < maxFid { + if fid < vlog.maxFid { vlog.opt.Infof("Found value log max discard fid: %d discard: %d\n", fid, discard) lf, ok := vlog.filesMap[fid] y.AssertTrue(ok) @@ -1055,12 +1062,6 @@ func discardEntry(e Entry, vs y.ValueStruct, db *DB) bool { return false } -type reason struct { - total float64 - discard float64 - count int -} - func (vlog *valueLog) doRunGC(lf *logFile) error { _, span := otrace.StartSpan(context.Background(), "Badger.GC") span.Annotatef(nil, "GC rewrite for: %v", lf.path) @@ -1108,12 +1109,15 @@ func (vlog *valueLog) updateDiscardStats(stats map[uint32]int64) { for fid, discard := range stats { vlog.discardStats.Update(fid, discard) } + // The following is to coordinate with some test cases where we want to + // verify that at least one iteration of updateDiscardStats has been completed. + vlog.db.logToSyncChan(updateDiscardStatsMsg) } type vlogThreshold struct { logger Logger percentile float64 - valueThreshold int64 + valueThreshold atomic.Int64 valueCh chan []int64 clearCh chan bool closer *z.Closer @@ -1142,19 +1146,20 @@ func initVlogThreshold(opt *Options) *vlogThreshold { } return bounds } - return &vlogThreshold{ - logger: opt.Logger, - percentile: opt.VLogPercentile, - valueThreshold: opt.ValueThreshold, - valueCh: make(chan []int64, 1000), - clearCh: make(chan bool, 1), - closer: z.NewCloser(1), - vlMetrics: z.NewHistogramData(getBounds()), + lt := &vlogThreshold{ + logger: opt.Logger, + percentile: opt.VLogPercentile, + valueCh: make(chan []int64, 1000), + clearCh: make(chan bool, 1), + closer: z.NewCloser(1), + vlMetrics: z.NewHistogramData(getBounds()), } + lt.valueThreshold.Store(opt.ValueThreshold) + return lt } func (v *vlogThreshold) Clear(opt Options) { - atomic.StoreInt64(&v.valueThreshold, opt.ValueThreshold) + v.valueThreshold.Store(opt.ValueThreshold) v.clearCh <- true } @@ -1180,11 +1185,11 @@ func (v *vlogThreshold) listenForValueThresholdUpdate() { // in range of Options.VlogPercentile will make it to the LSM tree and rest to the // value log file. 
p := int64(v.vlMetrics.Percentile(v.percentile)) - if atomic.LoadInt64(&v.valueThreshold) != p { + if v.valueThreshold.Load() != p { if v.logger != nil { v.logger.Infof("updating value of threshold to: %d", p) } - atomic.StoreInt64(&v.valueThreshold, p) + v.valueThreshold.Store(p) } case <-v.clearCh: v.vlMetrics.Clear() diff --git a/vendor/github.com/dgraph-io/badger/v3/y/bloom.go b/vendor/github.com/dgraph-io/badger/v4/y/bloom.go similarity index 99% rename from vendor/github.com/dgraph-io/badger/v3/y/bloom.go rename to vendor/github.com/dgraph-io/badger/v4/y/bloom.go index 806770bbf7..65f4f08794 100644 --- a/vendor/github.com/dgraph-io/badger/v3/y/bloom.go +++ b/vendor/github.com/dgraph-io/badger/v4/y/bloom.go @@ -67,7 +67,7 @@ func appendFilter(buf []byte, keys []uint32, bitsPerKey int) []byte { k = 30 } - nBits := len(keys) * int(bitsPerKey) + nBits := len(keys) * bitsPerKey // For small len(keys), we can see a very high false positive rate. Fix it // by enforcing a minimum bloom filter length. if nBits < 64 { diff --git a/vendor/github.com/dgraph-io/badger/v3/y/checksum.go b/vendor/github.com/dgraph-io/badger/v4/y/checksum.go similarity index 90% rename from vendor/github.com/dgraph-io/badger/v3/y/checksum.go rename to vendor/github.com/dgraph-io/badger/v4/y/checksum.go index 17d60c55d5..555c22e324 100644 --- a/vendor/github.com/dgraph-io/badger/v3/y/checksum.go +++ b/vendor/github.com/dgraph-io/badger/v4/y/checksum.go @@ -17,16 +17,16 @@ package y import ( + stderrors "errors" "hash/crc32" - "github.com/dgraph-io/badger/v3/pb" + "github.com/cespare/xxhash/v2" - "github.com/cespare/xxhash" - "github.com/pkg/errors" + "github.com/dgraph-io/badger/v4/pb" ) // ErrChecksumMismatch is returned at checksum mismatch. -var ErrChecksumMismatch = errors.New("checksum mismatch") +var ErrChecksumMismatch = stderrors.New("checksum mismatch") // CalculateChecksum calculates checksum for data using ct checksum type. func CalculateChecksum(data []byte, ct pb.Checksum_Algorithm) uint64 { diff --git a/vendor/github.com/dgraph-io/badger/v3/y/encrypt.go b/vendor/github.com/dgraph-io/badger/v4/y/encrypt.go similarity index 100% rename from vendor/github.com/dgraph-io/badger/v3/y/encrypt.go rename to vendor/github.com/dgraph-io/badger/v4/y/encrypt.go diff --git a/vendor/github.com/dgraph-io/badger/v3/y/error.go b/vendor/github.com/dgraph-io/badger/v4/y/error.go similarity index 90% rename from vendor/github.com/dgraph-io/badger/v3/y/error.go rename to vendor/github.com/dgraph-io/badger/v4/y/error.go index a727a82eaa..f36181b465 100644 --- a/vendor/github.com/dgraph-io/badger/v3/y/error.go +++ b/vendor/github.com/dgraph-io/badger/v4/y/error.go @@ -84,3 +84,16 @@ func Wrapf(err error, format string, args ...interface{}) error { } return errors.Wrapf(err, format, args...) 
} + +func CombineErrors(one, other error) error { + if one != nil && other != nil { + return fmt.Errorf("%v; %v", one, other) + } + if one != nil && other == nil { + return fmt.Errorf("%v", one) + } + if one == nil && other != nil { + return fmt.Errorf("%v", other) + } + return nil +} diff --git a/vendor/github.com/dgraph-io/badger/v3/y/event_log.go b/vendor/github.com/dgraph-io/badger/v4/y/event_log.go similarity index 100% rename from vendor/github.com/dgraph-io/badger/v3/y/event_log.go rename to vendor/github.com/dgraph-io/badger/v4/y/event_log.go diff --git a/vendor/github.com/dgraph-io/badger/v3/y/file_dsync.go b/vendor/github.com/dgraph-io/badger/v4/y/file_dsync.go similarity index 93% rename from vendor/github.com/dgraph-io/badger/v3/y/file_dsync.go rename to vendor/github.com/dgraph-io/badger/v4/y/file_dsync.go index ea4d9ab260..1e171733eb 100644 --- a/vendor/github.com/dgraph-io/badger/v3/y/file_dsync.go +++ b/vendor/github.com/dgraph-io/badger/v4/y/file_dsync.go @@ -1,3 +1,4 @@ +//go:build !dragonfly && !freebsd && !windows && !plan9 // +build !dragonfly,!freebsd,!windows,!plan9 /* diff --git a/vendor/github.com/dgraph-io/badger/v3/y/file_nodsync.go b/vendor/github.com/dgraph-io/badger/v4/y/file_nodsync.go similarity index 93% rename from vendor/github.com/dgraph-io/badger/v3/y/file_nodsync.go rename to vendor/github.com/dgraph-io/badger/v4/y/file_nodsync.go index 54a2184e19..99f6888c71 100644 --- a/vendor/github.com/dgraph-io/badger/v3/y/file_nodsync.go +++ b/vendor/github.com/dgraph-io/badger/v4/y/file_nodsync.go @@ -1,3 +1,4 @@ +//go:build dragonfly || freebsd || windows || plan9 // +build dragonfly freebsd windows plan9 /* diff --git a/vendor/github.com/dgraph-io/badger/v3/y/iterator.go b/vendor/github.com/dgraph-io/badger/v4/y/iterator.go similarity index 100% rename from vendor/github.com/dgraph-io/badger/v3/y/iterator.go rename to vendor/github.com/dgraph-io/badger/v4/y/iterator.go diff --git a/vendor/github.com/dgraph-io/badger/v4/y/metrics.go b/vendor/github.com/dgraph-io/badger/v4/y/metrics.go new file mode 100644 index 0000000000..026328d197 --- /dev/null +++ b/vendor/github.com/dgraph-io/badger/v4/y/metrics.go @@ -0,0 +1,223 @@ +/* + * Copyright (C) 2017 Dgraph Labs, Inc. and Contributors + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package y + +import ( + "expvar" +) + +const ( + BADGER_METRIC_PREFIX = "badger_" +) + +var ( + // lsmSize has size of the LSM in bytes + lsmSize *expvar.Map + // vlogSize has size of the value log in bytes + vlogSize *expvar.Map + // pendingWrites tracks the number of pending writes. 
+	pendingWrites *expvar.Map
+
+	// These are cumulative
+
+	// VLOG METRICS
+	// numReadsVlog has cumulative number of reads from vlog
+	numReadsVlog *expvar.Int
+	// numWritesVlog has cumulative number of writes into vlog
+	numWritesVlog *expvar.Int
+	// numBytesReadVlog has cumulative number of bytes read from VLOG
+	numBytesReadVlog *expvar.Int
+	// numBytesVlogWritten has cumulative number of bytes written into VLOG
+	numBytesVlogWritten *expvar.Int
+
+	// LSM METRICS
+	// numBytesReadLSM has cumulative number of bytes read from LSM tree
+	numBytesReadLSM *expvar.Int
+	// numBytesWrittenToL0 has cumulative number of bytes written into L0 of the LSM tree
+	numBytesWrittenToL0 *expvar.Int
+	// numLSMGets is number of LSM gets
+	numLSMGets *expvar.Map
+	// numBytesCompactionWritten is the number of bytes written in the lsm tree due to compaction
+	numBytesCompactionWritten *expvar.Map
+	// numLSMBloomHits is the number of LSM bloom hits
+	numLSMBloomHits *expvar.Map
+
+	// DB METRICS
+	// numGets is number of gets -> Number of get requests made
+	numGets *expvar.Int
+	// number of get queries in which we actually get a result
+	numGetsWithResults *expvar.Int
+	// number of iterators created, these would be the number of range queries
+	numIteratorsCreated *expvar.Int
+	// numPuts is number of puts -> Number of put requests made
+	numPuts *expvar.Int
+	// numMemtableGets is number of memtable gets -> Number of get requests made on memtable
+	numMemtableGets *expvar.Int
+	// numCompactionTables is the number of tables being compacted
+	numCompactionTables *expvar.Int
+	// Total writes by a user in bytes
+	numBytesWrittenUser *expvar.Int
+)
+
+// These variables are global and have cumulative values for all kv stores.
+// Naming convention of metrics: {badger_version}_{singular operation}_{granularity}_{component}
+func init() {
+	numReadsVlog = expvar.NewInt(BADGER_METRIC_PREFIX + "read_num_vlog")
+	numBytesReadVlog = expvar.NewInt(BADGER_METRIC_PREFIX + "read_bytes_vlog")
+	numWritesVlog = expvar.NewInt(BADGER_METRIC_PREFIX + "write_num_vlog")
+	numBytesVlogWritten = expvar.NewInt(BADGER_METRIC_PREFIX + "write_bytes_vlog")
+
+	numBytesReadLSM = expvar.NewInt(BADGER_METRIC_PREFIX + "read_bytes_lsm")
+	numBytesWrittenToL0 = expvar.NewInt(BADGER_METRIC_PREFIX + "write_bytes_l0")
+	numBytesCompactionWritten = expvar.NewMap(BADGER_METRIC_PREFIX + "write_bytes_compaction")
+
+	numLSMGets = expvar.NewMap(BADGER_METRIC_PREFIX + "get_num_lsm")
+	numLSMBloomHits = expvar.NewMap(BADGER_METRIC_PREFIX + "hit_num_lsm_bloom_filter")
+	numMemtableGets = expvar.NewInt(BADGER_METRIC_PREFIX + "get_num_memtable")
+
+	// User operations
+	numGets = expvar.NewInt(BADGER_METRIC_PREFIX + "get_num_user")
+	numPuts = expvar.NewInt(BADGER_METRIC_PREFIX + "put_num_user")
+	numBytesWrittenUser = expvar.NewInt(BADGER_METRIC_PREFIX + "write_bytes_user")
+
+	// Required for Enabled
+	numGetsWithResults = expvar.NewInt(BADGER_METRIC_PREFIX + "get_with_result_num_user")
+	numIteratorsCreated = expvar.NewInt(BADGER_METRIC_PREFIX + "iterator_num_user")
+
+	// Sizes
+	lsmSize = expvar.NewMap(BADGER_METRIC_PREFIX + "size_bytes_lsm")
+	vlogSize = expvar.NewMap(BADGER_METRIC_PREFIX + "size_bytes_vlog")
+
+	pendingWrites = expvar.NewMap(BADGER_METRIC_PREFIX + "write_pending_num_memtable")
+	numCompactionTables = expvar.NewInt(BADGER_METRIC_PREFIX + "compaction_current_num_lsm")
+}
+
+func NumIteratorsCreatedAdd(enabled bool, val int64) {
+	addInt(enabled, numIteratorsCreated, val)
+}
+
+func NumGetsWithResultsAdd(enabled bool, val int64) {
+	addInt(enabled,
numGetsWithResults, val) +} + +func NumReadsVlogAdd(enabled bool, val int64) { + addInt(enabled, numReadsVlog, val) +} + +func NumBytesWrittenUserAdd(enabled bool, val int64) { + addInt(enabled, numBytesWrittenUser, val) +} + +func NumWritesVlogAdd(enabled bool, val int64) { + addInt(enabled, numWritesVlog, val) +} + +func NumBytesReadsVlogAdd(enabled bool, val int64) { + addInt(enabled, numBytesReadVlog, val) +} + +func NumBytesReadsLSMAdd(enabled bool, val int64) { + addInt(enabled, numBytesReadLSM, val) +} + +func NumBytesWrittenVlogAdd(enabled bool, val int64) { + addInt(enabled, numBytesVlogWritten, val) +} + +func NumBytesWrittenToL0Add(enabled bool, val int64) { + addInt(enabled, numBytesWrittenToL0, val) +} + +func NumBytesCompactionWrittenAdd(enabled bool, key string, val int64) { + addToMap(enabled, numBytesCompactionWritten, key, val) +} + +func NumGetsAdd(enabled bool, val int64) { + addInt(enabled, numGets, val) +} + +func NumPutsAdd(enabled bool, val int64) { + addInt(enabled, numPuts, val) +} + +func NumMemtableGetsAdd(enabled bool, val int64) { + addInt(enabled, numMemtableGets, val) +} + +func NumCompactionTablesAdd(enabled bool, val int64) { + addInt(enabled, numCompactionTables, val) +} + +func LSMSizeSet(enabled bool, key string, val expvar.Var) { + storeToMap(enabled, lsmSize, key, val) +} + +func VlogSizeSet(enabled bool, key string, val expvar.Var) { + storeToMap(enabled, vlogSize, key, val) +} + +func PendingWritesSet(enabled bool, key string, val expvar.Var) { + storeToMap(enabled, pendingWrites, key, val) +} + +func NumLSMBloomHitsAdd(enabled bool, key string, val int64) { + addToMap(enabled, numLSMBloomHits, key, val) +} + +func NumLSMGetsAdd(enabled bool, key string, val int64) { + addToMap(enabled, numLSMGets, key, val) +} + +func LSMSizeGet(enabled bool, key string) expvar.Var { + return getFromMap(enabled, lsmSize, key) +} + +func VlogSizeGet(enabled bool, key string) expvar.Var { + return getFromMap(enabled, vlogSize, key) +} + +func addInt(enabled bool, metric *expvar.Int, val int64) { + if !enabled { + return + } + + metric.Add(val) +} + +func addToMap(enabled bool, metric *expvar.Map, key string, val int64) { + if !enabled { + return + } + + metric.Add(key, val) +} + +func storeToMap(enabled bool, metric *expvar.Map, key string, val expvar.Var) { + if !enabled { + return + } + + metric.Set(key, val) +} + +func getFromMap(enabled bool, metric *expvar.Map, key string) expvar.Var { + if !enabled { + return nil + } + + return metric.Get(key) +} diff --git a/vendor/github.com/dgraph-io/badger/v3/y/watermark.go b/vendor/github.com/dgraph-io/badger/v4/y/watermark.go similarity index 92% rename from vendor/github.com/dgraph-io/badger/v3/y/watermark.go rename to vendor/github.com/dgraph-io/badger/v4/y/watermark.go index 1d7b9509c6..fe685088be 100644 --- a/vendor/github.com/dgraph-io/badger/v3/y/watermark.go +++ b/vendor/github.com/dgraph-io/badger/v4/y/watermark.go @@ -21,7 +21,7 @@ import ( "context" "sync/atomic" - "github.com/dgraph-io/ristretto/z" + "github.com/dgraph-io/ristretto/v2/z" ) type uint64Heap []uint64 @@ -51,8 +51,8 @@ type mark struct { // WaterMark is used to keep track of the minimum un-finished index. Typically, an index k becomes // finished or "done" according to a WaterMark once Done(k) has been called -// 1. as many times as Begin(k) has, AND -// 2. a positive number of times. +// 1. as many times as Begin(k) has, AND +// 2. a positive number of times. 
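Concretely, the Begin/Done contract reads like this (a sketch against the vendored y package; marks are processed asynchronously on markCh, hence WaitForMark rather than polling DoneUntil):

```go
import (
	"context"

	"github.com/dgraph-io/badger/v4/y"
	"github.com/dgraph-io/ristretto/v2/z"
)

func watermarkSketch() {
	wm := &y.WaterMark{Name: "example"}
	wm.Init(z.NewCloser(1))

	wm.Begin(1)
	wm.Begin(2)
	wm.Done(1) // Begin'd once and Done'd a positive number of times => index 1 is done.

	_ = wm.WaitForMark(context.Background(), 1) // returns once DoneUntil() >= 1
	wm.Done(2)                                  // now index 2 can become done as well
}
```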
// // An index may also become "done" by calling SetDoneUntil at a time such that it is not // inter-mingled with Begin/Done calls. @@ -60,8 +60,8 @@ type mark struct { // Since doneUntil and lastIndex addresses are passed to sync/atomic packages, we ensure that they // are 64-bit aligned by putting them at the beginning of the structure. type WaterMark struct { - doneUntil uint64 - lastIndex uint64 + doneUntil atomic.Uint64 + lastIndex atomic.Uint64 Name string markCh chan mark } @@ -74,13 +74,13 @@ func (w *WaterMark) Init(closer *z.Closer) { // Begin sets the last index to the given value. func (w *WaterMark) Begin(index uint64) { - atomic.StoreUint64(&w.lastIndex, index) + w.lastIndex.Store(index) w.markCh <- mark{index: index, done: false} } // BeginMany works like Begin but accepts multiple indices. func (w *WaterMark) BeginMany(indices []uint64) { - atomic.StoreUint64(&w.lastIndex, indices[len(indices)-1]) + w.lastIndex.Store(indices[len(indices)-1]) w.markCh <- mark{index: 0, indices: indices, done: false} } @@ -97,18 +97,18 @@ func (w *WaterMark) DoneMany(indices []uint64) { // DoneUntil returns the maximum index that has the property that all indices // less than or equal to it are done. func (w *WaterMark) DoneUntil() uint64 { - return atomic.LoadUint64(&w.doneUntil) + return w.doneUntil.Load() } // SetDoneUntil sets the maximum index that has the property that all indices // less than or equal to it are done. func (w *WaterMark) SetDoneUntil(val uint64) { - atomic.StoreUint64(&w.doneUntil, val) + w.doneUntil.Store(val) } // LastIndex returns the last index for which Begin has been called. func (w *WaterMark) LastIndex() uint64 { - return atomic.LoadUint64(&w.lastIndex) + return w.lastIndex.Load() } // WaitForMark waits until the given index is marked as done. @@ -182,7 +182,7 @@ func (w *WaterMark) process(closer *z.Closer) { } if until != doneUntil { - AssertTrue(atomic.CompareAndSwapUint64(&w.doneUntil, doneUntil, until)) + AssertTrue(w.doneUntil.CompareAndSwap(doneUntil, until)) } notifyAndRemove := func(idx uint64, toNotify []chan struct{}) { @@ -216,7 +216,7 @@ func (w *WaterMark) process(closer *z.Closer) { return case mark := <-w.markCh: if mark.waiter != nil { - doneUntil := atomic.LoadUint64(&w.doneUntil) + doneUntil := w.doneUntil.Load() if doneUntil >= mark.index { close(mark.waiter) } else { @@ -228,7 +228,8 @@ func (w *WaterMark) process(closer *z.Closer) { } } } else { - if mark.index > 0 { + // it is possible that mark.index is zero. We need to handle that case as well. + if mark.index > 0 || (mark.index == 0 && len(mark.indices) == 0) { processOne(mark.index, mark.done) } for _, index := range mark.indices { diff --git a/vendor/github.com/dgraph-io/badger/v3/y/y.go b/vendor/github.com/dgraph-io/badger/v4/y/y.go similarity index 96% rename from vendor/github.com/dgraph-io/badger/v3/y/y.go rename to vendor/github.com/dgraph-io/badger/v4/y/y.go index 92dfffcfd0..8f61eb2818 100644 --- a/vendor/github.com/dgraph-io/badger/v3/y/y.go +++ b/vendor/github.com/dgraph-io/badger/v4/y/y.go @@ -19,6 +19,7 @@ package y import ( "bytes" "encoding/binary" + stderrors "errors" "fmt" "hash/crc32" "io" @@ -30,19 +31,18 @@ import ( "time" "unsafe" - "github.com/dgraph-io/badger/v3/pb" - "github.com/dgraph-io/ristretto/z" - "github.com/pkg/errors" + "github.com/dgraph-io/badger/v4/pb" + "github.com/dgraph-io/ristretto/v2/z" ) var ( // ErrEOF indicates an end of file when trying to read from a memory mapped file // and encountering the end of slice. 
- ErrEOF = errors.New("ErrEOF: End of file") + ErrEOF = stderrors.New("ErrEOF: End of file") // ErrCommitAfterFinish indicates that write batch commit was called after // finish - ErrCommitAfterFinish = errors.New("Batch commit not permitted after finish") + ErrCommitAfterFinish = stderrors.New("Batch commit not permitted after finish") ) type Flags int @@ -257,6 +257,18 @@ func (t *Throttle) Finish() error { return t.finishErr } +// U16ToBytes converts the given Uint16 to bytes +func U16ToBytes(v uint16) []byte { + var uBuf [2]byte + binary.BigEndian.PutUint16(uBuf[:], v) + return uBuf[:] +} + +// BytesToU16 converts the given byte slice to uint16 +func BytesToU16(b []byte) uint16 { + return binary.BigEndian.Uint16(b) +} + // U32ToBytes converts the given Uint32 to bytes func U32ToBytes(v uint32) []byte { var uBuf [4]byte @@ -495,7 +507,7 @@ func (r *PageBufferReader) Read(p []byte) (int, error) { } } - if read == 0 { + if read == 0 && len(p) > 0 { return read, io.EOF } diff --git a/vendor/github.com/dgraph-io/badger/v3/y/zstd.go b/vendor/github.com/dgraph-io/badger/v4/y/zstd.go similarity index 100% rename from vendor/github.com/dgraph-io/badger/v3/y/zstd.go rename to vendor/github.com/dgraph-io/badger/v4/y/zstd.go diff --git a/vendor/github.com/dgraph-io/ristretto/.deepsource.toml b/vendor/github.com/dgraph-io/ristretto/.deepsource.toml deleted file mode 100644 index 40609eff3f..0000000000 --- a/vendor/github.com/dgraph-io/ristretto/.deepsource.toml +++ /dev/null @@ -1,17 +0,0 @@ -version = 1 - -test_patterns = [ - '**/*_test.go' -] - -exclude_patterns = [ - -] - -[[analyzers]] -name = 'go' -enabled = true - - - [analyzers.meta] - import_path = 'github.com/dgraph-io/ristretto' diff --git a/vendor/github.com/dgraph-io/ristretto/.go-version b/vendor/github.com/dgraph-io/ristretto/.go-version deleted file mode 100644 index b8f1e3fd3e..0000000000 --- a/vendor/github.com/dgraph-io/ristretto/.go-version +++ /dev/null @@ -1 +0,0 @@ -1.17.11 diff --git a/vendor/github.com/dgraph-io/ristretto/.golangci.yml b/vendor/github.com/dgraph-io/ristretto/.golangci.yml deleted file mode 100644 index 7318e9a3b6..0000000000 --- a/vendor/github.com/dgraph-io/ristretto/.golangci.yml +++ /dev/null @@ -1,23 +0,0 @@ -run: - tests: false - skip-dirs: - - contrib - - sim - -linters-settings: - lll: - line-length: 120 - -linters: - disable-all: true - enable: - #- errcheck - #- ineffassign - - gas - #- gofmt - #- golint - #- gosimple - #- govet - - lll - #- varcheck - #- unused diff --git a/vendor/github.com/dgraph-io/ristretto/README.md b/vendor/github.com/dgraph-io/ristretto/README.md deleted file mode 100644 index e71ae3df9e..0000000000 --- a/vendor/github.com/dgraph-io/ristretto/README.md +++ /dev/null @@ -1,220 +0,0 @@ -# Ristretto -[![Go Doc](https://img.shields.io/badge/godoc-reference-blue.svg)](http://godoc.org/github.com/dgraph-io/ristretto) -[![ci-ristretto-tests](https://github.com/dgraph-io/ristretto/actions/workflows/ci-ristretto-tests.yml/badge.svg)](https://github.com/dgraph-io/ristretto/actions/workflows/ci-ristretto-tests.yml) -[![ci-ristretto-lint](https://github.com/dgraph-io/ristretto/actions/workflows/ci-ristretto-lint.yml/badge.svg)](https://github.com/dgraph-io/ristretto/actions/workflows/ci-ristretto-lint.yml) -[![Coverage Status](https://coveralls.io/repos/github/dgraph-io/ristretto/badge.svg?branch=main)](https://coveralls.io/github/dgraph-io/ristretto?branch=main) -[![Go Report 
Card](https://img.shields.io/badge/go%20report-A%2B-brightgreen)](https://goreportcard.com/report/github.com/dgraph-io/ristretto) - -Ristretto is a fast, concurrent cache library built with a focus on performance and correctness. - -The motivation to build Ristretto comes from the need for a contention-free -cache in [Dgraph][]. - -[Dgraph]: https://github.com/dgraph-io/dgraph - -## Features - -* **High Hit Ratios** - with our unique admission/eviction policy pairing, Ristretto's performance is best in class. - * **Eviction: SampledLFU** - on par with exact LRU and better performance on Search and Database traces. - * **Admission: TinyLFU** - extra performance with little memory overhead (12 bits per counter). -* **Fast Throughput** - we use a variety of techniques for managing contention and the result is excellent throughput. -* **Cost-Based Eviction** - any large new item deemed valuable can evict multiple smaller items (cost could be anything). -* **Fully Concurrent** - you can use as many goroutines as you want with little throughput degradation. -* **Metrics** - optional performance metrics for throughput, hit ratios, and other stats. -* **Simple API** - just figure out your ideal `Config` values and you're off and running. - -## Status - -Ristretto is production-ready. See [Projects using Ristretto](#projects-using-ristretto). - -## Table of Contents - -* [Usage](#Usage) - * [Example](#Example) - * [Config](#Config) - * [NumCounters](#Config) - * [MaxCost](#Config) - * [BufferItems](#Config) - * [Metrics](#Config) - * [OnEvict](#Config) - * [KeyToHash](#Config) - * [Cost](#Config) -* [Benchmarks](#Benchmarks) - * [Hit Ratios](#Hit-Ratios) - * [Search](#Search) - * [Database](#Database) - * [Looping](#Looping) - * [CODASYL](#CODASYL) - * [Throughput](#Throughput) - * [Mixed](#Mixed) - * [Read](#Read) - * [Write](#Write) -* [Projects using Ristretto](#projects-using-ristretto) -* [FAQ](#FAQ) - -## Usage - -### Example - -```go -func main() { - cache, err := ristretto.NewCache(&ristretto.Config{ - NumCounters: 1e7, // number of keys to track frequency of (10M). - MaxCost: 1 << 30, // maximum cost of cache (1GB). - BufferItems: 64, // number of keys per Get buffer. - }) - if err != nil { - panic(err) - } - - // set a value with a cost of 1 - cache.Set("key", "value", 1) - - // wait for value to pass through buffers - time.Sleep(10 * time.Millisecond) - - value, found := cache.Get("key") - if !found { - panic("missing value") - } - fmt.Println(value) - cache.Del("key") -} -``` - -### Config - -The `Config` struct is passed to `NewCache` when creating Ristretto instances (see the example above). - -**NumCounters** `int64` - -NumCounters is the number of 4-bit access counters to keep for admission and eviction. We've seen good performance in setting this to 10x the number of items you expect to keep in the cache when full. - -For example, if you expect each item to have a cost of 1 and MaxCost is 100, set NumCounters to 1,000. Or, if you use variable cost values but expect the cache to hold around 10,000 items when full, set NumCounters to 100,000. The important thing is the *number of unique items* in the full cache, not necessarily the MaxCost value. - -**MaxCost** `int64` - -MaxCost is how eviction decisions are made. For example, if MaxCost is 100 and a new item with a cost of 1 increases total cache cost to 101, 1 item will be evicted. - -MaxCost can also be used to denote the max size in bytes. 
For example, if MaxCost is 1,000,000 (1MB) and the cache is full with 1,000 1KB items, a new item (that's accepted) would cause 5 1KB items to be evicted. - -MaxCost could be anything as long as it matches how you're using the cost values when calling Set. - -**BufferItems** `int64` - -BufferItems is the size of the Get buffers. The best value we've found for this is 64. - -If for some reason you see Get performance decreasing with lots of contention (you shouldn't), try increasing this value in increments of 64. This is a fine-tuning mechanism and you probably won't have to touch this. - -**Metrics** `bool` - -Metrics is true when you want real-time logging of a variety of stats. The reason this is a Config flag is because there's a 10% throughput performance overhead. - -**OnEvict** `func(hashes [2]uint64, value interface{}, cost int64)` - -OnEvict is called for every eviction. - -**KeyToHash** `func(key interface{}) [2]uint64` - -KeyToHash is the hashing algorithm used for every key. If this is nil, Ristretto has a variety of [defaults depending on the underlying interface type](https://github.com/dgraph-io/ristretto/blob/master/z/z.go#L19-L41). - -Note that if you want 128bit hashes you should use the full `[2]uint64`, -otherwise just fill the `uint64` at the `0` position and it will behave like -any 64bit hash. - -**Cost** `func(value interface{}) int64` - -Cost is an optional function you can pass to the Config in order to evaluate -item cost at runtime, and only for the Set calls that aren't dropped (this is -useful if calculating item cost is particularly expensive and you don't want to -waste time on items that will be dropped anyways). - -To signal to Ristretto that you'd like to use this Cost function: - -1. Set the Cost field to a non-nil function. -2. When calling Set for new items or item updates, use a `cost` of 0. - -## Benchmarks - -The benchmarks can be found in https://github.com/dgraph-io/benchmarks/tree/master/cachebench/ristretto. - -### Hit Ratios - -#### Search - -This trace is described as "disk read accesses initiated by a large commercial -search engine in response to various web search requests." - -

-#### Database
-
-This trace is described as "a database server running at a commercial site
-running an ERP application on top of a commercial database."
-
-#### Looping
-
-This trace demonstrates a looping access pattern.
-
-#### CODASYL
-
-This trace is described as "references to a CODASYL database for a one hour
-period."
-
-### Throughput
-
-All throughput benchmarks were ran on an Intel Core i7-8700K (3.7GHz) with 16gb
-of RAM.
-
-#### Mixed
-
-#### Read
-
-#### Write
- -## Projects Using Ristretto - -Below is a list of known projects that use Ristretto: - -- [Badger](https://github.com/dgraph-io/badger) - Embeddable key-value DB in Go -- [Dgraph](https://github.com/dgraph-io/dgraph) - Horizontally scalable and distributed GraphQL database with a graph backend -- [Vitess](https://github.com/vitessio/vitess) - Database clustering system for horizontal scaling of MySQL -- [SpiceDB](https://github.com/authzed/spicedb) - Horizontally scalable permissions database - -## FAQ - -### How are you achieving this performance? What shortcuts are you taking? - -We go into detail in the [Ristretto blog post](https://blog.dgraph.io/post/introducing-ristretto-high-perf-go-cache/), but in short: our throughput performance can be attributed to a mix of batching and eventual consistency. Our hit ratio performance is mostly due to an excellent [admission policy](https://arxiv.org/abs/1512.00727) and SampledLFU eviction policy. - -As for "shortcuts," the only thing Ristretto does that could be construed as one is dropping some Set calls. That means a Set call for a new item (updates are guaranteed) isn't guaranteed to make it into the cache. The new item could be dropped at two points: when passing through the Set buffer or when passing through the admission policy. However, this doesn't affect hit ratios much at all as we expect the most popular items to be Set multiple times and eventually make it in the cache. - -### Is Ristretto distributed? - -No, it's just like any other Go library that you can import into your project and use in a single process. diff --git a/vendor/github.com/dgraph-io/ristretto/v2/.gitignore b/vendor/github.com/dgraph-io/ristretto/v2/.gitignore new file mode 100644 index 0000000000..64bd913847 --- /dev/null +++ b/vendor/github.com/dgraph-io/ristretto/v2/.gitignore @@ -0,0 +1,3 @@ +# IDE +.idea +.vscode \ No newline at end of file diff --git a/vendor/github.com/dgraph-io/ristretto/v2/.golangci.yml b/vendor/github.com/dgraph-io/ristretto/v2/.golangci.yml new file mode 100644 index 0000000000..1af58b1644 --- /dev/null +++ b/vendor/github.com/dgraph-io/ristretto/v2/.golangci.yml @@ -0,0 +1,30 @@ +run: + skip-dirs: + skip-files: + +linters-settings: + lll: + line-length: 120 + staticcheck: + checks: + - all + - '-SA1019' # it is okay to use math/rand at times. + gosec: + excludes: + - G404 # it is okay to use math/rand at times. + - G115 # presents false positives for conversion + +linters: + disable-all: true + enable: + - errcheck + - gofmt + - goimports + - gosec + - gosimple + - govet + - ineffassign + - lll + - staticcheck + - unconvert + - unused diff --git a/vendor/github.com/dgraph-io/ristretto/CHANGELOG.md b/vendor/github.com/dgraph-io/ristretto/v2/CHANGELOG.md similarity index 73% rename from vendor/github.com/dgraph-io/ristretto/CHANGELOG.md rename to vendor/github.com/dgraph-io/ristretto/v2/CHANGELOG.md index 3d18e39ed6..d004b9bb4c 100644 --- a/vendor/github.com/dgraph-io/ristretto/CHANGELOG.md +++ b/vendor/github.com/dgraph-io/ristretto/v2/CHANGELOG.md @@ -4,6 +4,64 @@ All notable changes to this project will be documented in this file. The format is based on [Keep a Changelog](http://keepachangelog.com/en/1.0.0/) and this project will adhere to [Semantic Versioning](http://semver.org/spec/v2.0.0.html) starting v1.0.0. 
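+
+The headline breaking change in v2.0.0 below is the generic API: `Cache` and
+`Config` now carry key and value type parameters, so values come back typed
+instead of as `interface{}`. A minimal before/after sketch of the migration,
+based on the v2 usage shown in this vendored copy's README (your key and value
+types and config values will differ):
+
+```go
+package main
+
+import (
+	"fmt"
+
+	"github.com/dgraph-io/ristretto/v2"
+)
+
+func main() {
+	// v1: ristretto.NewCache(&ristretto.Config{...}) returned a cache of
+	// interface{} values that had to be type-asserted after every Get.
+	// v2: the key and value types are part of the Config/Cache types.
+	cache, err := ristretto.NewCache(&ristretto.Config[string, int]{
+		NumCounters: 1e7,     // counters for admission; ~10x expected items.
+		MaxCost:     1 << 30, // total cost budget.
+		BufferItems: 64,      // recommended Get-buffer size.
+	})
+	if err != nil {
+		panic(err)
+	}
+	defer cache.Close()
+
+	cache.Set("answer", 42, 1)
+	cache.Wait() // flush buffered writes so Get can observe the Set.
+
+	if v, ok := cache.Get("answer"); ok {
+		fmt.Println(v + 1) // v is an int; no type assertion needed.
+	}
+}
+```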
+ +## [v2.0.0] - 2024-11-11 + +### Breaking + +- [Support generic API](https://github.com/dgraph-io/ristretto/pull/321) +- [Restrict generic key type to only those supported](https://github.com/dgraph-io/ristretto/pull/371) + +### Added + +- [Fix build with GOOS=js GOARCH=wasm](https://github.com/dgraph-io/ristretto/pull/375) + +### Fixed + +- [Disable mmap size check on arm arch as well as arm64](https://github.com/dgraph-io/ristretto/pull/366) +- [Upgrade xxhash dependency to v2.2.0](https://github.com/dgraph-io/ristretto/pull/367) +- [fix: race in close](https://github.com/dgraph-io/ristretto/pull/384) +- [Fix some memory leaks in TTL implementation](https://github.com/dgraph-io/ristretto/pull/358) +- [stop using rand.Seed()](https://github.com/dgraph-io/ristretto/pull/385) +- [chore(deps): bump the actions group with 4 updates](https://github.com/dgraph-io/ristretto/pull/392) +- [chore(deps): bump the minor group with 3 updates](https://github.com/dgraph-io/ristretto/pull/391) +- [chore(deps): bump golang.org/x/sys from 0.25.0 to 0.26.0 in the minor group](https://github.com/dgraph-io/ristretto/pull/402) +- [Remove the policy interface](https://github.com/dgraph-io/ristretto/pull/393) +- [Perform validation to ensure that the three parameters, NumCounters, …](https://github.com/dgraph-io/ristretto/pull/410) +- [set min version to go 1.21 in go.mod](https://github.com/dgraph-io/ristretto/pull/411) + +**Full Changelog**: https://github.com/dgraph-io/ristretto/compare/v0.2.0...v2.0.0 + + +## [v1.0.0] + +**This release is deprecated** + +## [v1.0.1] + +**This release is deprecated** + +## [v0.2.0] - 2024-10-06 + +### Added + +- [fix: support compilation to wasip1 by @achille-roussel](https://github.com/dgraph-io/ristretto/pull/344) +- [add config for cleanup ticker duration by @singhvikash11](https://github.com/dgraph-io/ristretto/pull/342) + +### Fixed +- [docs(readme): Use new Wait method by @angadn](https://github.com/dgraph-io/ristretto/pull/327) +- [docs: format example on readme by @rfyiamcool](https://github.com/dgraph-io/ristretto/pull/339) +- [Fix flakes in TestDropUpdates by @evanj](https://github.com/dgraph-io/ristretto/pull/334) +- [docs(Cache): document Wait, clarify Get by @evanj](https://github.com/dgraph-io/ristretto/pull/333) +- [chore: fix typo error by @proost](https://github.com/dgraph-io/ristretto/pull/341) +- [remove glog dependency by @jhawk28](https://github.com/dgraph-io/ristretto/pull/350) +- [fix(OnEvict): Set missing Expiration field on evicted items by @0x1ee7](https://github.com/dgraph-io/ristretto/pull/345) +- [uint32 -> uint64 in slice methods by @mocurin](https://github.com/dgraph-io/ristretto/pull/323) +- [fix: cleanupTicker not being stopped by @IlyaFloppy](https://github.com/dgraph-io/ristretto/pull/343) + +**Full Changelog**: https://github.com/dgraph-io/ristretto/compare/v0.1.1...v0.2.0 + + ## [0.1.1] - 2022-10-12 [0.1.1]: https://github.com/dgraph-io/ristretto/compare/v0.1.0..v0.1.1 @@ -21,6 +79,7 @@ incorporates CI steps in our repository. - [fix(z): Address inconsistent mremap return arguments with arm64](https://github.com/dgraph-io/ristretto/pull/309) - [fix(z): runtime error: index out of range for !amd64 env #287](https://github.com/dgraph-io/ristretto/pull/307) + ## [0.1.0] - 2021-06-03 [0.1.0]: https://github.com/dgraph-io/ristretto/compare/v0.0.3..v0.1.0 @@ -145,6 +204,7 @@ improve performance and reduce memory requirements. 
- Add histogram.Mean() method (#188) - Introduce Calloc: Manual Memory Management via jemalloc (#186) + ## [0.0.3] - 2020-07-06 [0.0.3]: https://github.com/dgraph-io/ristretto/compare/v0.0.2..v0.0.3 @@ -161,6 +221,7 @@ improve performance and reduce memory requirements. - Improve handling of updated items ([#168][]) - Fix droppedSets count while updating the item ([#171][]) + ## [0.0.2] - 2020-02-24 [0.0.2]: https://github.com/dgraph-io/ristretto/compare/v0.0.1..v0.0.2 @@ -172,7 +233,7 @@ improve performance and reduce memory requirements. ### Fixed - Fix the way metrics are handled for deletions. ([#111][]) -- Support nil `*Cache` values in `Clear` and `Close`. ([#119][]) +- Support nil `*Cache` values in `Clear` and `Close`. ([#119][]) - Delete item immediately. ([#113][]) - Remove key from policy after TTL eviction. ([#130][]) @@ -182,6 +243,7 @@ improve performance and reduce memory requirements. [#122]: https://github.com/dgraph-io/ristretto/issues/122 [#130]: https://github.com/dgraph-io/ristretto/issues/130 + ## 0.0.1 First release. Basic cache functionality based on a LFU policy. diff --git a/vendor/github.com/dgraph-io/ristretto/LICENSE b/vendor/github.com/dgraph-io/ristretto/v2/LICENSE similarity index 100% rename from vendor/github.com/dgraph-io/ristretto/LICENSE rename to vendor/github.com/dgraph-io/ristretto/v2/LICENSE diff --git a/vendor/github.com/dgraph-io/ristretto/v2/README.md b/vendor/github.com/dgraph-io/ristretto/v2/README.md new file mode 100644 index 0000000000..9b7b72d7e9 --- /dev/null +++ b/vendor/github.com/dgraph-io/ristretto/v2/README.md @@ -0,0 +1,154 @@ +# Ristretto +[![Go Doc](https://img.shields.io/badge/godoc-reference-blue.svg)](https://pkg.go.dev/github.com/dgraph-io/ristretto/v2) +[![ci-ristretto-tests](https://github.com/dgraph-io/ristretto/actions/workflows/ci-ristretto-tests.yml/badge.svg)](https://github.com/dgraph-io/ristretto/actions/workflows/ci-ristretto-tests.yml) +[![ci-ristretto-lint](https://github.com/dgraph-io/ristretto/actions/workflows/ci-ristretto-lint.yml/badge.svg)](https://github.com/dgraph-io/ristretto/actions/workflows/ci-ristretto-lint.yml) +[![Coverage Status](https://coveralls.io/repos/github/dgraph-io/ristretto/badge.svg?branch=main)](https://coveralls.io/github/dgraph-io/ristretto?branch=main) +[![Go Report Card](https://img.shields.io/badge/go%20report-A%2B-brightgreen)](https://goreportcard.com/report/github.com/dgraph-io/ristretto) + +Ristretto is a fast, concurrent cache library built with a focus on performance and correctness. + +The motivation to build Ristretto comes from the need for a contention-free cache in [Dgraph][]. + +[Dgraph]: https://github.com/dgraph-io/dgraph + + +## Features + +* **High Hit Ratios** - with our unique admission/eviction policy pairing, Ristretto's performance is best in class. + * **Eviction: SampledLFU** - on par with exact LRU and better performance on Search and Database traces. + * **Admission: TinyLFU** - extra performance with little memory overhead (12 bits per counter). +* **Fast Throughput** - we use a variety of techniques for managing contention and the result is excellent throughput. +* **Cost-Based Eviction** - any large new item deemed valuable can evict multiple smaller items (cost could be anything). +* **Fully Concurrent** - you can use as many goroutines as you want with little throughput degradation. +* **Metrics** - optional performance metrics for throughput, hit ratios, and other stats. 
+* **Simple API** - just figure out your ideal `Config` values and you're off and running. + + +## Status + +Ristretto is production-ready. See [Projects using Ristretto](#projects-using-ristretto). + + +## Usage + +```go +package main + +import ( + "fmt" + + "github.com/dgraph-io/ristretto/v2" +) + +func main() { + cache, err := ristretto.NewCache(&ristretto.Config[string, string]{ + NumCounters: 1e7, // number of keys to track frequency of (10M). + MaxCost: 1 << 30, // maximum cost of cache (1GB). + BufferItems: 64, // number of keys per Get buffer. + }) + if err != nil { + panic(err) + } + defer cache.Close() + + // set a value with a cost of 1 + cache.Set("key", "value", 1) + + // wait for value to pass through buffers + cache.Wait() + + // get value from cache + value, found := cache.Get("key") + if !found { + panic("missing value") + } + fmt.Println(value) + + // del value from cache + cache.Del("key") +} +``` + + +## Benchmarks + +The benchmarks can be found in https://github.com/dgraph-io/benchmarks/tree/master/cachebench/ristretto. + +### Hit Ratios for Search + +This trace is described as "disk read accesses initiated by a large commercial +search engine in response to various web search requests." + +

+*(The benchmark plots are not reproduced in this vendored copy; see the benchmarks repository linked above.)*
+
+### Hit Ratio for Database
+
+This trace is described as "a database server running at a commercial site
+running an ERP application on top of a commercial database."
+
+### Hit Ratio for Looping
+
+This trace demonstrates a looping access pattern.
+
+### Hit Ratio for CODASYL
+
+This trace is described as "references to a CODASYL database for a one hour period."
+
+### Throughput for Mixed Workload
+
+### Throughput for Read Workload
+
+### Throughput for Write Workload
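+
+Since the plots are not reproduced here, a practical substitute is to measure
+the hit ratio on your own workload with the `Metrics` flag. A rough sketch,
+assuming the `Metrics.Ratio` accessor exposed by this vendored copy (the loop
+below is a toy stand-in for a real access trace):
+
+```go
+package main
+
+import (
+	"fmt"
+
+	"github.com/dgraph-io/ristretto/v2"
+)
+
+func main() {
+	cache, err := ristretto.NewCache(&ristretto.Config[uint64, uint64]{
+		NumCounters: 1e6,
+		MaxCost:     100_000, // budget of 100k unit-cost items.
+		BufferItems: 64,
+		Metrics:     true, // adds overhead; enable only while measuring.
+	})
+	if err != nil {
+		panic(err)
+	}
+	defer cache.Close()
+
+	for i := uint64(0); i < 1_000_000; i++ {
+		k := i % 200_000 // cycling toy workload, larger than MaxCost on purpose.
+		if _, ok := cache.Get(k); !ok {
+			cache.Set(k, k, 1)
+		}
+	}
+	cache.Wait()
+	fmt.Printf("hit ratio: %.3f\n", cache.Metrics.Ratio())
+}
+```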
+ + +## Projects Using Ristretto + +Below is a list of known projects that use Ristretto: + +- [Badger](https://github.com/dgraph-io/badger) - Embeddable key-value DB in Go +- [Dgraph](https://github.com/dgraph-io/dgraph) - Horizontally scalable and distributed GraphQL database with a graph backend + + +## FAQ + +### How are you achieving this performance? What shortcuts are you taking? + +We go into detail in the [Ristretto blog post](https://blog.dgraph.io/post/introducing-ristretto-high-perf-go-cache/), +but in short: our throughput performance can be attributed to a mix of batching and eventual consistency. Our hit ratio +performance is mostly due to an excellent [admission policy](https://arxiv.org/abs/1512.00727) and SampledLFU eviction policy. + +As for "shortcuts," the only thing Ristretto does that could be construed as one is dropping some Set calls. That means +a Set call for a new item (updates are guaranteed) isn't guaranteed to make it into the cache. The new item could be +dropped at two points: when passing through the Set buffer or when passing through the admission policy. However, this +doesn't affect hit ratios much at all as we expect the most popular items to be Set multiple times and eventually make +it in the cache. + +### Is Ristretto distributed? + +No, it's just like any other Go library that you can import into your project and use in a single process. diff --git a/vendor/github.com/dgraph-io/ristretto/cache.go b/vendor/github.com/dgraph-io/ristretto/v2/cache.go similarity index 70% rename from vendor/github.com/dgraph-io/ristretto/cache.go rename to vendor/github.com/dgraph-io/ristretto/v2/cache.go index 7226245bcc..0f6d416335 100644 --- a/vendor/github.com/dgraph-io/ristretto/cache.go +++ b/vendor/github.com/dgraph-io/ristretto/v2/cache.go @@ -28,7 +28,7 @@ import ( "time" "unsafe" - "github.com/dgraph-io/ristretto/z" + "github.com/dgraph-io/ristretto/v2/z" ) var ( @@ -36,40 +36,46 @@ var ( setBufSize = 32 * 1024 ) -type itemCallback func(*Item) +const itemSize = int64(unsafe.Sizeof(storeItem[any]{})) -const itemSize = int64(unsafe.Sizeof(storeItem{})) +func zeroValue[T any]() T { + var zero T + return zero +} + +// Key is the generic type to represent the keys type in key-value pair of the cache. +type Key = z.Key // Cache is a thread-safe implementation of a hashmap with a TinyLFU admission // policy and a Sampled LFU eviction policy. You can use the same Cache instance // from as many goroutines as you want. -type Cache struct { - // store is the central concurrent hashmap where key-value items are stored. - store store - // policy determines what gets let in to the cache and what gets kicked out. - policy policy +type Cache[K Key, V any] struct { + // storedItems is the central concurrent hashmap where key-value items are stored. + storedItems store[V] + // cachePolicy determines what gets let in to the cache and what gets kicked out. + cachePolicy *defaultPolicy[V] // getBuf is a custom ring buffer implementation that gets pushed to when // keys are read. getBuf *ringBuffer // setBuf is a buffer allowing us to batch/drop Sets during times of high // contention. - setBuf chan *Item + setBuf chan *Item[V] // onEvict is called for item evictions. - onEvict itemCallback + onEvict func(*Item[V]) // onReject is called when an item is rejected via admission policy. - onReject itemCallback + onReject func(*Item[V]) // onExit is called whenever a value goes out of scope from the cache. 
-	onExit (func(interface{}))
+	onExit (func(V))
 	// KeyToHash function is used to customize the key hashing algorithm.
 	// Each key will be hashed using the provided function. If keyToHash value
 	// is not set, the default keyToHash function is used.
-	keyToHash func(interface{}) (uint64, uint64)
+	keyToHash func(K) (uint64, uint64)
 	// stop is used to stop the processItems goroutine.
 	stop chan struct{}
 	// indicates whether cache is closed.
-	isClosed bool
+	isClosed atomic.Bool
 	// cost calculates cost from a value.
-	cost func(value interface{}) int64
+	cost func(value V) int64
 	// ignoreInternalCost dictates whether to ignore the cost of internally storing
 	// the item in the cost calculation.
 	ignoreInternalCost bool
@@ -81,7 +87,7 @@
 }
 
 // Config is passed to NewCache for creating new Cache instances.
-type Config struct {
+type Config[K Key, V any] struct {
 	// NumCounters determines the number of counters (keys) to keep that hold
 	// access frequency information. It's generally a good idea to have more
 	// counters than the max cache capacity, as this will improve eviction
@@ -93,7 +99,15 @@
 	// counter for the bloom filter). Note that the number of counters is
 	// internally rounded up to the nearest power of 2, so the space usage
 	// may be a little larger than 3 bytes * NumCounters.
+	//
+	// We've seen good performance in setting this to 10x the number of items
+	// you expect to keep in the cache when full.
 	NumCounters int64
+
+	// MaxCost is how eviction decisions are made. For example, if MaxCost is
+	// 100 and a new item with a cost of 1 increases total cache cost to 101,
+	// 1 item will be evicted.
+	//
 	// MaxCost can be considered as the cache capacity, in whatever units you
 	// choose to use.
 	//
@@ -102,39 +116,70 @@
 	// the `cost` parameter for calls to Set. If new items are accepted, the
 	// eviction process will take care of making room for the new item and not
 	// overflowing the MaxCost value.
+	//
+	// MaxCost could be anything as long as it matches how you're using the cost
+	// values when calling Set.
 	MaxCost int64
+
 	// BufferItems determines the size of Get buffers.
 	//
 	// Unless you have a rare use case, using `64` as the BufferItems value
 	// results in good performance.
+	//
+	// If for some reason you see Get performance decreasing with lots of
+	// contention (you shouldn't), try increasing this value in increments of 64.
+	// This is a fine-tuning mechanism and you probably won't have to touch this.
 	BufferItems int64
-	// Metrics determines whether cache statistics are kept during the cache's
-	// lifetime. There *is* some overhead to keeping statistics, so you should
-	// only set this flag to true when testing or throughput performance isn't a
-	// major factor.
+
+	// Metrics is true when you want a variety of stats about the cache.
+	// There is some overhead to keeping statistics, so you should only set this
+	// flag to true when testing or throughput performance isn't a major factor.
 	Metrics bool
-	// OnEvict is called for every eviction and passes the hashed key, value,
-	// and cost to the function.
-	OnEvict func(item *Item)
+
+	// OnEvict is called for every eviction with the evicted item.
+	OnEvict func(item *Item[V])
+
 	// OnReject is called for every rejection done via the policy.
-	OnReject func(item *Item)
+	OnReject func(item *Item[V])
+
 	// OnExit is called whenever a value is removed from cache. This can be
 	// used to do manual memory deallocation. Would also be called on eviction
-	// and rejection of the value.
-	OnExit func(val interface{})
+	// as well as on rejection of the value.
+	OnExit func(val V)
+
 	// KeyToHash function is used to customize the key hashing algorithm.
 	// Each key will be hashed using the provided function. If keyToHash value
 	// is not set, the default keyToHash function is used.
-	KeyToHash func(key interface{}) (uint64, uint64)
-	// Cost evaluates a value and outputs a corresponding cost. This function
-	// is ran after Set is called for a new item or an item update with a cost
-	// param of 0.
-	Cost func(value interface{}) int64
+	//
+	// Ristretto has a variety of defaults depending on the underlying interface type
+	// (https://github.com/dgraph-io/ristretto/blob/master/z/z.go#L19-L41).
+	//
+	// Note that if you want 128bit hashes you should use both the values
+	// in the return of the function. If you want to use 64bit hashes, you can
+	// just return the first uint64 and return 0 for the second uint64.
+	KeyToHash func(key K) (uint64, uint64)
+
+	// Cost evaluates a value and outputs a corresponding cost. This function is run
+	// after Set is called for a new item or an item is updated with a cost param of 0.
+	//
+	// Cost is an optional function you can pass to the Config in order to evaluate
+	// item cost at runtime, and only when the Set call isn't going to be dropped. This
+	// is useful if calculating item cost is particularly expensive and you don't want to
+	// waste time on items that will be dropped anyway.
+	//
+	// To signal to Ristretto that you'd like to use this Cost function:
+	// 1. Set the Cost field to a non-nil function.
+	// 2. When calling Set for new items or item updates, use a `cost` of 0.
+	Cost func(value V) int64
+
 	// IgnoreInternalCost set to true indicates to the cache that the cost of
 	// internally storing the value should be ignored. This is useful when the
 	// cost passed to set is not using bytes as units. Keep in mind that setting
 	// this to true will increase the memory usage.
 	IgnoreInternalCost bool
+
+	// TtlTickerDurationInSec sets the interval, in seconds, of the ticker
+	// that cleans up keys on TTL expiry.
+	TtlTickerDurationInSec int64
 }
 
 type itemFlag byte
@@ -145,59 +190,68 @@ const (
 	itemUpdate
 )
 
-// Item is passed to setBuf so items can eventually be added to the cache.
-type Item struct {
+// Item is a full representation of what's stored in the cache for each key-value pair.
+type Item[V any] struct {
 	flag       itemFlag
 	Key        uint64
 	Conflict   uint64
-	Value      interface{}
+	Value      V
 	Cost       int64
 	Expiration time.Time
 	wg         *sync.WaitGroup
 }
 
 // NewCache returns a new Cache instance and any configuration errors, if any.
-func NewCache(config *Config) (*Cache, error) {
+func NewCache[K Key, V any](config *Config[K, V]) (*Cache[K, V], error) {
 	switch {
 	case config.NumCounters == 0:
 		return nil, errors.New("NumCounters can't be zero")
+	case config.NumCounters < 0:
+		return nil, errors.New("NumCounters can't be a negative number")
 	case config.MaxCost == 0:
 		return nil, errors.New("MaxCost can't be zero")
+	case config.MaxCost < 0:
+		return nil, errors.New("MaxCost can't be a negative number")
 	case config.BufferItems == 0:
 		return nil, errors.New("BufferItems can't be zero")
-	}
-	policy := newPolicy(config.NumCounters, config.MaxCost)
-	cache := &Cache{
-		store:  newStore(),
-		policy: policy,
+	case config.BufferItems < 0:
+		return nil, errors.New("BufferItems can't be a negative number")
+	case config.TtlTickerDurationInSec == 0:
+		config.TtlTickerDurationInSec = bucketDurationSecs
+	}
+	policy := newPolicy[V](config.NumCounters, config.MaxCost)
+	cache := &Cache[K, V]{
+		storedItems: newStore[V](),
+		cachePolicy: policy,
 		getBuf:             newRingBuffer(policy, config.BufferItems),
-		setBuf:             make(chan *Item, setBufSize),
+		setBuf:             make(chan *Item[V], setBufSize),
 		keyToHash:          config.KeyToHash,
 		stop:               make(chan struct{}),
 		cost:               config.Cost,
 		ignoreInternalCost: config.IgnoreInternalCost,
-		cleanupTicker:      time.NewTicker(time.Duration(bucketDurationSecs) * time.Second / 2),
+		cleanupTicker:      time.NewTicker(time.Duration(config.TtlTickerDurationInSec) * time.Second / 2),
 	}
-	cache.onExit = func(val interface{}) {
-		if config.OnExit != nil && val != nil {
+	cache.onExit = func(val V) {
+		if config.OnExit != nil {
 			config.OnExit(val)
 		}
 	}
-	cache.onEvict = func(item *Item) {
+	cache.onEvict = func(item *Item[V]) {
 		if config.OnEvict != nil {
 			config.OnEvict(item)
 		}
 		cache.onExit(item.Value)
 	}
-	cache.onReject = func(item *Item) {
+	cache.onReject = func(item *Item[V]) {
 		if config.OnReject != nil {
 			config.OnReject(item)
 		}
 		cache.onExit(item.Value)
 	}
 	if cache.keyToHash == nil {
-		cache.keyToHash = z.KeyToHash
+		cache.keyToHash = z.KeyToHash[K]
 	}
+
 	if config.Metrics {
 		cache.collectMetrics()
 	}
@@ -208,26 +262,29 @@ func NewCache(config *Config) (*Cache, error) {
 	return cache, nil
 }
 
-func (c *Cache) Wait() {
-	if c == nil || c.isClosed {
+// Wait blocks until all buffered writes have been applied. This ensures a call to Set()
+// will be visible to future calls to Get().
+func (c *Cache[K, V]) Wait() {
+	if c == nil || c.isClosed.Load() {
 		return
 	}
 	wg := &sync.WaitGroup{}
 	wg.Add(1)
-	c.setBuf <- &Item{wg: wg}
+	c.setBuf <- &Item[V]{wg: wg}
 	wg.Wait()
 }
 
 // Get returns the value (if any) and a boolean representing whether the
 // value was found or not. The value can be nil and the boolean can be true at
-// the same time.
-func (c *Cache) Get(key interface{}) (interface{}, bool) {
-	if c == nil || c.isClosed || key == nil {
-		return nil, false
+// the same time. Get will not return expired items.
+func (c *Cache[K, V]) Get(key K) (V, bool) {
+	if c == nil || c.isClosed.Load() {
+		return zeroValue[V](), false
 	}
 	keyHash, conflictHash := c.keyToHash(key)
+
 	c.getBuf.Push(keyHash)
-	value, ok := c.store.Get(keyHash, conflictHash)
+	value, ok := c.storedItems.Get(keyHash, conflictHash)
 	if ok {
 		c.Metrics.add(hit, keyHash, 1)
 	} else {
@@ -245,7 +302,13 @@
 // To dynamically evaluate the items cost using the Config.Coster function, set
 // the cost parameter to 0 and Coster will be ran when needed in order to find
 // the items true cost.
-func (c *Cache) Set(key, value interface{}, cost int64) bool {
+//
+// Set writes the value of type V as is. If type V is a pointer type, it is OK
+// to update the memory pointed to by the pointer. Updating the pointer itself
+// will not be reflected in the cache. Be careful when using slice types as the
+// value type V. Calling `append` may update the underlying array pointer, which
+// will not be reflected in the cache.
+func (c *Cache[K, V]) Set(key K, value V, cost int64) bool {
 	return c.SetWithTTL(key, value, cost, 0*time.Second)
 }
@@ -253,8 +316,10 @@
 // after the specified TTL (time to live) has passed. A zero value means the value never
 // expires, which is identical to calling Set. A negative value is a no-op and the value
 // is discarded.
-func (c *Cache) SetWithTTL(key, value interface{}, cost int64, ttl time.Duration) bool {
-	if c == nil || c.isClosed || key == nil {
+//
+// See Set for more information.
+func (c *Cache[K, V]) SetWithTTL(key K, value V, cost int64, ttl time.Duration) bool {
+	if c == nil || c.isClosed.Load() {
 		return false
 	}
@@ -264,14 +329,14 @@
 		// No expiration.
 		break
 	case ttl < 0:
-		// Treat this a a no-op.
+		// Treat this as a no-op.
 		return false
 	default:
 		expiration = time.Now().Add(ttl)
 	}
 	keyHash, conflictHash := c.keyToHash(key)
-	i := &Item{
+	i := &Item[V]{
 		flag:       itemNew,
 		Key:        keyHash,
 		Conflict:   conflictHash,
@@ -281,18 +346,18 @@
 	// cost is eventually updated. The expiration must also be immediately updated
 	// to prevent items from being prematurely removed from the map.
-	if prev, ok := c.store.Update(i); ok {
+	if prev, ok := c.storedItems.Update(i); ok {
 		c.onExit(prev)
 		i.flag = itemUpdate
 	}
-	// Attempt to send item to policy.
+	// Attempt to send item to cachePolicy.
 	select {
 	case c.setBuf <- i:
 		return true
 	default:
 		if i.flag == itemUpdate {
 			// Return true if this was an update operation since we've already
-			// updated the store. For all the other operations (set/delete), we
+			// updated the storedItems. For all the other operations (set/delete), we
 			// return false which means the item was not inserted.
 			return true
 		}
@@ -302,19 +367,19 @@
 }
 
 // Del deletes the key-value item from the cache if it exists.
-func (c *Cache) Del(key interface{}) {
-	if c == nil || c.isClosed || key == nil {
+func (c *Cache[K, V]) Del(key K) {
+	if c == nil || c.isClosed.Load() {
 		return
 	}
 	keyHash, conflictHash := c.keyToHash(key)
 	// Delete immediately.
-	_, prev := c.store.Del(keyHash, conflictHash)
+	_, prev := c.storedItems.Del(keyHash, conflictHash)
 	c.onExit(prev)
 	// If we've set an item, it would be applied slightly later.
 	// So we must push the same item to `setBuf` with the deletion flag.
 	// This ensures that if a set is followed by a delete, it will be
 	// applied in the correct order.
-	c.setBuf <- &Item{
+	c.setBuf <- &Item[V]{
 		flag:     itemDelete,
 		Key:      keyHash,
 		Conflict: conflictHash,
@@ -323,18 +388,18 @@
 // GetTTL returns the TTL for the specified key and a bool that is true if the
 // item was found and is not expired.
-func (c *Cache) GetTTL(key interface{}) (time.Duration, bool) { - if c == nil || key == nil { +func (c *Cache[K, V]) GetTTL(key K) (time.Duration, bool) { + if c == nil { return 0, false } keyHash, conflictHash := c.keyToHash(key) - if _, ok := c.store.Get(keyHash, conflictHash); !ok { + if _, ok := c.storedItems.Get(keyHash, conflictHash); !ok { // not found return 0, false } - expiration := c.store.Expiration(keyHash) + expiration := c.storedItems.Expiration(keyHash) if expiration.IsZero() { // found but no expiration return 0, true @@ -349,8 +414,8 @@ func (c *Cache) GetTTL(key interface{}) (time.Duration, bool) { } // Close stops all goroutines and closes all channels. -func (c *Cache) Close() { - if c == nil || c.isClosed { +func (c *Cache[K, V]) Close() { + if c == nil || c.isClosed.Load() { return } c.Clear() @@ -359,15 +424,16 @@ func (c *Cache) Close() { c.stop <- struct{}{} close(c.stop) close(c.setBuf) - c.policy.Close() - c.isClosed = true + c.cachePolicy.Close() + c.cleanupTicker.Stop() + c.isClosed.Store(true) } -// Clear empties the hashmap and zeroes all policy counters. Note that this is +// Clear empties the hashmap and zeroes all cachePolicy counters. Note that this is // not an atomic operation (but that shouldn't be a problem as it's assumed that // Set/Get calls won't be occurring until after this). -func (c *Cache) Clear() { - if c == nil || c.isClosed { +func (c *Cache[K, V]) Clear() { + if c == nil || c.isClosed.Load() { return } // Block until processItems goroutine is returned. @@ -383,7 +449,7 @@ loop: continue } if i.flag != itemUpdate { - // In itemUpdate, the value is already set in the store. So, no need to call + // In itemUpdate, the value is already set in the storedItems. So, no need to call // onEvict here. c.onEvict(i) } @@ -392,9 +458,9 @@ loop: } } - // Clear value hashmap and policy data. - c.policy.Clear() - c.store.Clear(c.onEvict) + // Clear value hashmap and cachePolicy data. + c.cachePolicy.Clear() + c.storedItems.Clear(c.onEvict) // Only reset metrics if they're enabled. if c.Metrics != nil { c.Metrics.Clear() @@ -404,23 +470,23 @@ loop: } // MaxCost returns the max cost of the cache. -func (c *Cache) MaxCost() int64 { +func (c *Cache[K, V]) MaxCost() int64 { if c == nil { return 0 } - return c.policy.MaxCost() + return c.cachePolicy.MaxCost() } // UpdateMaxCost updates the maxCost of an existing cache. -func (c *Cache) UpdateMaxCost(maxCost int64) { +func (c *Cache[K, V]) UpdateMaxCost(maxCost int64) { if c == nil { return } - c.policy.UpdateMaxCost(maxCost) + c.cachePolicy.UpdateMaxCost(maxCost) } // processItems is ran by goroutines processing the Set buffer. -func (c *Cache) processItems() { +func (c *Cache[K, V]) processItems() { startTs := make(map[uint64]time.Time) numToKeep := 100000 // TODO: Make this configurable via options. 
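A pattern worth noting across this patch: fields that the old vendored code updated
through `sync/atomic` package functions (`doneUntil` and `lastIndex` in badger's
watermark, `isClosed` in the hunks above) become typed atomics (`atomic.Uint64`,
`atomic.Bool`) in the new code. A minimal sketch of the before/after, with
hypothetical names rather than code from this patch:

```go
package main

import (
	"fmt"
	"sync/atomic"
)

// Old style: a raw field, safe only if every reader and writer remembers to
// go through sync/atomic, and if the field stays 64-bit aligned on 32-bit
// platforms (hence the "put them at the beginning of the structure" comment
// in the old watermark code).
type oldFlag struct{ closed uint64 }

func (f *oldFlag) set()      { atomic.StoreUint64(&f.closed, 1) }
func (f *oldFlag) get() bool { return atomic.LoadUint64(&f.closed) == 1 }

// New style: the typed atomic guarantees alignment and makes a non-atomic
// read or write a compile error instead of a latent data race.
type newFlag struct{ closed atomic.Bool }

func (f *newFlag) set()      { f.closed.Store(true) }
func (f *newFlag) get() bool { return f.closed.Load() }

func main() {
	var old oldFlag
	old.set()
	var cur newFlag
	cur.set()
	fmt.Println(old.get(), cur.get()) // true true
}
```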
@@ -438,7 +504,7 @@ func (c *Cache) processItems() { } } } - onEvict := func(i *Item) { + onEvict := func(i *Item[V]) { if ts, has := startTs[i.Key]; has { c.Metrics.trackEviction(int64(time.Since(ts) / time.Second)) delete(startTs, i.Key) @@ -466,29 +532,29 @@ func (c *Cache) processItems() { switch i.flag { case itemNew: - victims, added := c.policy.Add(i.Key, i.Cost) + victims, added := c.cachePolicy.Add(i.Key, i.Cost) if added { - c.store.Set(i) + c.storedItems.Set(i) c.Metrics.add(keyAdd, i.Key, 1) trackAdmission(i.Key) } else { c.onReject(i) } for _, victim := range victims { - victim.Conflict, victim.Value = c.store.Del(victim.Key, 0) + victim.Conflict, victim.Value = c.storedItems.Del(victim.Key, 0) onEvict(victim) } case itemUpdate: - c.policy.Update(i.Key, i.Cost) + c.cachePolicy.Update(i.Key, i.Cost) case itemDelete: - c.policy.Del(i.Key) // Deals with metrics updates. - _, val := c.store.Del(i.Key, i.Conflict) + c.cachePolicy.Del(i.Key) // Deals with metrics updates. + _, val := c.storedItems.Del(i.Key, i.Conflict) c.onExit(val) } case <-c.cleanupTicker.C: - c.store.Cleanup(c.policy, onEvict) + c.storedItems.Cleanup(c.cachePolicy, onEvict) case <-c.stop: return } @@ -497,9 +563,9 @@ func (c *Cache) processItems() { // collectMetrics just creates a new *Metrics instance and adds the pointers // to the cache and policy instances. -func (c *Cache) collectMetrics() { +func (c *Cache[K, V]) collectMetrics() { c.Metrics = newMetrics() - c.policy.CollectMetrics(c.Metrics) + c.cachePolicy.CollectMetrics(c.Metrics) } type metricType int diff --git a/vendor/github.com/dgraph-io/ristretto/policy.go b/vendor/github.com/dgraph-io/ristretto/v2/policy.go similarity index 78% rename from vendor/github.com/dgraph-io/ristretto/policy.go rename to vendor/github.com/dgraph-io/ristretto/v2/policy.go index bf23f91fd9..d4e340ad42 100644 --- a/vendor/github.com/dgraph-io/ristretto/policy.go +++ b/vendor/github.com/dgraph-io/ristretto/v2/policy.go @@ -21,7 +21,7 @@ import ( "sync" "sync/atomic" - "github.com/dgraph-io/ristretto/z" + "github.com/dgraph-io/ristretto/v2/z" ) const ( @@ -30,43 +30,11 @@ const ( lfuSample = 5 ) -// policy is the interface encapsulating eviction/admission behavior. -// -// TODO: remove this interface and just rename defaultPolicy to policy, as we -// are probably only going to use/implement/maintain one policy. -type policy interface { - ringConsumer - // Add attempts to Add the key-cost pair to the Policy. It returns a slice - // of evicted keys and a bool denoting whether or not the key-cost pair - // was added. If it returns true, the key should be stored in cache. - Add(uint64, int64) ([]*Item, bool) - // Has returns true if the key exists in the Policy. - Has(uint64) bool - // Del deletes the key from the Policy. - Del(uint64) - // Cap returns the available capacity. - Cap() int64 - // Close stops all goroutines and closes all channels. - Close() - // Update updates the cost value for the key. - Update(uint64, int64) - // Cost returns the cost value of a key or -1 if missing. - Cost(uint64) int64 - // Optionally, set stats object to track how policy is performing. - CollectMetrics(*Metrics) - // Clear zeroes out all counters and clears hashmaps. - Clear() - // MaxCost returns the current max cost of the cache policy. - MaxCost() int64 - // UpdateMaxCost updates the max cost of the cache policy. 
- UpdateMaxCost(int64) -} - -func newPolicy(numCounters, maxCost int64) policy { - return newDefaultPolicy(numCounters, maxCost) -} - -type defaultPolicy struct { +func newPolicy[V any](numCounters, maxCost int64) *defaultPolicy[V] { + return newDefaultPolicy[V](numCounters, maxCost) +} + +type defaultPolicy[V any] struct { sync.Mutex admit *tinyLFU evict *sampledLFU @@ -76,8 +44,8 @@ type defaultPolicy struct { metrics *Metrics } -func newDefaultPolicy(numCounters, maxCost int64) *defaultPolicy { - p := &defaultPolicy{ +func newDefaultPolicy[V any](numCounters, maxCost int64) *defaultPolicy[V] { + p := &defaultPolicy[V]{ admit: newTinyLFU(numCounters), evict: newSampledLFU(maxCost), itemsCh: make(chan []uint64, 3), @@ -87,7 +55,7 @@ func newDefaultPolicy(numCounters, maxCost int64) *defaultPolicy { return p } -func (p *defaultPolicy) CollectMetrics(metrics *Metrics) { +func (p *defaultPolicy[V]) CollectMetrics(metrics *Metrics) { p.metrics = metrics p.evict.metrics = metrics } @@ -97,7 +65,7 @@ type policyPair struct { cost int64 } -func (p *defaultPolicy) processItems() { +func (p *defaultPolicy[V]) processItems() { for { select { case items := <-p.itemsCh: @@ -110,7 +78,7 @@ func (p *defaultPolicy) processItems() { } } -func (p *defaultPolicy) Push(keys []uint64) bool { +func (p *defaultPolicy[V]) Push(keys []uint64) bool { if p.isClosed { return false } @@ -132,7 +100,7 @@ func (p *defaultPolicy) Push(keys []uint64) bool { // Add decides whether the item with the given key and cost should be accepted by // the policy. It returns the list of victims that have been evicted and a boolean // indicating whether the incoming item should be accepted. -func (p *defaultPolicy) Add(key uint64, cost int64) ([]*Item, bool) { +func (p *defaultPolicy[V]) Add(key uint64, cost int64) ([]*Item[V], bool) { p.Lock() defer p.Unlock() @@ -166,7 +134,7 @@ func (p *defaultPolicy) Add(key uint64, cost int64) ([]*Item, bool) { // O(lg N). sample := make([]*policyPair, 0, lfuSample) // As items are evicted they will be appended to victims. - victims := make([]*Item, 0) + victims := make([]*Item[V], 0) // Delete victims until there's enough space or a minKey is found that has // more hits than incoming item. @@ -196,7 +164,7 @@ func (p *defaultPolicy) Add(key uint64, cost int64) ([]*Item, bool) { sample[minId] = sample[len(sample)-1] sample = sample[:len(sample)-1] // Store victim in evicted victims slice. 
- victims = append(victims, &Item{ + victims = append(victims, &Item[V]{ Key: minKey, Conflict: 0, Cost: minCost, @@ -208,33 +176,33 @@ func (p *defaultPolicy) Add(key uint64, cost int64) ([]*Item, bool) { return victims, true } -func (p *defaultPolicy) Has(key uint64) bool { +func (p *defaultPolicy[V]) Has(key uint64) bool { p.Lock() _, exists := p.evict.keyCosts[key] p.Unlock() return exists } -func (p *defaultPolicy) Del(key uint64) { +func (p *defaultPolicy[V]) Del(key uint64) { p.Lock() p.evict.del(key) p.Unlock() } -func (p *defaultPolicy) Cap() int64 { +func (p *defaultPolicy[V]) Cap() int64 { p.Lock() - capacity := int64(p.evict.getMaxCost() - p.evict.used) + capacity := p.evict.getMaxCost() - p.evict.used p.Unlock() return capacity } -func (p *defaultPolicy) Update(key uint64, cost int64) { +func (p *defaultPolicy[V]) Update(key uint64, cost int64) { p.Lock() p.evict.updateIfHas(key, cost) p.Unlock() } -func (p *defaultPolicy) Cost(key uint64) int64 { +func (p *defaultPolicy[V]) Cost(key uint64) int64 { p.Lock() if cost, found := p.evict.keyCosts[key]; found { p.Unlock() @@ -244,14 +212,14 @@ func (p *defaultPolicy) Cost(key uint64) int64 { return -1 } -func (p *defaultPolicy) Clear() { +func (p *defaultPolicy[V]) Clear() { p.Lock() p.admit.clear() p.evict.clear() p.Unlock() } -func (p *defaultPolicy) Close() { +func (p *defaultPolicy[V]) Close() { if p.isClosed { return } @@ -263,14 +231,14 @@ func (p *defaultPolicy) Close() { p.isClosed = true } -func (p *defaultPolicy) MaxCost() int64 { +func (p *defaultPolicy[V]) MaxCost() int64 { if p == nil || p.evict == nil { return 0 } return p.evict.getMaxCost() } -func (p *defaultPolicy) UpdateMaxCost(maxCost int64) { +func (p *defaultPolicy[V]) UpdateMaxCost(maxCost int64) { if p == nil || p.evict == nil { return } @@ -346,7 +314,7 @@ func (p *sampledLFU) updateIfHas(key uint64, cost int64) bool { p.metrics.add(keyUpdate, key, 1) if prev > cost { diff := prev - cost - p.metrics.add(costAdd, key, ^uint64(uint64(diff)-1)) + p.metrics.add(costAdd, key, ^(uint64(diff) - 1)) } else if cost > prev { diff := cost - prev p.metrics.add(costAdd, key, uint64(diff)) diff --git a/vendor/github.com/dgraph-io/ristretto/ring.go b/vendor/github.com/dgraph-io/ristretto/v2/ring.go similarity index 100% rename from vendor/github.com/dgraph-io/ristretto/ring.go rename to vendor/github.com/dgraph-io/ristretto/v2/ring.go diff --git a/vendor/github.com/dgraph-io/ristretto/sketch.go b/vendor/github.com/dgraph-io/ristretto/v2/sketch.go similarity index 86% rename from vendor/github.com/dgraph-io/ristretto/sketch.go rename to vendor/github.com/dgraph-io/ristretto/v2/sketch.go index 6368d2bde0..efda9a4d64 100644 --- a/vendor/github.com/dgraph-io/ristretto/sketch.go +++ b/vendor/github.com/dgraph-io/ristretto/v2/sketch.go @@ -14,14 +14,6 @@ * limitations under the License. */ -// This package includes multiple probabalistic data structures needed for -// admission/eviction metadata. Most are Counting Bloom Filter variations, but -// a caching-specific feature that is also required is a "freshness" mechanism, -// which basically serves as a "lifetime" process. This freshness mechanism -// was described in the original TinyLFU paper [1], but other mechanisms may -// be better suited for certain data distributions. 
-// -// [1]: https://arxiv.org/abs/1512.00727 package ristretto import ( @@ -103,7 +95,7 @@ func newCmRow(numCounters int64) cmRow { } func (r cmRow) get(n uint64) byte { - return byte(r[n/2]>>((n&1)*4)) & 0x0f + return (r[n/2] >> ((n & 1) * 4)) & 0x0f } func (r cmRow) increment(n uint64) { diff --git a/vendor/github.com/dgraph-io/ristretto/store.go b/vendor/github.com/dgraph-io/ristretto/v2/store.go similarity index 67% rename from vendor/github.com/dgraph-io/ristretto/store.go rename to vendor/github.com/dgraph-io/ristretto/v2/store.go index e42a98b787..dfcfaa8db3 100644 --- a/vendor/github.com/dgraph-io/ristretto/store.go +++ b/vendor/github.com/dgraph-io/ristretto/v2/store.go @@ -22,10 +22,10 @@ import ( ) // TODO: Do we need this to be a separate struct from Item? -type storeItem struct { +type storeItem[V any] struct { key uint64 conflict uint64 - value interface{} + value V expiration time.Time } @@ -35,58 +35,58 @@ type storeItem struct { // in Ristretto. // // Every store is safe for concurrent usage. -type store interface { +type store[V any] interface { // Get returns the value associated with the key parameter. - Get(uint64, uint64) (interface{}, bool) + Get(uint64, uint64) (V, bool) // Expiration returns the expiration time for this key. Expiration(uint64) time.Time // Set adds the key-value pair to the Map or updates the value if it's // already present. The key-value pair is passed as a pointer to an // item object. - Set(*Item) + Set(*Item[V]) // Del deletes the key-value pair from the Map. - Del(uint64, uint64) (uint64, interface{}) + Del(uint64, uint64) (uint64, V) // Update attempts to update the key with a new value and returns true if // successful. - Update(*Item) (interface{}, bool) + Update(*Item[V]) (V, bool) // Cleanup removes items that have an expired TTL. - Cleanup(policy policy, onEvict itemCallback) + Cleanup(policy *defaultPolicy[V], onEvict func(item *Item[V])) // Clear clears all contents of the store. - Clear(onEvict itemCallback) + Clear(onEvict func(item *Item[V])) } // newStore returns the default store implementation. -func newStore() store { - return newShardedMap() +func newStore[V any]() store[V] { + return newShardedMap[V]() } const numShards uint64 = 256 -type shardedMap struct { - shards []*lockedMap - expiryMap *expirationMap +type shardedMap[V any] struct { + shards []*lockedMap[V] + expiryMap *expirationMap[V] } -func newShardedMap() *shardedMap { - sm := &shardedMap{ - shards: make([]*lockedMap, int(numShards)), - expiryMap: newExpirationMap(), +func newShardedMap[V any]() *shardedMap[V] { + sm := &shardedMap[V]{ + shards: make([]*lockedMap[V], int(numShards)), + expiryMap: newExpirationMap[V](), } for i := range sm.shards { - sm.shards[i] = newLockedMap(sm.expiryMap) + sm.shards[i] = newLockedMap[V](sm.expiryMap) } return sm } -func (sm *shardedMap) Get(key, conflict uint64) (interface{}, bool) { +func (sm *shardedMap[V]) Get(key, conflict uint64) (V, bool) { return sm.shards[key%numShards].get(key, conflict) } -func (sm *shardedMap) Expiration(key uint64) time.Time { +func (sm *shardedMap[V]) Expiration(key uint64) time.Time { return sm.shards[key%numShards].Expiration(key) } -func (sm *shardedMap) Set(i *Item) { +func (sm *shardedMap[V]) Set(i *Item[V]) { if i == nil { // If item is nil make this Set a no-op. 
return @@ -95,62 +95,63 @@ func (sm *shardedMap) Set(i *Item) { sm.shards[i.Key%numShards].Set(i) } -func (sm *shardedMap) Del(key, conflict uint64) (uint64, interface{}) { +func (sm *shardedMap[V]) Del(key, conflict uint64) (uint64, V) { return sm.shards[key%numShards].Del(key, conflict) } -func (sm *shardedMap) Update(newItem *Item) (interface{}, bool) { +func (sm *shardedMap[V]) Update(newItem *Item[V]) (V, bool) { return sm.shards[newItem.Key%numShards].Update(newItem) } -func (sm *shardedMap) Cleanup(policy policy, onEvict itemCallback) { +func (sm *shardedMap[V]) Cleanup(policy *defaultPolicy[V], onEvict func(item *Item[V])) { sm.expiryMap.cleanup(sm, policy, onEvict) } -func (sm *shardedMap) Clear(onEvict itemCallback) { +func (sm *shardedMap[V]) Clear(onEvict func(item *Item[V])) { for i := uint64(0); i < numShards; i++ { sm.shards[i].Clear(onEvict) } + sm.expiryMap.clear() } -type lockedMap struct { +type lockedMap[V any] struct { sync.RWMutex - data map[uint64]storeItem - em *expirationMap + data map[uint64]storeItem[V] + em *expirationMap[V] } -func newLockedMap(em *expirationMap) *lockedMap { - return &lockedMap{ - data: make(map[uint64]storeItem), +func newLockedMap[V any](em *expirationMap[V]) *lockedMap[V] { + return &lockedMap[V]{ + data: make(map[uint64]storeItem[V]), em: em, } } -func (m *lockedMap) get(key, conflict uint64) (interface{}, bool) { +func (m *lockedMap[V]) get(key, conflict uint64) (V, bool) { m.RLock() item, ok := m.data[key] m.RUnlock() if !ok { - return nil, false + return zeroValue[V](), false } if conflict != 0 && (conflict != item.conflict) { - return nil, false + return zeroValue[V](), false } // Handle expired items. if !item.expiration.IsZero() && time.Now().After(item.expiration) { - return nil, false + return zeroValue[V](), false } return item.value, true } -func (m *lockedMap) Expiration(key uint64) time.Time { +func (m *lockedMap[V]) Expiration(key uint64) time.Time { m.RLock() defer m.RUnlock() return m.data[key].expiration } -func (m *lockedMap) Set(i *Item) { +func (m *lockedMap[V]) Set(i *Item[V]) { if i == nil { // If the item is nil make this Set a no-op. 
return @@ -173,7 +174,7 @@ func (m *lockedMap) Set(i *Item) { m.em.add(i.Key, i.Conflict, i.Expiration) } - m.data[i.Key] = storeItem{ + m.data[i.Key] = storeItem[V]{ key: i.Key, conflict: i.Conflict, value: i.Value, @@ -181,16 +182,16 @@ func (m *lockedMap) Set(i *Item) { } } -func (m *lockedMap) Del(key, conflict uint64) (uint64, interface{}) { +func (m *lockedMap[V]) Del(key, conflict uint64) (uint64, V) { m.Lock() item, ok := m.data[key] if !ok { m.Unlock() - return 0, nil + return 0, zeroValue[V]() } if conflict != 0 && (conflict != item.conflict) { m.Unlock() - return 0, nil + return 0, zeroValue[V]() } if !item.expiration.IsZero() { @@ -202,20 +203,20 @@ func (m *lockedMap) Del(key, conflict uint64) (uint64, interface{}) { return item.conflict, item.value } -func (m *lockedMap) Update(newItem *Item) (interface{}, bool) { +func (m *lockedMap[V]) Update(newItem *Item[V]) (V, bool) { m.Lock() item, ok := m.data[newItem.Key] if !ok { m.Unlock() - return nil, false + return zeroValue[V](), false } if newItem.Conflict != 0 && (newItem.Conflict != item.conflict) { m.Unlock() - return nil, false + return zeroValue[V](), false } m.em.update(newItem.Key, newItem.Conflict, item.expiration, newItem.Expiration) - m.data[newItem.Key] = storeItem{ + m.data[newItem.Key] = storeItem[V]{ key: newItem.Key, conflict: newItem.Conflict, value: newItem.Value, @@ -226,9 +227,9 @@ func (m *lockedMap) Update(newItem *Item) (interface{}, bool) { return item.value, true } -func (m *lockedMap) Clear(onEvict itemCallback) { +func (m *lockedMap[V]) Clear(onEvict func(item *Item[V])) { m.Lock() - i := &Item{} + i := &Item[V]{} if onEvict != nil { for _, si := range m.data { i.Key = si.key @@ -237,6 +238,6 @@ func (m *lockedMap) Clear(onEvict itemCallback) { onEvict(i) } } - m.data = make(map[uint64]storeItem) + m.data = make(map[uint64]storeItem[V]) m.Unlock() } diff --git a/vendor/github.com/dgraph-io/ristretto/ttl.go b/vendor/github.com/dgraph-io/ristretto/v2/ttl.go similarity index 56% rename from vendor/github.com/dgraph-io/ristretto/ttl.go rename to vendor/github.com/dgraph-io/ristretto/v2/ttl.go index 337976ad43..ccb0d7cb51 100644 --- a/vendor/github.com/dgraph-io/ristretto/ttl.go +++ b/vendor/github.com/dgraph-io/ristretto/v2/ttl.go @@ -40,18 +40,20 @@ func cleanupBucket(t time.Time) int64 { type bucket map[uint64]uint64 // expirationMap is a map of bucket number to the corresponding bucket. -type expirationMap struct { +type expirationMap[V any] struct { sync.RWMutex - buckets map[int64]bucket + buckets map[int64]bucket + lastCleanedBucketNum int64 } -func newExpirationMap() *expirationMap { - return &expirationMap{ - buckets: make(map[int64]bucket), +func newExpirationMap[V any]() *expirationMap[V] { + return &expirationMap[V]{ + buckets: make(map[int64]bucket), + lastCleanedBucketNum: cleanupBucket(time.Now()), } } -func (m *expirationMap) add(key, conflict uint64, expiration time.Time) { +func (m *expirationMap[_]) add(key, conflict uint64, expiration time.Time) { if m == nil { return } @@ -73,7 +75,7 @@ func (m *expirationMap) add(key, conflict uint64, expiration time.Time) { b[key] = conflict } -func (m *expirationMap) update(key, conflict uint64, oldExpTime, newExpTime time.Time) { +func (m *expirationMap[_]) update(key, conflict uint64, oldExpTime, newExpTime time.Time) { if m == nil { return } @@ -87,6 +89,11 @@ func (m *expirationMap) update(key, conflict uint64, oldExpTime, newExpTime time delete(oldBucket, key) } + // Items that don't expire don't need to be in the expiration map. 
+ if newExpTime.IsZero() { + return + } + newBucketNum := storageBucket(newExpTime) newBucket, ok := m.buckets[newBucketNum] if !ok { @@ -96,7 +103,7 @@ func (m *expirationMap) update(key, conflict uint64, oldExpTime, newExpTime time newBucket[key] = conflict } -func (m *expirationMap) del(key uint64, expiration time.Time) { +func (m *expirationMap[_]) del(key uint64, expiration time.Time) { if m == nil { return } @@ -114,34 +121,57 @@ func (m *expirationMap) del(key uint64, expiration time.Time) { // cleanup removes all the items in the bucket that was just completed. It deletes // those items from the store, and calls the onEvict function on those items. // This function is meant to be called periodically. -func (m *expirationMap) cleanup(store store, policy policy, onEvict itemCallback) { +func (m *expirationMap[V]) cleanup(store store[V], policy *defaultPolicy[V], onEvict func(item *Item[V])) { if m == nil { return } m.Lock() now := time.Now() - bucketNum := cleanupBucket(now) - keys := m.buckets[bucketNum] - delete(m.buckets, bucketNum) + currentBucketNum := cleanupBucket(now) + // Clean up all buckets up to and including currentBucketNum, starting from + // (but not including) the last one that was cleaned up + var buckets []bucket + for bucketNum := m.lastCleanedBucketNum + 1; bucketNum <= currentBucketNum; bucketNum++ { + buckets = append(buckets, m.buckets[bucketNum]) + delete(m.buckets, bucketNum) + } + m.lastCleanedBucketNum = currentBucketNum m.Unlock() - for key, conflict := range keys { - // Sanity check. Verify that the store agrees that this key is expired. - if store.Expiration(key).After(now) { - continue + for _, keys := range buckets { + for key, conflict := range keys { + expr := store.Expiration(key) + // Sanity check. Verify that the store agrees that this key is expired. + if expr.After(now) { + continue + } + + cost := policy.Cost(key) + policy.Del(key) + _, value := store.Del(key, conflict) + + if onEvict != nil { + onEvict(&Item[V]{Key: key, + Conflict: conflict, + Value: value, + Cost: cost, + Expiration: expr, + }) + } } + } +} - cost := policy.Cost(key) - policy.Del(key) - _, value := store.Del(key, conflict) - - if onEvict != nil { - onEvict(&Item{Key: key, - Conflict: conflict, - Value: value, - Cost: cost, - }) - } +// clear clears the expirationMap, the caller is responsible for properly +// evicting the referenced items +func (m *expirationMap[V]) clear() { + if m == nil { + return } + + m.Lock() + m.buckets = make(map[int64]bucket) + m.lastCleanedBucketNum = cleanupBucket(time.Now()) + m.Unlock() } diff --git a/vendor/github.com/dgraph-io/ristretto/z/LICENSE b/vendor/github.com/dgraph-io/ristretto/v2/z/LICENSE similarity index 100% rename from vendor/github.com/dgraph-io/ristretto/z/LICENSE rename to vendor/github.com/dgraph-io/ristretto/v2/z/LICENSE diff --git a/vendor/github.com/dgraph-io/ristretto/z/README.md b/vendor/github.com/dgraph-io/ristretto/v2/z/README.md similarity index 91% rename from vendor/github.com/dgraph-io/ristretto/z/README.md rename to vendor/github.com/dgraph-io/ristretto/v2/z/README.md index 6d77e146eb..ad48a3c17c 100644 --- a/vendor/github.com/dgraph-io/ristretto/z/README.md +++ b/vendor/github.com/dgraph-io/ristretto/v2/z/README.md @@ -1,22 +1,22 @@ ## bbloom: a bitset Bloom filter for go/golang === -package implements a fast bloom filter with real 'bitset' and JSONMarshal/JSONUnmarshal to store/reload the Bloom filter. 
+package implements a fast bloom filter with real 'bitset' and JSONMarshal/JSONUnmarshal to store/reload the Bloom filter. NOTE: the package uses unsafe.Pointer to set and read the bits from the bitset. If you're uncomfortable with using the unsafe package, please consider using my bloom filter package at github.com/AndreasBriese/bloom === -changelog 11/2015: new thread safe methods AddTS(), HasTS(), AddIfNotHasTS() following a suggestion from Srdjan Marinovic (github @a-little-srdjan), who used this to code a bloomfilter cache. +changelog 11/2015: new thread safe methods AddTS(), HasTS(), AddIfNotHasTS() following a suggestion from Srdjan Marinovic (github @a-little-srdjan), who used this to code a bloomfilter cache. -This bloom filter was developed to strengthen a website-log database and was tested and optimized for this log-entry mask: "2014/%02i/%02i %02i:%02i:%02i /info.html". -Nonetheless bbloom should work with any other form of entries. +This bloom filter was developed to strengthen a website-log database and was tested and optimized for this log-entry mask: "2014/%02i/%02i %02i:%02i:%02i /info.html". +Nonetheless bbloom should work with any other form of entries. ~~Hash function is a modified Berkeley DB sdbm hash (to optimize for smaller strings). sdbm http://www.cse.yorku.ca/~oz/hash.html~~ Found sipHash (SipHash-2-4, a fast short-input PRF created by Jean-Philippe Aumasson and Daniel J. Bernstein.) to be about as fast. sipHash had been ported by Dimtry Chestnyk to Go (github.com/dchest/siphash ) -Minimum hashset size is: 512 ([4]uint64; will be set automatically). +Minimum hashset size is: 512 ([4]uint64; will be set automatically). ###install @@ -25,7 +25,7 @@ go get github.com/AndreasBriese/bbloom ``` ###test -+ change to folder ../bbloom ++ change to folder ../bbloom + create wordlist in file "words.txt" (you might use `python permut.py`) + run 'go test -bench=.' within the folder @@ -52,10 +52,10 @@ import ( at your header. In the program use ```go -// create a bloom filter for 65536 items and 1 % wrong-positive ratio +// create a bloom filter for 65536 items and 1 % wrong-positive ratio bf := bbloom.New(float64(1<<16), float64(0.01)) -// or +// or // create a bloom filter with 650000 for 65536 items and 7 locs per hash explicitly // bf = bbloom.New(float64(650000), float64(7)) // or @@ -64,7 +64,7 @@ bf = bbloom.New(650000.0, 7.0) // add one item bf.Add([]byte("butter")) -// Number of elements added is exposed now +// Number of elements added is exposed now // Note: ElemNum will not be included in JSON export (for compatability to older version) nOfElementsInFilter := bf.ElemNum @@ -86,7 +86,7 @@ isNotIn = bf.HasTS([]byte("peanutButter")) // should be false added = bf.AddIfNotHasTS([]byte("butter")) // should be false because 'peanutbutter' is already in the set added = bf.AddIfNotHasTS([]byte("peanutbuTTer")) // should be true because 'penutbuTTer' is new -// convert to JSON ([]byte) +// convert to JSON ([]byte) Json := bf.JSONMarshal() // bloomfilters Mutex is exposed for external un-/locking @@ -95,7 +95,7 @@ bf.Mtx.Lock() Json = bf.JSONMarshal() bf.Mtx.Unlock() -// restore a bloom filter from storage +// restore a bloom filter from storage bfNew := bbloom.JSONUnmarshal(Json) isInNew := bfNew.Has([]byte("butter")) // should be true @@ -105,17 +105,17 @@ isNotInNew := bfNew.Has([]byte("Butter")) // should be false to work with the bloom filter. -### why 'fast'? +### why 'fast'? 
+ +It's about 3 times faster than William Fitzgeralds bitset bloom filter https://github.com/willf/bloom . And it is about so fast as my []bool set variant for Boom filters (see https://github.com/AndreasBriese/bloom ) but having a 8times smaller memory footprint: -It's about 3 times faster than William Fitzgeralds bitset bloom filter https://github.com/willf/bloom . And it is about so fast as my []bool set variant for Boom filters (see https://github.com/AndreasBriese/bloom ) but having a 8times smaller memory footprint: - Bloom filter (filter size 524288, 7 hashlocs) github.com/AndreasBriese/bbloom 'Add' 65536 items (10 repetitions): 6595800 ns (100 ns/op) github.com/AndreasBriese/bbloom 'Has' 65536 items (10 repetitions): 5986600 ns (91 ns/op) github.com/AndreasBriese/bloom 'Add' 65536 items (10 repetitions): 6304684 ns (96 ns/op) github.com/AndreasBriese/bloom 'Has' 65536 items (10 repetitions): 6568663 ns (100 ns/op) - + github.com/willf/bloom 'Add' 65536 items (10 repetitions): 24367224 ns (371 ns/op) github.com/willf/bloom 'Test' 65536 items (10 repetitions): 21881142 ns (333 ns/op) github.com/dataence/bloom/standard 'Add' 65536 items (10 repetitions): 23041644 ns (351 ns/op) @@ -126,4 +126,4 @@ It's about 3 times faster than William Fitzgeralds bitset bloom filter https://g (on MBPro15 OSX10.8.5 i7 4Core 2.4Ghz) -With 32bit bloom filters (bloom32) using modified sdbm, bloom32 does hashing with only 2 bit shifts, one xor and one substraction per byte. smdb is about as fast as fnv64a but gives less collisions with the dataset (see mask above). bloom.New(float64(10 * 1<<16),float64(7)) populated with 1<<16 random items from the dataset (see above) and tested against the rest results in less than 0.05% collisions. +With 32bit bloom filters (bloom32) using modified sdbm, bloom32 does hashing with only 2 bit shifts, one xor and one substraction per byte. smdb is about as fast as fnv64a but gives less collisions with the dataset (see mask above). bloom.New(float64(10 * 1<<16),float64(7)) populated with 1<<16 random items from the dataset (see above) and tested against the rest results in less than 0.05% collisions. diff --git a/vendor/github.com/dgraph-io/ristretto/z/allocator.go b/vendor/github.com/dgraph-io/ristretto/v2/z/allocator.go similarity index 98% rename from vendor/github.com/dgraph-io/ristretto/z/allocator.go rename to vendor/github.com/dgraph-io/ristretto/v2/z/allocator.go index eae0f83449..dc272f68fc 100644 --- a/vendor/github.com/dgraph-io/ristretto/z/allocator.go +++ b/vendor/github.com/dgraph-io/ristretto/v2/z/allocator.go @@ -56,9 +56,7 @@ func init() { allocs = make(map[uint64]*Allocator) // Set up a unique Ref per process. 
- rand.Seed(time.Now().UnixNano()) - allocRef = uint64(rand.Int63n(1<<16)) << 48 //nolint:gosec // cryptographic precision not needed - + allocRef = uint64(rand.Int63n(1<<16)) << 48 calculatedLog2 = make([]int, 1025) for i := 1; i <= 1024; i++ { calculatedLog2[i] = int(math.Log2(float64(i))) diff --git a/vendor/github.com/dgraph-io/ristretto/z/bbloom.go b/vendor/github.com/dgraph-io/ristretto/v2/z/bbloom.go similarity index 97% rename from vendor/github.com/dgraph-io/ristretto/z/bbloom.go rename to vendor/github.com/dgraph-io/ristretto/v2/z/bbloom.go index 37135b012f..3e2137ff6a 100644 --- a/vendor/github.com/dgraph-io/ristretto/z/bbloom.go +++ b/vendor/github.com/dgraph-io/ristretto/v2/z/bbloom.go @@ -23,10 +23,9 @@ package z import ( "bytes" "encoding/json" + "log" "math" "unsafe" - - "github.com/golang/glog" ) // helper @@ -60,7 +59,7 @@ func NewBloomFilter(params ...float64) (bloomfilter *Bloom) { entries, locs = uint64(params[0]), uint64(params[1]) } } else { - glog.Fatal("usage: New(float64(number_of_entries), float64(number_of_hashlocations))" + + log.Fatal("usage: New(float64(number_of_entries), float64(number_of_hashlocations))" + " i.e. New(float64(1000), float64(3)) or New(float64(number_of_entries)," + " float64(number_of_hashlocations)) i.e. New(float64(1000), float64(0.03))") } @@ -205,7 +204,7 @@ func (bl Bloom) JSONMarshal() []byte { } data, err := json.Marshal(bloomImEx) if err != nil { - glog.Fatal("json.Marshal failed: ", err) + log.Fatal("json.Marshal failed: ", err) } return data } diff --git a/vendor/github.com/dgraph-io/ristretto/z/btree.go b/vendor/github.com/dgraph-io/ristretto/v2/z/btree.go similarity index 99% rename from vendor/github.com/dgraph-io/ristretto/z/btree.go rename to vendor/github.com/dgraph-io/ristretto/v2/z/btree.go index 12b735bb03..a8a83816e0 100644 --- a/vendor/github.com/dgraph-io/ristretto/z/btree.go +++ b/vendor/github.com/dgraph-io/ristretto/v2/z/btree.go @@ -24,12 +24,13 @@ import ( "strings" "unsafe" - "github.com/dgraph-io/ristretto/z/simd" + "github.com/dgraph-io/ristretto/v2/z/simd" ) var ( pageSize = os.Getpagesize() maxKeys = (pageSize / 16) - 1 + //nolint:unused oneThird = int(float64(maxKeys) / 3) ) @@ -480,6 +481,8 @@ func (t *Tree) split(pid uint64) node { // shareWithSiblingXXX is unused for now. The idea is to move some keys to // sibling when a node is full. But, I don't see any special benefits in our // access pattern. It doesn't result in better occupancy ratios. +// +//nolint:unused func (t *Tree) shareWithSiblingXXX(n node, idx int) bool { if idx == 0 { return false diff --git a/vendor/github.com/dgraph-io/ristretto/z/buffer.go b/vendor/github.com/dgraph-io/ristretto/v2/z/buffer.go similarity index 95% rename from vendor/github.com/dgraph-io/ristretto/z/buffer.go rename to vendor/github.com/dgraph-io/ristretto/v2/z/buffer.go index 5a22de8c7f..a662d47cc3 100644 --- a/vendor/github.com/dgraph-io/ristretto/z/buffer.go +++ b/vendor/github.com/dgraph-io/ristretto/v2/z/buffer.go @@ -19,12 +19,11 @@ package z import ( "encoding/binary" "fmt" - "io/ioutil" + "log" "os" "sort" "sync/atomic" - "github.com/golang/glog" "github.com/pkg/errors" ) @@ -92,7 +91,7 @@ func NewBufferTmp(dir string, capacity int) (*Buffer, error) { if dir == "" { dir = tmpDir } - file, err := ioutil.TempFile(dir, "buffer") + file, err := os.CreateTemp(dir, "buffer") if err != nil { return nil, err } @@ -200,7 +199,7 @@ func (b *Buffer) Grow(n int) { // If autoMmap gets triggered, copy the slice over to an mmaped file. 
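The allocator hunk above drops the `rand.Seed` call entirely: since Go 1.20 the global math/rand source is seeded automatically and `rand.Seed` is deprecated. A small sketch of the surviving ref generation:

```go
package main

import (
	"fmt"
	"math/rand"
)

func main() {
	// As of Go 1.20 the global math/rand source is auto-seeded, so the
	// rand.Seed(time.Now().UnixNano()) call removed in the hunk above is
	// redundant (and deprecated). The derived value is unchanged: a random
	// 16-bit tag placed in the top bits of a 64-bit reference.
	allocRef := uint64(rand.Int63n(1<<16)) << 48
	fmt.Printf("%#016x\n", allocRef)
}
```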
if b.autoMmapAfter > 0 && b.curSz > b.autoMmapAfter { b.bufType = UseMmap - file, err := ioutil.TempFile(b.autoMmapDir, "") + file, err := os.CreateTemp(b.autoMmapDir, "") if err != nil { panic(err) } @@ -254,8 +253,8 @@ func (b *Buffer) AllocateOffset(n int) int { } func (b *Buffer) writeLen(sz int) { - buf := b.Allocate(4) - binary.BigEndian.PutUint32(buf, uint32(sz)) + buf := b.Allocate(8) + binary.BigEndian.PutUint64(buf, uint64(sz)) } // SliceAllocate would encode the size provided into the buffer, followed by a call to Allocate, @@ -263,7 +262,7 @@ func (b *Buffer) writeLen(sz int) { // this big buffer. // Note that SliceAllocate should NOT be mixed with normal calls to Write. func (b *Buffer) SliceAllocate(sz int) []byte { - b.Grow(4 + sz) + b.Grow(8 + sz) b.writeLen(sz) return b.Allocate(sz) } @@ -281,7 +280,9 @@ func (b *Buffer) SliceIterate(f func(slice []byte) error) error { if b.IsEmpty() { return nil } - slice, next := []byte{}, b.StartOffset() + + next := b.StartOffset() + var slice []byte for next >= 0 { slice, next = b.Slice(next) if len(slice) == 0 { @@ -291,6 +292,7 @@ func (b *Buffer) SliceIterate(f func(slice []byte) error) error { return err } } + return nil } @@ -339,19 +341,19 @@ func (s *sortHelper) sortSmall(start, end int) { }) // Now we iterate over the s.small offsets and copy over the slices. The result is now in order. for _, off := range s.small { - s.tmp.Write(rawSlice(s.b.buf[off:])) + _, _ = s.tmp.Write(rawSlice(s.b.buf[off:])) } assert(end-start == copy(s.b.buf[start:end], s.tmp.Bytes())) } func assert(b bool) { if !b { - glog.Fatalf("%+v", errors.Errorf("Assertion failure")) + log.Fatalf("%+v", errors.Errorf("Assertion failure")) } } func check(err error) { if err != nil { - glog.Fatalf("%+v", err) + log.Fatalf("%+v", err) } } func check2(_ interface{}, err error) { @@ -392,7 +394,7 @@ func (s *sortHelper) merge(left, right []byte, start, end int) { rs = rawSlice(right) // We skip the first 4 bytes in the rawSlice, because that stores the length. - if s.less(ls[4:], rs[4:]) { + if s.less(ls[8:], rs[8:]) { copyLeft() } else { copyRight() @@ -454,7 +456,7 @@ func (b *Buffer) SortSliceBetween(start, end int, less LessFunc) { small: make([]int, 0, 1024), tmp: NewBuffer(szTmp, b.tag), } - defer s.tmp.Release() + defer func() { _ = s.tmp.Release() }() left := offsets[0] for _, off := range offsets[1:] { @@ -465,8 +467,8 @@ func (b *Buffer) SortSliceBetween(start, end int, less LessFunc) { } func rawSlice(buf []byte) []byte { - sz := binary.BigEndian.Uint32(buf) - return buf[:4+int(sz)] + sz := binary.BigEndian.Uint64(buf) + return buf[:8+int(sz)] } // Slice would return the slice written at offset. 
@@ -475,8 +477,8 @@ func (b *Buffer) Slice(offset int) ([]byte, int) { return nil, -1 } - sz := binary.BigEndian.Uint32(b.buf[offset:]) - start := offset + 4 + sz := binary.BigEndian.Uint64(b.buf[offset:]) + start := offset + 8 next := start + int(sz) res := b.buf[start:next] if next >= int(b.offset) { diff --git a/vendor/github.com/dgraph-io/ristretto/z/calloc.go b/vendor/github.com/dgraph-io/ristretto/v2/z/calloc.go similarity index 100% rename from vendor/github.com/dgraph-io/ristretto/z/calloc.go rename to vendor/github.com/dgraph-io/ristretto/v2/z/calloc.go diff --git a/vendor/github.com/dgraph-io/ristretto/z/calloc_32bit.go b/vendor/github.com/dgraph-io/ristretto/v2/z/calloc_32bit.go similarity index 100% rename from vendor/github.com/dgraph-io/ristretto/z/calloc_32bit.go rename to vendor/github.com/dgraph-io/ristretto/v2/z/calloc_32bit.go diff --git a/vendor/github.com/dgraph-io/ristretto/z/calloc_64bit.go b/vendor/github.com/dgraph-io/ristretto/v2/z/calloc_64bit.go similarity index 81% rename from vendor/github.com/dgraph-io/ristretto/z/calloc_64bit.go rename to vendor/github.com/dgraph-io/ristretto/v2/z/calloc_64bit.go index b898248bba..6c02cabd94 100644 --- a/vendor/github.com/dgraph-io/ristretto/z/calloc_64bit.go +++ b/vendor/github.com/dgraph-io/ristretto/v2/z/calloc_64bit.go @@ -2,6 +2,7 @@ // of this source code is governed by a BSD-style license that can be found in // the LICENSE file. +//go:build amd64 || arm64 || arm64be || ppc64 || ppc64le || mips64 || mips64le || riscv64 || s390x || sparc64 // +build amd64 arm64 arm64be ppc64 ppc64le mips64 mips64le riscv64 s390x sparc64 package z diff --git a/vendor/github.com/dgraph-io/ristretto/z/calloc_jemalloc.go b/vendor/github.com/dgraph-io/ristretto/v2/z/calloc_jemalloc.go similarity index 100% rename from vendor/github.com/dgraph-io/ristretto/z/calloc_jemalloc.go rename to vendor/github.com/dgraph-io/ristretto/v2/z/calloc_jemalloc.go diff --git a/vendor/github.com/dgraph-io/ristretto/z/calloc_nojemalloc.go b/vendor/github.com/dgraph-io/ristretto/v2/z/calloc_nojemalloc.go similarity index 93% rename from vendor/github.com/dgraph-io/ristretto/z/calloc_nojemalloc.go rename to vendor/github.com/dgraph-io/ristretto/v2/z/calloc_nojemalloc.go index 93ceedf906..20c9ae3596 100644 --- a/vendor/github.com/dgraph-io/ristretto/z/calloc_nojemalloc.go +++ b/vendor/github.com/dgraph-io/ristretto/v2/z/calloc_nojemalloc.go @@ -2,6 +2,7 @@ // of this source code is governed by a BSD-style license that can be found in // the LICENSE file. +//go:build !jemalloc || !cgo // +build !jemalloc !cgo package z @@ -34,4 +35,4 @@ func StatsPrint() { // ReadMemStats doesn't do anything since all the memory is being managed // by the Go runtime. 
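The buffer hunks above widen the per-slice length header from a 4-byte uint32 to an 8-byte uint64, so the offsets in `writeLen`, `SliceAllocate`, `merge`, `rawSlice`, and `Slice` all move from 4 to 8. A minimal standalone sketch of the resulting framing (not the z.Buffer API itself):

```go
package main

import (
	"encoding/binary"
	"fmt"
)

// appendSlice frames a slice the way z.Buffer does after the change above:
// an 8-byte big-endian length, then the payload.
func appendSlice(buf, slice []byte) []byte {
	var hdr [8]byte
	binary.BigEndian.PutUint64(hdr[:], uint64(len(slice)))
	return append(append(buf, hdr[:]...), slice...)
}

// readSlice loosely mirrors Buffer.Slice: it returns the slice at offset and
// the offset where the next framed slice begins.
func readSlice(buf []byte, offset int) ([]byte, int) {
	sz := binary.BigEndian.Uint64(buf[offset:])
	start := offset + 8
	next := start + int(sz)
	return buf[start:next], next
}

func main() {
	var buf []byte
	buf = appendSlice(buf, []byte("hello"))
	buf = appendSlice(buf, []byte("world"))
	s, next := readSlice(buf, 0)
	fmt.Println(string(s), next) // hello 13
}
```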
-func ReadMemStats(_ *MemStats) { return } +func ReadMemStats(_ *MemStats) {} diff --git a/vendor/github.com/dgraph-io/ristretto/z/file.go b/vendor/github.com/dgraph-io/ristretto/v2/z/file.go similarity index 98% rename from vendor/github.com/dgraph-io/ristretto/z/file.go rename to vendor/github.com/dgraph-io/ristretto/v2/z/file.go index 880caf0ad9..c07949a725 100644 --- a/vendor/github.com/dgraph-io/ristretto/z/file.go +++ b/vendor/github.com/dgraph-io/ristretto/v2/z/file.go @@ -61,7 +61,9 @@ func OpenMmapFileUsing(fd *os.File, sz int, writable bool) (*MmapFile, error) { if fileSize == 0 { dir, _ := filepath.Split(filename) - go SyncDir(dir) + if err := SyncDir(dir); err != nil { + return nil, err + } } return &MmapFile{ Data: buf, diff --git a/vendor/github.com/dgraph-io/ristretto/z/file_default.go b/vendor/github.com/dgraph-io/ristretto/v2/z/file_default.go similarity index 98% rename from vendor/github.com/dgraph-io/ristretto/z/file_default.go rename to vendor/github.com/dgraph-io/ristretto/v2/z/file_default.go index d9c0db43e7..00e7d0870f 100644 --- a/vendor/github.com/dgraph-io/ristretto/z/file_default.go +++ b/vendor/github.com/dgraph-io/ristretto/v2/z/file_default.go @@ -1,3 +1,4 @@ +//go:build !linux // +build !linux /* diff --git a/vendor/github.com/dgraph-io/ristretto/z/file_linux.go b/vendor/github.com/dgraph-io/ristretto/v2/z/file_linux.go similarity index 100% rename from vendor/github.com/dgraph-io/ristretto/z/file_linux.go rename to vendor/github.com/dgraph-io/ristretto/v2/z/file_linux.go diff --git a/vendor/github.com/dgraph-io/ristretto/z/flags.go b/vendor/github.com/dgraph-io/ristretto/v2/z/flags.go similarity index 94% rename from vendor/github.com/dgraph-io/ristretto/z/flags.go rename to vendor/github.com/dgraph-io/ristretto/v2/z/flags.go index a55c474ab2..1e1d2ffc8e 100644 --- a/vendor/github.com/dgraph-io/ristretto/z/flags.go +++ b/vendor/github.com/dgraph-io/ristretto/v2/z/flags.go @@ -2,6 +2,7 @@ package z import ( "fmt" + "log" "os" "os/user" "path/filepath" @@ -10,7 +11,6 @@ import ( "strings" "time" - "github.com/golang/glog" "github.com/pkg/errors" ) @@ -109,7 +109,7 @@ type SuperFlag struct { func NewSuperFlag(flag string) *SuperFlag { sf, err := newSuperFlagImpl(flag) if err != nil { - glog.Fatal(err) + log.Fatal(err) } return sf } @@ -136,7 +136,7 @@ func (sf *SuperFlag) String() string { func (sf *SuperFlag) MergeAndCheckDefault(flag string) *SuperFlag { sf, err := sf.mergeAndCheckDefaultImpl(flag) if err != nil { - glog.Fatal(err) + log.Fatal(err) } return sf } @@ -162,7 +162,7 @@ func (sf *SuperFlag) mergeAndCheckDefaultImpl(flag string) (*SuperFlag, error) { } } if numKeys != 0 { - return nil, fmt.Errorf("superflag: found invalid options in flag: %s.\nvalid options: %v", sf, flag) + return nil, fmt.Errorf("superflag: found invalid options: %s.\nvalid options: %v", sf, flag) } for k, v := range src { if _, ok := sf.m[k]; !ok { @@ -184,7 +184,7 @@ func (sf *SuperFlag) GetDuration(opt string) time.Duration { } if strings.Contains(val, "d") { val = strings.Replace(val, "d", "", 1) - days, err := strconv.ParseUint(val, 0, 64) + days, err := strconv.ParseInt(val, 0, 64) if err != nil { return time.Duration(0) } @@ -207,7 +207,7 @@ func (sf *SuperFlag) GetBool(opt string) bool { err = errors.Wrapf(err, "Unable to parse %s as bool for key: %s. 
Options: %s\n", val, opt, sf) - glog.Fatalf("%+v", err) + log.Fatalf("%+v", err) } return b } @@ -222,7 +222,7 @@ func (sf *SuperFlag) GetFloat64(opt string) float64 { err = errors.Wrapf(err, "Unable to parse %s as float64 for key: %s. Options: %s\n", val, opt, sf) - glog.Fatalf("%+v", err) + log.Fatalf("%+v", err) } return f } @@ -237,7 +237,7 @@ func (sf *SuperFlag) GetInt64(opt string) int64 { err = errors.Wrapf(err, "Unable to parse %s as int64 for key: %s. Options: %s\n", val, opt, sf) - glog.Fatalf("%+v", err) + log.Fatalf("%+v", err) } return i } @@ -252,7 +252,7 @@ func (sf *SuperFlag) GetUint64(opt string) uint64 { err = errors.Wrapf(err, "Unable to parse %s as uint64 for key: %s. Options: %s\n", val, opt, sf) - glog.Fatalf("%+v", err) + log.Fatalf("%+v", err) } return u } @@ -267,7 +267,7 @@ func (sf *SuperFlag) GetUint32(opt string) uint32 { err = errors.Wrapf(err, "Unable to parse %s as uint32 for key: %s. Options: %s\n", val, opt, sf) - glog.Fatalf("%+v", err) + log.Fatalf("%+v", err) } return uint32(u) } @@ -283,7 +283,7 @@ func (sf *SuperFlag) GetPath(opt string) string { p := sf.GetString(opt) path, err := expandPath(p) if err != nil { - glog.Fatalf("Failed to get path: %+v", err) + log.Fatalf("Failed to get path: %+v", err) } return path } diff --git a/vendor/github.com/dgraph-io/ristretto/z/histogram.go b/vendor/github.com/dgraph-io/ristretto/v2/z/histogram.go similarity index 100% rename from vendor/github.com/dgraph-io/ristretto/z/histogram.go rename to vendor/github.com/dgraph-io/ristretto/v2/z/histogram.go diff --git a/vendor/github.com/dgraph-io/ristretto/z/mmap.go b/vendor/github.com/dgraph-io/ristretto/v2/z/mmap.go similarity index 100% rename from vendor/github.com/dgraph-io/ristretto/z/mmap.go rename to vendor/github.com/dgraph-io/ristretto/v2/z/mmap.go diff --git a/vendor/github.com/dgraph-io/ristretto/z/mmap_darwin.go b/vendor/github.com/dgraph-io/ristretto/v2/z/mmap_darwin.go similarity index 100% rename from vendor/github.com/dgraph-io/ristretto/z/mmap_darwin.go rename to vendor/github.com/dgraph-io/ristretto/v2/z/mmap_darwin.go diff --git a/vendor/github.com/dgraph-io/ristretto/v2/z/mmap_js.go b/vendor/github.com/dgraph-io/ristretto/v2/z/mmap_js.go new file mode 100644 index 0000000000..d15b562960 --- /dev/null +++ b/vendor/github.com/dgraph-io/ristretto/v2/z/mmap_js.go @@ -0,0 +1,40 @@ +//go:build js + +/* + * Copyright 2023 Dgraph Labs, Inc. and Contributors + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
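The flags.go hunks above swap glog for the standard log package and move the day-suffix handling in `GetDuration` from `strconv.ParseUint` to `strconv.ParseInt`. A usage sketch; the option names here are invented for illustration and are not real badger flags:

```go
package main

import (
	"fmt"

	"github.com/dgraph-io/ristretto/v2/z"
)

func main() {
	// SuperFlag parses "key=value; key2=value2" strings. GetDuration accepts
	// the extra "d" (days) suffix handled in the hunk above.
	sf := z.NewSuperFlag("compression=true; ttl=30d")
	fmt.Println(sf.GetBool("compression")) // true
	fmt.Println(sf.GetDuration("ttl"))     // 720h0m0s (30 days)
}
```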
+ */ + +package z + +import ( + "os" + "syscall" +) + +func mmap(fd *os.File, writeable bool, size int64) ([]byte, error) { + return nil, syscall.ENOSYS +} + +func munmap(b []byte) error { + return syscall.ENOSYS +} + +func madvise(b []byte, readahead bool) error { + return syscall.ENOSYS +} + +func msync(b []byte) error { + return syscall.ENOSYS +} diff --git a/vendor/github.com/dgraph-io/ristretto/z/mmap_linux.go b/vendor/github.com/dgraph-io/ristretto/v2/z/mmap_linux.go similarity index 98% rename from vendor/github.com/dgraph-io/ristretto/z/mmap_linux.go rename to vendor/github.com/dgraph-io/ristretto/v2/z/mmap_linux.go index 331330cff9..1d68f8fe82 100644 --- a/vendor/github.com/dgraph-io/ristretto/z/mmap_linux.go +++ b/vendor/github.com/dgraph-io/ristretto/v2/z/mmap_linux.go @@ -1,3 +1,6 @@ +//go:build !js +// +build !js + /* * Copyright 2020 Dgraph Labs, Inc. and Contributors * diff --git a/vendor/github.com/dgraph-io/ristretto/z/mmap_plan9.go b/vendor/github.com/dgraph-io/ristretto/v2/z/mmap_plan9.go similarity index 100% rename from vendor/github.com/dgraph-io/ristretto/z/mmap_plan9.go rename to vendor/github.com/dgraph-io/ristretto/v2/z/mmap_plan9.go diff --git a/vendor/github.com/dgraph-io/ristretto/z/mmap_unix.go b/vendor/github.com/dgraph-io/ristretto/v2/z/mmap_unix.go similarity index 92% rename from vendor/github.com/dgraph-io/ristretto/z/mmap_unix.go rename to vendor/github.com/dgraph-io/ristretto/v2/z/mmap_unix.go index e8b2699cf9..13f2bb0c60 100644 --- a/vendor/github.com/dgraph-io/ristretto/z/mmap_unix.go +++ b/vendor/github.com/dgraph-io/ristretto/v2/z/mmap_unix.go @@ -1,4 +1,5 @@ -// +build !windows,!darwin,!plan9,!linux +//go:build !windows && !darwin && !plan9 && !linux && !wasip1 && !js +// +build !windows,!darwin,!plan9,!linux,!wasip1,!js /* * Copyright 2019 Dgraph Labs, Inc. and Contributors diff --git a/vendor/github.com/dgraph-io/ristretto/v2/z/mmap_wasip1.go b/vendor/github.com/dgraph-io/ristretto/v2/z/mmap_wasip1.go new file mode 100644 index 0000000000..94f7148451 --- /dev/null +++ b/vendor/github.com/dgraph-io/ristretto/v2/z/mmap_wasip1.go @@ -0,0 +1,40 @@ +//go:build wasip1 + +/* + * Copyright 2023 Dgraph Labs, Inc. and Contributors + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
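The new mmap_js.go above (and its wasip1 twin below) stubs every mmap primitive out with `syscall.ENOSYS`. A sketch of how calling code can detect that and fall back; `z.OpenMmapFile` and the `z.NewFile` sentinel are the existing ristretto API, while the fallback itself is purely illustrative:

```go
package main

import (
	"errors"
	"fmt"
	"os"
	"syscall"

	"github.com/dgraph-io/ristretto/v2/z"
)

func main() {
	mf, err := z.OpenMmapFile("data.bin", os.O_RDWR|os.O_CREATE, 1<<20)
	switch {
	case errors.Is(err, syscall.ENOSYS):
		// js/wasip1 builds land here via the stubs above.
		fmt.Println("mmap unsupported on this platform; fall back to plain file I/O")
	case err != nil && !errors.Is(err, z.NewFile):
		panic(err) // z.NewFile only signals that the backing file was freshly created
	default:
		fmt.Println("mapped", len(mf.Data), "bytes")
	}
}
```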
+ */ + +package z + +import ( + "os" + "syscall" +) + +func mmap(fd *os.File, writeable bool, size int64) ([]byte, error) { + return nil, syscall.ENOSYS +} + +func munmap(b []byte) error { + return syscall.ENOSYS +} + +func madvise(b []byte, readahead bool) error { + return syscall.ENOSYS +} + +func msync(b []byte) error { + return syscall.ENOSYS +} diff --git a/vendor/github.com/dgraph-io/ristretto/z/mmap_windows.go b/vendor/github.com/dgraph-io/ristretto/v2/z/mmap_windows.go similarity index 100% rename from vendor/github.com/dgraph-io/ristretto/z/mmap_windows.go rename to vendor/github.com/dgraph-io/ristretto/v2/z/mmap_windows.go diff --git a/vendor/github.com/dgraph-io/ristretto/z/mremap_linux_arm64.go b/vendor/github.com/dgraph-io/ristretto/v2/z/mremap_nosize.go similarity index 94% rename from vendor/github.com/dgraph-io/ristretto/z/mremap_linux_arm64.go rename to vendor/github.com/dgraph-io/ristretto/v2/z/mremap_nosize.go index 09683cdfeb..c052bebba8 100644 --- a/vendor/github.com/dgraph-io/ristretto/z/mremap_linux_arm64.go +++ b/vendor/github.com/dgraph-io/ristretto/v2/z/mremap_nosize.go @@ -1,3 +1,8 @@ +//go:build (arm64 || arm) && linux && !js +// +build arm64 arm +// +build linux +// +build !js + /* * Copyright 2020 Dgraph Labs, Inc. and Contributors * diff --git a/vendor/github.com/dgraph-io/ristretto/z/mremap_linux.go b/vendor/github.com/dgraph-io/ristretto/v2/z/mremap_size.go similarity index 95% rename from vendor/github.com/dgraph-io/ristretto/z/mremap_linux.go rename to vendor/github.com/dgraph-io/ristretto/v2/z/mremap_size.go index 225678658d..958587d73f 100644 --- a/vendor/github.com/dgraph-io/ristretto/z/mremap_linux.go +++ b/vendor/github.com/dgraph-io/ristretto/v2/z/mremap_size.go @@ -1,4 +1,5 @@ -// +build !arm64 +//go:build linux && !arm64 && !arm && !js +// +build linux,!arm64,!arm,!js /* * Copyright 2020 Dgraph Labs, Inc. and Contributors diff --git a/vendor/github.com/dgraph-io/ristretto/z/rtutil.go b/vendor/github.com/dgraph-io/ristretto/v2/z/rtutil.go similarity index 99% rename from vendor/github.com/dgraph-io/ristretto/z/rtutil.go rename to vendor/github.com/dgraph-io/ristretto/v2/z/rtutil.go index 8f317c80d3..89e4176451 100644 --- a/vendor/github.com/dgraph-io/ristretto/z/rtutil.go +++ b/vendor/github.com/dgraph-io/ristretto/v2/z/rtutil.go @@ -27,10 +27,12 @@ import ( ) // NanoTime returns the current time in nanoseconds from a monotonic clock. +// //go:linkname NanoTime runtime.nanotime func NanoTime() int64 // CPUTicks is a faster alternative to NanoTime to measure time duration. +// //go:linkname CPUTicks runtime.cputicks func CPUTicks() int64 @@ -60,6 +62,7 @@ func MemHashString(str string) uint64 { } // FastRand is a fast thread local random function. 
+// //go:linkname FastRand runtime.fastrand func FastRand() uint32 diff --git a/vendor/github.com/dgraph-io/ristretto/z/rtutil.s b/vendor/github.com/dgraph-io/ristretto/v2/z/rtutil.s similarity index 100% rename from vendor/github.com/dgraph-io/ristretto/z/rtutil.s rename to vendor/github.com/dgraph-io/ristretto/v2/z/rtutil.s diff --git a/vendor/github.com/dgraph-io/ristretto/z/simd/baseline.go b/vendor/github.com/dgraph-io/ristretto/v2/z/simd/baseline.go similarity index 97% rename from vendor/github.com/dgraph-io/ristretto/z/simd/baseline.go rename to vendor/github.com/dgraph-io/ristretto/v2/z/simd/baseline.go index 967e3a307e..e98f628603 100644 --- a/vendor/github.com/dgraph-io/ristretto/z/simd/baseline.go +++ b/vendor/github.com/dgraph-io/ristretto/v2/z/simd/baseline.go @@ -98,6 +98,7 @@ func Binary(keys []uint64, key uint64) int16 { })) } +//nolint:unused func cmp2_native(twos, pk [2]uint64) int16 { if twos[0] == pk[0] { return 0 @@ -108,6 +109,7 @@ func cmp2_native(twos, pk [2]uint64) int16 { return 2 } +//nolint:unused func cmp4_native(fours, pk [4]uint64) int16 { for i := range fours { if fours[i] >= pk[i] { @@ -117,6 +119,7 @@ func cmp4_native(fours, pk [4]uint64) int16 { return 4 } +//nolint:unused func cmp8_native(a [8]uint64, pk [4]uint64) int16 { for i := range a { if a[i] >= pk[0] { diff --git a/vendor/github.com/dgraph-io/ristretto/z/simd/search.go b/vendor/github.com/dgraph-io/ristretto/v2/z/simd/search.go similarity index 95% rename from vendor/github.com/dgraph-io/ristretto/z/simd/search.go rename to vendor/github.com/dgraph-io/ristretto/v2/z/simd/search.go index b1e639225a..f27206163c 100644 --- a/vendor/github.com/dgraph-io/ristretto/z/simd/search.go +++ b/vendor/github.com/dgraph-io/ristretto/v2/z/simd/search.go @@ -1,3 +1,4 @@ +//go:build !amd64 // +build !amd64 /* @@ -20,7 +21,7 @@ package simd // Search uses the Clever search to find the correct key. func Search(xs []uint64, k uint64) int16 { - if len(xs) < 8 || (len(xs) % 8 != 0) { + if len(xs) < 8 || (len(xs)%8 != 0) { return Naive(xs, k) } var twos, pk [4]uint64 diff --git a/vendor/github.com/dgraph-io/ristretto/z/simd/search_amd64.s b/vendor/github.com/dgraph-io/ristretto/v2/z/simd/search_amd64.s similarity index 100% rename from vendor/github.com/dgraph-io/ristretto/z/simd/search_amd64.s rename to vendor/github.com/dgraph-io/ristretto/v2/z/simd/search_amd64.s diff --git a/vendor/github.com/dgraph-io/ristretto/z/simd/stub_search_amd64.go b/vendor/github.com/dgraph-io/ristretto/v2/z/simd/stub_search_amd64.go similarity index 100% rename from vendor/github.com/dgraph-io/ristretto/z/simd/stub_search_amd64.go rename to vendor/github.com/dgraph-io/ristretto/v2/z/simd/stub_search_amd64.go diff --git a/vendor/github.com/dgraph-io/ristretto/z/z.go b/vendor/github.com/dgraph-io/ristretto/v2/z/z.go similarity index 85% rename from vendor/github.com/dgraph-io/ristretto/z/z.go rename to vendor/github.com/dgraph-io/ristretto/v2/z/z.go index 97455586a1..ac424583c5 100644 --- a/vendor/github.com/dgraph-io/ristretto/z/z.go +++ b/vendor/github.com/dgraph-io/ristretto/v2/z/z.go @@ -23,19 +23,20 @@ import ( "github.com/cespare/xxhash/v2" ) -// TODO: Figure out a way to re-use memhash for the second uint64 hash, we -// already know that appending bytes isn't reliable for generating a -// second hash (see Ristretto PR #88). 
-// -// We also know that while the Go runtime has a runtime memhash128 -// function, it's not possible to use it to generate [2]uint64 or -// anything resembling a 128bit hash, even though that's exactly what -// we need in this situation. -func KeyToHash(key interface{}) (uint64, uint64) { - if key == nil { - return 0, 0 - } - switch k := key.(type) { +type Key interface { + uint64 | string | []byte | byte | int | int32 | uint32 | int64 +} + +// TODO: Figure out a way to re-use memhash for the second uint64 hash, +// we already know that appending bytes isn't reliable for generating a +// second hash (see Ristretto PR #88). +// We also know that while the Go runtime has a runtime memhash128 +// function, it's not possible to use it to generate [2]uint64 or +// anything resembling a 128bit hash, even though that's exactly what +// we need in this situation. +func KeyToHash[K Key](key K) (uint64, uint64) { + keyAsAny := any(key) + switch k := keyAsAny.(type) { case uint64: return k, 0 case string: diff --git a/vendor/github.com/dustin/go-humanize/.travis.yml b/vendor/github.com/dustin/go-humanize/.travis.yml index ba95cdd15c..ac12e485a1 100644 --- a/vendor/github.com/dustin/go-humanize/.travis.yml +++ b/vendor/github.com/dustin/go-humanize/.travis.yml @@ -1,12 +1,12 @@ sudo: false language: go +go_import_path: github.com/dustin/go-humanize go: - - 1.3.x - - 1.5.x - - 1.6.x - - 1.7.x - - 1.8.x - - 1.9.x + - 1.13.x + - 1.14.x + - 1.15.x + - 1.16.x + - stable - master matrix: allow_failures: @@ -15,7 +15,7 @@ matrix: install: - # Do nothing. This is needed to prevent default install action "go get -t -v ./..." from happening here (we want it to happen inside script step). script: - - go get -t -v ./... - diff -u <(echo -n) <(gofmt -d -s .) - - go tool vet . + - go vet . + - go install -v -race ./... - go test -v -race ./... diff --git a/vendor/github.com/dustin/go-humanize/README.markdown b/vendor/github.com/dustin/go-humanize/README.markdown index 91b4ae5646..7d0b16b34f 100644 --- a/vendor/github.com/dustin/go-humanize/README.markdown +++ b/vendor/github.com/dustin/go-humanize/README.markdown @@ -5,7 +5,7 @@ Just a few functions for helping humanize times and sizes. `go get` it as `github.com/dustin/go-humanize`, import it as `"github.com/dustin/go-humanize"`, use it as `humanize`. -See [godoc](https://godoc.org/github.com/dustin/go-humanize) for +See [godoc](https://pkg.go.dev/github.com/dustin/go-humanize) for complete documentation. 
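The z.go hunk above replaces the `interface{}`-keyed `KeyToHash` with a generic version constrained by the new `Key` type set, which rules out nil keys at compile time (hence the dropped nil check) while keeping a runtime type switch over the permitted kinds. A standalone sketch of the pattern, not the ristretto function itself:

```go
package main

import "fmt"

// Key restates the union constraint from the hunk above: only these concrete
// key kinds are admitted, so there is no nil case to guard against.
type Key interface {
	uint64 | string | []byte | byte | int | int32 | uint32 | int64
}

// describe shows the any(key) trick the new KeyToHash uses to recover the
// concrete type inside a generic function.
func describe[K Key](key K) string {
	switch k := any(key).(type) {
	case uint64:
		return fmt.Sprintf("uint64 %d", k)
	case string:
		return fmt.Sprintf("string %q", k)
	default:
		return fmt.Sprintf("%T", k)
	}
}

func main() {
	fmt.Println(describe(uint64(42)), describe("butter"))
}
```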
## Sizes diff --git a/vendor/github.com/dustin/go-humanize/bigbytes.go b/vendor/github.com/dustin/go-humanize/bigbytes.go index 1a2bf61723..3b015fd59e 100644 --- a/vendor/github.com/dustin/go-humanize/bigbytes.go +++ b/vendor/github.com/dustin/go-humanize/bigbytes.go @@ -28,6 +28,10 @@ var ( BigZiByte = (&big.Int{}).Mul(BigEiByte, bigIECExp) // BigYiByte is 1,024 z bytes in bit.Ints BigYiByte = (&big.Int{}).Mul(BigZiByte, bigIECExp) + // BigRiByte is 1,024 y bytes in bit.Ints + BigRiByte = (&big.Int{}).Mul(BigYiByte, bigIECExp) + // BigQiByte is 1,024 r bytes in bit.Ints + BigQiByte = (&big.Int{}).Mul(BigRiByte, bigIECExp) ) var ( @@ -51,6 +55,10 @@ var ( BigZByte = (&big.Int{}).Mul(BigEByte, bigSIExp) // BigYByte is 1,000 SI z bytes in big.Ints BigYByte = (&big.Int{}).Mul(BigZByte, bigSIExp) + // BigRByte is 1,000 SI y bytes in big.Ints + BigRByte = (&big.Int{}).Mul(BigYByte, bigSIExp) + // BigQByte is 1,000 SI r bytes in big.Ints + BigQByte = (&big.Int{}).Mul(BigRByte, bigSIExp) ) var bigBytesSizeTable = map[string]*big.Int{ @@ -71,6 +79,10 @@ var bigBytesSizeTable = map[string]*big.Int{ "zb": BigZByte, "yib": BigYiByte, "yb": BigYByte, + "rib": BigRiByte, + "rb": BigRByte, + "qib": BigQiByte, + "qb": BigQByte, // Without suffix "": BigByte, "ki": BigKiByte, @@ -89,6 +101,10 @@ var bigBytesSizeTable = map[string]*big.Int{ "zi": BigZiByte, "y": BigYByte, "yi": BigYiByte, + "r": BigRByte, + "ri": BigRiByte, + "q": BigQByte, + "qi": BigQiByte, } var ten = big.NewInt(10) @@ -115,7 +131,7 @@ func humanateBigBytes(s, base *big.Int, sizes []string) string { // // BigBytes(82854982) -> 83 MB func BigBytes(s *big.Int) string { - sizes := []string{"B", "kB", "MB", "GB", "TB", "PB", "EB", "ZB", "YB"} + sizes := []string{"B", "kB", "MB", "GB", "TB", "PB", "EB", "ZB", "YB", "RB", "QB"} return humanateBigBytes(s, bigSIExp, sizes) } @@ -125,7 +141,7 @@ func BigBytes(s *big.Int) string { // // BigIBytes(82854982) -> 79 MiB func BigIBytes(s *big.Int) string { - sizes := []string{"B", "KiB", "MiB", "GiB", "TiB", "PiB", "EiB", "ZiB", "YiB"} + sizes := []string{"B", "KiB", "MiB", "GiB", "TiB", "PiB", "EiB", "ZiB", "YiB", "RiB", "QiB"} return humanateBigBytes(s, bigIECExp, sizes) } diff --git a/vendor/github.com/dustin/go-humanize/commaf.go b/vendor/github.com/dustin/go-humanize/commaf.go index 620690dec7..2bc83a03cf 100644 --- a/vendor/github.com/dustin/go-humanize/commaf.go +++ b/vendor/github.com/dustin/go-humanize/commaf.go @@ -1,3 +1,4 @@ +//go:build go1.6 // +build go1.6 package humanize diff --git a/vendor/github.com/dustin/go-humanize/ftoa.go b/vendor/github.com/dustin/go-humanize/ftoa.go index 1c62b640d4..bce923f371 100644 --- a/vendor/github.com/dustin/go-humanize/ftoa.go +++ b/vendor/github.com/dustin/go-humanize/ftoa.go @@ -6,6 +6,9 @@ import ( ) func stripTrailingZeros(s string) string { + if !strings.ContainsRune(s, '.') { + return s + } offset := len(s) - 1 for offset > 0 { if s[offset] == '.' 
{ diff --git a/vendor/github.com/dustin/go-humanize/number.go b/vendor/github.com/dustin/go-humanize/number.go index dec6186599..6470d0d47a 100644 --- a/vendor/github.com/dustin/go-humanize/number.go +++ b/vendor/github.com/dustin/go-humanize/number.go @@ -73,7 +73,7 @@ func FormatFloat(format string, n float64) string { if n > math.MaxFloat64 { return "Infinity" } - if n < -math.MaxFloat64 { + if n < (0.0 - math.MaxFloat64) { return "-Infinity" } diff --git a/vendor/github.com/dustin/go-humanize/si.go b/vendor/github.com/dustin/go-humanize/si.go index ae659e0e49..8b85019849 100644 --- a/vendor/github.com/dustin/go-humanize/si.go +++ b/vendor/github.com/dustin/go-humanize/si.go @@ -8,6 +8,8 @@ import ( ) var siPrefixTable = map[float64]string{ + -30: "q", // quecto + -27: "r", // ronto -24: "y", // yocto -21: "z", // zepto -18: "a", // atto @@ -25,6 +27,8 @@ var siPrefixTable = map[float64]string{ 18: "E", // exa 21: "Z", // zetta 24: "Y", // yotta + 27: "R", // ronna + 30: "Q", // quetta } var revSIPrefixTable = revfmap(siPrefixTable) diff --git a/vendor/github.com/gogo/protobuf/AUTHORS b/vendor/github.com/gogo/protobuf/AUTHORS deleted file mode 100644 index 3d97fc7a29..0000000000 --- a/vendor/github.com/gogo/protobuf/AUTHORS +++ /dev/null @@ -1,15 +0,0 @@ -# This is the official list of GoGo authors for copyright purposes. -# This file is distinct from the CONTRIBUTORS file, which -# lists people. For example, employees are listed in CONTRIBUTORS, -# but not in AUTHORS, because the employer holds the copyright. - -# Names should be added to this file as one of -# Organization's name -# Individual's name -# Individual's name - -# Please keep the list sorted. - -Sendgrid, Inc -Vastech SA (PTY) LTD -Walter Schulze diff --git a/vendor/github.com/gogo/protobuf/CONTRIBUTORS b/vendor/github.com/gogo/protobuf/CONTRIBUTORS deleted file mode 100644 index 1b4f6c208a..0000000000 --- a/vendor/github.com/gogo/protobuf/CONTRIBUTORS +++ /dev/null @@ -1,23 +0,0 @@ -Anton Povarov -Brian Goff -Clayton Coleman -Denis Smirnov -DongYun Kang -Dwayne Schultz -Georg Apitz -Gustav Paul -Johan Brandhorst -John Shahid -John Tuley -Laurent -Patrick Lee -Peter Edge -Roger Johansson -Sam Nguyen -Sergio Arbeo -Stephen J Day -Tamir Duberstein -Todd Eisenberger -Tormod Erevik Lea -Vyacheslav Kim -Walter Schulze diff --git a/vendor/github.com/gogo/protobuf/proto/Makefile b/vendor/github.com/gogo/protobuf/proto/Makefile deleted file mode 100644 index 00d65f3277..0000000000 --- a/vendor/github.com/gogo/protobuf/proto/Makefile +++ /dev/null @@ -1,43 +0,0 @@ -# Go support for Protocol Buffers - Google's data interchange format -# -# Copyright 2010 The Go Authors. All rights reserved. -# https://github.com/golang/protobuf -# -# Redistribution and use in source and binary forms, with or without -# modification, are permitted provided that the following conditions are -# met: -# -# * Redistributions of source code must retain the above copyright -# notice, this list of conditions and the following disclaimer. -# * Redistributions in binary form must reproduce the above -# copyright notice, this list of conditions and the following disclaimer -# in the documentation and/or other materials provided with the -# distribution. -# * Neither the name of Google Inc. nor the names of its -# contributors may be used to endorse or promote products derived from -# this software without specific prior written permission. 
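The si.go hunk above adds the four prefixes adopted by the 27th CGPM in 2022: ronna (R, 1e27), quetta (Q, 1e30), ronto (r, 1e-27), and quecto (q, 1e-30). With those table entries in place, `humanize.SI` can render magnitudes that previously fell outside the table; the outputs below are expected values, not quoted test results:

```go
package main

import (
	"fmt"

	"github.com/dustin/go-humanize"
)

func main() {
	// Values beyond yotta/yocto now pick up the 2022 prefixes.
	fmt.Println(humanize.SI(2.2e28, "B"))  // expected: 22 RB
	fmt.Println(humanize.SI(3.1e-28, "F")) // expected: 310 qF
}
```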
-# -# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -install: - go install - -test: install generate-test-pbs - go test - - -generate-test-pbs: - make install - make -C test_proto - make -C proto3_proto - make diff --git a/vendor/github.com/gogo/protobuf/proto/clone.go b/vendor/github.com/gogo/protobuf/proto/clone.go deleted file mode 100644 index a26b046d94..0000000000 --- a/vendor/github.com/gogo/protobuf/proto/clone.go +++ /dev/null @@ -1,258 +0,0 @@ -// Go support for Protocol Buffers - Google's data interchange format -// -// Copyright 2011 The Go Authors. All rights reserved. -// https://github.com/golang/protobuf -// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following disclaimer -// in the documentation and/or other materials provided with the -// distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived from -// this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -// Protocol buffer deep copy and merge. -// TODO: RawMessage. - -package proto - -import ( - "fmt" - "log" - "reflect" - "strings" -) - -// Clone returns a deep copy of a protocol buffer. -func Clone(src Message) Message { - in := reflect.ValueOf(src) - if in.IsNil() { - return src - } - out := reflect.New(in.Type().Elem()) - dst := out.Interface().(Message) - Merge(dst, src) - return dst -} - -// Merger is the interface representing objects that can merge messages of the same type. -type Merger interface { - // Merge merges src into this message. - // Required and optional fields that are set in src will be set to that value in dst. 
- // Elements of repeated fields will be appended. - // - // Merge may panic if called with a different argument type than the receiver. - Merge(src Message) -} - -// generatedMerger is the custom merge method that generated protos will have. -// We must add this method since a generate Merge method will conflict with -// many existing protos that have a Merge data field already defined. -type generatedMerger interface { - XXX_Merge(src Message) -} - -// Merge merges src into dst. -// Required and optional fields that are set in src will be set to that value in dst. -// Elements of repeated fields will be appended. -// Merge panics if src and dst are not the same type, or if dst is nil. -func Merge(dst, src Message) { - if m, ok := dst.(Merger); ok { - m.Merge(src) - return - } - - in := reflect.ValueOf(src) - out := reflect.ValueOf(dst) - if out.IsNil() { - panic("proto: nil destination") - } - if in.Type() != out.Type() { - panic(fmt.Sprintf("proto.Merge(%T, %T) type mismatch", dst, src)) - } - if in.IsNil() { - return // Merge from nil src is a noop - } - if m, ok := dst.(generatedMerger); ok { - m.XXX_Merge(src) - return - } - mergeStruct(out.Elem(), in.Elem()) -} - -func mergeStruct(out, in reflect.Value) { - sprop := GetProperties(in.Type()) - for i := 0; i < in.NumField(); i++ { - f := in.Type().Field(i) - if strings.HasPrefix(f.Name, "XXX_") { - continue - } - mergeAny(out.Field(i), in.Field(i), false, sprop.Prop[i]) - } - - if emIn, ok := in.Addr().Interface().(extensionsBytes); ok { - emOut := out.Addr().Interface().(extensionsBytes) - bIn := emIn.GetExtensions() - bOut := emOut.GetExtensions() - *bOut = append(*bOut, *bIn...) - } else if emIn, err := extendable(in.Addr().Interface()); err == nil { - emOut, _ := extendable(out.Addr().Interface()) - mIn, muIn := emIn.extensionsRead() - if mIn != nil { - mOut := emOut.extensionsWrite() - muIn.Lock() - mergeExtension(mOut, mIn) - muIn.Unlock() - } - } - - uf := in.FieldByName("XXX_unrecognized") - if !uf.IsValid() { - return - } - uin := uf.Bytes() - if len(uin) > 0 { - out.FieldByName("XXX_unrecognized").SetBytes(append([]byte(nil), uin...)) - } -} - -// mergeAny performs a merge between two values of the same type. -// viaPtr indicates whether the values were indirected through a pointer (implying proto2). -// prop is set if this is a struct field (it may be nil). -func mergeAny(out, in reflect.Value, viaPtr bool, prop *Properties) { - if in.Type() == protoMessageType { - if !in.IsNil() { - if out.IsNil() { - out.Set(reflect.ValueOf(Clone(in.Interface().(Message)))) - } else { - Merge(out.Interface().(Message), in.Interface().(Message)) - } - } - return - } - switch in.Kind() { - case reflect.Bool, reflect.Float32, reflect.Float64, reflect.Int32, reflect.Int64, - reflect.String, reflect.Uint32, reflect.Uint64: - if !viaPtr && isProto3Zero(in) { - return - } - out.Set(in) - case reflect.Interface: - // Probably a oneof field; copy non-nil values. - if in.IsNil() { - return - } - // Allocate destination if it is not set, or set to a different type. - // Otherwise we will merge as normal. - if out.IsNil() || out.Elem().Type() != in.Elem().Type() { - out.Set(reflect.New(in.Elem().Elem().Type())) // interface -> *T -> T -> new(T) - } - mergeAny(out.Elem(), in.Elem(), false, nil) - case reflect.Map: - if in.Len() == 0 { - return - } - if out.IsNil() { - out.Set(reflect.MakeMap(in.Type())) - } - // For maps with value types of *T or []byte we need to deep copy each value. 
- elemKind := in.Type().Elem().Kind() - for _, key := range in.MapKeys() { - var val reflect.Value - switch elemKind { - case reflect.Ptr: - val = reflect.New(in.Type().Elem().Elem()) - mergeAny(val, in.MapIndex(key), false, nil) - case reflect.Slice: - val = in.MapIndex(key) - val = reflect.ValueOf(append([]byte{}, val.Bytes()...)) - default: - val = in.MapIndex(key) - } - out.SetMapIndex(key, val) - } - case reflect.Ptr: - if in.IsNil() { - return - } - if out.IsNil() { - out.Set(reflect.New(in.Elem().Type())) - } - mergeAny(out.Elem(), in.Elem(), true, nil) - case reflect.Slice: - if in.IsNil() { - return - } - if in.Type().Elem().Kind() == reflect.Uint8 { - // []byte is a scalar bytes field, not a repeated field. - - // Edge case: if this is in a proto3 message, a zero length - // bytes field is considered the zero value, and should not - // be merged. - if prop != nil && prop.proto3 && in.Len() == 0 { - return - } - - // Make a deep copy. - // Append to []byte{} instead of []byte(nil) so that we never end up - // with a nil result. - out.SetBytes(append([]byte{}, in.Bytes()...)) - return - } - n := in.Len() - if out.IsNil() { - out.Set(reflect.MakeSlice(in.Type(), 0, n)) - } - switch in.Type().Elem().Kind() { - case reflect.Bool, reflect.Float32, reflect.Float64, reflect.Int32, reflect.Int64, - reflect.String, reflect.Uint32, reflect.Uint64: - out.Set(reflect.AppendSlice(out, in)) - default: - for i := 0; i < n; i++ { - x := reflect.Indirect(reflect.New(in.Type().Elem())) - mergeAny(x, in.Index(i), false, nil) - out.Set(reflect.Append(out, x)) - } - } - case reflect.Struct: - mergeStruct(out, in) - default: - // unknown type, so not a protocol buffer - log.Printf("proto: don't know how to copy %v", in) - } -} - -func mergeExtension(out, in map[int32]Extension) { - for extNum, eIn := range in { - eOut := Extension{desc: eIn.desc} - if eIn.value != nil { - v := reflect.New(reflect.TypeOf(eIn.value)).Elem() - mergeAny(v, reflect.ValueOf(eIn.value), false, nil) - eOut.value = v.Interface() - } - if eIn.enc != nil { - eOut.enc = make([]byte, len(eIn.enc)) - copy(eOut.enc, eIn.enc) - } - - out[extNum] = eOut - } -} diff --git a/vendor/github.com/gogo/protobuf/proto/custom_gogo.go b/vendor/github.com/gogo/protobuf/proto/custom_gogo.go deleted file mode 100644 index 24552483c6..0000000000 --- a/vendor/github.com/gogo/protobuf/proto/custom_gogo.go +++ /dev/null @@ -1,39 +0,0 @@ -// Protocol Buffers for Go with Gadgets -// -// Copyright (c) 2018, The GoGo Authors. All rights reserved. -// http://github.com/gogo/protobuf -// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following disclaimer -// in the documentation and/or other materials provided with the -// distribution. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -package proto - -import "reflect" - -type custom interface { - Marshal() ([]byte, error) - Unmarshal(data []byte) error - Size() int -} - -var customType = reflect.TypeOf((*custom)(nil)).Elem() diff --git a/vendor/github.com/gogo/protobuf/proto/decode.go b/vendor/github.com/gogo/protobuf/proto/decode.go deleted file mode 100644 index 63b0f08bef..0000000000 --- a/vendor/github.com/gogo/protobuf/proto/decode.go +++ /dev/null @@ -1,427 +0,0 @@ -// Go support for Protocol Buffers - Google's data interchange format -// -// Copyright 2010 The Go Authors. All rights reserved. -// https://github.com/golang/protobuf -// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following disclaimer -// in the documentation and/or other materials provided with the -// distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived from -// this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -package proto - -/* - * Routines for decoding protocol buffer data to construct in-memory representations. - */ - -import ( - "errors" - "fmt" - "io" -) - -// errOverflow is returned when an integer is too large to be represented. -var errOverflow = errors.New("proto: integer overflow") - -// ErrInternalBadWireType is returned by generated code when an incorrect -// wire type is encountered. It does not get returned to user code. -var ErrInternalBadWireType = errors.New("proto: internal error: bad wiretype for oneof") - -// DecodeVarint reads a varint-encoded integer from the slice. -// It returns the integer and the number of bytes consumed, or -// zero if there is not enough. -// This is the format for the -// int32, int64, uint32, uint64, bool, and enum -// protocol buffer types. 
-func DecodeVarint(buf []byte) (x uint64, n int) { - for shift := uint(0); shift < 64; shift += 7 { - if n >= len(buf) { - return 0, 0 - } - b := uint64(buf[n]) - n++ - x |= (b & 0x7F) << shift - if (b & 0x80) == 0 { - return x, n - } - } - - // The number is too large to represent in a 64-bit value. - return 0, 0 -} - -func (p *Buffer) decodeVarintSlow() (x uint64, err error) { - i := p.index - l := len(p.buf) - - for shift := uint(0); shift < 64; shift += 7 { - if i >= l { - err = io.ErrUnexpectedEOF - return - } - b := p.buf[i] - i++ - x |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - p.index = i - return - } - } - - // The number is too large to represent in a 64-bit value. - err = errOverflow - return -} - -// DecodeVarint reads a varint-encoded integer from the Buffer. -// This is the format for the -// int32, int64, uint32, uint64, bool, and enum -// protocol buffer types. -func (p *Buffer) DecodeVarint() (x uint64, err error) { - i := p.index - buf := p.buf - - if i >= len(buf) { - return 0, io.ErrUnexpectedEOF - } else if buf[i] < 0x80 { - p.index++ - return uint64(buf[i]), nil - } else if len(buf)-i < 10 { - return p.decodeVarintSlow() - } - - var b uint64 - // we already checked the first byte - x = uint64(buf[i]) - 0x80 - i++ - - b = uint64(buf[i]) - i++ - x += b << 7 - if b&0x80 == 0 { - goto done - } - x -= 0x80 << 7 - - b = uint64(buf[i]) - i++ - x += b << 14 - if b&0x80 == 0 { - goto done - } - x -= 0x80 << 14 - - b = uint64(buf[i]) - i++ - x += b << 21 - if b&0x80 == 0 { - goto done - } - x -= 0x80 << 21 - - b = uint64(buf[i]) - i++ - x += b << 28 - if b&0x80 == 0 { - goto done - } - x -= 0x80 << 28 - - b = uint64(buf[i]) - i++ - x += b << 35 - if b&0x80 == 0 { - goto done - } - x -= 0x80 << 35 - - b = uint64(buf[i]) - i++ - x += b << 42 - if b&0x80 == 0 { - goto done - } - x -= 0x80 << 42 - - b = uint64(buf[i]) - i++ - x += b << 49 - if b&0x80 == 0 { - goto done - } - x -= 0x80 << 49 - - b = uint64(buf[i]) - i++ - x += b << 56 - if b&0x80 == 0 { - goto done - } - x -= 0x80 << 56 - - b = uint64(buf[i]) - i++ - x += b << 63 - if b&0x80 == 0 { - goto done - } - - return 0, errOverflow - -done: - p.index = i - return x, nil -} - -// DecodeFixed64 reads a 64-bit integer from the Buffer. -// This is the format for the -// fixed64, sfixed64, and double protocol buffer types. -func (p *Buffer) DecodeFixed64() (x uint64, err error) { - // x, err already 0 - i := p.index + 8 - if i < 0 || i > len(p.buf) { - err = io.ErrUnexpectedEOF - return - } - p.index = i - - x = uint64(p.buf[i-8]) - x |= uint64(p.buf[i-7]) << 8 - x |= uint64(p.buf[i-6]) << 16 - x |= uint64(p.buf[i-5]) << 24 - x |= uint64(p.buf[i-4]) << 32 - x |= uint64(p.buf[i-3]) << 40 - x |= uint64(p.buf[i-2]) << 48 - x |= uint64(p.buf[i-1]) << 56 - return -} - -// DecodeFixed32 reads a 32-bit integer from the Buffer. -// This is the format for the -// fixed32, sfixed32, and float protocol buffer types. -func (p *Buffer) DecodeFixed32() (x uint64, err error) { - // x, err already 0 - i := p.index + 4 - if i < 0 || i > len(p.buf) { - err = io.ErrUnexpectedEOF - return - } - p.index = i - - x = uint64(p.buf[i-4]) - x |= uint64(p.buf[i-3]) << 8 - x |= uint64(p.buf[i-2]) << 16 - x |= uint64(p.buf[i-1]) << 24 - return -} - -// DecodeZigzag64 reads a zigzag-encoded 64-bit integer -// from the Buffer. -// This is the format used for the sint64 protocol buffer type. 
-func (p *Buffer) DecodeZigzag64() (x uint64, err error) { - x, err = p.DecodeVarint() - if err != nil { - return - } - x = (x >> 1) ^ uint64((int64(x&1)<<63)>>63) - return -} - -// DecodeZigzag32 reads a zigzag-encoded 32-bit integer -// from the Buffer. -// This is the format used for the sint32 protocol buffer type. -func (p *Buffer) DecodeZigzag32() (x uint64, err error) { - x, err = p.DecodeVarint() - if err != nil { - return - } - x = uint64((uint32(x) >> 1) ^ uint32((int32(x&1)<<31)>>31)) - return -} - -// DecodeRawBytes reads a count-delimited byte buffer from the Buffer. -// This is the format used for the bytes protocol buffer -// type and for embedded messages. -func (p *Buffer) DecodeRawBytes(alloc bool) (buf []byte, err error) { - n, err := p.DecodeVarint() - if err != nil { - return nil, err - } - - nb := int(n) - if nb < 0 { - return nil, fmt.Errorf("proto: bad byte length %d", nb) - } - end := p.index + nb - if end < p.index || end > len(p.buf) { - return nil, io.ErrUnexpectedEOF - } - - if !alloc { - // todo: check if can get more uses of alloc=false - buf = p.buf[p.index:end] - p.index += nb - return - } - - buf = make([]byte, nb) - copy(buf, p.buf[p.index:]) - p.index += nb - return -} - -// DecodeStringBytes reads an encoded string from the Buffer. -// This is the format used for the proto2 string type. -func (p *Buffer) DecodeStringBytes() (s string, err error) { - buf, err := p.DecodeRawBytes(false) - if err != nil { - return - } - return string(buf), nil -} - -// Unmarshaler is the interface representing objects that can -// unmarshal themselves. The argument points to data that may be -// overwritten, so implementations should not keep references to the -// buffer. -// Unmarshal implementations should not clear the receiver. -// Any unmarshaled data should be merged into the receiver. -// Callers of Unmarshal that do not want to retain existing data -// should Reset the receiver before calling Unmarshal. -type Unmarshaler interface { - Unmarshal([]byte) error -} - -// newUnmarshaler is the interface representing objects that can -// unmarshal themselves. The semantics are identical to Unmarshaler. -// -// This exists to support protoc-gen-go generated messages. -// The proto package will stop type-asserting to this interface in the future. -// -// DO NOT DEPEND ON THIS. -type newUnmarshaler interface { - XXX_Unmarshal([]byte) error -} - -// Unmarshal parses the protocol buffer representation in buf and places the -// decoded result in pb. If the struct underlying pb does not match -// the data in buf, the results can be unpredictable. -// -// Unmarshal resets pb before starting to unmarshal, so any -// existing data in pb is always removed. Use UnmarshalMerge -// to preserve and append to existing data. -func Unmarshal(buf []byte, pb Message) error { - pb.Reset() - if u, ok := pb.(newUnmarshaler); ok { - return u.XXX_Unmarshal(buf) - } - if u, ok := pb.(Unmarshaler); ok { - return u.Unmarshal(buf) - } - return NewBuffer(buf).Unmarshal(pb) -} - -// UnmarshalMerge parses the protocol buffer representation in buf and -// writes the decoded result to pb. If the struct underlying pb does not match -// the data in buf, the results can be unpredictable. -// -// UnmarshalMerge merges into existing data in pb. -// Most code should use Unmarshal instead. 
-func UnmarshalMerge(buf []byte, pb Message) error { - if u, ok := pb.(newUnmarshaler); ok { - return u.XXX_Unmarshal(buf) - } - if u, ok := pb.(Unmarshaler); ok { - // NOTE: The history of proto have unfortunately been inconsistent - // whether Unmarshaler should or should not implicitly clear itself. - // Some implementations do, most do not. - // Thus, calling this here may or may not do what people want. - // - // See https://github.com/golang/protobuf/issues/424 - return u.Unmarshal(buf) - } - return NewBuffer(buf).Unmarshal(pb) -} - -// DecodeMessage reads a count-delimited message from the Buffer. -func (p *Buffer) DecodeMessage(pb Message) error { - enc, err := p.DecodeRawBytes(false) - if err != nil { - return err - } - return NewBuffer(enc).Unmarshal(pb) -} - -// DecodeGroup reads a tag-delimited group from the Buffer. -// StartGroup tag is already consumed. This function consumes -// EndGroup tag. -func (p *Buffer) DecodeGroup(pb Message) error { - b := p.buf[p.index:] - x, y := findEndGroup(b) - if x < 0 { - return io.ErrUnexpectedEOF - } - err := Unmarshal(b[:x], pb) - p.index += y - return err -} - -// Unmarshal parses the protocol buffer representation in the -// Buffer and places the decoded result in pb. If the struct -// underlying pb does not match the data in the buffer, the results can be -// unpredictable. -// -// Unlike proto.Unmarshal, this does not reset pb before starting to unmarshal. -func (p *Buffer) Unmarshal(pb Message) error { - // If the object can unmarshal itself, let it. - if u, ok := pb.(newUnmarshaler); ok { - err := u.XXX_Unmarshal(p.buf[p.index:]) - p.index = len(p.buf) - return err - } - if u, ok := pb.(Unmarshaler); ok { - // NOTE: The history of proto have unfortunately been inconsistent - // whether Unmarshaler should or should not implicitly clear itself. - // Some implementations do, most do not. - // Thus, calling this here may or may not do what people want. - // - // See https://github.com/golang/protobuf/issues/424 - err := u.Unmarshal(p.buf[p.index:]) - p.index = len(p.buf) - return err - } - - // Slow workaround for messages that aren't Unmarshalers. - // This includes some hand-coded .pb.go files and - // bootstrap protos. - // TODO: fix all of those and then add Unmarshal to - // the Message interface. Then: - // The cast above and code below can be deleted. - // The old unmarshaler can be deleted. - // Clients can call Unmarshal directly (can already do that, actually). - var info InternalMessageInfo - err := info.Unmarshal(pb, p.buf[p.index:]) - p.index = len(p.buf) - return err -} diff --git a/vendor/github.com/gogo/protobuf/proto/deprecated.go b/vendor/github.com/gogo/protobuf/proto/deprecated.go deleted file mode 100644 index 35b882c09a..0000000000 --- a/vendor/github.com/gogo/protobuf/proto/deprecated.go +++ /dev/null @@ -1,63 +0,0 @@ -// Go support for Protocol Buffers - Google's data interchange format -// -// Copyright 2018 The Go Authors. All rights reserved. -// https://github.com/golang/protobuf -// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following disclaimer -// in the documentation and/or other materials provided with the -// distribution. 
-// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived from -// this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -package proto - -import "errors" - -// Deprecated: do not use. -type Stats struct{ Emalloc, Dmalloc, Encode, Decode, Chit, Cmiss, Size uint64 } - -// Deprecated: do not use. -func GetStats() Stats { return Stats{} } - -// Deprecated: do not use. -func MarshalMessageSet(interface{}) ([]byte, error) { - return nil, errors.New("proto: not implemented") -} - -// Deprecated: do not use. -func UnmarshalMessageSet([]byte, interface{}) error { - return errors.New("proto: not implemented") -} - -// Deprecated: do not use. -func MarshalMessageSetJSON(interface{}) ([]byte, error) { - return nil, errors.New("proto: not implemented") -} - -// Deprecated: do not use. -func UnmarshalMessageSetJSON([]byte, interface{}) error { - return errors.New("proto: not implemented") -} - -// Deprecated: do not use. -func RegisterMessageSetType(Message, int32, string) {} diff --git a/vendor/github.com/gogo/protobuf/proto/discard.go b/vendor/github.com/gogo/protobuf/proto/discard.go deleted file mode 100644 index fe1bd7d904..0000000000 --- a/vendor/github.com/gogo/protobuf/proto/discard.go +++ /dev/null @@ -1,350 +0,0 @@ -// Go support for Protocol Buffers - Google's data interchange format -// -// Copyright 2017 The Go Authors. All rights reserved. -// https://github.com/golang/protobuf -// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following disclaimer -// in the documentation and/or other materials provided with the -// distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived from -// this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -package proto - -import ( - "fmt" - "reflect" - "strings" - "sync" - "sync/atomic" -) - -type generatedDiscarder interface { - XXX_DiscardUnknown() -} - -// DiscardUnknown recursively discards all unknown fields from this message -// and all embedded messages. -// -// When unmarshaling a message with unrecognized fields, the tags and values -// of such fields are preserved in the Message. This allows a later call to -// marshal to be able to produce a message that continues to have those -// unrecognized fields. To avoid this, DiscardUnknown is used to -// explicitly clear the unknown fields after unmarshaling. -// -// For proto2 messages, the unknown fields of message extensions are only -// discarded from messages that have been accessed via GetExtension. -func DiscardUnknown(m Message) { - if m, ok := m.(generatedDiscarder); ok { - m.XXX_DiscardUnknown() - return - } - // TODO: Dynamically populate a InternalMessageInfo for legacy messages, - // but the master branch has no implementation for InternalMessageInfo, - // so it would be more work to replicate that approach. - discardLegacy(m) -} - -// DiscardUnknown recursively discards all unknown fields. -func (a *InternalMessageInfo) DiscardUnknown(m Message) { - di := atomicLoadDiscardInfo(&a.discard) - if di == nil { - di = getDiscardInfo(reflect.TypeOf(m).Elem()) - atomicStoreDiscardInfo(&a.discard, di) - } - di.discard(toPointer(&m)) -} - -type discardInfo struct { - typ reflect.Type - - initialized int32 // 0: only typ is valid, 1: everything is valid - lock sync.Mutex - - fields []discardFieldInfo - unrecognized field -} - -type discardFieldInfo struct { - field field // Offset of field, guaranteed to be valid - discard func(src pointer) -} - -var ( - discardInfoMap = map[reflect.Type]*discardInfo{} - discardInfoLock sync.Mutex -) - -func getDiscardInfo(t reflect.Type) *discardInfo { - discardInfoLock.Lock() - defer discardInfoLock.Unlock() - di := discardInfoMap[t] - if di == nil { - di = &discardInfo{typ: t} - discardInfoMap[t] = di - } - return di -} - -func (di *discardInfo) discard(src pointer) { - if src.isNil() { - return // Nothing to do. - } - - if atomic.LoadInt32(&di.initialized) == 0 { - di.computeDiscardInfo() - } - - for _, fi := range di.fields { - sfp := src.offset(fi.field) - fi.discard(sfp) - } - - // For proto2 messages, only discard unknown fields in message extensions - // that have been accessed via GetExtension. - if em, err := extendable(src.asPointerTo(di.typ).Interface()); err == nil { - // Ignore lock since DiscardUnknown is not concurrency safe. 
- emm, _ := em.extensionsRead() - for _, mx := range emm { - if m, ok := mx.value.(Message); ok { - DiscardUnknown(m) - } - } - } - - if di.unrecognized.IsValid() { - *src.offset(di.unrecognized).toBytes() = nil - } -} - -func (di *discardInfo) computeDiscardInfo() { - di.lock.Lock() - defer di.lock.Unlock() - if di.initialized != 0 { - return - } - t := di.typ - n := t.NumField() - - for i := 0; i < n; i++ { - f := t.Field(i) - if strings.HasPrefix(f.Name, "XXX_") { - continue - } - - dfi := discardFieldInfo{field: toField(&f)} - tf := f.Type - - // Unwrap tf to get its most basic type. - var isPointer, isSlice bool - if tf.Kind() == reflect.Slice && tf.Elem().Kind() != reflect.Uint8 { - isSlice = true - tf = tf.Elem() - } - if tf.Kind() == reflect.Ptr { - isPointer = true - tf = tf.Elem() - } - if isPointer && isSlice && tf.Kind() != reflect.Struct { - panic(fmt.Sprintf("%v.%s cannot be a slice of pointers to primitive types", t, f.Name)) - } - - switch tf.Kind() { - case reflect.Struct: - switch { - case !isPointer: - panic(fmt.Sprintf("%v.%s cannot be a direct struct value", t, f.Name)) - case isSlice: // E.g., []*pb.T - discardInfo := getDiscardInfo(tf) - dfi.discard = func(src pointer) { - sps := src.getPointerSlice() - for _, sp := range sps { - if !sp.isNil() { - discardInfo.discard(sp) - } - } - } - default: // E.g., *pb.T - discardInfo := getDiscardInfo(tf) - dfi.discard = func(src pointer) { - sp := src.getPointer() - if !sp.isNil() { - discardInfo.discard(sp) - } - } - } - case reflect.Map: - switch { - case isPointer || isSlice: - panic(fmt.Sprintf("%v.%s cannot be a pointer to a map or a slice of map values", t, f.Name)) - default: // E.g., map[K]V - if tf.Elem().Kind() == reflect.Ptr { // Proto struct (e.g., *T) - dfi.discard = func(src pointer) { - sm := src.asPointerTo(tf).Elem() - if sm.Len() == 0 { - return - } - for _, key := range sm.MapKeys() { - val := sm.MapIndex(key) - DiscardUnknown(val.Interface().(Message)) - } - } - } else { - dfi.discard = func(pointer) {} // Noop - } - } - case reflect.Interface: - // Must be oneof field. - switch { - case isPointer || isSlice: - panic(fmt.Sprintf("%v.%s cannot be a pointer to a interface or a slice of interface values", t, f.Name)) - default: // E.g., interface{} - // TODO: Make this faster? - dfi.discard = func(src pointer) { - su := src.asPointerTo(tf).Elem() - if !su.IsNil() { - sv := su.Elem().Elem().Field(0) - if sv.Kind() == reflect.Ptr && sv.IsNil() { - return - } - switch sv.Type().Kind() { - case reflect.Ptr: // Proto struct (e.g., *T) - DiscardUnknown(sv.Interface().(Message)) - } - } - } - } - default: - continue - } - di.fields = append(di.fields, dfi) - } - - di.unrecognized = invalidField - if f, ok := t.FieldByName("XXX_unrecognized"); ok { - if f.Type != reflect.TypeOf([]byte{}) { - panic("expected XXX_unrecognized to be of type []byte") - } - di.unrecognized = toField(&f) - } - - atomic.StoreInt32(&di.initialized, 1) -} - -func discardLegacy(m Message) { - v := reflect.ValueOf(m) - if v.Kind() != reflect.Ptr || v.IsNil() { - return - } - v = v.Elem() - if v.Kind() != reflect.Struct { - return - } - t := v.Type() - - for i := 0; i < v.NumField(); i++ { - f := t.Field(i) - if strings.HasPrefix(f.Name, "XXX_") { - continue - } - vf := v.Field(i) - tf := f.Type - - // Unwrap tf to get its most basic type. 
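// Hedged aside on the unwrap step here (field and type names hypothetical):
//   F []T    -> isSlice = true,                   tf = T
//   F *T     -> isPointer = true,                 tf = T
//   F []*T   -> isSlice = true, isPointer = true, tf = T
//   F []byte -> untouched; a bytes field is scalar-like, not a repeated field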
- var isPointer, isSlice bool - if tf.Kind() == reflect.Slice && tf.Elem().Kind() != reflect.Uint8 { - isSlice = true - tf = tf.Elem() - } - if tf.Kind() == reflect.Ptr { - isPointer = true - tf = tf.Elem() - } - if isPointer && isSlice && tf.Kind() != reflect.Struct { - panic(fmt.Sprintf("%T.%s cannot be a slice of pointers to primitive types", m, f.Name)) - } - - switch tf.Kind() { - case reflect.Struct: - switch { - case !isPointer: - panic(fmt.Sprintf("%T.%s cannot be a direct struct value", m, f.Name)) - case isSlice: // E.g., []*pb.T - for j := 0; j < vf.Len(); j++ { - discardLegacy(vf.Index(j).Interface().(Message)) - } - default: // E.g., *pb.T - discardLegacy(vf.Interface().(Message)) - } - case reflect.Map: - switch { - case isPointer || isSlice: - panic(fmt.Sprintf("%T.%s cannot be a pointer to a map or a slice of map values", m, f.Name)) - default: // E.g., map[K]V - tv := vf.Type().Elem() - if tv.Kind() == reflect.Ptr && tv.Implements(protoMessageType) { // Proto struct (e.g., *T) - for _, key := range vf.MapKeys() { - val := vf.MapIndex(key) - discardLegacy(val.Interface().(Message)) - } - } - } - case reflect.Interface: - // Must be oneof field. - switch { - case isPointer || isSlice: - panic(fmt.Sprintf("%T.%s cannot be a pointer to a interface or a slice of interface values", m, f.Name)) - default: // E.g., test_proto.isCommunique_Union interface - if !vf.IsNil() && f.Tag.Get("protobuf_oneof") != "" { - vf = vf.Elem() // E.g., *test_proto.Communique_Msg - if !vf.IsNil() { - vf = vf.Elem() // E.g., test_proto.Communique_Msg - vf = vf.Field(0) // E.g., Proto struct (e.g., *T) or primitive value - if vf.Kind() == reflect.Ptr { - discardLegacy(vf.Interface().(Message)) - } - } - } - } - } - } - - if vf := v.FieldByName("XXX_unrecognized"); vf.IsValid() { - if vf.Type() != reflect.TypeOf([]byte{}) { - panic("expected XXX_unrecognized to be of type []byte") - } - vf.Set(reflect.ValueOf([]byte(nil))) - } - - // For proto2 messages, only discard unknown fields in message extensions - // that have been accessed via GetExtension. - if em, err := extendable(m); err == nil { - // Ignore lock since discardLegacy is not concurrency safe. - emm, _ := em.extensionsRead() - for _, mx := range emm { - if m, ok := mx.value.(Message); ok { - discardLegacy(m) - } - } - } -} diff --git a/vendor/github.com/gogo/protobuf/proto/duration.go b/vendor/github.com/gogo/protobuf/proto/duration.go deleted file mode 100644 index 93464c91cf..0000000000 --- a/vendor/github.com/gogo/protobuf/proto/duration.go +++ /dev/null @@ -1,100 +0,0 @@ -// Go support for Protocol Buffers - Google's data interchange format -// -// Copyright 2016 The Go Authors. All rights reserved. -// https://github.com/golang/protobuf -// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following disclaimer -// in the documentation and/or other materials provided with the -// distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived from -// this software without specific prior written permission. 
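discardLegacy, removed above, ends its walk by resetting the XXX_unrecognized byte slice through reflection. A minimal runnable sketch of just that reset, against a hypothetical stand-in struct rather than generated code:

package main

import (
	"fmt"
	"reflect"
)

// clearUnrecognized nils out a []byte field named XXX_unrecognized, the
// same reflective reset discardLegacy performs. m must be a non-nil
// pointer to a struct.
func clearUnrecognized(m interface{}) {
	v := reflect.ValueOf(m)
	if v.Kind() != reflect.Ptr || v.IsNil() {
		return
	}
	v = v.Elem()
	if v.Kind() != reflect.Struct {
		return
	}
	f := v.FieldByName("XXX_unrecognized")
	if f.IsValid() && f.Type() == reflect.TypeOf([]byte(nil)) {
		f.Set(reflect.ValueOf([]byte(nil)))
	}
}

// msg is a hypothetical stand-in for a generated message struct.
type msg struct {
	Name             string
	XXX_unrecognized []byte
}

func main() {
	m := &msg{Name: "a", XXX_unrecognized: []byte{0x08, 0x01}}
	clearUnrecognized(m)
	fmt.Printf("%+v\n", m) // unknown bytes dropped, known fields kept
}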
-// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -package proto - -// This file implements conversions between google.protobuf.Duration -// and time.Duration. - -import ( - "errors" - "fmt" - "time" -) - -const ( - // Range of a Duration in seconds, as specified in - // google/protobuf/duration.proto. This is about 10,000 years in seconds. - maxSeconds = int64(10000 * 365.25 * 24 * 60 * 60) - minSeconds = -maxSeconds -) - -// validateDuration determines whether the Duration is valid according to the -// definition in google/protobuf/duration.proto. A valid Duration -// may still be too large to fit into a time.Duration (the range of Duration -// is about 10,000 years, and the range of time.Duration is about 290). -func validateDuration(d *duration) error { - if d == nil { - return errors.New("duration: nil Duration") - } - if d.Seconds < minSeconds || d.Seconds > maxSeconds { - return fmt.Errorf("duration: %#v: seconds out of range", d) - } - if d.Nanos <= -1e9 || d.Nanos >= 1e9 { - return fmt.Errorf("duration: %#v: nanos out of range", d) - } - // Seconds and Nanos must have the same sign, unless d.Nanos is zero. - if (d.Seconds < 0 && d.Nanos > 0) || (d.Seconds > 0 && d.Nanos < 0) { - return fmt.Errorf("duration: %#v: seconds and nanos have different signs", d) - } - return nil -} - -// DurationFromProto converts a Duration to a time.Duration. DurationFromProto -// returns an error if the Duration is invalid or is too large to be -// represented in a time.Duration. -func durationFromProto(p *duration) (time.Duration, error) { - if err := validateDuration(p); err != nil { - return 0, err - } - d := time.Duration(p.Seconds) * time.Second - if int64(d/time.Second) != p.Seconds { - return 0, fmt.Errorf("duration: %#v is out of range for time.Duration", p) - } - if p.Nanos != 0 { - d += time.Duration(p.Nanos) - if (d < 0) != (p.Nanos < 0) { - return 0, fmt.Errorf("duration: %#v is out of range for time.Duration", p) - } - } - return d, nil -} - -// DurationProto converts a time.Duration to a Duration. -func durationProto(d time.Duration) *duration { - nanos := d.Nanoseconds() - secs := nanos / 1e9 - nanos -= secs * 1e9 - return &duration{ - Seconds: secs, - Nanos: int32(nanos), - } -} diff --git a/vendor/github.com/gogo/protobuf/proto/duration_gogo.go b/vendor/github.com/gogo/protobuf/proto/duration_gogo.go deleted file mode 100644 index e748e1730e..0000000000 --- a/vendor/github.com/gogo/protobuf/proto/duration_gogo.go +++ /dev/null @@ -1,49 +0,0 @@ -// Protocol Buffers for Go with Gadgets -// -// Copyright (c) 2016, The GoGo Authors. All rights reserved. 
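The duration helpers removed above convert between time.Duration and the wire type's seconds-plus-nanos pair, with both components carrying the same sign. A runnable round-trip sketch under simplified assumptions (local stand-in struct; the original's validation and overflow checks are elided):

package main

import (
	"fmt"
	"time"
)

// durationPB is a local stand-in for the seconds/nanos wire type.
type durationPB struct {
	Seconds int64
	Nanos   int32
}

// toPB splits d like durationProto: truncating division gives the seconds,
// and the remainder keeps the same sign as the whole value.
func toPB(d time.Duration) durationPB {
	nanos := d.Nanoseconds()
	secs := nanos / 1e9
	return durationPB{Seconds: secs, Nanos: int32(nanos - secs*1e9)}
}

// fromPB recombines the pair; range checks from the original are elided.
func fromPB(p durationPB) time.Duration {
	return time.Duration(p.Seconds)*time.Second + time.Duration(p.Nanos)
}

func main() {
	for _, d := range []time.Duration{0, time.Second, -1500 * time.Millisecond, 90 * time.Minute} {
		p := toPB(d)
		fmt.Printf("%v -> {%d, %d} -> %v\n", d, p.Seconds, p.Nanos, fromPB(p))
	}
}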
-// http://github.com/gogo/protobuf -// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following disclaimer -// in the documentation and/or other materials provided with the -// distribution. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -package proto - -import ( - "reflect" - "time" -) - -var durationType = reflect.TypeOf((*time.Duration)(nil)).Elem() - -type duration struct { - Seconds int64 `protobuf:"varint,1,opt,name=seconds,proto3" json:"seconds,omitempty"` - Nanos int32 `protobuf:"varint,2,opt,name=nanos,proto3" json:"nanos,omitempty"` -} - -func (m *duration) Reset() { *m = duration{} } -func (*duration) ProtoMessage() {} -func (*duration) String() string { return "duration" } - -func init() { - RegisterType((*duration)(nil), "gogo.protobuf.proto.duration") -} diff --git a/vendor/github.com/gogo/protobuf/proto/encode.go b/vendor/github.com/gogo/protobuf/proto/encode.go deleted file mode 100644 index 9581ccd304..0000000000 --- a/vendor/github.com/gogo/protobuf/proto/encode.go +++ /dev/null @@ -1,205 +0,0 @@ -// Go support for Protocol Buffers - Google's data interchange format -// -// Copyright 2010 The Go Authors. All rights reserved. -// https://github.com/golang/protobuf -// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following disclaimer -// in the documentation and/or other materials provided with the -// distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived from -// this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -package proto - -/* - * Routines for encoding data into the wire format for protocol buffers. - */ - -import ( - "errors" - "reflect" -) - -var ( - // errRepeatedHasNil is the error returned if Marshal is called with - // a struct with a repeated field containing a nil element. - errRepeatedHasNil = errors.New("proto: repeated field has nil element") - - // errOneofHasNil is the error returned if Marshal is called with - // a struct with a oneof field containing a nil element. - errOneofHasNil = errors.New("proto: oneof field has nil value") - - // ErrNil is the error returned if Marshal is called with nil. - ErrNil = errors.New("proto: Marshal called with nil") - - // ErrTooLarge is the error returned if Marshal is called with a - // message that encodes to >2GB. - ErrTooLarge = errors.New("proto: message encodes to over 2 GB") -) - -// The fundamental encoders that put bytes on the wire. -// Those that take integer types all accept uint64 and are -// therefore of type valueEncoder. - -const maxVarintBytes = 10 // maximum length of a varint - -// EncodeVarint returns the varint encoding of x. -// This is the format for the -// int32, int64, uint32, uint64, bool, and enum -// protocol buffer types. -// Not used by the package itself, but helpful to clients -// wishing to use the same encoding. -func EncodeVarint(x uint64) []byte { - var buf [maxVarintBytes]byte - var n int - for n = 0; x > 127; n++ { - buf[n] = 0x80 | uint8(x&0x7F) - x >>= 7 - } - buf[n] = uint8(x) - n++ - return buf[0:n] -} - -// EncodeVarint writes a varint-encoded integer to the Buffer. -// This is the format for the -// int32, int64, uint32, uint64, bool, and enum -// protocol buffer types. -func (p *Buffer) EncodeVarint(x uint64) error { - for x >= 1<<7 { - p.buf = append(p.buf, uint8(x&0x7f|0x80)) - x >>= 7 - } - p.buf = append(p.buf, uint8(x)) - return nil -} - -// SizeVarint returns the varint encoding size of an integer. -func SizeVarint(x uint64) int { - switch { - case x < 1<<7: - return 1 - case x < 1<<14: - return 2 - case x < 1<<21: - return 3 - case x < 1<<28: - return 4 - case x < 1<<35: - return 5 - case x < 1<<42: - return 6 - case x < 1<<49: - return 7 - case x < 1<<56: - return 8 - case x < 1<<63: - return 9 - } - return 10 -} - -// EncodeFixed64 writes a 64-bit integer to the Buffer. -// This is the format for the -// fixed64, sfixed64, and double protocol buffer types. -func (p *Buffer) EncodeFixed64(x uint64) error { - p.buf = append(p.buf, - uint8(x), - uint8(x>>8), - uint8(x>>16), - uint8(x>>24), - uint8(x>>32), - uint8(x>>40), - uint8(x>>48), - uint8(x>>56)) - return nil -} - -// EncodeFixed32 writes a 32-bit integer to the Buffer. -// This is the format for the -// fixed32, sfixed32, and float protocol buffer types. -func (p *Buffer) EncodeFixed32(x uint64) error { - p.buf = append(p.buf, - uint8(x), - uint8(x>>8), - uint8(x>>16), - uint8(x>>24)) - return nil -} - -// EncodeZigzag64 writes a zigzag-encoded 64-bit integer -// to the Buffer. 
-// This is the format used for the sint64 protocol buffer type. -func (p *Buffer) EncodeZigzag64(x uint64) error { - // use signed number to get arithmetic right shift. - return p.EncodeVarint(uint64((x << 1) ^ uint64((int64(x) >> 63)))) -} - -// EncodeZigzag32 writes a zigzag-encoded 32-bit integer -// to the Buffer. -// This is the format used for the sint32 protocol buffer type. -func (p *Buffer) EncodeZigzag32(x uint64) error { - // use signed number to get arithmetic right shift. - return p.EncodeVarint(uint64((uint32(x) << 1) ^ uint32((int32(x) >> 31)))) -} - -// EncodeRawBytes writes a count-delimited byte buffer to the Buffer. -// This is the format used for the bytes protocol buffer -// type and for embedded messages. -func (p *Buffer) EncodeRawBytes(b []byte) error { - p.EncodeVarint(uint64(len(b))) - p.buf = append(p.buf, b...) - return nil -} - -// EncodeStringBytes writes an encoded string to the Buffer. -// This is the format used for the proto2 string type. -func (p *Buffer) EncodeStringBytes(s string) error { - p.EncodeVarint(uint64(len(s))) - p.buf = append(p.buf, s...) - return nil -} - -// Marshaler is the interface representing objects that can marshal themselves. -type Marshaler interface { - Marshal() ([]byte, error) -} - -// EncodeMessage writes the protocol buffer to the Buffer, -// prefixed by a varint-encoded length. -func (p *Buffer) EncodeMessage(pb Message) error { - siz := Size(pb) - sizVar := SizeVarint(uint64(siz)) - p.grow(siz + sizVar) - p.EncodeVarint(uint64(siz)) - return p.Marshal(pb) -} - -// All protocol buffer fields are nillable, but be careful. -func isNil(v reflect.Value) bool { - switch v.Kind() { - case reflect.Interface, reflect.Map, reflect.Ptr, reflect.Slice: - return v.IsNil() - } - return false -} diff --git a/vendor/github.com/gogo/protobuf/proto/encode_gogo.go b/vendor/github.com/gogo/protobuf/proto/encode_gogo.go deleted file mode 100644 index 0f5fb173e9..0000000000 --- a/vendor/github.com/gogo/protobuf/proto/encode_gogo.go +++ /dev/null @@ -1,33 +0,0 @@ -// Protocol Buffers for Go with Gadgets -// -// Copyright (c) 2013, The GoGo Authors. All rights reserved. -// http://github.com/gogo/protobuf -// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following disclaimer -// in the documentation and/or other materials provided with the -// distribution. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
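EncodeRawBytes and EncodeMessage, removed above, share one framing scheme: a varint byte count followed by the payload, which is also how embedded messages appear on the wire. A standalone sketch of writing and reading that framing (helper names are the example's own):

package main

import (
	"errors"
	"fmt"
)

// appendLenDelim frames a payload as EncodeRawBytes does: a varint length
// prefix followed by the bytes themselves.
func appendLenDelim(dst, payload []byte) []byte {
	x := uint64(len(payload))
	for x >= 0x80 {
		dst = append(dst, byte(x)|0x80)
		x >>= 7
	}
	dst = append(dst, byte(x))
	return append(dst, payload...)
}

// readLenDelim undoes the framing, in the spirit of DecodeRawBytes.
func readLenDelim(buf []byte) (payload, rest []byte, err error) {
	var n uint64
	var i int
	for shift := uint(0); ; shift += 7 {
		if i >= len(buf) || shift >= 64 {
			return nil, nil, errors.New("bad length prefix")
		}
		b := buf[i]
		i++
		n |= uint64(b&0x7F) << shift
		if b < 0x80 {
			break
		}
	}
	if uint64(len(buf)-i) < n {
		return nil, nil, errors.New("short buffer")
	}
	return buf[i : i+int(n)], buf[i+int(n):], nil
}

func main() {
	var wire []byte
	wire = appendLenDelim(wire, []byte("hello"))
	wire = appendLenDelim(wire, []byte("world!"))
	for len(wire) > 0 {
		p, rest, err := readLenDelim(wire)
		if err != nil {
			panic(err)
		}
		fmt.Printf("%q\n", p)
		wire = rest
	}
}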
- -package proto - -func NewRequiredNotSetError(field string) *RequiredNotSetError { - return &RequiredNotSetError{field} -} diff --git a/vendor/github.com/gogo/protobuf/proto/equal.go b/vendor/github.com/gogo/protobuf/proto/equal.go deleted file mode 100644 index d4db5a1c14..0000000000 --- a/vendor/github.com/gogo/protobuf/proto/equal.go +++ /dev/null @@ -1,300 +0,0 @@ -// Go support for Protocol Buffers - Google's data interchange format -// -// Copyright 2011 The Go Authors. All rights reserved. -// https://github.com/golang/protobuf -// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following disclaimer -// in the documentation and/or other materials provided with the -// distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived from -// this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -// Protocol buffer comparison. - -package proto - -import ( - "bytes" - "log" - "reflect" - "strings" -) - -/* -Equal returns true iff protocol buffers a and b are equal. -The arguments must both be pointers to protocol buffer structs. - -Equality is defined in this way: - - Two messages are equal iff they are the same type, - corresponding fields are equal, unknown field sets - are equal, and extensions sets are equal. - - Two set scalar fields are equal iff their values are equal. - If the fields are of a floating-point type, remember that - NaN != x for all x, including NaN. If the message is defined - in a proto3 .proto file, fields are not "set"; specifically, - zero length proto3 "bytes" fields are equal (nil == {}). - - Two repeated fields are equal iff their lengths are the same, - and their corresponding elements are equal. Note a "bytes" field, - although represented by []byte, is not a repeated field and the - rule for the scalar fields described above applies. - - Two unset fields are equal. - - Two unknown field sets are equal if their current - encoded state is equal. - - Two extension sets are equal iff they have corresponding - elements that are pairwise equal. - - Two map fields are equal iff their lengths are the same, - and they contain the same set of elements. Zero-length map - fields are equal. - - Every other combination of things are not equal. - -The return value is undefined if a and b are not protocol buffers. 
-*/ -func Equal(a, b Message) bool { - if a == nil || b == nil { - return a == b - } - v1, v2 := reflect.ValueOf(a), reflect.ValueOf(b) - if v1.Type() != v2.Type() { - return false - } - if v1.Kind() == reflect.Ptr { - if v1.IsNil() { - return v2.IsNil() - } - if v2.IsNil() { - return false - } - v1, v2 = v1.Elem(), v2.Elem() - } - if v1.Kind() != reflect.Struct { - return false - } - return equalStruct(v1, v2) -} - -// v1 and v2 are known to have the same type. -func equalStruct(v1, v2 reflect.Value) bool { - sprop := GetProperties(v1.Type()) - for i := 0; i < v1.NumField(); i++ { - f := v1.Type().Field(i) - if strings.HasPrefix(f.Name, "XXX_") { - continue - } - f1, f2 := v1.Field(i), v2.Field(i) - if f.Type.Kind() == reflect.Ptr { - if n1, n2 := f1.IsNil(), f2.IsNil(); n1 && n2 { - // both unset - continue - } else if n1 != n2 { - // set/unset mismatch - return false - } - f1, f2 = f1.Elem(), f2.Elem() - } - if !equalAny(f1, f2, sprop.Prop[i]) { - return false - } - } - - if em1 := v1.FieldByName("XXX_InternalExtensions"); em1.IsValid() { - em2 := v2.FieldByName("XXX_InternalExtensions") - if !equalExtensions(v1.Type(), em1.Interface().(XXX_InternalExtensions), em2.Interface().(XXX_InternalExtensions)) { - return false - } - } - - if em1 := v1.FieldByName("XXX_extensions"); em1.IsValid() { - em2 := v2.FieldByName("XXX_extensions") - if !equalExtMap(v1.Type(), em1.Interface().(map[int32]Extension), em2.Interface().(map[int32]Extension)) { - return false - } - } - - uf := v1.FieldByName("XXX_unrecognized") - if !uf.IsValid() { - return true - } - - u1 := uf.Bytes() - u2 := v2.FieldByName("XXX_unrecognized").Bytes() - return bytes.Equal(u1, u2) -} - -// v1 and v2 are known to have the same type. -// prop may be nil. -func equalAny(v1, v2 reflect.Value, prop *Properties) bool { - if v1.Type() == protoMessageType { - m1, _ := v1.Interface().(Message) - m2, _ := v2.Interface().(Message) - return Equal(m1, m2) - } - switch v1.Kind() { - case reflect.Bool: - return v1.Bool() == v2.Bool() - case reflect.Float32, reflect.Float64: - return v1.Float() == v2.Float() - case reflect.Int32, reflect.Int64: - return v1.Int() == v2.Int() - case reflect.Interface: - // Probably a oneof field; compare the inner values. - n1, n2 := v1.IsNil(), v2.IsNil() - if n1 || n2 { - return n1 == n2 - } - e1, e2 := v1.Elem(), v2.Elem() - if e1.Type() != e2.Type() { - return false - } - return equalAny(e1, e2, nil) - case reflect.Map: - if v1.Len() != v2.Len() { - return false - } - for _, key := range v1.MapKeys() { - val2 := v2.MapIndex(key) - if !val2.IsValid() { - // This key was not found in the second map. - return false - } - if !equalAny(v1.MapIndex(key), val2, nil) { - return false - } - } - return true - case reflect.Ptr: - // Maps may have nil values in them, so check for nil. - if v1.IsNil() && v2.IsNil() { - return true - } - if v1.IsNil() != v2.IsNil() { - return false - } - return equalAny(v1.Elem(), v2.Elem(), prop) - case reflect.Slice: - if v1.Type().Elem().Kind() == reflect.Uint8 { - // short circuit: []byte - - // Edge case: if this is in a proto3 message, a zero length - // bytes field is considered the zero value. 
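// Hedged note: for a proto3 bytes field, nil and []byte{} compare equal via
// the length check just below, while for proto2 the IsNil comparison that
// follows still distinguishes unset (nil) from set-but-empty.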
- if prop != nil && prop.proto3 && v1.Len() == 0 && v2.Len() == 0 { - return true - } - if v1.IsNil() != v2.IsNil() { - return false - } - return bytes.Equal(v1.Interface().([]byte), v2.Interface().([]byte)) - } - - if v1.Len() != v2.Len() { - return false - } - for i := 0; i < v1.Len(); i++ { - if !equalAny(v1.Index(i), v2.Index(i), prop) { - return false - } - } - return true - case reflect.String: - return v1.Interface().(string) == v2.Interface().(string) - case reflect.Struct: - return equalStruct(v1, v2) - case reflect.Uint32, reflect.Uint64: - return v1.Uint() == v2.Uint() - } - - // unknown type, so not a protocol buffer - log.Printf("proto: don't know how to compare %v", v1) - return false -} - -// base is the struct type that the extensions are based on. -// x1 and x2 are InternalExtensions. -func equalExtensions(base reflect.Type, x1, x2 XXX_InternalExtensions) bool { - em1, _ := x1.extensionsRead() - em2, _ := x2.extensionsRead() - return equalExtMap(base, em1, em2) -} - -func equalExtMap(base reflect.Type, em1, em2 map[int32]Extension) bool { - if len(em1) != len(em2) { - return false - } - - for extNum, e1 := range em1 { - e2, ok := em2[extNum] - if !ok { - return false - } - - m1, m2 := e1.value, e2.value - - if m1 == nil && m2 == nil { - // Both have only encoded form. - if bytes.Equal(e1.enc, e2.enc) { - continue - } - // The bytes are different, but the extensions might still be - // equal. We need to decode them to compare. - } - - if m1 != nil && m2 != nil { - // Both are unencoded. - if !equalAny(reflect.ValueOf(m1), reflect.ValueOf(m2), nil) { - return false - } - continue - } - - // At least one is encoded. To do a semantically correct comparison - // we need to unmarshal them first. - var desc *ExtensionDesc - if m := extensionMaps[base]; m != nil { - desc = m[extNum] - } - if desc == nil { - // If both have only encoded form and the bytes are the same, - // it is handled above. We get here when the bytes are different. - // We don't know how to decode it, so just compare them as byte - // slices. - log.Printf("proto: don't know how to compare extension %d of %v", extNum, base) - return false - } - var err error - if m1 == nil { - m1, err = decodeExtension(e1.enc, desc) - } - if m2 == nil && err == nil { - m2, err = decodeExtension(e2.enc, desc) - } - if err != nil { - // The encoded form is invalid. - log.Printf("proto: badly encoded extension %d of %v: %v", extNum, base, err) - return false - } - if !equalAny(reflect.ValueOf(m1), reflect.ValueOf(m2), nil) { - return false - } - } - - return true -} diff --git a/vendor/github.com/gogo/protobuf/proto/extensions.go b/vendor/github.com/gogo/protobuf/proto/extensions.go deleted file mode 100644 index 341c6f57f5..0000000000 --- a/vendor/github.com/gogo/protobuf/proto/extensions.go +++ /dev/null @@ -1,605 +0,0 @@ -// Go support for Protocol Buffers - Google's data interchange format -// -// Copyright 2010 The Go Authors. All rights reserved. -// https://github.com/golang/protobuf -// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following disclaimer -// in the documentation and/or other materials provided with the -// distribution. 
-// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived from -// this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -package proto - -/* - * Types and routines for supporting protocol buffer extensions. - */ - -import ( - "errors" - "fmt" - "io" - "reflect" - "strconv" - "sync" -) - -// ErrMissingExtension is the error returned by GetExtension if the named extension is not in the message. -var ErrMissingExtension = errors.New("proto: missing extension") - -// ExtensionRange represents a range of message extensions for a protocol buffer. -// Used in code generated by the protocol compiler. -type ExtensionRange struct { - Start, End int32 // both inclusive -} - -// extendableProto is an interface implemented by any protocol buffer generated by the current -// proto compiler that may be extended. -type extendableProto interface { - Message - ExtensionRangeArray() []ExtensionRange - extensionsWrite() map[int32]Extension - extensionsRead() (map[int32]Extension, sync.Locker) -} - -// extendableProtoV1 is an interface implemented by a protocol buffer generated by the previous -// version of the proto compiler that may be extended. -type extendableProtoV1 interface { - Message - ExtensionRangeArray() []ExtensionRange - ExtensionMap() map[int32]Extension -} - -// extensionAdapter is a wrapper around extendableProtoV1 that implements extendableProto. -type extensionAdapter struct { - extendableProtoV1 -} - -func (e extensionAdapter) extensionsWrite() map[int32]Extension { - return e.ExtensionMap() -} - -func (e extensionAdapter) extensionsRead() (map[int32]Extension, sync.Locker) { - return e.ExtensionMap(), notLocker{} -} - -// notLocker is a sync.Locker whose Lock and Unlock methods are nops. -type notLocker struct{} - -func (n notLocker) Lock() {} -func (n notLocker) Unlock() {} - -// extendable returns the extendableProto interface for the given generated proto message. -// If the proto message has the old extension format, it returns a wrapper that implements -// the extendableProto interface. -func extendable(p interface{}) (extendableProto, error) { - switch p := p.(type) { - case extendableProto: - if isNilPtr(p) { - return nil, fmt.Errorf("proto: nil %T is not extendable", p) - } - return p, nil - case extendableProtoV1: - if isNilPtr(p) { - return nil, fmt.Errorf("proto: nil %T is not extendable", p) - } - return extensionAdapter{p}, nil - case extensionsBytes: - return slowExtensionAdapter{p}, nil - } - // Don't allocate a specific error containing %T: - // this is the hot path for Clone and MarshalText. 
- return nil, errNotExtendable -} - -var errNotExtendable = errors.New("proto: not an extendable proto.Message") - -func isNilPtr(x interface{}) bool { - v := reflect.ValueOf(x) - return v.Kind() == reflect.Ptr && v.IsNil() -} - -// XXX_InternalExtensions is an internal representation of proto extensions. -// -// Each generated message struct type embeds an anonymous XXX_InternalExtensions field, -// thus gaining the unexported 'extensions' method, which can be called only from the proto package. -// -// The methods of XXX_InternalExtensions are not concurrency safe in general, -// but calls to logically read-only methods such as has and get may be executed concurrently. -type XXX_InternalExtensions struct { - // The struct must be indirect so that if a user inadvertently copies a - // generated message and its embedded XXX_InternalExtensions, they - // avoid the mayhem of a copied mutex. - // - // The mutex serializes all logically read-only operations to p.extensionMap. - // It is up to the client to ensure that write operations to p.extensionMap are - // mutually exclusive with other accesses. - p *struct { - mu sync.Mutex - extensionMap map[int32]Extension - } -} - -// extensionsWrite returns the extension map, creating it on first use. -func (e *XXX_InternalExtensions) extensionsWrite() map[int32]Extension { - if e.p == nil { - e.p = new(struct { - mu sync.Mutex - extensionMap map[int32]Extension - }) - e.p.extensionMap = make(map[int32]Extension) - } - return e.p.extensionMap -} - -// extensionsRead returns the extensions map for read-only use. It may be nil. -// The caller must hold the returned mutex's lock when accessing Elements within the map. -func (e *XXX_InternalExtensions) extensionsRead() (map[int32]Extension, sync.Locker) { - if e.p == nil { - return nil, nil - } - return e.p.extensionMap, &e.p.mu -} - -// ExtensionDesc represents an extension specification. -// Used in generated code from the protocol compiler. -type ExtensionDesc struct { - ExtendedType Message // nil pointer to the type that is being extended - ExtensionType interface{} // nil pointer to the extension type - Field int32 // field number - Name string // fully-qualified name of extension, for text formatting - Tag string // protobuf tag style - Filename string // name of the file in which the extension is defined -} - -func (ed *ExtensionDesc) repeated() bool { - t := reflect.TypeOf(ed.ExtensionType) - return t.Kind() == reflect.Slice && t.Elem().Kind() != reflect.Uint8 -} - -// Extension represents an extension in a message. -type Extension struct { - // When an extension is stored in a message using SetExtension - // only desc and value are set. When the message is marshaled - // enc will be set to the encoded form of the message. - // - // When a message is unmarshaled and contains extensions, each - // extension will have only enc set. When such an extension is - // accessed using GetExtension (or GetExtensions) desc and value - // will be set. - desc *ExtensionDesc - value interface{} - enc []byte -} - -// SetRawExtension is for testing only. -func SetRawExtension(base Message, id int32, b []byte) { - if ebase, ok := base.(extensionsBytes); ok { - clearExtension(base, id) - ext := ebase.GetExtensions() - *ext = append(*ext, b...) - return - } - epb, err := extendable(base) - if err != nil { - return - } - extmap := epb.extensionsWrite() - extmap[id] = Extension{enc: b} -} - -// isExtensionField returns true iff the given field number is in an extension range. 
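The XXX_InternalExtensions type removed above hides both the mutex and the extension map behind a single pointer, so the zero value copies safely and a read on an empty message allocates nothing. A reduced sketch of that lazy-init pattern, with the Extension payload simplified to raw bytes:

package main

import (
	"fmt"
	"sync"
)

// lazyExts keeps the map and its mutex behind one pointer: copying the zero
// value never copies a locked mutex, and readers can observe "no
// extensions" without forcing an allocation.
type lazyExts struct {
	p *struct {
		mu sync.Mutex
		m  map[int32][]byte
	}
}

// write returns the map, allocating it on first use.
func (e *lazyExts) write() map[int32][]byte {
	if e.p == nil {
		e.p = new(struct {
			mu sync.Mutex
			m  map[int32][]byte
		})
		e.p.m = make(map[int32][]byte)
	}
	return e.p.m
}

// read returns the map for read-only use; it may be nil, and callers must
// hold the returned locker while accessing it.
func (e *lazyExts) read() (map[int32][]byte, sync.Locker) {
	if e.p == nil {
		return nil, nil
	}
	return e.p.m, &e.p.mu
}

func main() {
	var e lazyExts
	if m, _ := e.read(); m == nil {
		fmt.Println("zero value: no map allocated")
	}
	e.write()[5] = []byte{0x2a}
	m, mu := e.read()
	mu.Lock()
	fmt.Println(m[5])
	mu.Unlock()
}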
-func isExtensionField(pb extendableProto, field int32) bool { - for _, er := range pb.ExtensionRangeArray() { - if er.Start <= field && field <= er.End { - return true - } - } - return false -} - -// checkExtensionTypes checks that the given extension is valid for pb. -func checkExtensionTypes(pb extendableProto, extension *ExtensionDesc) error { - var pbi interface{} = pb - // Check the extended type. - if ea, ok := pbi.(extensionAdapter); ok { - pbi = ea.extendableProtoV1 - } - if ea, ok := pbi.(slowExtensionAdapter); ok { - pbi = ea.extensionsBytes - } - if a, b := reflect.TypeOf(pbi), reflect.TypeOf(extension.ExtendedType); a != b { - return fmt.Errorf("proto: bad extended type; %v does not extend %v", b, a) - } - // Check the range. - if !isExtensionField(pb, extension.Field) { - return errors.New("proto: bad extension number; not in declared ranges") - } - return nil -} - -// extPropKey is sufficient to uniquely identify an extension. -type extPropKey struct { - base reflect.Type - field int32 -} - -var extProp = struct { - sync.RWMutex - m map[extPropKey]*Properties -}{ - m: make(map[extPropKey]*Properties), -} - -func extensionProperties(ed *ExtensionDesc) *Properties { - key := extPropKey{base: reflect.TypeOf(ed.ExtendedType), field: ed.Field} - - extProp.RLock() - if prop, ok := extProp.m[key]; ok { - extProp.RUnlock() - return prop - } - extProp.RUnlock() - - extProp.Lock() - defer extProp.Unlock() - // Check again. - if prop, ok := extProp.m[key]; ok { - return prop - } - - prop := new(Properties) - prop.Init(reflect.TypeOf(ed.ExtensionType), "unknown_name", ed.Tag, nil) - extProp.m[key] = prop - return prop -} - -// HasExtension returns whether the given extension is present in pb. -func HasExtension(pb Message, extension *ExtensionDesc) bool { - if epb, doki := pb.(extensionsBytes); doki { - ext := epb.GetExtensions() - buf := *ext - o := 0 - for o < len(buf) { - tag, n := DecodeVarint(buf[o:]) - fieldNum := int32(tag >> 3) - if int32(fieldNum) == extension.Field { - return true - } - wireType := int(tag & 0x7) - o += n - l, err := size(buf[o:], wireType) - if err != nil { - return false - } - o += l - } - return false - } - // TODO: Check types, field numbers, etc.? - epb, err := extendable(pb) - if err != nil { - return false - } - extmap, mu := epb.extensionsRead() - if extmap == nil { - return false - } - mu.Lock() - _, ok := extmap[extension.Field] - mu.Unlock() - return ok -} - -// ClearExtension removes the given extension from pb. -func ClearExtension(pb Message, extension *ExtensionDesc) { - clearExtension(pb, extension.Field) -} - -func clearExtension(pb Message, fieldNum int32) { - if epb, ok := pb.(extensionsBytes); ok { - offset := 0 - for offset != -1 { - offset = deleteExtension(epb, fieldNum, offset) - } - return - } - epb, err := extendable(pb) - if err != nil { - return - } - // TODO: Check types, field numbers, etc.? - extmap := epb.extensionsWrite() - delete(extmap, fieldNum) -} - -// GetExtension retrieves a proto2 extended field from pb. -// -// If the descriptor is type complete (i.e., ExtensionDesc.ExtensionType is non-nil), -// then GetExtension parses the encoded field and returns a Go value of the specified type. -// If the field is not present, then the default value is returned (if one is specified), -// otherwise ErrMissingExtension is reported. -// -// If the descriptor is not type complete (i.e., ExtensionDesc.ExtensionType is nil), -// then GetExtension returns the raw encoded bytes of the field extension. 
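extensionProperties, removed above, is a double-checked cache: a read lock serves the hot path, and a second lookup under the write lock keeps racing goroutines from computing the same entry twice. A generic sketch of the pattern with simplified cache contents:

package main

import (
	"fmt"
	"sync"
)

// cache pairs an RWMutex with the map it guards, as the removed extProp
// variable does.
var cache = struct {
	sync.RWMutex
	m map[string]int
}{m: make(map[string]int)}

// lookup returns the cached value for key, computing and storing it once.
func lookup(key string, compute func() int) int {
	cache.RLock()
	if v, ok := cache.m[key]; ok {
		cache.RUnlock()
		return v
	}
	cache.RUnlock()

	cache.Lock()
	defer cache.Unlock()
	// Check again: another goroutine may have filled the entry between the
	// read unlock and the write lock.
	if v, ok := cache.m[key]; ok {
		return v
	}
	v := compute()
	cache.m[key] = v
	return v
}

func main() {
	fmt.Println(lookup("a", func() int { fmt.Println("computing a"); return 1 }))
	fmt.Println(lookup("a", func() int { fmt.Println("computing a"); return 1 })) // cached: no recompute
}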
-func GetExtension(pb Message, extension *ExtensionDesc) (interface{}, error) { - if epb, doki := pb.(extensionsBytes); doki { - ext := epb.GetExtensions() - return decodeExtensionFromBytes(extension, *ext) - } - - epb, err := extendable(pb) - if err != nil { - return nil, err - } - - if extension.ExtendedType != nil { - // can only check type if this is a complete descriptor - if cerr := checkExtensionTypes(epb, extension); cerr != nil { - return nil, cerr - } - } - - emap, mu := epb.extensionsRead() - if emap == nil { - return defaultExtensionValue(extension) - } - mu.Lock() - defer mu.Unlock() - e, ok := emap[extension.Field] - if !ok { - // defaultExtensionValue returns the default value or - // ErrMissingExtension if there is no default. - return defaultExtensionValue(extension) - } - - if e.value != nil { - // Already decoded. Check the descriptor, though. - if e.desc != extension { - // This shouldn't happen. If it does, it means that - // GetExtension was called twice with two different - // descriptors with the same field number. - return nil, errors.New("proto: descriptor conflict") - } - return e.value, nil - } - - if extension.ExtensionType == nil { - // incomplete descriptor - return e.enc, nil - } - - v, err := decodeExtension(e.enc, extension) - if err != nil { - return nil, err - } - - // Remember the decoded version and drop the encoded version. - // That way it is safe to mutate what we return. - e.value = v - e.desc = extension - e.enc = nil - emap[extension.Field] = e - return e.value, nil -} - -// defaultExtensionValue returns the default value for extension. -// If no default for an extension is defined ErrMissingExtension is returned. -func defaultExtensionValue(extension *ExtensionDesc) (interface{}, error) { - if extension.ExtensionType == nil { - // incomplete descriptor, so no default - return nil, ErrMissingExtension - } - - t := reflect.TypeOf(extension.ExtensionType) - props := extensionProperties(extension) - - sf, _, err := fieldDefault(t, props) - if err != nil { - return nil, err - } - - if sf == nil || sf.value == nil { - // There is no default value. - return nil, ErrMissingExtension - } - - if t.Kind() != reflect.Ptr { - // We do not need to return a Ptr, we can directly return sf.value. - return sf.value, nil - } - - // We need to return an interface{} that is a pointer to sf.value. - value := reflect.New(t).Elem() - value.Set(reflect.New(value.Type().Elem())) - if sf.kind == reflect.Int32 { - // We may have an int32 or an enum, but the underlying data is int32. - // Since we can't set an int32 into a non int32 reflect.value directly - // set it as a int32. - value.Elem().SetInt(int64(sf.value.(int32))) - } else { - value.Elem().Set(reflect.ValueOf(sf.value)) - } - return value.Interface(), nil -} - -// decodeExtension decodes an extension encoded in b. -func decodeExtension(b []byte, extension *ExtensionDesc) (interface{}, error) { - t := reflect.TypeOf(extension.ExtensionType) - unmarshal := typeUnmarshaler(t, extension.Tag) - - // t is a pointer to a struct, pointer to basic type or a slice. - // Allocate space to store the pointer/slice. 
- value := reflect.New(t).Elem() - - var err error - for { - x, n := decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - wire := int(x) & 7 - - b, err = unmarshal(b, valToPointer(value.Addr()), wire) - if err != nil { - return nil, err - } - - if len(b) == 0 { - break - } - } - return value.Interface(), nil -} - -// GetExtensions returns a slice of the extensions present in pb that are also listed in es. -// The returned slice has the same length as es; missing extensions will appear as nil elements. -func GetExtensions(pb Message, es []*ExtensionDesc) (extensions []interface{}, err error) { - epb, err := extendable(pb) - if err != nil { - return nil, err - } - extensions = make([]interface{}, len(es)) - for i, e := range es { - extensions[i], err = GetExtension(epb, e) - if err == ErrMissingExtension { - err = nil - } - if err != nil { - return - } - } - return -} - -// ExtensionDescs returns a new slice containing pb's extension descriptors, in undefined order. -// For non-registered extensions, ExtensionDescs returns an incomplete descriptor containing -// just the Field field, which defines the extension's field number. -func ExtensionDescs(pb Message) ([]*ExtensionDesc, error) { - epb, err := extendable(pb) - if err != nil { - return nil, err - } - registeredExtensions := RegisteredExtensions(pb) - - emap, mu := epb.extensionsRead() - if emap == nil { - return nil, nil - } - mu.Lock() - defer mu.Unlock() - extensions := make([]*ExtensionDesc, 0, len(emap)) - for extid, e := range emap { - desc := e.desc - if desc == nil { - desc = registeredExtensions[extid] - if desc == nil { - desc = &ExtensionDesc{Field: extid} - } - } - - extensions = append(extensions, desc) - } - return extensions, nil -} - -// SetExtension sets the specified extension of pb to the specified value. -func SetExtension(pb Message, extension *ExtensionDesc, value interface{}) error { - if epb, ok := pb.(extensionsBytes); ok { - ClearExtension(pb, extension) - newb, err := encodeExtension(extension, value) - if err != nil { - return err - } - bb := epb.GetExtensions() - *bb = append(*bb, newb...) - return nil - } - epb, err := extendable(pb) - if err != nil { - return err - } - if err := checkExtensionTypes(epb, extension); err != nil { - return err - } - typ := reflect.TypeOf(extension.ExtensionType) - if typ != reflect.TypeOf(value) { - return fmt.Errorf("proto: bad extension value type. got: %T, want: %T", value, extension.ExtensionType) - } - // nil extension values need to be caught early, because the - // encoder can't distinguish an ErrNil due to a nil extension - // from an ErrNil due to a missing field. Extensions are - // always optional, so the encoder would just swallow the error - // and drop all the extensions from the encoded message. - if reflect.ValueOf(value).IsNil() { - return fmt.Errorf("proto: SetExtension called with nil value of type %T", value) - } - - extmap := epb.extensionsWrite() - extmap[extension.Field] = Extension{desc: extension, value: value} - return nil -} - -// ClearAllExtensions clears all extensions from pb. -func ClearAllExtensions(pb Message) { - if epb, doki := pb.(extensionsBytes); doki { - ext := epb.GetExtensions() - *ext = []byte{} - return - } - epb, err := extendable(pb) - if err != nil { - return - } - m := epb.extensionsWrite() - for k := range m { - delete(m, k) - } -} - -// A global registry of extensions. -// The generated code will register the generated descriptors by calling RegisterExtension. 
- -var extensionMaps = make(map[reflect.Type]map[int32]*ExtensionDesc) - -// RegisterExtension is called from the generated code. -func RegisterExtension(desc *ExtensionDesc) { - st := reflect.TypeOf(desc.ExtendedType).Elem() - m := extensionMaps[st] - if m == nil { - m = make(map[int32]*ExtensionDesc) - extensionMaps[st] = m - } - if _, ok := m[desc.Field]; ok { - panic("proto: duplicate extension registered: " + st.String() + " " + strconv.Itoa(int(desc.Field))) - } - m[desc.Field] = desc -} - -// RegisteredExtensions returns a map of the registered extensions of a -// protocol buffer struct, indexed by the extension number. -// The argument pb should be a nil pointer to the struct type. -func RegisteredExtensions(pb Message) map[int32]*ExtensionDesc { - return extensionMaps[reflect.TypeOf(pb).Elem()] -} diff --git a/vendor/github.com/gogo/protobuf/proto/extensions_gogo.go b/vendor/github.com/gogo/protobuf/proto/extensions_gogo.go deleted file mode 100644 index 6f1ae120ec..0000000000 --- a/vendor/github.com/gogo/protobuf/proto/extensions_gogo.go +++ /dev/null @@ -1,389 +0,0 @@ -// Protocol Buffers for Go with Gadgets -// -// Copyright (c) 2013, The GoGo Authors. All rights reserved. -// http://github.com/gogo/protobuf -// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following disclaimer -// in the documentation and/or other materials provided with the -// distribution. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
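For readers skimming the removal of extensions.go above: SetExtension type-checks a value against its descriptor before storing it, and GetExtension decodes the wire bytes at most once, caching the decoded value and discarding the encoded form. A minimal sketch of how that API was typically consumed; the generated package path, message type MyMessage, and descriptor E_MyExt are hypothetical stand-ins:

package main

import (
	"fmt"
	"log"

	proto "github.com/gogo/protobuf/proto"
	pb "example.com/gen/pb" // hypothetical generated package
)

func main() {
	msg := &pb.MyMessage{}

	// SetExtension verifies the value's type against the descriptor's
	// ExtensionType before storing it in the internal extension map.
	if err := proto.SetExtension(msg, pb.E_MyExt, proto.String("hello")); err != nil {
		log.Fatal(err)
	}

	// GetExtension returns the cached decoded value; decoding from wire
	// bytes only happens on the first access after unmarshaling.
	v, err := proto.GetExtension(msg, pb.E_MyExt)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(*v.(*string)) // hello
}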
- -package proto - -import ( - "bytes" - "errors" - "fmt" - "io" - "reflect" - "sort" - "strings" - "sync" -) - -type extensionsBytes interface { - Message - ExtensionRangeArray() []ExtensionRange - GetExtensions() *[]byte -} - -type slowExtensionAdapter struct { - extensionsBytes -} - -func (s slowExtensionAdapter) extensionsWrite() map[int32]Extension { - panic("Please report a bug to github.com/gogo/protobuf if you see this message: Writing extensions is not supported for extensions stored in a byte slice field.") -} - -func (s slowExtensionAdapter) extensionsRead() (map[int32]Extension, sync.Locker) { - b := s.GetExtensions() - m, err := BytesToExtensionsMap(*b) - if err != nil { - panic(err) - } - return m, notLocker{} -} - -func GetBoolExtension(pb Message, extension *ExtensionDesc, ifnotset bool) bool { - if reflect.ValueOf(pb).IsNil() { - return ifnotset - } - value, err := GetExtension(pb, extension) - if err != nil { - return ifnotset - } - if value == nil { - return ifnotset - } - if value.(*bool) == nil { - return ifnotset - } - return *(value.(*bool)) -} - -func (this *Extension) Equal(that *Extension) bool { - if err := this.Encode(); err != nil { - return false - } - if err := that.Encode(); err != nil { - return false - } - return bytes.Equal(this.enc, that.enc) -} - -func (this *Extension) Compare(that *Extension) int { - if err := this.Encode(); err != nil { - return 1 - } - if err := that.Encode(); err != nil { - return -1 - } - return bytes.Compare(this.enc, that.enc) -} - -func SizeOfInternalExtension(m extendableProto) (n int) { - info := getMarshalInfo(reflect.TypeOf(m)) - return info.sizeV1Extensions(m.extensionsWrite()) -} - -type sortableMapElem struct { - field int32 - ext Extension -} - -func newSortableExtensionsFromMap(m map[int32]Extension) sortableExtensions { - s := make(sortableExtensions, 0, len(m)) - for k, v := range m { - s = append(s, &sortableMapElem{field: k, ext: v}) - } - return s -} - -type sortableExtensions []*sortableMapElem - -func (this sortableExtensions) Len() int { return len(this) } - -func (this sortableExtensions) Swap(i, j int) { this[i], this[j] = this[j], this[i] } - -func (this sortableExtensions) Less(i, j int) bool { return this[i].field < this[j].field } - -func (this sortableExtensions) String() string { - sort.Sort(this) - ss := make([]string, len(this)) - for i := range this { - ss[i] = fmt.Sprintf("%d: %v", this[i].field, this[i].ext) - } - return "map[" + strings.Join(ss, ",") + "]" -} - -func StringFromInternalExtension(m extendableProto) string { - return StringFromExtensionsMap(m.extensionsWrite()) -} - -func StringFromExtensionsMap(m map[int32]Extension) string { - return newSortableExtensionsFromMap(m).String() -} - -func StringFromExtensionsBytes(ext []byte) string { - m, err := BytesToExtensionsMap(ext) - if err != nil { - panic(err) - } - return StringFromExtensionsMap(m) -} - -func EncodeInternalExtension(m extendableProto, data []byte) (n int, err error) { - return EncodeExtensionMap(m.extensionsWrite(), data) -} - -func EncodeInternalExtensionBackwards(m extendableProto, data []byte) (n int, err error) { - return EncodeExtensionMapBackwards(m.extensionsWrite(), data) -} - -func EncodeExtensionMap(m map[int32]Extension, data []byte) (n int, err error) { - o := 0 - for _, e := range m { - if err := e.Encode(); err != nil { - return 0, err - } - n := copy(data[o:], e.enc) - if n != len(e.enc) { - return 0, io.ErrShortBuffer - } - o += n - } - return o, nil -} - -func EncodeExtensionMapBackwards(m 
map[int32]Extension, data []byte) (n int, err error) { - o := 0 - end := len(data) - for _, e := range m { - if err := e.Encode(); err != nil { - return 0, err - } - n := copy(data[end-len(e.enc):], e.enc) - if n != len(e.enc) { - return 0, io.ErrShortBuffer - } - end -= n - o += n - } - return o, nil -} - -func GetRawExtension(m map[int32]Extension, id int32) ([]byte, error) { - e := m[id] - if err := e.Encode(); err != nil { - return nil, err - } - return e.enc, nil -} - -func size(buf []byte, wire int) (int, error) { - switch wire { - case WireVarint: - _, n := DecodeVarint(buf) - return n, nil - case WireFixed64: - return 8, nil - case WireBytes: - v, n := DecodeVarint(buf) - return int(v) + n, nil - case WireFixed32: - return 4, nil - case WireStartGroup: - offset := 0 - for { - u, n := DecodeVarint(buf[offset:]) - fwire := int(u & 0x7) - offset += n - if fwire == WireEndGroup { - return offset, nil - } - s, err := size(buf[offset:], wire) - if err != nil { - return 0, err - } - offset += s - } - } - return 0, fmt.Errorf("proto: can't get size for unknown wire type %d", wire) -} - -func BytesToExtensionsMap(buf []byte) (map[int32]Extension, error) { - m := make(map[int32]Extension) - i := 0 - for i < len(buf) { - tag, n := DecodeVarint(buf[i:]) - if n <= 0 { - return nil, fmt.Errorf("unable to decode varint") - } - fieldNum := int32(tag >> 3) - wireType := int(tag & 0x7) - l, err := size(buf[i+n:], wireType) - if err != nil { - return nil, err - } - end := i + int(l) + n - m[int32(fieldNum)] = Extension{enc: buf[i:end]} - i = end - } - return m, nil -} - -func NewExtension(e []byte) Extension { - ee := Extension{enc: make([]byte, len(e))} - copy(ee.enc, e) - return ee -} - -func AppendExtension(e Message, tag int32, buf []byte) { - if ee, eok := e.(extensionsBytes); eok { - ext := ee.GetExtensions() - *ext = append(*ext, buf...) - return - } - if ee, eok := e.(extendableProto); eok { - m := ee.extensionsWrite() - ext := m[int32(tag)] // may be missing - ext.enc = append(ext.enc, buf...) 
- m[int32(tag)] = ext - } -} - -func encodeExtension(extension *ExtensionDesc, value interface{}) ([]byte, error) { - u := getMarshalInfo(reflect.TypeOf(extension.ExtendedType)) - ei := u.getExtElemInfo(extension) - v := value - p := toAddrPointer(&v, ei.isptr) - siz := ei.sizer(p, SizeVarint(ei.wiretag)) - buf := make([]byte, 0, siz) - return ei.marshaler(buf, p, ei.wiretag, false) -} - -func decodeExtensionFromBytes(extension *ExtensionDesc, buf []byte) (interface{}, error) { - o := 0 - for o < len(buf) { - tag, n := DecodeVarint((buf)[o:]) - fieldNum := int32(tag >> 3) - wireType := int(tag & 0x7) - if o+n > len(buf) { - return nil, fmt.Errorf("unable to decode extension") - } - l, err := size((buf)[o+n:], wireType) - if err != nil { - return nil, err - } - if int32(fieldNum) == extension.Field { - if o+n+l > len(buf) { - return nil, fmt.Errorf("unable to decode extension") - } - v, err := decodeExtension((buf)[o:o+n+l], extension) - if err != nil { - return nil, err - } - return v, nil - } - o += n + l - } - return defaultExtensionValue(extension) -} - -func (this *Extension) Encode() error { - if this.enc == nil { - var err error - this.enc, err = encodeExtension(this.desc, this.value) - if err != nil { - return err - } - } - return nil -} - -func (this Extension) GoString() string { - if err := this.Encode(); err != nil { - return fmt.Sprintf("error encoding extension: %v", err) - } - return fmt.Sprintf("proto.NewExtension(%#v)", this.enc) -} - -func SetUnsafeExtension(pb Message, fieldNum int32, value interface{}) error { - typ := reflect.TypeOf(pb).Elem() - ext, ok := extensionMaps[typ] - if !ok { - return fmt.Errorf("proto: bad extended type; %s is not extendable", typ.String()) - } - desc, ok := ext[fieldNum] - if !ok { - return errors.New("proto: bad extension number; not in declared ranges") - } - return SetExtension(pb, desc, value) -} - -func GetUnsafeExtension(pb Message, fieldNum int32) (interface{}, error) { - typ := reflect.TypeOf(pb).Elem() - ext, ok := extensionMaps[typ] - if !ok { - return nil, fmt.Errorf("proto: bad extended type; %s is not extendable", typ.String()) - } - desc, ok := ext[fieldNum] - if !ok { - return nil, fmt.Errorf("unregistered field number %d", fieldNum) - } - return GetExtension(pb, desc) -} - -func NewUnsafeXXX_InternalExtensions(m map[int32]Extension) XXX_InternalExtensions { - x := &XXX_InternalExtensions{ - p: new(struct { - mu sync.Mutex - extensionMap map[int32]Extension - }), - } - x.p.extensionMap = m - return *x -} - -func GetUnsafeExtensionsMap(extendable Message) map[int32]Extension { - pb := extendable.(extendableProto) - return pb.extensionsWrite() -} - -func deleteExtension(pb extensionsBytes, theFieldNum int32, offset int) int { - ext := pb.GetExtensions() - for offset < len(*ext) { - tag, n1 := DecodeVarint((*ext)[offset:]) - fieldNum := int32(tag >> 3) - wireType := int(tag & 0x7) - n2, err := size((*ext)[offset+n1:], wireType) - if err != nil { - panic(err) - } - newOffset := offset + n1 + n2 - if fieldNum == theFieldNum { - *ext = append((*ext)[:offset], (*ext)[newOffset:]...) - return offset - } - offset = newOffset - } - return -1 -} diff --git a/vendor/github.com/gogo/protobuf/proto/lib.go b/vendor/github.com/gogo/protobuf/proto/lib.go deleted file mode 100644 index 80db1c155b..0000000000 --- a/vendor/github.com/gogo/protobuf/proto/lib.go +++ /dev/null @@ -1,973 +0,0 @@ -// Go support for Protocol Buffers - Google's data interchange format -// -// Copyright 2010 The Go Authors. All rights reserved. 
-// https://github.com/golang/protobuf -// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following disclaimer -// in the documentation and/or other materials provided with the -// distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived from -// this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -/* -Package proto converts data structures to and from the wire format of -protocol buffers. It works in concert with the Go source code generated -for .proto files by the protocol compiler. - -A summary of the properties of the protocol buffer interface -for a protocol buffer variable v: - - - Names are turned from camel_case to CamelCase for export. - - There are no methods on v to set fields; just treat - them as structure fields. - - There are getters that return a field's value if set, - and return the field's default value if unset. - The getters work even if the receiver is a nil message. - - The zero value for a struct is its correct initialization state. - All desired fields must be set before marshaling. - - A Reset() method will restore a protobuf struct to its zero state. - - Non-repeated fields are pointers to the values; nil means unset. - That is, optional or required field int32 f becomes F *int32. - - Repeated fields are slices. - - Helper functions are available to aid the setting of fields. - msg.Foo = proto.String("hello") // set field - - Constants are defined to hold the default values of all fields that - have them. They have the form Default_StructName_FieldName. - Because the getter methods handle defaulted values, - direct use of these constants should be rare. - - Enums are given type names and maps from names to values. - Enum values are prefixed by the enclosing message's name, or by the - enum's type name if it is a top-level enum. Enum types have a String - method, and a Enum method to assist in message construction. - - Nested messages, groups and enums have type names prefixed with the name of - the surrounding message type. - - Extensions are given descriptor names that start with E_, - followed by an underscore-delimited list of the nested messages - that contain it (if any) followed by the CamelCased name of the - extension field itself. 
HasExtension, ClearExtension, GetExtension - and SetExtension are functions for manipulating extensions. - - Oneof field sets are given a single field in their message, - with distinguished wrapper types for each possible field value. - - Marshal and Unmarshal are functions to encode and decode the wire format. - -When the .proto file specifies `syntax="proto3"`, there are some differences: - - - Non-repeated fields of non-message type are values instead of pointers. - - Enum types do not get an Enum method. - -The simplest way to describe this is to see an example. -Given file test.proto, containing - - package example; - - enum FOO { X = 17; } - - message Test { - required string label = 1; - optional int32 type = 2 [default=77]; - repeated int64 reps = 3; - optional group OptionalGroup = 4 { - required string RequiredField = 5; - } - oneof union { - int32 number = 6; - string name = 7; - } - } - -The resulting file, test.pb.go, is: - - package example - - import proto "github.com/gogo/protobuf/proto" - import math "math" - - type FOO int32 - const ( - FOO_X FOO = 17 - ) - var FOO_name = map[int32]string{ - 17: "X", - } - var FOO_value = map[string]int32{ - "X": 17, - } - - func (x FOO) Enum() *FOO { - p := new(FOO) - *p = x - return p - } - func (x FOO) String() string { - return proto.EnumName(FOO_name, int32(x)) - } - func (x *FOO) UnmarshalJSON(data []byte) error { - value, err := proto.UnmarshalJSONEnum(FOO_value, data) - if err != nil { - return err - } - *x = FOO(value) - return nil - } - - type Test struct { - Label *string `protobuf:"bytes,1,req,name=label" json:"label,omitempty"` - Type *int32 `protobuf:"varint,2,opt,name=type,def=77" json:"type,omitempty"` - Reps []int64 `protobuf:"varint,3,rep,name=reps" json:"reps,omitempty"` - Optionalgroup *Test_OptionalGroup `protobuf:"group,4,opt,name=OptionalGroup" json:"optionalgroup,omitempty"` - // Types that are valid to be assigned to Union: - // *Test_Number - // *Test_Name - Union isTest_Union `protobuf_oneof:"union"` - XXX_unrecognized []byte `json:"-"` - } - func (m *Test) Reset() { *m = Test{} } - func (m *Test) String() string { return proto.CompactTextString(m) } - func (*Test) ProtoMessage() {} - - type isTest_Union interface { - isTest_Union() - } - - type Test_Number struct { - Number int32 `protobuf:"varint,6,opt,name=number"` - } - type Test_Name struct { - Name string `protobuf:"bytes,7,opt,name=name"` - } - - func (*Test_Number) isTest_Union() {} - func (*Test_Name) isTest_Union() {} - - func (m *Test) GetUnion() isTest_Union { - if m != nil { - return m.Union - } - return nil - } - const Default_Test_Type int32 = 77 - - func (m *Test) GetLabel() string { - if m != nil && m.Label != nil { - return *m.Label - } - return "" - } - - func (m *Test) GetType() int32 { - if m != nil && m.Type != nil { - return *m.Type - } - return Default_Test_Type - } - - func (m *Test) GetOptionalgroup() *Test_OptionalGroup { - if m != nil { - return m.Optionalgroup - } - return nil - } - - type Test_OptionalGroup struct { - RequiredField *string `protobuf:"bytes,5,req" json:"RequiredField,omitempty"` - } - func (m *Test_OptionalGroup) Reset() { *m = Test_OptionalGroup{} } - func (m *Test_OptionalGroup) String() string { return proto.CompactTextString(m) } - - func (m *Test_OptionalGroup) GetRequiredField() string { - if m != nil && m.RequiredField != nil { - return *m.RequiredField - } - return "" - } - - func (m *Test) GetNumber() int32 { - if x, ok := m.GetUnion().(*Test_Number); ok { - return x.Number - } - return 0 - } - - func (m 
*Test) GetName() string { - if x, ok := m.GetUnion().(*Test_Name); ok { - return x.Name - } - return "" - } - - func init() { - proto.RegisterEnum("example.FOO", FOO_name, FOO_value) - } - -To create and play with a Test object: - - package main - - import ( - "log" - - "github.com/gogo/protobuf/proto" - pb "./example.pb" - ) - - func main() { - test := &pb.Test{ - Label: proto.String("hello"), - Type: proto.Int32(17), - Reps: []int64{1, 2, 3}, - Optionalgroup: &pb.Test_OptionalGroup{ - RequiredField: proto.String("good bye"), - }, - Union: &pb.Test_Name{"fred"}, - } - data, err := proto.Marshal(test) - if err != nil { - log.Fatal("marshaling error: ", err) - } - newTest := &pb.Test{} - err = proto.Unmarshal(data, newTest) - if err != nil { - log.Fatal("unmarshaling error: ", err) - } - // Now test and newTest contain the same data. - if test.GetLabel() != newTest.GetLabel() { - log.Fatalf("data mismatch %q != %q", test.GetLabel(), newTest.GetLabel()) - } - // Use a type switch to determine which oneof was set. - switch u := test.Union.(type) { - case *pb.Test_Number: // u.Number contains the number. - case *pb.Test_Name: // u.Name contains the string. - } - // etc. - } -*/ -package proto - -import ( - "encoding/json" - "fmt" - "log" - "reflect" - "sort" - "strconv" - "sync" -) - -// RequiredNotSetError is an error type returned by either Marshal or Unmarshal. -// Marshal reports this when a required field is not initialized. -// Unmarshal reports this when a required field is missing from the wire data. -type RequiredNotSetError struct{ field string } - -func (e *RequiredNotSetError) Error() string { - if e.field == "" { - return fmt.Sprintf("proto: required field not set") - } - return fmt.Sprintf("proto: required field %q not set", e.field) -} -func (e *RequiredNotSetError) RequiredNotSet() bool { - return true -} - -type invalidUTF8Error struct{ field string } - -func (e *invalidUTF8Error) Error() string { - if e.field == "" { - return "proto: invalid UTF-8 detected" - } - return fmt.Sprintf("proto: field %q contains invalid UTF-8", e.field) -} -func (e *invalidUTF8Error) InvalidUTF8() bool { - return true -} - -// errInvalidUTF8 is a sentinel error to identify fields with invalid UTF-8. -// This error should not be exposed to the external API as such errors should -// be recreated with the field information. -var errInvalidUTF8 = &invalidUTF8Error{} - -// isNonFatal reports whether the error is either a RequiredNotSet error -// or a InvalidUTF8 error. -func isNonFatal(err error) bool { - if re, ok := err.(interface{ RequiredNotSet() bool }); ok && re.RequiredNotSet() { - return true - } - if re, ok := err.(interface{ InvalidUTF8() bool }); ok && re.InvalidUTF8() { - return true - } - return false -} - -type nonFatal struct{ E error } - -// Merge merges err into nf and reports whether it was successful. -// Otherwise it returns false for any fatal non-nil errors. -func (nf *nonFatal) Merge(err error) (ok bool) { - if err == nil { - return true // not an error - } - if !isNonFatal(err) { - return false // fatal error - } - if nf.E == nil { - nf.E = err // store first instance of non-fatal error - } - return true -} - -// Message is implemented by generated protocol buffer messages. -type Message interface { - Reset() - String() string - ProtoMessage() -} - -// A Buffer is a buffer manager for marshaling and unmarshaling -// protocol buffers. It may be reused between invocations to -// reduce memory usage. 
It is not necessary to use a Buffer; -// the global functions Marshal and Unmarshal create a -// temporary Buffer and are fine for most applications. -type Buffer struct { - buf []byte // encode/decode byte stream - index int // read point - - deterministic bool -} - -// NewBuffer allocates a new Buffer and initializes its internal data to -// the contents of the argument slice. -func NewBuffer(e []byte) *Buffer { - return &Buffer{buf: e} -} - -// Reset resets the Buffer, ready for marshaling a new protocol buffer. -func (p *Buffer) Reset() { - p.buf = p.buf[0:0] // for reading/writing - p.index = 0 // for reading -} - -// SetBuf replaces the internal buffer with the slice, -// ready for unmarshaling the contents of the slice. -func (p *Buffer) SetBuf(s []byte) { - p.buf = s - p.index = 0 -} - -// Bytes returns the contents of the Buffer. -func (p *Buffer) Bytes() []byte { return p.buf } - -// SetDeterministic sets whether to use deterministic serialization. -// -// Deterministic serialization guarantees that for a given binary, equal -// messages will always be serialized to the same bytes. This implies: -// -// - Repeated serialization of a message will return the same bytes. -// - Different processes of the same binary (which may be executing on -// different machines) will serialize equal messages to the same bytes. -// -// Note that the deterministic serialization is NOT canonical across -// languages. It is not guaranteed to remain stable over time. It is unstable -// across different builds with schema changes due to unknown fields. -// Users who need canonical serialization (e.g., persistent storage in a -// canonical form, fingerprinting, etc.) should define their own -// canonicalization specification and implement their own serializer rather -// than relying on this API. -// -// If deterministic serialization is requested, map entries will be sorted -// by keys in lexographical order. This is an implementation detail and -// subject to change. -func (p *Buffer) SetDeterministic(deterministic bool) { - p.deterministic = deterministic -} - -/* - * Helper routines for simplifying the creation of optional fields of basic type. - */ - -// Bool is a helper routine that allocates a new bool value -// to store v and returns a pointer to it. -func Bool(v bool) *bool { - return &v -} - -// Int32 is a helper routine that allocates a new int32 value -// to store v and returns a pointer to it. -func Int32(v int32) *int32 { - return &v -} - -// Int is a helper routine that allocates a new int32 value -// to store v and returns a pointer to it, but unlike Int32 -// its argument value is an int. -func Int(v int) *int32 { - p := new(int32) - *p = int32(v) - return p -} - -// Int64 is a helper routine that allocates a new int64 value -// to store v and returns a pointer to it. -func Int64(v int64) *int64 { - return &v -} - -// Float32 is a helper routine that allocates a new float32 value -// to store v and returns a pointer to it. -func Float32(v float32) *float32 { - return &v -} - -// Float64 is a helper routine that allocates a new float64 value -// to store v and returns a pointer to it. -func Float64(v float64) *float64 { - return &v -} - -// Uint32 is a helper routine that allocates a new uint32 value -// to store v and returns a pointer to it. -func Uint32(v uint32) *uint32 { - return &v -} - -// Uint64 is a helper routine that allocates a new uint64 value -// to store v and returns a pointer to it. 
-func Uint64(v uint64) *uint64 { - return &v -} - -// String is a helper routine that allocates a new string value -// to store v and returns a pointer to it. -func String(v string) *string { - return &v -} - -// EnumName is a helper function to simplify printing protocol buffer enums -// by name. Given an enum map and a value, it returns a useful string. -func EnumName(m map[int32]string, v int32) string { - s, ok := m[v] - if ok { - return s - } - return strconv.Itoa(int(v)) -} - -// UnmarshalJSONEnum is a helper function to simplify recovering enum int values -// from their JSON-encoded representation. Given a map from the enum's symbolic -// names to its int values, and a byte buffer containing the JSON-encoded -// value, it returns an int32 that can be cast to the enum type by the caller. -// -// The function can deal with both JSON representations, numeric and symbolic. -func UnmarshalJSONEnum(m map[string]int32, data []byte, enumName string) (int32, error) { - if data[0] == '"' { - // New style: enums are strings. - var repr string - if err := json.Unmarshal(data, &repr); err != nil { - return -1, err - } - val, ok := m[repr] - if !ok { - return 0, fmt.Errorf("unrecognized enum %s value %q", enumName, repr) - } - return val, nil - } - // Old style: enums are ints. - var val int32 - if err := json.Unmarshal(data, &val); err != nil { - return 0, fmt.Errorf("cannot unmarshal %#q into enum %s", data, enumName) - } - return val, nil -} - -// DebugPrint dumps the encoded data in b in a debugging format with a header -// including the string s. Used in testing but made available for general debugging. -func (p *Buffer) DebugPrint(s string, b []byte) { - var u uint64 - - obuf := p.buf - sindex := p.index - p.buf = b - p.index = 0 - depth := 0 - - fmt.Printf("\n--- %s ---\n", s) - -out: - for { - for i := 0; i < depth; i++ { - fmt.Print(" ") - } - - index := p.index - if index == len(p.buf) { - break - } - - op, err := p.DecodeVarint() - if err != nil { - fmt.Printf("%3d: fetching op err %v\n", index, err) - break out - } - tag := op >> 3 - wire := op & 7 - - switch wire { - default: - fmt.Printf("%3d: t=%3d unknown wire=%d\n", - index, tag, wire) - break out - - case WireBytes: - var r []byte - - r, err = p.DecodeRawBytes(false) - if err != nil { - break out - } - fmt.Printf("%3d: t=%3d bytes [%d]", index, tag, len(r)) - if len(r) <= 6 { - for i := 0; i < len(r); i++ { - fmt.Printf(" %.2x", r[i]) - } - } else { - for i := 0; i < 3; i++ { - fmt.Printf(" %.2x", r[i]) - } - fmt.Printf(" ..") - for i := len(r) - 3; i < len(r); i++ { - fmt.Printf(" %.2x", r[i]) - } - } - fmt.Printf("\n") - - case WireFixed32: - u, err = p.DecodeFixed32() - if err != nil { - fmt.Printf("%3d: t=%3d fix32 err %v\n", index, tag, err) - break out - } - fmt.Printf("%3d: t=%3d fix32 %d\n", index, tag, u) - - case WireFixed64: - u, err = p.DecodeFixed64() - if err != nil { - fmt.Printf("%3d: t=%3d fix64 err %v\n", index, tag, err) - break out - } - fmt.Printf("%3d: t=%3d fix64 %d\n", index, tag, u) - - case WireVarint: - u, err = p.DecodeVarint() - if err != nil { - fmt.Printf("%3d: t=%3d varint err %v\n", index, tag, err) - break out - } - fmt.Printf("%3d: t=%3d varint %d\n", index, tag, u) - - case WireStartGroup: - fmt.Printf("%3d: t=%3d start\n", index, tag) - depth++ - - case WireEndGroup: - depth-- - fmt.Printf("%3d: t=%3d end\n", index, tag) - } - } - - if depth != 0 { - fmt.Printf("%3d: start-end not balanced %d\n", p.index, depth) - } - fmt.Printf("\n") - - p.buf = obuf - p.index = sindex -} - -// SetDefaults 
sets unset protocol buffer fields to their default values. -// It only modifies fields that are both unset and have defined defaults. -// It recursively sets default values in any non-nil sub-messages. -func SetDefaults(pb Message) { - setDefaults(reflect.ValueOf(pb), true, false) -} - -// v is a struct. -func setDefaults(v reflect.Value, recur, zeros bool) { - if v.Kind() == reflect.Ptr { - v = v.Elem() - } - - defaultMu.RLock() - dm, ok := defaults[v.Type()] - defaultMu.RUnlock() - if !ok { - dm = buildDefaultMessage(v.Type()) - defaultMu.Lock() - defaults[v.Type()] = dm - defaultMu.Unlock() - } - - for _, sf := range dm.scalars { - f := v.Field(sf.index) - if !f.IsNil() { - // field already set - continue - } - dv := sf.value - if dv == nil && !zeros { - // no explicit default, and don't want to set zeros - continue - } - fptr := f.Addr().Interface() // **T - // TODO: Consider batching the allocations we do here. - switch sf.kind { - case reflect.Bool: - b := new(bool) - if dv != nil { - *b = dv.(bool) - } - *(fptr.(**bool)) = b - case reflect.Float32: - f := new(float32) - if dv != nil { - *f = dv.(float32) - } - *(fptr.(**float32)) = f - case reflect.Float64: - f := new(float64) - if dv != nil { - *f = dv.(float64) - } - *(fptr.(**float64)) = f - case reflect.Int32: - // might be an enum - if ft := f.Type(); ft != int32PtrType { - // enum - f.Set(reflect.New(ft.Elem())) - if dv != nil { - f.Elem().SetInt(int64(dv.(int32))) - } - } else { - // int32 field - i := new(int32) - if dv != nil { - *i = dv.(int32) - } - *(fptr.(**int32)) = i - } - case reflect.Int64: - i := new(int64) - if dv != nil { - *i = dv.(int64) - } - *(fptr.(**int64)) = i - case reflect.String: - s := new(string) - if dv != nil { - *s = dv.(string) - } - *(fptr.(**string)) = s - case reflect.Uint8: - // exceptional case: []byte - var b []byte - if dv != nil { - db := dv.([]byte) - b = make([]byte, len(db)) - copy(b, db) - } else { - b = []byte{} - } - *(fptr.(*[]byte)) = b - case reflect.Uint32: - u := new(uint32) - if dv != nil { - *u = dv.(uint32) - } - *(fptr.(**uint32)) = u - case reflect.Uint64: - u := new(uint64) - if dv != nil { - *u = dv.(uint64) - } - *(fptr.(**uint64)) = u - default: - log.Printf("proto: can't set default for field %v (sf.kind=%v)", f, sf.kind) - } - } - - for _, ni := range dm.nested { - f := v.Field(ni) - // f is *T or T or []*T or []T - switch f.Kind() { - case reflect.Struct: - setDefaults(f, recur, zeros) - - case reflect.Ptr: - if f.IsNil() { - continue - } - setDefaults(f, recur, zeros) - - case reflect.Slice: - for i := 0; i < f.Len(); i++ { - e := f.Index(i) - if e.Kind() == reflect.Ptr && e.IsNil() { - continue - } - setDefaults(e, recur, zeros) - } - - case reflect.Map: - for _, k := range f.MapKeys() { - e := f.MapIndex(k) - if e.IsNil() { - continue - } - setDefaults(e, recur, zeros) - } - } - } -} - -var ( - // defaults maps a protocol buffer struct type to a slice of the fields, - // with its scalar fields set to their proto-declared non-zero default values. - defaultMu sync.RWMutex - defaults = make(map[reflect.Type]defaultMessage) - - int32PtrType = reflect.TypeOf((*int32)(nil)) -) - -// defaultMessage represents information about the default values of a message. 
-type defaultMessage struct { - scalars []scalarField - nested []int // struct field index of nested messages -} - -type scalarField struct { - index int // struct field index - kind reflect.Kind // element type (the T in *T or []T) - value interface{} // the proto-declared default value, or nil -} - -// t is a struct type. -func buildDefaultMessage(t reflect.Type) (dm defaultMessage) { - sprop := GetProperties(t) - for _, prop := range sprop.Prop { - fi, ok := sprop.decoderTags.get(prop.Tag) - if !ok { - // XXX_unrecognized - continue - } - ft := t.Field(fi).Type - - sf, nested, err := fieldDefault(ft, prop) - switch { - case err != nil: - log.Print(err) - case nested: - dm.nested = append(dm.nested, fi) - case sf != nil: - sf.index = fi - dm.scalars = append(dm.scalars, *sf) - } - } - - return dm -} - -// fieldDefault returns the scalarField for field type ft. -// sf will be nil if the field can not have a default. -// nestedMessage will be true if this is a nested message. -// Note that sf.index is not set on return. -func fieldDefault(ft reflect.Type, prop *Properties) (sf *scalarField, nestedMessage bool, err error) { - var canHaveDefault bool - switch ft.Kind() { - case reflect.Struct: - nestedMessage = true // non-nullable - - case reflect.Ptr: - if ft.Elem().Kind() == reflect.Struct { - nestedMessage = true - } else { - canHaveDefault = true // proto2 scalar field - } - - case reflect.Slice: - switch ft.Elem().Kind() { - case reflect.Ptr, reflect.Struct: - nestedMessage = true // repeated message - case reflect.Uint8: - canHaveDefault = true // bytes field - } - - case reflect.Map: - if ft.Elem().Kind() == reflect.Ptr { - nestedMessage = true // map with message values - } - } - - if !canHaveDefault { - if nestedMessage { - return nil, true, nil - } - return nil, false, nil - } - - // We now know that ft is a pointer or slice. 
- sf = &scalarField{kind: ft.Elem().Kind()} - - // scalar fields without defaults - if !prop.HasDefault { - return sf, false, nil - } - - // a scalar field: either *T or []byte - switch ft.Elem().Kind() { - case reflect.Bool: - x, err := strconv.ParseBool(prop.Default) - if err != nil { - return nil, false, fmt.Errorf("proto: bad default bool %q: %v", prop.Default, err) - } - sf.value = x - case reflect.Float32: - x, err := strconv.ParseFloat(prop.Default, 32) - if err != nil { - return nil, false, fmt.Errorf("proto: bad default float32 %q: %v", prop.Default, err) - } - sf.value = float32(x) - case reflect.Float64: - x, err := strconv.ParseFloat(prop.Default, 64) - if err != nil { - return nil, false, fmt.Errorf("proto: bad default float64 %q: %v", prop.Default, err) - } - sf.value = x - case reflect.Int32: - x, err := strconv.ParseInt(prop.Default, 10, 32) - if err != nil { - return nil, false, fmt.Errorf("proto: bad default int32 %q: %v", prop.Default, err) - } - sf.value = int32(x) - case reflect.Int64: - x, err := strconv.ParseInt(prop.Default, 10, 64) - if err != nil { - return nil, false, fmt.Errorf("proto: bad default int64 %q: %v", prop.Default, err) - } - sf.value = x - case reflect.String: - sf.value = prop.Default - case reflect.Uint8: - // []byte (not *uint8) - sf.value = []byte(prop.Default) - case reflect.Uint32: - x, err := strconv.ParseUint(prop.Default, 10, 32) - if err != nil { - return nil, false, fmt.Errorf("proto: bad default uint32 %q: %v", prop.Default, err) - } - sf.value = uint32(x) - case reflect.Uint64: - x, err := strconv.ParseUint(prop.Default, 10, 64) - if err != nil { - return nil, false, fmt.Errorf("proto: bad default uint64 %q: %v", prop.Default, err) - } - sf.value = x - default: - return nil, false, fmt.Errorf("proto: unhandled def kind %v", ft.Elem().Kind()) - } - - return sf, false, nil -} - -// mapKeys returns a sort.Interface to be used for sorting the map keys. -// Map fields may have key types of non-float scalars, strings and enums. -func mapKeys(vs []reflect.Value) sort.Interface { - s := mapKeySorter{vs: vs} - - // Type specialization per https://developers.google.com/protocol-buffers/docs/proto#maps. - if len(vs) == 0 { - return s - } - switch vs[0].Kind() { - case reflect.Int32, reflect.Int64: - s.less = func(a, b reflect.Value) bool { return a.Int() < b.Int() } - case reflect.Uint32, reflect.Uint64: - s.less = func(a, b reflect.Value) bool { return a.Uint() < b.Uint() } - case reflect.Bool: - s.less = func(a, b reflect.Value) bool { return !a.Bool() && b.Bool() } // false < true - case reflect.String: - s.less = func(a, b reflect.Value) bool { return a.String() < b.String() } - default: - panic(fmt.Sprintf("unsupported map key type: %v", vs[0].Kind())) - } - - return s -} - -type mapKeySorter struct { - vs []reflect.Value - less func(a, b reflect.Value) bool -} - -func (s mapKeySorter) Len() int { return len(s.vs) } -func (s mapKeySorter) Swap(i, j int) { s.vs[i], s.vs[j] = s.vs[j], s.vs[i] } -func (s mapKeySorter) Less(i, j int) bool { - return s.less(s.vs[i], s.vs[j]) -} - -// isProto3Zero reports whether v is a zero proto3 value. 
-func isProto3Zero(v reflect.Value) bool { - switch v.Kind() { - case reflect.Bool: - return !v.Bool() - case reflect.Int32, reflect.Int64: - return v.Int() == 0 - case reflect.Uint32, reflect.Uint64: - return v.Uint() == 0 - case reflect.Float32, reflect.Float64: - return v.Float() == 0 - case reflect.String: - return v.String() == "" - } - return false -} - -const ( - // ProtoPackageIsVersion3 is referenced from generated protocol buffer files - // to assert that that code is compatible with this version of the proto package. - GoGoProtoPackageIsVersion3 = true - - // ProtoPackageIsVersion2 is referenced from generated protocol buffer files - // to assert that that code is compatible with this version of the proto package. - GoGoProtoPackageIsVersion2 = true - - // ProtoPackageIsVersion1 is referenced from generated protocol buffer files - // to assert that that code is compatible with this version of the proto package. - GoGoProtoPackageIsVersion1 = true -) - -// InternalMessageInfo is a type used internally by generated .pb.go files. -// This type is not intended to be used by non-generated code. -// This type is not subject to any compatibility guarantee. -type InternalMessageInfo struct { - marshal *marshalInfo - unmarshal *unmarshalInfo - merge *mergeInfo - discard *discardInfo -} diff --git a/vendor/github.com/gogo/protobuf/proto/lib_gogo.go b/vendor/github.com/gogo/protobuf/proto/lib_gogo.go deleted file mode 100644 index b3aa39190a..0000000000 --- a/vendor/github.com/gogo/protobuf/proto/lib_gogo.go +++ /dev/null @@ -1,50 +0,0 @@ -// Protocol Buffers for Go with Gadgets -// -// Copyright (c) 2013, The GoGo Authors. All rights reserved. -// http://github.com/gogo/protobuf -// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following disclaimer -// in the documentation and/or other materials provided with the -// distribution. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
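The lib.go removal above takes with it the proto2 scalar helpers (proto.Bool, proto.Int32, proto.String, ...) and SetDefaults. A short sketch of how they compose, reusing the Test message from the package documentation above, whose type field declares default=77; only the generated package path is hypothetical:

package main

import (
	"fmt"

	proto "github.com/gogo/protobuf/proto"
	pb "example.com/gen/example" // hypothetical package generated from test.proto
)

func main() {
	// Optional proto2 fields are pointers (nil means unset), so the helpers
	// exist purely to allocate a value and return its address.
	t := &pb.Test{Label: proto.String("hello")}

	// SetDefaults fills unset fields that declare a default: Type becomes 77,
	// while Label, already set, is left alone.
	proto.SetDefaults(t)
	fmt.Println(t.GetLabel(), t.GetType()) // hello 77
}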
- -package proto - -import ( - "encoding/json" - "strconv" -) - -type Sizer interface { - Size() int -} - -type ProtoSizer interface { - ProtoSize() int -} - -func MarshalJSONEnum(m map[int32]string, value int32) ([]byte, error) { - s, ok := m[value] - if !ok { - s = strconv.Itoa(int(value)) - } - return json.Marshal(s) -} diff --git a/vendor/github.com/gogo/protobuf/proto/message_set.go b/vendor/github.com/gogo/protobuf/proto/message_set.go deleted file mode 100644 index f48a756761..0000000000 --- a/vendor/github.com/gogo/protobuf/proto/message_set.go +++ /dev/null @@ -1,181 +0,0 @@ -// Go support for Protocol Buffers - Google's data interchange format -// -// Copyright 2010 The Go Authors. All rights reserved. -// https://github.com/golang/protobuf -// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following disclaimer -// in the documentation and/or other materials provided with the -// distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived from -// this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -package proto - -/* - * Support for message sets. - */ - -import ( - "errors" -) - -// errNoMessageTypeID occurs when a protocol buffer does not have a message type ID. -// A message type ID is required for storing a protocol buffer in a message set. -var errNoMessageTypeID = errors.New("proto does not have a message type ID") - -// The first two types (_MessageSet_Item and messageSet) -// model what the protocol compiler produces for the following protocol message: -// message MessageSet { -// repeated group Item = 1 { -// required int32 type_id = 2; -// required string message = 3; -// }; -// } -// That is the MessageSet wire format. We can't use a proto to generate these -// because that would introduce a circular dependency between it and this package. - -type _MessageSet_Item struct { - TypeId *int32 `protobuf:"varint,2,req,name=type_id"` - Message []byte `protobuf:"bytes,3,req,name=message"` -} - -type messageSet struct { - Item []*_MessageSet_Item `protobuf:"group,1,rep"` - XXX_unrecognized []byte - // TODO: caching? -} - -// Make sure messageSet is a Message. -var _ Message = (*messageSet)(nil) - -// messageTypeIder is an interface satisfied by a protocol buffer type -// that may be stored in a MessageSet. 
-type messageTypeIder interface { - MessageTypeId() int32 -} - -func (ms *messageSet) find(pb Message) *_MessageSet_Item { - mti, ok := pb.(messageTypeIder) - if !ok { - return nil - } - id := mti.MessageTypeId() - for _, item := range ms.Item { - if *item.TypeId == id { - return item - } - } - return nil -} - -func (ms *messageSet) Has(pb Message) bool { - return ms.find(pb) != nil -} - -func (ms *messageSet) Unmarshal(pb Message) error { - if item := ms.find(pb); item != nil { - return Unmarshal(item.Message, pb) - } - if _, ok := pb.(messageTypeIder); !ok { - return errNoMessageTypeID - } - return nil // TODO: return error instead? -} - -func (ms *messageSet) Marshal(pb Message) error { - msg, err := Marshal(pb) - if err != nil { - return err - } - if item := ms.find(pb); item != nil { - // reuse existing item - item.Message = msg - return nil - } - - mti, ok := pb.(messageTypeIder) - if !ok { - return errNoMessageTypeID - } - - mtid := mti.MessageTypeId() - ms.Item = append(ms.Item, &_MessageSet_Item{ - TypeId: &mtid, - Message: msg, - }) - return nil -} - -func (ms *messageSet) Reset() { *ms = messageSet{} } -func (ms *messageSet) String() string { return CompactTextString(ms) } -func (*messageSet) ProtoMessage() {} - -// Support for the message_set_wire_format message option. - -func skipVarint(buf []byte) []byte { - i := 0 - for ; buf[i]&0x80 != 0; i++ { - } - return buf[i+1:] -} - -// unmarshalMessageSet decodes the extension map encoded in buf in the message set wire format. -// It is called by Unmarshal methods on protocol buffer messages with the message_set_wire_format option. -func unmarshalMessageSet(buf []byte, exts interface{}) error { - var m map[int32]Extension - switch exts := exts.(type) { - case *XXX_InternalExtensions: - m = exts.extensionsWrite() - case map[int32]Extension: - m = exts - default: - return errors.New("proto: not an extension map") - } - - ms := new(messageSet) - if err := Unmarshal(buf, ms); err != nil { - return err - } - for _, item := range ms.Item { - id := *item.TypeId - msg := item.Message - - // Restore wire type and field number varint, plus length varint. - // Be careful to preserve duplicate items. - b := EncodeVarint(uint64(id)<<3 | WireBytes) - if ext, ok := m[id]; ok { - // Existing data; rip off the tag and length varint - // so we join the new data correctly. - // We can assume that ext.enc is set because we are unmarshaling. - o := ext.enc[len(b):] // skip wire type and field number - _, n := DecodeVarint(o) // calculate length of length varint - o = o[n:] // skip length varint - msg = append(o, msg...) // join old data and new data - } - b = append(b, EncodeVarint(uint64(len(msg)))...) - b = append(b, msg...) - - m[id] = Extension{enc: b} - } - return nil -} diff --git a/vendor/github.com/gogo/protobuf/proto/pointer_reflect.go b/vendor/github.com/gogo/protobuf/proto/pointer_reflect.go deleted file mode 100644 index b6cad90834..0000000000 --- a/vendor/github.com/gogo/protobuf/proto/pointer_reflect.go +++ /dev/null @@ -1,357 +0,0 @@ -// Go support for Protocol Buffers - Google's data interchange format -// -// Copyright 2012 The Go Authors. All rights reserved. -// https://github.com/golang/protobuf -// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. 
-// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following disclaimer -// in the documentation and/or other materials provided with the -// distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived from -// this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -// +build purego appengine js - -// This file contains an implementation of proto field accesses using package reflect. -// It is slower than the code in pointer_unsafe.go but it avoids package unsafe and can -// be used on App Engine. - -package proto - -import ( - "reflect" - "sync" -) - -const unsafeAllowed = false - -// A field identifies a field in a struct, accessible from a pointer. -// In this implementation, a field is identified by the sequence of field indices -// passed to reflect's FieldByIndex. -type field []int - -// toField returns a field equivalent to the given reflect field. -func toField(f *reflect.StructField) field { - return f.Index -} - -// invalidField is an invalid field identifier. -var invalidField = field(nil) - -// zeroField is a noop when calling pointer.offset. -var zeroField = field([]int{}) - -// IsValid reports whether the field identifier is valid. -func (f field) IsValid() bool { return f != nil } - -// The pointer type is for the table-driven decoder. -// The implementation here uses a reflect.Value of pointer type to -// create a generic pointer. In pointer_unsafe.go we use unsafe -// instead of reflect to implement the same (but faster) interface. -type pointer struct { - v reflect.Value -} - -// toPointer converts an interface of pointer type to a pointer -// that points to the same target. -func toPointer(i *Message) pointer { - return pointer{v: reflect.ValueOf(*i)} -} - -// toAddrPointer converts an interface to a pointer that points to -// the interface data. -func toAddrPointer(i *interface{}, isptr bool) pointer { - v := reflect.ValueOf(*i) - u := reflect.New(v.Type()) - u.Elem().Set(v) - return pointer{v: u} -} - -// valToPointer converts v to a pointer. v must be of pointer type. -func valToPointer(v reflect.Value) pointer { - return pointer{v: v} -} - -// offset converts from a pointer to a structure to a pointer to -// one of its fields. -func (p pointer) offset(f field) pointer { - return pointer{v: p.v.Elem().FieldByIndex(f).Addr()} -} - -func (p pointer) isNil() bool { - return p.v.IsNil() -} - -// grow updates the slice s in place to make it one element longer. -// s must be addressable. -// Returns the (addressable) new element. 
-func grow(s reflect.Value) reflect.Value { - n, m := s.Len(), s.Cap() - if n < m { - s.SetLen(n + 1) - } else { - s.Set(reflect.Append(s, reflect.Zero(s.Type().Elem()))) - } - return s.Index(n) -} - -func (p pointer) toInt64() *int64 { - return p.v.Interface().(*int64) -} -func (p pointer) toInt64Ptr() **int64 { - return p.v.Interface().(**int64) -} -func (p pointer) toInt64Slice() *[]int64 { - return p.v.Interface().(*[]int64) -} - -var int32ptr = reflect.TypeOf((*int32)(nil)) - -func (p pointer) toInt32() *int32 { - return p.v.Convert(int32ptr).Interface().(*int32) -} - -// The toInt32Ptr/Slice methods don't work because of enums. -// Instead, we must use set/get methods for the int32ptr/slice case. -/* - func (p pointer) toInt32Ptr() **int32 { - return p.v.Interface().(**int32) -} - func (p pointer) toInt32Slice() *[]int32 { - return p.v.Interface().(*[]int32) -} -*/ -func (p pointer) getInt32Ptr() *int32 { - if p.v.Type().Elem().Elem() == reflect.TypeOf(int32(0)) { - // raw int32 type - return p.v.Elem().Interface().(*int32) - } - // an enum - return p.v.Elem().Convert(int32PtrType).Interface().(*int32) -} -func (p pointer) setInt32Ptr(v int32) { - // Allocate value in a *int32. Possibly convert that to a *enum. - // Then assign it to a **int32 or **enum. - // Note: we can convert *int32 to *enum, but we can't convert - // **int32 to **enum! - p.v.Elem().Set(reflect.ValueOf(&v).Convert(p.v.Type().Elem())) -} - -// getInt32Slice copies []int32 from p as a new slice. -// This behavior differs from the implementation in pointer_unsafe.go. -func (p pointer) getInt32Slice() []int32 { - if p.v.Type().Elem().Elem() == reflect.TypeOf(int32(0)) { - // raw int32 type - return p.v.Elem().Interface().([]int32) - } - // an enum - // Allocate a []int32, then assign []enum's values into it. - // Note: we can't convert []enum to []int32. - slice := p.v.Elem() - s := make([]int32, slice.Len()) - for i := 0; i < slice.Len(); i++ { - s[i] = int32(slice.Index(i).Int()) - } - return s -} - -// setInt32Slice copies []int32 into p as a new slice. -// This behavior differs from the implementation in pointer_unsafe.go. -func (p pointer) setInt32Slice(v []int32) { - if p.v.Type().Elem().Elem() == reflect.TypeOf(int32(0)) { - // raw int32 type - p.v.Elem().Set(reflect.ValueOf(v)) - return - } - // an enum - // Allocate a []enum, then assign []int32's values into it. - // Note: we can't convert []enum to []int32. 
- slice := reflect.MakeSlice(p.v.Type().Elem(), len(v), cap(v)) - for i, x := range v { - slice.Index(i).SetInt(int64(x)) - } - p.v.Elem().Set(slice) -} -func (p pointer) appendInt32Slice(v int32) { - grow(p.v.Elem()).SetInt(int64(v)) -} - -func (p pointer) toUint64() *uint64 { - return p.v.Interface().(*uint64) -} -func (p pointer) toUint64Ptr() **uint64 { - return p.v.Interface().(**uint64) -} -func (p pointer) toUint64Slice() *[]uint64 { - return p.v.Interface().(*[]uint64) -} -func (p pointer) toUint32() *uint32 { - return p.v.Interface().(*uint32) -} -func (p pointer) toUint32Ptr() **uint32 { - return p.v.Interface().(**uint32) -} -func (p pointer) toUint32Slice() *[]uint32 { - return p.v.Interface().(*[]uint32) -} -func (p pointer) toBool() *bool { - return p.v.Interface().(*bool) -} -func (p pointer) toBoolPtr() **bool { - return p.v.Interface().(**bool) -} -func (p pointer) toBoolSlice() *[]bool { - return p.v.Interface().(*[]bool) -} -func (p pointer) toFloat64() *float64 { - return p.v.Interface().(*float64) -} -func (p pointer) toFloat64Ptr() **float64 { - return p.v.Interface().(**float64) -} -func (p pointer) toFloat64Slice() *[]float64 { - return p.v.Interface().(*[]float64) -} -func (p pointer) toFloat32() *float32 { - return p.v.Interface().(*float32) -} -func (p pointer) toFloat32Ptr() **float32 { - return p.v.Interface().(**float32) -} -func (p pointer) toFloat32Slice() *[]float32 { - return p.v.Interface().(*[]float32) -} -func (p pointer) toString() *string { - return p.v.Interface().(*string) -} -func (p pointer) toStringPtr() **string { - return p.v.Interface().(**string) -} -func (p pointer) toStringSlice() *[]string { - return p.v.Interface().(*[]string) -} -func (p pointer) toBytes() *[]byte { - return p.v.Interface().(*[]byte) -} -func (p pointer) toBytesSlice() *[][]byte { - return p.v.Interface().(*[][]byte) -} -func (p pointer) toExtensions() *XXX_InternalExtensions { - return p.v.Interface().(*XXX_InternalExtensions) -} -func (p pointer) toOldExtensions() *map[int32]Extension { - return p.v.Interface().(*map[int32]Extension) -} -func (p pointer) getPointer() pointer { - return pointer{v: p.v.Elem()} -} -func (p pointer) setPointer(q pointer) { - p.v.Elem().Set(q.v) -} -func (p pointer) appendPointer(q pointer) { - grow(p.v.Elem()).Set(q.v) -} - -// getPointerSlice copies []*T from p as a new []pointer. -// This behavior differs from the implementation in pointer_unsafe.go. -func (p pointer) getPointerSlice() []pointer { - if p.v.IsNil() { - return nil - } - n := p.v.Elem().Len() - s := make([]pointer, n) - for i := 0; i < n; i++ { - s[i] = pointer{v: p.v.Elem().Index(i)} - } - return s -} - -// setPointerSlice copies []pointer into p as a new []*T. -// This behavior differs from the implementation in pointer_unsafe.go. -func (p pointer) setPointerSlice(v []pointer) { - if v == nil { - p.v.Elem().Set(reflect.New(p.v.Elem().Type()).Elem()) - return - } - s := reflect.MakeSlice(p.v.Elem().Type(), 0, len(v)) - for _, p := range v { - s = reflect.Append(s, p.v) - } - p.v.Elem().Set(s) -} - -// getInterfacePointer returns a pointer that points to the -// interface data of the interface pointed by p. -func (p pointer) getInterfacePointer() pointer { - if p.v.Elem().IsNil() { - return pointer{v: p.v.Elem()} - } - return pointer{v: p.v.Elem().Elem().Elem().Field(0).Addr()} // *interface -> interface -> *struct -> struct -} - -func (p pointer) asPointerTo(t reflect.Type) reflect.Value { - // TODO: check that p.v.Type().Elem() == t? 
- return p.v -} - -func atomicLoadUnmarshalInfo(p **unmarshalInfo) *unmarshalInfo { - atomicLock.Lock() - defer atomicLock.Unlock() - return *p -} -func atomicStoreUnmarshalInfo(p **unmarshalInfo, v *unmarshalInfo) { - atomicLock.Lock() - defer atomicLock.Unlock() - *p = v -} -func atomicLoadMarshalInfo(p **marshalInfo) *marshalInfo { - atomicLock.Lock() - defer atomicLock.Unlock() - return *p -} -func atomicStoreMarshalInfo(p **marshalInfo, v *marshalInfo) { - atomicLock.Lock() - defer atomicLock.Unlock() - *p = v -} -func atomicLoadMergeInfo(p **mergeInfo) *mergeInfo { - atomicLock.Lock() - defer atomicLock.Unlock() - return *p -} -func atomicStoreMergeInfo(p **mergeInfo, v *mergeInfo) { - atomicLock.Lock() - defer atomicLock.Unlock() - *p = v -} -func atomicLoadDiscardInfo(p **discardInfo) *discardInfo { - atomicLock.Lock() - defer atomicLock.Unlock() - return *p -} -func atomicStoreDiscardInfo(p **discardInfo, v *discardInfo) { - atomicLock.Lock() - defer atomicLock.Unlock() - *p = v -} - -var atomicLock sync.Mutex diff --git a/vendor/github.com/gogo/protobuf/proto/pointer_reflect_gogo.go b/vendor/github.com/gogo/protobuf/proto/pointer_reflect_gogo.go deleted file mode 100644 index 7ffd3c29d9..0000000000 --- a/vendor/github.com/gogo/protobuf/proto/pointer_reflect_gogo.go +++ /dev/null @@ -1,59 +0,0 @@ -// Protocol Buffers for Go with Gadgets -// -// Copyright (c) 2018, The GoGo Authors. All rights reserved. -// http://github.com/gogo/protobuf -// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following disclaimer -// in the documentation and/or other materials provided with the -// distribution. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -// +build purego appengine js - -// This file contains an implementation of proto field accesses using package reflect. -// It is slower than the code in pointer_unsafe.go but it avoids package unsafe and can -// be used on App Engine. - -package proto - -import ( - "reflect" -) - -// TODO: untested, so probably incorrect. 
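The reflect-based implementation deleted above models a field as its index path and a generic pointer as an addressable reflect.Value; offset dereferences the struct pointer, walks FieldByIndex, and retakes the field's address. A self-contained sketch of that mechanism, with simplified standalone names:

package main

import (
	"fmt"
	"reflect"
)

// pointer mirrors the deleted type: a reflect.Value of pointer kind.
type pointer struct{ v reflect.Value }

// offset mirrors pointer.offset above: dereference the struct pointer,
// walk the field index path, and take the field's address again.
func (p pointer) offset(f []int) pointer {
	return pointer{v: p.v.Elem().FieldByIndex(f).Addr()}
}

type inner struct{ N int64 }
type outer struct{ In inner }

func main() {
	o := &outer{In: inner{N: 7}}
	p := pointer{v: reflect.ValueOf(o)}

	// Two hops: outer.In (index path {0}), then inner.N (index path {0}).
	np := p.offset([]int{0}).offset([]int{0})

	// The equivalent of the deleted toInt64 accessor.
	*np.v.Interface().(*int64) = 42
	fmt.Println(o.In.N) // 42
}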
- -func (p pointer) getRef() pointer { - return pointer{v: p.v.Addr()} -} - -func (p pointer) appendRef(v pointer, typ reflect.Type) { - slice := p.getSlice(typ) - elem := v.asPointerTo(typ).Elem() - newSlice := reflect.Append(slice, elem) - slice.Set(newSlice) -} - -func (p pointer) getSlice(typ reflect.Type) reflect.Value { - sliceTyp := reflect.SliceOf(typ) - slice := p.asPointerTo(sliceTyp) - slice = slice.Elem() - return slice -} diff --git a/vendor/github.com/gogo/protobuf/proto/pointer_unsafe.go b/vendor/github.com/gogo/protobuf/proto/pointer_unsafe.go deleted file mode 100644 index d55a335d94..0000000000 --- a/vendor/github.com/gogo/protobuf/proto/pointer_unsafe.go +++ /dev/null @@ -1,308 +0,0 @@ -// Go support for Protocol Buffers - Google's data interchange format -// -// Copyright 2012 The Go Authors. All rights reserved. -// https://github.com/golang/protobuf -// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following disclaimer -// in the documentation and/or other materials provided with the -// distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived from -// this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -// +build !purego,!appengine,!js - -// This file contains the implementation of the proto field accesses using package unsafe. - -package proto - -import ( - "reflect" - "sync/atomic" - "unsafe" -) - -const unsafeAllowed = true - -// A field identifies a field in a struct, accessible from a pointer. -// In this implementation, a field is identified by its byte offset from the start of the struct. -type field uintptr - -// toField returns a field equivalent to the given reflect field. -func toField(f *reflect.StructField) field { - return field(f.Offset) -} - -// invalidField is an invalid field identifier. -const invalidField = ^field(0) - -// zeroField is a noop when calling pointer.offset. -const zeroField = field(0) - -// IsValid reports whether the field identifier is valid. -func (f field) IsValid() bool { - return f != invalidField -} - -// The pointer type below is for the new table-driven encoder/decoder. -// The implementation here uses unsafe.Pointer to create a generic pointer. -// In pointer_reflect.go we use reflect instead of unsafe to implement -// the same (but slower) interface. 
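The deleted files select between two interchangeable pointer implementations with mutually exclusive build constraints, so any given binary compiles exactly one of them. A sketch of the same arrangement, assuming two hypothetical files in a package access:

// pointer_fast.go -- compiled everywhere except the restricted builds
// +build !purego,!appengine,!js

package access

import "unsafe"

type pointer struct{ p unsafe.Pointer }

func (p pointer) isNil() bool { return p.p == nil }

// pointer_slow.go -- the mutually exclusive fallback
// +build purego appengine js

package access

import "reflect"

type pointer struct{ v reflect.Value }

func (p pointer) isNil() bool { return p.v.IsNil() }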
-type pointer struct { - p unsafe.Pointer -} - -// size of pointer -var ptrSize = unsafe.Sizeof(uintptr(0)) - -// toPointer converts an interface of pointer type to a pointer -// that points to the same target. -func toPointer(i *Message) pointer { - // Super-tricky - read pointer out of data word of interface value. - // Saves ~25ns over the equivalent: - // return valToPointer(reflect.ValueOf(*i)) - return pointer{p: (*[2]unsafe.Pointer)(unsafe.Pointer(i))[1]} -} - -// toAddrPointer converts an interface to a pointer that points to -// the interface data. -func toAddrPointer(i *interface{}, isptr bool) pointer { - // Super-tricky - read or get the address of data word of interface value. - if isptr { - // The interface is of pointer type, thus it is a direct interface. - // The data word is the pointer data itself. We take its address. - return pointer{p: unsafe.Pointer(uintptr(unsafe.Pointer(i)) + ptrSize)} - } - // The interface is not of pointer type. The data word is the pointer - // to the data. - return pointer{p: (*[2]unsafe.Pointer)(unsafe.Pointer(i))[1]} -} - -// valToPointer converts v to a pointer. v must be of pointer type. -func valToPointer(v reflect.Value) pointer { - return pointer{p: unsafe.Pointer(v.Pointer())} -} - -// offset converts from a pointer to a structure to a pointer to -// one of its fields. -func (p pointer) offset(f field) pointer { - // For safety, we should panic if !f.IsValid, however calling panic causes - // this to no longer be inlineable, which is a serious performance cost. - /* - if !f.IsValid() { - panic("invalid field") - } - */ - return pointer{p: unsafe.Pointer(uintptr(p.p) + uintptr(f))} -} - -func (p pointer) isNil() bool { - return p.p == nil -} - -func (p pointer) toInt64() *int64 { - return (*int64)(p.p) -} -func (p pointer) toInt64Ptr() **int64 { - return (**int64)(p.p) -} -func (p pointer) toInt64Slice() *[]int64 { - return (*[]int64)(p.p) -} -func (p pointer) toInt32() *int32 { - return (*int32)(p.p) -} - -// See pointer_reflect.go for why toInt32Ptr/Slice doesn't exist. -/* - func (p pointer) toInt32Ptr() **int32 { - return (**int32)(p.p) - } - func (p pointer) toInt32Slice() *[]int32 { - return (*[]int32)(p.p) - } -*/ -func (p pointer) getInt32Ptr() *int32 { - return *(**int32)(p.p) -} -func (p pointer) setInt32Ptr(v int32) { - *(**int32)(p.p) = &v -} - -// getInt32Slice loads a []int32 from p. -// The value returned is aliased with the original slice. -// This behavior differs from the implementation in pointer_reflect.go. -func (p pointer) getInt32Slice() []int32 { - return *(*[]int32)(p.p) -} - -// setInt32Slice stores a []int32 to p. -// The value set is aliased with the input slice. -// This behavior differs from the implementation in pointer_reflect.go. -func (p pointer) setInt32Slice(v []int32) { - *(*[]int32)(p.p) = v -} - -// TODO: Can we get rid of appendInt32Slice and use setInt32Slice instead? 
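In this unsafe variant a field is nothing but a byte offset, and pointer.offset adds it to the struct's base address. The same trick as a runnable, self-contained example (the msg type is hypothetical):

package main

import (
	"fmt"
	"reflect"
	"unsafe"
)

type msg struct {
	A int32
	B string
}

func main() {
	m := msg{A: 7, B: "hi"}

	// Identify the field by its byte offset, exactly as toField does.
	f, _ := reflect.TypeOf(m).FieldByName("B")
	off := f.Offset

	// Re-derive a *string from base address + offset, as pointer.offset does.
	base := unsafe.Pointer(&m)
	bp := (*string)(unsafe.Pointer(uintptr(base) + off))
	fmt.Println(*bp) // hi
}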
-func (p pointer) appendInt32Slice(v int32) { - s := (*[]int32)(p.p) - *s = append(*s, v) -} - -func (p pointer) toUint64() *uint64 { - return (*uint64)(p.p) -} -func (p pointer) toUint64Ptr() **uint64 { - return (**uint64)(p.p) -} -func (p pointer) toUint64Slice() *[]uint64 { - return (*[]uint64)(p.p) -} -func (p pointer) toUint32() *uint32 { - return (*uint32)(p.p) -} -func (p pointer) toUint32Ptr() **uint32 { - return (**uint32)(p.p) -} -func (p pointer) toUint32Slice() *[]uint32 { - return (*[]uint32)(p.p) -} -func (p pointer) toBool() *bool { - return (*bool)(p.p) -} -func (p pointer) toBoolPtr() **bool { - return (**bool)(p.p) -} -func (p pointer) toBoolSlice() *[]bool { - return (*[]bool)(p.p) -} -func (p pointer) toFloat64() *float64 { - return (*float64)(p.p) -} -func (p pointer) toFloat64Ptr() **float64 { - return (**float64)(p.p) -} -func (p pointer) toFloat64Slice() *[]float64 { - return (*[]float64)(p.p) -} -func (p pointer) toFloat32() *float32 { - return (*float32)(p.p) -} -func (p pointer) toFloat32Ptr() **float32 { - return (**float32)(p.p) -} -func (p pointer) toFloat32Slice() *[]float32 { - return (*[]float32)(p.p) -} -func (p pointer) toString() *string { - return (*string)(p.p) -} -func (p pointer) toStringPtr() **string { - return (**string)(p.p) -} -func (p pointer) toStringSlice() *[]string { - return (*[]string)(p.p) -} -func (p pointer) toBytes() *[]byte { - return (*[]byte)(p.p) -} -func (p pointer) toBytesSlice() *[][]byte { - return (*[][]byte)(p.p) -} -func (p pointer) toExtensions() *XXX_InternalExtensions { - return (*XXX_InternalExtensions)(p.p) -} -func (p pointer) toOldExtensions() *map[int32]Extension { - return (*map[int32]Extension)(p.p) -} - -// getPointerSlice loads []*T from p as a []pointer. -// The value returned is aliased with the original slice. -// This behavior differs from the implementation in pointer_reflect.go. -func (p pointer) getPointerSlice() []pointer { - // Super-tricky - p should point to a []*T where T is a - // message type. We load it as []pointer. - return *(*[]pointer)(p.p) -} - -// setPointerSlice stores []pointer into p as a []*T. -// The value set is aliased with the input slice. -// This behavior differs from the implementation in pointer_reflect.go. -func (p pointer) setPointerSlice(v []pointer) { - // Super-tricky - p should point to a []*T where T is a - // message type. We store it as []pointer. - *(*[]pointer)(p.p) = v -} - -// getPointer loads the pointer at p and returns it. -func (p pointer) getPointer() pointer { - return pointer{p: *(*unsafe.Pointer)(p.p)} -} - -// setPointer stores the pointer q at p. -func (p pointer) setPointer(q pointer) { - *(*unsafe.Pointer)(p.p) = q.p -} - -// append q to the slice pointed to by p. -func (p pointer) appendPointer(q pointer) { - s := (*[]unsafe.Pointer)(p.p) - *s = append(*s, q.p) -} - -// getInterfacePointer returns a pointer that points to the -// interface data of the interface pointed by p. -func (p pointer) getInterfacePointer() pointer { - // Super-tricky - read pointer out of data word of interface value. - return pointer{p: (*(*[2]unsafe.Pointer)(p.p))[1]} -} - -// asPointerTo returns a reflect.Value that is a pointer to an -// object of type t stored at p. 
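getPointerSlice and setPointerSlice above rely on []*T and []unsafe.Pointer sharing an identical slice-header and element layout, so the header can be reinterpreted wholesale rather than copied element by element. A compact demonstration with a hypothetical T:

package main

import (
	"fmt"
	"unsafe"
)

type T struct{ n int }

func main() {
	xs := []*T{{1}, {2}, {3}}

	// Reinterpret the slice header in place: same data pointer, len and
	// cap, element type viewed as unsafe.Pointer instead of *T.
	ps := *(*[]unsafe.Pointer)(unsafe.Pointer(&xs))

	for _, p := range ps {
		fmt.Println((*T)(p).n) // 1 2 3
	}
}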
-func (p pointer) asPointerTo(t reflect.Type) reflect.Value { - return reflect.NewAt(t, p.p) -} - -func atomicLoadUnmarshalInfo(p **unmarshalInfo) *unmarshalInfo { - return (*unmarshalInfo)(atomic.LoadPointer((*unsafe.Pointer)(unsafe.Pointer(p)))) -} -func atomicStoreUnmarshalInfo(p **unmarshalInfo, v *unmarshalInfo) { - atomic.StorePointer((*unsafe.Pointer)(unsafe.Pointer(p)), unsafe.Pointer(v)) -} -func atomicLoadMarshalInfo(p **marshalInfo) *marshalInfo { - return (*marshalInfo)(atomic.LoadPointer((*unsafe.Pointer)(unsafe.Pointer(p)))) -} -func atomicStoreMarshalInfo(p **marshalInfo, v *marshalInfo) { - atomic.StorePointer((*unsafe.Pointer)(unsafe.Pointer(p)), unsafe.Pointer(v)) -} -func atomicLoadMergeInfo(p **mergeInfo) *mergeInfo { - return (*mergeInfo)(atomic.LoadPointer((*unsafe.Pointer)(unsafe.Pointer(p)))) -} -func atomicStoreMergeInfo(p **mergeInfo, v *mergeInfo) { - atomic.StorePointer((*unsafe.Pointer)(unsafe.Pointer(p)), unsafe.Pointer(v)) -} -func atomicLoadDiscardInfo(p **discardInfo) *discardInfo { - return (*discardInfo)(atomic.LoadPointer((*unsafe.Pointer)(unsafe.Pointer(p)))) -} -func atomicStoreDiscardInfo(p **discardInfo, v *discardInfo) { - atomic.StorePointer((*unsafe.Pointer)(unsafe.Pointer(p)), unsafe.Pointer(v)) -} diff --git a/vendor/github.com/gogo/protobuf/proto/pointer_unsafe_gogo.go b/vendor/github.com/gogo/protobuf/proto/pointer_unsafe_gogo.go deleted file mode 100644 index aca8eed02a..0000000000 --- a/vendor/github.com/gogo/protobuf/proto/pointer_unsafe_gogo.go +++ /dev/null @@ -1,56 +0,0 @@ -// Protocol Buffers for Go with Gadgets -// -// Copyright (c) 2018, The GoGo Authors. All rights reserved. -// http://github.com/gogo/protobuf -// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following disclaimer -// in the documentation and/or other materials provided with the -// distribution. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -// +build !purego,!appengine,!js - -// This file contains the implementation of the proto field accesses using package unsafe. 
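asPointerTo is the bridge back from raw memory to the typed world: reflect.NewAt manufactures a reflect.Value of type *T that points at storage the caller already owns. In isolation:

package main

import (
	"fmt"
	"reflect"
	"unsafe"
)

func main() {
	x := int64(42)
	raw := unsafe.Pointer(&x)

	// Rebuild a typed *int64 value from the untyped address.
	v := reflect.NewAt(reflect.TypeOf(int64(0)), raw)
	fmt.Println(v.Elem().Int()) // 42

	// Writes through the rebuilt value hit the original storage.
	v.Elem().SetInt(7)
	fmt.Println(x) // 7
}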
- -package proto - -import ( - "reflect" - "unsafe" -) - -func (p pointer) getRef() pointer { - return pointer{p: (unsafe.Pointer)(&p.p)} -} - -func (p pointer) appendRef(v pointer, typ reflect.Type) { - slice := p.getSlice(typ) - elem := v.asPointerTo(typ).Elem() - newSlice := reflect.Append(slice, elem) - slice.Set(newSlice) -} - -func (p pointer) getSlice(typ reflect.Type) reflect.Value { - sliceTyp := reflect.SliceOf(typ) - slice := p.asPointerTo(sliceTyp) - slice = slice.Elem() - return slice -} diff --git a/vendor/github.com/gogo/protobuf/proto/properties.go b/vendor/github.com/gogo/protobuf/proto/properties.go deleted file mode 100644 index 28da1475fb..0000000000 --- a/vendor/github.com/gogo/protobuf/proto/properties.go +++ /dev/null @@ -1,610 +0,0 @@ -// Protocol Buffers for Go with Gadgets -// -// Copyright (c) 2013, The GoGo Authors. All rights reserved. -// http://github.com/gogo/protobuf -// -// Go support for Protocol Buffers - Google's data interchange format -// -// Copyright 2010 The Go Authors. All rights reserved. -// https://github.com/golang/protobuf -// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following disclaimer -// in the documentation and/or other materials provided with the -// distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived from -// this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -package proto - -/* - * Routines for encoding data into the wire format for protocol buffers. - */ - -import ( - "fmt" - "log" - "reflect" - "sort" - "strconv" - "strings" - "sync" -) - -const debug bool = false - -// Constants that identify the encoding of a value on the wire. -const ( - WireVarint = 0 - WireFixed64 = 1 - WireBytes = 2 - WireStartGroup = 3 - WireEndGroup = 4 - WireFixed32 = 5 -) - -// tagMap is an optimization over map[int]int for typical protocol buffer -// use-cases. Encoded protocol buffers are often in tag order with small tag -// numbers. -type tagMap struct { - fastTags []int - slowTags map[int]int -} - -// tagMapFastLimit is the upper bound on the tag number that will be stored in -// the tagMap slice rather than its map. 
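Every key on the protobuf wire packs a field number and one of the wire-type constants above into a single varint: key = tag<<3 | wiretype. A quick check of the arithmetic for field 49 encoded as bytes, the tag used in this package's own comments:

package main

import "fmt"

const WireBytes = 2 // length-delimited, as in the constants above

func main() {
	// Key for field number 49 with wire type "bytes":
	key := uint64(49)<<3 | WireBytes
	fmt.Println(key) // 394

	// Decoding reverses the packing:
	fmt.Println(key>>3, key&0x7) // 49 2
}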
-const tagMapFastLimit = 1024 - -func (p *tagMap) get(t int) (int, bool) { - if t > 0 && t < tagMapFastLimit { - if t >= len(p.fastTags) { - return 0, false - } - fi := p.fastTags[t] - return fi, fi >= 0 - } - fi, ok := p.slowTags[t] - return fi, ok -} - -func (p *tagMap) put(t int, fi int) { - if t > 0 && t < tagMapFastLimit { - for len(p.fastTags) < t+1 { - p.fastTags = append(p.fastTags, -1) - } - p.fastTags[t] = fi - return - } - if p.slowTags == nil { - p.slowTags = make(map[int]int) - } - p.slowTags[t] = fi -} - -// StructProperties represents properties for all the fields of a struct. -// decoderTags and decoderOrigNames should only be used by the decoder. -type StructProperties struct { - Prop []*Properties // properties for each field - reqCount int // required count - decoderTags tagMap // map from proto tag to struct field number - decoderOrigNames map[string]int // map from original name to struct field number - order []int // list of struct field numbers in tag order - - // OneofTypes contains information about the oneof fields in this message. - // It is keyed by the original name of a field. - OneofTypes map[string]*OneofProperties -} - -// OneofProperties represents information about a specific field in a oneof. -type OneofProperties struct { - Type reflect.Type // pointer to generated struct type for this oneof field - Field int // struct field number of the containing oneof in the message - Prop *Properties -} - -// Implement the sorting interface so we can sort the fields in tag order, as recommended by the spec. -// See encode.go, (*Buffer).enc_struct. - -func (sp *StructProperties) Len() int { return len(sp.order) } -func (sp *StructProperties) Less(i, j int) bool { - return sp.Prop[sp.order[i]].Tag < sp.Prop[sp.order[j]].Tag -} -func (sp *StructProperties) Swap(i, j int) { sp.order[i], sp.order[j] = sp.order[j], sp.order[i] } - -// Properties represents the protocol-specific behavior of a single struct field. -type Properties struct { - Name string // name of the field, for error messages - OrigName string // original name before protocol compiler (always set) - JSONName string // name to use for JSON; determined by protoc - Wire string - WireType int - Tag int - Required bool - Optional bool - Repeated bool - Packed bool // relevant for repeated primitives only - Enum string // set for enum types only - proto3 bool // whether this is known to be a proto3 field - oneof bool // whether this is a oneof field - - Default string // default value - HasDefault bool // whether an explicit default was provided - CustomType string - CastType string - StdTime bool - StdDuration bool - WktPointer bool - - stype reflect.Type // set for struct types only - ctype reflect.Type // set for custom types only - sprop *StructProperties // set for struct types only - - mtype reflect.Type // set for map types only - MapKeyProp *Properties // set for map types only - MapValProp *Properties // set for map types only -} - -// String formats the properties in the protobuf struct field tag style. 
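tagMap trades a small dense slice for O(1) lookups on tags below tagMapFastLimit, spilling everything else into a map. Reduced to its essentials as a runnable sketch; the limit and the -1 sentinel mirror the deleted code:

package main

import "fmt"

const fastLimit = 1024

type tagMap struct {
	fast []int
	slow map[int]int
}

func (m *tagMap) put(tag, idx int) {
	if tag > 0 && tag < fastLimit {
		for len(m.fast) <= tag {
			m.fast = append(m.fast, -1) // -1 marks "absent"
		}
		m.fast[tag] = idx
		return
	}
	if m.slow == nil {
		m.slow = make(map[int]int)
	}
	m.slow[tag] = idx
}

func (m *tagMap) get(tag int) (int, bool) {
	if tag > 0 && tag < fastLimit {
		if tag >= len(m.fast) {
			return 0, false
		}
		idx := m.fast[tag]
		return idx, idx >= 0
	}
	idx, ok := m.slow[tag]
	return idx, ok
}

func main() {
	var m tagMap
	m.put(3, 0)
	m.put(5000, 1)
	fmt.Println(m.get(3))    // 0 true
	fmt.Println(m.get(5000)) // 1 true
	fmt.Println(m.get(9))    // 0 false
}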
-func (p *Properties) String() string { - s := p.Wire - s += "," - s += strconv.Itoa(p.Tag) - if p.Required { - s += ",req" - } - if p.Optional { - s += ",opt" - } - if p.Repeated { - s += ",rep" - } - if p.Packed { - s += ",packed" - } - s += ",name=" + p.OrigName - if p.JSONName != p.OrigName { - s += ",json=" + p.JSONName - } - if p.proto3 { - s += ",proto3" - } - if p.oneof { - s += ",oneof" - } - if len(p.Enum) > 0 { - s += ",enum=" + p.Enum - } - if p.HasDefault { - s += ",def=" + p.Default - } - return s -} - -// Parse populates p by parsing a string in the protobuf struct field tag style. -func (p *Properties) Parse(s string) { - // "bytes,49,opt,name=foo,def=hello!" - fields := strings.Split(s, ",") // breaks def=, but handled below. - if len(fields) < 2 { - log.Printf("proto: tag has too few fields: %q", s) - return - } - - p.Wire = fields[0] - switch p.Wire { - case "varint": - p.WireType = WireVarint - case "fixed32": - p.WireType = WireFixed32 - case "fixed64": - p.WireType = WireFixed64 - case "zigzag32": - p.WireType = WireVarint - case "zigzag64": - p.WireType = WireVarint - case "bytes", "group": - p.WireType = WireBytes - // no numeric converter for non-numeric types - default: - log.Printf("proto: tag has unknown wire type: %q", s) - return - } - - var err error - p.Tag, err = strconv.Atoi(fields[1]) - if err != nil { - return - } - -outer: - for i := 2; i < len(fields); i++ { - f := fields[i] - switch { - case f == "req": - p.Required = true - case f == "opt": - p.Optional = true - case f == "rep": - p.Repeated = true - case f == "packed": - p.Packed = true - case strings.HasPrefix(f, "name="): - p.OrigName = f[5:] - case strings.HasPrefix(f, "json="): - p.JSONName = f[5:] - case strings.HasPrefix(f, "enum="): - p.Enum = f[5:] - case f == "proto3": - p.proto3 = true - case f == "oneof": - p.oneof = true - case strings.HasPrefix(f, "def="): - p.HasDefault = true - p.Default = f[4:] // rest of string - if i+1 < len(fields) { - // Commas aren't escaped, and def is always last. - p.Default += "," + strings.Join(fields[i+1:], ",") - break outer - } - case strings.HasPrefix(f, "embedded="): - p.OrigName = strings.Split(f, "=")[1] - case strings.HasPrefix(f, "customtype="): - p.CustomType = strings.Split(f, "=")[1] - case strings.HasPrefix(f, "casttype="): - p.CastType = strings.Split(f, "=")[1] - case f == "stdtime": - p.StdTime = true - case f == "stdduration": - p.StdDuration = true - case f == "wktptr": - p.WktPointer = true - } - } -} - -var protoMessageType = reflect.TypeOf((*Message)(nil)).Elem() - -// setFieldProps initializes the field properties for submessages and maps. 
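Parse above splits the tag on commas even though def= values may themselves contain commas; it copes by treating def= as always last and re-joining the tail. That corner case as a tiny standalone function (parseDefault is a hypothetical helper, not the vendored API):

package main

import (
	"fmt"
	"strings"
)

func parseDefault(tag string) (string, bool) {
	fields := strings.Split(tag, ",")
	for i, f := range fields {
		if strings.HasPrefix(f, "def=") {
			// Commas are not escaped and def is always last,
			// so glue the split pieces back together.
			return strings.Join(append([]string{f[4:]}, fields[i+1:]...), ","), true
		}
	}
	return "", false
}

func main() {
	def, ok := parseDefault("bytes,49,opt,name=foo,def=hello, world!")
	fmt.Printf("%q %v\n", def, ok) // "hello, world!" true
}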
-func (p *Properties) setFieldProps(typ reflect.Type, f *reflect.StructField, lockGetProp bool) { - isMap := typ.Kind() == reflect.Map - if len(p.CustomType) > 0 && !isMap { - p.ctype = typ - p.setTag(lockGetProp) - return - } - if p.StdTime && !isMap { - p.setTag(lockGetProp) - return - } - if p.StdDuration && !isMap { - p.setTag(lockGetProp) - return - } - if p.WktPointer && !isMap { - p.setTag(lockGetProp) - return - } - switch t1 := typ; t1.Kind() { - case reflect.Struct: - p.stype = typ - case reflect.Ptr: - if t1.Elem().Kind() == reflect.Struct { - p.stype = t1.Elem() - } - case reflect.Slice: - switch t2 := t1.Elem(); t2.Kind() { - case reflect.Ptr: - switch t3 := t2.Elem(); t3.Kind() { - case reflect.Struct: - p.stype = t3 - } - case reflect.Struct: - p.stype = t2 - } - - case reflect.Map: - - p.mtype = t1 - p.MapKeyProp = &Properties{} - p.MapKeyProp.init(reflect.PtrTo(p.mtype.Key()), "Key", f.Tag.Get("protobuf_key"), nil, lockGetProp) - p.MapValProp = &Properties{} - vtype := p.mtype.Elem() - if vtype.Kind() != reflect.Ptr && vtype.Kind() != reflect.Slice { - // The value type is not a message (*T) or bytes ([]byte), - // so we need encoders for the pointer to this type. - vtype = reflect.PtrTo(vtype) - } - - p.MapValProp.CustomType = p.CustomType - p.MapValProp.StdDuration = p.StdDuration - p.MapValProp.StdTime = p.StdTime - p.MapValProp.WktPointer = p.WktPointer - p.MapValProp.init(vtype, "Value", f.Tag.Get("protobuf_val"), nil, lockGetProp) - } - p.setTag(lockGetProp) -} - -func (p *Properties) setTag(lockGetProp bool) { - if p.stype != nil { - if lockGetProp { - p.sprop = GetProperties(p.stype) - } else { - p.sprop = getPropertiesLocked(p.stype) - } - } -} - -var ( - marshalerType = reflect.TypeOf((*Marshaler)(nil)).Elem() -) - -// Init populates the properties from a protocol buffer struct tag. -func (p *Properties) Init(typ reflect.Type, name, tag string, f *reflect.StructField) { - p.init(typ, name, tag, f, true) -} - -func (p *Properties) init(typ reflect.Type, name, tag string, f *reflect.StructField, lockGetProp bool) { - // "bytes,49,opt,def=hello!" - p.Name = name - p.OrigName = name - if tag == "" { - return - } - p.Parse(tag) - p.setFieldProps(typ, f, lockGetProp) -} - -var ( - propertiesMu sync.RWMutex - propertiesMap = make(map[reflect.Type]*StructProperties) -) - -// GetProperties returns the list of properties for the type represented by t. -// t must represent a generated struct type of a protocol message. -func GetProperties(t reflect.Type) *StructProperties { - if t.Kind() != reflect.Struct { - panic("proto: type must have kind struct") - } - - // Most calls to GetProperties in a long-running program will be - // retrieving details for types we have seen before. - propertiesMu.RLock() - sprop, ok := propertiesMap[t] - propertiesMu.RUnlock() - if ok { - return sprop - } - - propertiesMu.Lock() - sprop = getPropertiesLocked(t) - propertiesMu.Unlock() - return sprop -} - -type ( - oneofFuncsIface interface { - XXX_OneofFuncs() (func(Message, *Buffer) error, func(Message, int, int, *Buffer) (bool, error), func(Message) int, []interface{}) - } - oneofWrappersIface interface { - XXX_OneofWrappers() []interface{} - } -) - -// getPropertiesLocked requires that propertiesMu is held. -func getPropertiesLocked(t reflect.Type) *StructProperties { - if prop, ok := propertiesMap[t]; ok { - return prop - } - - prop := new(StructProperties) - // in case of recursive protos, fill this in now. 
- propertiesMap[t] = prop - - // build properties - prop.Prop = make([]*Properties, t.NumField()) - prop.order = make([]int, t.NumField()) - - isOneofMessage := false - for i := 0; i < t.NumField(); i++ { - f := t.Field(i) - p := new(Properties) - name := f.Name - p.init(f.Type, name, f.Tag.Get("protobuf"), &f, false) - - oneof := f.Tag.Get("protobuf_oneof") // special case - if oneof != "" { - isOneofMessage = true - // Oneof fields don't use the traditional protobuf tag. - p.OrigName = oneof - } - prop.Prop[i] = p - prop.order[i] = i - if debug { - print(i, " ", f.Name, " ", t.String(), " ") - if p.Tag > 0 { - print(p.String()) - } - print("\n") - } - } - - // Re-order prop.order. - sort.Sort(prop) - - if isOneofMessage { - var oots []interface{} - switch m := reflect.Zero(reflect.PtrTo(t)).Interface().(type) { - case oneofFuncsIface: - _, _, _, oots = m.XXX_OneofFuncs() - case oneofWrappersIface: - oots = m.XXX_OneofWrappers() - } - if len(oots) > 0 { - // Interpret oneof metadata. - prop.OneofTypes = make(map[string]*OneofProperties) - for _, oot := range oots { - oop := &OneofProperties{ - Type: reflect.ValueOf(oot).Type(), // *T - Prop: new(Properties), - } - sft := oop.Type.Elem().Field(0) - oop.Prop.Name = sft.Name - oop.Prop.Parse(sft.Tag.Get("protobuf")) - // There will be exactly one interface field that - // this new value is assignable to. - for i := 0; i < t.NumField(); i++ { - f := t.Field(i) - if f.Type.Kind() != reflect.Interface { - continue - } - if !oop.Type.AssignableTo(f.Type) { - continue - } - oop.Field = i - break - } - prop.OneofTypes[oop.Prop.OrigName] = oop - } - } - } - - // build required counts - // build tags - reqCount := 0 - prop.decoderOrigNames = make(map[string]int) - for i, p := range prop.Prop { - if strings.HasPrefix(p.Name, "XXX_") { - // Internal fields should not appear in tags/origNames maps. - // They are handled specially when encoding and decoding. - continue - } - if p.Required { - reqCount++ - } - prop.decoderTags.put(p.Tag, i) - prop.decoderOrigNames[p.OrigName] = i - } - prop.reqCount = reqCount - - return prop -} - -// A global registry of enum types. -// The generated code will register the generated maps by calling RegisterEnum. - -var enumValueMaps = make(map[string]map[string]int32) -var enumStringMaps = make(map[string]map[int32]string) - -// RegisterEnum is called from the generated code to install the enum descriptor -// maps into the global table to aid parsing text format protocol buffers. -func RegisterEnum(typeName string, unusedNameMap map[int32]string, valueMap map[string]int32) { - if _, ok := enumValueMaps[typeName]; ok { - panic("proto: duplicate enum registered: " + typeName) - } - enumValueMaps[typeName] = valueMap - if _, ok := enumStringMaps[typeName]; ok { - panic("proto: duplicate enum registered: " + typeName) - } - enumStringMaps[typeName] = unusedNameMap -} - -// EnumValueMap returns the mapping from names to integers of the -// enum type enumType, or a nil if not found. -func EnumValueMap(enumType string) map[string]int32 { - return enumValueMaps[enumType] -} - -// A registry of all linked message types. -// The string is a fully-qualified proto name ("pkg.Message"). 
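One subtlety in getPropertiesLocked above: the empty *StructProperties is published into propertiesMap before the field loop runs, so a recursive message type terminates with a cache hit instead of recursing forever. The same guard on a hypothetical self-referential type:

package main

import (
	"fmt"
	"reflect"
)

type props struct{ fields []string }

var cache = map[reflect.Type]*props{}

func get(t reflect.Type) *props {
	if p, ok := cache[t]; ok {
		return p
	}
	p := new(props)
	cache[t] = p // publish BEFORE recursing, so cycles hit the cache
	for i := 0; i < t.NumField(); i++ {
		f := t.Field(i)
		p.fields = append(p.fields, f.Name)
		if ft := f.Type; ft.Kind() == reflect.Ptr && ft.Elem().Kind() == reflect.Struct {
			get(ft.Elem()) // would loop forever without the line above
		}
	}
	return p
}

type Tree struct {
	Value int
	Next  *Tree // points back at its own type
}

func main() {
	fmt.Println(get(reflect.TypeOf(Tree{})).fields) // [Value Next]
}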
-var ( - protoTypedNils = make(map[string]Message) // a map from proto names to typed nil pointers - protoMapTypes = make(map[string]reflect.Type) // a map from proto names to map types - revProtoTypes = make(map[reflect.Type]string) -) - -// RegisterType is called from generated code and maps from the fully qualified -// proto name to the type (pointer to struct) of the protocol buffer. -func RegisterType(x Message, name string) { - if _, ok := protoTypedNils[name]; ok { - // TODO: Some day, make this a panic. - log.Printf("proto: duplicate proto type registered: %s", name) - return - } - t := reflect.TypeOf(x) - if v := reflect.ValueOf(x); v.Kind() == reflect.Ptr && v.Pointer() == 0 { - // Generated code always calls RegisterType with nil x. - // This check is just for extra safety. - protoTypedNils[name] = x - } else { - protoTypedNils[name] = reflect.Zero(t).Interface().(Message) - } - revProtoTypes[t] = name -} - -// RegisterMapType is called from generated code and maps from the fully qualified -// proto name to the native map type of the proto map definition. -func RegisterMapType(x interface{}, name string) { - if reflect.TypeOf(x).Kind() != reflect.Map { - panic(fmt.Sprintf("RegisterMapType(%T, %q); want map", x, name)) - } - if _, ok := protoMapTypes[name]; ok { - log.Printf("proto: duplicate proto type registered: %s", name) - return - } - t := reflect.TypeOf(x) - protoMapTypes[name] = t - revProtoTypes[t] = name -} - -// MessageName returns the fully-qualified proto name for the given message type. -func MessageName(x Message) string { - type xname interface { - XXX_MessageName() string - } - if m, ok := x.(xname); ok { - return m.XXX_MessageName() - } - return revProtoTypes[reflect.TypeOf(x)] -} - -// MessageType returns the message type (pointer to struct) for a named message. -// The type is not guaranteed to implement proto.Message if the name refers to a -// map entry. -func MessageType(name string) reflect.Type { - if t, ok := protoTypedNils[name]; ok { - return reflect.TypeOf(t) - } - return protoMapTypes[name] -} - -// A registry of all linked proto files. -var ( - protoFiles = make(map[string][]byte) // file name => fileDescriptor -) - -// RegisterFile is called from generated code and maps from the -// full file name of a .proto file to its compressed FileDescriptorProto. -func RegisterFile(filename string, fileDescriptor []byte) { - protoFiles[filename] = fileDescriptor -} - -// FileDescriptor returns the compressed FileDescriptorProto for a .proto file. -func FileDescriptor(filename string) []byte { return protoFiles[filename] } diff --git a/vendor/github.com/gogo/protobuf/proto/properties_gogo.go b/vendor/github.com/gogo/protobuf/proto/properties_gogo.go deleted file mode 100644 index 40ea3dd935..0000000000 --- a/vendor/github.com/gogo/protobuf/proto/properties_gogo.go +++ /dev/null @@ -1,36 +0,0 @@ -// Protocol Buffers for Go with Gadgets -// -// Copyright (c) 2018, The GoGo Authors. All rights reserved. -// http://github.com/gogo/protobuf -// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following disclaimer -// in the documentation and/or other materials provided with the -// distribution. 
-// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -package proto - -import ( - "reflect" -) - -var sizerType = reflect.TypeOf((*Sizer)(nil)).Elem() -var protosizerType = reflect.TypeOf((*ProtoSizer)(nil)).Elem() diff --git a/vendor/github.com/gogo/protobuf/proto/skip_gogo.go b/vendor/github.com/gogo/protobuf/proto/skip_gogo.go deleted file mode 100644 index 5a5fd93f7c..0000000000 --- a/vendor/github.com/gogo/protobuf/proto/skip_gogo.go +++ /dev/null @@ -1,119 +0,0 @@ -// Protocol Buffers for Go with Gadgets -// -// Copyright (c) 2013, The GoGo Authors. All rights reserved. -// http://github.com/gogo/protobuf -// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following disclaimer -// in the documentation and/or other materials provided with the -// distribution. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
- -package proto - -import ( - "fmt" - "io" -) - -func Skip(data []byte) (n int, err error) { - l := len(data) - index := 0 - for index < l { - var wire uint64 - for shift := uint(0); ; shift += 7 { - if index >= l { - return 0, io.ErrUnexpectedEOF - } - b := data[index] - index++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - wireType := int(wire & 0x7) - switch wireType { - case 0: - for { - if index >= l { - return 0, io.ErrUnexpectedEOF - } - index++ - if data[index-1] < 0x80 { - break - } - } - return index, nil - case 1: - index += 8 - return index, nil - case 2: - var length int - for shift := uint(0); ; shift += 7 { - if index >= l { - return 0, io.ErrUnexpectedEOF - } - b := data[index] - index++ - length |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - index += length - return index, nil - case 3: - for { - var innerWire uint64 - var start int = index - for shift := uint(0); ; shift += 7 { - if index >= l { - return 0, io.ErrUnexpectedEOF - } - b := data[index] - index++ - innerWire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - innerWireType := int(innerWire & 0x7) - if innerWireType == 4 { - break - } - next, err := Skip(data[start:]) - if err != nil { - return 0, err - } - index = start + next - } - return index, nil - case 4: - return index, nil - case 5: - index += 4 - return index, nil - default: - return 0, fmt.Errorf("proto: illegal wireType %d", wireType) - } - } - panic("unreachable") -} diff --git a/vendor/github.com/gogo/protobuf/proto/table_marshal.go b/vendor/github.com/gogo/protobuf/proto/table_marshal.go deleted file mode 100644 index f8babdefab..0000000000 --- a/vendor/github.com/gogo/protobuf/proto/table_marshal.go +++ /dev/null @@ -1,3009 +0,0 @@ -// Go support for Protocol Buffers - Google's data interchange format -// -// Copyright 2016 The Go Authors. All rights reserved. -// https://github.com/golang/protobuf -// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following disclaimer -// in the documentation and/or other materials provided with the -// distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived from -// this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
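Skip above is a complete wire-format walker; every key and every length prefix it reads is a base-128 varint (seven payload bits per byte, a set high bit meaning another byte follows). The varint loop in isolation:

package main

import (
	"fmt"
	"io"
)

func readVarint(data []byte) (uint64, int, error) {
	var v uint64
	for i, shift := 0, uint(0); i < len(data); i, shift = i+1, shift+7 {
		b := data[i]
		v |= uint64(b&0x7F) << shift
		if b < 0x80 { // high bit clear: last byte
			return v, i + 1, nil
		}
	}
	return 0, 0, io.ErrUnexpectedEOF
}

func main() {
	// 300 = 0b1_0010_1100 -> bytes 0xAC 0x02 on the wire.
	v, n, err := readVarint([]byte{0xAC, 0x02})
	fmt.Println(v, n, err) // 300 2 <nil>
}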
- -package proto - -import ( - "errors" - "fmt" - "math" - "reflect" - "sort" - "strconv" - "strings" - "sync" - "sync/atomic" - "unicode/utf8" -) - -// a sizer takes a pointer to a field and the size of its tag, computes the size of -// the encoded data. -type sizer func(pointer, int) int - -// a marshaler takes a byte slice, a pointer to a field, and its tag (in wire format), -// marshals the field to the end of the slice, returns the slice and error (if any). -type marshaler func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) - -// marshalInfo is the information used for marshaling a message. -type marshalInfo struct { - typ reflect.Type - fields []*marshalFieldInfo - unrecognized field // offset of XXX_unrecognized - extensions field // offset of XXX_InternalExtensions - v1extensions field // offset of XXX_extensions - sizecache field // offset of XXX_sizecache - initialized int32 // 0 -- only typ is set, 1 -- fully initialized - messageset bool // uses message set wire format - hasmarshaler bool // has custom marshaler - sync.RWMutex // protect extElems map, also for initialization - extElems map[int32]*marshalElemInfo // info of extension elements - - hassizer bool // has custom sizer - hasprotosizer bool // has custom protosizer - - bytesExtensions field // offset of XXX_extensions where the field type is []byte -} - -// marshalFieldInfo is the information used for marshaling a field of a message. -type marshalFieldInfo struct { - field field - wiretag uint64 // tag in wire format - tagsize int // size of tag in wire format - sizer sizer - marshaler marshaler - isPointer bool - required bool // field is required - name string // name of the field, for error reporting - oneofElems map[reflect.Type]*marshalElemInfo // info of oneof elements -} - -// marshalElemInfo is the information used for marshaling an extension or oneof element. -type marshalElemInfo struct { - wiretag uint64 // tag in wire format - tagsize int // size of tag in wire format - sizer sizer - marshaler marshaler - isptr bool // elem is pointer typed, thus interface of this type is a direct interface (extension only) -} - -var ( - marshalInfoMap = map[reflect.Type]*marshalInfo{} - marshalInfoLock sync.Mutex - - uint8SliceType = reflect.TypeOf(([]uint8)(nil)).Kind() -) - -// getMarshalInfo returns the information to marshal a given type of message. -// The info it returns may not necessarily initialized. -// t is the type of the message (NOT the pointer to it). -func getMarshalInfo(t reflect.Type) *marshalInfo { - marshalInfoLock.Lock() - u, ok := marshalInfoMap[t] - if !ok { - u = &marshalInfo{typ: t} - marshalInfoMap[t] = u - } - marshalInfoLock.Unlock() - return u -} - -// Size is the entry point from generated code, -// and should be ONLY called by generated code. -// It computes the size of encoded data of msg. -// a is a pointer to a place to store cached marshal info. -func (a *InternalMessageInfo) Size(msg Message) int { - u := getMessageMarshalInfo(msg, a) - ptr := toPointer(&msg) - if ptr.isNil() { - // We get here if msg is a typed nil ((*SomeMessage)(nil)), - // so it satisfies the interface, and msg == nil wouldn't - // catch it. We don't want crash in this case. - return 0 - } - return u.size(ptr) -} - -// Marshal is the entry point from generated code, -// and should be ONLY called by generated code. -// It marshals msg to the end of b. -// a is a pointer to a place to store cached marshal info. 
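marshalInfo records are allocated eagerly under marshalInfoLock but filled in lazily: fast paths check the atomic initialized flag and only fall into computeMarshalInfo once. The double-checked shape of that, reduced to a sketch:

package main

import (
	"fmt"
	"sync"
	"sync/atomic"
)

type lazy struct {
	initialized int32 // 0 = only allocated, 1 = fully built
	mu          sync.Mutex
	value       string
}

func (l *lazy) get() string {
	if atomic.LoadInt32(&l.initialized) == 0 {
		l.compute()
	}
	return l.value
}

func (l *lazy) compute() {
	l.mu.Lock()
	defer l.mu.Unlock()
	if l.initialized != 0 { // non-atomic read is fine under the lock
		return
	}
	l.value = "built once"
	atomic.StoreInt32(&l.initialized, 1) // publish last
}

func main() {
	var l lazy
	fmt.Println(l.get())
	fmt.Println(l.get())
}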
-func (a *InternalMessageInfo) Marshal(b []byte, msg Message, deterministic bool) ([]byte, error) { - u := getMessageMarshalInfo(msg, a) - ptr := toPointer(&msg) - if ptr.isNil() { - // We get here if msg is a typed nil ((*SomeMessage)(nil)), - // so it satisfies the interface, and msg == nil wouldn't - // catch it. We don't want crash in this case. - return b, ErrNil - } - return u.marshal(b, ptr, deterministic) -} - -func getMessageMarshalInfo(msg interface{}, a *InternalMessageInfo) *marshalInfo { - // u := a.marshal, but atomically. - // We use an atomic here to ensure memory consistency. - u := atomicLoadMarshalInfo(&a.marshal) - if u == nil { - // Get marshal information from type of message. - t := reflect.ValueOf(msg).Type() - if t.Kind() != reflect.Ptr { - panic(fmt.Sprintf("cannot handle non-pointer message type %v", t)) - } - u = getMarshalInfo(t.Elem()) - // Store it in the cache for later users. - // a.marshal = u, but atomically. - atomicStoreMarshalInfo(&a.marshal, u) - } - return u -} - -// size is the main function to compute the size of the encoded data of a message. -// ptr is the pointer to the message. -func (u *marshalInfo) size(ptr pointer) int { - if atomic.LoadInt32(&u.initialized) == 0 { - u.computeMarshalInfo() - } - - // If the message can marshal itself, let it do it, for compatibility. - // NOTE: This is not efficient. - if u.hasmarshaler { - // Uses the message's Size method if available - if u.hassizer { - s := ptr.asPointerTo(u.typ).Interface().(Sizer) - return s.Size() - } - // Uses the message's ProtoSize method if available - if u.hasprotosizer { - s := ptr.asPointerTo(u.typ).Interface().(ProtoSizer) - return s.ProtoSize() - } - - m := ptr.asPointerTo(u.typ).Interface().(Marshaler) - b, _ := m.Marshal() - return len(b) - } - - n := 0 - for _, f := range u.fields { - if f.isPointer && ptr.offset(f.field).getPointer().isNil() { - // nil pointer always marshals to nothing - continue - } - n += f.sizer(ptr.offset(f.field), f.tagsize) - } - if u.extensions.IsValid() { - e := ptr.offset(u.extensions).toExtensions() - if u.messageset { - n += u.sizeMessageSet(e) - } else { - n += u.sizeExtensions(e) - } - } - if u.v1extensions.IsValid() { - m := *ptr.offset(u.v1extensions).toOldExtensions() - n += u.sizeV1Extensions(m) - } - if u.bytesExtensions.IsValid() { - s := *ptr.offset(u.bytesExtensions).toBytes() - n += len(s) - } - if u.unrecognized.IsValid() { - s := *ptr.offset(u.unrecognized).toBytes() - n += len(s) - } - - // cache the result for use in marshal - if u.sizecache.IsValid() { - atomic.StoreInt32(ptr.offset(u.sizecache).toInt32(), int32(n)) - } - return n -} - -// cachedsize gets the size from cache. If there is no cache (i.e. message is not generated), -// fall back to compute the size. -func (u *marshalInfo) cachedsize(ptr pointer) int { - if u.sizecache.IsValid() { - return int(atomic.LoadInt32(ptr.offset(u.sizecache).toInt32())) - } - return u.size(ptr) -} - -// marshal is the main function to marshal a message. It takes a byte slice and appends -// the encoded data to the end of the slice, returns the slice and error (if any). -// ptr is the pointer to the message. -// If deterministic is true, map is marshaled in deterministic order. -func (u *marshalInfo) marshal(b []byte, ptr pointer, deterministic bool) ([]byte, error) { - if atomic.LoadInt32(&u.initialized) == 0 { - u.computeMarshalInfo() - } - - // If the message can marshal itself, let it do it, for compatibility. - // NOTE: This is not efficient. 
- if u.hasmarshaler { - m := ptr.asPointerTo(u.typ).Interface().(Marshaler) - b1, err := m.Marshal() - b = append(b, b1...) - return b, err - } - - var err, errLater error - // The old marshaler encodes extensions at beginning. - if u.extensions.IsValid() { - e := ptr.offset(u.extensions).toExtensions() - if u.messageset { - b, err = u.appendMessageSet(b, e, deterministic) - } else { - b, err = u.appendExtensions(b, e, deterministic) - } - if err != nil { - return b, err - } - } - if u.v1extensions.IsValid() { - m := *ptr.offset(u.v1extensions).toOldExtensions() - b, err = u.appendV1Extensions(b, m, deterministic) - if err != nil { - return b, err - } - } - if u.bytesExtensions.IsValid() { - s := *ptr.offset(u.bytesExtensions).toBytes() - b = append(b, s...) - } - for _, f := range u.fields { - if f.required { - if f.isPointer && ptr.offset(f.field).getPointer().isNil() { - // Required field is not set. - // We record the error but keep going, to give a complete marshaling. - if errLater == nil { - errLater = &RequiredNotSetError{f.name} - } - continue - } - } - if f.isPointer && ptr.offset(f.field).getPointer().isNil() { - // nil pointer always marshals to nothing - continue - } - b, err = f.marshaler(b, ptr.offset(f.field), f.wiretag, deterministic) - if err != nil { - if err1, ok := err.(*RequiredNotSetError); ok { - // Required field in submessage is not set. - // We record the error but keep going, to give a complete marshaling. - if errLater == nil { - errLater = &RequiredNotSetError{f.name + "." + err1.field} - } - continue - } - if err == errRepeatedHasNil { - err = errors.New("proto: repeated field " + f.name + " has nil element") - } - if err == errInvalidUTF8 { - if errLater == nil { - fullName := revProtoTypes[reflect.PtrTo(u.typ)] + "." + f.name - errLater = &invalidUTF8Error{fullName} - } - continue - } - return b, err - } - } - if u.unrecognized.IsValid() { - s := *ptr.offset(u.unrecognized).toBytes() - b = append(b, s...) - } - return b, errLater -} - -// computeMarshalInfo initializes the marshal info. -func (u *marshalInfo) computeMarshalInfo() { - u.Lock() - defer u.Unlock() - if u.initialized != 0 { // non-atomic read is ok as it is protected by the lock - return - } - - t := u.typ - u.unrecognized = invalidField - u.extensions = invalidField - u.v1extensions = invalidField - u.bytesExtensions = invalidField - u.sizecache = invalidField - isOneofMessage := false - - if reflect.PtrTo(t).Implements(sizerType) { - u.hassizer = true - } - if reflect.PtrTo(t).Implements(protosizerType) { - u.hasprotosizer = true - } - // If the message can marshal itself, let it do it, for compatibility. - // NOTE: This is not efficient. 
- if reflect.PtrTo(t).Implements(marshalerType) { - u.hasmarshaler = true - atomic.StoreInt32(&u.initialized, 1) - return - } - - n := t.NumField() - - // deal with XXX fields first - for i := 0; i < t.NumField(); i++ { - f := t.Field(i) - if f.Tag.Get("protobuf_oneof") != "" { - isOneofMessage = true - } - if !strings.HasPrefix(f.Name, "XXX_") { - continue - } - switch f.Name { - case "XXX_sizecache": - u.sizecache = toField(&f) - case "XXX_unrecognized": - u.unrecognized = toField(&f) - case "XXX_InternalExtensions": - u.extensions = toField(&f) - u.messageset = f.Tag.Get("protobuf_messageset") == "1" - case "XXX_extensions": - if f.Type.Kind() == reflect.Map { - u.v1extensions = toField(&f) - } else { - u.bytesExtensions = toField(&f) - } - case "XXX_NoUnkeyedLiteral": - // nothing to do - default: - panic("unknown XXX field: " + f.Name) - } - n-- - } - - // get oneof implementers - var oneofImplementers []interface{} - // gogo: isOneofMessage is needed for embedded oneof messages, without a marshaler and unmarshaler - if isOneofMessage { - switch m := reflect.Zero(reflect.PtrTo(t)).Interface().(type) { - case oneofFuncsIface: - _, _, _, oneofImplementers = m.XXX_OneofFuncs() - case oneofWrappersIface: - oneofImplementers = m.XXX_OneofWrappers() - } - } - - // normal fields - fields := make([]marshalFieldInfo, n) // batch allocation - u.fields = make([]*marshalFieldInfo, 0, n) - for i, j := 0, 0; i < t.NumField(); i++ { - f := t.Field(i) - - if strings.HasPrefix(f.Name, "XXX_") { - continue - } - field := &fields[j] - j++ - field.name = f.Name - u.fields = append(u.fields, field) - if f.Tag.Get("protobuf_oneof") != "" { - field.computeOneofFieldInfo(&f, oneofImplementers) - continue - } - if f.Tag.Get("protobuf") == "" { - // field has no tag (not in generated message), ignore it - u.fields = u.fields[:len(u.fields)-1] - j-- - continue - } - field.computeMarshalFieldInfo(&f) - } - - // fields are marshaled in tag order on the wire. - sort.Sort(byTag(u.fields)) - - atomic.StoreInt32(&u.initialized, 1) -} - -// helper for sorting fields by tag -type byTag []*marshalFieldInfo - -func (a byTag) Len() int { return len(a) } -func (a byTag) Swap(i, j int) { a[i], a[j] = a[j], a[i] } -func (a byTag) Less(i, j int) bool { return a[i].wiretag < a[j].wiretag } - -// getExtElemInfo returns the information to marshal an extension element. -// The info it returns is initialized. -func (u *marshalInfo) getExtElemInfo(desc *ExtensionDesc) *marshalElemInfo { - // get from cache first - u.RLock() - e, ok := u.extElems[desc.Field] - u.RUnlock() - if ok { - return e - } - - t := reflect.TypeOf(desc.ExtensionType) // pointer or slice to basic type or struct - tags := strings.Split(desc.Tag, ",") - tag, err := strconv.Atoi(tags[1]) - if err != nil { - panic("tag is not an integer") - } - wt := wiretype(tags[0]) - sizr, marshalr := typeMarshaler(t, tags, false, false) - e = &marshalElemInfo{ - wiretag: uint64(tag)<<3 | wt, - tagsize: SizeVarint(uint64(tag) << 3), - sizer: sizr, - marshaler: marshalr, - isptr: t.Kind() == reflect.Ptr, - } - - // update cache - u.Lock() - if u.extElems == nil { - u.extElems = make(map[int32]*marshalElemInfo) - } - u.extElems[desc.Field] = e - u.Unlock() - return e -} - -// computeMarshalFieldInfo fills up the information to marshal a field. -func (fi *marshalFieldInfo) computeMarshalFieldInfo(f *reflect.StructField) { - // parse protobuf tag of the field. - // tag has format of "bytes,49,opt,name=foo,def=hello!" 
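getExtElemInfo above uses the usual RWMutex cache idiom: probe under RLock, and on a miss compute and insert under the full Lock, tolerating the benign race where two goroutines both build the same element info. In miniature:

package main

import (
	"fmt"
	"sync"
)

type cache struct {
	sync.RWMutex
	m map[int32]string
}

func (c *cache) get(k int32, build func() string) string {
	c.RLock()
	v, ok := c.m[k]
	c.RUnlock()
	if ok {
		return v
	}
	v = build() // may run twice under contention; both results are equal

	c.Lock()
	if c.m == nil {
		c.m = make(map[int32]string)
	}
	c.m[k] = v
	c.Unlock()
	return v
}

func main() {
	var c cache
	fmt.Println(c.get(7, func() string { return "elem-7" }))
	fmt.Println(c.get(7, func() string { return "recomputed" })) // cached: elem-7
}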
- tags := strings.Split(f.Tag.Get("protobuf"), ",") - if tags[0] == "" { - return - } - tag, err := strconv.Atoi(tags[1]) - if err != nil { - panic("tag is not an integer") - } - wt := wiretype(tags[0]) - if tags[2] == "req" { - fi.required = true - } - fi.setTag(f, tag, wt) - fi.setMarshaler(f, tags) -} - -func (fi *marshalFieldInfo) computeOneofFieldInfo(f *reflect.StructField, oneofImplementers []interface{}) { - fi.field = toField(f) - fi.wiretag = math.MaxInt32 // Use a large tag number, make oneofs sorted at the end. This tag will not appear on the wire. - fi.isPointer = true - fi.sizer, fi.marshaler = makeOneOfMarshaler(fi, f) - fi.oneofElems = make(map[reflect.Type]*marshalElemInfo) - - ityp := f.Type // interface type - for _, o := range oneofImplementers { - t := reflect.TypeOf(o) - if !t.Implements(ityp) { - continue - } - sf := t.Elem().Field(0) // oneof implementer is a struct with a single field - tags := strings.Split(sf.Tag.Get("protobuf"), ",") - tag, err := strconv.Atoi(tags[1]) - if err != nil { - panic("tag is not an integer") - } - wt := wiretype(tags[0]) - sizr, marshalr := typeMarshaler(sf.Type, tags, false, true) // oneof should not omit any zero value - fi.oneofElems[t.Elem()] = &marshalElemInfo{ - wiretag: uint64(tag)<<3 | wt, - tagsize: SizeVarint(uint64(tag) << 3), - sizer: sizr, - marshaler: marshalr, - } - } -} - -// wiretype returns the wire encoding of the type. -func wiretype(encoding string) uint64 { - switch encoding { - case "fixed32": - return WireFixed32 - case "fixed64": - return WireFixed64 - case "varint", "zigzag32", "zigzag64": - return WireVarint - case "bytes": - return WireBytes - case "group": - return WireStartGroup - } - panic("unknown wire type " + encoding) -} - -// setTag fills up the tag (in wire format) and its size in the info of a field. -func (fi *marshalFieldInfo) setTag(f *reflect.StructField, tag int, wt uint64) { - fi.field = toField(f) - fi.wiretag = uint64(tag)<<3 | wt - fi.tagsize = SizeVarint(uint64(tag) << 3) -} - -// setMarshaler fills up the sizer and marshaler in the info of a field. -func (fi *marshalFieldInfo) setMarshaler(f *reflect.StructField, tags []string) { - switch f.Type.Kind() { - case reflect.Map: - // map field - fi.isPointer = true - fi.sizer, fi.marshaler = makeMapMarshaler(f) - return - case reflect.Ptr, reflect.Slice: - fi.isPointer = true - } - fi.sizer, fi.marshaler = typeMarshaler(f.Type, tags, true, false) -} - -// typeMarshaler returns the sizer and marshaler of a given field. -// t is the type of the field. -// tags is the generated "protobuf" tag of the field. -// If nozero is true, zero value is not marshaled to the wire. -// If oneof is true, it is a oneof field. 
-func typeMarshaler(t reflect.Type, tags []string, nozero, oneof bool) (sizer, marshaler) { - encoding := tags[0] - - pointer := false - slice := false - if t.Kind() == reflect.Slice && t.Elem().Kind() != reflect.Uint8 { - slice = true - t = t.Elem() - } - if t.Kind() == reflect.Ptr { - pointer = true - t = t.Elem() - } - - packed := false - proto3 := false - ctype := false - isTime := false - isDuration := false - isWktPointer := false - validateUTF8 := true - for i := 2; i < len(tags); i++ { - if tags[i] == "packed" { - packed = true - } - if tags[i] == "proto3" { - proto3 = true - } - if strings.HasPrefix(tags[i], "customtype=") { - ctype = true - } - if tags[i] == "stdtime" { - isTime = true - } - if tags[i] == "stdduration" { - isDuration = true - } - if tags[i] == "wktptr" { - isWktPointer = true - } - } - validateUTF8 = validateUTF8 && proto3 - if !proto3 && !pointer && !slice { - nozero = false - } - - if ctype { - if reflect.PtrTo(t).Implements(customType) { - if slice { - return makeMessageRefSliceMarshaler(getMarshalInfo(t)) - } - if pointer { - return makeCustomPtrMarshaler(getMarshalInfo(t)) - } - return makeCustomMarshaler(getMarshalInfo(t)) - } else { - panic(fmt.Sprintf("custom type: type: %v, does not implement the proto.custom interface", t)) - } - } - - if isTime { - if pointer { - if slice { - return makeTimePtrSliceMarshaler(getMarshalInfo(t)) - } - return makeTimePtrMarshaler(getMarshalInfo(t)) - } - if slice { - return makeTimeSliceMarshaler(getMarshalInfo(t)) - } - return makeTimeMarshaler(getMarshalInfo(t)) - } - - if isDuration { - if pointer { - if slice { - return makeDurationPtrSliceMarshaler(getMarshalInfo(t)) - } - return makeDurationPtrMarshaler(getMarshalInfo(t)) - } - if slice { - return makeDurationSliceMarshaler(getMarshalInfo(t)) - } - return makeDurationMarshaler(getMarshalInfo(t)) - } - - if isWktPointer { - switch t.Kind() { - case reflect.Float64: - if pointer { - if slice { - return makeStdDoubleValuePtrSliceMarshaler(getMarshalInfo(t)) - } - return makeStdDoubleValuePtrMarshaler(getMarshalInfo(t)) - } - if slice { - return makeStdDoubleValueSliceMarshaler(getMarshalInfo(t)) - } - return makeStdDoubleValueMarshaler(getMarshalInfo(t)) - case reflect.Float32: - if pointer { - if slice { - return makeStdFloatValuePtrSliceMarshaler(getMarshalInfo(t)) - } - return makeStdFloatValuePtrMarshaler(getMarshalInfo(t)) - } - if slice { - return makeStdFloatValueSliceMarshaler(getMarshalInfo(t)) - } - return makeStdFloatValueMarshaler(getMarshalInfo(t)) - case reflect.Int64: - if pointer { - if slice { - return makeStdInt64ValuePtrSliceMarshaler(getMarshalInfo(t)) - } - return makeStdInt64ValuePtrMarshaler(getMarshalInfo(t)) - } - if slice { - return makeStdInt64ValueSliceMarshaler(getMarshalInfo(t)) - } - return makeStdInt64ValueMarshaler(getMarshalInfo(t)) - case reflect.Uint64: - if pointer { - if slice { - return makeStdUInt64ValuePtrSliceMarshaler(getMarshalInfo(t)) - } - return makeStdUInt64ValuePtrMarshaler(getMarshalInfo(t)) - } - if slice { - return makeStdUInt64ValueSliceMarshaler(getMarshalInfo(t)) - } - return makeStdUInt64ValueMarshaler(getMarshalInfo(t)) - case reflect.Int32: - if pointer { - if slice { - return makeStdInt32ValuePtrSliceMarshaler(getMarshalInfo(t)) - } - return makeStdInt32ValuePtrMarshaler(getMarshalInfo(t)) - } - if slice { - return makeStdInt32ValueSliceMarshaler(getMarshalInfo(t)) - } - return makeStdInt32ValueMarshaler(getMarshalInfo(t)) - case reflect.Uint32: - if pointer { - if slice { - return 
makeStdUInt32ValuePtrSliceMarshaler(getMarshalInfo(t)) - } - return makeStdUInt32ValuePtrMarshaler(getMarshalInfo(t)) - } - if slice { - return makeStdUInt32ValueSliceMarshaler(getMarshalInfo(t)) - } - return makeStdUInt32ValueMarshaler(getMarshalInfo(t)) - case reflect.Bool: - if pointer { - if slice { - return makeStdBoolValuePtrSliceMarshaler(getMarshalInfo(t)) - } - return makeStdBoolValuePtrMarshaler(getMarshalInfo(t)) - } - if slice { - return makeStdBoolValueSliceMarshaler(getMarshalInfo(t)) - } - return makeStdBoolValueMarshaler(getMarshalInfo(t)) - case reflect.String: - if pointer { - if slice { - return makeStdStringValuePtrSliceMarshaler(getMarshalInfo(t)) - } - return makeStdStringValuePtrMarshaler(getMarshalInfo(t)) - } - if slice { - return makeStdStringValueSliceMarshaler(getMarshalInfo(t)) - } - return makeStdStringValueMarshaler(getMarshalInfo(t)) - case uint8SliceType: - if pointer { - if slice { - return makeStdBytesValuePtrSliceMarshaler(getMarshalInfo(t)) - } - return makeStdBytesValuePtrMarshaler(getMarshalInfo(t)) - } - if slice { - return makeStdBytesValueSliceMarshaler(getMarshalInfo(t)) - } - return makeStdBytesValueMarshaler(getMarshalInfo(t)) - default: - panic(fmt.Sprintf("unknown wktpointer type %#v", t)) - } - } - - switch t.Kind() { - case reflect.Bool: - if pointer { - return sizeBoolPtr, appendBoolPtr - } - if slice { - if packed { - return sizeBoolPackedSlice, appendBoolPackedSlice - } - return sizeBoolSlice, appendBoolSlice - } - if nozero { - return sizeBoolValueNoZero, appendBoolValueNoZero - } - return sizeBoolValue, appendBoolValue - case reflect.Uint32: - switch encoding { - case "fixed32": - if pointer { - return sizeFixed32Ptr, appendFixed32Ptr - } - if slice { - if packed { - return sizeFixed32PackedSlice, appendFixed32PackedSlice - } - return sizeFixed32Slice, appendFixed32Slice - } - if nozero { - return sizeFixed32ValueNoZero, appendFixed32ValueNoZero - } - return sizeFixed32Value, appendFixed32Value - case "varint": - if pointer { - return sizeVarint32Ptr, appendVarint32Ptr - } - if slice { - if packed { - return sizeVarint32PackedSlice, appendVarint32PackedSlice - } - return sizeVarint32Slice, appendVarint32Slice - } - if nozero { - return sizeVarint32ValueNoZero, appendVarint32ValueNoZero - } - return sizeVarint32Value, appendVarint32Value - } - case reflect.Int32: - switch encoding { - case "fixed32": - if pointer { - return sizeFixedS32Ptr, appendFixedS32Ptr - } - if slice { - if packed { - return sizeFixedS32PackedSlice, appendFixedS32PackedSlice - } - return sizeFixedS32Slice, appendFixedS32Slice - } - if nozero { - return sizeFixedS32ValueNoZero, appendFixedS32ValueNoZero - } - return sizeFixedS32Value, appendFixedS32Value - case "varint": - if pointer { - return sizeVarintS32Ptr, appendVarintS32Ptr - } - if slice { - if packed { - return sizeVarintS32PackedSlice, appendVarintS32PackedSlice - } - return sizeVarintS32Slice, appendVarintS32Slice - } - if nozero { - return sizeVarintS32ValueNoZero, appendVarintS32ValueNoZero - } - return sizeVarintS32Value, appendVarintS32Value - case "zigzag32": - if pointer { - return sizeZigzag32Ptr, appendZigzag32Ptr - } - if slice { - if packed { - return sizeZigzag32PackedSlice, appendZigzag32PackedSlice - } - return sizeZigzag32Slice, appendZigzag32Slice - } - if nozero { - return sizeZigzag32ValueNoZero, appendZigzag32ValueNoZero - } - return sizeZigzag32Value, appendZigzag32Value - } - case reflect.Uint64: - switch encoding { - case "fixed64": - if pointer { - return sizeFixed64Ptr, 
appendFixed64Ptr - } - if slice { - if packed { - return sizeFixed64PackedSlice, appendFixed64PackedSlice - } - return sizeFixed64Slice, appendFixed64Slice - } - if nozero { - return sizeFixed64ValueNoZero, appendFixed64ValueNoZero - } - return sizeFixed64Value, appendFixed64Value - case "varint": - if pointer { - return sizeVarint64Ptr, appendVarint64Ptr - } - if slice { - if packed { - return sizeVarint64PackedSlice, appendVarint64PackedSlice - } - return sizeVarint64Slice, appendVarint64Slice - } - if nozero { - return sizeVarint64ValueNoZero, appendVarint64ValueNoZero - } - return sizeVarint64Value, appendVarint64Value - } - case reflect.Int64: - switch encoding { - case "fixed64": - if pointer { - return sizeFixedS64Ptr, appendFixedS64Ptr - } - if slice { - if packed { - return sizeFixedS64PackedSlice, appendFixedS64PackedSlice - } - return sizeFixedS64Slice, appendFixedS64Slice - } - if nozero { - return sizeFixedS64ValueNoZero, appendFixedS64ValueNoZero - } - return sizeFixedS64Value, appendFixedS64Value - case "varint": - if pointer { - return sizeVarintS64Ptr, appendVarintS64Ptr - } - if slice { - if packed { - return sizeVarintS64PackedSlice, appendVarintS64PackedSlice - } - return sizeVarintS64Slice, appendVarintS64Slice - } - if nozero { - return sizeVarintS64ValueNoZero, appendVarintS64ValueNoZero - } - return sizeVarintS64Value, appendVarintS64Value - case "zigzag64": - if pointer { - return sizeZigzag64Ptr, appendZigzag64Ptr - } - if slice { - if packed { - return sizeZigzag64PackedSlice, appendZigzag64PackedSlice - } - return sizeZigzag64Slice, appendZigzag64Slice - } - if nozero { - return sizeZigzag64ValueNoZero, appendZigzag64ValueNoZero - } - return sizeZigzag64Value, appendZigzag64Value - } - case reflect.Float32: - if pointer { - return sizeFloat32Ptr, appendFloat32Ptr - } - if slice { - if packed { - return sizeFloat32PackedSlice, appendFloat32PackedSlice - } - return sizeFloat32Slice, appendFloat32Slice - } - if nozero { - return sizeFloat32ValueNoZero, appendFloat32ValueNoZero - } - return sizeFloat32Value, appendFloat32Value - case reflect.Float64: - if pointer { - return sizeFloat64Ptr, appendFloat64Ptr - } - if slice { - if packed { - return sizeFloat64PackedSlice, appendFloat64PackedSlice - } - return sizeFloat64Slice, appendFloat64Slice - } - if nozero { - return sizeFloat64ValueNoZero, appendFloat64ValueNoZero - } - return sizeFloat64Value, appendFloat64Value - case reflect.String: - if validateUTF8 { - if pointer { - return sizeStringPtr, appendUTF8StringPtr - } - if slice { - return sizeStringSlice, appendUTF8StringSlice - } - if nozero { - return sizeStringValueNoZero, appendUTF8StringValueNoZero - } - return sizeStringValue, appendUTF8StringValue - } - if pointer { - return sizeStringPtr, appendStringPtr - } - if slice { - return sizeStringSlice, appendStringSlice - } - if nozero { - return sizeStringValueNoZero, appendStringValueNoZero - } - return sizeStringValue, appendStringValue - case reflect.Slice: - if slice { - return sizeBytesSlice, appendBytesSlice - } - if oneof { - // Oneof bytes field may also have "proto3" tag. - // We want to marshal it as a oneof field. Do this - // check before the proto3 check. 
- return sizeBytesOneof, appendBytesOneof - } - if proto3 { - return sizeBytes3, appendBytes3 - } - return sizeBytes, appendBytes - case reflect.Struct: - switch encoding { - case "group": - if slice { - return makeGroupSliceMarshaler(getMarshalInfo(t)) - } - return makeGroupMarshaler(getMarshalInfo(t)) - case "bytes": - if pointer { - if slice { - return makeMessageSliceMarshaler(getMarshalInfo(t)) - } - return makeMessageMarshaler(getMarshalInfo(t)) - } else { - if slice { - return makeMessageRefSliceMarshaler(getMarshalInfo(t)) - } - return makeMessageRefMarshaler(getMarshalInfo(t)) - } - } - } - panic(fmt.Sprintf("unknown or mismatched type: type: %v, wire type: %v", t, encoding)) -} - -// Below are functions to size/marshal a specific type of a field. -// They are stored in the field's info, and called by function pointers. -// They have type sizer or marshaler. - -func sizeFixed32Value(_ pointer, tagsize int) int { - return 4 + tagsize -} -func sizeFixed32ValueNoZero(ptr pointer, tagsize int) int { - v := *ptr.toUint32() - if v == 0 { - return 0 - } - return 4 + tagsize -} -func sizeFixed32Ptr(ptr pointer, tagsize int) int { - p := *ptr.toUint32Ptr() - if p == nil { - return 0 - } - return 4 + tagsize -} -func sizeFixed32Slice(ptr pointer, tagsize int) int { - s := *ptr.toUint32Slice() - return (4 + tagsize) * len(s) -} -func sizeFixed32PackedSlice(ptr pointer, tagsize int) int { - s := *ptr.toUint32Slice() - if len(s) == 0 { - return 0 - } - return 4*len(s) + SizeVarint(uint64(4*len(s))) + tagsize -} -func sizeFixedS32Value(_ pointer, tagsize int) int { - return 4 + tagsize -} -func sizeFixedS32ValueNoZero(ptr pointer, tagsize int) int { - v := *ptr.toInt32() - if v == 0 { - return 0 - } - return 4 + tagsize -} -func sizeFixedS32Ptr(ptr pointer, tagsize int) int { - p := ptr.getInt32Ptr() - if p == nil { - return 0 - } - return 4 + tagsize -} -func sizeFixedS32Slice(ptr pointer, tagsize int) int { - s := ptr.getInt32Slice() - return (4 + tagsize) * len(s) -} -func sizeFixedS32PackedSlice(ptr pointer, tagsize int) int { - s := ptr.getInt32Slice() - if len(s) == 0 { - return 0 - } - return 4*len(s) + SizeVarint(uint64(4*len(s))) + tagsize -} -func sizeFloat32Value(_ pointer, tagsize int) int { - return 4 + tagsize -} -func sizeFloat32ValueNoZero(ptr pointer, tagsize int) int { - v := math.Float32bits(*ptr.toFloat32()) - if v == 0 { - return 0 - } - return 4 + tagsize -} -func sizeFloat32Ptr(ptr pointer, tagsize int) int { - p := *ptr.toFloat32Ptr() - if p == nil { - return 0 - } - return 4 + tagsize -} -func sizeFloat32Slice(ptr pointer, tagsize int) int { - s := *ptr.toFloat32Slice() - return (4 + tagsize) * len(s) -} -func sizeFloat32PackedSlice(ptr pointer, tagsize int) int { - s := *ptr.toFloat32Slice() - if len(s) == 0 { - return 0 - } - return 4*len(s) + SizeVarint(uint64(4*len(s))) + tagsize -} -func sizeFixed64Value(_ pointer, tagsize int) int { - return 8 + tagsize -} -func sizeFixed64ValueNoZero(ptr pointer, tagsize int) int { - v := *ptr.toUint64() - if v == 0 { - return 0 - } - return 8 + tagsize -} -func sizeFixed64Ptr(ptr pointer, tagsize int) int { - p := *ptr.toUint64Ptr() - if p == nil { - return 0 - } - return 8 + tagsize -} -func sizeFixed64Slice(ptr pointer, tagsize int) int { - s := *ptr.toUint64Slice() - return (8 + tagsize) * len(s) -} -func sizeFixed64PackedSlice(ptr pointer, tagsize int) int { - s := *ptr.toUint64Slice() - if len(s) == 0 { - return 0 - } - return 8*len(s) + SizeVarint(uint64(8*len(s))) + tagsize -} -func sizeFixedS64Value(_ pointer, tagsize 
int) int { - return 8 + tagsize -} -func sizeFixedS64ValueNoZero(ptr pointer, tagsize int) int { - v := *ptr.toInt64() - if v == 0 { - return 0 - } - return 8 + tagsize -} -func sizeFixedS64Ptr(ptr pointer, tagsize int) int { - p := *ptr.toInt64Ptr() - if p == nil { - return 0 - } - return 8 + tagsize -} -func sizeFixedS64Slice(ptr pointer, tagsize int) int { - s := *ptr.toInt64Slice() - return (8 + tagsize) * len(s) -} -func sizeFixedS64PackedSlice(ptr pointer, tagsize int) int { - s := *ptr.toInt64Slice() - if len(s) == 0 { - return 0 - } - return 8*len(s) + SizeVarint(uint64(8*len(s))) + tagsize -} -func sizeFloat64Value(_ pointer, tagsize int) int { - return 8 + tagsize -} -func sizeFloat64ValueNoZero(ptr pointer, tagsize int) int { - v := math.Float64bits(*ptr.toFloat64()) - if v == 0 { - return 0 - } - return 8 + tagsize -} -func sizeFloat64Ptr(ptr pointer, tagsize int) int { - p := *ptr.toFloat64Ptr() - if p == nil { - return 0 - } - return 8 + tagsize -} -func sizeFloat64Slice(ptr pointer, tagsize int) int { - s := *ptr.toFloat64Slice() - return (8 + tagsize) * len(s) -} -func sizeFloat64PackedSlice(ptr pointer, tagsize int) int { - s := *ptr.toFloat64Slice() - if len(s) == 0 { - return 0 - } - return 8*len(s) + SizeVarint(uint64(8*len(s))) + tagsize -} -func sizeVarint32Value(ptr pointer, tagsize int) int { - v := *ptr.toUint32() - return SizeVarint(uint64(v)) + tagsize -} -func sizeVarint32ValueNoZero(ptr pointer, tagsize int) int { - v := *ptr.toUint32() - if v == 0 { - return 0 - } - return SizeVarint(uint64(v)) + tagsize -} -func sizeVarint32Ptr(ptr pointer, tagsize int) int { - p := *ptr.toUint32Ptr() - if p == nil { - return 0 - } - return SizeVarint(uint64(*p)) + tagsize -} -func sizeVarint32Slice(ptr pointer, tagsize int) int { - s := *ptr.toUint32Slice() - n := 0 - for _, v := range s { - n += SizeVarint(uint64(v)) + tagsize - } - return n -} -func sizeVarint32PackedSlice(ptr pointer, tagsize int) int { - s := *ptr.toUint32Slice() - if len(s) == 0 { - return 0 - } - n := 0 - for _, v := range s { - n += SizeVarint(uint64(v)) - } - return n + SizeVarint(uint64(n)) + tagsize -} -func sizeVarintS32Value(ptr pointer, tagsize int) int { - v := *ptr.toInt32() - return SizeVarint(uint64(v)) + tagsize -} -func sizeVarintS32ValueNoZero(ptr pointer, tagsize int) int { - v := *ptr.toInt32() - if v == 0 { - return 0 - } - return SizeVarint(uint64(v)) + tagsize -} -func sizeVarintS32Ptr(ptr pointer, tagsize int) int { - p := ptr.getInt32Ptr() - if p == nil { - return 0 - } - return SizeVarint(uint64(*p)) + tagsize -} -func sizeVarintS32Slice(ptr pointer, tagsize int) int { - s := ptr.getInt32Slice() - n := 0 - for _, v := range s { - n += SizeVarint(uint64(v)) + tagsize - } - return n -} -func sizeVarintS32PackedSlice(ptr pointer, tagsize int) int { - s := ptr.getInt32Slice() - if len(s) == 0 { - return 0 - } - n := 0 - for _, v := range s { - n += SizeVarint(uint64(v)) - } - return n + SizeVarint(uint64(n)) + tagsize -} -func sizeVarint64Value(ptr pointer, tagsize int) int { - v := *ptr.toUint64() - return SizeVarint(v) + tagsize -} -func sizeVarint64ValueNoZero(ptr pointer, tagsize int) int { - v := *ptr.toUint64() - if v == 0 { - return 0 - } - return SizeVarint(v) + tagsize -} -func sizeVarint64Ptr(ptr pointer, tagsize int) int { - p := *ptr.toUint64Ptr() - if p == nil { - return 0 - } - return SizeVarint(*p) + tagsize -} -func sizeVarint64Slice(ptr pointer, tagsize int) int { - s := *ptr.toUint64Slice() - n := 0 - for _, v := range s { - n += SizeVarint(v) + tagsize - } - return 
n -} -func sizeVarint64PackedSlice(ptr pointer, tagsize int) int { - s := *ptr.toUint64Slice() - if len(s) == 0 { - return 0 - } - n := 0 - for _, v := range s { - n += SizeVarint(v) - } - return n + SizeVarint(uint64(n)) + tagsize -} -func sizeVarintS64Value(ptr pointer, tagsize int) int { - v := *ptr.toInt64() - return SizeVarint(uint64(v)) + tagsize -} -func sizeVarintS64ValueNoZero(ptr pointer, tagsize int) int { - v := *ptr.toInt64() - if v == 0 { - return 0 - } - return SizeVarint(uint64(v)) + tagsize -} -func sizeVarintS64Ptr(ptr pointer, tagsize int) int { - p := *ptr.toInt64Ptr() - if p == nil { - return 0 - } - return SizeVarint(uint64(*p)) + tagsize -} -func sizeVarintS64Slice(ptr pointer, tagsize int) int { - s := *ptr.toInt64Slice() - n := 0 - for _, v := range s { - n += SizeVarint(uint64(v)) + tagsize - } - return n -} -func sizeVarintS64PackedSlice(ptr pointer, tagsize int) int { - s := *ptr.toInt64Slice() - if len(s) == 0 { - return 0 - } - n := 0 - for _, v := range s { - n += SizeVarint(uint64(v)) - } - return n + SizeVarint(uint64(n)) + tagsize -} -func sizeZigzag32Value(ptr pointer, tagsize int) int { - v := *ptr.toInt32() - return SizeVarint(uint64((uint32(v)<<1)^uint32((int32(v)>>31)))) + tagsize -} -func sizeZigzag32ValueNoZero(ptr pointer, tagsize int) int { - v := *ptr.toInt32() - if v == 0 { - return 0 - } - return SizeVarint(uint64((uint32(v)<<1)^uint32((int32(v)>>31)))) + tagsize -} -func sizeZigzag32Ptr(ptr pointer, tagsize int) int { - p := ptr.getInt32Ptr() - if p == nil { - return 0 - } - v := *p - return SizeVarint(uint64((uint32(v)<<1)^uint32((int32(v)>>31)))) + tagsize -} -func sizeZigzag32Slice(ptr pointer, tagsize int) int { - s := ptr.getInt32Slice() - n := 0 - for _, v := range s { - n += SizeVarint(uint64((uint32(v)<<1)^uint32((int32(v)>>31)))) + tagsize - } - return n -} -func sizeZigzag32PackedSlice(ptr pointer, tagsize int) int { - s := ptr.getInt32Slice() - if len(s) == 0 { - return 0 - } - n := 0 - for _, v := range s { - n += SizeVarint(uint64((uint32(v) << 1) ^ uint32((int32(v) >> 31)))) - } - return n + SizeVarint(uint64(n)) + tagsize -} -func sizeZigzag64Value(ptr pointer, tagsize int) int { - v := *ptr.toInt64() - return SizeVarint(uint64(v<<1)^uint64((int64(v)>>63))) + tagsize -} -func sizeZigzag64ValueNoZero(ptr pointer, tagsize int) int { - v := *ptr.toInt64() - if v == 0 { - return 0 - } - return SizeVarint(uint64(v<<1)^uint64((int64(v)>>63))) + tagsize -} -func sizeZigzag64Ptr(ptr pointer, tagsize int) int { - p := *ptr.toInt64Ptr() - if p == nil { - return 0 - } - v := *p - return SizeVarint(uint64(v<<1)^uint64((int64(v)>>63))) + tagsize -} -func sizeZigzag64Slice(ptr pointer, tagsize int) int { - s := *ptr.toInt64Slice() - n := 0 - for _, v := range s { - n += SizeVarint(uint64(v<<1)^uint64((int64(v)>>63))) + tagsize - } - return n -} -func sizeZigzag64PackedSlice(ptr pointer, tagsize int) int { - s := *ptr.toInt64Slice() - if len(s) == 0 { - return 0 - } - n := 0 - for _, v := range s { - n += SizeVarint(uint64(v<<1) ^ uint64((int64(v) >> 63))) - } - return n + SizeVarint(uint64(n)) + tagsize -} -func sizeBoolValue(_ pointer, tagsize int) int { - return 1 + tagsize -} -func sizeBoolValueNoZero(ptr pointer, tagsize int) int { - v := *ptr.toBool() - if !v { - return 0 - } - return 1 + tagsize -} -func sizeBoolPtr(ptr pointer, tagsize int) int { - p := *ptr.toBoolPtr() - if p == nil { - return 0 - } - return 1 + tagsize -} -func sizeBoolSlice(ptr pointer, tagsize int) int { - s := *ptr.toBoolSlice() - return (1 + tagsize) * len(s) -} 
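The zigzag expressions repeated through the sizers above map signed integers to unsigned ones so that values near zero stay cheap on the wire, and SizeVarint charges one byte per started 7-bit group. A minimal sketch of both calculations, assuming only the standard library; sizeVarint and zigzag32 here are illustrative stand-ins for this package's own helpers:

package main

import "fmt"

// sizeVarint counts one byte per started 7-bit group, matching how
// varints are encoded.
func sizeVarint(v uint64) int {
	n := 1
	for v >= 1<<7 {
		v >>= 7
		n++
	}
	return n
}

// zigzag32 mirrors the (uint32(v)<<1)^uint32(v>>31) expression used by
// the zigzag sizers above; small negatives become small unsigned values.
func zigzag32(v int32) uint32 {
	return (uint32(v) << 1) ^ uint32(v>>31)
}

func main() {
	for _, v := range []int32{0, -1, 1, -2, 64} {
		z := zigzag32(v)
		fmt.Printf("%4d -> zigzag %3d -> %d varint byte(s)\n", v, z, sizeVarint(uint64(z)))
	}
}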
-func sizeBoolPackedSlice(ptr pointer, tagsize int) int { - s := *ptr.toBoolSlice() - if len(s) == 0 { - return 0 - } - return len(s) + SizeVarint(uint64(len(s))) + tagsize -} -func sizeStringValue(ptr pointer, tagsize int) int { - v := *ptr.toString() - return len(v) + SizeVarint(uint64(len(v))) + tagsize -} -func sizeStringValueNoZero(ptr pointer, tagsize int) int { - v := *ptr.toString() - if v == "" { - return 0 - } - return len(v) + SizeVarint(uint64(len(v))) + tagsize -} -func sizeStringPtr(ptr pointer, tagsize int) int { - p := *ptr.toStringPtr() - if p == nil { - return 0 - } - v := *p - return len(v) + SizeVarint(uint64(len(v))) + tagsize -} -func sizeStringSlice(ptr pointer, tagsize int) int { - s := *ptr.toStringSlice() - n := 0 - for _, v := range s { - n += len(v) + SizeVarint(uint64(len(v))) + tagsize - } - return n -} -func sizeBytes(ptr pointer, tagsize int) int { - v := *ptr.toBytes() - if v == nil { - return 0 - } - return len(v) + SizeVarint(uint64(len(v))) + tagsize -} -func sizeBytes3(ptr pointer, tagsize int) int { - v := *ptr.toBytes() - if len(v) == 0 { - return 0 - } - return len(v) + SizeVarint(uint64(len(v))) + tagsize -} -func sizeBytesOneof(ptr pointer, tagsize int) int { - v := *ptr.toBytes() - return len(v) + SizeVarint(uint64(len(v))) + tagsize -} -func sizeBytesSlice(ptr pointer, tagsize int) int { - s := *ptr.toBytesSlice() - n := 0 - for _, v := range s { - n += len(v) + SizeVarint(uint64(len(v))) + tagsize - } - return n -} - -// appendFixed32 appends an encoded fixed32 to b. -func appendFixed32(b []byte, v uint32) []byte { - b = append(b, - byte(v), - byte(v>>8), - byte(v>>16), - byte(v>>24)) - return b -} - -// appendFixed64 appends an encoded fixed64 to b. -func appendFixed64(b []byte, v uint64) []byte { - b = append(b, - byte(v), - byte(v>>8), - byte(v>>16), - byte(v>>24), - byte(v>>32), - byte(v>>40), - byte(v>>48), - byte(v>>56)) - return b -} - -// appendVarint appends an encoded varint to b. -func appendVarint(b []byte, v uint64) []byte { - // TODO: make 1-byte (maybe 2-byte) case inline-able, once we - // have non-leaf inliner. 
- switch { - case v < 1<<7: - b = append(b, byte(v)) - case v < 1<<14: - b = append(b, - byte(v&0x7f|0x80), - byte(v>>7)) - case v < 1<<21: - b = append(b, - byte(v&0x7f|0x80), - byte((v>>7)&0x7f|0x80), - byte(v>>14)) - case v < 1<<28: - b = append(b, - byte(v&0x7f|0x80), - byte((v>>7)&0x7f|0x80), - byte((v>>14)&0x7f|0x80), - byte(v>>21)) - case v < 1<<35: - b = append(b, - byte(v&0x7f|0x80), - byte((v>>7)&0x7f|0x80), - byte((v>>14)&0x7f|0x80), - byte((v>>21)&0x7f|0x80), - byte(v>>28)) - case v < 1<<42: - b = append(b, - byte(v&0x7f|0x80), - byte((v>>7)&0x7f|0x80), - byte((v>>14)&0x7f|0x80), - byte((v>>21)&0x7f|0x80), - byte((v>>28)&0x7f|0x80), - byte(v>>35)) - case v < 1<<49: - b = append(b, - byte(v&0x7f|0x80), - byte((v>>7)&0x7f|0x80), - byte((v>>14)&0x7f|0x80), - byte((v>>21)&0x7f|0x80), - byte((v>>28)&0x7f|0x80), - byte((v>>35)&0x7f|0x80), - byte(v>>42)) - case v < 1<<56: - b = append(b, - byte(v&0x7f|0x80), - byte((v>>7)&0x7f|0x80), - byte((v>>14)&0x7f|0x80), - byte((v>>21)&0x7f|0x80), - byte((v>>28)&0x7f|0x80), - byte((v>>35)&0x7f|0x80), - byte((v>>42)&0x7f|0x80), - byte(v>>49)) - case v < 1<<63: - b = append(b, - byte(v&0x7f|0x80), - byte((v>>7)&0x7f|0x80), - byte((v>>14)&0x7f|0x80), - byte((v>>21)&0x7f|0x80), - byte((v>>28)&0x7f|0x80), - byte((v>>35)&0x7f|0x80), - byte((v>>42)&0x7f|0x80), - byte((v>>49)&0x7f|0x80), - byte(v>>56)) - default: - b = append(b, - byte(v&0x7f|0x80), - byte((v>>7)&0x7f|0x80), - byte((v>>14)&0x7f|0x80), - byte((v>>21)&0x7f|0x80), - byte((v>>28)&0x7f|0x80), - byte((v>>35)&0x7f|0x80), - byte((v>>42)&0x7f|0x80), - byte((v>>49)&0x7f|0x80), - byte((v>>56)&0x7f|0x80), - 1) - } - return b -} - -func appendFixed32Value(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - v := *ptr.toUint32() - b = appendVarint(b, wiretag) - b = appendFixed32(b, v) - return b, nil -} -func appendFixed32ValueNoZero(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - v := *ptr.toUint32() - if v == 0 { - return b, nil - } - b = appendVarint(b, wiretag) - b = appendFixed32(b, v) - return b, nil -} -func appendFixed32Ptr(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - p := *ptr.toUint32Ptr() - if p == nil { - return b, nil - } - b = appendVarint(b, wiretag) - b = appendFixed32(b, *p) - return b, nil -} -func appendFixed32Slice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - s := *ptr.toUint32Slice() - for _, v := range s { - b = appendVarint(b, wiretag) - b = appendFixed32(b, v) - } - return b, nil -} -func appendFixed32PackedSlice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - s := *ptr.toUint32Slice() - if len(s) == 0 { - return b, nil - } - b = appendVarint(b, wiretag&^7|WireBytes) - b = appendVarint(b, uint64(4*len(s))) - for _, v := range s { - b = appendFixed32(b, v) - } - return b, nil -} -func appendFixedS32Value(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - v := *ptr.toInt32() - b = appendVarint(b, wiretag) - b = appendFixed32(b, uint32(v)) - return b, nil -} -func appendFixedS32ValueNoZero(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - v := *ptr.toInt32() - if v == 0 { - return b, nil - } - b = appendVarint(b, wiretag) - b = appendFixed32(b, uint32(v)) - return b, nil -} -func appendFixedS32Ptr(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - p := ptr.getInt32Ptr() - if p == nil { - return b, nil - } - b = appendVarint(b, wiretag) - b = appendFixed32(b, uint32(*p)) - return b, nil -} -func appendFixedS32Slice(b []byte, ptr pointer, 
wiretag uint64, _ bool) ([]byte, error) { - s := ptr.getInt32Slice() - for _, v := range s { - b = appendVarint(b, wiretag) - b = appendFixed32(b, uint32(v)) - } - return b, nil -} -func appendFixedS32PackedSlice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - s := ptr.getInt32Slice() - if len(s) == 0 { - return b, nil - } - b = appendVarint(b, wiretag&^7|WireBytes) - b = appendVarint(b, uint64(4*len(s))) - for _, v := range s { - b = appendFixed32(b, uint32(v)) - } - return b, nil -} -func appendFloat32Value(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - v := math.Float32bits(*ptr.toFloat32()) - b = appendVarint(b, wiretag) - b = appendFixed32(b, v) - return b, nil -} -func appendFloat32ValueNoZero(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - v := math.Float32bits(*ptr.toFloat32()) - if v == 0 { - return b, nil - } - b = appendVarint(b, wiretag) - b = appendFixed32(b, v) - return b, nil -} -func appendFloat32Ptr(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - p := *ptr.toFloat32Ptr() - if p == nil { - return b, nil - } - b = appendVarint(b, wiretag) - b = appendFixed32(b, math.Float32bits(*p)) - return b, nil -} -func appendFloat32Slice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - s := *ptr.toFloat32Slice() - for _, v := range s { - b = appendVarint(b, wiretag) - b = appendFixed32(b, math.Float32bits(v)) - } - return b, nil -} -func appendFloat32PackedSlice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - s := *ptr.toFloat32Slice() - if len(s) == 0 { - return b, nil - } - b = appendVarint(b, wiretag&^7|WireBytes) - b = appendVarint(b, uint64(4*len(s))) - for _, v := range s { - b = appendFixed32(b, math.Float32bits(v)) - } - return b, nil -} -func appendFixed64Value(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - v := *ptr.toUint64() - b = appendVarint(b, wiretag) - b = appendFixed64(b, v) - return b, nil -} -func appendFixed64ValueNoZero(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - v := *ptr.toUint64() - if v == 0 { - return b, nil - } - b = appendVarint(b, wiretag) - b = appendFixed64(b, v) - return b, nil -} -func appendFixed64Ptr(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - p := *ptr.toUint64Ptr() - if p == nil { - return b, nil - } - b = appendVarint(b, wiretag) - b = appendFixed64(b, *p) - return b, nil -} -func appendFixed64Slice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - s := *ptr.toUint64Slice() - for _, v := range s { - b = appendVarint(b, wiretag) - b = appendFixed64(b, v) - } - return b, nil -} -func appendFixed64PackedSlice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - s := *ptr.toUint64Slice() - if len(s) == 0 { - return b, nil - } - b = appendVarint(b, wiretag&^7|WireBytes) - b = appendVarint(b, uint64(8*len(s))) - for _, v := range s { - b = appendFixed64(b, v) - } - return b, nil -} -func appendFixedS64Value(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - v := *ptr.toInt64() - b = appendVarint(b, wiretag) - b = appendFixed64(b, uint64(v)) - return b, nil -} -func appendFixedS64ValueNoZero(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - v := *ptr.toInt64() - if v == 0 { - return b, nil - } - b = appendVarint(b, wiretag) - b = appendFixed64(b, uint64(v)) - return b, nil -} -func appendFixedS64Ptr(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - p := *ptr.toInt64Ptr() - if p == nil { - return b, 
nil - } - b = appendVarint(b, wiretag) - b = appendFixed64(b, uint64(*p)) - return b, nil -} -func appendFixedS64Slice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - s := *ptr.toInt64Slice() - for _, v := range s { - b = appendVarint(b, wiretag) - b = appendFixed64(b, uint64(v)) - } - return b, nil -} -func appendFixedS64PackedSlice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - s := *ptr.toInt64Slice() - if len(s) == 0 { - return b, nil - } - b = appendVarint(b, wiretag&^7|WireBytes) - b = appendVarint(b, uint64(8*len(s))) - for _, v := range s { - b = appendFixed64(b, uint64(v)) - } - return b, nil -} -func appendFloat64Value(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - v := math.Float64bits(*ptr.toFloat64()) - b = appendVarint(b, wiretag) - b = appendFixed64(b, v) - return b, nil -} -func appendFloat64ValueNoZero(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - v := math.Float64bits(*ptr.toFloat64()) - if v == 0 { - return b, nil - } - b = appendVarint(b, wiretag) - b = appendFixed64(b, v) - return b, nil -} -func appendFloat64Ptr(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - p := *ptr.toFloat64Ptr() - if p == nil { - return b, nil - } - b = appendVarint(b, wiretag) - b = appendFixed64(b, math.Float64bits(*p)) - return b, nil -} -func appendFloat64Slice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - s := *ptr.toFloat64Slice() - for _, v := range s { - b = appendVarint(b, wiretag) - b = appendFixed64(b, math.Float64bits(v)) - } - return b, nil -} -func appendFloat64PackedSlice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - s := *ptr.toFloat64Slice() - if len(s) == 0 { - return b, nil - } - b = appendVarint(b, wiretag&^7|WireBytes) - b = appendVarint(b, uint64(8*len(s))) - for _, v := range s { - b = appendFixed64(b, math.Float64bits(v)) - } - return b, nil -} -func appendVarint32Value(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - v := *ptr.toUint32() - b = appendVarint(b, wiretag) - b = appendVarint(b, uint64(v)) - return b, nil -} -func appendVarint32ValueNoZero(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - v := *ptr.toUint32() - if v == 0 { - return b, nil - } - b = appendVarint(b, wiretag) - b = appendVarint(b, uint64(v)) - return b, nil -} -func appendVarint32Ptr(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - p := *ptr.toUint32Ptr() - if p == nil { - return b, nil - } - b = appendVarint(b, wiretag) - b = appendVarint(b, uint64(*p)) - return b, nil -} -func appendVarint32Slice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - s := *ptr.toUint32Slice() - for _, v := range s { - b = appendVarint(b, wiretag) - b = appendVarint(b, uint64(v)) - } - return b, nil -} -func appendVarint32PackedSlice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - s := *ptr.toUint32Slice() - if len(s) == 0 { - return b, nil - } - b = appendVarint(b, wiretag&^7|WireBytes) - // compute size - n := 0 - for _, v := range s { - n += SizeVarint(uint64(v)) - } - b = appendVarint(b, uint64(n)) - for _, v := range s { - b = appendVarint(b, uint64(v)) - } - return b, nil -} -func appendVarintS32Value(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - v := *ptr.toInt32() - b = appendVarint(b, wiretag) - b = appendVarint(b, uint64(v)) - return b, nil -} -func appendVarintS32ValueNoZero(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - v := *ptr.toInt32() 
- if v == 0 { - return b, nil - } - b = appendVarint(b, wiretag) - b = appendVarint(b, uint64(v)) - return b, nil -} -func appendVarintS32Ptr(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - p := ptr.getInt32Ptr() - if p == nil { - return b, nil - } - b = appendVarint(b, wiretag) - b = appendVarint(b, uint64(*p)) - return b, nil -} -func appendVarintS32Slice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - s := ptr.getInt32Slice() - for _, v := range s { - b = appendVarint(b, wiretag) - b = appendVarint(b, uint64(v)) - } - return b, nil -} -func appendVarintS32PackedSlice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - s := ptr.getInt32Slice() - if len(s) == 0 { - return b, nil - } - b = appendVarint(b, wiretag&^7|WireBytes) - // compute size - n := 0 - for _, v := range s { - n += SizeVarint(uint64(v)) - } - b = appendVarint(b, uint64(n)) - for _, v := range s { - b = appendVarint(b, uint64(v)) - } - return b, nil -} -func appendVarint64Value(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - v := *ptr.toUint64() - b = appendVarint(b, wiretag) - b = appendVarint(b, v) - return b, nil -} -func appendVarint64ValueNoZero(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - v := *ptr.toUint64() - if v == 0 { - return b, nil - } - b = appendVarint(b, wiretag) - b = appendVarint(b, v) - return b, nil -} -func appendVarint64Ptr(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - p := *ptr.toUint64Ptr() - if p == nil { - return b, nil - } - b = appendVarint(b, wiretag) - b = appendVarint(b, *p) - return b, nil -} -func appendVarint64Slice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - s := *ptr.toUint64Slice() - for _, v := range s { - b = appendVarint(b, wiretag) - b = appendVarint(b, v) - } - return b, nil -} -func appendVarint64PackedSlice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - s := *ptr.toUint64Slice() - if len(s) == 0 { - return b, nil - } - b = appendVarint(b, wiretag&^7|WireBytes) - // compute size - n := 0 - for _, v := range s { - n += SizeVarint(v) - } - b = appendVarint(b, uint64(n)) - for _, v := range s { - b = appendVarint(b, v) - } - return b, nil -} -func appendVarintS64Value(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - v := *ptr.toInt64() - b = appendVarint(b, wiretag) - b = appendVarint(b, uint64(v)) - return b, nil -} -func appendVarintS64ValueNoZero(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - v := *ptr.toInt64() - if v == 0 { - return b, nil - } - b = appendVarint(b, wiretag) - b = appendVarint(b, uint64(v)) - return b, nil -} -func appendVarintS64Ptr(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - p := *ptr.toInt64Ptr() - if p == nil { - return b, nil - } - b = appendVarint(b, wiretag) - b = appendVarint(b, uint64(*p)) - return b, nil -} -func appendVarintS64Slice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - s := *ptr.toInt64Slice() - for _, v := range s { - b = appendVarint(b, wiretag) - b = appendVarint(b, uint64(v)) - } - return b, nil -} -func appendVarintS64PackedSlice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - s := *ptr.toInt64Slice() - if len(s) == 0 { - return b, nil - } - b = appendVarint(b, wiretag&^7|WireBytes) - // compute size - n := 0 - for _, v := range s { - n += SizeVarint(uint64(v)) - } - b = appendVarint(b, uint64(n)) - for _, v := range s { - b = appendVarint(b, uint64(v)) - } - return b, nil -} -func 
appendZigzag32Value(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - v := *ptr.toInt32() - b = appendVarint(b, wiretag) - b = appendVarint(b, uint64((uint32(v)<<1)^uint32((int32(v)>>31)))) - return b, nil -} -func appendZigzag32ValueNoZero(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - v := *ptr.toInt32() - if v == 0 { - return b, nil - } - b = appendVarint(b, wiretag) - b = appendVarint(b, uint64((uint32(v)<<1)^uint32((int32(v)>>31)))) - return b, nil -} -func appendZigzag32Ptr(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - p := ptr.getInt32Ptr() - if p == nil { - return b, nil - } - b = appendVarint(b, wiretag) - v := *p - b = appendVarint(b, uint64((uint32(v)<<1)^uint32((int32(v)>>31)))) - return b, nil -} -func appendZigzag32Slice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - s := ptr.getInt32Slice() - for _, v := range s { - b = appendVarint(b, wiretag) - b = appendVarint(b, uint64((uint32(v)<<1)^uint32((int32(v)>>31)))) - } - return b, nil -} -func appendZigzag32PackedSlice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - s := ptr.getInt32Slice() - if len(s) == 0 { - return b, nil - } - b = appendVarint(b, wiretag&^7|WireBytes) - // compute size - n := 0 - for _, v := range s { - n += SizeVarint(uint64((uint32(v) << 1) ^ uint32((int32(v) >> 31)))) - } - b = appendVarint(b, uint64(n)) - for _, v := range s { - b = appendVarint(b, uint64((uint32(v)<<1)^uint32((int32(v)>>31)))) - } - return b, nil -} -func appendZigzag64Value(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - v := *ptr.toInt64() - b = appendVarint(b, wiretag) - b = appendVarint(b, uint64(v<<1)^uint64((int64(v)>>63))) - return b, nil -} -func appendZigzag64ValueNoZero(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - v := *ptr.toInt64() - if v == 0 { - return b, nil - } - b = appendVarint(b, wiretag) - b = appendVarint(b, uint64(v<<1)^uint64((int64(v)>>63))) - return b, nil -} -func appendZigzag64Ptr(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - p := *ptr.toInt64Ptr() - if p == nil { - return b, nil - } - b = appendVarint(b, wiretag) - v := *p - b = appendVarint(b, uint64(v<<1)^uint64((int64(v)>>63))) - return b, nil -} -func appendZigzag64Slice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - s := *ptr.toInt64Slice() - for _, v := range s { - b = appendVarint(b, wiretag) - b = appendVarint(b, uint64(v<<1)^uint64((int64(v)>>63))) - } - return b, nil -} -func appendZigzag64PackedSlice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - s := *ptr.toInt64Slice() - if len(s) == 0 { - return b, nil - } - b = appendVarint(b, wiretag&^7|WireBytes) - // compute size - n := 0 - for _, v := range s { - n += SizeVarint(uint64(v<<1) ^ uint64((int64(v) >> 63))) - } - b = appendVarint(b, uint64(n)) - for _, v := range s { - b = appendVarint(b, uint64(v<<1)^uint64((int64(v)>>63))) - } - return b, nil -} -func appendBoolValue(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - v := *ptr.toBool() - b = appendVarint(b, wiretag) - if v { - b = append(b, 1) - } else { - b = append(b, 0) - } - return b, nil -} -func appendBoolValueNoZero(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - v := *ptr.toBool() - if !v { - return b, nil - } - b = appendVarint(b, wiretag) - b = append(b, 1) - return b, nil -} - -func appendBoolPtr(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - p := *ptr.toBoolPtr() - if p == nil { - 
return b, nil - } - b = appendVarint(b, wiretag) - if *p { - b = append(b, 1) - } else { - b = append(b, 0) - } - return b, nil -} -func appendBoolSlice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - s := *ptr.toBoolSlice() - for _, v := range s { - b = appendVarint(b, wiretag) - if v { - b = append(b, 1) - } else { - b = append(b, 0) - } - } - return b, nil -} -func appendBoolPackedSlice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - s := *ptr.toBoolSlice() - if len(s) == 0 { - return b, nil - } - b = appendVarint(b, wiretag&^7|WireBytes) - b = appendVarint(b, uint64(len(s))) - for _, v := range s { - if v { - b = append(b, 1) - } else { - b = append(b, 0) - } - } - return b, nil -} -func appendStringValue(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - v := *ptr.toString() - b = appendVarint(b, wiretag) - b = appendVarint(b, uint64(len(v))) - b = append(b, v...) - return b, nil -} -func appendStringValueNoZero(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - v := *ptr.toString() - if v == "" { - return b, nil - } - b = appendVarint(b, wiretag) - b = appendVarint(b, uint64(len(v))) - b = append(b, v...) - return b, nil -} -func appendStringPtr(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - p := *ptr.toStringPtr() - if p == nil { - return b, nil - } - v := *p - b = appendVarint(b, wiretag) - b = appendVarint(b, uint64(len(v))) - b = append(b, v...) - return b, nil -} -func appendStringSlice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - s := *ptr.toStringSlice() - for _, v := range s { - b = appendVarint(b, wiretag) - b = appendVarint(b, uint64(len(v))) - b = append(b, v...) - } - return b, nil -} -func appendUTF8StringValue(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - var invalidUTF8 bool - v := *ptr.toString() - if !utf8.ValidString(v) { - invalidUTF8 = true - } - b = appendVarint(b, wiretag) - b = appendVarint(b, uint64(len(v))) - b = append(b, v...) - if invalidUTF8 { - return b, errInvalidUTF8 - } - return b, nil -} -func appendUTF8StringValueNoZero(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - var invalidUTF8 bool - v := *ptr.toString() - if v == "" { - return b, nil - } - if !utf8.ValidString(v) { - invalidUTF8 = true - } - b = appendVarint(b, wiretag) - b = appendVarint(b, uint64(len(v))) - b = append(b, v...) - if invalidUTF8 { - return b, errInvalidUTF8 - } - return b, nil -} -func appendUTF8StringPtr(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - var invalidUTF8 bool - p := *ptr.toStringPtr() - if p == nil { - return b, nil - } - v := *p - if !utf8.ValidString(v) { - invalidUTF8 = true - } - b = appendVarint(b, wiretag) - b = appendVarint(b, uint64(len(v))) - b = append(b, v...) - if invalidUTF8 { - return b, errInvalidUTF8 - } - return b, nil -} -func appendUTF8StringSlice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - var invalidUTF8 bool - s := *ptr.toStringSlice() - for _, v := range s { - if !utf8.ValidString(v) { - invalidUTF8 = true - } - b = appendVarint(b, wiretag) - b = appendVarint(b, uint64(len(v))) - b = append(b, v...) - } - if invalidUTF8 { - return b, errInvalidUTF8 - } - return b, nil -} -func appendBytes(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - v := *ptr.toBytes() - if v == nil { - return b, nil - } - b = appendVarint(b, wiretag) - b = appendVarint(b, uint64(len(v))) - b = append(b, v...) 
- return b, nil -} -func appendBytes3(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - v := *ptr.toBytes() - if len(v) == 0 { - return b, nil - } - b = appendVarint(b, wiretag) - b = appendVarint(b, uint64(len(v))) - b = append(b, v...) - return b, nil -} -func appendBytesOneof(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - v := *ptr.toBytes() - b = appendVarint(b, wiretag) - b = appendVarint(b, uint64(len(v))) - b = append(b, v...) - return b, nil -} -func appendBytesSlice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - s := *ptr.toBytesSlice() - for _, v := range s { - b = appendVarint(b, wiretag) - b = appendVarint(b, uint64(len(v))) - b = append(b, v...) - } - return b, nil -} - -// makeGroupMarshaler returns the sizer and marshaler for a group. -// u is the marshal info of the underlying message. -func makeGroupMarshaler(u *marshalInfo) (sizer, marshaler) { - return func(ptr pointer, tagsize int) int { - p := ptr.getPointer() - if p.isNil() { - return 0 - } - return u.size(p) + 2*tagsize - }, - func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) { - p := ptr.getPointer() - if p.isNil() { - return b, nil - } - var err error - b = appendVarint(b, wiretag) // start group - b, err = u.marshal(b, p, deterministic) - b = appendVarint(b, wiretag+(WireEndGroup-WireStartGroup)) // end group - return b, err - } -} - -// makeGroupSliceMarshaler returns the sizer and marshaler for a group slice. -// u is the marshal info of the underlying message. -func makeGroupSliceMarshaler(u *marshalInfo) (sizer, marshaler) { - return func(ptr pointer, tagsize int) int { - s := ptr.getPointerSlice() - n := 0 - for _, v := range s { - if v.isNil() { - continue - } - n += u.size(v) + 2*tagsize - } - return n - }, - func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) { - s := ptr.getPointerSlice() - var err error - var nerr nonFatal - for _, v := range s { - if v.isNil() { - return b, errRepeatedHasNil - } - b = appendVarint(b, wiretag) // start group - b, err = u.marshal(b, v, deterministic) - b = appendVarint(b, wiretag+(WireEndGroup-WireStartGroup)) // end group - if !nerr.Merge(err) { - if err == ErrNil { - err = errRepeatedHasNil - } - return b, err - } - } - return b, nerr.E - } -} - -// makeMessageMarshaler returns the sizer and marshaler for a message field. -// u is the marshal info of the message. -func makeMessageMarshaler(u *marshalInfo) (sizer, marshaler) { - return func(ptr pointer, tagsize int) int { - p := ptr.getPointer() - if p.isNil() { - return 0 - } - siz := u.size(p) - return siz + SizeVarint(uint64(siz)) + tagsize - }, - func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) { - p := ptr.getPointer() - if p.isNil() { - return b, nil - } - b = appendVarint(b, wiretag) - siz := u.cachedsize(p) - b = appendVarint(b, uint64(siz)) - return u.marshal(b, p, deterministic) - } -} - -// makeMessageSliceMarshaler returns the sizer and marshaler for a message slice. -// u is the marshal info of the message. 
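makeMessageMarshaler above frames an embedded message as a key varint, a length varint, then the message body; makeMessageSliceMarshaler below applies the same framing once per element. A standalone sketch of that length-delimited framing using only encoding/binary; the field number and payload are arbitrary:

package main

import (
	"encoding/binary"
	"fmt"
)

func main() {
	const fieldNum = 1
	const wireBytes = 2 // wire type 2: length-delimited

	payload := []byte{0x08, 0x2a} // stands in for an already-encoded submessage

	var b []byte
	b = binary.AppendUvarint(b, uint64(fieldNum)<<3|wireBytes) // key
	b = binary.AppendUvarint(b, uint64(len(payload)))          // length
	b = append(b, payload...)                                  // body

	fmt.Printf("% x\n", b) // 0a 02 08 2a
}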
-func makeMessageSliceMarshaler(u *marshalInfo) (sizer, marshaler) { - return func(ptr pointer, tagsize int) int { - s := ptr.getPointerSlice() - n := 0 - for _, v := range s { - if v.isNil() { - continue - } - siz := u.size(v) - n += siz + SizeVarint(uint64(siz)) + tagsize - } - return n - }, - func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) { - s := ptr.getPointerSlice() - var err error - var nerr nonFatal - for _, v := range s { - if v.isNil() { - return b, errRepeatedHasNil - } - b = appendVarint(b, wiretag) - siz := u.cachedsize(v) - b = appendVarint(b, uint64(siz)) - b, err = u.marshal(b, v, deterministic) - - if !nerr.Merge(err) { - if err == ErrNil { - err = errRepeatedHasNil - } - return b, err - } - } - return b, nerr.E - } -} - -// makeMapMarshaler returns the sizer and marshaler for a map field. -// f is the pointer to the reflect data structure of the field. -func makeMapMarshaler(f *reflect.StructField) (sizer, marshaler) { - // figure out key and value type - t := f.Type - keyType := t.Key() - valType := t.Elem() - tags := strings.Split(f.Tag.Get("protobuf"), ",") - keyTags := strings.Split(f.Tag.Get("protobuf_key"), ",") - valTags := strings.Split(f.Tag.Get("protobuf_val"), ",") - stdOptions := false - for _, t := range tags { - if strings.HasPrefix(t, "customtype=") { - valTags = append(valTags, t) - } - if t == "stdtime" { - valTags = append(valTags, t) - stdOptions = true - } - if t == "stdduration" { - valTags = append(valTags, t) - stdOptions = true - } - if t == "wktptr" { - valTags = append(valTags, t) - } - } - keySizer, keyMarshaler := typeMarshaler(keyType, keyTags, false, false) // don't omit zero value in map - valSizer, valMarshaler := typeMarshaler(valType, valTags, false, false) // don't omit zero value in map - keyWireTag := 1<<3 | wiretype(keyTags[0]) - valWireTag := 2<<3 | wiretype(valTags[0]) - - // We create an interface to get the addresses of the map key and value. - // If value is pointer-typed, the interface is a direct interface, the - // idata itself is the value. Otherwise, the idata is the pointer to the - // value. - // Key cannot be pointer-typed. - valIsPtr := valType.Kind() == reflect.Ptr - - // If value is a message with nested maps, calling - // valSizer in marshal may be quadratic. We should use - // cached version in marshal (but not in size). - // If value is not message type, we don't have size cache, - // but it cannot be nested either. Just use valSizer. - valCachedSizer := valSizer - if valIsPtr && !stdOptions && valType.Elem().Kind() == reflect.Struct { - u := getMarshalInfo(valType.Elem()) - valCachedSizer = func(ptr pointer, tagsize int) int { - // Same as message sizer, but use cache. 
- p := ptr.getPointer() - if p.isNil() { - return 0 - } - siz := u.cachedsize(p) - return siz + SizeVarint(uint64(siz)) + tagsize - } - } - return func(ptr pointer, tagsize int) int { - m := ptr.asPointerTo(t).Elem() // the map - n := 0 - for _, k := range m.MapKeys() { - ki := k.Interface() - vi := m.MapIndex(k).Interface() - kaddr := toAddrPointer(&ki, false) // pointer to key - vaddr := toAddrPointer(&vi, valIsPtr) // pointer to value - siz := keySizer(kaddr, 1) + valSizer(vaddr, 1) // tag of key = 1 (size=1), tag of val = 2 (size=1) - n += siz + SizeVarint(uint64(siz)) + tagsize - } - return n - }, - func(b []byte, ptr pointer, tag uint64, deterministic bool) ([]byte, error) { - m := ptr.asPointerTo(t).Elem() // the map - var err error - keys := m.MapKeys() - if len(keys) > 1 && deterministic { - sort.Sort(mapKeys(keys)) - } - - var nerr nonFatal - for _, k := range keys { - ki := k.Interface() - vi := m.MapIndex(k).Interface() - kaddr := toAddrPointer(&ki, false) // pointer to key - vaddr := toAddrPointer(&vi, valIsPtr) // pointer to value - b = appendVarint(b, tag) - siz := keySizer(kaddr, 1) + valCachedSizer(vaddr, 1) // tag of key = 1 (size=1), tag of val = 2 (size=1) - b = appendVarint(b, uint64(siz)) - b, err = keyMarshaler(b, kaddr, keyWireTag, deterministic) - if !nerr.Merge(err) { - return b, err - } - b, err = valMarshaler(b, vaddr, valWireTag, deterministic) - if err != ErrNil && !nerr.Merge(err) { // allow nil value in map - return b, err - } - } - return b, nerr.E - } -} - -// makeOneOfMarshaler returns the sizer and marshaler for a oneof field. -// fi is the marshal info of the field. -// f is the pointer to the reflect data structure of the field. -func makeOneOfMarshaler(fi *marshalFieldInfo, f *reflect.StructField) (sizer, marshaler) { - // Oneof field is an interface. We need to get the actual data type on the fly. - t := f.Type - return func(ptr pointer, _ int) int { - p := ptr.getInterfacePointer() - if p.isNil() { - return 0 - } - v := ptr.asPointerTo(t).Elem().Elem().Elem() // *interface -> interface -> *struct -> struct - telem := v.Type() - e := fi.oneofElems[telem] - return e.sizer(p, e.tagsize) - }, - func(b []byte, ptr pointer, _ uint64, deterministic bool) ([]byte, error) { - p := ptr.getInterfacePointer() - if p.isNil() { - return b, nil - } - v := ptr.asPointerTo(t).Elem().Elem().Elem() // *interface -> interface -> *struct -> struct - telem := v.Type() - if telem.Field(0).Type.Kind() == reflect.Ptr && p.getPointer().isNil() { - return b, errOneofHasNil - } - e := fi.oneofElems[telem] - return e.marshaler(b, p, e.wiretag, deterministic) - } -} - -// sizeExtensions computes the size of encoded data for a XXX_InternalExtensions field. -func (u *marshalInfo) sizeExtensions(ext *XXX_InternalExtensions) int { - m, mu := ext.extensionsRead() - if m == nil { - return 0 - } - mu.Lock() - - n := 0 - for _, e := range m { - if e.value == nil || e.desc == nil { - // Extension is only in its encoded form. - n += len(e.enc) - continue - } - - // We don't skip extensions that have an encoded form set, - // because the extension value may have been mutated after - // the last time this function was called. - ei := u.getExtElemInfo(e.desc) - v := e.value - p := toAddrPointer(&v, ei.isptr) - n += ei.sizer(p, ei.tagsize) - } - mu.Unlock() - return n -} - -// appendExtensions marshals a XXX_InternalExtensions field to the end of byte slice b. 
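Deterministic output depends on ordering: the map marshaler above sorts its keys only when deterministic marshaling was requested, and the extension appenders below skip sorting only on their zero- or one-entry fast path. A small sketch of the sort-then-emit pattern; the map contents are arbitrary:

package main

import (
	"fmt"
	"sort"
)

func main() {
	m := map[int32][]byte{3: {0x03}, 1: {0x01}, 2: {0x02}}

	// Collect and sort the keys, as the extension appenders do, so
	// repeated runs emit entries in a stable order.
	keys := make([]int, 0, len(m))
	for k := range m {
		keys = append(keys, int(k))
	}
	sort.Ints(keys)

	var b []byte
	for _, k := range keys {
		b = append(b, m[int32(k)]...)
	}
	fmt.Printf("% x\n", b) // always 01 02 03, regardless of map iteration order
}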
-func (u *marshalInfo) appendExtensions(b []byte, ext *XXX_InternalExtensions, deterministic bool) ([]byte, error) { - m, mu := ext.extensionsRead() - if m == nil { - return b, nil - } - mu.Lock() - defer mu.Unlock() - - var err error - var nerr nonFatal - - // Fast-path for common cases: zero or one extensions. - // Don't bother sorting the keys. - if len(m) <= 1 { - for _, e := range m { - if e.value == nil || e.desc == nil { - // Extension is only in its encoded form. - b = append(b, e.enc...) - continue - } - - // We don't skip extensions that have an encoded form set, - // because the extension value may have been mutated after - // the last time this function was called. - - ei := u.getExtElemInfo(e.desc) - v := e.value - p := toAddrPointer(&v, ei.isptr) - b, err = ei.marshaler(b, p, ei.wiretag, deterministic) - if !nerr.Merge(err) { - return b, err - } - } - return b, nerr.E - } - - // Sort the keys to provide a deterministic encoding. - // Not sure this is required, but the old code does it. - keys := make([]int, 0, len(m)) - for k := range m { - keys = append(keys, int(k)) - } - sort.Ints(keys) - - for _, k := range keys { - e := m[int32(k)] - if e.value == nil || e.desc == nil { - // Extension is only in its encoded form. - b = append(b, e.enc...) - continue - } - - // We don't skip extensions that have an encoded form set, - // because the extension value may have been mutated after - // the last time this function was called. - - ei := u.getExtElemInfo(e.desc) - v := e.value - p := toAddrPointer(&v, ei.isptr) - b, err = ei.marshaler(b, p, ei.wiretag, deterministic) - if !nerr.Merge(err) { - return b, err - } - } - return b, nerr.E -} - -// message set format is: -// message MessageSet { -// repeated group Item = 1 { -// required int32 type_id = 2; -// required string message = 3; -// }; -// } - -// sizeMessageSet computes the size of encoded data for a XXX_InternalExtensions field -// in message set format (above). -func (u *marshalInfo) sizeMessageSet(ext *XXX_InternalExtensions) int { - m, mu := ext.extensionsRead() - if m == nil { - return 0 - } - mu.Lock() - - n := 0 - for id, e := range m { - n += 2 // start group, end group. tag = 1 (size=1) - n += SizeVarint(uint64(id)) + 1 // type_id, tag = 2 (size=1) - - if e.value == nil || e.desc == nil { - // Extension is only in its encoded form. - msgWithLen := skipVarint(e.enc) // skip old tag, but leave the length varint - siz := len(msgWithLen) - n += siz + 1 // message, tag = 3 (size=1) - continue - } - - // We don't skip extensions that have an encoded form set, - // because the extension value may have been mutated after - // the last time this function was called. - - ei := u.getExtElemInfo(e.desc) - v := e.value - p := toAddrPointer(&v, ei.isptr) - n += ei.sizer(p, 1) // message, tag = 3 (size=1) - } - mu.Unlock() - return n -} - -// appendMessageSet marshals a XXX_InternalExtensions field in message set format (above) -// to the end of byte slice b. -func (u *marshalInfo) appendMessageSet(b []byte, ext *XXX_InternalExtensions, deterministic bool) ([]byte, error) { - m, mu := ext.extensionsRead() - if m == nil { - return b, nil - } - mu.Lock() - defer mu.Unlock() - - var err error - var nerr nonFatal - - // Fast-path for common cases: zero or one extensions. - // Don't bother sorting the keys. - if len(m) <= 1 { - for id, e := range m { - b = append(b, 1<<3|WireStartGroup) - b = append(b, 2<<3|WireVarint) - b = appendVarint(b, uint64(id)) - - if e.value == nil || e.desc == nil { - // Extension is only in its encoded form. 
- msgWithLen := skipVarint(e.enc) // skip old tag, but leave the length varint - b = append(b, 3<<3|WireBytes) - b = append(b, msgWithLen...) - b = append(b, 1<<3|WireEndGroup) - continue - } - - // We don't skip extensions that have an encoded form set, - // because the extension value may have been mutated after - // the last time this function was called. - - ei := u.getExtElemInfo(e.desc) - v := e.value - p := toAddrPointer(&v, ei.isptr) - b, err = ei.marshaler(b, p, 3<<3|WireBytes, deterministic) - if !nerr.Merge(err) { - return b, err - } - b = append(b, 1<<3|WireEndGroup) - } - return b, nerr.E - } - - // Sort the keys to provide a deterministic encoding. - keys := make([]int, 0, len(m)) - for k := range m { - keys = append(keys, int(k)) - } - sort.Ints(keys) - - for _, id := range keys { - e := m[int32(id)] - b = append(b, 1<<3|WireStartGroup) - b = append(b, 2<<3|WireVarint) - b = appendVarint(b, uint64(id)) - - if e.value == nil || e.desc == nil { - // Extension is only in its encoded form. - msgWithLen := skipVarint(e.enc) // skip old tag, but leave the length varint - b = append(b, 3<<3|WireBytes) - b = append(b, msgWithLen...) - b = append(b, 1<<3|WireEndGroup) - continue - } - - // We don't skip extensions that have an encoded form set, - // because the extension value may have been mutated after - // the last time this function was called. - - ei := u.getExtElemInfo(e.desc) - v := e.value - p := toAddrPointer(&v, ei.isptr) - b, err = ei.marshaler(b, p, 3<<3|WireBytes, deterministic) - b = append(b, 1<<3|WireEndGroup) - if !nerr.Merge(err) { - return b, err - } - } - return b, nerr.E -} - -// sizeV1Extensions computes the size of encoded data for a V1-API extension field. -func (u *marshalInfo) sizeV1Extensions(m map[int32]Extension) int { - if m == nil { - return 0 - } - - n := 0 - for _, e := range m { - if e.value == nil || e.desc == nil { - // Extension is only in its encoded form. - n += len(e.enc) - continue - } - - // We don't skip extensions that have an encoded form set, - // because the extension value may have been mutated after - // the last time this function was called. - - ei := u.getExtElemInfo(e.desc) - v := e.value - p := toAddrPointer(&v, ei.isptr) - n += ei.sizer(p, ei.tagsize) - } - return n -} - -// appendV1Extensions marshals a V1-API extension field to the end of byte slice b. -func (u *marshalInfo) appendV1Extensions(b []byte, m map[int32]Extension, deterministic bool) ([]byte, error) { - if m == nil { - return b, nil - } - - // Sort the keys to provide a deterministic encoding. - keys := make([]int, 0, len(m)) - for k := range m { - keys = append(keys, int(k)) - } - sort.Ints(keys) - - var err error - var nerr nonFatal - for _, k := range keys { - e := m[int32(k)] - if e.value == nil || e.desc == nil { - // Extension is only in its encoded form. - b = append(b, e.enc...) - continue - } - - // We don't skip extensions that have an encoded form set, - // because the extension value may have been mutated after - // the last time this function was called. - - ei := u.getExtElemInfo(e.desc) - v := e.value - p := toAddrPointer(&v, ei.isptr) - b, err = ei.marshaler(b, p, ei.wiretag, deterministic) - if !nerr.Merge(err) { - return b, err - } - } - return b, nerr.E -} - -// newMarshaler is the interface representing objects that can marshal themselves. -// -// This exists to support protoc-gen-go generated messages. -// The proto package will stop type-asserting to this interface in the future. -// -// DO NOT DEPEND ON THIS. 
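appendMessageSet above frames each extension as one Item group: a start-group key for field 1, the type_id as varint field 2, the message as length-delimited field 3, then the matching end-group key. A standalone sketch of a single item; the wire-type constants are spelled out locally, and the id and payload are arbitrary:

package main

import (
	"encoding/binary"
	"fmt"
)

func main() {
	const (
		wireVarint     = 0
		wireBytes      = 2
		wireStartGroup = 3
		wireEndGroup   = 4
	)

	id := uint64(100)         // the extension's type_id
	msg := []byte{0x08, 0x01} // stands in for an already-encoded extension message

	var b []byte
	b = append(b, 1<<3|wireStartGroup) // Item group opens
	b = append(b, 2<<3|wireVarint)     // type_id, field 2
	b = binary.AppendUvarint(b, id)
	b = append(b, 3<<3|wireBytes) // message, field 3
	b = binary.AppendUvarint(b, uint64(len(msg)))
	b = append(b, msg...)
	b = append(b, 1<<3|wireEndGroup) // Item group closes

	fmt.Printf("% x\n", b) // 0b 10 64 1a 02 08 01 0c
}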
-type newMarshaler interface { - XXX_Size() int - XXX_Marshal(b []byte, deterministic bool) ([]byte, error) -} - -// Size returns the encoded size of a protocol buffer message. -// This is the main entry point. -func Size(pb Message) int { - if m, ok := pb.(newMarshaler); ok { - return m.XXX_Size() - } - if m, ok := pb.(Marshaler); ok { - // If the message can marshal itself, let it do it, for compatibility. - // NOTE: This is not efficient. - b, _ := m.Marshal() - return len(b) - } - // in case somehow we didn't generate the wrapper - if pb == nil { - return 0 - } - var info InternalMessageInfo - return info.Size(pb) -} - -// Marshal takes a protocol buffer message -// and encodes it into the wire format, returning the data. -// This is the main entry point. -func Marshal(pb Message) ([]byte, error) { - if m, ok := pb.(newMarshaler); ok { - siz := m.XXX_Size() - b := make([]byte, 0, siz) - return m.XXX_Marshal(b, false) - } - if m, ok := pb.(Marshaler); ok { - // If the message can marshal itself, let it do it, for compatibility. - // NOTE: This is not efficient. - return m.Marshal() - } - // in case somehow we didn't generate the wrapper - if pb == nil { - return nil, ErrNil - } - var info InternalMessageInfo - siz := info.Size(pb) - b := make([]byte, 0, siz) - return info.Marshal(b, pb, false) -} - -// Marshal takes a protocol buffer message -// and encodes it into the wire format, writing the result to the -// Buffer. -// This is an alternative entry point. It is not necessary to use -// a Buffer for most applications. -func (p *Buffer) Marshal(pb Message) error { - var err error - if p.deterministic { - if _, ok := pb.(Marshaler); ok { - return fmt.Errorf("proto: deterministic not supported by the Marshal method of %T", pb) - } - } - if m, ok := pb.(newMarshaler); ok { - siz := m.XXX_Size() - p.grow(siz) // make sure buf has enough capacity - pp := p.buf[len(p.buf) : len(p.buf) : len(p.buf)+siz] - pp, err = m.XXX_Marshal(pp, p.deterministic) - p.buf = append(p.buf, pp...) - return err - } - if m, ok := pb.(Marshaler); ok { - // If the message can marshal itself, let it do it, for compatibility. - // NOTE: This is not efficient. - var b []byte - b, err = m.Marshal() - p.buf = append(p.buf, b...) - return err - } - // in case somehow we didn't generate the wrapper - if pb == nil { - return ErrNil - } - var info InternalMessageInfo - siz := info.Size(pb) - p.grow(siz) // make sure buf has enough capacity - p.buf, err = info.Marshal(p.buf, pb, p.deterministic) - return err -} - -// grow grows the buffer's capacity, if necessary, to guarantee space for -// another n bytes. After grow(n), at least n bytes can be written to the -// buffer without another allocation. -func (p *Buffer) grow(n int) { - need := len(p.buf) + n - if need <= cap(p.buf) { - return - } - newCap := len(p.buf) * 2 - if newCap < need { - newCap = need - } - p.buf = append(make([]byte, 0, newCap), p.buf...) -} diff --git a/vendor/github.com/gogo/protobuf/proto/table_marshal_gogo.go b/vendor/github.com/gogo/protobuf/proto/table_marshal_gogo.go deleted file mode 100644 index 997f57c1e1..0000000000 --- a/vendor/github.com/gogo/protobuf/proto/table_marshal_gogo.go +++ /dev/null @@ -1,388 +0,0 @@ -// Protocol Buffers for Go with Gadgets -// -// Copyright (c) 2018, The GoGo Authors. All rights reserved. 
-// http://github.com/gogo/protobuf -// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following disclaimer -// in the documentation and/or other materials provided with the -// distribution. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -package proto - -import ( - "reflect" - "time" -) - -// makeMessageRefMarshaler differs a bit from makeMessageMarshaler -// It marshal a message T instead of a *T -func makeMessageRefMarshaler(u *marshalInfo) (sizer, marshaler) { - return func(ptr pointer, tagsize int) int { - siz := u.size(ptr) - return siz + SizeVarint(uint64(siz)) + tagsize - }, - func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) { - b = appendVarint(b, wiretag) - siz := u.cachedsize(ptr) - b = appendVarint(b, uint64(siz)) - return u.marshal(b, ptr, deterministic) - } -} - -// makeMessageRefSliceMarshaler differs quite a lot from makeMessageSliceMarshaler -// It marshals a slice of messages []T instead of []*T -func makeMessageRefSliceMarshaler(u *marshalInfo) (sizer, marshaler) { - return func(ptr pointer, tagsize int) int { - s := ptr.getSlice(u.typ) - n := 0 - for i := 0; i < s.Len(); i++ { - elem := s.Index(i) - e := elem.Interface() - v := toAddrPointer(&e, false) - siz := u.size(v) - n += siz + SizeVarint(uint64(siz)) + tagsize - } - return n - }, - func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) { - s := ptr.getSlice(u.typ) - var err, errreq error - for i := 0; i < s.Len(); i++ { - elem := s.Index(i) - e := elem.Interface() - v := toAddrPointer(&e, false) - b = appendVarint(b, wiretag) - siz := u.size(v) - b = appendVarint(b, uint64(siz)) - b, err = u.marshal(b, v, deterministic) - - if err != nil { - if _, ok := err.(*RequiredNotSetError); ok { - // Required field in submessage is not set. - // We record the error but keep going, to give a complete marshaling. 
- if errreq == nil { - errreq = err - } - continue - } - if err == ErrNil { - err = errRepeatedHasNil - } - return b, err - } - } - - return b, errreq - } -} - -func makeCustomPtrMarshaler(u *marshalInfo) (sizer, marshaler) { - return func(ptr pointer, tagsize int) int { - if ptr.isNil() { - return 0 - } - m := ptr.asPointerTo(reflect.PtrTo(u.typ)).Elem().Interface().(custom) - siz := m.Size() - return tagsize + SizeVarint(uint64(siz)) + siz - }, func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) { - if ptr.isNil() { - return b, nil - } - m := ptr.asPointerTo(reflect.PtrTo(u.typ)).Elem().Interface().(custom) - siz := m.Size() - buf, err := m.Marshal() - if err != nil { - return nil, err - } - b = appendVarint(b, wiretag) - b = appendVarint(b, uint64(siz)) - b = append(b, buf...) - return b, nil - } -} - -func makeCustomMarshaler(u *marshalInfo) (sizer, marshaler) { - return func(ptr pointer, tagsize int) int { - m := ptr.asPointerTo(u.typ).Interface().(custom) - siz := m.Size() - return tagsize + SizeVarint(uint64(siz)) + siz - }, func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) { - m := ptr.asPointerTo(u.typ).Interface().(custom) - siz := m.Size() - buf, err := m.Marshal() - if err != nil { - return nil, err - } - b = appendVarint(b, wiretag) - b = appendVarint(b, uint64(siz)) - b = append(b, buf...) - return b, nil - } -} - -func makeTimeMarshaler(u *marshalInfo) (sizer, marshaler) { - return func(ptr pointer, tagsize int) int { - t := ptr.asPointerTo(u.typ).Interface().(*time.Time) - ts, err := timestampProto(*t) - if err != nil { - return 0 - } - siz := Size(ts) - return tagsize + SizeVarint(uint64(siz)) + siz - }, func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) { - t := ptr.asPointerTo(u.typ).Interface().(*time.Time) - ts, err := timestampProto(*t) - if err != nil { - return nil, err - } - buf, err := Marshal(ts) - if err != nil { - return nil, err - } - b = appendVarint(b, wiretag) - b = appendVarint(b, uint64(len(buf))) - b = append(b, buf...) - return b, nil - } -} - -func makeTimePtrMarshaler(u *marshalInfo) (sizer, marshaler) { - return func(ptr pointer, tagsize int) int { - if ptr.isNil() { - return 0 - } - t := ptr.asPointerTo(reflect.PtrTo(u.typ)).Elem().Interface().(*time.Time) - ts, err := timestampProto(*t) - if err != nil { - return 0 - } - siz := Size(ts) - return tagsize + SizeVarint(uint64(siz)) + siz - }, func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) { - if ptr.isNil() { - return b, nil - } - t := ptr.asPointerTo(reflect.PtrTo(u.typ)).Elem().Interface().(*time.Time) - ts, err := timestampProto(*t) - if err != nil { - return nil, err - } - buf, err := Marshal(ts) - if err != nil { - return nil, err - } - b = appendVarint(b, wiretag) - b = appendVarint(b, uint64(len(buf))) - b = append(b, buf...) 
- return b, nil - } -} - -func makeTimeSliceMarshaler(u *marshalInfo) (sizer, marshaler) { - return func(ptr pointer, tagsize int) int { - s := ptr.getSlice(u.typ) - n := 0 - for i := 0; i < s.Len(); i++ { - elem := s.Index(i) - t := elem.Interface().(time.Time) - ts, err := timestampProto(t) - if err != nil { - return 0 - } - siz := Size(ts) - n += siz + SizeVarint(uint64(siz)) + tagsize - } - return n - }, - func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) { - s := ptr.getSlice(u.typ) - for i := 0; i < s.Len(); i++ { - elem := s.Index(i) - t := elem.Interface().(time.Time) - ts, err := timestampProto(t) - if err != nil { - return nil, err - } - siz := Size(ts) - buf, err := Marshal(ts) - if err != nil { - return nil, err - } - b = appendVarint(b, wiretag) - b = appendVarint(b, uint64(siz)) - b = append(b, buf...) - } - - return b, nil - } -} - -func makeTimePtrSliceMarshaler(u *marshalInfo) (sizer, marshaler) { - return func(ptr pointer, tagsize int) int { - s := ptr.getSlice(reflect.PtrTo(u.typ)) - n := 0 - for i := 0; i < s.Len(); i++ { - elem := s.Index(i) - t := elem.Interface().(*time.Time) - ts, err := timestampProto(*t) - if err != nil { - return 0 - } - siz := Size(ts) - n += siz + SizeVarint(uint64(siz)) + tagsize - } - return n - }, - func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) { - s := ptr.getSlice(reflect.PtrTo(u.typ)) - for i := 0; i < s.Len(); i++ { - elem := s.Index(i) - t := elem.Interface().(*time.Time) - ts, err := timestampProto(*t) - if err != nil { - return nil, err - } - siz := Size(ts) - buf, err := Marshal(ts) - if err != nil { - return nil, err - } - b = appendVarint(b, wiretag) - b = appendVarint(b, uint64(siz)) - b = append(b, buf...) - } - - return b, nil - } -} - -func makeDurationMarshaler(u *marshalInfo) (sizer, marshaler) { - return func(ptr pointer, tagsize int) int { - d := ptr.asPointerTo(u.typ).Interface().(*time.Duration) - dur := durationProto(*d) - siz := Size(dur) - return tagsize + SizeVarint(uint64(siz)) + siz - }, func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) { - d := ptr.asPointerTo(u.typ).Interface().(*time.Duration) - dur := durationProto(*d) - buf, err := Marshal(dur) - if err != nil { - return nil, err - } - b = appendVarint(b, wiretag) - b = appendVarint(b, uint64(len(buf))) - b = append(b, buf...) - return b, nil - } -} - -func makeDurationPtrMarshaler(u *marshalInfo) (sizer, marshaler) { - return func(ptr pointer, tagsize int) int { - if ptr.isNil() { - return 0 - } - d := ptr.asPointerTo(reflect.PtrTo(u.typ)).Elem().Interface().(*time.Duration) - dur := durationProto(*d) - siz := Size(dur) - return tagsize + SizeVarint(uint64(siz)) + siz - }, func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) { - if ptr.isNil() { - return b, nil - } - d := ptr.asPointerTo(reflect.PtrTo(u.typ)).Elem().Interface().(*time.Duration) - dur := durationProto(*d) - buf, err := Marshal(dur) - if err != nil { - return nil, err - } - b = appendVarint(b, wiretag) - b = appendVarint(b, uint64(len(buf))) - b = append(b, buf...) 
- return b, nil - } -} - -func makeDurationSliceMarshaler(u *marshalInfo) (sizer, marshaler) { - return func(ptr pointer, tagsize int) int { - s := ptr.getSlice(u.typ) - n := 0 - for i := 0; i < s.Len(); i++ { - elem := s.Index(i) - d := elem.Interface().(time.Duration) - dur := durationProto(d) - siz := Size(dur) - n += siz + SizeVarint(uint64(siz)) + tagsize - } - return n - }, - func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) { - s := ptr.getSlice(u.typ) - for i := 0; i < s.Len(); i++ { - elem := s.Index(i) - d := elem.Interface().(time.Duration) - dur := durationProto(d) - siz := Size(dur) - buf, err := Marshal(dur) - if err != nil { - return nil, err - } - b = appendVarint(b, wiretag) - b = appendVarint(b, uint64(siz)) - b = append(b, buf...) - } - - return b, nil - } -} - -func makeDurationPtrSliceMarshaler(u *marshalInfo) (sizer, marshaler) { - return func(ptr pointer, tagsize int) int { - s := ptr.getSlice(reflect.PtrTo(u.typ)) - n := 0 - for i := 0; i < s.Len(); i++ { - elem := s.Index(i) - d := elem.Interface().(*time.Duration) - dur := durationProto(*d) - siz := Size(dur) - n += siz + SizeVarint(uint64(siz)) + tagsize - } - return n - }, - func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) { - s := ptr.getSlice(reflect.PtrTo(u.typ)) - for i := 0; i < s.Len(); i++ { - elem := s.Index(i) - d := elem.Interface().(*time.Duration) - dur := durationProto(*d) - siz := Size(dur) - buf, err := Marshal(dur) - if err != nil { - return nil, err - } - b = appendVarint(b, wiretag) - b = appendVarint(b, uint64(siz)) - b = append(b, buf...) - } - - return b, nil - } -} diff --git a/vendor/github.com/gogo/protobuf/proto/table_merge.go b/vendor/github.com/gogo/protobuf/proto/table_merge.go deleted file mode 100644 index 60dcf70d1e..0000000000 --- a/vendor/github.com/gogo/protobuf/proto/table_merge.go +++ /dev/null @@ -1,676 +0,0 @@ -// Go support for Protocol Buffers - Google's data interchange format -// -// Copyright 2016 The Go Authors. All rights reserved. -// https://github.com/golang/protobuf -// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following disclaimer -// in the documentation and/or other materials provided with the -// distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived from -// this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -package proto - -import ( - "fmt" - "reflect" - "strings" - "sync" - "sync/atomic" -) - -// Merge merges the src message into dst. -// This assumes that dst and src of the same type and are non-nil. -func (a *InternalMessageInfo) Merge(dst, src Message) { - mi := atomicLoadMergeInfo(&a.merge) - if mi == nil { - mi = getMergeInfo(reflect.TypeOf(dst).Elem()) - atomicStoreMergeInfo(&a.merge, mi) - } - mi.merge(toPointer(&dst), toPointer(&src)) -} - -type mergeInfo struct { - typ reflect.Type - - initialized int32 // 0: only typ is valid, 1: everything is valid - lock sync.Mutex - - fields []mergeFieldInfo - unrecognized field // Offset of XXX_unrecognized -} - -type mergeFieldInfo struct { - field field // Offset of field, guaranteed to be valid - - // isPointer reports whether the value in the field is a pointer. - // This is true for the following situations: - // * Pointer to struct - // * Pointer to basic type (proto2 only) - // * Slice (first value in slice header is a pointer) - // * String (first value in string header is a pointer) - isPointer bool - - // basicWidth reports the width of the field assuming that it is directly - // embedded in the struct (as is the case for basic types in proto3). - // The possible values are: - // 0: invalid - // 1: bool - // 4: int32, uint32, float32 - // 8: int64, uint64, float64 - basicWidth int - - // Where dst and src are pointers to the types being merged. - merge func(dst, src pointer) -} - -var ( - mergeInfoMap = map[reflect.Type]*mergeInfo{} - mergeInfoLock sync.Mutex -) - -func getMergeInfo(t reflect.Type) *mergeInfo { - mergeInfoLock.Lock() - defer mergeInfoLock.Unlock() - mi := mergeInfoMap[t] - if mi == nil { - mi = &mergeInfo{typ: t} - mergeInfoMap[t] = mi - } - return mi -} - -// merge merges src into dst assuming they are both of type *mi.typ. -func (mi *mergeInfo) merge(dst, src pointer) { - if dst.isNil() { - panic("proto: nil destination") - } - if src.isNil() { - return // Nothing to do. - } - - if atomic.LoadInt32(&mi.initialized) == 0 { - mi.computeMergeInfo() - } - - for _, fi := range mi.fields { - sfp := src.offset(fi.field) - - // As an optimization, we can avoid the merge function call cost - // if we know for sure that the source will have no effect - // by checking if it is the zero value. - if unsafeAllowed { - if fi.isPointer && sfp.getPointer().isNil() { // Could be slice or string - continue - } - if fi.basicWidth > 0 { - switch { - case fi.basicWidth == 1 && !*sfp.toBool(): - continue - case fi.basicWidth == 4 && *sfp.toUint32() == 0: - continue - case fi.basicWidth == 8 && *sfp.toUint64() == 0: - continue - } - } - } - - dfp := dst.offset(fi.field) - fi.merge(dfp, sfp) - } - - // TODO: Make this faster? 
- out := dst.asPointerTo(mi.typ).Elem() - in := src.asPointerTo(mi.typ).Elem() - if emIn, err := extendable(in.Addr().Interface()); err == nil { - emOut, _ := extendable(out.Addr().Interface()) - mIn, muIn := emIn.extensionsRead() - if mIn != nil { - mOut := emOut.extensionsWrite() - muIn.Lock() - mergeExtension(mOut, mIn) - muIn.Unlock() - } - } - - if mi.unrecognized.IsValid() { - if b := *src.offset(mi.unrecognized).toBytes(); len(b) > 0 { - *dst.offset(mi.unrecognized).toBytes() = append([]byte(nil), b...) - } - } -} - -func (mi *mergeInfo) computeMergeInfo() { - mi.lock.Lock() - defer mi.lock.Unlock() - if mi.initialized != 0 { - return - } - t := mi.typ - n := t.NumField() - - props := GetProperties(t) - for i := 0; i < n; i++ { - f := t.Field(i) - if strings.HasPrefix(f.Name, "XXX_") { - continue - } - - mfi := mergeFieldInfo{field: toField(&f)} - tf := f.Type - - // As an optimization, we can avoid the merge function call cost - // if we know for sure that the source will have no effect - // by checking if it is the zero value. - if unsafeAllowed { - switch tf.Kind() { - case reflect.Ptr, reflect.Slice, reflect.String: - // As a special case, we assume slices and strings are pointers - // since we know that the first field in the SliceSlice or - // StringHeader is a data pointer. - mfi.isPointer = true - case reflect.Bool: - mfi.basicWidth = 1 - case reflect.Int32, reflect.Uint32, reflect.Float32: - mfi.basicWidth = 4 - case reflect.Int64, reflect.Uint64, reflect.Float64: - mfi.basicWidth = 8 - } - } - - // Unwrap tf to get at its most basic type. - var isPointer, isSlice bool - if tf.Kind() == reflect.Slice && tf.Elem().Kind() != reflect.Uint8 { - isSlice = true - tf = tf.Elem() - } - if tf.Kind() == reflect.Ptr { - isPointer = true - tf = tf.Elem() - } - if isPointer && isSlice && tf.Kind() != reflect.Struct { - panic("both pointer and slice for basic type in " + tf.Name()) - } - - switch tf.Kind() { - case reflect.Int32: - switch { - case isSlice: // E.g., []int32 - mfi.merge = func(dst, src pointer) { - // NOTE: toInt32Slice is not defined (see pointer_reflect.go). - /* - sfsp := src.toInt32Slice() - if *sfsp != nil { - dfsp := dst.toInt32Slice() - *dfsp = append(*dfsp, *sfsp...) - if *dfsp == nil { - *dfsp = []int64{} - } - } - */ - sfs := src.getInt32Slice() - if sfs != nil { - dfs := dst.getInt32Slice() - dfs = append(dfs, sfs...) - if dfs == nil { - dfs = []int32{} - } - dst.setInt32Slice(dfs) - } - } - case isPointer: // E.g., *int32 - mfi.merge = func(dst, src pointer) { - // NOTE: toInt32Ptr is not defined (see pointer_reflect.go). - /* - sfpp := src.toInt32Ptr() - if *sfpp != nil { - dfpp := dst.toInt32Ptr() - if *dfpp == nil { - *dfpp = Int32(**sfpp) - } else { - **dfpp = **sfpp - } - } - */ - sfp := src.getInt32Ptr() - if sfp != nil { - dfp := dst.getInt32Ptr() - if dfp == nil { - dst.setInt32Ptr(*sfp) - } else { - *dfp = *sfp - } - } - } - default: // E.g., int32 - mfi.merge = func(dst, src pointer) { - if v := *src.toInt32(); v != 0 { - *dst.toInt32() = v - } - } - } - case reflect.Int64: - switch { - case isSlice: // E.g., []int64 - mfi.merge = func(dst, src pointer) { - sfsp := src.toInt64Slice() - if *sfsp != nil { - dfsp := dst.toInt64Slice() - *dfsp = append(*dfsp, *sfsp...) 
- if *dfsp == nil { - *dfsp = []int64{} - } - } - } - case isPointer: // E.g., *int64 - mfi.merge = func(dst, src pointer) { - sfpp := src.toInt64Ptr() - if *sfpp != nil { - dfpp := dst.toInt64Ptr() - if *dfpp == nil { - *dfpp = Int64(**sfpp) - } else { - **dfpp = **sfpp - } - } - } - default: // E.g., int64 - mfi.merge = func(dst, src pointer) { - if v := *src.toInt64(); v != 0 { - *dst.toInt64() = v - } - } - } - case reflect.Uint32: - switch { - case isSlice: // E.g., []uint32 - mfi.merge = func(dst, src pointer) { - sfsp := src.toUint32Slice() - if *sfsp != nil { - dfsp := dst.toUint32Slice() - *dfsp = append(*dfsp, *sfsp...) - if *dfsp == nil { - *dfsp = []uint32{} - } - } - } - case isPointer: // E.g., *uint32 - mfi.merge = func(dst, src pointer) { - sfpp := src.toUint32Ptr() - if *sfpp != nil { - dfpp := dst.toUint32Ptr() - if *dfpp == nil { - *dfpp = Uint32(**sfpp) - } else { - **dfpp = **sfpp - } - } - } - default: // E.g., uint32 - mfi.merge = func(dst, src pointer) { - if v := *src.toUint32(); v != 0 { - *dst.toUint32() = v - } - } - } - case reflect.Uint64: - switch { - case isSlice: // E.g., []uint64 - mfi.merge = func(dst, src pointer) { - sfsp := src.toUint64Slice() - if *sfsp != nil { - dfsp := dst.toUint64Slice() - *dfsp = append(*dfsp, *sfsp...) - if *dfsp == nil { - *dfsp = []uint64{} - } - } - } - case isPointer: // E.g., *uint64 - mfi.merge = func(dst, src pointer) { - sfpp := src.toUint64Ptr() - if *sfpp != nil { - dfpp := dst.toUint64Ptr() - if *dfpp == nil { - *dfpp = Uint64(**sfpp) - } else { - **dfpp = **sfpp - } - } - } - default: // E.g., uint64 - mfi.merge = func(dst, src pointer) { - if v := *src.toUint64(); v != 0 { - *dst.toUint64() = v - } - } - } - case reflect.Float32: - switch { - case isSlice: // E.g., []float32 - mfi.merge = func(dst, src pointer) { - sfsp := src.toFloat32Slice() - if *sfsp != nil { - dfsp := dst.toFloat32Slice() - *dfsp = append(*dfsp, *sfsp...) - if *dfsp == nil { - *dfsp = []float32{} - } - } - } - case isPointer: // E.g., *float32 - mfi.merge = func(dst, src pointer) { - sfpp := src.toFloat32Ptr() - if *sfpp != nil { - dfpp := dst.toFloat32Ptr() - if *dfpp == nil { - *dfpp = Float32(**sfpp) - } else { - **dfpp = **sfpp - } - } - } - default: // E.g., float32 - mfi.merge = func(dst, src pointer) { - if v := *src.toFloat32(); v != 0 { - *dst.toFloat32() = v - } - } - } - case reflect.Float64: - switch { - case isSlice: // E.g., []float64 - mfi.merge = func(dst, src pointer) { - sfsp := src.toFloat64Slice() - if *sfsp != nil { - dfsp := dst.toFloat64Slice() - *dfsp = append(*dfsp, *sfsp...) - if *dfsp == nil { - *dfsp = []float64{} - } - } - } - case isPointer: // E.g., *float64 - mfi.merge = func(dst, src pointer) { - sfpp := src.toFloat64Ptr() - if *sfpp != nil { - dfpp := dst.toFloat64Ptr() - if *dfpp == nil { - *dfpp = Float64(**sfpp) - } else { - **dfpp = **sfpp - } - } - } - default: // E.g., float64 - mfi.merge = func(dst, src pointer) { - if v := *src.toFloat64(); v != 0 { - *dst.toFloat64() = v - } - } - } - case reflect.Bool: - switch { - case isSlice: // E.g., []bool - mfi.merge = func(dst, src pointer) { - sfsp := src.toBoolSlice() - if *sfsp != nil { - dfsp := dst.toBoolSlice() - *dfsp = append(*dfsp, *sfsp...) 
- if *dfsp == nil { - *dfsp = []bool{} - } - } - } - case isPointer: // E.g., *bool - mfi.merge = func(dst, src pointer) { - sfpp := src.toBoolPtr() - if *sfpp != nil { - dfpp := dst.toBoolPtr() - if *dfpp == nil { - *dfpp = Bool(**sfpp) - } else { - **dfpp = **sfpp - } - } - } - default: // E.g., bool - mfi.merge = func(dst, src pointer) { - if v := *src.toBool(); v { - *dst.toBool() = v - } - } - } - case reflect.String: - switch { - case isSlice: // E.g., []string - mfi.merge = func(dst, src pointer) { - sfsp := src.toStringSlice() - if *sfsp != nil { - dfsp := dst.toStringSlice() - *dfsp = append(*dfsp, *sfsp...) - if *dfsp == nil { - *dfsp = []string{} - } - } - } - case isPointer: // E.g., *string - mfi.merge = func(dst, src pointer) { - sfpp := src.toStringPtr() - if *sfpp != nil { - dfpp := dst.toStringPtr() - if *dfpp == nil { - *dfpp = String(**sfpp) - } else { - **dfpp = **sfpp - } - } - } - default: // E.g., string - mfi.merge = func(dst, src pointer) { - if v := *src.toString(); v != "" { - *dst.toString() = v - } - } - } - case reflect.Slice: - isProto3 := props.Prop[i].proto3 - switch { - case isPointer: - panic("bad pointer in byte slice case in " + tf.Name()) - case tf.Elem().Kind() != reflect.Uint8: - panic("bad element kind in byte slice case in " + tf.Name()) - case isSlice: // E.g., [][]byte - mfi.merge = func(dst, src pointer) { - sbsp := src.toBytesSlice() - if *sbsp != nil { - dbsp := dst.toBytesSlice() - for _, sb := range *sbsp { - if sb == nil { - *dbsp = append(*dbsp, nil) - } else { - *dbsp = append(*dbsp, append([]byte{}, sb...)) - } - } - if *dbsp == nil { - *dbsp = [][]byte{} - } - } - } - default: // E.g., []byte - mfi.merge = func(dst, src pointer) { - sbp := src.toBytes() - if *sbp != nil { - dbp := dst.toBytes() - if !isProto3 || len(*sbp) > 0 { - *dbp = append([]byte{}, *sbp...) - } - } - } - } - case reflect.Struct: - switch { - case isSlice && !isPointer: // E.g. []pb.T - mergeInfo := getMergeInfo(tf) - zero := reflect.Zero(tf) - mfi.merge = func(dst, src pointer) { - // TODO: Make this faster? 
- dstsp := dst.asPointerTo(f.Type) - dsts := dstsp.Elem() - srcs := src.asPointerTo(f.Type).Elem() - for i := 0; i < srcs.Len(); i++ { - dsts = reflect.Append(dsts, zero) - srcElement := srcs.Index(i).Addr() - dstElement := dsts.Index(dsts.Len() - 1).Addr() - mergeInfo.merge(valToPointer(dstElement), valToPointer(srcElement)) - } - if dsts.IsNil() { - dsts = reflect.MakeSlice(f.Type, 0, 0) - } - dstsp.Elem().Set(dsts) - } - case !isPointer: - mergeInfo := getMergeInfo(tf) - mfi.merge = func(dst, src pointer) { - mergeInfo.merge(dst, src) - } - case isSlice: // E.g., []*pb.T - mergeInfo := getMergeInfo(tf) - mfi.merge = func(dst, src pointer) { - sps := src.getPointerSlice() - if sps != nil { - dps := dst.getPointerSlice() - for _, sp := range sps { - var dp pointer - if !sp.isNil() { - dp = valToPointer(reflect.New(tf)) - mergeInfo.merge(dp, sp) - } - dps = append(dps, dp) - } - if dps == nil { - dps = []pointer{} - } - dst.setPointerSlice(dps) - } - } - default: // E.g., *pb.T - mergeInfo := getMergeInfo(tf) - mfi.merge = func(dst, src pointer) { - sp := src.getPointer() - if !sp.isNil() { - dp := dst.getPointer() - if dp.isNil() { - dp = valToPointer(reflect.New(tf)) - dst.setPointer(dp) - } - mergeInfo.merge(dp, sp) - } - } - } - case reflect.Map: - switch { - case isPointer || isSlice: - panic("bad pointer or slice in map case in " + tf.Name()) - default: // E.g., map[K]V - mfi.merge = func(dst, src pointer) { - sm := src.asPointerTo(tf).Elem() - if sm.Len() == 0 { - return - } - dm := dst.asPointerTo(tf).Elem() - if dm.IsNil() { - dm.Set(reflect.MakeMap(tf)) - } - - switch tf.Elem().Kind() { - case reflect.Ptr: // Proto struct (e.g., *T) - for _, key := range sm.MapKeys() { - val := sm.MapIndex(key) - val = reflect.ValueOf(Clone(val.Interface().(Message))) - dm.SetMapIndex(key, val) - } - case reflect.Slice: // E.g. Bytes type (e.g., []byte) - for _, key := range sm.MapKeys() { - val := sm.MapIndex(key) - val = reflect.ValueOf(append([]byte{}, val.Bytes()...)) - dm.SetMapIndex(key, val) - } - default: // Basic type (e.g., string) - for _, key := range sm.MapKeys() { - val := sm.MapIndex(key) - dm.SetMapIndex(key, val) - } - } - } - } - case reflect.Interface: - // Must be oneof field. - switch { - case isPointer || isSlice: - panic("bad pointer or slice in interface case in " + tf.Name()) - default: // E.g., interface{} - // TODO: Make this faster? - mfi.merge = func(dst, src pointer) { - su := src.asPointerTo(tf).Elem() - if !su.IsNil() { - du := dst.asPointerTo(tf).Elem() - typ := su.Elem().Type() - if du.IsNil() || du.Elem().Type() != typ { - du.Set(reflect.New(typ.Elem())) // Initialize interface if empty - } - sv := su.Elem().Elem().Field(0) - if sv.Kind() == reflect.Ptr && sv.IsNil() { - return - } - dv := du.Elem().Elem().Field(0) - if dv.Kind() == reflect.Ptr && dv.IsNil() { - dv.Set(reflect.New(sv.Type().Elem())) // Initialize proto message if empty - } - switch sv.Type().Kind() { - case reflect.Ptr: // Proto struct (e.g., *T) - Merge(dv.Interface().(Message), sv.Interface().(Message)) - case reflect.Slice: // E.g. 
Bytes type (e.g., []byte) - dv.Set(reflect.ValueOf(append([]byte{}, sv.Bytes()...))) - default: // Basic type (e.g., string) - dv.Set(sv) - } - } - } - } - default: - panic(fmt.Sprintf("merger not found for type:%s", tf)) - } - mi.fields = append(mi.fields, mfi) - } - - mi.unrecognized = invalidField - if f, ok := t.FieldByName("XXX_unrecognized"); ok { - if f.Type != reflect.TypeOf([]byte{}) { - panic("expected XXX_unrecognized to be of type []byte") - } - mi.unrecognized = toField(&f) - } - - atomic.StoreInt32(&mi.initialized, 1) -} diff --git a/vendor/github.com/gogo/protobuf/proto/table_unmarshal.go b/vendor/github.com/gogo/protobuf/proto/table_unmarshal.go deleted file mode 100644 index 937229386a..0000000000 --- a/vendor/github.com/gogo/protobuf/proto/table_unmarshal.go +++ /dev/null @@ -1,2249 +0,0 @@ -// Go support for Protocol Buffers - Google's data interchange format -// -// Copyright 2016 The Go Authors. All rights reserved. -// https://github.com/golang/protobuf -// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following disclaimer -// in the documentation and/or other materials provided with the -// distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived from -// this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -package proto - -import ( - "errors" - "fmt" - "io" - "math" - "reflect" - "strconv" - "strings" - "sync" - "sync/atomic" - "unicode/utf8" -) - -// Unmarshal is the entry point from the generated .pb.go files. -// This function is not intended to be used by non-generated code. -// This function is not subject to any compatibility guarantee. -// msg contains a pointer to a protocol buffer struct. -// b is the data to be unmarshaled into the protocol buffer. -// a is a pointer to a place to store cached unmarshal information. -func (a *InternalMessageInfo) Unmarshal(msg Message, b []byte) error { - // Load the unmarshal information for this message type. - // The atomic load ensures memory consistency. - u := atomicLoadUnmarshalInfo(&a.unmarshal) - if u == nil { - // Slow path: find unmarshal info for msg, update a with it. - u = getUnmarshalInfo(reflect.TypeOf(msg).Elem()) - atomicStoreUnmarshalInfo(&a.unmarshal, u) - } - // Then do the unmarshaling. 
- err := u.unmarshal(toPointer(&msg), b) - return err -} - -type unmarshalInfo struct { - typ reflect.Type // type of the protobuf struct - - // 0 = only typ field is initialized - // 1 = completely initialized - initialized int32 - lock sync.Mutex // prevents double initialization - dense []unmarshalFieldInfo // fields indexed by tag # - sparse map[uint64]unmarshalFieldInfo // fields indexed by tag # - reqFields []string // names of required fields - reqMask uint64 // 1<<len(reqFields)-1 - - unrecognized field // offset of []byte to put unrecognized data (or invalidField if we should throw it away) - extensions field // offset of extensions field (an XXX_InternalExtensions field), or invalidField if it does not exist - oldExtensions field // offset of old-form extensions field (map[int]Extension), or invalidField if it does not exist - bytesExtensions field // offset of XXX_extensions with type []byte - extensionRanges []ExtensionRange // if non-nil, implies extensions field is valid - isMessageSet bool // if true, implies extensions field is valid -} - -// An unmarshaler takes a stream of bytes and a pointer to a field of a message. -// It decodes the field, stores it at f, and returns the unused bytes. -// w is the wire encoding. -// b is the data after the tag and wire encoding have been read. -type unmarshaler func(b []byte, f pointer, w int) ([]byte, error) - -type unmarshalFieldInfo struct { - // location of the field in the proto message structure. - field field - - // function to unmarshal the data for the field. - unmarshal unmarshaler - - // if a required field, contains a single set bit at this field's index in the required field list. - reqMask uint64 - - name string // name of the field, for error reporting -} - -var ( - unmarshalInfoMap = map[reflect.Type]*unmarshalInfo{} - unmarshalInfoLock sync.Mutex -) - -// getUnmarshalInfo returns the data structure which can be -// used to unmarshal a message of the given type. -// t is the type of the message (note: not pointer to message). -func getUnmarshalInfo(t reflect.Type) *unmarshalInfo { - // It would be correct to return a new unmarshalInfo -	// unconditionally. We would end up allocating one -	// per occurrence of that type as a message or submessage. -	// We use a cache here just to reduce memory usage. - unmarshalInfoLock.Lock() - defer unmarshalInfoLock.Unlock() - u := unmarshalInfoMap[t] - if u == nil { - u = &unmarshalInfo{typ: t} - // Note: we just set the type here. The rest of the fields - // will be initialized on first use. - unmarshalInfoMap[t] = u - } - return u -} - -// unmarshal does the main work of unmarshaling a message. -// u provides type information used to unmarshal the message. -// m is a pointer to the protobuf message. -// b is a byte stream to unmarshal into the message. -// This is top routine used when recursively unmarshaling submessages. -func (u *unmarshalInfo) unmarshal(m pointer, b []byte) error { - if atomic.LoadInt32(&u.initialized) == 0 { - u.computeUnmarshalInfo() - } - if u.isMessageSet { - return unmarshalMessageSet(b, m.offset(u.extensions).toExtensions()) - } - var reqMask uint64 // bitmask of required fields we've seen. - var errLater error - for len(b) > 0 { - // Read tag and wire type. - // Special case 1 and 2 byte varints. - var x uint64 - if b[0] < 128 { - x = uint64(b[0]) - b = b[1:] - } else if len(b) >= 2 && b[1] < 128 { - x = uint64(b[0]&0x7f) + uint64(b[1])<<7 - b = b[2:] - } else { - var n int - x, n = decodeVarint(b) - if n == 0 { - return io.ErrUnexpectedEOF - } - b = b[n:] - } - tag := x >> 3 - wire := int(x) & 7 - - // Dispatch on the tag to one of the unmarshal* functions below. - var f unmarshalFieldInfo - if tag < uint64(len(u.dense)) { - f = u.dense[tag] - } else { - f = u.sparse[tag] - } - if fn := f.unmarshal; fn != nil { - var err error - b, err = fn(b, m.offset(f.field), wire) - if err == nil { - reqMask |= f.reqMask - continue - } - if r, ok := err.(*RequiredNotSetError); ok { - // Remember this error, but keep parsing. We need to produce - // a full parse even if a required field is missing. - if errLater == nil { - errLater = r - } - reqMask |= f.reqMask - continue - } - if err != errInternalBadWireType { - if err == errInvalidUTF8 { - if errLater == nil { - fullName := revProtoTypes[reflect.PtrTo(u.typ)] + "." + f.name - errLater = &invalidUTF8Error{fullName} - } - continue - } - return err - } - // Fragments with bad wire type are treated as unknown fields. - } - - // Unknown tag. - if !u.unrecognized.IsValid() { - // Don't keep unrecognized data; just skip it. - var err error - b, err = skipField(b, wire) - if err != nil { - return err - } - continue - } - // Keep unrecognized data around. - // maybe in extensions, maybe in the unrecognized field. - z := m.offset(u.unrecognized).toBytes() - var emap map[int32]Extension - var e Extension - for _, r := range u.extensionRanges { - if uint64(r.Start) <= tag && tag <= uint64(r.End) { - if u.extensions.IsValid() { - mp := m.offset(u.extensions).toExtensions() - emap = mp.extensionsWrite() - e = emap[int32(tag)] - z = &e.enc - break - } - if u.oldExtensions.IsValid() { - p := m.offset(u.oldExtensions).toOldExtensions() - emap = *p - if emap == nil { - emap = map[int32]Extension{} - *p = emap - } - e = emap[int32(tag)] - z = &e.enc - break - } - if u.bytesExtensions.IsValid() { - z = m.offset(u.bytesExtensions).toBytes() - break - } - panic("no extensions field available") - } - } - // Use wire type to skip data. - var err error - b0 := b - b, err = skipField(b, wire) - if err != nil { - return err - } - *z = encodeVarint(*z, tag<<3|uint64(wire)) - *z = append(*z, b0[:len(b0)-len(b)]...) - - if emap != nil { - emap[int32(tag)] = e - } - } - if reqMask != u.reqMask && errLater == nil { - // A required field of this message is missing. - for _, n := range u.reqFields { - if reqMask&1 == 0 { - errLater = &RequiredNotSetError{n} - } - reqMask >>= 1 - } - } - return errLater -} - -// computeUnmarshalInfo fills in u with information for use -// in unmarshaling protocol buffers of type u.typ. 
-func (u *unmarshalInfo) computeUnmarshalInfo() { - u.lock.Lock() - defer u.lock.Unlock() - if u.initialized != 0 { - return - } - t := u.typ - n := t.NumField() - - // Set up the "not found" value for the unrecognized byte buffer. - // This is the default for proto3. - u.unrecognized = invalidField - u.extensions = invalidField - u.oldExtensions = invalidField - u.bytesExtensions = invalidField - - // List of the generated type and offset for each oneof field. - type oneofField struct { - ityp reflect.Type // interface type of oneof field - field field // offset in containing message - } - var oneofFields []oneofField - - for i := 0; i < n; i++ { - f := t.Field(i) - if f.Name == "XXX_unrecognized" { - // The byte slice used to hold unrecognized input is special. - if f.Type != reflect.TypeOf(([]byte)(nil)) { - panic("bad type for XXX_unrecognized field: " + f.Type.Name()) - } - u.unrecognized = toField(&f) - continue - } - if f.Name == "XXX_InternalExtensions" { - // Ditto here. - if f.Type != reflect.TypeOf(XXX_InternalExtensions{}) { - panic("bad type for XXX_InternalExtensions field: " + f.Type.Name()) - } - u.extensions = toField(&f) - if f.Tag.Get("protobuf_messageset") == "1" { - u.isMessageSet = true - } - continue - } - if f.Name == "XXX_extensions" { - // An older form of the extensions field. - if f.Type == reflect.TypeOf((map[int32]Extension)(nil)) { - u.oldExtensions = toField(&f) - continue - } else if f.Type == reflect.TypeOf(([]byte)(nil)) { - u.bytesExtensions = toField(&f) - continue - } - panic("bad type for XXX_extensions field: " + f.Type.Name()) - } - if f.Name == "XXX_NoUnkeyedLiteral" || f.Name == "XXX_sizecache" { - continue - } - - oneof := f.Tag.Get("protobuf_oneof") - if oneof != "" { - oneofFields = append(oneofFields, oneofField{f.Type, toField(&f)}) - // The rest of oneof processing happens below. - continue - } - - tags := f.Tag.Get("protobuf") - tagArray := strings.Split(tags, ",") - if len(tagArray) < 2 { - panic("protobuf tag not enough fields in " + t.Name() + "." + f.Name + ": " + tags) - } - tag, err := strconv.Atoi(tagArray[1]) - if err != nil { - panic("protobuf tag field not an integer: " + tagArray[1]) - } - - name := "" - for _, tag := range tagArray[3:] { - if strings.HasPrefix(tag, "name=") { - name = tag[5:] - } - } - - // Extract unmarshaling function from the field (its type and tags). - unmarshal := fieldUnmarshaler(&f) - - // Required field? - var reqMask uint64 - if tagArray[2] == "req" { - bit := len(u.reqFields) - u.reqFields = append(u.reqFields, name) - reqMask = uint64(1) << uint(bit) - // TODO: if we have more than 64 required fields, we end up - // not verifying that all required fields are present. - // Fix this, perhaps using a count of required fields? - } - - // Store the info in the correct slot in the message. - u.setTag(tag, toField(&f), unmarshal, reqMask, name) - } - - // Find any types associated with oneof fields. 
- // gogo: len(oneofFields) > 0 is needed for embedded oneof messages, without a marshaler and unmarshaler - if len(oneofFields) > 0 { - var oneofImplementers []interface{} - switch m := reflect.Zero(reflect.PtrTo(t)).Interface().(type) { - case oneofFuncsIface: - _, _, _, oneofImplementers = m.XXX_OneofFuncs() - case oneofWrappersIface: - oneofImplementers = m.XXX_OneofWrappers() - } - for _, v := range oneofImplementers { - tptr := reflect.TypeOf(v) // *Msg_X - typ := tptr.Elem() // Msg_X - - f := typ.Field(0) // oneof implementers have one field - baseUnmarshal := fieldUnmarshaler(&f) - tags := strings.Split(f.Tag.Get("protobuf"), ",") - fieldNum, err := strconv.Atoi(tags[1]) - if err != nil { - panic("protobuf tag field not an integer: " + tags[1]) - } - var name string - for _, tag := range tags { - if strings.HasPrefix(tag, "name=") { - name = strings.TrimPrefix(tag, "name=") - break - } - } - - // Find the oneof field that this struct implements. - // Might take O(n^2) to process all of the oneofs, but who cares. - for _, of := range oneofFields { - if tptr.Implements(of.ityp) { - // We have found the corresponding interface for this struct. - // That lets us know where this struct should be stored - // when we encounter it during unmarshaling. - unmarshal := makeUnmarshalOneof(typ, of.ityp, baseUnmarshal) - u.setTag(fieldNum, of.field, unmarshal, 0, name) - } - } - - } - } - - // Get extension ranges, if any. - fn := reflect.Zero(reflect.PtrTo(t)).MethodByName("ExtensionRangeArray") - if fn.IsValid() { - if !u.extensions.IsValid() && !u.oldExtensions.IsValid() && !u.bytesExtensions.IsValid() { - panic("a message with extensions, but no extensions field in " + t.Name()) - } - u.extensionRanges = fn.Call(nil)[0].Interface().([]ExtensionRange) - } - - // Explicitly disallow tag 0. This will ensure we flag an error - // when decoding a buffer of all zeros. Without this code, we - // would decode and skip an all-zero buffer of even length. - // [0 0] is [tag=0/wiretype=varint varint-encoded-0]. - u.setTag(0, zeroField, func(b []byte, f pointer, w int) ([]byte, error) { - return nil, fmt.Errorf("proto: %s: illegal tag 0 (wire type %d)", t, w) - }, 0, "") - - // Set mask for required field check. - u.reqMask = uint64(1)<<uint(len(u.reqFields)) - 1 - - atomic.StoreInt32(&u.initialized, 1) -} - -// setTag stores the unmarshal information for the given tag. -// tag = tag # for field -// field/unmarshal = unmarshal info for that field. -// reqMask = if required, bitmask for field position in required field list. 0 otherwise. -// name = short name of the field. -func (u *unmarshalInfo) setTag(tag int, field field, unmarshal unmarshaler, reqMask uint64, name string) { - i := unmarshalFieldInfo{field: field, unmarshal: unmarshal, reqMask: reqMask, name: name} - n := u.typ.NumField() - if tag >= 0 && (tag < 16 || tag < 2*n) { // TODO: what are the right numbers here? - for len(u.dense) <= tag { - u.dense = append(u.dense, unmarshalFieldInfo{}) - } - u.dense[tag] = i - return - } - if u.sparse == nil { - u.sparse = map[uint64]unmarshalFieldInfo{} - } - u.sparse[uint64(tag)] = i -} - -// fieldUnmarshaler returns an unmarshaler for the given field. -func fieldUnmarshaler(f *reflect.StructField) unmarshaler { - if f.Type.Kind() == reflect.Map { - return makeUnmarshalMap(f) - } - return typeUnmarshaler(f.Type, f.Tag.Get("protobuf")) -} - -// typeUnmarshaler returns an unmarshaler for the given field type / field tag pair. 
-func typeUnmarshaler(t reflect.Type, tags string) unmarshaler { - tagArray := strings.Split(tags, ",") - encoding := tagArray[0] - name := "unknown" - ctype := false - isTime := false - isDuration := false - isWktPointer := false - proto3 := false - validateUTF8 := true - for _, tag := range tagArray[3:] { - if strings.HasPrefix(tag, "name=") { - name = tag[5:] - } - if tag == "proto3" { - proto3 = true - } - if strings.HasPrefix(tag, "customtype=") { - ctype = true - } - if tag == "stdtime" { - isTime = true - } - if tag == "stdduration" { - isDuration = true - } - if tag == "wktptr" { - isWktPointer = true - } - } - validateUTF8 = validateUTF8 && proto3 - - // Figure out packaging (pointer, slice, or both) - slice := false - pointer := false - if t.Kind() == reflect.Slice && t.Elem().Kind() != reflect.Uint8 { - slice = true - t = t.Elem() - } - if t.Kind() == reflect.Ptr { - pointer = true - t = t.Elem() - } - - if ctype { - if reflect.PtrTo(t).Implements(customType) { - if slice { - return makeUnmarshalCustomSlice(getUnmarshalInfo(t), name) - } - if pointer { - return makeUnmarshalCustomPtr(getUnmarshalInfo(t), name) - } - return makeUnmarshalCustom(getUnmarshalInfo(t), name) - } else { - panic(fmt.Sprintf("custom type: type: %v, does not implement the proto.custom interface", t)) - } - } - - if isTime { - if pointer { - if slice { - return makeUnmarshalTimePtrSlice(getUnmarshalInfo(t), name) - } - return makeUnmarshalTimePtr(getUnmarshalInfo(t), name) - } - if slice { - return makeUnmarshalTimeSlice(getUnmarshalInfo(t), name) - } - return makeUnmarshalTime(getUnmarshalInfo(t), name) - } - - if isDuration { - if pointer { - if slice { - return makeUnmarshalDurationPtrSlice(getUnmarshalInfo(t), name) - } - return makeUnmarshalDurationPtr(getUnmarshalInfo(t), name) - } - if slice { - return makeUnmarshalDurationSlice(getUnmarshalInfo(t), name) - } - return makeUnmarshalDuration(getUnmarshalInfo(t), name) - } - - if isWktPointer { - switch t.Kind() { - case reflect.Float64: - if pointer { - if slice { - return makeStdDoubleValuePtrSliceUnmarshaler(getUnmarshalInfo(t), name) - } - return makeStdDoubleValuePtrUnmarshaler(getUnmarshalInfo(t), name) - } - if slice { - return makeStdDoubleValueSliceUnmarshaler(getUnmarshalInfo(t), name) - } - return makeStdDoubleValueUnmarshaler(getUnmarshalInfo(t), name) - case reflect.Float32: - if pointer { - if slice { - return makeStdFloatValuePtrSliceUnmarshaler(getUnmarshalInfo(t), name) - } - return makeStdFloatValuePtrUnmarshaler(getUnmarshalInfo(t), name) - } - if slice { - return makeStdFloatValueSliceUnmarshaler(getUnmarshalInfo(t), name) - } - return makeStdFloatValueUnmarshaler(getUnmarshalInfo(t), name) - case reflect.Int64: - if pointer { - if slice { - return makeStdInt64ValuePtrSliceUnmarshaler(getUnmarshalInfo(t), name) - } - return makeStdInt64ValuePtrUnmarshaler(getUnmarshalInfo(t), name) - } - if slice { - return makeStdInt64ValueSliceUnmarshaler(getUnmarshalInfo(t), name) - } - return makeStdInt64ValueUnmarshaler(getUnmarshalInfo(t), name) - case reflect.Uint64: - if pointer { - if slice { - return makeStdUInt64ValuePtrSliceUnmarshaler(getUnmarshalInfo(t), name) - } - return makeStdUInt64ValuePtrUnmarshaler(getUnmarshalInfo(t), name) - } - if slice { - return makeStdUInt64ValueSliceUnmarshaler(getUnmarshalInfo(t), name) - } - return makeStdUInt64ValueUnmarshaler(getUnmarshalInfo(t), name) - case reflect.Int32: - if pointer { - if slice { - return makeStdInt32ValuePtrSliceUnmarshaler(getUnmarshalInfo(t), name) - } - return 
makeStdInt32ValuePtrUnmarshaler(getUnmarshalInfo(t), name) - } - if slice { - return makeStdInt32ValueSliceUnmarshaler(getUnmarshalInfo(t), name) - } - return makeStdInt32ValueUnmarshaler(getUnmarshalInfo(t), name) - case reflect.Uint32: - if pointer { - if slice { - return makeStdUInt32ValuePtrSliceUnmarshaler(getUnmarshalInfo(t), name) - } - return makeStdUInt32ValuePtrUnmarshaler(getUnmarshalInfo(t), name) - } - if slice { - return makeStdUInt32ValueSliceUnmarshaler(getUnmarshalInfo(t), name) - } - return makeStdUInt32ValueUnmarshaler(getUnmarshalInfo(t), name) - case reflect.Bool: - if pointer { - if slice { - return makeStdBoolValuePtrSliceUnmarshaler(getUnmarshalInfo(t), name) - } - return makeStdBoolValuePtrUnmarshaler(getUnmarshalInfo(t), name) - } - if slice { - return makeStdBoolValueSliceUnmarshaler(getUnmarshalInfo(t), name) - } - return makeStdBoolValueUnmarshaler(getUnmarshalInfo(t), name) - case reflect.String: - if pointer { - if slice { - return makeStdStringValuePtrSliceUnmarshaler(getUnmarshalInfo(t), name) - } - return makeStdStringValuePtrUnmarshaler(getUnmarshalInfo(t), name) - } - if slice { - return makeStdStringValueSliceUnmarshaler(getUnmarshalInfo(t), name) - } - return makeStdStringValueUnmarshaler(getUnmarshalInfo(t), name) - case uint8SliceType: - if pointer { - if slice { - return makeStdBytesValuePtrSliceUnmarshaler(getUnmarshalInfo(t), name) - } - return makeStdBytesValuePtrUnmarshaler(getUnmarshalInfo(t), name) - } - if slice { - return makeStdBytesValueSliceUnmarshaler(getUnmarshalInfo(t), name) - } - return makeStdBytesValueUnmarshaler(getUnmarshalInfo(t), name) - default: - panic(fmt.Sprintf("unknown wktpointer type %#v", t)) - } - } - - // We'll never have both pointer and slice for basic types. - if pointer && slice && t.Kind() != reflect.Struct { - panic("both pointer and slice for basic type in " + t.Name()) - } - - switch t.Kind() { - case reflect.Bool: - if pointer { - return unmarshalBoolPtr - } - if slice { - return unmarshalBoolSlice - } - return unmarshalBoolValue - case reflect.Int32: - switch encoding { - case "fixed32": - if pointer { - return unmarshalFixedS32Ptr - } - if slice { - return unmarshalFixedS32Slice - } - return unmarshalFixedS32Value - case "varint": - // this could be int32 or enum - if pointer { - return unmarshalInt32Ptr - } - if slice { - return unmarshalInt32Slice - } - return unmarshalInt32Value - case "zigzag32": - if pointer { - return unmarshalSint32Ptr - } - if slice { - return unmarshalSint32Slice - } - return unmarshalSint32Value - } - case reflect.Int64: - switch encoding { - case "fixed64": - if pointer { - return unmarshalFixedS64Ptr - } - if slice { - return unmarshalFixedS64Slice - } - return unmarshalFixedS64Value - case "varint": - if pointer { - return unmarshalInt64Ptr - } - if slice { - return unmarshalInt64Slice - } - return unmarshalInt64Value - case "zigzag64": - if pointer { - return unmarshalSint64Ptr - } - if slice { - return unmarshalSint64Slice - } - return unmarshalSint64Value - } - case reflect.Uint32: - switch encoding { - case "fixed32": - if pointer { - return unmarshalFixed32Ptr - } - if slice { - return unmarshalFixed32Slice - } - return unmarshalFixed32Value - case "varint": - if pointer { - return unmarshalUint32Ptr - } - if slice { - return unmarshalUint32Slice - } - return unmarshalUint32Value - } - case reflect.Uint64: - switch encoding { - case "fixed64": - if pointer { - return unmarshalFixed64Ptr - } - if slice { - return unmarshalFixed64Slice - } - return unmarshalFixed64Value - 
case "varint": - if pointer { - return unmarshalUint64Ptr - } - if slice { - return unmarshalUint64Slice - } - return unmarshalUint64Value - } - case reflect.Float32: - if pointer { - return unmarshalFloat32Ptr - } - if slice { - return unmarshalFloat32Slice - } - return unmarshalFloat32Value - case reflect.Float64: - if pointer { - return unmarshalFloat64Ptr - } - if slice { - return unmarshalFloat64Slice - } - return unmarshalFloat64Value - case reflect.Map: - panic("map type in typeUnmarshaler in " + t.Name()) - case reflect.Slice: - if pointer { - panic("bad pointer in slice case in " + t.Name()) - } - if slice { - return unmarshalBytesSlice - } - return unmarshalBytesValue - case reflect.String: - if validateUTF8 { - if pointer { - return unmarshalUTF8StringPtr - } - if slice { - return unmarshalUTF8StringSlice - } - return unmarshalUTF8StringValue - } - if pointer { - return unmarshalStringPtr - } - if slice { - return unmarshalStringSlice - } - return unmarshalStringValue - case reflect.Struct: - // message or group field - if !pointer { - switch encoding { - case "bytes": - if slice { - return makeUnmarshalMessageSlice(getUnmarshalInfo(t), name) - } - return makeUnmarshalMessage(getUnmarshalInfo(t), name) - } - } - switch encoding { - case "bytes": - if slice { - return makeUnmarshalMessageSlicePtr(getUnmarshalInfo(t), name) - } - return makeUnmarshalMessagePtr(getUnmarshalInfo(t), name) - case "group": - if slice { - return makeUnmarshalGroupSlicePtr(getUnmarshalInfo(t), name) - } - return makeUnmarshalGroupPtr(getUnmarshalInfo(t), name) - } - } - panic(fmt.Sprintf("unmarshaler not found type:%s encoding:%s", t, encoding)) -} - -// Below are all the unmarshalers for individual fields of various types. - -func unmarshalInt64Value(b []byte, f pointer, w int) ([]byte, error) { - if w != WireVarint { - return b, errInternalBadWireType - } - x, n := decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - v := int64(x) - *f.toInt64() = v - return b, nil -} - -func unmarshalInt64Ptr(b []byte, f pointer, w int) ([]byte, error) { - if w != WireVarint { - return b, errInternalBadWireType - } - x, n := decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - v := int64(x) - *f.toInt64Ptr() = &v - return b, nil -} - -func unmarshalInt64Slice(b []byte, f pointer, w int) ([]byte, error) { - if w == WireBytes { // packed - x, n := decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - if x > uint64(len(b)) { - return nil, io.ErrUnexpectedEOF - } - res := b[x:] - b = b[:x] - for len(b) > 0 { - x, n = decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - v := int64(x) - s := f.toInt64Slice() - *s = append(*s, v) - } - return res, nil - } - if w != WireVarint { - return b, errInternalBadWireType - } - x, n := decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - v := int64(x) - s := f.toInt64Slice() - *s = append(*s, v) - return b, nil -} - -func unmarshalSint64Value(b []byte, f pointer, w int) ([]byte, error) { - if w != WireVarint { - return b, errInternalBadWireType - } - x, n := decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - v := int64(x>>1) ^ int64(x)<<63>>63 - *f.toInt64() = v - return b, nil -} - -func unmarshalSint64Ptr(b []byte, f pointer, w int) ([]byte, error) { - if w != WireVarint { - return b, errInternalBadWireType - } - x, n := decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - v := 
int64(x>>1) ^ int64(x)<<63>>63 - *f.toInt64Ptr() = &v - return b, nil -} - -func unmarshalSint64Slice(b []byte, f pointer, w int) ([]byte, error) { - if w == WireBytes { // packed - x, n := decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - if x > uint64(len(b)) { - return nil, io.ErrUnexpectedEOF - } - res := b[x:] - b = b[:x] - for len(b) > 0 { - x, n = decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - v := int64(x>>1) ^ int64(x)<<63>>63 - s := f.toInt64Slice() - *s = append(*s, v) - } - return res, nil - } - if w != WireVarint { - return b, errInternalBadWireType - } - x, n := decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - v := int64(x>>1) ^ int64(x)<<63>>63 - s := f.toInt64Slice() - *s = append(*s, v) - return b, nil -} - -func unmarshalUint64Value(b []byte, f pointer, w int) ([]byte, error) { - if w != WireVarint { - return b, errInternalBadWireType - } - x, n := decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - v := uint64(x) - *f.toUint64() = v - return b, nil -} - -func unmarshalUint64Ptr(b []byte, f pointer, w int) ([]byte, error) { - if w != WireVarint { - return b, errInternalBadWireType - } - x, n := decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - v := uint64(x) - *f.toUint64Ptr() = &v - return b, nil -} - -func unmarshalUint64Slice(b []byte, f pointer, w int) ([]byte, error) { - if w == WireBytes { // packed - x, n := decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - if x > uint64(len(b)) { - return nil, io.ErrUnexpectedEOF - } - res := b[x:] - b = b[:x] - for len(b) > 0 { - x, n = decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - v := uint64(x) - s := f.toUint64Slice() - *s = append(*s, v) - } - return res, nil - } - if w != WireVarint { - return b, errInternalBadWireType - } - x, n := decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - v := uint64(x) - s := f.toUint64Slice() - *s = append(*s, v) - return b, nil -} - -func unmarshalInt32Value(b []byte, f pointer, w int) ([]byte, error) { - if w != WireVarint { - return b, errInternalBadWireType - } - x, n := decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - v := int32(x) - *f.toInt32() = v - return b, nil -} - -func unmarshalInt32Ptr(b []byte, f pointer, w int) ([]byte, error) { - if w != WireVarint { - return b, errInternalBadWireType - } - x, n := decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - v := int32(x) - f.setInt32Ptr(v) - return b, nil -} - -func unmarshalInt32Slice(b []byte, f pointer, w int) ([]byte, error) { - if w == WireBytes { // packed - x, n := decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - if x > uint64(len(b)) { - return nil, io.ErrUnexpectedEOF - } - res := b[x:] - b = b[:x] - for len(b) > 0 { - x, n = decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - v := int32(x) - f.appendInt32Slice(v) - } - return res, nil - } - if w != WireVarint { - return b, errInternalBadWireType - } - x, n := decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - v := int32(x) - f.appendInt32Slice(v) - return b, nil -} - -func unmarshalSint32Value(b []byte, f pointer, w int) ([]byte, error) { - if w != WireVarint { - return b, errInternalBadWireType - } - x, n := decodeVarint(b) - if n == 0 { - return nil, 
io.ErrUnexpectedEOF - } - b = b[n:] - v := int32(x>>1) ^ int32(x)<<31>>31 - *f.toInt32() = v - return b, nil -} - -func unmarshalSint32Ptr(b []byte, f pointer, w int) ([]byte, error) { - if w != WireVarint { - return b, errInternalBadWireType - } - x, n := decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - v := int32(x>>1) ^ int32(x)<<31>>31 - f.setInt32Ptr(v) - return b, nil -} - -func unmarshalSint32Slice(b []byte, f pointer, w int) ([]byte, error) { - if w == WireBytes { // packed - x, n := decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - if x > uint64(len(b)) { - return nil, io.ErrUnexpectedEOF - } - res := b[x:] - b = b[:x] - for len(b) > 0 { - x, n = decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - v := int32(x>>1) ^ int32(x)<<31>>31 - f.appendInt32Slice(v) - } - return res, nil - } - if w != WireVarint { - return b, errInternalBadWireType - } - x, n := decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - v := int32(x>>1) ^ int32(x)<<31>>31 - f.appendInt32Slice(v) - return b, nil -} - -func unmarshalUint32Value(b []byte, f pointer, w int) ([]byte, error) { - if w != WireVarint { - return b, errInternalBadWireType - } - x, n := decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - v := uint32(x) - *f.toUint32() = v - return b, nil -} - -func unmarshalUint32Ptr(b []byte, f pointer, w int) ([]byte, error) { - if w != WireVarint { - return b, errInternalBadWireType - } - x, n := decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - v := uint32(x) - *f.toUint32Ptr() = &v - return b, nil -} - -func unmarshalUint32Slice(b []byte, f pointer, w int) ([]byte, error) { - if w == WireBytes { // packed - x, n := decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - if x > uint64(len(b)) { - return nil, io.ErrUnexpectedEOF - } - res := b[x:] - b = b[:x] - for len(b) > 0 { - x, n = decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - v := uint32(x) - s := f.toUint32Slice() - *s = append(*s, v) - } - return res, nil - } - if w != WireVarint { - return b, errInternalBadWireType - } - x, n := decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - v := uint32(x) - s := f.toUint32Slice() - *s = append(*s, v) - return b, nil -} - -func unmarshalFixed64Value(b []byte, f pointer, w int) ([]byte, error) { - if w != WireFixed64 { - return b, errInternalBadWireType - } - if len(b) < 8 { - return nil, io.ErrUnexpectedEOF - } - v := uint64(b[0]) | uint64(b[1])<<8 | uint64(b[2])<<16 | uint64(b[3])<<24 | uint64(b[4])<<32 | uint64(b[5])<<40 | uint64(b[6])<<48 | uint64(b[7])<<56 - *f.toUint64() = v - return b[8:], nil -} - -func unmarshalFixed64Ptr(b []byte, f pointer, w int) ([]byte, error) { - if w != WireFixed64 { - return b, errInternalBadWireType - } - if len(b) < 8 { - return nil, io.ErrUnexpectedEOF - } - v := uint64(b[0]) | uint64(b[1])<<8 | uint64(b[2])<<16 | uint64(b[3])<<24 | uint64(b[4])<<32 | uint64(b[5])<<40 | uint64(b[6])<<48 | uint64(b[7])<<56 - *f.toUint64Ptr() = &v - return b[8:], nil -} - -func unmarshalFixed64Slice(b []byte, f pointer, w int) ([]byte, error) { - if w == WireBytes { // packed - x, n := decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - if x > uint64(len(b)) { - return nil, io.ErrUnexpectedEOF - } - res := b[x:] - b = b[:x] - for len(b) > 0 { - if len(b) < 8 { - return nil, 
io.ErrUnexpectedEOF - } - v := uint64(b[0]) | uint64(b[1])<<8 | uint64(b[2])<<16 | uint64(b[3])<<24 | uint64(b[4])<<32 | uint64(b[5])<<40 | uint64(b[6])<<48 | uint64(b[7])<<56 - s := f.toUint64Slice() - *s = append(*s, v) - b = b[8:] - } - return res, nil - } - if w != WireFixed64 { - return b, errInternalBadWireType - } - if len(b) < 8 { - return nil, io.ErrUnexpectedEOF - } - v := uint64(b[0]) | uint64(b[1])<<8 | uint64(b[2])<<16 | uint64(b[3])<<24 | uint64(b[4])<<32 | uint64(b[5])<<40 | uint64(b[6])<<48 | uint64(b[7])<<56 - s := f.toUint64Slice() - *s = append(*s, v) - return b[8:], nil -} - -func unmarshalFixedS64Value(b []byte, f pointer, w int) ([]byte, error) { - if w != WireFixed64 { - return b, errInternalBadWireType - } - if len(b) < 8 { - return nil, io.ErrUnexpectedEOF - } - v := int64(b[0]) | int64(b[1])<<8 | int64(b[2])<<16 | int64(b[3])<<24 | int64(b[4])<<32 | int64(b[5])<<40 | int64(b[6])<<48 | int64(b[7])<<56 - *f.toInt64() = v - return b[8:], nil -} - -func unmarshalFixedS64Ptr(b []byte, f pointer, w int) ([]byte, error) { - if w != WireFixed64 { - return b, errInternalBadWireType - } - if len(b) < 8 { - return nil, io.ErrUnexpectedEOF - } - v := int64(b[0]) | int64(b[1])<<8 | int64(b[2])<<16 | int64(b[3])<<24 | int64(b[4])<<32 | int64(b[5])<<40 | int64(b[6])<<48 | int64(b[7])<<56 - *f.toInt64Ptr() = &v - return b[8:], nil -} - -func unmarshalFixedS64Slice(b []byte, f pointer, w int) ([]byte, error) { - if w == WireBytes { // packed - x, n := decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - if x > uint64(len(b)) { - return nil, io.ErrUnexpectedEOF - } - res := b[x:] - b = b[:x] - for len(b) > 0 { - if len(b) < 8 { - return nil, io.ErrUnexpectedEOF - } - v := int64(b[0]) | int64(b[1])<<8 | int64(b[2])<<16 | int64(b[3])<<24 | int64(b[4])<<32 | int64(b[5])<<40 | int64(b[6])<<48 | int64(b[7])<<56 - s := f.toInt64Slice() - *s = append(*s, v) - b = b[8:] - } - return res, nil - } - if w != WireFixed64 { - return b, errInternalBadWireType - } - if len(b) < 8 { - return nil, io.ErrUnexpectedEOF - } - v := int64(b[0]) | int64(b[1])<<8 | int64(b[2])<<16 | int64(b[3])<<24 | int64(b[4])<<32 | int64(b[5])<<40 | int64(b[6])<<48 | int64(b[7])<<56 - s := f.toInt64Slice() - *s = append(*s, v) - return b[8:], nil -} - -func unmarshalFixed32Value(b []byte, f pointer, w int) ([]byte, error) { - if w != WireFixed32 { - return b, errInternalBadWireType - } - if len(b) < 4 { - return nil, io.ErrUnexpectedEOF - } - v := uint32(b[0]) | uint32(b[1])<<8 | uint32(b[2])<<16 | uint32(b[3])<<24 - *f.toUint32() = v - return b[4:], nil -} - -func unmarshalFixed32Ptr(b []byte, f pointer, w int) ([]byte, error) { - if w != WireFixed32 { - return b, errInternalBadWireType - } - if len(b) < 4 { - return nil, io.ErrUnexpectedEOF - } - v := uint32(b[0]) | uint32(b[1])<<8 | uint32(b[2])<<16 | uint32(b[3])<<24 - *f.toUint32Ptr() = &v - return b[4:], nil -} - -func unmarshalFixed32Slice(b []byte, f pointer, w int) ([]byte, error) { - if w == WireBytes { // packed - x, n := decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - if x > uint64(len(b)) { - return nil, io.ErrUnexpectedEOF - } - res := b[x:] - b = b[:x] - for len(b) > 0 { - if len(b) < 4 { - return nil, io.ErrUnexpectedEOF - } - v := uint32(b[0]) | uint32(b[1])<<8 | uint32(b[2])<<16 | uint32(b[3])<<24 - s := f.toUint32Slice() - *s = append(*s, v) - b = b[4:] - } - return res, nil - } - if w != WireFixed32 { - return b, errInternalBadWireType - } - if len(b) < 4 { - return nil, 
io.ErrUnexpectedEOF - } - v := uint32(b[0]) | uint32(b[1])<<8 | uint32(b[2])<<16 | uint32(b[3])<<24 - s := f.toUint32Slice() - *s = append(*s, v) - return b[4:], nil -} - -func unmarshalFixedS32Value(b []byte, f pointer, w int) ([]byte, error) { - if w != WireFixed32 { - return b, errInternalBadWireType - } - if len(b) < 4 { - return nil, io.ErrUnexpectedEOF - } - v := int32(b[0]) | int32(b[1])<<8 | int32(b[2])<<16 | int32(b[3])<<24 - *f.toInt32() = v - return b[4:], nil -} - -func unmarshalFixedS32Ptr(b []byte, f pointer, w int) ([]byte, error) { - if w != WireFixed32 { - return b, errInternalBadWireType - } - if len(b) < 4 { - return nil, io.ErrUnexpectedEOF - } - v := int32(b[0]) | int32(b[1])<<8 | int32(b[2])<<16 | int32(b[3])<<24 - f.setInt32Ptr(v) - return b[4:], nil -} - -func unmarshalFixedS32Slice(b []byte, f pointer, w int) ([]byte, error) { - if w == WireBytes { // packed - x, n := decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - if x > uint64(len(b)) { - return nil, io.ErrUnexpectedEOF - } - res := b[x:] - b = b[:x] - for len(b) > 0 { - if len(b) < 4 { - return nil, io.ErrUnexpectedEOF - } - v := int32(b[0]) | int32(b[1])<<8 | int32(b[2])<<16 | int32(b[3])<<24 - f.appendInt32Slice(v) - b = b[4:] - } - return res, nil - } - if w != WireFixed32 { - return b, errInternalBadWireType - } - if len(b) < 4 { - return nil, io.ErrUnexpectedEOF - } - v := int32(b[0]) | int32(b[1])<<8 | int32(b[2])<<16 | int32(b[3])<<24 - f.appendInt32Slice(v) - return b[4:], nil -} - -func unmarshalBoolValue(b []byte, f pointer, w int) ([]byte, error) { - if w != WireVarint { - return b, errInternalBadWireType - } - // Note: any length varint is allowed, even though any sane - // encoder will use one byte. - // See https://github.com/golang/protobuf/issues/76 - x, n := decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - // TODO: check if x>1? Tests seem to indicate no. 
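A note on the pattern repeated throughout these deleted unmarshalers: packed repeated fields arrive as a single length-delimited payload, and the sint variants additionally undo the zigzag transform (the int64(x>>1) ^ int64(x)<<63>>63 expression above). A minimal standalone sketch, using encoding/binary's Uvarint as a stand-in for the package's decodeVarint:

    package main

    import (
        "encoding/binary"
        "fmt"
    )

    // decodePackedSint64 mirrors the shape of the deleted
    // unmarshalSint64Slice: read the byte-length prefix, slice off the
    // packed payload, then decode varints until it is used up.
    func decodePackedSint64(b []byte) ([]int64, error) {
        length, n := binary.Uvarint(b) // length prefix of the packed payload
        if n <= 0 || length > uint64(len(b)-n) {
            return nil, fmt.Errorf("truncated packed field")
        }
        payload := b[n : n+int(length)]
        var out []int64
        for len(payload) > 0 {
            x, k := binary.Uvarint(payload)
            if k <= 0 {
                return nil, fmt.Errorf("bad varint")
            }
            payload = payload[k:]
            // zigzag decode, as in the deleted code's
            // int64(x>>1) ^ int64(x)<<63>>63
            out = append(out, int64(x>>1)^int64(x)<<63>>63)
        }
        return out, nil
    }

    func main() {
        // -1 zigzag-encodes to 1 and 1 to 2; 0x02 is the payload length
        fmt.Println(decodePackedSint64([]byte{0x02, 0x01, 0x02})) // [-1 1] <nil>
    }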
- v := x != 0 - *f.toBool() = v - return b[n:], nil -} - -func unmarshalBoolPtr(b []byte, f pointer, w int) ([]byte, error) { - if w != WireVarint { - return b, errInternalBadWireType - } - x, n := decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - v := x != 0 - *f.toBoolPtr() = &v - return b[n:], nil -} - -func unmarshalBoolSlice(b []byte, f pointer, w int) ([]byte, error) { - if w == WireBytes { // packed - x, n := decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - if x > uint64(len(b)) { - return nil, io.ErrUnexpectedEOF - } - res := b[x:] - b = b[:x] - for len(b) > 0 { - x, n = decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - v := x != 0 - s := f.toBoolSlice() - *s = append(*s, v) - b = b[n:] - } - return res, nil - } - if w != WireVarint { - return b, errInternalBadWireType - } - x, n := decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - v := x != 0 - s := f.toBoolSlice() - *s = append(*s, v) - return b[n:], nil -} - -func unmarshalFloat64Value(b []byte, f pointer, w int) ([]byte, error) { - if w != WireFixed64 { - return b, errInternalBadWireType - } - if len(b) < 8 { - return nil, io.ErrUnexpectedEOF - } - v := math.Float64frombits(uint64(b[0]) | uint64(b[1])<<8 | uint64(b[2])<<16 | uint64(b[3])<<24 | uint64(b[4])<<32 | uint64(b[5])<<40 | uint64(b[6])<<48 | uint64(b[7])<<56) - *f.toFloat64() = v - return b[8:], nil -} - -func unmarshalFloat64Ptr(b []byte, f pointer, w int) ([]byte, error) { - if w != WireFixed64 { - return b, errInternalBadWireType - } - if len(b) < 8 { - return nil, io.ErrUnexpectedEOF - } - v := math.Float64frombits(uint64(b[0]) | uint64(b[1])<<8 | uint64(b[2])<<16 | uint64(b[3])<<24 | uint64(b[4])<<32 | uint64(b[5])<<40 | uint64(b[6])<<48 | uint64(b[7])<<56) - *f.toFloat64Ptr() = &v - return b[8:], nil -} - -func unmarshalFloat64Slice(b []byte, f pointer, w int) ([]byte, error) { - if w == WireBytes { // packed - x, n := decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - if x > uint64(len(b)) { - return nil, io.ErrUnexpectedEOF - } - res := b[x:] - b = b[:x] - for len(b) > 0 { - if len(b) < 8 { - return nil, io.ErrUnexpectedEOF - } - v := math.Float64frombits(uint64(b[0]) | uint64(b[1])<<8 | uint64(b[2])<<16 | uint64(b[3])<<24 | uint64(b[4])<<32 | uint64(b[5])<<40 | uint64(b[6])<<48 | uint64(b[7])<<56) - s := f.toFloat64Slice() - *s = append(*s, v) - b = b[8:] - } - return res, nil - } - if w != WireFixed64 { - return b, errInternalBadWireType - } - if len(b) < 8 { - return nil, io.ErrUnexpectedEOF - } - v := math.Float64frombits(uint64(b[0]) | uint64(b[1])<<8 | uint64(b[2])<<16 | uint64(b[3])<<24 | uint64(b[4])<<32 | uint64(b[5])<<40 | uint64(b[6])<<48 | uint64(b[7])<<56) - s := f.toFloat64Slice() - *s = append(*s, v) - return b[8:], nil -} - -func unmarshalFloat32Value(b []byte, f pointer, w int) ([]byte, error) { - if w != WireFixed32 { - return b, errInternalBadWireType - } - if len(b) < 4 { - return nil, io.ErrUnexpectedEOF - } - v := math.Float32frombits(uint32(b[0]) | uint32(b[1])<<8 | uint32(b[2])<<16 | uint32(b[3])<<24) - *f.toFloat32() = v - return b[4:], nil -} - -func unmarshalFloat32Ptr(b []byte, f pointer, w int) ([]byte, error) { - if w != WireFixed32 { - return b, errInternalBadWireType - } - if len(b) < 4 { - return nil, io.ErrUnexpectedEOF - } - v := math.Float32frombits(uint32(b[0]) | uint32(b[1])<<8 | uint32(b[2])<<16 | uint32(b[3])<<24) - *f.toFloat32Ptr() = &v - return b[4:], nil -} - -func unmarshalFloat32Slice(b []byte, 
f pointer, w int) ([]byte, error) { - if w == WireBytes { // packed - x, n := decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - if x > uint64(len(b)) { - return nil, io.ErrUnexpectedEOF - } - res := b[x:] - b = b[:x] - for len(b) > 0 { - if len(b) < 4 { - return nil, io.ErrUnexpectedEOF - } - v := math.Float32frombits(uint32(b[0]) | uint32(b[1])<<8 | uint32(b[2])<<16 | uint32(b[3])<<24) - s := f.toFloat32Slice() - *s = append(*s, v) - b = b[4:] - } - return res, nil - } - if w != WireFixed32 { - return b, errInternalBadWireType - } - if len(b) < 4 { - return nil, io.ErrUnexpectedEOF - } - v := math.Float32frombits(uint32(b[0]) | uint32(b[1])<<8 | uint32(b[2])<<16 | uint32(b[3])<<24) - s := f.toFloat32Slice() - *s = append(*s, v) - return b[4:], nil -} - -func unmarshalStringValue(b []byte, f pointer, w int) ([]byte, error) { - if w != WireBytes { - return b, errInternalBadWireType - } - x, n := decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - if x > uint64(len(b)) { - return nil, io.ErrUnexpectedEOF - } - v := string(b[:x]) - *f.toString() = v - return b[x:], nil -} - -func unmarshalStringPtr(b []byte, f pointer, w int) ([]byte, error) { - if w != WireBytes { - return b, errInternalBadWireType - } - x, n := decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - if x > uint64(len(b)) { - return nil, io.ErrUnexpectedEOF - } - v := string(b[:x]) - *f.toStringPtr() = &v - return b[x:], nil -} - -func unmarshalStringSlice(b []byte, f pointer, w int) ([]byte, error) { - if w != WireBytes { - return b, errInternalBadWireType - } - x, n := decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - if x > uint64(len(b)) { - return nil, io.ErrUnexpectedEOF - } - v := string(b[:x]) - s := f.toStringSlice() - *s = append(*s, v) - return b[x:], nil -} - -func unmarshalUTF8StringValue(b []byte, f pointer, w int) ([]byte, error) { - if w != WireBytes { - return b, errInternalBadWireType - } - x, n := decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - if x > uint64(len(b)) { - return nil, io.ErrUnexpectedEOF - } - v := string(b[:x]) - *f.toString() = v - if !utf8.ValidString(v) { - return b[x:], errInvalidUTF8 - } - return b[x:], nil -} - -func unmarshalUTF8StringPtr(b []byte, f pointer, w int) ([]byte, error) { - if w != WireBytes { - return b, errInternalBadWireType - } - x, n := decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - if x > uint64(len(b)) { - return nil, io.ErrUnexpectedEOF - } - v := string(b[:x]) - *f.toStringPtr() = &v - if !utf8.ValidString(v) { - return b[x:], errInvalidUTF8 - } - return b[x:], nil -} - -func unmarshalUTF8StringSlice(b []byte, f pointer, w int) ([]byte, error) { - if w != WireBytes { - return b, errInternalBadWireType - } - x, n := decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - if x > uint64(len(b)) { - return nil, io.ErrUnexpectedEOF - } - v := string(b[:x]) - s := f.toStringSlice() - *s = append(*s, v) - if !utf8.ValidString(v) { - return b[x:], errInvalidUTF8 - } - return b[x:], nil -} - -var emptyBuf [0]byte - -func unmarshalBytesValue(b []byte, f pointer, w int) ([]byte, error) { - if w != WireBytes { - return b, errInternalBadWireType - } - x, n := decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - if x > uint64(len(b)) { - return nil, io.ErrUnexpectedEOF - } - // The use of append here is a trick which avoids the zeroing - 
// that would be required if we used a make/copy pair. - // We append to emptyBuf instead of nil because we want - // a non-nil result even when the length is 0. - v := append(emptyBuf[:], b[:x]...) - *f.toBytes() = v - return b[x:], nil -} - -func unmarshalBytesSlice(b []byte, f pointer, w int) ([]byte, error) { - if w != WireBytes { - return b, errInternalBadWireType - } - x, n := decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - if x > uint64(len(b)) { - return nil, io.ErrUnexpectedEOF - } - v := append(emptyBuf[:], b[:x]...) - s := f.toBytesSlice() - *s = append(*s, v) - return b[x:], nil -} - -func makeUnmarshalMessagePtr(sub *unmarshalInfo, name string) unmarshaler { - return func(b []byte, f pointer, w int) ([]byte, error) { - if w != WireBytes { - return b, errInternalBadWireType - } - x, n := decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - if x > uint64(len(b)) { - return nil, io.ErrUnexpectedEOF - } - // First read the message field to see if something is there. - // The semantics of multiple submessages are weird. Instead of - // the last one winning (as it is for all other fields), multiple - // submessages are merged. - v := f.getPointer() - if v.isNil() { - v = valToPointer(reflect.New(sub.typ)) - f.setPointer(v) - } - err := sub.unmarshal(v, b[:x]) - if err != nil { - if r, ok := err.(*RequiredNotSetError); ok { - r.field = name + "." + r.field - } else { - return nil, err - } - } - return b[x:], err - } -} - -func makeUnmarshalMessageSlicePtr(sub *unmarshalInfo, name string) unmarshaler { - return func(b []byte, f pointer, w int) ([]byte, error) { - if w != WireBytes { - return b, errInternalBadWireType - } - x, n := decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - if x > uint64(len(b)) { - return nil, io.ErrUnexpectedEOF - } - v := valToPointer(reflect.New(sub.typ)) - err := sub.unmarshal(v, b[:x]) - if err != nil { - if r, ok := err.(*RequiredNotSetError); ok { - r.field = name + "." + r.field - } else { - return nil, err - } - } - f.appendPointer(v) - return b[x:], err - } -} - -func makeUnmarshalGroupPtr(sub *unmarshalInfo, name string) unmarshaler { - return func(b []byte, f pointer, w int) ([]byte, error) { - if w != WireStartGroup { - return b, errInternalBadWireType - } - x, y := findEndGroup(b) - if x < 0 { - return nil, io.ErrUnexpectedEOF - } - v := f.getPointer() - if v.isNil() { - v = valToPointer(reflect.New(sub.typ)) - f.setPointer(v) - } - err := sub.unmarshal(v, b[:x]) - if err != nil { - if r, ok := err.(*RequiredNotSetError); ok { - r.field = name + "." + r.field - } else { - return nil, err - } - } - return b[y:], err - } -} - -func makeUnmarshalGroupSlicePtr(sub *unmarshalInfo, name string) unmarshaler { - return func(b []byte, f pointer, w int) ([]byte, error) { - if w != WireStartGroup { - return b, errInternalBadWireType - } - x, y := findEndGroup(b) - if x < 0 { - return nil, io.ErrUnexpectedEOF - } - v := valToPointer(reflect.New(sub.typ)) - err := sub.unmarshal(v, b[:x]) - if err != nil { - if r, ok := err.(*RequiredNotSetError); ok { - r.field = name + "." 
+ r.field - } else { - return nil, err - } - } - f.appendPointer(v) - return b[y:], err - } -} - -func makeUnmarshalMap(f *reflect.StructField) unmarshaler { - t := f.Type - kt := t.Key() - vt := t.Elem() - tagArray := strings.Split(f.Tag.Get("protobuf"), ",") - valTags := strings.Split(f.Tag.Get("protobuf_val"), ",") - for _, t := range tagArray { - if strings.HasPrefix(t, "customtype=") { - valTags = append(valTags, t) - } - if t == "stdtime" { - valTags = append(valTags, t) - } - if t == "stdduration" { - valTags = append(valTags, t) - } - if t == "wktptr" { - valTags = append(valTags, t) - } - } - unmarshalKey := typeUnmarshaler(kt, f.Tag.Get("protobuf_key")) - unmarshalVal := typeUnmarshaler(vt, strings.Join(valTags, ",")) - return func(b []byte, f pointer, w int) ([]byte, error) { - // The map entry is a submessage. Figure out how big it is. - if w != WireBytes { - return nil, fmt.Errorf("proto: bad wiretype for map field: got %d want %d", w, WireBytes) - } - x, n := decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - if x > uint64(len(b)) { - return nil, io.ErrUnexpectedEOF - } - r := b[x:] // unused data to return - b = b[:x] // data for map entry - - // Note: we could use #keys * #values ~= 200 functions - // to do map decoding without reflection. Probably not worth it. - // Maps will be somewhat slow. Oh well. - - // Read key and value from data. - var nerr nonFatal - k := reflect.New(kt) - v := reflect.New(vt) - for len(b) > 0 { - x, n := decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - wire := int(x) & 7 - b = b[n:] - - var err error - switch x >> 3 { - case 1: - b, err = unmarshalKey(b, valToPointer(k), wire) - case 2: - b, err = unmarshalVal(b, valToPointer(v), wire) - default: - err = errInternalBadWireType // skip unknown tag - } - - if nerr.Merge(err) { - continue - } - if err != errInternalBadWireType { - return nil, err - } - - // Skip past unknown fields. - b, err = skipField(b, wire) - if err != nil { - return nil, err - } - } - - // Get map, allocate if needed. - m := f.asPointerTo(t).Elem() // an addressable map[K]T - if m.IsNil() { - m.Set(reflect.MakeMap(t)) - } - - // Insert into map. - m.SetMapIndex(k.Elem(), v.Elem()) - - return r, nerr.E - } -} - -// makeUnmarshalOneof makes an unmarshaler for oneof fields. -// for: -// message Msg { -// oneof F { -// int64 X = 1; -// float64 Y = 2; -// } -// } -// typ is the type of the concrete entry for a oneof case (e.g. Msg_X). -// ityp is the interface type of the oneof field (e.g. isMsg_F). -// unmarshal is the unmarshaler for the base type of the oneof case (e.g. int64). -// Note that this function will be called once for each case in the oneof. -func makeUnmarshalOneof(typ, ityp reflect.Type, unmarshal unmarshaler) unmarshaler { - sf := typ.Field(0) - field0 := toField(&sf) - return func(b []byte, f pointer, w int) ([]byte, error) { - // Allocate holder for value. - v := reflect.New(typ) - - // Unmarshal data into holder. - // We unmarshal into the first field of the holder object. - var err error - var nerr nonFatal - b, err = unmarshal(b, valToPointer(v).offset(field0), w) - if !nerr.Merge(err) { - return nil, err - } - - // Write pointer to holder into target field. - f.asPointerTo(ityp).Elem().Set(v) - - return b, nerr.E - } -} - -// Error used by decode internally. -var errInternalBadWireType = errors.New("proto: internal error: bad wiretype") - -// skipField skips past a field of type wire and returns the remaining bytes. 
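The makeUnmarshalMap closure above relies on the map-entry encoding: each entry is a nested message whose field 1 is the key and field 2 the value (the switch on x >> 3), with unknown tags skipped. A hand-rolled decode of one map<string,uint32> entry under that layout (illustrative only; binary.Uvarint stands in for decodeVarint):

    package main

    import (
        "encoding/binary"
        "fmt"
    )

    func main() {
        // One encoded entry of a map<string,uint32>:
        // field 1 (key), wire type 2; field 2 (value), wire type 0.
        entry := []byte{
            0x0a, 0x02, 'i', 'd', // tag 1<<3|2, length 2, "id"
            0x10, 0x07, // tag 2<<3|0, varint 7
        }
        for len(entry) > 0 {
            tag, n := binary.Uvarint(entry)
            if n <= 0 {
                break // malformed varint
            }
            entry = entry[n:]
            field, wire := tag>>3, tag&7
            switch {
            case field == 1 && wire == 2: // length-delimited key
                l, k := binary.Uvarint(entry)
                fmt.Printf("key=%q\n", entry[k:k+int(l)])
                entry = entry[k+int(l):]
            case field == 2 && wire == 0: // varint value
                v, k := binary.Uvarint(entry)
                fmt.Println("value =", v)
                entry = entry[k:]
            default: // the real code calls skipField here
                return
            }
        }
    }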
-func skipField(b []byte, wire int) ([]byte, error) { - switch wire { - case WireVarint: - _, k := decodeVarint(b) - if k == 0 { - return b, io.ErrUnexpectedEOF - } - b = b[k:] - case WireFixed32: - if len(b) < 4 { - return b, io.ErrUnexpectedEOF - } - b = b[4:] - case WireFixed64: - if len(b) < 8 { - return b, io.ErrUnexpectedEOF - } - b = b[8:] - case WireBytes: - m, k := decodeVarint(b) - if k == 0 || uint64(len(b)-k) < m { - return b, io.ErrUnexpectedEOF - } - b = b[uint64(k)+m:] - case WireStartGroup: - _, i := findEndGroup(b) - if i == -1 { - return b, io.ErrUnexpectedEOF - } - b = b[i:] - default: - return b, fmt.Errorf("proto: can't skip unknown wire type %d", wire) - } - return b, nil -} - -// findEndGroup finds the index of the next EndGroup tag. -// Groups may be nested, so the "next" EndGroup tag is the first -// unpaired EndGroup. -// findEndGroup returns the indexes of the start and end of the EndGroup tag. -// Returns (-1,-1) if it can't find one. -func findEndGroup(b []byte) (int, int) { - depth := 1 - i := 0 - for { - x, n := decodeVarint(b[i:]) - if n == 0 { - return -1, -1 - } - j := i - i += n - switch x & 7 { - case WireVarint: - _, k := decodeVarint(b[i:]) - if k == 0 { - return -1, -1 - } - i += k - case WireFixed32: - if len(b)-4 < i { - return -1, -1 - } - i += 4 - case WireFixed64: - if len(b)-8 < i { - return -1, -1 - } - i += 8 - case WireBytes: - m, k := decodeVarint(b[i:]) - if k == 0 { - return -1, -1 - } - i += k - if uint64(len(b)-i) < m { - return -1, -1 - } - i += int(m) - case WireStartGroup: - depth++ - case WireEndGroup: - depth-- - if depth == 0 { - return j, i - } - default: - return -1, -1 - } - } -} - -// encodeVarint appends a varint-encoded integer to b and returns the result. -func encodeVarint(b []byte, x uint64) []byte { - for x >= 1<<7 { - b = append(b, byte(x&0x7f|0x80)) - x >>= 7 - } - return append(b, byte(x)) -} - -// decodeVarint reads a varint-encoded integer from b. -// Returns the decoded integer and the number of bytes read. -// If there is an error, it returns 0,0. 
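encodeVarint above emits seven payload bits per byte, setting the high bit on every byte except the last. The standard library exposes the same scheme, which makes the wire bytes easy to inspect; a round-trip sketch:

    package main

    import (
        "encoding/binary"
        "fmt"
    )

    func main() {
        buf := make([]byte, binary.MaxVarintLen64)
        n := binary.PutUvarint(buf, 300)
        fmt.Printf("% x\n", buf[:n]) // ac 02: 0b1_0101100, 0b0_0000010
        v, _ := binary.Uvarint(buf[:n])
        fmt.Println(v) // 300
    }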
-func decodeVarint(b []byte) (uint64, int) { - var x, y uint64 - if len(b) == 0 { - goto bad - } - x = uint64(b[0]) - if x < 0x80 { - return x, 1 - } - x -= 0x80 - - if len(b) <= 1 { - goto bad - } - y = uint64(b[1]) - x += y << 7 - if y < 0x80 { - return x, 2 - } - x -= 0x80 << 7 - - if len(b) <= 2 { - goto bad - } - y = uint64(b[2]) - x += y << 14 - if y < 0x80 { - return x, 3 - } - x -= 0x80 << 14 - - if len(b) <= 3 { - goto bad - } - y = uint64(b[3]) - x += y << 21 - if y < 0x80 { - return x, 4 - } - x -= 0x80 << 21 - - if len(b) <= 4 { - goto bad - } - y = uint64(b[4]) - x += y << 28 - if y < 0x80 { - return x, 5 - } - x -= 0x80 << 28 - - if len(b) <= 5 { - goto bad - } - y = uint64(b[5]) - x += y << 35 - if y < 0x80 { - return x, 6 - } - x -= 0x80 << 35 - - if len(b) <= 6 { - goto bad - } - y = uint64(b[6]) - x += y << 42 - if y < 0x80 { - return x, 7 - } - x -= 0x80 << 42 - - if len(b) <= 7 { - goto bad - } - y = uint64(b[7]) - x += y << 49 - if y < 0x80 { - return x, 8 - } - x -= 0x80 << 49 - - if len(b) <= 8 { - goto bad - } - y = uint64(b[8]) - x += y << 56 - if y < 0x80 { - return x, 9 - } - x -= 0x80 << 56 - - if len(b) <= 9 { - goto bad - } - y = uint64(b[9]) - x += y << 63 - if y < 2 { - return x, 10 - } - -bad: - return 0, 0 -} diff --git a/vendor/github.com/gogo/protobuf/proto/table_unmarshal_gogo.go b/vendor/github.com/gogo/protobuf/proto/table_unmarshal_gogo.go deleted file mode 100644 index 00d6c7ad93..0000000000 --- a/vendor/github.com/gogo/protobuf/proto/table_unmarshal_gogo.go +++ /dev/null @@ -1,385 +0,0 @@ -// Protocol Buffers for Go with Gadgets -// -// Copyright (c) 2018, The GoGo Authors. All rights reserved. -// http://github.com/gogo/protobuf -// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following disclaimer -// in the documentation and/or other materials provided with the -// distribution. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -package proto - -import ( - "io" - "reflect" -) - -func makeUnmarshalMessage(sub *unmarshalInfo, name string) unmarshaler { - return func(b []byte, f pointer, w int) ([]byte, error) { - if w != WireBytes { - return nil, errInternalBadWireType - } - x, n := decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - if x > uint64(len(b)) { - return nil, io.ErrUnexpectedEOF - } - // First read the message field to see if something is there. - // The semantics of multiple submessages are weird. 
Instead of
-    // the last one winning (as it is for all other fields), multiple
-    // submessages are merged.
-    v := f // gogo: changed from v := f.getPointer()
-    if v.isNil() {
-      v = valToPointer(reflect.New(sub.typ))
-      f.setPointer(v)
-    }
-    err := sub.unmarshal(v, b[:x])
-    if err != nil {
-      if r, ok := err.(*RequiredNotSetError); ok {
-        r.field = name + "." + r.field
-      } else {
-        return nil, err
-      }
-    }
-    return b[x:], err
-  }
-}
-
-func makeUnmarshalMessageSlice(sub *unmarshalInfo, name string) unmarshaler {
-  return func(b []byte, f pointer, w int) ([]byte, error) {
-    if w != WireBytes {
-      return nil, errInternalBadWireType
-    }
-    x, n := decodeVarint(b)
-    if n == 0 {
-      return nil, io.ErrUnexpectedEOF
-    }
-    b = b[n:]
-    if x > uint64(len(b)) {
-      return nil, io.ErrUnexpectedEOF
-    }
-    v := valToPointer(reflect.New(sub.typ))
-    err := sub.unmarshal(v, b[:x])
-    if err != nil {
-      if r, ok := err.(*RequiredNotSetError); ok {
-        r.field = name + "." + r.field
-      } else {
-        return nil, err
-      }
-    }
-    f.appendRef(v, sub.typ) // gogo: changed from f.appendPointer(v)
-    return b[x:], err
-  }
-}
-
-func makeUnmarshalCustomPtr(sub *unmarshalInfo, name string) unmarshaler {
-  return func(b []byte, f pointer, w int) ([]byte, error) {
-    if w != WireBytes {
-      return nil, errInternalBadWireType
-    }
-    x, n := decodeVarint(b)
-    if n == 0 {
-      return nil, io.ErrUnexpectedEOF
-    }
-    b = b[n:]
-    if x > uint64(len(b)) {
-      return nil, io.ErrUnexpectedEOF
-    }
-
-    s := f.asPointerTo(reflect.PtrTo(sub.typ)).Elem()
-    s.Set(reflect.New(sub.typ))
-    m := s.Interface().(custom)
-    if err := m.Unmarshal(b[:x]); err != nil {
-      return nil, err
-    }
-    return b[x:], nil
-  }
-}
-
-func makeUnmarshalCustomSlice(sub *unmarshalInfo, name string) unmarshaler {
-  return func(b []byte, f pointer, w int) ([]byte, error) {
-    if w != WireBytes {
-      return nil, errInternalBadWireType
-    }
-    x, n := decodeVarint(b)
-    if n == 0 {
-      return nil, io.ErrUnexpectedEOF
-    }
-    b = b[n:]
-    if x > uint64(len(b)) {
-      return nil, io.ErrUnexpectedEOF
-    }
-    m := reflect.New(sub.typ)
-    c := m.Interface().(custom)
-    if err := c.Unmarshal(b[:x]); err != nil {
-      return nil, err
-    }
-    v := valToPointer(m)
-    f.appendRef(v, sub.typ)
-    return b[x:], nil
-  }
-}
-
-func makeUnmarshalCustom(sub *unmarshalInfo, name string) unmarshaler {
-  return func(b []byte, f pointer, w int) ([]byte, error) {
-    if w != WireBytes {
-      return nil, errInternalBadWireType
-    }
-    x, n := decodeVarint(b)
-    if n == 0 {
-      return nil, io.ErrUnexpectedEOF
-    }
-    b = b[n:]
-    if x > uint64(len(b)) {
-      return nil, io.ErrUnexpectedEOF
-    }
-
-    m := f.asPointerTo(sub.typ).Interface().(custom)
-    if err := m.Unmarshal(b[:x]); err != nil {
-      return nil, err
-    }
-    return b[x:], nil
-  }
-}
-
-func makeUnmarshalTime(sub *unmarshalInfo, name string) unmarshaler {
-  return func(b []byte, f pointer, w int) ([]byte, error) {
-    if w != WireBytes {
-      return nil, errInternalBadWireType
-    }
-    x, n := decodeVarint(b)
-    if n == 0 {
-      return nil, io.ErrUnexpectedEOF
-    }
-    b = b[n:]
-    if x > uint64(len(b)) {
-      return nil, io.ErrUnexpectedEOF
-    }
-    m := &timestamp{}
-    if err := Unmarshal(b[:x], m); err != nil {
-      return nil, err
-    }
-    t, err := timestampFromProto(m)
-    if err != nil {
-      return nil, err
-    }
-    s := f.asPointerTo(sub.typ).Elem()
-    s.Set(reflect.ValueOf(t))
-    return b[x:], nil
-  }
-}
-
-func makeUnmarshalTimePtr(sub *unmarshalInfo, name string) unmarshaler {
-  return func(b []byte, f pointer, w int) ([]byte, error) {
-    if w != WireBytes {
-      return nil, errInternalBadWireType
-    }
-    x, n := decodeVarint(b)
-    if n == 0 {
-      return nil, io.ErrUnexpectedEOF
-    }
-    b = b[n:]
-    if x > uint64(len(b)) {
-      return nil, io.ErrUnexpectedEOF
-    }
-    m := &timestamp{}
-    if err := Unmarshal(b[:x], m); err != nil {
-      return nil, err
-    }
-    t, err := timestampFromProto(m)
-    if err != nil {
-      return nil, err
-    }
-    s := f.asPointerTo(reflect.PtrTo(sub.typ)).Elem()
-    s.Set(reflect.ValueOf(&t))
-    return b[x:], nil
-  }
-}
-
-func makeUnmarshalTimePtrSlice(sub *unmarshalInfo, name string) unmarshaler {
-  return func(b []byte, f pointer, w int) ([]byte, error) {
-    if w != WireBytes {
-      return nil, errInternalBadWireType
-    }
-    x, n := decodeVarint(b)
-    if n == 0 {
-      return nil, io.ErrUnexpectedEOF
-    }
-    b = b[n:]
-    if x > uint64(len(b)) {
-      return nil, io.ErrUnexpectedEOF
-    }
-    m := &timestamp{}
-    if err := Unmarshal(b[:x], m); err != nil {
-      return nil, err
-    }
-    t, err := timestampFromProto(m)
-    if err != nil {
-      return nil, err
-    }
-    slice := f.getSlice(reflect.PtrTo(sub.typ))
-    newSlice := reflect.Append(slice, reflect.ValueOf(&t))
-    slice.Set(newSlice)
-    return b[x:], nil
-  }
-}
-
-func makeUnmarshalTimeSlice(sub *unmarshalInfo, name string) unmarshaler {
-  return func(b []byte, f pointer, w int) ([]byte, error) {
-    if w != WireBytes {
-      return nil, errInternalBadWireType
-    }
-    x, n := decodeVarint(b)
-    if n == 0 {
-      return nil, io.ErrUnexpectedEOF
-    }
-    b = b[n:]
-    if x > uint64(len(b)) {
-      return nil, io.ErrUnexpectedEOF
-    }
-    m := &timestamp{}
-    if err := Unmarshal(b[:x], m); err != nil {
-      return nil, err
-    }
-    t, err := timestampFromProto(m)
-    if err != nil {
-      return nil, err
-    }
-    slice := f.getSlice(sub.typ)
-    newSlice := reflect.Append(slice, reflect.ValueOf(t))
-    slice.Set(newSlice)
-    return b[x:], nil
-  }
-}
-
-func makeUnmarshalDurationPtr(sub *unmarshalInfo, name string) unmarshaler {
-  return func(b []byte, f pointer, w int) ([]byte, error) {
-    if w != WireBytes {
-      return nil, errInternalBadWireType
-    }
-    x, n := decodeVarint(b)
-    if n == 0 {
-      return nil, io.ErrUnexpectedEOF
-    }
-    b = b[n:]
-    if x > uint64(len(b)) {
-      return nil, io.ErrUnexpectedEOF
-    }
-    m := &duration{}
-    if err := Unmarshal(b[:x], m); err != nil {
-      return nil, err
-    }
-    d, err := durationFromProto(m)
-    if err != nil {
-      return nil, err
-    }
-    s := f.asPointerTo(reflect.PtrTo(sub.typ)).Elem()
-    s.Set(reflect.ValueOf(&d))
-    return b[x:], nil
-  }
-}
-
-func makeUnmarshalDuration(sub *unmarshalInfo, name string) unmarshaler {
-  return func(b []byte, f pointer, w int) ([]byte, error) {
-    if w != WireBytes {
-      return nil, errInternalBadWireType
-    }
-    x, n := decodeVarint(b)
-    if n == 0 {
-      return nil, io.ErrUnexpectedEOF
-    }
-    b = b[n:]
-    if x > uint64(len(b)) {
-      return nil, io.ErrUnexpectedEOF
-    }
-    m := &duration{}
-    if err := Unmarshal(b[:x], m); err != nil {
-      return nil, err
-    }
-    d, err := durationFromProto(m)
-    if err != nil {
-      return nil, err
-    }
-    s := f.asPointerTo(sub.typ).Elem()
-    s.Set(reflect.ValueOf(d))
-    return b[x:], nil
-  }
-}
-
-func makeUnmarshalDurationPtrSlice(sub *unmarshalInfo, name string) unmarshaler {
-  return func(b []byte, f pointer, w int) ([]byte, error) {
-    if w != WireBytes {
-      return nil, errInternalBadWireType
-    }
-    x, n := decodeVarint(b)
-    if n == 0 {
-      return nil, io.ErrUnexpectedEOF
-    }
-    b = b[n:]
-    if x > uint64(len(b)) {
-      return nil, io.ErrUnexpectedEOF
-    }
-    m := &duration{}
-    if err := Unmarshal(b[:x], m); err != nil {
-      return nil, err
-    }
-    d, err := durationFromProto(m)
-    if err != nil {
-      return nil, err
-    }
-    slice := f.getSlice(reflect.PtrTo(sub.typ))
-    newSlice := reflect.Append(slice, reflect.ValueOf(&d))
-
slice.Set(newSlice) - return b[x:], nil - } -} - -func makeUnmarshalDurationSlice(sub *unmarshalInfo, name string) unmarshaler { - return func(b []byte, f pointer, w int) ([]byte, error) { - if w != WireBytes { - return nil, errInternalBadWireType - } - x, n := decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - if x > uint64(len(b)) { - return nil, io.ErrUnexpectedEOF - } - m := &duration{} - if err := Unmarshal(b[:x], m); err != nil { - return nil, err - } - d, err := durationFromProto(m) - if err != nil { - return nil, err - } - slice := f.getSlice(sub.typ) - newSlice := reflect.Append(slice, reflect.ValueOf(d)) - slice.Set(newSlice) - return b[x:], nil - } -} diff --git a/vendor/github.com/gogo/protobuf/proto/text.go b/vendor/github.com/gogo/protobuf/proto/text.go deleted file mode 100644 index 87416afe95..0000000000 --- a/vendor/github.com/gogo/protobuf/proto/text.go +++ /dev/null @@ -1,930 +0,0 @@ -// Protocol Buffers for Go with Gadgets -// -// Copyright (c) 2013, The GoGo Authors. All rights reserved. -// http://github.com/gogo/protobuf -// -// Go support for Protocol Buffers - Google's data interchange format -// -// Copyright 2010 The Go Authors. All rights reserved. -// https://github.com/golang/protobuf -// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following disclaimer -// in the documentation and/or other materials provided with the -// distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived from -// this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -package proto - -// Functions for writing the text protocol buffer format. - -import ( - "bufio" - "bytes" - "encoding" - "errors" - "fmt" - "io" - "log" - "math" - "reflect" - "sort" - "strings" - "sync" - "time" -) - -var ( - newline = []byte("\n") - spaces = []byte(" ") - endBraceNewline = []byte("}\n") - backslashN = []byte{'\\', 'n'} - backslashR = []byte{'\\', 'r'} - backslashT = []byte{'\\', 't'} - backslashDQ = []byte{'\\', '"'} - backslashBS = []byte{'\\', '\\'} - posInf = []byte("inf") - negInf = []byte("-inf") - nan = []byte("nan") -) - -type writer interface { - io.Writer - WriteByte(byte) error -} - -// textWriter is an io.Writer that tracks its indentation level. 
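The small writer interface above (io.Writer plus WriteByte) is what lets textWriter emit single bytes without per-call allocations; *bufio.Writer satisfies it, which is why Marshal later in this file wraps any plain io.Writer in one. A compile-time check of that fact:

    package main

    import (
        "bufio"
        "io"
        "os"
    )

    // Same shape as the deleted interface.
    type writer interface {
        io.Writer
        WriteByte(byte) error
    }

    // Compile-time proof that *bufio.Writer provides both methods.
    var _ writer = (*bufio.Writer)(nil)

    func main() {
        w := bufio.NewWriter(os.Stdout)
        w.WriteString("text format output")
        w.WriteByte('\n')
        w.Flush()
    }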
-type textWriter struct { - ind int - complete bool // if the current position is a complete line - compact bool // whether to write out as a one-liner - w writer -} - -func (w *textWriter) WriteString(s string) (n int, err error) { - if !strings.Contains(s, "\n") { - if !w.compact && w.complete { - w.writeIndent() - } - w.complete = false - return io.WriteString(w.w, s) - } - // WriteString is typically called without newlines, so this - // codepath and its copy are rare. We copy to avoid - // duplicating all of Write's logic here. - return w.Write([]byte(s)) -} - -func (w *textWriter) Write(p []byte) (n int, err error) { - newlines := bytes.Count(p, newline) - if newlines == 0 { - if !w.compact && w.complete { - w.writeIndent() - } - n, err = w.w.Write(p) - w.complete = false - return n, err - } - - frags := bytes.SplitN(p, newline, newlines+1) - if w.compact { - for i, frag := range frags { - if i > 0 { - if err := w.w.WriteByte(' '); err != nil { - return n, err - } - n++ - } - nn, err := w.w.Write(frag) - n += nn - if err != nil { - return n, err - } - } - return n, nil - } - - for i, frag := range frags { - if w.complete { - w.writeIndent() - } - nn, err := w.w.Write(frag) - n += nn - if err != nil { - return n, err - } - if i+1 < len(frags) { - if err := w.w.WriteByte('\n'); err != nil { - return n, err - } - n++ - } - } - w.complete = len(frags[len(frags)-1]) == 0 - return n, nil -} - -func (w *textWriter) WriteByte(c byte) error { - if w.compact && c == '\n' { - c = ' ' - } - if !w.compact && w.complete { - w.writeIndent() - } - err := w.w.WriteByte(c) - w.complete = c == '\n' - return err -} - -func (w *textWriter) indent() { w.ind++ } - -func (w *textWriter) unindent() { - if w.ind == 0 { - log.Print("proto: textWriter unindented too far") - return - } - w.ind-- -} - -func writeName(w *textWriter, props *Properties) error { - if _, err := w.WriteString(props.OrigName); err != nil { - return err - } - if props.Wire != "group" { - return w.WriteByte(':') - } - return nil -} - -func requiresQuotes(u string) bool { - // When type URL contains any characters except [0-9A-Za-z./\-]*, it must be quoted. - for _, ch := range u { - switch { - case ch == '.' || ch == '/' || ch == '_': - continue - case '0' <= ch && ch <= '9': - continue - case 'A' <= ch && ch <= 'Z': - continue - case 'a' <= ch && ch <= 'z': - continue - default: - return true - } - } - return false -} - -// isAny reports whether sv is a google.protobuf.Any message -func isAny(sv reflect.Value) bool { - type wkt interface { - XXX_WellKnownType() string - } - t, ok := sv.Addr().Interface().(wkt) - return ok && t.XXX_WellKnownType() == "Any" -} - -// writeProto3Any writes an expanded google.protobuf.Any message. -// -// It returns (false, nil) if sv value can't be unmarshaled (e.g. because -// required messages are not linked in). -// -// It returns (true, error) when sv was written in expanded format or an error -// was encountered. 
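requiresQuotes above gates whether an Any type URL is emitted bare or quoted: anything outside letters, digits, '.', '/' and '_' forces quoting. The same rule as a standalone probe:

    package main

    import "fmt"

    // Same rule as the deleted helper: quote unless every rune is a
    // letter, digit, '.', '/' or '_'.
    func requiresQuotes(u string) bool {
        for _, ch := range u {
            switch {
            case ch == '.' || ch == '/' || ch == '_':
            case '0' <= ch && ch <= '9':
            case 'A' <= ch && ch <= 'Z':
            case 'a' <= ch && ch <= 'z':
            default:
                return true
            }
        }
        return false
    }

    func main() {
        fmt.Println(requiresQuotes("type.googleapis.com/pkg.Msg")) // false
        fmt.Println(requiresQuotes("example.com/has space"))       // true
    }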
-func (tm *TextMarshaler) writeProto3Any(w *textWriter, sv reflect.Value) (bool, error) { - turl := sv.FieldByName("TypeUrl") - val := sv.FieldByName("Value") - if !turl.IsValid() || !val.IsValid() { - return true, errors.New("proto: invalid google.protobuf.Any message") - } - - b, ok := val.Interface().([]byte) - if !ok { - return true, errors.New("proto: invalid google.protobuf.Any message") - } - - parts := strings.Split(turl.String(), "/") - mt := MessageType(parts[len(parts)-1]) - if mt == nil { - return false, nil - } - m := reflect.New(mt.Elem()) - if err := Unmarshal(b, m.Interface().(Message)); err != nil { - return false, nil - } - w.Write([]byte("[")) - u := turl.String() - if requiresQuotes(u) { - writeString(w, u) - } else { - w.Write([]byte(u)) - } - if w.compact { - w.Write([]byte("]:<")) - } else { - w.Write([]byte("]: <\n")) - w.ind++ - } - if err := tm.writeStruct(w, m.Elem()); err != nil { - return true, err - } - if w.compact { - w.Write([]byte("> ")) - } else { - w.ind-- - w.Write([]byte(">\n")) - } - return true, nil -} - -func (tm *TextMarshaler) writeStruct(w *textWriter, sv reflect.Value) error { - if tm.ExpandAny && isAny(sv) { - if canExpand, err := tm.writeProto3Any(w, sv); canExpand { - return err - } - } - st := sv.Type() - sprops := GetProperties(st) - for i := 0; i < sv.NumField(); i++ { - fv := sv.Field(i) - props := sprops.Prop[i] - name := st.Field(i).Name - - if name == "XXX_NoUnkeyedLiteral" { - continue - } - - if strings.HasPrefix(name, "XXX_") { - // There are two XXX_ fields: - // XXX_unrecognized []byte - // XXX_extensions map[int32]proto.Extension - // The first is handled here; - // the second is handled at the bottom of this function. - if name == "XXX_unrecognized" && !fv.IsNil() { - if err := writeUnknownStruct(w, fv.Interface().([]byte)); err != nil { - return err - } - } - continue - } - if fv.Kind() == reflect.Ptr && fv.IsNil() { - // Field not filled in. This could be an optional field or - // a required field that wasn't filled in. Either way, there - // isn't anything we can show for it. - continue - } - if fv.Kind() == reflect.Slice && fv.IsNil() { - // Repeated field that is empty, or a bytes field that is unused. - continue - } - - if props.Repeated && fv.Kind() == reflect.Slice { - // Repeated field. - for j := 0; j < fv.Len(); j++ { - if err := writeName(w, props); err != nil { - return err - } - if !w.compact { - if err := w.WriteByte(' '); err != nil { - return err - } - } - v := fv.Index(j) - if v.Kind() == reflect.Ptr && v.IsNil() { - // A nil message in a repeated field is not valid, - // but we can handle that more gracefully than panicking. - if _, err := w.Write([]byte("\n")); err != nil { - return err - } - continue - } - if len(props.Enum) > 0 { - if err := tm.writeEnum(w, v, props); err != nil { - return err - } - } else if err := tm.writeAny(w, v, props); err != nil { - return err - } - if err := w.WriteByte('\n'); err != nil { - return err - } - } - continue - } - if fv.Kind() == reflect.Map { - // Map fields are rendered as a repeated struct with key/value fields. 
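The map branch that follows renders each entry as an angle-bracketed submessage with key: and value: lines. A sketch of just the output shape (writeMapEntry is a hypothetical standalone helper, not the package's textWriter):

    package main

    import (
        "fmt"
        "strings"
    )

    // writeMapEntry reproduces only the textual shape of one rendered
    // map entry.
    func writeMapEntry(b *strings.Builder, field, key string, val int32) {
        fmt.Fprintf(b, "%s: <\n", field)
        fmt.Fprintf(b, "  key: %q\n", key)
        fmt.Fprintf(b, "  value: %d\n", val)
        b.WriteString(">\n")
    }

    func main() {
        var b strings.Builder
        writeMapEntry(&b, "counts", "a", 1)
        fmt.Print(b.String())
        // counts: <
        //   key: "a"
        //   value: 1
        // >
    }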
- keys := fv.MapKeys() - sort.Sort(mapKeys(keys)) - for _, key := range keys { - val := fv.MapIndex(key) - if err := writeName(w, props); err != nil { - return err - } - if !w.compact { - if err := w.WriteByte(' '); err != nil { - return err - } - } - // open struct - if err := w.WriteByte('<'); err != nil { - return err - } - if !w.compact { - if err := w.WriteByte('\n'); err != nil { - return err - } - } - w.indent() - // key - if _, err := w.WriteString("key:"); err != nil { - return err - } - if !w.compact { - if err := w.WriteByte(' '); err != nil { - return err - } - } - if err := tm.writeAny(w, key, props.MapKeyProp); err != nil { - return err - } - if err := w.WriteByte('\n'); err != nil { - return err - } - // nil values aren't legal, but we can avoid panicking because of them. - if val.Kind() != reflect.Ptr || !val.IsNil() { - // value - if _, err := w.WriteString("value:"); err != nil { - return err - } - if !w.compact { - if err := w.WriteByte(' '); err != nil { - return err - } - } - if err := tm.writeAny(w, val, props.MapValProp); err != nil { - return err - } - if err := w.WriteByte('\n'); err != nil { - return err - } - } - // close struct - w.unindent() - if err := w.WriteByte('>'); err != nil { - return err - } - if err := w.WriteByte('\n'); err != nil { - return err - } - } - continue - } - if props.proto3 && fv.Kind() == reflect.Slice && fv.Len() == 0 { - // empty bytes field - continue - } - if props.proto3 && fv.Kind() != reflect.Ptr && fv.Kind() != reflect.Slice { - // proto3 non-repeated scalar field; skip if zero value - if isProto3Zero(fv) { - continue - } - } - - if fv.Kind() == reflect.Interface { - // Check if it is a oneof. - if st.Field(i).Tag.Get("protobuf_oneof") != "" { - // fv is nil, or holds a pointer to generated struct. - // That generated struct has exactly one field, - // which has a protobuf struct tag. - if fv.IsNil() { - continue - } - inner := fv.Elem().Elem() // interface -> *T -> T - tag := inner.Type().Field(0).Tag.Get("protobuf") - props = new(Properties) // Overwrite the outer props var, but not its pointee. - props.Parse(tag) - // Write the value in the oneof, not the oneof itself. - fv = inner.Field(0) - - // Special case to cope with malformed messages gracefully: - // If the value in the oneof is a nil pointer, don't panic - // in writeAny. - if fv.Kind() == reflect.Ptr && fv.IsNil() { - // Use errors.New so writeAny won't render quotes. - msg := errors.New("/* nil */") - fv = reflect.ValueOf(&msg).Elem() - } - } - } - - if err := writeName(w, props); err != nil { - return err - } - if !w.compact { - if err := w.WriteByte(' '); err != nil { - return err - } - } - - if len(props.Enum) > 0 { - if err := tm.writeEnum(w, fv, props); err != nil { - return err - } - } else if err := tm.writeAny(w, fv, props); err != nil { - return err - } - - if err := w.WriteByte('\n'); err != nil { - return err - } - } - - // Extensions (the XXX_extensions field). - pv := sv - if pv.CanAddr() { - pv = sv.Addr() - } else { - pv = reflect.New(sv.Type()) - pv.Elem().Set(sv) - } - if _, err := extendable(pv.Interface()); err == nil { - if err := tm.writeExtensions(w, pv); err != nil { - return err - } - } - - return nil -} - -var textMarshalerType = reflect.TypeOf((*encoding.TextMarshaler)(nil)).Elem() - -// writeAny writes an arbitrary field. 
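writeAny, which follows, special-cases IEEE floating-point infinities and NaN as the bare tokens inf, -inf and nan before falling through to normal formatting. The same dispatch in isolation:

    package main

    import (
        "fmt"
        "math"
    )

    func textFloat(x float64) string {
        switch {
        case math.IsInf(x, 1):
            return "inf"
        case math.IsInf(x, -1):
            return "-inf"
        case math.IsNaN(x):
            return "nan"
        }
        return fmt.Sprint(x) // ordinary values use normal formatting
    }

    func main() {
        fmt.Println(textFloat(math.Inf(-1)), textFloat(math.NaN()), textFloat(1.5))
        // -inf nan 1.5
    }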
-func (tm *TextMarshaler) writeAny(w *textWriter, v reflect.Value, props *Properties) error { - v = reflect.Indirect(v) - - if props != nil { - if len(props.CustomType) > 0 { - custom, ok := v.Interface().(Marshaler) - if ok { - data, err := custom.Marshal() - if err != nil { - return err - } - if err := writeString(w, string(data)); err != nil { - return err - } - return nil - } - } else if len(props.CastType) > 0 { - if _, ok := v.Interface().(interface { - String() string - }); ok { - switch v.Kind() { - case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64, - reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64: - _, err := fmt.Fprintf(w, "%d", v.Interface()) - return err - } - } - } else if props.StdTime { - t, ok := v.Interface().(time.Time) - if !ok { - return fmt.Errorf("stdtime is not time.Time, but %T", v.Interface()) - } - tproto, err := timestampProto(t) - if err != nil { - return err - } - propsCopy := *props // Make a copy so that this is goroutine-safe - propsCopy.StdTime = false - err = tm.writeAny(w, reflect.ValueOf(tproto), &propsCopy) - return err - } else if props.StdDuration { - d, ok := v.Interface().(time.Duration) - if !ok { - return fmt.Errorf("stdtime is not time.Duration, but %T", v.Interface()) - } - dproto := durationProto(d) - propsCopy := *props // Make a copy so that this is goroutine-safe - propsCopy.StdDuration = false - err := tm.writeAny(w, reflect.ValueOf(dproto), &propsCopy) - return err - } - } - - // Floats have special cases. - if v.Kind() == reflect.Float32 || v.Kind() == reflect.Float64 { - x := v.Float() - var b []byte - switch { - case math.IsInf(x, 1): - b = posInf - case math.IsInf(x, -1): - b = negInf - case math.IsNaN(x): - b = nan - } - if b != nil { - _, err := w.Write(b) - return err - } - // Other values are handled below. - } - - // We don't attempt to serialise every possible value type; only those - // that can occur in protocol buffers. - switch v.Kind() { - case reflect.Slice: - // Should only be a []byte; repeated fields are handled in writeStruct. - if err := writeString(w, string(v.Bytes())); err != nil { - return err - } - case reflect.String: - if err := writeString(w, v.String()); err != nil { - return err - } - case reflect.Struct: - // Required/optional group/message. - var bra, ket byte = '<', '>' - if props != nil && props.Wire == "group" { - bra, ket = '{', '}' - } - if err := w.WriteByte(bra); err != nil { - return err - } - if !w.compact { - if err := w.WriteByte('\n'); err != nil { - return err - } - } - w.indent() - if v.CanAddr() { - // Calling v.Interface on a struct causes the reflect package to - // copy the entire struct. This is racy with the new Marshaler - // since we atomically update the XXX_sizecache. - // - // Thus, we retrieve a pointer to the struct if possible to avoid - // a race since v.Interface on the pointer doesn't copy the struct. - // - // If v is not addressable, then we are not worried about a race - // since it implies that the binary Marshaler cannot possibly be - // mutating this value. 
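The writeString helper a little further below quotes values byte by byte, escaping anything non-printable as a three-digit octal sequence for parity with the C++ text-format writer. A trimmed-down equivalent (it omits the \r and \t cases the real one handles):

    package main

    import "fmt"

    func textQuote(s string) string {
        out := []byte{'"'}
        for i := 0; i < len(s); i++ { // bytes, not runes
            switch c := s[i]; c {
            case '\n':
                out = append(out, `\n`...)
            case '"':
                out = append(out, `\"`...)
            case '\\':
                out = append(out, `\\`...)
            default:
                if c >= 0x20 && c < 0x7f { // the deleted isprint
                    out = append(out, c)
                } else {
                    out = append(out, fmt.Sprintf("\\%03o", c)...)
                }
            }
        }
        return string(append(out, '"'))
    }

    func main() {
        fmt.Println(textQuote("héllo")) // "h\303\251llo"
    }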
- v = v.Addr() - } - if v.Type().Implements(textMarshalerType) { - text, err := v.Interface().(encoding.TextMarshaler).MarshalText() - if err != nil { - return err - } - if _, err = w.Write(text); err != nil { - return err - } - } else { - if v.Kind() == reflect.Ptr { - v = v.Elem() - } - if err := tm.writeStruct(w, v); err != nil { - return err - } - } - w.unindent() - if err := w.WriteByte(ket); err != nil { - return err - } - default: - _, err := fmt.Fprint(w, v.Interface()) - return err - } - return nil -} - -// equivalent to C's isprint. -func isprint(c byte) bool { - return c >= 0x20 && c < 0x7f -} - -// writeString writes a string in the protocol buffer text format. -// It is similar to strconv.Quote except we don't use Go escape sequences, -// we treat the string as a byte sequence, and we use octal escapes. -// These differences are to maintain interoperability with the other -// languages' implementations of the text format. -func writeString(w *textWriter, s string) error { - // use WriteByte here to get any needed indent - if err := w.WriteByte('"'); err != nil { - return err - } - // Loop over the bytes, not the runes. - for i := 0; i < len(s); i++ { - var err error - // Divergence from C++: we don't escape apostrophes. - // There's no need to escape them, and the C++ parser - // copes with a naked apostrophe. - switch c := s[i]; c { - case '\n': - _, err = w.w.Write(backslashN) - case '\r': - _, err = w.w.Write(backslashR) - case '\t': - _, err = w.w.Write(backslashT) - case '"': - _, err = w.w.Write(backslashDQ) - case '\\': - _, err = w.w.Write(backslashBS) - default: - if isprint(c) { - err = w.w.WriteByte(c) - } else { - _, err = fmt.Fprintf(w.w, "\\%03o", c) - } - } - if err != nil { - return err - } - } - return w.WriteByte('"') -} - -func writeUnknownStruct(w *textWriter, data []byte) (err error) { - if !w.compact { - if _, err := fmt.Fprintf(w, "/* %d unknown bytes */\n", len(data)); err != nil { - return err - } - } - b := NewBuffer(data) - for b.index < len(b.buf) { - x, err := b.DecodeVarint() - if err != nil { - _, ferr := fmt.Fprintf(w, "/* %v */\n", err) - return ferr - } - wire, tag := x&7, x>>3 - if wire == WireEndGroup { - w.unindent() - if _, werr := w.Write(endBraceNewline); werr != nil { - return werr - } - continue - } - if _, ferr := fmt.Fprint(w, tag); ferr != nil { - return ferr - } - if wire != WireStartGroup { - if err = w.WriteByte(':'); err != nil { - return err - } - } - if !w.compact || wire == WireStartGroup { - if err = w.WriteByte(' '); err != nil { - return err - } - } - switch wire { - case WireBytes: - buf, e := b.DecodeRawBytes(false) - if e == nil { - _, err = fmt.Fprintf(w, "%q", buf) - } else { - _, err = fmt.Fprintf(w, "/* %v */", e) - } - case WireFixed32: - x, err = b.DecodeFixed32() - err = writeUnknownInt(w, x, err) - case WireFixed64: - x, err = b.DecodeFixed64() - err = writeUnknownInt(w, x, err) - case WireStartGroup: - err = w.WriteByte('{') - w.indent() - case WireVarint: - x, err = b.DecodeVarint() - err = writeUnknownInt(w, x, err) - default: - _, err = fmt.Fprintf(w, "/* unknown wire type %d */", wire) - } - if err != nil { - return err - } - if err := w.WriteByte('\n'); err != nil { - return err - } - } - return nil -} - -func writeUnknownInt(w *textWriter, x uint64, err error) error { - if err == nil { - _, err = fmt.Fprint(w, x) - } else { - _, err = fmt.Fprintf(w, "/* %v */", err) - } - return err -} - -type int32Slice []int32 - -func (s int32Slice) Len() int { return len(s) } -func (s int32Slice) Less(i, j int) bool { 
return s[i] < s[j] } -func (s int32Slice) Swap(i, j int) { s[i], s[j] = s[j], s[i] } - -// writeExtensions writes all the extensions in pv. -// pv is assumed to be a pointer to a protocol message struct that is extendable. -func (tm *TextMarshaler) writeExtensions(w *textWriter, pv reflect.Value) error { - emap := extensionMaps[pv.Type().Elem()] - e := pv.Interface().(Message) - - var m map[int32]Extension - var mu sync.Locker - if em, ok := e.(extensionsBytes); ok { - eb := em.GetExtensions() - var err error - m, err = BytesToExtensionsMap(*eb) - if err != nil { - return err - } - mu = notLocker{} - } else if _, ok := e.(extendableProto); ok { - ep, _ := extendable(e) - m, mu = ep.extensionsRead() - if m == nil { - return nil - } - } - - // Order the extensions by ID. - // This isn't strictly necessary, but it will give us - // canonical output, which will also make testing easier. - - mu.Lock() - ids := make([]int32, 0, len(m)) - for id := range m { - ids = append(ids, id) - } - sort.Sort(int32Slice(ids)) - mu.Unlock() - - for _, extNum := range ids { - ext := m[extNum] - var desc *ExtensionDesc - if emap != nil { - desc = emap[extNum] - } - if desc == nil { - // Unknown extension. - if err := writeUnknownStruct(w, ext.enc); err != nil { - return err - } - continue - } - - pb, err := GetExtension(e, desc) - if err != nil { - return fmt.Errorf("failed getting extension: %v", err) - } - - // Repeated extensions will appear as a slice. - if !desc.repeated() { - if err := tm.writeExtension(w, desc.Name, pb); err != nil { - return err - } - } else { - v := reflect.ValueOf(pb) - for i := 0; i < v.Len(); i++ { - if err := tm.writeExtension(w, desc.Name, v.Index(i).Interface()); err != nil { - return err - } - } - } - } - return nil -} - -func (tm *TextMarshaler) writeExtension(w *textWriter, name string, pb interface{}) error { - if _, err := fmt.Fprintf(w, "[%s]:", name); err != nil { - return err - } - if !w.compact { - if err := w.WriteByte(' '); err != nil { - return err - } - } - if err := tm.writeAny(w, reflect.ValueOf(pb), nil); err != nil { - return err - } - if err := w.WriteByte('\n'); err != nil { - return err - } - return nil -} - -func (w *textWriter) writeIndent() { - if !w.complete { - return - } - remain := w.ind * 2 - for remain > 0 { - n := remain - if n > len(spaces) { - n = len(spaces) - } - w.w.Write(spaces[:n]) - remain -= n - } - w.complete = false -} - -// TextMarshaler is a configurable text format marshaler. -type TextMarshaler struct { - Compact bool // use compact text format (one line). - ExpandAny bool // expand google.protobuf.Any messages of known types -} - -// Marshal writes a given protocol buffer in text format. -// The only errors returned are from w. -func (tm *TextMarshaler) Marshal(w io.Writer, pb Message) error { - val := reflect.ValueOf(pb) - if pb == nil || val.IsNil() { - w.Write([]byte("")) - return nil - } - var bw *bufio.Writer - ww, ok := w.(writer) - if !ok { - bw = bufio.NewWriter(w) - ww = bw - } - aw := &textWriter{ - w: ww, - complete: true, - compact: tm.Compact, - } - - if etm, ok := pb.(encoding.TextMarshaler); ok { - text, err := etm.MarshalText() - if err != nil { - return err - } - if _, err = aw.Write(text); err != nil { - return err - } - if bw != nil { - return bw.Flush() - } - return nil - } - // Dereference the received pointer so we don't have outer < and >. 
- v := reflect.Indirect(val) - if err := tm.writeStruct(aw, v); err != nil { - return err - } - if bw != nil { - return bw.Flush() - } - return nil -} - -// Text is the same as Marshal, but returns the string directly. -func (tm *TextMarshaler) Text(pb Message) string { - var buf bytes.Buffer - tm.Marshal(&buf, pb) - return buf.String() -} - -var ( - defaultTextMarshaler = TextMarshaler{} - compactTextMarshaler = TextMarshaler{Compact: true} -) - -// TODO: consider removing some of the Marshal functions below. - -// MarshalText writes a given protocol buffer in text format. -// The only errors returned are from w. -func MarshalText(w io.Writer, pb Message) error { return defaultTextMarshaler.Marshal(w, pb) } - -// MarshalTextString is the same as MarshalText, but returns the string directly. -func MarshalTextString(pb Message) string { return defaultTextMarshaler.Text(pb) } - -// CompactText writes a given protocol buffer in compact text format (one line). -func CompactText(w io.Writer, pb Message) error { return compactTextMarshaler.Marshal(w, pb) } - -// CompactTextString is the same as CompactText, but returns the string directly. -func CompactTextString(pb Message) string { return compactTextMarshaler.Text(pb) } diff --git a/vendor/github.com/gogo/protobuf/proto/text_gogo.go b/vendor/github.com/gogo/protobuf/proto/text_gogo.go deleted file mode 100644 index 1d6c6aa0e4..0000000000 --- a/vendor/github.com/gogo/protobuf/proto/text_gogo.go +++ /dev/null @@ -1,57 +0,0 @@ -// Protocol Buffers for Go with Gadgets -// -// Copyright (c) 2013, The GoGo Authors. All rights reserved. -// http://github.com/gogo/protobuf -// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following disclaimer -// in the documentation and/or other materials provided with the -// distribution. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
- -package proto - -import ( - "fmt" - "reflect" -) - -func (tm *TextMarshaler) writeEnum(w *textWriter, v reflect.Value, props *Properties) error { - m, ok := enumStringMaps[props.Enum] - if !ok { - if err := tm.writeAny(w, v, props); err != nil { - return err - } - } - key := int32(0) - if v.Kind() == reflect.Ptr { - key = int32(v.Elem().Int()) - } else { - key = int32(v.Int()) - } - s, ok := m[key] - if !ok { - if err := tm.writeAny(w, v, props); err != nil { - return err - } - } - _, err := fmt.Fprint(w, s) - return err -} diff --git a/vendor/github.com/gogo/protobuf/proto/text_parser.go b/vendor/github.com/gogo/protobuf/proto/text_parser.go deleted file mode 100644 index f85c0cc81a..0000000000 --- a/vendor/github.com/gogo/protobuf/proto/text_parser.go +++ /dev/null @@ -1,1018 +0,0 @@ -// Protocol Buffers for Go with Gadgets -// -// Copyright (c) 2013, The GoGo Authors. All rights reserved. -// http://github.com/gogo/protobuf -// -// Go support for Protocol Buffers - Google's data interchange format -// -// Copyright 2010 The Go Authors. All rights reserved. -// https://github.com/golang/protobuf -// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following disclaimer -// in the documentation and/or other materials provided with the -// distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived from -// this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -package proto - -// Functions for parsing the Text protocol buffer format. -// TODO: message sets. 
- -import ( - "encoding" - "errors" - "fmt" - "reflect" - "strconv" - "strings" - "time" - "unicode/utf8" -) - -// Error string emitted when deserializing Any and fields are already set -const anyRepeatedlyUnpacked = "Any message unpacked multiple times, or %q already set" - -type ParseError struct { - Message string - Line int // 1-based line number - Offset int // 0-based byte offset from start of input -} - -func (p *ParseError) Error() string { - if p.Line == 1 { - // show offset only for first line - return fmt.Sprintf("line 1.%d: %v", p.Offset, p.Message) - } - return fmt.Sprintf("line %d: %v", p.Line, p.Message) -} - -type token struct { - value string - err *ParseError - line int // line number - offset int // byte number from start of input, not start of line - unquoted string // the unquoted version of value, if it was a quoted string -} - -func (t *token) String() string { - if t.err == nil { - return fmt.Sprintf("%q (line=%d, offset=%d)", t.value, t.line, t.offset) - } - return fmt.Sprintf("parse error: %v", t.err) -} - -type textParser struct { - s string // remaining input - done bool // whether the parsing is finished (success or error) - backed bool // whether back() was called - offset, line int - cur token -} - -func newTextParser(s string) *textParser { - p := new(textParser) - p.s = s - p.line = 1 - p.cur.line = 1 - return p -} - -func (p *textParser) errorf(format string, a ...interface{}) *ParseError { - pe := &ParseError{fmt.Sprintf(format, a...), p.cur.line, p.cur.offset} - p.cur.err = pe - p.done = true - return pe -} - -// Numbers and identifiers are matched by [-+._A-Za-z0-9] -func isIdentOrNumberChar(c byte) bool { - switch { - case 'A' <= c && c <= 'Z', 'a' <= c && c <= 'z': - return true - case '0' <= c && c <= '9': - return true - } - switch c { - case '-', '+', '.', '_': - return true - } - return false -} - -func isWhitespace(c byte) bool { - switch c { - case ' ', '\t', '\n', '\r': - return true - } - return false -} - -func isQuote(c byte) bool { - switch c { - case '"', '\'': - return true - } - return false -} - -func (p *textParser) skipWhitespace() { - i := 0 - for i < len(p.s) && (isWhitespace(p.s[i]) || p.s[i] == '#') { - if p.s[i] == '#' { - // comment; skip to end of line or input - for i < len(p.s) && p.s[i] != '\n' { - i++ - } - if i == len(p.s) { - break - } - } - if p.s[i] == '\n' { - p.line++ - } - i++ - } - p.offset += i - p.s = p.s[i:len(p.s)] - if len(p.s) == 0 { - p.done = true - } -} - -func (p *textParser) advance() { - // Skip whitespace - p.skipWhitespace() - if p.done { - return - } - - // Start of non-whitespace - p.cur.err = nil - p.cur.offset, p.cur.line = p.offset, p.line - p.cur.unquoted = "" - switch p.s[0] { - case '<', '>', '{', '}', ':', '[', ']', ';', ',', '/': - // Single symbol - p.cur.value, p.s = p.s[0:1], p.s[1:len(p.s)] - case '"', '\'': - // Quoted string - i := 1 - for i < len(p.s) && p.s[i] != p.s[0] && p.s[i] != '\n' { - if p.s[i] == '\\' && i+1 < len(p.s) { - // skip escaped char - i++ - } - i++ - } - if i >= len(p.s) || p.s[i] != p.s[0] { - p.errorf("unmatched quote") - return - } - unq, err := unquoteC(p.s[1:i], rune(p.s[0])) - if err != nil { - p.errorf("invalid quoted string %s: %v", p.s[0:i+1], err) - return - } - p.cur.value, p.s = p.s[0:i+1], p.s[i+1:len(p.s)] - p.cur.unquoted = unq - default: - i := 0 - for i < len(p.s) && isIdentOrNumberChar(p.s[i]) { - i++ - } - if i == 0 { - p.errorf("unexpected byte %#x", p.s[0]) - return - } - p.cur.value, p.s = p.s[0:i], p.s[i:len(p.s)] - } - p.offset += 
len(p.cur.value) -} - -var ( - errBadUTF8 = errors.New("proto: bad UTF-8") -) - -func unquoteC(s string, quote rune) (string, error) { - // This is based on C++'s tokenizer.cc. - // Despite its name, this is *not* parsing C syntax. - // For instance, "\0" is an invalid quoted string. - - // Avoid allocation in trivial cases. - simple := true - for _, r := range s { - if r == '\\' || r == quote { - simple = false - break - } - } - if simple { - return s, nil - } - - buf := make([]byte, 0, 3*len(s)/2) - for len(s) > 0 { - r, n := utf8.DecodeRuneInString(s) - if r == utf8.RuneError && n == 1 { - return "", errBadUTF8 - } - s = s[n:] - if r != '\\' { - if r < utf8.RuneSelf { - buf = append(buf, byte(r)) - } else { - buf = append(buf, string(r)...) - } - continue - } - - ch, tail, err := unescape(s) - if err != nil { - return "", err - } - buf = append(buf, ch...) - s = tail - } - return string(buf), nil -} - -func unescape(s string) (ch string, tail string, err error) { - r, n := utf8.DecodeRuneInString(s) - if r == utf8.RuneError && n == 1 { - return "", "", errBadUTF8 - } - s = s[n:] - switch r { - case 'a': - return "\a", s, nil - case 'b': - return "\b", s, nil - case 'f': - return "\f", s, nil - case 'n': - return "\n", s, nil - case 'r': - return "\r", s, nil - case 't': - return "\t", s, nil - case 'v': - return "\v", s, nil - case '?': - return "?", s, nil // trigraph workaround - case '\'', '"', '\\': - return string(r), s, nil - case '0', '1', '2', '3', '4', '5', '6', '7': - if len(s) < 2 { - return "", "", fmt.Errorf(`\%c requires 2 following digits`, r) - } - ss := string(r) + s[:2] - s = s[2:] - i, err := strconv.ParseUint(ss, 8, 8) - if err != nil { - return "", "", fmt.Errorf(`\%s contains non-octal digits`, ss) - } - return string([]byte{byte(i)}), s, nil - case 'x', 'X', 'u', 'U': - var n int - switch r { - case 'x', 'X': - n = 2 - case 'u': - n = 4 - case 'U': - n = 8 - } - if len(s) < n { - return "", "", fmt.Errorf(`\%c requires %d following digits`, r, n) - } - ss := s[:n] - s = s[n:] - i, err := strconv.ParseUint(ss, 16, 64) - if err != nil { - return "", "", fmt.Errorf(`\%c%s contains non-hexadecimal digits`, r, ss) - } - if r == 'x' || r == 'X' { - return string([]byte{byte(i)}), s, nil - } - if i > utf8.MaxRune { - return "", "", fmt.Errorf(`\%c%s is not a valid Unicode code point`, r, ss) - } - return string(rune(i)), s, nil - } - return "", "", fmt.Errorf(`unknown escape \%c`, r) -} - -// Back off the parser by one token. Can only be done between calls to next(). -// It makes the next advance() a no-op. -func (p *textParser) back() { p.backed = true } - -// Advances the parser and returns the new current token. -func (p *textParser) next() *token { - if p.backed || p.done { - p.backed = false - return &p.cur - } - p.advance() - if p.done { - p.cur.value = "" - } else if len(p.cur.value) > 0 && isQuote(p.cur.value[0]) { - // Look for multiple quoted strings separated by whitespace, - // and concatenate them. 
- cat := p.cur - for { - p.skipWhitespace() - if p.done || !isQuote(p.s[0]) { - break - } - p.advance() - if p.cur.err != nil { - return &p.cur - } - cat.value += " " + p.cur.value - cat.unquoted += p.cur.unquoted - } - p.done = false // parser may have seen EOF, but we want to return cat - p.cur = cat - } - return &p.cur -} - -func (p *textParser) consumeToken(s string) error { - tok := p.next() - if tok.err != nil { - return tok.err - } - if tok.value != s { - p.back() - return p.errorf("expected %q, found %q", s, tok.value) - } - return nil -} - -// Return a RequiredNotSetError indicating which required field was not set. -func (p *textParser) missingRequiredFieldError(sv reflect.Value) *RequiredNotSetError { - st := sv.Type() - sprops := GetProperties(st) - for i := 0; i < st.NumField(); i++ { - if !isNil(sv.Field(i)) { - continue - } - - props := sprops.Prop[i] - if props.Required { - return &RequiredNotSetError{fmt.Sprintf("%v.%v", st, props.OrigName)} - } - } - return &RequiredNotSetError{fmt.Sprintf("%v.", st)} // should not happen -} - -// Returns the index in the struct for the named field, as well as the parsed tag properties. -func structFieldByName(sprops *StructProperties, name string) (int, *Properties, bool) { - i, ok := sprops.decoderOrigNames[name] - if ok { - return i, sprops.Prop[i], true - } - return -1, nil, false -} - -// Consume a ':' from the input stream (if the next token is a colon), -// returning an error if a colon is needed but not present. -func (p *textParser) checkForColon(props *Properties, typ reflect.Type) *ParseError { - tok := p.next() - if tok.err != nil { - return tok.err - } - if tok.value != ":" { - // Colon is optional when the field is a group or message. - needColon := true - switch props.Wire { - case "group": - needColon = false - case "bytes": - // A "bytes" field is either a message, a string, or a repeated field; - // those three become *T, *string and []T respectively, so we can check for - // this field being a pointer to a non-string. - if typ.Kind() == reflect.Ptr { - // *T or *string - if typ.Elem().Kind() == reflect.String { - break - } - } else if typ.Kind() == reflect.Slice { - // []T or []*T - if typ.Elem().Kind() != reflect.Ptr { - break - } - } else if typ.Kind() == reflect.String { - // The proto3 exception is for a string field, - // which requires a colon. - break - } - needColon = false - } - if needColon { - return p.errorf("expected ':', found %q", tok.value) - } - p.back() - } - return nil -} - -func (p *textParser) readStruct(sv reflect.Value, terminator string) error { - st := sv.Type() - sprops := GetProperties(st) - reqCount := sprops.reqCount - var reqFieldErr error - fieldSet := make(map[string]bool) - // A struct is a sequence of "name: value", terminated by one of - // '>' or '}', or the end of the input. A name may also be - // "[extension]" or "[type/url]". - // - // The whole struct can also be an expanded Any message, like: - // [type/url] < ... struct contents ... > - for { - tok := p.next() - if tok.err != nil { - return tok.err - } - if tok.value == terminator { - break - } - if tok.value == "[" { - // Looks like an extension or an Any. - // - // TODO: Check whether we need to handle - // namespace rooted names (e.g. ".something.Foo"). - extName, err := p.consumeExtName() - if err != nil { - return err - } - - if s := strings.LastIndex(extName, "/"); s >= 0 { - // If it contains a slash, it's an Any type URL. 
- messageName := extName[s+1:] - mt := MessageType(messageName) - if mt == nil { - return p.errorf("unrecognized message %q in google.protobuf.Any", messageName) - } - tok = p.next() - if tok.err != nil { - return tok.err - } - // consume an optional colon - if tok.value == ":" { - tok = p.next() - if tok.err != nil { - return tok.err - } - } - var terminator string - switch tok.value { - case "<": - terminator = ">" - case "{": - terminator = "}" - default: - return p.errorf("expected '{' or '<', found %q", tok.value) - } - v := reflect.New(mt.Elem()) - if pe := p.readStruct(v.Elem(), terminator); pe != nil { - return pe - } - b, err := Marshal(v.Interface().(Message)) - if err != nil { - return p.errorf("failed to marshal message of type %q: %v", messageName, err) - } - if fieldSet["type_url"] { - return p.errorf(anyRepeatedlyUnpacked, "type_url") - } - if fieldSet["value"] { - return p.errorf(anyRepeatedlyUnpacked, "value") - } - sv.FieldByName("TypeUrl").SetString(extName) - sv.FieldByName("Value").SetBytes(b) - fieldSet["type_url"] = true - fieldSet["value"] = true - continue - } - - var desc *ExtensionDesc - // This could be faster, but it's functional. - // TODO: Do something smarter than a linear scan. - for _, d := range RegisteredExtensions(reflect.New(st).Interface().(Message)) { - if d.Name == extName { - desc = d - break - } - } - if desc == nil { - return p.errorf("unrecognized extension %q", extName) - } - - props := &Properties{} - props.Parse(desc.Tag) - - typ := reflect.TypeOf(desc.ExtensionType) - if err := p.checkForColon(props, typ); err != nil { - return err - } - - rep := desc.repeated() - - // Read the extension structure, and set it in - // the value we're constructing. - var ext reflect.Value - if !rep { - ext = reflect.New(typ).Elem() - } else { - ext = reflect.New(typ.Elem()).Elem() - } - if err := p.readAny(ext, props); err != nil { - if _, ok := err.(*RequiredNotSetError); !ok { - return err - } - reqFieldErr = err - } - ep := sv.Addr().Interface().(Message) - if !rep { - SetExtension(ep, desc, ext.Interface()) - } else { - old, err := GetExtension(ep, desc) - var sl reflect.Value - if err == nil { - sl = reflect.ValueOf(old) // existing slice - } else { - sl = reflect.MakeSlice(typ, 0, 1) - } - sl = reflect.Append(sl, ext) - SetExtension(ep, desc, sl.Interface()) - } - if err := p.consumeOptionalSeparator(); err != nil { - return err - } - continue - } - - // This is a normal, non-extension field. - name := tok.value - var dst reflect.Value - fi, props, ok := structFieldByName(sprops, name) - if ok { - dst = sv.Field(fi) - } else if oop, ok := sprops.OneofTypes[name]; ok { - // It is a oneof. - props = oop.Prop - nv := reflect.New(oop.Type.Elem()) - dst = nv.Elem().Field(0) - field := sv.Field(oop.Field) - if !field.IsNil() { - return p.errorf("field '%s' would overwrite already parsed oneof '%s'", name, sv.Type().Field(oop.Field).Name) - } - field.Set(nv) - } - if !dst.IsValid() { - return p.errorf("unknown field name %q in %v", name, st) - } - - if dst.Kind() == reflect.Map { - // Consume any colon. - if err := p.checkForColon(props, dst.Type()); err != nil { - return err - } - - // Construct the map if it doesn't already exist. 
- if dst.IsNil() { - dst.Set(reflect.MakeMap(dst.Type())) - } - key := reflect.New(dst.Type().Key()).Elem() - val := reflect.New(dst.Type().Elem()).Elem() - - // The map entry should be this sequence of tokens: - // < key : KEY value : VALUE > - // However, implementations may omit key or value, and technically - // we should support them in any order. See b/28924776 for a time - // this went wrong. - - tok := p.next() - var terminator string - switch tok.value { - case "<": - terminator = ">" - case "{": - terminator = "}" - default: - return p.errorf("expected '{' or '<', found %q", tok.value) - } - for { - tok := p.next() - if tok.err != nil { - return tok.err - } - if tok.value == terminator { - break - } - switch tok.value { - case "key": - if err := p.consumeToken(":"); err != nil { - return err - } - if err := p.readAny(key, props.MapKeyProp); err != nil { - return err - } - if err := p.consumeOptionalSeparator(); err != nil { - return err - } - case "value": - if err := p.checkForColon(props.MapValProp, dst.Type().Elem()); err != nil { - return err - } - if err := p.readAny(val, props.MapValProp); err != nil { - return err - } - if err := p.consumeOptionalSeparator(); err != nil { - return err - } - default: - p.back() - return p.errorf(`expected "key", "value", or %q, found %q`, terminator, tok.value) - } - } - - dst.SetMapIndex(key, val) - continue - } - - // Check that it's not already set if it's not a repeated field. - if !props.Repeated && fieldSet[name] { - return p.errorf("non-repeated field %q was repeated", name) - } - - if err := p.checkForColon(props, dst.Type()); err != nil { - return err - } - - // Parse into the field. - fieldSet[name] = true - if err := p.readAny(dst, props); err != nil { - if _, ok := err.(*RequiredNotSetError); !ok { - return err - } - reqFieldErr = err - } - if props.Required { - reqCount-- - } - - if err := p.consumeOptionalSeparator(); err != nil { - return err - } - - } - - if reqCount > 0 { - return p.missingRequiredFieldError(sv) - } - return reqFieldErr -} - -// consumeExtName consumes extension name or expanded Any type URL and the -// following ']'. It returns the name or URL consumed. -func (p *textParser) consumeExtName() (string, error) { - tok := p.next() - if tok.err != nil { - return "", tok.err - } - - // If extension name or type url is quoted, it's a single token. - if len(tok.value) > 2 && isQuote(tok.value[0]) && tok.value[len(tok.value)-1] == tok.value[0] { - name, err := unquoteC(tok.value[1:len(tok.value)-1], rune(tok.value[0])) - if err != nil { - return "", err - } - return name, p.consumeToken("]") - } - - // Consume everything up to "]" - var parts []string - for tok.value != "]" { - parts = append(parts, tok.value) - tok = p.next() - if tok.err != nil { - return "", p.errorf("unrecognized type_url or extension name: %s", tok.err) - } - if p.done && tok.value != "]" { - return "", p.errorf("unclosed type_url or extension name") - } - } - return strings.Join(parts, ""), nil -} - -// consumeOptionalSeparator consumes an optional semicolon or comma. -// It is used in readStruct to provide backward compatibility. 
-func (p *textParser) consumeOptionalSeparator() error { - tok := p.next() - if tok.err != nil { - return tok.err - } - if tok.value != ";" && tok.value != "," { - p.back() - } - return nil -} - -func (p *textParser) readAny(v reflect.Value, props *Properties) error { - tok := p.next() - if tok.err != nil { - return tok.err - } - if tok.value == "" { - return p.errorf("unexpected EOF") - } - if len(props.CustomType) > 0 { - if props.Repeated { - t := reflect.TypeOf(v.Interface()) - if t.Kind() == reflect.Slice { - tc := reflect.TypeOf(new(Marshaler)) - ok := t.Elem().Implements(tc.Elem()) - if ok { - fv := v - flen := fv.Len() - if flen == fv.Cap() { - nav := reflect.MakeSlice(v.Type(), flen, 2*flen+1) - reflect.Copy(nav, fv) - fv.Set(nav) - } - fv.SetLen(flen + 1) - - // Read one. - p.back() - return p.readAny(fv.Index(flen), props) - } - } - } - if reflect.TypeOf(v.Interface()).Kind() == reflect.Ptr { - custom := reflect.New(props.ctype.Elem()).Interface().(Unmarshaler) - err := custom.Unmarshal([]byte(tok.unquoted)) - if err != nil { - return p.errorf("%v %v: %v", err, v.Type(), tok.value) - } - v.Set(reflect.ValueOf(custom)) - } else { - custom := reflect.New(reflect.TypeOf(v.Interface())).Interface().(Unmarshaler) - err := custom.Unmarshal([]byte(tok.unquoted)) - if err != nil { - return p.errorf("%v %v: %v", err, v.Type(), tok.value) - } - v.Set(reflect.Indirect(reflect.ValueOf(custom))) - } - return nil - } - if props.StdTime { - fv := v - p.back() - props.StdTime = false - tproto := &timestamp{} - err := p.readAny(reflect.ValueOf(tproto).Elem(), props) - props.StdTime = true - if err != nil { - return err - } - tim, err := timestampFromProto(tproto) - if err != nil { - return err - } - if props.Repeated { - t := reflect.TypeOf(v.Interface()) - if t.Kind() == reflect.Slice { - if t.Elem().Kind() == reflect.Ptr { - ts := fv.Interface().([]*time.Time) - ts = append(ts, &tim) - fv.Set(reflect.ValueOf(ts)) - return nil - } else { - ts := fv.Interface().([]time.Time) - ts = append(ts, tim) - fv.Set(reflect.ValueOf(ts)) - return nil - } - } - } - if reflect.TypeOf(v.Interface()).Kind() == reflect.Ptr { - v.Set(reflect.ValueOf(&tim)) - } else { - v.Set(reflect.Indirect(reflect.ValueOf(&tim))) - } - return nil - } - if props.StdDuration { - fv := v - p.back() - props.StdDuration = false - dproto := &duration{} - err := p.readAny(reflect.ValueOf(dproto).Elem(), props) - props.StdDuration = true - if err != nil { - return err - } - dur, err := durationFromProto(dproto) - if err != nil { - return err - } - if props.Repeated { - t := reflect.TypeOf(v.Interface()) - if t.Kind() == reflect.Slice { - if t.Elem().Kind() == reflect.Ptr { - ds := fv.Interface().([]*time.Duration) - ds = append(ds, &dur) - fv.Set(reflect.ValueOf(ds)) - return nil - } else { - ds := fv.Interface().([]time.Duration) - ds = append(ds, dur) - fv.Set(reflect.ValueOf(ds)) - return nil - } - } - } - if reflect.TypeOf(v.Interface()).Kind() == reflect.Ptr { - v.Set(reflect.ValueOf(&dur)) - } else { - v.Set(reflect.Indirect(reflect.ValueOf(&dur))) - } - return nil - } - switch fv := v; fv.Kind() { - case reflect.Slice: - at := v.Type() - if at.Elem().Kind() == reflect.Uint8 { - // Special case for []byte - if tok.value[0] != '"' && tok.value[0] != '\'' { - // Deliberately written out here, as the error after - // this switch statement would write "invalid []byte: ...", - // which is not as user-friendly.
- return p.errorf("invalid string: %v", tok.value) - } - bytes := []byte(tok.unquoted) - fv.Set(reflect.ValueOf(bytes)) - return nil - } - // Repeated field. - if tok.value == "[" { - // Repeated field with list notation, like [1,2,3]. - for { - fv.Set(reflect.Append(fv, reflect.New(at.Elem()).Elem())) - err := p.readAny(fv.Index(fv.Len()-1), props) - if err != nil { - return err - } - ntok := p.next() - if ntok.err != nil { - return ntok.err - } - if ntok.value == "]" { - break - } - if ntok.value != "," { - return p.errorf("Expected ']' or ',' found %q", ntok.value) - } - } - return nil - } - // One value of the repeated field. - p.back() - fv.Set(reflect.Append(fv, reflect.New(at.Elem()).Elem())) - return p.readAny(fv.Index(fv.Len()-1), props) - case reflect.Bool: - // true/1/t/True or false/f/0/False. - switch tok.value { - case "true", "1", "t", "True": - fv.SetBool(true) - return nil - case "false", "0", "f", "False": - fv.SetBool(false) - return nil - } - case reflect.Float32, reflect.Float64: - v := tok.value - // Ignore 'f' for compatibility with output generated by C++, but don't - // remove 'f' when the value is "-inf" or "inf". - if strings.HasSuffix(v, "f") && tok.value != "-inf" && tok.value != "inf" { - v = v[:len(v)-1] - } - if f, err := strconv.ParseFloat(v, fv.Type().Bits()); err == nil { - fv.SetFloat(f) - return nil - } - case reflect.Int8: - if x, err := strconv.ParseInt(tok.value, 0, 8); err == nil { - fv.SetInt(x) - return nil - } - case reflect.Int16: - if x, err := strconv.ParseInt(tok.value, 0, 16); err == nil { - fv.SetInt(x) - return nil - } - case reflect.Int32: - if x, err := strconv.ParseInt(tok.value, 0, 32); err == nil { - fv.SetInt(x) - return nil - } - - if len(props.Enum) == 0 { - break - } - m, ok := enumValueMaps[props.Enum] - if !ok { - break - } - x, ok := m[tok.value] - if !ok { - break - } - fv.SetInt(int64(x)) - return nil - case reflect.Int64: - if x, err := strconv.ParseInt(tok.value, 0, 64); err == nil { - fv.SetInt(x) - return nil - } - - case reflect.Ptr: - // A basic field (indirected through pointer), or a repeated message/group - p.back() - fv.Set(reflect.New(fv.Type().Elem())) - return p.readAny(fv.Elem(), props) - case reflect.String: - if tok.value[0] == '"' || tok.value[0] == '\'' { - fv.SetString(tok.unquoted) - return nil - } - case reflect.Struct: - var terminator string - switch tok.value { - case "{": - terminator = "}" - case "<": - terminator = ">" - default: - return p.errorf("expected '{' or '<', found %q", tok.value) - } - // TODO: Handle nested messages which implement encoding.TextUnmarshaler. - return p.readStruct(fv, terminator) - case reflect.Uint8: - if x, err := strconv.ParseUint(tok.value, 0, 8); err == nil { - fv.SetUint(x) - return nil - } - case reflect.Uint16: - if x, err := strconv.ParseUint(tok.value, 0, 16); err == nil { - fv.SetUint(x) - return nil - } - case reflect.Uint32: - if x, err := strconv.ParseUint(tok.value, 0, 32); err == nil { - fv.SetUint(uint64(x)) - return nil - } - case reflect.Uint64: - if x, err := strconv.ParseUint(tok.value, 0, 64); err == nil { - fv.SetUint(x) - return nil - } - } - return p.errorf("invalid %v: %v", v.Type(), tok.value) -} - -// UnmarshalText reads a protocol buffer in Text format. UnmarshalText resets pb -// before starting to unmarshal, so any existing data in pb is always removed. -// If a required field is not set and no other error occurs, -// UnmarshalText returns *RequiredNotSetError. 
-func UnmarshalText(s string, pb Message) error { - if um, ok := pb.(encoding.TextUnmarshaler); ok { - return um.UnmarshalText([]byte(s)) - } - pb.Reset() - v := reflect.ValueOf(pb) - return newTextParser(s).readStruct(v.Elem(), "") -} diff --git a/vendor/github.com/gogo/protobuf/proto/timestamp.go b/vendor/github.com/gogo/protobuf/proto/timestamp.go deleted file mode 100644 index 9324f6542b..0000000000 --- a/vendor/github.com/gogo/protobuf/proto/timestamp.go +++ /dev/null @@ -1,113 +0,0 @@ -// Go support for Protocol Buffers - Google's data interchange format -// -// Copyright 2016 The Go Authors. All rights reserved. -// https://github.com/golang/protobuf -// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following disclaimer -// in the documentation and/or other materials provided with the -// distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived from -// this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -package proto - -// This file implements operations on google.protobuf.Timestamp. - -import ( - "errors" - "fmt" - "time" -) - -const ( - // Seconds field of the earliest valid Timestamp. - // This is time.Date(1, 1, 1, 0, 0, 0, 0, time.UTC).Unix(). - minValidSeconds = -62135596800 - // Seconds field just after the latest valid Timestamp. - // This is time.Date(10000, 1, 1, 0, 0, 0, 0, time.UTC).Unix(). - maxValidSeconds = 253402300800 -) - -// validateTimestamp determines whether a Timestamp is valid. -// A valid timestamp represents a time in the range -// [0001-01-01, 10000-01-01) and has a Nanos field -// in the range [0, 1e9). -// -// If the Timestamp is valid, validateTimestamp returns nil. -// Otherwise, it returns an error that describes -// the problem. -// -// Every valid Timestamp can be represented by a time.Time, but the converse is not true. 
-func validateTimestamp(ts *timestamp) error { - if ts == nil { - return errors.New("timestamp: nil Timestamp") - } - if ts.Seconds < minValidSeconds { - return fmt.Errorf("timestamp: %#v before 0001-01-01", ts) - } - if ts.Seconds >= maxValidSeconds { - return fmt.Errorf("timestamp: %#v after 10000-01-01", ts) - } - if ts.Nanos < 0 || ts.Nanos >= 1e9 { - return fmt.Errorf("timestamp: %#v: nanos not in range [0, 1e9)", ts) - } - return nil -} - -// timestampFromProto converts a google.protobuf.Timestamp proto to a time.Time. -// It returns an error if the argument is invalid. -// -// Unlike most Go functions, if timestampFromProto returns an error, the first return value -// is not the zero time.Time. Instead, it is the value obtained from the -// time.Unix function when passed the contents of the Timestamp, in the UTC -// locale. This may or may not be a meaningful time; many invalid Timestamps -// do map to valid time.Times. -// -// A nil Timestamp returns an error. The first return value in that case is -// undefined. -func timestampFromProto(ts *timestamp) (time.Time, error) { - // Don't return the zero value on error, because it corresponds to a valid - // timestamp. Instead return whatever time.Unix gives us. - var t time.Time - if ts == nil { - t = time.Unix(0, 0).UTC() // treat nil like the empty Timestamp - } else { - t = time.Unix(ts.Seconds, int64(ts.Nanos)).UTC() - } - return t, validateTimestamp(ts) -} - -// timestampProto converts the time.Time to a google.protobuf.Timestamp proto. -// It returns an error if the resulting Timestamp is invalid. -func timestampProto(t time.Time) (*timestamp, error) { - seconds := t.Unix() - nanos := int32(t.Sub(time.Unix(seconds, 0))) - ts := &timestamp{ - Seconds: seconds, - Nanos: nanos, - } - if err := validateTimestamp(ts); err != nil { - return nil, err - } - return ts, nil -} diff --git a/vendor/github.com/gogo/protobuf/proto/timestamp_gogo.go b/vendor/github.com/gogo/protobuf/proto/timestamp_gogo.go deleted file mode 100644 index 38439fa990..0000000000 --- a/vendor/github.com/gogo/protobuf/proto/timestamp_gogo.go +++ /dev/null @@ -1,49 +0,0 @@ -// Protocol Buffers for Go with Gadgets -// -// Copyright (c) 2016, The GoGo Authors. All rights reserved. -// http://github.com/gogo/protobuf -// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following disclaimer -// in the documentation and/or other materials provided with the -// distribution. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED.
IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -package proto - -import ( - "reflect" - "time" -) - -var timeType = reflect.TypeOf((*time.Time)(nil)).Elem() - -type timestamp struct { - Seconds int64 `protobuf:"varint,1,opt,name=seconds,proto3" json:"seconds,omitempty"` - Nanos int32 `protobuf:"varint,2,opt,name=nanos,proto3" json:"nanos,omitempty"` -} - -func (m *timestamp) Reset() { *m = timestamp{} } -func (*timestamp) ProtoMessage() {} -func (*timestamp) String() string { return "timestamp" } - -func init() { - RegisterType((*timestamp)(nil), "gogo.protobuf.proto.timestamp") -} diff --git a/vendor/github.com/gogo/protobuf/proto/wrappers.go b/vendor/github.com/gogo/protobuf/proto/wrappers.go deleted file mode 100644 index b175d1b642..0000000000 --- a/vendor/github.com/gogo/protobuf/proto/wrappers.go +++ /dev/null @@ -1,1888 +0,0 @@ -// Protocol Buffers for Go with Gadgets -// -// Copyright (c) 2018, The GoGo Authors. All rights reserved. -// http://github.com/gogo/protobuf -// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following disclaimer -// in the documentation and/or other materials provided with the -// distribution. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -package proto - -import ( - "io" - "reflect" -) - -func makeStdDoubleValueMarshaler(u *marshalInfo) (sizer, marshaler) { - return func(ptr pointer, tagsize int) int { - t := ptr.asPointerTo(u.typ).Interface().(*float64) - v := &float64Value{*t} - siz := Size(v) - return tagsize + SizeVarint(uint64(siz)) + siz - }, func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) { - t := ptr.asPointerTo(u.typ).Interface().(*float64) - v := &float64Value{*t} - buf, err := Marshal(v) - if err != nil { - return nil, err - } - b = appendVarint(b, wiretag) - b = appendVarint(b, uint64(len(buf))) - b = append(b, buf...) 
- return b, nil - } -} - -func makeStdDoubleValuePtrMarshaler(u *marshalInfo) (sizer, marshaler) { - return func(ptr pointer, tagsize int) int { - if ptr.isNil() { - return 0 - } - t := ptr.asPointerTo(reflect.PtrTo(u.typ)).Elem().Interface().(*float64) - v := &float64Value{*t} - siz := Size(v) - return tagsize + SizeVarint(uint64(siz)) + siz - }, func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) { - if ptr.isNil() { - return b, nil - } - t := ptr.asPointerTo(reflect.PtrTo(u.typ)).Elem().Interface().(*float64) - v := &float64Value{*t} - buf, err := Marshal(v) - if err != nil { - return nil, err - } - b = appendVarint(b, wiretag) - b = appendVarint(b, uint64(len(buf))) - b = append(b, buf...) - return b, nil - } -} - -func makeStdDoubleValueSliceMarshaler(u *marshalInfo) (sizer, marshaler) { - return func(ptr pointer, tagsize int) int { - s := ptr.getSlice(u.typ) - n := 0 - for i := 0; i < s.Len(); i++ { - elem := s.Index(i) - t := elem.Interface().(float64) - v := &float64Value{t} - siz := Size(v) - n += siz + SizeVarint(uint64(siz)) + tagsize - } - return n - }, - func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) { - s := ptr.getSlice(u.typ) - for i := 0; i < s.Len(); i++ { - elem := s.Index(i) - t := elem.Interface().(float64) - v := &float64Value{t} - siz := Size(v) - buf, err := Marshal(v) - if err != nil { - return nil, err - } - b = appendVarint(b, wiretag) - b = appendVarint(b, uint64(siz)) - b = append(b, buf...) - } - - return b, nil - } -} - -func makeStdDoubleValuePtrSliceMarshaler(u *marshalInfo) (sizer, marshaler) { - return func(ptr pointer, tagsize int) int { - s := ptr.getSlice(reflect.PtrTo(u.typ)) - n := 0 - for i := 0; i < s.Len(); i++ { - elem := s.Index(i) - t := elem.Interface().(*float64) - v := &float64Value{*t} - siz := Size(v) - n += siz + SizeVarint(uint64(siz)) + tagsize - } - return n - }, - func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) { - s := ptr.getSlice(reflect.PtrTo(u.typ)) - for i := 0; i < s.Len(); i++ { - elem := s.Index(i) - t := elem.Interface().(*float64) - v := &float64Value{*t} - siz := Size(v) - buf, err := Marshal(v) - if err != nil { - return nil, err - } - b = appendVarint(b, wiretag) - b = appendVarint(b, uint64(siz)) - b = append(b, buf...) 
- } - - return b, nil - } -} - -func makeStdDoubleValueUnmarshaler(sub *unmarshalInfo, name string) unmarshaler { - return func(b []byte, f pointer, w int) ([]byte, error) { - if w != WireBytes { - return nil, errInternalBadWireType - } - x, n := decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - if x > uint64(len(b)) { - return nil, io.ErrUnexpectedEOF - } - m := &float64Value{} - if err := Unmarshal(b[:x], m); err != nil { - return nil, err - } - s := f.asPointerTo(sub.typ).Elem() - s.Set(reflect.ValueOf(m.Value)) - return b[x:], nil - } -} - -func makeStdDoubleValuePtrUnmarshaler(sub *unmarshalInfo, name string) unmarshaler { - return func(b []byte, f pointer, w int) ([]byte, error) { - if w != WireBytes { - return nil, errInternalBadWireType - } - x, n := decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - if x > uint64(len(b)) { - return nil, io.ErrUnexpectedEOF - } - m := &float64Value{} - if err := Unmarshal(b[:x], m); err != nil { - return nil, err - } - s := f.asPointerTo(reflect.PtrTo(sub.typ)).Elem() - s.Set(reflect.ValueOf(&m.Value)) - return b[x:], nil - } -} - -func makeStdDoubleValuePtrSliceUnmarshaler(sub *unmarshalInfo, name string) unmarshaler { - return func(b []byte, f pointer, w int) ([]byte, error) { - if w != WireBytes { - return nil, errInternalBadWireType - } - x, n := decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - if x > uint64(len(b)) { - return nil, io.ErrUnexpectedEOF - } - m := &float64Value{} - if err := Unmarshal(b[:x], m); err != nil { - return nil, err - } - slice := f.getSlice(reflect.PtrTo(sub.typ)) - newSlice := reflect.Append(slice, reflect.ValueOf(&m.Value)) - slice.Set(newSlice) - return b[x:], nil - } -} - -func makeStdDoubleValueSliceUnmarshaler(sub *unmarshalInfo, name string) unmarshaler { - return func(b []byte, f pointer, w int) ([]byte, error) { - if w != WireBytes { - return nil, errInternalBadWireType - } - x, n := decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - if x > uint64(len(b)) { - return nil, io.ErrUnexpectedEOF - } - m := &float64Value{} - if err := Unmarshal(b[:x], m); err != nil { - return nil, err - } - slice := f.getSlice(sub.typ) - newSlice := reflect.Append(slice, reflect.ValueOf(m.Value)) - slice.Set(newSlice) - return b[x:], nil - } -} - -func makeStdFloatValueMarshaler(u *marshalInfo) (sizer, marshaler) { - return func(ptr pointer, tagsize int) int { - t := ptr.asPointerTo(u.typ).Interface().(*float32) - v := &float32Value{*t} - siz := Size(v) - return tagsize + SizeVarint(uint64(siz)) + siz - }, func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) { - t := ptr.asPointerTo(u.typ).Interface().(*float32) - v := &float32Value{*t} - buf, err := Marshal(v) - if err != nil { - return nil, err - } - b = appendVarint(b, wiretag) - b = appendVarint(b, uint64(len(buf))) - b = append(b, buf...) 
- return b, nil - } -} - -func makeStdFloatValuePtrMarshaler(u *marshalInfo) (sizer, marshaler) { - return func(ptr pointer, tagsize int) int { - if ptr.isNil() { - return 0 - } - t := ptr.asPointerTo(reflect.PtrTo(u.typ)).Elem().Interface().(*float32) - v := &float32Value{*t} - siz := Size(v) - return tagsize + SizeVarint(uint64(siz)) + siz - }, func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) { - if ptr.isNil() { - return b, nil - } - t := ptr.asPointerTo(reflect.PtrTo(u.typ)).Elem().Interface().(*float32) - v := &float32Value{*t} - buf, err := Marshal(v) - if err != nil { - return nil, err - } - b = appendVarint(b, wiretag) - b = appendVarint(b, uint64(len(buf))) - b = append(b, buf...) - return b, nil - } -} - -func makeStdFloatValueSliceMarshaler(u *marshalInfo) (sizer, marshaler) { - return func(ptr pointer, tagsize int) int { - s := ptr.getSlice(u.typ) - n := 0 - for i := 0; i < s.Len(); i++ { - elem := s.Index(i) - t := elem.Interface().(float32) - v := &float32Value{t} - siz := Size(v) - n += siz + SizeVarint(uint64(siz)) + tagsize - } - return n - }, - func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) { - s := ptr.getSlice(u.typ) - for i := 0; i < s.Len(); i++ { - elem := s.Index(i) - t := elem.Interface().(float32) - v := &float32Value{t} - siz := Size(v) - buf, err := Marshal(v) - if err != nil { - return nil, err - } - b = appendVarint(b, wiretag) - b = appendVarint(b, uint64(siz)) - b = append(b, buf...) - } - - return b, nil - } -} - -func makeStdFloatValuePtrSliceMarshaler(u *marshalInfo) (sizer, marshaler) { - return func(ptr pointer, tagsize int) int { - s := ptr.getSlice(reflect.PtrTo(u.typ)) - n := 0 - for i := 0; i < s.Len(); i++ { - elem := s.Index(i) - t := elem.Interface().(*float32) - v := &float32Value{*t} - siz := Size(v) - n += siz + SizeVarint(uint64(siz)) + tagsize - } - return n - }, - func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) { - s := ptr.getSlice(reflect.PtrTo(u.typ)) - for i := 0; i < s.Len(); i++ { - elem := s.Index(i) - t := elem.Interface().(*float32) - v := &float32Value{*t} - siz := Size(v) - buf, err := Marshal(v) - if err != nil { - return nil, err - } - b = appendVarint(b, wiretag) - b = appendVarint(b, uint64(siz)) - b = append(b, buf...) 
- } - - return b, nil - } -} - -func makeStdFloatValueUnmarshaler(sub *unmarshalInfo, name string) unmarshaler { - return func(b []byte, f pointer, w int) ([]byte, error) { - if w != WireBytes { - return nil, errInternalBadWireType - } - x, n := decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - if x > uint64(len(b)) { - return nil, io.ErrUnexpectedEOF - } - m := &float32Value{} - if err := Unmarshal(b[:x], m); err != nil { - return nil, err - } - s := f.asPointerTo(sub.typ).Elem() - s.Set(reflect.ValueOf(m.Value)) - return b[x:], nil - } -} - -func makeStdFloatValuePtrUnmarshaler(sub *unmarshalInfo, name string) unmarshaler { - return func(b []byte, f pointer, w int) ([]byte, error) { - if w != WireBytes { - return nil, errInternalBadWireType - } - x, n := decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - if x > uint64(len(b)) { - return nil, io.ErrUnexpectedEOF - } - m := &float32Value{} - if err := Unmarshal(b[:x], m); err != nil { - return nil, err - } - s := f.asPointerTo(reflect.PtrTo(sub.typ)).Elem() - s.Set(reflect.ValueOf(&m.Value)) - return b[x:], nil - } -} - -func makeStdFloatValuePtrSliceUnmarshaler(sub *unmarshalInfo, name string) unmarshaler { - return func(b []byte, f pointer, w int) ([]byte, error) { - if w != WireBytes { - return nil, errInternalBadWireType - } - x, n := decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - if x > uint64(len(b)) { - return nil, io.ErrUnexpectedEOF - } - m := &float32Value{} - if err := Unmarshal(b[:x], m); err != nil { - return nil, err - } - slice := f.getSlice(reflect.PtrTo(sub.typ)) - newSlice := reflect.Append(slice, reflect.ValueOf(&m.Value)) - slice.Set(newSlice) - return b[x:], nil - } -} - -func makeStdFloatValueSliceUnmarshaler(sub *unmarshalInfo, name string) unmarshaler { - return func(b []byte, f pointer, w int) ([]byte, error) { - if w != WireBytes { - return nil, errInternalBadWireType - } - x, n := decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - if x > uint64(len(b)) { - return nil, io.ErrUnexpectedEOF - } - m := &float32Value{} - if err := Unmarshal(b[:x], m); err != nil { - return nil, err - } - slice := f.getSlice(sub.typ) - newSlice := reflect.Append(slice, reflect.ValueOf(m.Value)) - slice.Set(newSlice) - return b[x:], nil - } -} - -func makeStdInt64ValueMarshaler(u *marshalInfo) (sizer, marshaler) { - return func(ptr pointer, tagsize int) int { - t := ptr.asPointerTo(u.typ).Interface().(*int64) - v := &int64Value{*t} - siz := Size(v) - return tagsize + SizeVarint(uint64(siz)) + siz - }, func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) { - t := ptr.asPointerTo(u.typ).Interface().(*int64) - v := &int64Value{*t} - buf, err := Marshal(v) - if err != nil { - return nil, err - } - b = appendVarint(b, wiretag) - b = appendVarint(b, uint64(len(buf))) - b = append(b, buf...) 
- return b, nil - } -} - -func makeStdInt64ValuePtrMarshaler(u *marshalInfo) (sizer, marshaler) { - return func(ptr pointer, tagsize int) int { - if ptr.isNil() { - return 0 - } - t := ptr.asPointerTo(reflect.PtrTo(u.typ)).Elem().Interface().(*int64) - v := &int64Value{*t} - siz := Size(v) - return tagsize + SizeVarint(uint64(siz)) + siz - }, func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) { - if ptr.isNil() { - return b, nil - } - t := ptr.asPointerTo(reflect.PtrTo(u.typ)).Elem().Interface().(*int64) - v := &int64Value{*t} - buf, err := Marshal(v) - if err != nil { - return nil, err - } - b = appendVarint(b, wiretag) - b = appendVarint(b, uint64(len(buf))) - b = append(b, buf...) - return b, nil - } -} - -func makeStdInt64ValueSliceMarshaler(u *marshalInfo) (sizer, marshaler) { - return func(ptr pointer, tagsize int) int { - s := ptr.getSlice(u.typ) - n := 0 - for i := 0; i < s.Len(); i++ { - elem := s.Index(i) - t := elem.Interface().(int64) - v := &int64Value{t} - siz := Size(v) - n += siz + SizeVarint(uint64(siz)) + tagsize - } - return n - }, - func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) { - s := ptr.getSlice(u.typ) - for i := 0; i < s.Len(); i++ { - elem := s.Index(i) - t := elem.Interface().(int64) - v := &int64Value{t} - siz := Size(v) - buf, err := Marshal(v) - if err != nil { - return nil, err - } - b = appendVarint(b, wiretag) - b = appendVarint(b, uint64(siz)) - b = append(b, buf...) - } - - return b, nil - } -} - -func makeStdInt64ValuePtrSliceMarshaler(u *marshalInfo) (sizer, marshaler) { - return func(ptr pointer, tagsize int) int { - s := ptr.getSlice(reflect.PtrTo(u.typ)) - n := 0 - for i := 0; i < s.Len(); i++ { - elem := s.Index(i) - t := elem.Interface().(*int64) - v := &int64Value{*t} - siz := Size(v) - n += siz + SizeVarint(uint64(siz)) + tagsize - } - return n - }, - func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) { - s := ptr.getSlice(reflect.PtrTo(u.typ)) - for i := 0; i < s.Len(); i++ { - elem := s.Index(i) - t := elem.Interface().(*int64) - v := &int64Value{*t} - siz := Size(v) - buf, err := Marshal(v) - if err != nil { - return nil, err - } - b = appendVarint(b, wiretag) - b = appendVarint(b, uint64(siz)) - b = append(b, buf...) 
- } - - return b, nil - } -} - -func makeStdInt64ValueUnmarshaler(sub *unmarshalInfo, name string) unmarshaler { - return func(b []byte, f pointer, w int) ([]byte, error) { - if w != WireBytes { - return nil, errInternalBadWireType - } - x, n := decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - if x > uint64(len(b)) { - return nil, io.ErrUnexpectedEOF - } - m := &int64Value{} - if err := Unmarshal(b[:x], m); err != nil { - return nil, err - } - s := f.asPointerTo(sub.typ).Elem() - s.Set(reflect.ValueOf(m.Value)) - return b[x:], nil - } -} - -func makeStdInt64ValuePtrUnmarshaler(sub *unmarshalInfo, name string) unmarshaler { - return func(b []byte, f pointer, w int) ([]byte, error) { - if w != WireBytes { - return nil, errInternalBadWireType - } - x, n := decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - if x > uint64(len(b)) { - return nil, io.ErrUnexpectedEOF - } - m := &int64Value{} - if err := Unmarshal(b[:x], m); err != nil { - return nil, err - } - s := f.asPointerTo(reflect.PtrTo(sub.typ)).Elem() - s.Set(reflect.ValueOf(&m.Value)) - return b[x:], nil - } -} - -func makeStdInt64ValuePtrSliceUnmarshaler(sub *unmarshalInfo, name string) unmarshaler { - return func(b []byte, f pointer, w int) ([]byte, error) { - if w != WireBytes { - return nil, errInternalBadWireType - } - x, n := decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - if x > uint64(len(b)) { - return nil, io.ErrUnexpectedEOF - } - m := &int64Value{} - if err := Unmarshal(b[:x], m); err != nil { - return nil, err - } - slice := f.getSlice(reflect.PtrTo(sub.typ)) - newSlice := reflect.Append(slice, reflect.ValueOf(&m.Value)) - slice.Set(newSlice) - return b[x:], nil - } -} - -func makeStdInt64ValueSliceUnmarshaler(sub *unmarshalInfo, name string) unmarshaler { - return func(b []byte, f pointer, w int) ([]byte, error) { - if w != WireBytes { - return nil, errInternalBadWireType - } - x, n := decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - if x > uint64(len(b)) { - return nil, io.ErrUnexpectedEOF - } - m := &int64Value{} - if err := Unmarshal(b[:x], m); err != nil { - return nil, err - } - slice := f.getSlice(sub.typ) - newSlice := reflect.Append(slice, reflect.ValueOf(m.Value)) - slice.Set(newSlice) - return b[x:], nil - } -} - -func makeStdUInt64ValueMarshaler(u *marshalInfo) (sizer, marshaler) { - return func(ptr pointer, tagsize int) int { - t := ptr.asPointerTo(u.typ).Interface().(*uint64) - v := &uint64Value{*t} - siz := Size(v) - return tagsize + SizeVarint(uint64(siz)) + siz - }, func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) { - t := ptr.asPointerTo(u.typ).Interface().(*uint64) - v := &uint64Value{*t} - buf, err := Marshal(v) - if err != nil { - return nil, err - } - b = appendVarint(b, wiretag) - b = appendVarint(b, uint64(len(buf))) - b = append(b, buf...) 
- return b, nil - } -} - -func makeStdUInt64ValuePtrMarshaler(u *marshalInfo) (sizer, marshaler) { - return func(ptr pointer, tagsize int) int { - if ptr.isNil() { - return 0 - } - t := ptr.asPointerTo(reflect.PtrTo(u.typ)).Elem().Interface().(*uint64) - v := &uint64Value{*t} - siz := Size(v) - return tagsize + SizeVarint(uint64(siz)) + siz - }, func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) { - if ptr.isNil() { - return b, nil - } - t := ptr.asPointerTo(reflect.PtrTo(u.typ)).Elem().Interface().(*uint64) - v := &uint64Value{*t} - buf, err := Marshal(v) - if err != nil { - return nil, err - } - b = appendVarint(b, wiretag) - b = appendVarint(b, uint64(len(buf))) - b = append(b, buf...) - return b, nil - } -} - -func makeStdUInt64ValueSliceMarshaler(u *marshalInfo) (sizer, marshaler) { - return func(ptr pointer, tagsize int) int { - s := ptr.getSlice(u.typ) - n := 0 - for i := 0; i < s.Len(); i++ { - elem := s.Index(i) - t := elem.Interface().(uint64) - v := &uint64Value{t} - siz := Size(v) - n += siz + SizeVarint(uint64(siz)) + tagsize - } - return n - }, - func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) { - s := ptr.getSlice(u.typ) - for i := 0; i < s.Len(); i++ { - elem := s.Index(i) - t := elem.Interface().(uint64) - v := &uint64Value{t} - siz := Size(v) - buf, err := Marshal(v) - if err != nil { - return nil, err - } - b = appendVarint(b, wiretag) - b = appendVarint(b, uint64(siz)) - b = append(b, buf...) - } - - return b, nil - } -} - -func makeStdUInt64ValuePtrSliceMarshaler(u *marshalInfo) (sizer, marshaler) { - return func(ptr pointer, tagsize int) int { - s := ptr.getSlice(reflect.PtrTo(u.typ)) - n := 0 - for i := 0; i < s.Len(); i++ { - elem := s.Index(i) - t := elem.Interface().(*uint64) - v := &uint64Value{*t} - siz := Size(v) - n += siz + SizeVarint(uint64(siz)) + tagsize - } - return n - }, - func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) { - s := ptr.getSlice(reflect.PtrTo(u.typ)) - for i := 0; i < s.Len(); i++ { - elem := s.Index(i) - t := elem.Interface().(*uint64) - v := &uint64Value{*t} - siz := Size(v) - buf, err := Marshal(v) - if err != nil { - return nil, err - } - b = appendVarint(b, wiretag) - b = appendVarint(b, uint64(siz)) - b = append(b, buf...) 
- } - - return b, nil - } -} - -func makeStdUInt64ValueUnmarshaler(sub *unmarshalInfo, name string) unmarshaler { - return func(b []byte, f pointer, w int) ([]byte, error) { - if w != WireBytes { - return nil, errInternalBadWireType - } - x, n := decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - if x > uint64(len(b)) { - return nil, io.ErrUnexpectedEOF - } - m := &uint64Value{} - if err := Unmarshal(b[:x], m); err != nil { - return nil, err - } - s := f.asPointerTo(sub.typ).Elem() - s.Set(reflect.ValueOf(m.Value)) - return b[x:], nil - } -} - -func makeStdUInt64ValuePtrUnmarshaler(sub *unmarshalInfo, name string) unmarshaler { - return func(b []byte, f pointer, w int) ([]byte, error) { - if w != WireBytes { - return nil, errInternalBadWireType - } - x, n := decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - if x > uint64(len(b)) { - return nil, io.ErrUnexpectedEOF - } - m := &uint64Value{} - if err := Unmarshal(b[:x], m); err != nil { - return nil, err - } - s := f.asPointerTo(reflect.PtrTo(sub.typ)).Elem() - s.Set(reflect.ValueOf(&m.Value)) - return b[x:], nil - } -} - -func makeStdUInt64ValuePtrSliceUnmarshaler(sub *unmarshalInfo, name string) unmarshaler { - return func(b []byte, f pointer, w int) ([]byte, error) { - if w != WireBytes { - return nil, errInternalBadWireType - } - x, n := decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - if x > uint64(len(b)) { - return nil, io.ErrUnexpectedEOF - } - m := &uint64Value{} - if err := Unmarshal(b[:x], m); err != nil { - return nil, err - } - slice := f.getSlice(reflect.PtrTo(sub.typ)) - newSlice := reflect.Append(slice, reflect.ValueOf(&m.Value)) - slice.Set(newSlice) - return b[x:], nil - } -} - -func makeStdUInt64ValueSliceUnmarshaler(sub *unmarshalInfo, name string) unmarshaler { - return func(b []byte, f pointer, w int) ([]byte, error) { - if w != WireBytes { - return nil, errInternalBadWireType - } - x, n := decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - if x > uint64(len(b)) { - return nil, io.ErrUnexpectedEOF - } - m := &uint64Value{} - if err := Unmarshal(b[:x], m); err != nil { - return nil, err - } - slice := f.getSlice(sub.typ) - newSlice := reflect.Append(slice, reflect.ValueOf(m.Value)) - slice.Set(newSlice) - return b[x:], nil - } -} - -func makeStdInt32ValueMarshaler(u *marshalInfo) (sizer, marshaler) { - return func(ptr pointer, tagsize int) int { - t := ptr.asPointerTo(u.typ).Interface().(*int32) - v := &int32Value{*t} - siz := Size(v) - return tagsize + SizeVarint(uint64(siz)) + siz - }, func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) { - t := ptr.asPointerTo(u.typ).Interface().(*int32) - v := &int32Value{*t} - buf, err := Marshal(v) - if err != nil { - return nil, err - } - b = appendVarint(b, wiretag) - b = appendVarint(b, uint64(len(buf))) - b = append(b, buf...) 
- return b, nil - } -} - -func makeStdInt32ValuePtrMarshaler(u *marshalInfo) (sizer, marshaler) { - return func(ptr pointer, tagsize int) int { - if ptr.isNil() { - return 0 - } - t := ptr.asPointerTo(reflect.PtrTo(u.typ)).Elem().Interface().(*int32) - v := &int32Value{*t} - siz := Size(v) - return tagsize + SizeVarint(uint64(siz)) + siz - }, func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) { - if ptr.isNil() { - return b, nil - } - t := ptr.asPointerTo(reflect.PtrTo(u.typ)).Elem().Interface().(*int32) - v := &int32Value{*t} - buf, err := Marshal(v) - if err != nil { - return nil, err - } - b = appendVarint(b, wiretag) - b = appendVarint(b, uint64(len(buf))) - b = append(b, buf...) - return b, nil - } -} - -func makeStdInt32ValueSliceMarshaler(u *marshalInfo) (sizer, marshaler) { - return func(ptr pointer, tagsize int) int { - s := ptr.getSlice(u.typ) - n := 0 - for i := 0; i < s.Len(); i++ { - elem := s.Index(i) - t := elem.Interface().(int32) - v := &int32Value{t} - siz := Size(v) - n += siz + SizeVarint(uint64(siz)) + tagsize - } - return n - }, - func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) { - s := ptr.getSlice(u.typ) - for i := 0; i < s.Len(); i++ { - elem := s.Index(i) - t := elem.Interface().(int32) - v := &int32Value{t} - siz := Size(v) - buf, err := Marshal(v) - if err != nil { - return nil, err - } - b = appendVarint(b, wiretag) - b = appendVarint(b, uint64(siz)) - b = append(b, buf...) - } - - return b, nil - } -} - -func makeStdInt32ValuePtrSliceMarshaler(u *marshalInfo) (sizer, marshaler) { - return func(ptr pointer, tagsize int) int { - s := ptr.getSlice(reflect.PtrTo(u.typ)) - n := 0 - for i := 0; i < s.Len(); i++ { - elem := s.Index(i) - t := elem.Interface().(*int32) - v := &int32Value{*t} - siz := Size(v) - n += siz + SizeVarint(uint64(siz)) + tagsize - } - return n - }, - func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) { - s := ptr.getSlice(reflect.PtrTo(u.typ)) - for i := 0; i < s.Len(); i++ { - elem := s.Index(i) - t := elem.Interface().(*int32) - v := &int32Value{*t} - siz := Size(v) - buf, err := Marshal(v) - if err != nil { - return nil, err - } - b = appendVarint(b, wiretag) - b = appendVarint(b, uint64(siz)) - b = append(b, buf...) 
- } - - return b, nil - } -} - -func makeStdInt32ValueUnmarshaler(sub *unmarshalInfo, name string) unmarshaler { - return func(b []byte, f pointer, w int) ([]byte, error) { - if w != WireBytes { - return nil, errInternalBadWireType - } - x, n := decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - if x > uint64(len(b)) { - return nil, io.ErrUnexpectedEOF - } - m := &int32Value{} - if err := Unmarshal(b[:x], m); err != nil { - return nil, err - } - s := f.asPointerTo(sub.typ).Elem() - s.Set(reflect.ValueOf(m.Value)) - return b[x:], nil - } -} - -func makeStdInt32ValuePtrUnmarshaler(sub *unmarshalInfo, name string) unmarshaler { - return func(b []byte, f pointer, w int) ([]byte, error) { - if w != WireBytes { - return nil, errInternalBadWireType - } - x, n := decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - if x > uint64(len(b)) { - return nil, io.ErrUnexpectedEOF - } - m := &int32Value{} - if err := Unmarshal(b[:x], m); err != nil { - return nil, err - } - s := f.asPointerTo(reflect.PtrTo(sub.typ)).Elem() - s.Set(reflect.ValueOf(&m.Value)) - return b[x:], nil - } -} - -func makeStdInt32ValuePtrSliceUnmarshaler(sub *unmarshalInfo, name string) unmarshaler { - return func(b []byte, f pointer, w int) ([]byte, error) { - if w != WireBytes { - return nil, errInternalBadWireType - } - x, n := decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - if x > uint64(len(b)) { - return nil, io.ErrUnexpectedEOF - } - m := &int32Value{} - if err := Unmarshal(b[:x], m); err != nil { - return nil, err - } - slice := f.getSlice(reflect.PtrTo(sub.typ)) - newSlice := reflect.Append(slice, reflect.ValueOf(&m.Value)) - slice.Set(newSlice) - return b[x:], nil - } -} - -func makeStdInt32ValueSliceUnmarshaler(sub *unmarshalInfo, name string) unmarshaler { - return func(b []byte, f pointer, w int) ([]byte, error) { - if w != WireBytes { - return nil, errInternalBadWireType - } - x, n := decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - if x > uint64(len(b)) { - return nil, io.ErrUnexpectedEOF - } - m := &int32Value{} - if err := Unmarshal(b[:x], m); err != nil { - return nil, err - } - slice := f.getSlice(sub.typ) - newSlice := reflect.Append(slice, reflect.ValueOf(m.Value)) - slice.Set(newSlice) - return b[x:], nil - } -} - -func makeStdUInt32ValueMarshaler(u *marshalInfo) (sizer, marshaler) { - return func(ptr pointer, tagsize int) int { - t := ptr.asPointerTo(u.typ).Interface().(*uint32) - v := &uint32Value{*t} - siz := Size(v) - return tagsize + SizeVarint(uint64(siz)) + siz - }, func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) { - t := ptr.asPointerTo(u.typ).Interface().(*uint32) - v := &uint32Value{*t} - buf, err := Marshal(v) - if err != nil { - return nil, err - } - b = appendVarint(b, wiretag) - b = appendVarint(b, uint64(len(buf))) - b = append(b, buf...) 
- return b, nil - } -} - -func makeStdUInt32ValuePtrMarshaler(u *marshalInfo) (sizer, marshaler) { - return func(ptr pointer, tagsize int) int { - if ptr.isNil() { - return 0 - } - t := ptr.asPointerTo(reflect.PtrTo(u.typ)).Elem().Interface().(*uint32) - v := &uint32Value{*t} - siz := Size(v) - return tagsize + SizeVarint(uint64(siz)) + siz - }, func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) { - if ptr.isNil() { - return b, nil - } - t := ptr.asPointerTo(reflect.PtrTo(u.typ)).Elem().Interface().(*uint32) - v := &uint32Value{*t} - buf, err := Marshal(v) - if err != nil { - return nil, err - } - b = appendVarint(b, wiretag) - b = appendVarint(b, uint64(len(buf))) - b = append(b, buf...) - return b, nil - } -} - -func makeStdUInt32ValueSliceMarshaler(u *marshalInfo) (sizer, marshaler) { - return func(ptr pointer, tagsize int) int { - s := ptr.getSlice(u.typ) - n := 0 - for i := 0; i < s.Len(); i++ { - elem := s.Index(i) - t := elem.Interface().(uint32) - v := &uint32Value{t} - siz := Size(v) - n += siz + SizeVarint(uint64(siz)) + tagsize - } - return n - }, - func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) { - s := ptr.getSlice(u.typ) - for i := 0; i < s.Len(); i++ { - elem := s.Index(i) - t := elem.Interface().(uint32) - v := &uint32Value{t} - siz := Size(v) - buf, err := Marshal(v) - if err != nil { - return nil, err - } - b = appendVarint(b, wiretag) - b = appendVarint(b, uint64(siz)) - b = append(b, buf...) - } - - return b, nil - } -} - -func makeStdUInt32ValuePtrSliceMarshaler(u *marshalInfo) (sizer, marshaler) { - return func(ptr pointer, tagsize int) int { - s := ptr.getSlice(reflect.PtrTo(u.typ)) - n := 0 - for i := 0; i < s.Len(); i++ { - elem := s.Index(i) - t := elem.Interface().(*uint32) - v := &uint32Value{*t} - siz := Size(v) - n += siz + SizeVarint(uint64(siz)) + tagsize - } - return n - }, - func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) { - s := ptr.getSlice(reflect.PtrTo(u.typ)) - for i := 0; i < s.Len(); i++ { - elem := s.Index(i) - t := elem.Interface().(*uint32) - v := &uint32Value{*t} - siz := Size(v) - buf, err := Marshal(v) - if err != nil { - return nil, err - } - b = appendVarint(b, wiretag) - b = appendVarint(b, uint64(siz)) - b = append(b, buf...) 
- } - - return b, nil - } -} - -func makeStdUInt32ValueUnmarshaler(sub *unmarshalInfo, name string) unmarshaler { - return func(b []byte, f pointer, w int) ([]byte, error) { - if w != WireBytes { - return nil, errInternalBadWireType - } - x, n := decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - if x > uint64(len(b)) { - return nil, io.ErrUnexpectedEOF - } - m := &uint32Value{} - if err := Unmarshal(b[:x], m); err != nil { - return nil, err - } - s := f.asPointerTo(sub.typ).Elem() - s.Set(reflect.ValueOf(m.Value)) - return b[x:], nil - } -} - -func makeStdUInt32ValuePtrUnmarshaler(sub *unmarshalInfo, name string) unmarshaler { - return func(b []byte, f pointer, w int) ([]byte, error) { - if w != WireBytes { - return nil, errInternalBadWireType - } - x, n := decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - if x > uint64(len(b)) { - return nil, io.ErrUnexpectedEOF - } - m := &uint32Value{} - if err := Unmarshal(b[:x], m); err != nil { - return nil, err - } - s := f.asPointerTo(reflect.PtrTo(sub.typ)).Elem() - s.Set(reflect.ValueOf(&m.Value)) - return b[x:], nil - } -} - -func makeStdUInt32ValuePtrSliceUnmarshaler(sub *unmarshalInfo, name string) unmarshaler { - return func(b []byte, f pointer, w int) ([]byte, error) { - if w != WireBytes { - return nil, errInternalBadWireType - } - x, n := decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - if x > uint64(len(b)) { - return nil, io.ErrUnexpectedEOF - } - m := &uint32Value{} - if err := Unmarshal(b[:x], m); err != nil { - return nil, err - } - slice := f.getSlice(reflect.PtrTo(sub.typ)) - newSlice := reflect.Append(slice, reflect.ValueOf(&m.Value)) - slice.Set(newSlice) - return b[x:], nil - } -} - -func makeStdUInt32ValueSliceUnmarshaler(sub *unmarshalInfo, name string) unmarshaler { - return func(b []byte, f pointer, w int) ([]byte, error) { - if w != WireBytes { - return nil, errInternalBadWireType - } - x, n := decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - if x > uint64(len(b)) { - return nil, io.ErrUnexpectedEOF - } - m := &uint32Value{} - if err := Unmarshal(b[:x], m); err != nil { - return nil, err - } - slice := f.getSlice(sub.typ) - newSlice := reflect.Append(slice, reflect.ValueOf(m.Value)) - slice.Set(newSlice) - return b[x:], nil - } -} - -func makeStdBoolValueMarshaler(u *marshalInfo) (sizer, marshaler) { - return func(ptr pointer, tagsize int) int { - t := ptr.asPointerTo(u.typ).Interface().(*bool) - v := &boolValue{*t} - siz := Size(v) - return tagsize + SizeVarint(uint64(siz)) + siz - }, func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) { - t := ptr.asPointerTo(u.typ).Interface().(*bool) - v := &boolValue{*t} - buf, err := Marshal(v) - if err != nil { - return nil, err - } - b = appendVarint(b, wiretag) - b = appendVarint(b, uint64(len(buf))) - b = append(b, buf...) 
- return b, nil - } -} - -func makeStdBoolValuePtrMarshaler(u *marshalInfo) (sizer, marshaler) { - return func(ptr pointer, tagsize int) int { - if ptr.isNil() { - return 0 - } - t := ptr.asPointerTo(reflect.PtrTo(u.typ)).Elem().Interface().(*bool) - v := &boolValue{*t} - siz := Size(v) - return tagsize + SizeVarint(uint64(siz)) + siz - }, func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) { - if ptr.isNil() { - return b, nil - } - t := ptr.asPointerTo(reflect.PtrTo(u.typ)).Elem().Interface().(*bool) - v := &boolValue{*t} - buf, err := Marshal(v) - if err != nil { - return nil, err - } - b = appendVarint(b, wiretag) - b = appendVarint(b, uint64(len(buf))) - b = append(b, buf...) - return b, nil - } -} - -func makeStdBoolValueSliceMarshaler(u *marshalInfo) (sizer, marshaler) { - return func(ptr pointer, tagsize int) int { - s := ptr.getSlice(u.typ) - n := 0 - for i := 0; i < s.Len(); i++ { - elem := s.Index(i) - t := elem.Interface().(bool) - v := &boolValue{t} - siz := Size(v) - n += siz + SizeVarint(uint64(siz)) + tagsize - } - return n - }, - func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) { - s := ptr.getSlice(u.typ) - for i := 0; i < s.Len(); i++ { - elem := s.Index(i) - t := elem.Interface().(bool) - v := &boolValue{t} - siz := Size(v) - buf, err := Marshal(v) - if err != nil { - return nil, err - } - b = appendVarint(b, wiretag) - b = appendVarint(b, uint64(siz)) - b = append(b, buf...) - } - - return b, nil - } -} - -func makeStdBoolValuePtrSliceMarshaler(u *marshalInfo) (sizer, marshaler) { - return func(ptr pointer, tagsize int) int { - s := ptr.getSlice(reflect.PtrTo(u.typ)) - n := 0 - for i := 0; i < s.Len(); i++ { - elem := s.Index(i) - t := elem.Interface().(*bool) - v := &boolValue{*t} - siz := Size(v) - n += siz + SizeVarint(uint64(siz)) + tagsize - } - return n - }, - func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) { - s := ptr.getSlice(reflect.PtrTo(u.typ)) - for i := 0; i < s.Len(); i++ { - elem := s.Index(i) - t := elem.Interface().(*bool) - v := &boolValue{*t} - siz := Size(v) - buf, err := Marshal(v) - if err != nil { - return nil, err - } - b = appendVarint(b, wiretag) - b = appendVarint(b, uint64(siz)) - b = append(b, buf...) 
- } - - return b, nil - } -} - -func makeStdBoolValueUnmarshaler(sub *unmarshalInfo, name string) unmarshaler { - return func(b []byte, f pointer, w int) ([]byte, error) { - if w != WireBytes { - return nil, errInternalBadWireType - } - x, n := decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - if x > uint64(len(b)) { - return nil, io.ErrUnexpectedEOF - } - m := &boolValue{} - if err := Unmarshal(b[:x], m); err != nil { - return nil, err - } - s := f.asPointerTo(sub.typ).Elem() - s.Set(reflect.ValueOf(m.Value)) - return b[x:], nil - } -} - -func makeStdBoolValuePtrUnmarshaler(sub *unmarshalInfo, name string) unmarshaler { - return func(b []byte, f pointer, w int) ([]byte, error) { - if w != WireBytes { - return nil, errInternalBadWireType - } - x, n := decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - if x > uint64(len(b)) { - return nil, io.ErrUnexpectedEOF - } - m := &boolValue{} - if err := Unmarshal(b[:x], m); err != nil { - return nil, err - } - s := f.asPointerTo(reflect.PtrTo(sub.typ)).Elem() - s.Set(reflect.ValueOf(&m.Value)) - return b[x:], nil - } -} - -func makeStdBoolValuePtrSliceUnmarshaler(sub *unmarshalInfo, name string) unmarshaler { - return func(b []byte, f pointer, w int) ([]byte, error) { - if w != WireBytes { - return nil, errInternalBadWireType - } - x, n := decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - if x > uint64(len(b)) { - return nil, io.ErrUnexpectedEOF - } - m := &boolValue{} - if err := Unmarshal(b[:x], m); err != nil { - return nil, err - } - slice := f.getSlice(reflect.PtrTo(sub.typ)) - newSlice := reflect.Append(slice, reflect.ValueOf(&m.Value)) - slice.Set(newSlice) - return b[x:], nil - } -} - -func makeStdBoolValueSliceUnmarshaler(sub *unmarshalInfo, name string) unmarshaler { - return func(b []byte, f pointer, w int) ([]byte, error) { - if w != WireBytes { - return nil, errInternalBadWireType - } - x, n := decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - if x > uint64(len(b)) { - return nil, io.ErrUnexpectedEOF - } - m := &boolValue{} - if err := Unmarshal(b[:x], m); err != nil { - return nil, err - } - slice := f.getSlice(sub.typ) - newSlice := reflect.Append(slice, reflect.ValueOf(m.Value)) - slice.Set(newSlice) - return b[x:], nil - } -} - -func makeStdStringValueMarshaler(u *marshalInfo) (sizer, marshaler) { - return func(ptr pointer, tagsize int) int { - t := ptr.asPointerTo(u.typ).Interface().(*string) - v := &stringValue{*t} - siz := Size(v) - return tagsize + SizeVarint(uint64(siz)) + siz - }, func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) { - t := ptr.asPointerTo(u.typ).Interface().(*string) - v := &stringValue{*t} - buf, err := Marshal(v) - if err != nil { - return nil, err - } - b = appendVarint(b, wiretag) - b = appendVarint(b, uint64(len(buf))) - b = append(b, buf...) 
- return b, nil - } -} - -func makeStdStringValuePtrMarshaler(u *marshalInfo) (sizer, marshaler) { - return func(ptr pointer, tagsize int) int { - if ptr.isNil() { - return 0 - } - t := ptr.asPointerTo(reflect.PtrTo(u.typ)).Elem().Interface().(*string) - v := &stringValue{*t} - siz := Size(v) - return tagsize + SizeVarint(uint64(siz)) + siz - }, func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) { - if ptr.isNil() { - return b, nil - } - t := ptr.asPointerTo(reflect.PtrTo(u.typ)).Elem().Interface().(*string) - v := &stringValue{*t} - buf, err := Marshal(v) - if err != nil { - return nil, err - } - b = appendVarint(b, wiretag) - b = appendVarint(b, uint64(len(buf))) - b = append(b, buf...) - return b, nil - } -} - -func makeStdStringValueSliceMarshaler(u *marshalInfo) (sizer, marshaler) { - return func(ptr pointer, tagsize int) int { - s := ptr.getSlice(u.typ) - n := 0 - for i := 0; i < s.Len(); i++ { - elem := s.Index(i) - t := elem.Interface().(string) - v := &stringValue{t} - siz := Size(v) - n += siz + SizeVarint(uint64(siz)) + tagsize - } - return n - }, - func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) { - s := ptr.getSlice(u.typ) - for i := 0; i < s.Len(); i++ { - elem := s.Index(i) - t := elem.Interface().(string) - v := &stringValue{t} - siz := Size(v) - buf, err := Marshal(v) - if err != nil { - return nil, err - } - b = appendVarint(b, wiretag) - b = appendVarint(b, uint64(siz)) - b = append(b, buf...) - } - - return b, nil - } -} - -func makeStdStringValuePtrSliceMarshaler(u *marshalInfo) (sizer, marshaler) { - return func(ptr pointer, tagsize int) int { - s := ptr.getSlice(reflect.PtrTo(u.typ)) - n := 0 - for i := 0; i < s.Len(); i++ { - elem := s.Index(i) - t := elem.Interface().(*string) - v := &stringValue{*t} - siz := Size(v) - n += siz + SizeVarint(uint64(siz)) + tagsize - } - return n - }, - func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) { - s := ptr.getSlice(reflect.PtrTo(u.typ)) - for i := 0; i < s.Len(); i++ { - elem := s.Index(i) - t := elem.Interface().(*string) - v := &stringValue{*t} - siz := Size(v) - buf, err := Marshal(v) - if err != nil { - return nil, err - } - b = appendVarint(b, wiretag) - b = appendVarint(b, uint64(siz)) - b = append(b, buf...) 
- } - - return b, nil - } -} - -func makeStdStringValueUnmarshaler(sub *unmarshalInfo, name string) unmarshaler { - return func(b []byte, f pointer, w int) ([]byte, error) { - if w != WireBytes { - return nil, errInternalBadWireType - } - x, n := decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - if x > uint64(len(b)) { - return nil, io.ErrUnexpectedEOF - } - m := &stringValue{} - if err := Unmarshal(b[:x], m); err != nil { - return nil, err - } - s := f.asPointerTo(sub.typ).Elem() - s.Set(reflect.ValueOf(m.Value)) - return b[x:], nil - } -} - -func makeStdStringValuePtrUnmarshaler(sub *unmarshalInfo, name string) unmarshaler { - return func(b []byte, f pointer, w int) ([]byte, error) { - if w != WireBytes { - return nil, errInternalBadWireType - } - x, n := decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - if x > uint64(len(b)) { - return nil, io.ErrUnexpectedEOF - } - m := &stringValue{} - if err := Unmarshal(b[:x], m); err != nil { - return nil, err - } - s := f.asPointerTo(reflect.PtrTo(sub.typ)).Elem() - s.Set(reflect.ValueOf(&m.Value)) - return b[x:], nil - } -} - -func makeStdStringValuePtrSliceUnmarshaler(sub *unmarshalInfo, name string) unmarshaler { - return func(b []byte, f pointer, w int) ([]byte, error) { - if w != WireBytes { - return nil, errInternalBadWireType - } - x, n := decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - if x > uint64(len(b)) { - return nil, io.ErrUnexpectedEOF - } - m := &stringValue{} - if err := Unmarshal(b[:x], m); err != nil { - return nil, err - } - slice := f.getSlice(reflect.PtrTo(sub.typ)) - newSlice := reflect.Append(slice, reflect.ValueOf(&m.Value)) - slice.Set(newSlice) - return b[x:], nil - } -} - -func makeStdStringValueSliceUnmarshaler(sub *unmarshalInfo, name string) unmarshaler { - return func(b []byte, f pointer, w int) ([]byte, error) { - if w != WireBytes { - return nil, errInternalBadWireType - } - x, n := decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - if x > uint64(len(b)) { - return nil, io.ErrUnexpectedEOF - } - m := &stringValue{} - if err := Unmarshal(b[:x], m); err != nil { - return nil, err - } - slice := f.getSlice(sub.typ) - newSlice := reflect.Append(slice, reflect.ValueOf(m.Value)) - slice.Set(newSlice) - return b[x:], nil - } -} - -func makeStdBytesValueMarshaler(u *marshalInfo) (sizer, marshaler) { - return func(ptr pointer, tagsize int) int { - t := ptr.asPointerTo(u.typ).Interface().(*[]byte) - v := &bytesValue{*t} - siz := Size(v) - return tagsize + SizeVarint(uint64(siz)) + siz - }, func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) { - t := ptr.asPointerTo(u.typ).Interface().(*[]byte) - v := &bytesValue{*t} - buf, err := Marshal(v) - if err != nil { - return nil, err - } - b = appendVarint(b, wiretag) - b = appendVarint(b, uint64(len(buf))) - b = append(b, buf...) 
- return b, nil - } -} - -func makeStdBytesValuePtrMarshaler(u *marshalInfo) (sizer, marshaler) { - return func(ptr pointer, tagsize int) int { - if ptr.isNil() { - return 0 - } - t := ptr.asPointerTo(reflect.PtrTo(u.typ)).Elem().Interface().(*[]byte) - v := &bytesValue{*t} - siz := Size(v) - return tagsize + SizeVarint(uint64(siz)) + siz - }, func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) { - if ptr.isNil() { - return b, nil - } - t := ptr.asPointerTo(reflect.PtrTo(u.typ)).Elem().Interface().(*[]byte) - v := &bytesValue{*t} - buf, err := Marshal(v) - if err != nil { - return nil, err - } - b = appendVarint(b, wiretag) - b = appendVarint(b, uint64(len(buf))) - b = append(b, buf...) - return b, nil - } -} - -func makeStdBytesValueSliceMarshaler(u *marshalInfo) (sizer, marshaler) { - return func(ptr pointer, tagsize int) int { - s := ptr.getSlice(u.typ) - n := 0 - for i := 0; i < s.Len(); i++ { - elem := s.Index(i) - t := elem.Interface().([]byte) - v := &bytesValue{t} - siz := Size(v) - n += siz + SizeVarint(uint64(siz)) + tagsize - } - return n - }, - func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) { - s := ptr.getSlice(u.typ) - for i := 0; i < s.Len(); i++ { - elem := s.Index(i) - t := elem.Interface().([]byte) - v := &bytesValue{t} - siz := Size(v) - buf, err := Marshal(v) - if err != nil { - return nil, err - } - b = appendVarint(b, wiretag) - b = appendVarint(b, uint64(siz)) - b = append(b, buf...) - } - - return b, nil - } -} - -func makeStdBytesValuePtrSliceMarshaler(u *marshalInfo) (sizer, marshaler) { - return func(ptr pointer, tagsize int) int { - s := ptr.getSlice(reflect.PtrTo(u.typ)) - n := 0 - for i := 0; i < s.Len(); i++ { - elem := s.Index(i) - t := elem.Interface().(*[]byte) - v := &bytesValue{*t} - siz := Size(v) - n += siz + SizeVarint(uint64(siz)) + tagsize - } - return n - }, - func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) { - s := ptr.getSlice(reflect.PtrTo(u.typ)) - for i := 0; i < s.Len(); i++ { - elem := s.Index(i) - t := elem.Interface().(*[]byte) - v := &bytesValue{*t} - siz := Size(v) - buf, err := Marshal(v) - if err != nil { - return nil, err - } - b = appendVarint(b, wiretag) - b = appendVarint(b, uint64(siz)) - b = append(b, buf...) 
- } - - return b, nil - } -} - -func makeStdBytesValueUnmarshaler(sub *unmarshalInfo, name string) unmarshaler { - return func(b []byte, f pointer, w int) ([]byte, error) { - if w != WireBytes { - return nil, errInternalBadWireType - } - x, n := decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - if x > uint64(len(b)) { - return nil, io.ErrUnexpectedEOF - } - m := &bytesValue{} - if err := Unmarshal(b[:x], m); err != nil { - return nil, err - } - s := f.asPointerTo(sub.typ).Elem() - s.Set(reflect.ValueOf(m.Value)) - return b[x:], nil - } -} - -func makeStdBytesValuePtrUnmarshaler(sub *unmarshalInfo, name string) unmarshaler { - return func(b []byte, f pointer, w int) ([]byte, error) { - if w != WireBytes { - return nil, errInternalBadWireType - } - x, n := decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - if x > uint64(len(b)) { - return nil, io.ErrUnexpectedEOF - } - m := &bytesValue{} - if err := Unmarshal(b[:x], m); err != nil { - return nil, err - } - s := f.asPointerTo(reflect.PtrTo(sub.typ)).Elem() - s.Set(reflect.ValueOf(&m.Value)) - return b[x:], nil - } -} - -func makeStdBytesValuePtrSliceUnmarshaler(sub *unmarshalInfo, name string) unmarshaler { - return func(b []byte, f pointer, w int) ([]byte, error) { - if w != WireBytes { - return nil, errInternalBadWireType - } - x, n := decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - if x > uint64(len(b)) { - return nil, io.ErrUnexpectedEOF - } - m := &bytesValue{} - if err := Unmarshal(b[:x], m); err != nil { - return nil, err - } - slice := f.getSlice(reflect.PtrTo(sub.typ)) - newSlice := reflect.Append(slice, reflect.ValueOf(&m.Value)) - slice.Set(newSlice) - return b[x:], nil - } -} - -func makeStdBytesValueSliceUnmarshaler(sub *unmarshalInfo, name string) unmarshaler { - return func(b []byte, f pointer, w int) ([]byte, error) { - if w != WireBytes { - return nil, errInternalBadWireType - } - x, n := decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - if x > uint64(len(b)) { - return nil, io.ErrUnexpectedEOF - } - m := &bytesValue{} - if err := Unmarshal(b[:x], m); err != nil { - return nil, err - } - slice := f.getSlice(sub.typ) - newSlice := reflect.Append(slice, reflect.ValueOf(m.Value)) - slice.Set(newSlice) - return b[x:], nil - } -} diff --git a/vendor/github.com/gogo/protobuf/proto/wrappers_gogo.go b/vendor/github.com/gogo/protobuf/proto/wrappers_gogo.go deleted file mode 100644 index c1cf7bf85e..0000000000 --- a/vendor/github.com/gogo/protobuf/proto/wrappers_gogo.go +++ /dev/null @@ -1,113 +0,0 @@ -// Protocol Buffers for Go with Gadgets -// -// Copyright (c) 2018, The GoGo Authors. All rights reserved. -// http://github.com/gogo/protobuf -// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following disclaimer -// in the documentation and/or other materials provided with the -// distribution. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -package proto - -type float64Value struct { - Value float64 `protobuf:"fixed64,1,opt,name=value,proto3" json:"value,omitempty"` -} - -func (m *float64Value) Reset() { *m = float64Value{} } -func (*float64Value) ProtoMessage() {} -func (*float64Value) String() string { return "float64" } - -type float32Value struct { - Value float32 `protobuf:"fixed32,1,opt,name=value,proto3" json:"value,omitempty"` -} - -func (m *float32Value) Reset() { *m = float32Value{} } -func (*float32Value) ProtoMessage() {} -func (*float32Value) String() string { return "float32" } - -type int64Value struct { - Value int64 `protobuf:"varint,1,opt,name=value,proto3" json:"value,omitempty"` -} - -func (m *int64Value) Reset() { *m = int64Value{} } -func (*int64Value) ProtoMessage() {} -func (*int64Value) String() string { return "int64" } - -type uint64Value struct { - Value uint64 `protobuf:"varint,1,opt,name=value,proto3" json:"value,omitempty"` -} - -func (m *uint64Value) Reset() { *m = uint64Value{} } -func (*uint64Value) ProtoMessage() {} -func (*uint64Value) String() string { return "uint64" } - -type int32Value struct { - Value int32 `protobuf:"varint,1,opt,name=value,proto3" json:"value,omitempty"` -} - -func (m *int32Value) Reset() { *m = int32Value{} } -func (*int32Value) ProtoMessage() {} -func (*int32Value) String() string { return "int32" } - -type uint32Value struct { - Value uint32 `protobuf:"varint,1,opt,name=value,proto3" json:"value,omitempty"` -} - -func (m *uint32Value) Reset() { *m = uint32Value{} } -func (*uint32Value) ProtoMessage() {} -func (*uint32Value) String() string { return "uint32" } - -type boolValue struct { - Value bool `protobuf:"varint,1,opt,name=value,proto3" json:"value,omitempty"` -} - -func (m *boolValue) Reset() { *m = boolValue{} } -func (*boolValue) ProtoMessage() {} -func (*boolValue) String() string { return "bool" } - -type stringValue struct { - Value string `protobuf:"bytes,1,opt,name=value,proto3" json:"value,omitempty"` -} - -func (m *stringValue) Reset() { *m = stringValue{} } -func (*stringValue) ProtoMessage() {} -func (*stringValue) String() string { return "string" } - -type bytesValue struct { - Value []byte `protobuf:"bytes,1,opt,name=value,proto3" json:"value,omitempty"` -} - -func (m *bytesValue) Reset() { *m = bytesValue{} } -func (*bytesValue) ProtoMessage() {} -func (*bytesValue) String() string { return "[]byte" } - -func init() { - RegisterType((*float64Value)(nil), "gogo.protobuf.proto.DoubleValue") - RegisterType((*float32Value)(nil), "gogo.protobuf.proto.FloatValue") - RegisterType((*int64Value)(nil), "gogo.protobuf.proto.Int64Value") - RegisterType((*uint64Value)(nil), "gogo.protobuf.proto.UInt64Value") - RegisterType((*int32Value)(nil), "gogo.protobuf.proto.Int32Value") - RegisterType((*uint32Value)(nil), "gogo.protobuf.proto.UInt32Value") - RegisterType((*boolValue)(nil), "gogo.protobuf.proto.BoolValue") - RegisterType((*stringValue)(nil), "gogo.protobuf.proto.StringValue") - RegisterType((*bytesValue)(nil), 
"gogo.protobuf.proto.BytesValue") -} diff --git a/vendor/github.com/golang/glog/LICENSE b/vendor/github.com/golang/glog/LICENSE deleted file mode 100644 index 37ec93a14f..0000000000 --- a/vendor/github.com/golang/glog/LICENSE +++ /dev/null @@ -1,191 +0,0 @@ -Apache License -Version 2.0, January 2004 -http://www.apache.org/licenses/ - -TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION - -1. Definitions. - -"License" shall mean the terms and conditions for use, reproduction, and -distribution as defined by Sections 1 through 9 of this document. - -"Licensor" shall mean the copyright owner or entity authorized by the copyright -owner that is granting the License. - -"Legal Entity" shall mean the union of the acting entity and all other entities -that control, are controlled by, or are under common control with that entity. -For the purposes of this definition, "control" means (i) the power, direct or -indirect, to cause the direction or management of such entity, whether by -contract or otherwise, or (ii) ownership of fifty percent (50%) or more of the -outstanding shares, or (iii) beneficial ownership of such entity. - -"You" (or "Your") shall mean an individual or Legal Entity exercising -permissions granted by this License. - -"Source" form shall mean the preferred form for making modifications, including -but not limited to software source code, documentation source, and configuration -files. - -"Object" form shall mean any form resulting from mechanical transformation or -translation of a Source form, including but not limited to compiled object code, -generated documentation, and conversions to other media types. - -"Work" shall mean the work of authorship, whether in Source or Object form, made -available under the License, as indicated by a copyright notice that is included -in or attached to the work (an example is provided in the Appendix below). - -"Derivative Works" shall mean any work, whether in Source or Object form, that -is based on (or derived from) the Work and for which the editorial revisions, -annotations, elaborations, or other modifications represent, as a whole, an -original work of authorship. For the purposes of this License, Derivative Works -shall not include works that remain separable from, or merely link (or bind by -name) to the interfaces of, the Work and Derivative Works thereof. - -"Contribution" shall mean any work of authorship, including the original version -of the Work and any modifications or additions to that Work or Derivative Works -thereof, that is intentionally submitted to Licensor for inclusion in the Work -by the copyright owner or by an individual or Legal Entity authorized to submit -on behalf of the copyright owner. For the purposes of this definition, -"submitted" means any form of electronic, verbal, or written communication sent -to the Licensor or its representatives, including but not limited to -communication on electronic mailing lists, source code control systems, and -issue tracking systems that are managed by, or on behalf of, the Licensor for -the purpose of discussing and improving the Work, but excluding communication -that is conspicuously marked or otherwise designated in writing by the copyright -owner as "Not a Contribution." - -"Contributor" shall mean Licensor and any individual or Legal Entity on behalf -of whom a Contribution has been received by Licensor and subsequently -incorporated within the Work. - -2. Grant of Copyright License. 
- -Subject to the terms and conditions of this License, each Contributor hereby -grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free, -irrevocable copyright license to reproduce, prepare Derivative Works of, -publicly display, publicly perform, sublicense, and distribute the Work and such -Derivative Works in Source or Object form. - -3. Grant of Patent License. - -Subject to the terms and conditions of this License, each Contributor hereby -grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free, -irrevocable (except as stated in this section) patent license to make, have -made, use, offer to sell, sell, import, and otherwise transfer the Work, where -such license applies only to those patent claims licensable by such Contributor -that are necessarily infringed by their Contribution(s) alone or by combination -of their Contribution(s) with the Work to which such Contribution(s) was -submitted. If You institute patent litigation against any entity (including a -cross-claim or counterclaim in a lawsuit) alleging that the Work or a -Contribution incorporated within the Work constitutes direct or contributory -patent infringement, then any patent licenses granted to You under this License -for that Work shall terminate as of the date such litigation is filed. - -4. Redistribution. - -You may reproduce and distribute copies of the Work or Derivative Works thereof -in any medium, with or without modifications, and in Source or Object form, -provided that You meet the following conditions: - -You must give any other recipients of the Work or Derivative Works a copy of -this License; and -You must cause any modified files to carry prominent notices stating that You -changed the files; and -You must retain, in the Source form of any Derivative Works that You distribute, -all copyright, patent, trademark, and attribution notices from the Source form -of the Work, excluding those notices that do not pertain to any part of the -Derivative Works; and -If the Work includes a "NOTICE" text file as part of its distribution, then any -Derivative Works that You distribute must include a readable copy of the -attribution notices contained within such NOTICE file, excluding those notices -that do not pertain to any part of the Derivative Works, in at least one of the -following places: within a NOTICE text file distributed as part of the -Derivative Works; within the Source form or documentation, if provided along -with the Derivative Works; or, within a display generated by the Derivative -Works, if and wherever such third-party notices normally appear. The contents of -the NOTICE file are for informational purposes only and do not modify the -License. You may add Your own attribution notices within Derivative Works that -You distribute, alongside or as an addendum to the NOTICE text from the Work, -provided that such additional attribution notices cannot be construed as -modifying the License. -You may add Your own copyright statement to Your modifications and may provide -additional or different license terms and conditions for use, reproduction, or -distribution of Your modifications, or for any such Derivative Works as a whole, -provided Your use, reproduction, and distribution of the Work otherwise complies -with the conditions stated in this License. - -5. Submission of Contributions. 
- -Unless You explicitly state otherwise, any Contribution intentionally submitted -for inclusion in the Work by You to the Licensor shall be under the terms and -conditions of this License, without any additional terms or conditions. -Notwithstanding the above, nothing herein shall supersede or modify the terms of -any separate license agreement you may have executed with Licensor regarding -such Contributions. - -6. Trademarks. - -This License does not grant permission to use the trade names, trademarks, -service marks, or product names of the Licensor, except as required for -reasonable and customary use in describing the origin of the Work and -reproducing the content of the NOTICE file. - -7. Disclaimer of Warranty. - -Unless required by applicable law or agreed to in writing, Licensor provides the -Work (and each Contributor provides its Contributions) on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied, -including, without limitation, any warranties or conditions of TITLE, -NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A PARTICULAR PURPOSE. You are -solely responsible for determining the appropriateness of using or -redistributing the Work and assume any risks associated with Your exercise of -permissions under this License. - -8. Limitation of Liability. - -In no event and under no legal theory, whether in tort (including negligence), -contract, or otherwise, unless required by applicable law (such as deliberate -and grossly negligent acts) or agreed to in writing, shall any Contributor be -liable to You for damages, including any direct, indirect, special, incidental, -or consequential damages of any character arising as a result of this License or -out of the use or inability to use the Work (including but not limited to -damages for loss of goodwill, work stoppage, computer failure or malfunction, or -any and all other commercial damages or losses), even if such Contributor has -been advised of the possibility of such damages. - -9. Accepting Warranty or Additional Liability. - -While redistributing the Work or Derivative Works thereof, You may choose to -offer, and charge a fee for, acceptance of support, warranty, indemnity, or -other liability obligations and/or rights consistent with this License. However, -in accepting such obligations, You may act only on Your own behalf and on Your -sole responsibility, not on behalf of any other Contributor, and only if You -agree to indemnify, defend, and hold each Contributor harmless for any liability -incurred by, or claims asserted against, such Contributor by reason of your -accepting any such warranty or additional liability. - -END OF TERMS AND CONDITIONS - -APPENDIX: How to apply the Apache License to your work - -To apply the Apache License to your work, attach the following boilerplate -notice, with the fields enclosed by brackets "[]" replaced with your own -identifying information. (Don't include the brackets!) The text should be -enclosed in the appropriate comment syntax for the file format. We also -recommend that a file or class name and description of purpose be included on -the same "printed page" as the copyright notice for easier identification within -third-party archives. - - Copyright [yyyy] [name of copyright owner] - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. 
- You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. diff --git a/vendor/github.com/golang/glog/README.md b/vendor/github.com/golang/glog/README.md deleted file mode 100644 index a4f73883b2..0000000000 --- a/vendor/github.com/golang/glog/README.md +++ /dev/null @@ -1,36 +0,0 @@ -# glog - -[![PkgGoDev](https://pkg.go.dev/badge/github.com/golang/glog)](https://pkg.go.dev/github.com/golang/glog) - -Leveled execution logs for Go. - -This is an efficient pure Go implementation of leveled logs in the -manner of the open source C++ package [_glog_](https://github.com/google/glog). - -By binding methods to booleans it is possible to use the log package without paying the expense of evaluating the arguments to the log. Through the `-vmodule` flag, the package also provides fine-grained -control over logging at the file level. - -The comment from `glog.go` introduces the ideas: - -Package _glog_ implements logging analogous to the Google-internal C++ INFO/ERROR/V setup. It provides the functions Info, Warning, Error, Fatal, plus formatting variants such as Infof. It also provides V-style logging controlled by the `-v` and `-vmodule=file=2` flags. - -Basic examples: - -```go -glog.Info("Prepare to repel boarders") - -glog.Fatalf("Initialization failed: %s", err) -``` - -See the documentation for the V function for an explanation of these examples: - -```go -if glog.V(2) { - glog.Info("Starting transaction...") -} -glog.V(2).Infoln("Processed", nItems, "elements") -``` - -The repository contains an open source version of the log package used inside Google. The master copy of the source lives inside Google, not here. The code in this repo is for export only and is not itself under development. Feature requests will be ignored. - -Send bug reports to golang-nuts@googlegroups.com. diff --git a/vendor/github.com/golang/glog/glog.go b/vendor/github.com/golang/glog/glog.go deleted file mode 100644 index 8c00e737a0..0000000000 --- a/vendor/github.com/golang/glog/glog.go +++ /dev/null @@ -1,777 +0,0 @@ -// Go support for leveled logs, analogous to https://github.com/google/glog. -// -// Copyright 2023 Google Inc. All Rights Reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -// Package glog implements logging analogous to the Google-internal C++ INFO/ERROR/V setup. -// It provides functions that have a name matched by regex: -// -// (Info|Warning|Error|Fatal)(Context)?(Depth)?(f)? -// -// If Context is present, function takes context.Context argument. The -// context is used to pass through the Trace Context to log sinks that can make use -// of it.
-// It is recommended to use the context variant of the functions over the non-context -// variants if a context is available to make sure the Trace Contexts are present -// in logs. -// -// If Depth is present, this function calls log from a different depth in the call stack. -// This enables a callee to emit logs that use the callsite information of its caller -// or any other callers in the stack. When depth == 0, the original callee's line -// information is emitted. When depth > 0, depth frames are skipped in the call stack -// and the final frame is treated like the original callee to Info. -// -// If 'f' is present, function formats according to a format specifier. -// -// This package also provides V-style logging controlled by the -v and -vmodule=file=2 flags. -// -// Basic examples: -// -// glog.Info("Prepare to repel boarders") -// -// glog.Fatalf("Initialization failed: %s", err) -// -// See the documentation for the V function for an explanation of these examples: -// -// if glog.V(2) { -// glog.Info("Starting transaction...") -// } -// -// glog.V(2).Infoln("Processed", nItems, "elements") -// -// Log output is buffered and written periodically using Flush. Programs -// should call Flush before exiting to guarantee all log output is written. -// -// By default, all log statements write to files in a temporary directory. -// This package provides several flags that modify this behavior. -// As a result, flag.Parse must be called before any logging is done. -// -// -logtostderr=false -// Logs are written to standard error instead of to files. -// -alsologtostderr=false -// Logs are written to standard error as well as to files. -// -stderrthreshold=ERROR -// Log events at or above this severity are logged to standard -// error as well as to files. -// -log_dir="" -// Log files will be written to this directory instead of the -// default temporary directory. -// -// Other flags provide aids to debugging. -// -// -log_backtrace_at="" -// A comma-separated list of file and line numbers holding a logging -// statement, such as -// -log_backtrace_at=gopherflakes.go:234 -// A stack trace will be written to the Info log whenever execution -// hits one of these statements. (Unlike with -vmodule, the ".go" -// must be present.) -// -v=0 -// Enable V-leveled logging at the specified level. -// -vmodule="" -// The syntax of the argument is a comma-separated list of pattern=N, -// where pattern is a literal file name (minus the ".go" suffix) or -// "glob" pattern and N is a V level. For instance, -// -vmodule=gopher*=3 -// sets the V level to 3 in all Go files whose names begin with "gopher", -// and -// -vmodule=/path/to/glog/glog_test=1 -// sets the V level to 1 in the Go file /path/to/glog/glog_test.go. -// If a glob pattern contains a slash, it is matched against the full path, -// and the file name. Otherwise, the pattern is -// matched only against the file's basename. When both -vmodule and -v -// are specified, the -vmodule values take precedence for the specified -// modules. -package glog - -// This file contains the parts of the log package that are shared among all -// implementations (file, envelope, and appengine). - -import ( - "bytes" - "context" - "errors" - "fmt" - stdLog "log" - "os" - "reflect" - "runtime" - "runtime/pprof" - "strconv" - "sync" - "sync/atomic" - "time" - - "github.com/golang/glog/internal/logsink" - "github.com/golang/glog/internal/stackdump" -) - -var timeNow = time.Now // Stubbed out for testing. - -// MaxSize is the maximum size of a log file in bytes.
-var MaxSize uint64 = 1024 * 1024 * 1800 - -// ErrNoLog is the error we return if no log file has yet been created -// for the specified log type. -var ErrNoLog = errors.New("log file not yet created") - -// OutputStats tracks the number of output lines and bytes written. -type OutputStats struct { - lines int64 - bytes int64 -} - -// Lines returns the number of lines written. -func (s *OutputStats) Lines() int64 { - return atomic.LoadInt64(&s.lines) -} - -// Bytes returns the number of bytes written. -func (s *OutputStats) Bytes() int64 { - return atomic.LoadInt64(&s.bytes) -} - -// Stats tracks the number of lines of output and number of bytes -// per severity level. Values must be read with atomic.LoadInt64. -var Stats struct { - Info, Warning, Error OutputStats -} - -var severityStats = [...]*OutputStats{ - logsink.Info: &Stats.Info, - logsink.Warning: &Stats.Warning, - logsink.Error: &Stats.Error, - logsink.Fatal: nil, -} - -// Level specifies a level of verbosity for V logs. The -v flag is of type -// Level and should be modified only through the flag.Value interface. -type Level int32 - -var metaPool sync.Pool // Pool of *logsink.Meta. - -// metaPoolGet returns a *logsink.Meta from metaPool as both an interface and a -// pointer, allocating a new one if necessary. (Returning the interface value -// directly avoids an allocation if there was an existing pointer in the pool.) -func metaPoolGet() (any, *logsink.Meta) { - if metai := metaPool.Get(); metai != nil { - return metai, metai.(*logsink.Meta) - } - meta := new(logsink.Meta) - return meta, meta -} - -type stack bool - -const ( - noStack = stack(false) - withStack = stack(true) -) - -func appendBacktrace(depth int, format string, args []any) (string, []any) { - // Capture a backtrace as a stackdump.Stack (both text and PC slice). - // Structured log sinks can extract the backtrace in whichever format they - // prefer (PCs or text), and Text sinks will include it as just another part - // of the log message. - // - // Use depth instead of depth+1 so that the backtrace always includes the - // log function itself - otherwise the reason for the trace appearing in the - // log may not be obvious to the reader. - dump := stackdump.Caller(depth) - - // Add an arg and an entry in the format string for the stack dump. - // - // Copy the "args" slice to avoid a rare but serious aliasing bug - // (corrupting the caller's slice if they passed it to a non-Fatal call - // using "..."). - format = format + "\n\n%v\n" - args = append(append([]any(nil), args...), dump) - - return format, args -} - -// logf acts as ctxlogf, but doesn't expect a context. -func logf(depth int, severity logsink.Severity, verbose bool, stack stack, format string, args ...any) { - ctxlogf(nil, depth+1, severity, verbose, stack, format, args...) -} - -// ctxlogf writes a log message for a log function call (or log function wrapper) -// at the given depth in the current goroutine's stack. -func ctxlogf(ctx context.Context, depth int, severity logsink.Severity, verbose bool, stack stack, format string, args ...any) { - now := timeNow() - _, file, line, ok := runtime.Caller(depth + 1) - if !ok { - file = "???" - line = 1 - } - - if stack == withStack || backtraceAt(file, line) { - format, args = appendBacktrace(depth+1, format, args) - } - - metai, meta := metaPoolGet() - *meta = logsink.Meta{ - Context: ctx, - Time: now, - File: file, - Line: line, - Depth: depth + 1, - Severity: severity, - Verbose: verbose, - Thread: int64(pid), - } - sinkf(meta, format, args...) 
- // Clear pointer fields so they can be garbage collected early. - meta.Context = nil - meta.Stack = nil - metaPool.Put(metai) -} - -func sinkf(meta *logsink.Meta, format string, args ...any) { - meta.Depth++ - n, err := logsink.Printf(meta, format, args...) - if stats := severityStats[meta.Severity]; stats != nil { - atomic.AddInt64(&stats.lines, 1) - atomic.AddInt64(&stats.bytes, int64(n)) - } - - if err != nil { - logsink.Printf(meta, "glog: exiting because of error: %s", err) - sinks.file.Flush() - os.Exit(2) - } -} - -// CopyStandardLogTo arranges for messages written to the Go "log" package's -// default logs to also appear in the Google logs for the named and lower -// severities. Subsequent changes to the standard log's default output location -// or format may break this behavior. -// -// Valid names are "INFO", "WARNING", "ERROR", and "FATAL". If the name is not -// recognized, CopyStandardLogTo panics. -func CopyStandardLogTo(name string) { - sev, err := logsink.ParseSeverity(name) - if err != nil { - panic(fmt.Sprintf("log.CopyStandardLogTo(%q): %v", name, err)) - } - // Set a log format that captures the user's file and line: - // d.go:23: message - stdLog.SetFlags(stdLog.Lshortfile) - stdLog.SetOutput(logBridge(sev)) -} - -// NewStandardLogger returns a Logger that writes to the Google logs for the -// named and lower severities. -// -// Valid names are "INFO", "WARNING", "ERROR", and "FATAL". If the name is not -// recognized, NewStandardLogger panics. -func NewStandardLogger(name string) *stdLog.Logger { - sev, err := logsink.ParseSeverity(name) - if err != nil { - panic(fmt.Sprintf("log.NewStandardLogger(%q): %v", name, err)) - } - return stdLog.New(logBridge(sev), "", stdLog.Lshortfile) -} - -// logBridge provides the Write method that enables CopyStandardLogTo to connect -// Go's standard logs to the logs provided by this package. -type logBridge logsink.Severity - -// Write parses the standard logging line and passes its components to the -// logger for severity(lb). -func (lb logBridge) Write(b []byte) (n int, err error) { - var ( - file = "???" - line = 1 - text string - ) - // Split "d.go:23: message" into "d.go", "23", and "message". - if parts := bytes.SplitN(b, []byte{':'}, 3); len(parts) != 3 || len(parts[0]) < 1 || len(parts[2]) < 1 { - text = fmt.Sprintf("bad log format: %s", b) - } else { - file = string(parts[0]) - text = string(parts[2][1:]) // skip leading space - line, err = strconv.Atoi(string(parts[1])) - if err != nil { - text = fmt.Sprintf("bad line number: %s", b) - line = 1 - } - } - - // The depth below hard-codes details of how stdlog gets here. The alternative would be to walk - // up the stack looking for src/log/log.go but that seems like it would be - // unfortunately slow. - const stdLogDepth = 4 - - metai, meta := metaPoolGet() - *meta = logsink.Meta{ - Time: timeNow(), - File: file, - Line: line, - Depth: stdLogDepth, - Severity: logsink.Severity(lb), - Thread: int64(pid), - } - - format := "%s" - args := []any{text} - if backtraceAt(file, line) { - format, args = appendBacktrace(meta.Depth, format, args) - } - - sinkf(meta, format, args...) - metaPool.Put(metai) - - return len(b), nil -} - -// defaultFormat returns a fmt.Printf format specifier that formats its -// arguments as if they were passed to fmt.Print. -func defaultFormat(args []any) string { - n := len(args) - switch n { - case 0: - return "" - case 1: - return "%v" - } - - b := make([]byte, 0, n*3-1) - wasString := true // Suppress leading space. 
- for _, arg := range args { - isString := arg != nil && reflect.TypeOf(arg).Kind() == reflect.String - if wasString || isString { - b = append(b, "%v"...) - } else { - b = append(b, " %v"...) - } - wasString = isString - } - return string(b) -} - -// lnFormat returns a fmt.Printf format specifier that formats its arguments -// as if they were passed to fmt.Println. -func lnFormat(args []any) string { - if len(args) == 0 { - return "\n" - } - - b := make([]byte, 0, len(args)*3) - for range args { - b = append(b, "%v "...) - } - b[len(b)-1] = '\n' // Replace the last space with a newline. - return string(b) -} - -// Verbose is a boolean type that implements Infof (like Printf) etc. -// See the documentation of V for more information. -type Verbose bool - -// V reports whether verbosity at the call site is at least the requested level. -// The returned value is a boolean of type Verbose, which implements Info, Infoln -// and Infof. These methods will write to the Info log if called. -// Thus, one may write either -// -// if glog.V(2) { glog.Info("log this") } -// -// or -// -// glog.V(2).Info("log this") -// -// The second form is shorter but the first is cheaper if logging is off because it does -// not evaluate its arguments. -// -// Whether an individual call to V generates a log record depends on the setting of -// the -v and --vmodule flags; both are off by default. If the level in the call to -// V is at most the value of -v, or of -vmodule for the source file containing the -// call, the V call will log. -func V(level Level) Verbose { - return VDepth(1, level) -} - -// VDepth acts as V but uses depth to determine which call frame to check vmodule for. -// VDepth(0, level) is the same as V(level). -func VDepth(depth int, level Level) Verbose { - return Verbose(verboseEnabled(depth+1, level)) -} - -// Info is equivalent to the global Info function, guarded by the value of v. -// See the documentation of V for usage. -func (v Verbose) Info(args ...any) { - v.InfoDepth(1, args...) -} - -// InfoDepth is equivalent to the global InfoDepth function, guarded by the value of v. -// See the documentation of V for usage. -func (v Verbose) InfoDepth(depth int, args ...any) { - if v { - logf(depth+1, logsink.Info, true, noStack, defaultFormat(args), args...) - } -} - -// InfoDepthf is equivalent to the global InfoDepthf function, guarded by the value of v. -// See the documentation of V for usage. -func (v Verbose) InfoDepthf(depth int, format string, args ...any) { - if v { - logf(depth+1, logsink.Info, true, noStack, format, args...) - } -} - -// Infoln is equivalent to the global Infoln function, guarded by the value of v. -// See the documentation of V for usage. -func (v Verbose) Infoln(args ...any) { - if v { - logf(1, logsink.Info, true, noStack, lnFormat(args), args...) - } -} - -// Infof is equivalent to the global Infof function, guarded by the value of v. -// See the documentation of V for usage. -func (v Verbose) Infof(format string, args ...any) { - if v { - logf(1, logsink.Info, true, noStack, format, args...) - } -} - -// InfoContext is equivalent to the global InfoContext function, guarded by the value of v. -// See the documentation of V for usage. -func (v Verbose) InfoContext(ctx context.Context, args ...any) { - v.InfoContextDepth(ctx, 1, args...) -} - -// InfoContextf is equivalent to the global InfoContextf function, guarded by the value of v. -// See the documentation of V for usage. 
-func (v Verbose) InfoContextf(ctx context.Context, format string, args ...any) { - if v { - ctxlogf(ctx, 1, logsink.Info, true, noStack, format, args...) - } -} - -// InfoContextDepth is equivalent to the global InfoContextDepth function, guarded by the value of v. -// See the documentation of V for usage. -func (v Verbose) InfoContextDepth(ctx context.Context, depth int, args ...any) { - if v { - ctxlogf(ctx, depth+1, logsink.Info, true, noStack, defaultFormat(args), args...) - } -} - -// InfoContextDepthf is equivalent to the global InfoContextDepthf function, guarded by the value of v. -// See the documentation of V for usage. -func (v Verbose) InfoContextDepthf(ctx context.Context, depth int, format string, args ...any) { - if v { - ctxlogf(ctx, depth+1, logsink.Info, true, noStack, format, args...) - } -} - -// Info logs to the INFO log. -// Arguments are handled in the manner of fmt.Print; a newline is appended if missing. -func Info(args ...any) { - InfoDepth(1, args...) -} - -// InfoDepth calls Info from a different depth in the call stack. -// This enables a callee to emit logs that use the callsite information of its caller -// or any other callers in the stack. When depth == 0, the original callee's line -// information is emitted. When depth > 0, depth frames are skipped in the call stack -// and the final frame is treated like the original callee to Info. -func InfoDepth(depth int, args ...any) { - logf(depth+1, logsink.Info, false, noStack, defaultFormat(args), args...) -} - -// InfoDepthf acts as InfoDepth but with format string. -func InfoDepthf(depth int, format string, args ...any) { - logf(depth+1, logsink.Info, false, noStack, format, args...) -} - -// Infoln logs to the INFO log. -// Arguments are handled in the manner of fmt.Println; a newline is appended if missing. -func Infoln(args ...any) { - logf(1, logsink.Info, false, noStack, lnFormat(args), args...) -} - -// Infof logs to the INFO log. -// Arguments are handled in the manner of fmt.Printf; a newline is appended if missing. -func Infof(format string, args ...any) { - logf(1, logsink.Info, false, noStack, format, args...) -} - -// InfoContext is like [Info], but with an extra [context.Context] parameter. The -// context is used to pass the Trace Context to log sinks. -func InfoContext(ctx context.Context, args ...any) { - InfoContextDepth(ctx, 1, args...) -} - -// InfoContextf is like [Infof], but with an extra [context.Context] parameter. The -// context is used to pass the Trace Context to log sinks. -func InfoContextf(ctx context.Context, format string, args ...any) { - ctxlogf(ctx, 1, logsink.Info, false, noStack, format, args...) -} - -// InfoContextDepth is like [InfoDepth], but with an extra [context.Context] parameter. The -// context is used to pass the Trace Context to log sinks. -func InfoContextDepth(ctx context.Context, depth int, args ...any) { - ctxlogf(ctx, depth+1, logsink.Info, false, noStack, defaultFormat(args), args...) -} - -// InfoContextDepthf is like [InfoDepthf], but with an extra [context.Context] parameter. The -// context is used to pass the Trace Context to log sinks. -func InfoContextDepthf(ctx context.Context, depth int, format string, args ...any) { - ctxlogf(ctx, depth+1, logsink.Info, false, noStack, format, args...) -} - -// Warning logs to the WARNING and INFO logs. -// Arguments are handled in the manner of fmt.Print; a newline is appended if missing. -func Warning(args ...any) { - WarningDepth(1, args...) 
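// Sketch of the depth semantics described at InfoDepth above: a small logging
// helper (hypothetical) can attribute records to its own caller by skipping
// one frame:
//
//	func logAttempt(msg string) {
//		glog.InfoDepth(1, msg) // file:line in the log refer to logAttempt's caller
//	}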
-} - -// WarningDepth acts as Warning but uses depth to determine which call frame to log. -// WarningDepth(0, "msg") is the same as Warning("msg"). -func WarningDepth(depth int, args ...any) { - logf(depth+1, logsink.Warning, false, noStack, defaultFormat(args), args...) -} - -// WarningDepthf acts as Warningf but uses depth to determine which call frame to log. -// WarningDepthf(0, "msg") is the same as Warningf("msg"). -func WarningDepthf(depth int, format string, args ...any) { - logf(depth+1, logsink.Warning, false, noStack, format, args...) -} - -// Warningln logs to the WARNING and INFO logs. -// Arguments are handled in the manner of fmt.Println; a newline is appended if missing. -func Warningln(args ...any) { - logf(1, logsink.Warning, false, noStack, lnFormat(args), args...) -} - -// Warningf logs to the WARNING and INFO logs. -// Arguments are handled in the manner of fmt.Printf; a newline is appended if missing. -func Warningf(format string, args ...any) { - logf(1, logsink.Warning, false, noStack, format, args...) -} - -// WarningContext is like [Warning], but with an extra [context.Context] parameter. The -// context is used to pass the Trace Context to log sinks. -func WarningContext(ctx context.Context, args ...any) { - WarningContextDepth(ctx, 1, args...) -} - -// WarningContextf is like [Warningf], but with an extra [context.Context] parameter. The -// context is used to pass the Trace Context to log sinks. -func WarningContextf(ctx context.Context, format string, args ...any) { - ctxlogf(ctx, 1, logsink.Warning, false, noStack, format, args...) -} - -// WarningContextDepth is like [WarningDepth], but with an extra [context.Context] parameter. The -// context is used to pass the Trace Context to log sinks. -func WarningContextDepth(ctx context.Context, depth int, args ...any) { - ctxlogf(ctx, depth+1, logsink.Warning, false, noStack, defaultFormat(args), args...) -} - -// WarningContextDepthf is like [WarningDepthf], but with an extra [context.Context] parameter. The -// context is used to pass the Trace Context to log sinks. -func WarningContextDepthf(ctx context.Context, depth int, format string, args ...any) { - ctxlogf(ctx, depth+1, logsink.Warning, false, noStack, format, args...) -} - -// Error logs to the ERROR, WARNING, and INFO logs. -// Arguments are handled in the manner of fmt.Print; a newline is appended if missing. -func Error(args ...any) { - ErrorDepth(1, args...) -} - -// ErrorDepth acts as Error but uses depth to determine which call frame to log. -// ErrorDepth(0, "msg") is the same as Error("msg"). -func ErrorDepth(depth int, args ...any) { - logf(depth+1, logsink.Error, false, noStack, defaultFormat(args), args...) -} - -// ErrorDepthf acts as Errorf but uses depth to determine which call frame to log. -// ErrorDepthf(0, "msg") is the same as Errorf("msg"). -func ErrorDepthf(depth int, format string, args ...any) { - logf(depth+1, logsink.Error, false, noStack, format, args...) -} - -// Errorln logs to the ERROR, WARNING, and INFO logs. -// Arguments are handled in the manner of fmt.Println; a newline is appended if missing. -func Errorln(args ...any) { - logf(1, logsink.Error, false, noStack, lnFormat(args), args...) -} - -// Errorf logs to the ERROR, WARNING, and INFO logs. -// Arguments are handled in the manner of fmt.Printf; a newline is appended if missing. -func Errorf(format string, args ...any) { - logf(1, logsink.Error, false, noStack, format, args...) -} - -// ErrorContext is like [Error], but with an extra [context.Context] parameter. 
The -// context is used to pass the Trace Context to log sinks. -func ErrorContext(ctx context.Context, args ...any) { - ErrorContextDepth(ctx, 1, args...) -} - -// ErrorContextf is like [Errorf], but with an extra [context.Context] parameter. The -// context is used to pass the Trace Context to log sinks. -func ErrorContextf(ctx context.Context, format string, args ...any) { - ctxlogf(ctx, 1, logsink.Error, false, noStack, format, args...) -} - -// ErrorContextDepth is like [ErrorDepth], but with an extra [context.Context] parameter. The -// context is used to pass the Trace Context to log sinks. -func ErrorContextDepth(ctx context.Context, depth int, args ...any) { - ctxlogf(ctx, depth+1, logsink.Error, false, noStack, defaultFormat(args), args...) -} - -// ErrorContextDepthf is like [ErrorDepthf], but with an extra [context.Context] parameter. The -// context is used to pass the Trace Context to log sinks. -func ErrorContextDepthf(ctx context.Context, depth int, format string, args ...any) { - ctxlogf(ctx, depth+1, logsink.Error, false, noStack, format, args...) -} - -func ctxfatalf(ctx context.Context, depth int, format string, args ...any) { - ctxlogf(ctx, depth+1, logsink.Fatal, false, withStack, format, args...) - sinks.file.Flush() - - err := abortProcess() // Should not return. - - // Failed to abort the process using signals. Dump a stack trace and exit. - Errorf("abortProcess returned unexpectedly: %v", err) - sinks.file.Flush() - pprof.Lookup("goroutine").WriteTo(os.Stderr, 1) - os.Exit(2) // Exit with the same code as the default SIGABRT handler. -} - -func fatalf(depth int, format string, args ...any) { - ctxfatalf(nil, depth+1, format, args...) -} - -// Fatal logs to the FATAL, ERROR, WARNING, and INFO logs, -// including a stack trace of all running goroutines, then calls os.Exit(2). -// Arguments are handled in the manner of fmt.Print; a newline is appended if missing. -func Fatal(args ...any) { - FatalDepth(1, args...) -} - -// FatalDepth acts as Fatal but uses depth to determine which call frame to log. -// FatalDepth(0, "msg") is the same as Fatal("msg"). -func FatalDepth(depth int, args ...any) { - fatalf(depth+1, defaultFormat(args), args...) -} - -// FatalDepthf acts as Fatalf but uses depth to determine which call frame to log. -// FatalDepthf(0, "msg") is the same as Fatalf("msg"). -func FatalDepthf(depth int, format string, args ...any) { - fatalf(depth+1, format, args...) -} - -// Fatalln logs to the FATAL, ERROR, WARNING, and INFO logs, -// including a stack trace of all running goroutines, then calls os.Exit(2). -// Arguments are handled in the manner of fmt.Println; a newline is appended if missing. -func Fatalln(args ...any) { - fatalf(1, lnFormat(args), args...) -} - -// Fatalf logs to the FATAL, ERROR, WARNING, and INFO logs, -// including a stack trace of all running goroutines, then calls os.Exit(2). -// Arguments are handled in the manner of fmt.Printf; a newline is appended if missing. -func Fatalf(format string, args ...any) { - fatalf(1, format, args...) -} - -// FatalContext is like [Fatal], but with an extra [context.Context] parameter. The -// context is used to pass the Trace Context to log sinks. -func FatalContext(ctx context.Context, args ...any) { - FatalContextDepth(ctx, 1, args...) -} - -// FatalContextf is like [Fatalf], but with an extra [context.Context] parameter. The -// context is used to pass the Trace Context to log sinks. -func FatalContextf(ctx context.Context, format string, args ...any) { - ctxfatalf(ctx, 1, format, args...) 
-} - -// FatalContextDepth is like [FatalDepth], but with an extra [context.Context] parameter. The -// context is used to pass the Trace Context to log sinks. -func FatalContextDepth(ctx context.Context, depth int, args ...any) { - ctxfatalf(ctx, depth+1, defaultFormat(args), args...) -} - -// FatalContextDepthf is like [FatalDepthf], but with an extra [context.Context] parameter. -func FatalContextDepthf(ctx context.Context, depth int, format string, args ...any) { - ctxfatalf(ctx, depth+1, format, args...) -} - -func ctxexitf(ctx context.Context, depth int, format string, args ...any) { - ctxlogf(ctx, depth+1, logsink.Fatal, false, noStack, format, args...) - sinks.file.Flush() - os.Exit(1) -} - -func exitf(depth int, format string, args ...any) { - ctxexitf(nil, depth+1, format, args...) -} - -// Exit logs to the FATAL, ERROR, WARNING, and INFO logs, then calls os.Exit(1). -// Arguments are handled in the manner of fmt.Print; a newline is appended if missing. -func Exit(args ...any) { - ExitDepth(1, args...) -} - -// ExitDepth acts as Exit but uses depth to determine which call frame to log. -// ExitDepth(0, "msg") is the same as Exit("msg"). -func ExitDepth(depth int, args ...any) { - exitf(depth+1, defaultFormat(args), args...) -} - -// ExitDepthf acts as Exitf but uses depth to determine which call frame to log. -// ExitDepthf(0, "msg") is the same as Exitf("msg"). -func ExitDepthf(depth int, format string, args ...any) { - exitf(depth+1, format, args...) -} - -// Exitln logs to the FATAL, ERROR, WARNING, and INFO logs, then calls os.Exit(1). -func Exitln(args ...any) { - exitf(1, lnFormat(args), args...) -} - -// Exitf logs to the FATAL, ERROR, WARNING, and INFO logs, then calls os.Exit(1). -// Arguments are handled in the manner of fmt.Printf; a newline is appended if missing. -func Exitf(format string, args ...any) { - exitf(1, format, args...) -} - -// ExitContext is like [Exit], but with an extra [context.Context] parameter. The -// context is used to pass the Trace Context to log sinks. -func ExitContext(ctx context.Context, args ...any) { - ExitContextDepth(ctx, 1, args...) -} - -// ExitContextf is like [Exitf], but with an extra [context.Context] parameter. The -// context is used to pass the Trace Context to log sinks. -func ExitContextf(ctx context.Context, format string, args ...any) { - ctxexitf(ctx, 1, format, args...) -} - -// ExitContextDepth is like [ExitDepth], but with an extra [context.Context] parameter. The -// context is used to pass the Trace Context to log sinks. -func ExitContextDepth(ctx context.Context, depth int, args ...any) { - ctxexitf(ctx, depth+1, defaultFormat(args), args...) -} - -// ExitContextDepthf is like [ExitDepthf], but with an extra [context.Context] parameter. The -// context is used to pass the Trace Context to log sinks. -func ExitContextDepthf(ctx context.Context, depth int, format string, args ...any) { - ctxexitf(ctx, depth+1, format, args...) -} diff --git a/vendor/github.com/golang/glog/glog_file.go b/vendor/github.com/golang/glog/glog_file.go deleted file mode 100644 index 8eb8b08c60..0000000000 --- a/vendor/github.com/golang/glog/glog_file.go +++ /dev/null @@ -1,418 +0,0 @@ -// Go support for leveled logs, analogous to https://github.com/google/glog. -// -// Copyright 2023 Google Inc. All Rights Reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. 
-// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -// File I/O for logs. - -package glog - -import ( - "bufio" - "bytes" - "errors" - "flag" - "fmt" - "io" - "os" - "path/filepath" - "runtime" - "strings" - "sync" - "time" - - "github.com/golang/glog/internal/logsink" -) - -// logDirs lists the candidate directories for new log files. -var logDirs []string - -var ( - // If non-empty, overrides the choice of directory in which to write logs. - // See createLogDirs for the full list of possible destinations. - logDir = flag.String("log_dir", "", "If non-empty, write log files in this directory") - logLink = flag.String("log_link", "", "If non-empty, add symbolic links in this directory to the log files") - logBufLevel = flag.Int("logbuflevel", int(logsink.Info), "Buffer log messages logged at this level or lower"+ - " (-1 means don't buffer; 0 means buffer INFO only; ...). Has limited applicability on non-prod platforms.") -) - -func createLogDirs() { - if *logDir != "" { - logDirs = append(logDirs, *logDir) - } - logDirs = append(logDirs, os.TempDir()) -} - -var ( - pid = os.Getpid() - program = filepath.Base(os.Args[0]) - host = "unknownhost" - userName = "unknownuser" -) - -func init() { - h, err := os.Hostname() - if err == nil { - host = shortHostname(h) - } - - if u := lookupUser(); u != "" { - userName = u - } - // Sanitize userName since it is used to construct file paths. - userName = strings.Map(func(r rune) rune { - switch { - case r >= 'a' && r <= 'z': - case r >= 'A' && r <= 'Z': - case r >= '0' && r <= '9': - default: - return '_' - } - return r - }, userName) -} - -// shortHostname returns its argument, truncating at the first period. -// For instance, given "www.google.com" it returns "www". -func shortHostname(hostname string) string { - if i := strings.Index(hostname, "."); i >= 0 { - return hostname[:i] - } - return hostname -} - -// logName returns a new log file name containing tag, with start time t, and -// the name for the symlink for tag. -func logName(tag string, t time.Time) (name, link string) { - name = fmt.Sprintf("%s.%s.%s.log.%s.%04d%02d%02d-%02d%02d%02d.%d", - program, - host, - userName, - tag, - t.Year(), - t.Month(), - t.Day(), - t.Hour(), - t.Minute(), - t.Second(), - pid) - return name, program + "." + tag -} - -var onceLogDirs sync.Once - -// create creates a new log file and returns the file and its filename, which -// contains tag ("INFO", "FATAL", etc.) and t. If the file is created -// successfully, create also attempts to update the symlink for that tag, ignoring -// errors. 
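// For illustration, given the Sprintf format in logName above, a binary named
// "server" run by user "alice" on host "www" with pid 4242 gets INFO files
// named like
//
//	server.www.alice.log.INFO.20250118-042527.4242
//
// and "server.INFO" as the per-tag symlink name (the values are invented).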
-func create(tag string, t time.Time) (f *os.File, filename string, err error) { - onceLogDirs.Do(createLogDirs) - if len(logDirs) == 0 { - return nil, "", errors.New("log: no log dirs") - } - name, link := logName(tag, t) - var lastErr error - for _, dir := range logDirs { - fname := filepath.Join(dir, name) - f, err := os.Create(fname) - if err == nil { - symlink := filepath.Join(dir, link) - os.Remove(symlink) // ignore err - os.Symlink(name, symlink) // ignore err - if *logLink != "" { - lsymlink := filepath.Join(*logLink, link) - os.Remove(lsymlink) // ignore err - os.Symlink(fname, lsymlink) // ignore err - } - return f, fname, nil - } - lastErr = err - } - return nil, "", fmt.Errorf("log: cannot create log: %v", lastErr) -} - -// flushSyncWriter is the interface satisfied by logging destinations. -type flushSyncWriter interface { - Flush() error - Sync() error - io.Writer - filenames() []string -} - -var sinks struct { - stderr stderrSink - file fileSink -} - -func init() { - // Register stderr first: that way if we crash during file-writing at least - // the log will have gone somewhere. - logsink.TextSinks = append(logsink.TextSinks, &sinks.stderr, &sinks.file) - - sinks.file.flushChan = make(chan logsink.Severity, 1) - go sinks.file.flushDaemon() -} - -// stderrSink is a logsink.Text that writes log entries to stderr -// if they meet certain conditions. -type stderrSink struct { - mu sync.Mutex - w io.Writer // if nil Emit uses os.Stderr directly -} - -// Enabled implements logsink.Text.Enabled. It returns true if any of the -// various stderr flags are enabled for logs of the given severity, if the log -// message is from the standard "log" package, or if google.Init has not yet run -// (and hence file logging is not yet initialized). -func (s *stderrSink) Enabled(m *logsink.Meta) bool { - return toStderr || alsoToStderr || m.Severity >= stderrThreshold.get() -} - -// Emit implements logsink.Text.Emit. -func (s *stderrSink) Emit(m *logsink.Meta, data []byte) (n int, err error) { - s.mu.Lock() - defer s.mu.Unlock() - w := s.w - if w == nil { - w = os.Stderr - } - dn, err := w.Write(data) - n += dn - return n, err -} - -// severityWriters is an array of flushSyncWriter with a value for each -// logsink.Severity. -type severityWriters [4]flushSyncWriter - -// fileSink is a logsink.Text that prints to a set of Google log files. -type fileSink struct { - mu sync.Mutex - // file holds writer for each of the log types. - file severityWriters - flushChan chan logsink.Severity -} - -// Enabled implements logsink.Text.Enabled. It returns true if google.Init -// has run and both --disable_log_to_disk and --logtostderr are false. -func (s *fileSink) Enabled(m *logsink.Meta) bool { - return !toStderr -} - -// Emit implements logsink.Text.Emit -func (s *fileSink) Emit(m *logsink.Meta, data []byte) (n int, err error) { - s.mu.Lock() - defer s.mu.Unlock() - - if err = s.createMissingFiles(m.Severity); err != nil { - return 0, err - } - for sev := m.Severity; sev >= logsink.Info; sev-- { - if _, fErr := s.file[sev].Write(data); fErr != nil && err == nil { - err = fErr // Take the first error. - } - } - n = len(data) - if int(m.Severity) > *logBufLevel { - select { - case s.flushChan <- m.Severity: - default: - } - } - - return n, err -} - -// syncBuffer joins a bufio.Writer to its underlying file, providing access to the -// file's Sync method and providing a wrapper for the Write method that provides log -// file rotation. There are conflicting methods, so the file cannot be embedded. 
-// s.mu is held for all its methods. -type syncBuffer struct { - sink *fileSink - *bufio.Writer - file *os.File - names []string - sev logsink.Severity - nbytes uint64 // The number of bytes written to this file -} - -func (sb *syncBuffer) Sync() error { - return sb.file.Sync() -} - -func (sb *syncBuffer) Write(p []byte) (n int, err error) { - if sb.nbytes+uint64(len(p)) >= MaxSize { - if err := sb.rotateFile(time.Now()); err != nil { - return 0, err - } - } - n, err = sb.Writer.Write(p) - sb.nbytes += uint64(n) - return n, err -} - -func (sb *syncBuffer) filenames() []string { - return sb.names -} - -const footer = "\nCONTINUED IN NEXT FILE\n" - -// rotateFile closes the syncBuffer's file and starts a new one. -func (sb *syncBuffer) rotateFile(now time.Time) error { - var err error - pn := "" - file, name, err := create(sb.sev.String(), now) - - if sb.file != nil { - // The current log file becomes the previous log at the end of - // this block, so save its name for use in the header of the next - // file. - pn = sb.file.Name() - sb.Flush() - // If there's an existing file, write a footer with the name of - // the next file in the chain, followed by the constant string - // \nCONTINUED IN NEXT FILE\n to make continuation detection simple. - sb.file.Write([]byte("Next log: ")) - sb.file.Write([]byte(name)) - sb.file.Write([]byte(footer)) - sb.file.Close() - } - - sb.file = file - sb.names = append(sb.names, name) - sb.nbytes = 0 - if err != nil { - return err - } - - sb.Writer = bufio.NewWriterSize(sb.file, bufferSize) - - // Write header. - var buf bytes.Buffer - fmt.Fprintf(&buf, "Log file created at: %s\n", now.Format("2006/01/02 15:04:05")) - fmt.Fprintf(&buf, "Running on machine: %s\n", host) - fmt.Fprintf(&buf, "Binary: Built with %s %s for %s/%s\n", runtime.Compiler, runtime.Version(), runtime.GOOS, runtime.GOARCH) - fmt.Fprintf(&buf, "Previous log: %s\n", pn) - fmt.Fprintf(&buf, "Log line format: [IWEF]mmdd hh:mm:ss.uuuuuu threadid file:line] msg\n") - n, err := sb.file.Write(buf.Bytes()) - sb.nbytes += uint64(n) - return err -} - -// bufferSize sizes the buffer associated with each log file. It's large -// so that log records can accumulate without the logging thread blocking -// on disk I/O. The flushDaemon will block instead. -const bufferSize = 256 * 1024 - -// createMissingFiles creates all the log files for severity from infoLog up to -// upTo that have not already been created. -// s.mu is held. -func (s *fileSink) createMissingFiles(upTo logsink.Severity) error { - if s.file[upTo] != nil { - return nil - } - now := time.Now() - // Files are created in increasing severity order, so we can be assured that - // if a high severity logfile exists, then so do all of lower severity. - for sev := logsink.Info; sev <= upTo; sev++ { - if s.file[sev] != nil { - continue - } - sb := &syncBuffer{ - sink: s, - sev: sev, - } - if err := sb.rotateFile(now); err != nil { - return err - } - s.file[sev] = sb - } - return nil -} - -// flushDaemon periodically flushes the log file buffers. -func (s *fileSink) flushDaemon() { - tick := time.NewTicker(30 * time.Second) - defer tick.Stop() - for { - select { - case <-tick.C: - s.Flush() - case sev := <-s.flushChan: - s.flush(sev) - } - } -} - -// Flush flushes all pending log I/O. -func Flush() { - sinks.file.Flush() -} - -// Flush flushes all the logs and attempts to "sync" their data to disk. -func (s *fileSink) Flush() error { - return s.flush(logsink.Info) -} - -// flush flushes all logs of severity threshold or greater. 
-func (s *fileSink) flush(threshold logsink.Severity) error { - var firstErr error - updateErr := func(err error) { - if err != nil && firstErr == nil { - firstErr = err - } - } - - // Remember where we flushed, so we can call sync without holding - // the lock. - var files []flushSyncWriter - func() { - s.mu.Lock() - defer s.mu.Unlock() - // Flush from fatal down, in case there's trouble flushing. - for sev := logsink.Fatal; sev >= threshold; sev-- { - if file := s.file[sev]; file != nil { - updateErr(file.Flush()) - files = append(files, file) - } - } - }() - - for _, file := range files { - updateErr(file.Sync()) - } - - return firstErr -} - -// Names returns the names of the log files holding the FATAL, ERROR, -// WARNING, or INFO logs. Returns ErrNoLog if the log for the given -// level doesn't exist (e.g. because no messages of that level have been -// written). This may return multiple names if the log type requested -// has rolled over. -func Names(s string) ([]string, error) { - severity, err := logsink.ParseSeverity(s) - if err != nil { - return nil, err - } - - sinks.file.mu.Lock() - defer sinks.file.mu.Unlock() - f := sinks.file.file[severity] - if f == nil { - return nil, ErrNoLog - } - - return f.filenames(), nil -} diff --git a/vendor/github.com/golang/glog/glog_file_linux.go b/vendor/github.com/golang/glog/glog_file_linux.go deleted file mode 100644 index d795092d03..0000000000 --- a/vendor/github.com/golang/glog/glog_file_linux.go +++ /dev/null @@ -1,39 +0,0 @@ -// Go support for leveled logs, analogous to https://github.com/google/glog. -// -// Copyright 2023 Google Inc. All Rights Reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -//go:build linux - -package glog - -import ( - "errors" - "runtime" - "syscall" -) - -// abortProcess attempts to kill the current process in a way that will dump the -// currently-running goroutines someplace useful (like stderr). -// -// It does this by sending SIGABRT to the current thread. -// -// If successful, abortProcess does not return. 
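// Sketch of querying log file names through Names above; ErrNoLog (defined
// elsewhere in this package) is returned before any message of the requested
// severity has been written:
//
//	glog.Info("ensure an INFO file exists")
//	glog.Flush()
//	if names, err := glog.Names("INFO"); err == nil {
//		fmt.Println("INFO log files:", names)
//	}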
-func abortProcess() error { - runtime.LockOSThread() - if err := syscall.Tgkill(syscall.Getpid(), syscall.Gettid(), syscall.SIGABRT); err != nil { - return err - } - return errors.New("log: killed current thread with SIGABRT, but still running") -} diff --git a/vendor/github.com/golang/glog/glog_file_nonwindows.go b/vendor/github.com/golang/glog/glog_file_nonwindows.go deleted file mode 100644 index d5cdb793c5..0000000000 --- a/vendor/github.com/golang/glog/glog_file_nonwindows.go +++ /dev/null @@ -1,12 +0,0 @@ -//go:build !windows - -package glog - -import "os/user" - -func lookupUser() string { - if current, err := user.Current(); err == nil { - return current.Username - } - return "" -} diff --git a/vendor/github.com/golang/glog/glog_file_other.go b/vendor/github.com/golang/glog/glog_file_other.go deleted file mode 100644 index 9540f14fc0..0000000000 --- a/vendor/github.com/golang/glog/glog_file_other.go +++ /dev/null @@ -1,30 +0,0 @@ -// Go support for leveled logs, analogous to https://github.com/google/glog. -// -// Copyright 2023 Google Inc. All Rights Reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -//go:build !(unix || windows) - -package glog - -import ( - "fmt" - "runtime" -) - -// abortProcess returns an error on platforms that presumably don't support signals. -func abortProcess() error { - return fmt.Errorf("not sending SIGABRT (%s/%s does not support signals), falling back", runtime.GOOS, runtime.GOARCH) - -} diff --git a/vendor/github.com/golang/glog/glog_file_posix.go b/vendor/github.com/golang/glog/glog_file_posix.go deleted file mode 100644 index c27c7c0e4a..0000000000 --- a/vendor/github.com/golang/glog/glog_file_posix.go +++ /dev/null @@ -1,53 +0,0 @@ -// Go support for leveled logs, analogous to https://github.com/google/glog. -// -// Copyright 2023 Google Inc. All Rights Reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -//go:build (unix || windows) && !linux - -package glog - -import ( - "os" - "syscall" - "time" -) - -// abortProcess attempts to kill the current process in a way that will dump the -// currently-running goroutines someplace useful (like stderr). -// -// It does this by sending SIGABRT to the current process. Unfortunately, the -// signal may or may not be delivered to the current thread; in order to do that -// portably, we would need to add a cgo dependency and call pthread_kill. -// -// If successful, abortProcess does not return. 
-func abortProcess() error { - p, err := os.FindProcess(os.Getpid()) - if err != nil { - return err - } - if err := p.Signal(syscall.SIGABRT); err != nil { - return err - } - - // Sent the signal. Now we wait for it to arrive and any SIGABRT handlers to - // run (and eventually terminate the process themselves). - // - // We could just "select{}" here, but there's an outside chance that would - // trigger the runtime's deadlock detector if there happen not to be any - // background goroutines running. So we'll sleep a while first to give - // the signal some time. - time.Sleep(10 * time.Second) - select {} -} diff --git a/vendor/github.com/golang/glog/glog_file_windows.go b/vendor/github.com/golang/glog/glog_file_windows.go deleted file mode 100644 index a9e4f609df..0000000000 --- a/vendor/github.com/golang/glog/glog_file_windows.go +++ /dev/null @@ -1,30 +0,0 @@ -//go:build windows - -package glog - -import ( - "syscall" -) - -// This follows the logic in the standard library's user.Current() function, except -// that it leaves out the potentially expensive calls required to look up the user's -// display name in Active Directory. -func lookupUser() string { - token, err := syscall.OpenCurrentProcessToken() - if err != nil { - return "" - } - defer token.Close() - tokenUser, err := token.GetTokenUser() - if err != nil { - return "" - } - username, _, accountType, err := tokenUser.User.Sid.LookupAccount("") - if err != nil { - return "" - } - if accountType != syscall.SidTypeUser { - return "" - } - return username -} diff --git a/vendor/github.com/golang/glog/glog_flags.go b/vendor/github.com/golang/glog/glog_flags.go deleted file mode 100644 index fa4371afd3..0000000000 --- a/vendor/github.com/golang/glog/glog_flags.go +++ /dev/null @@ -1,398 +0,0 @@ -// Go support for leveled logs, analogous to https://github.com/google/glog. -// -// Copyright 2023 Google Inc. All Rights Reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package glog - -import ( - "bytes" - "errors" - "flag" - "fmt" - "path/filepath" - "runtime" - "strconv" - "strings" - "sync" - "sync/atomic" - - "github.com/golang/glog/internal/logsink" -) - -// modulePat contains a filter for the -vmodule flag. -// It holds a verbosity level and a file pattern to match. -type modulePat struct { - pattern string - literal bool // The pattern is a literal string - full bool // The pattern wants to match the full path - level Level -} - -// match reports whether the file matches the pattern. It uses a string -// comparison if the pattern contains no metacharacters. -func (m *modulePat) match(full, file string) bool { - if m.literal { - if m.full { - return full == m.pattern - } - return file == m.pattern - } - if m.full { - match, _ := filepath.Match(m.pattern, full) - return match - } - match, _ := filepath.Match(m.pattern, file) - return match -} - -// isLiteral reports whether the pattern is a literal string, that is, has no metacharacters -// that require filepath.Match to be called to match the pattern. 
-func isLiteral(pattern string) bool { - return !strings.ContainsAny(pattern, `\*?[]`) -} - -// isFull reports whether the pattern matches the full file path, that is, -// whether it contains /. -func isFull(pattern string) bool { - return strings.ContainsRune(pattern, '/') -} - -// verboseFlags represents the setting of the -v and -vmodule flags. -type verboseFlags struct { - // moduleLevelCache is a sync.Map storing the -vmodule Level for each V() - // call site, identified by PC. If there is no matching -vmodule filter, - // the cached value is exactly v. moduleLevelCache is replaced with a new - // Map whenever the -vmodule or -v flag changes state. - moduleLevelCache atomic.Value - - // mu guards all fields below. - mu sync.Mutex - - // v stores the value of the -v flag. It may be read safely using - // sync.LoadInt32, but is only modified under mu. - v Level - - // module stores the parsed -vmodule flag. - module []modulePat - - // moduleLength caches len(module). If greater than zero, it - // means vmodule is enabled. It may be read safely using sync.LoadInt32, but - // is only modified under mu. - moduleLength int32 -} - -// NOTE: For compatibility with the open-sourced v1 version of this -// package (github.com/golang/glog) we need to retain that flag.Level -// implements the flag.Value interface. See also go/log-vs-glog. - -// String is part of the flag.Value interface. -func (l *Level) String() string { - return strconv.FormatInt(int64(l.Get().(Level)), 10) -} - -// Get is part of the flag.Value interface. -func (l *Level) Get() any { - if l == &vflags.v { - // l is the value registered for the -v flag. - return Level(atomic.LoadInt32((*int32)(l))) - } - return *l -} - -// Set is part of the flag.Value interface. -func (l *Level) Set(value string) error { - v, err := strconv.Atoi(value) - if err != nil { - return err - } - if l == &vflags.v { - // l is the value registered for the -v flag. - vflags.mu.Lock() - defer vflags.mu.Unlock() - vflags.moduleLevelCache.Store(&sync.Map{}) - atomic.StoreInt32((*int32)(l), int32(v)) - return nil - } - *l = Level(v) - return nil -} - -// vModuleFlag is the flag.Value for the --vmodule flag. -type vModuleFlag struct{ *verboseFlags } - -func (f vModuleFlag) String() string { - // Do not panic on the zero value. - // https://groups.google.com/g/golang-nuts/c/Atlr8uAjn6U/m/iId17Td5BQAJ. - if f.verboseFlags == nil { - return "" - } - f.mu.Lock() - defer f.mu.Unlock() - - var b bytes.Buffer - for i, f := range f.module { - if i > 0 { - b.WriteRune(',') - } - fmt.Fprintf(&b, "%s=%d", f.pattern, f.level) - } - return b.String() -} - -// Get returns nil for this flag type since the struct is not exported. -func (f vModuleFlag) Get() any { return nil } - -var errVmoduleSyntax = errors.New("syntax error: expect comma-separated list of filename=N") - -// Syntax: -vmodule=recordio=2,foo/bar/baz=1,gfs*=3 -func (f vModuleFlag) Set(value string) error { - var filter []modulePat - for _, pat := range strings.Split(value, ",") { - if len(pat) == 0 { - // Empty strings such as from a trailing comma can be ignored. - continue - } - patLev := strings.Split(pat, "=") - if len(patLev) != 2 || len(patLev[0]) == 0 || len(patLev[1]) == 0 { - return errVmoduleSyntax - } - pattern := patLev[0] - v, err := strconv.Atoi(patLev[1]) - if err != nil { - return errors.New("syntax error: expect comma-separated list of filename=N") - } - // TODO: check syntax of filter? 
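// Example of the -vmodule syntax parsed here, from the Syntax line above:
//
//	./binary -vmodule=recordio=2,foo/bar/baz=1,gfs*=3
//
// "recordio" is a literal basename match, "foo/bar/baz" matches the full path
// because it contains a slash (isFull), and "gfs*" falls back to
// filepath.Match because of the metacharacter (isLiteral).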
- filter = append(filter, modulePat{pattern, isLiteral(pattern), isFull(pattern), Level(v)}) - } - - f.mu.Lock() - defer f.mu.Unlock() - f.module = filter - atomic.StoreInt32((*int32)(&f.moduleLength), int32(len(f.module))) - f.moduleLevelCache.Store(&sync.Map{}) - return nil -} - -func (f *verboseFlags) levelForPC(pc uintptr) Level { - if level, ok := f.moduleLevelCache.Load().(*sync.Map).Load(pc); ok { - return level.(Level) - } - - f.mu.Lock() - defer f.mu.Unlock() - level := Level(f.v) - fn := runtime.FuncForPC(pc) - file, _ := fn.FileLine(pc) - // The file is something like /a/b/c/d.go. We want just the d for - // regular matches, /a/b/c/d for full matches. - file = strings.TrimSuffix(file, ".go") - full := file - if slash := strings.LastIndex(file, "/"); slash >= 0 { - file = file[slash+1:] - } - for _, filter := range f.module { - if filter.match(full, file) { - level = filter.level - break // Use the first matching level. - } - } - f.moduleLevelCache.Load().(*sync.Map).Store(pc, level) - return level -} - -func (f *verboseFlags) enabled(callerDepth int, level Level) bool { - if atomic.LoadInt32(&f.moduleLength) == 0 { - // No vmodule values specified, so compare against v level. - return Level(atomic.LoadInt32((*int32)(&f.v))) >= level - } - - pcs := [1]uintptr{} - if runtime.Callers(callerDepth+2, pcs[:]) < 1 { - return false - } - frame, _ := runtime.CallersFrames(pcs[:]).Next() - return f.levelForPC(frame.Entry) >= level -} - -// traceLocation represents an entry in the -log_backtrace_at flag. -type traceLocation struct { - file string - line int -} - -var errTraceSyntax = errors.New("syntax error: expect file.go:234") - -func parseTraceLocation(value string) (traceLocation, error) { - fields := strings.Split(value, ":") - if len(fields) != 2 { - return traceLocation{}, errTraceSyntax - } - file, lineStr := fields[0], fields[1] - if !strings.Contains(file, ".") { - return traceLocation{}, errTraceSyntax - } - line, err := strconv.Atoi(lineStr) - if err != nil { - return traceLocation{}, errTraceSyntax - } - if line < 0 { - return traceLocation{}, errors.New("negative value for line") - } - return traceLocation{file, line}, nil -} - -// match reports whether the specified file and line matches the trace location. -// The argument file name is the full path, not the basename specified in the flag. -func (t traceLocation) match(file string, line int) bool { - if t.line != line { - return false - } - if i := strings.LastIndex(file, "/"); i >= 0 { - file = file[i+1:] - } - return t.file == file -} - -func (t traceLocation) String() string { - return fmt.Sprintf("%s:%d", t.file, t.line) -} - -// traceLocations represents the -log_backtrace_at flag. -// Syntax: -log_backtrace_at=recordio.go:234,sstable.go:456 -// Note that unlike vmodule the file extension is included here. -type traceLocations struct { - mu sync.Mutex - locsLen int32 // Safe for atomic read without mu. 
- locs []traceLocation -} - -func (t *traceLocations) String() string { - t.mu.Lock() - defer t.mu.Unlock() - - var buf bytes.Buffer - for i, tl := range t.locs { - if i > 0 { - buf.WriteString(",") - } - buf.WriteString(tl.String()) - } - return buf.String() -} - -// Get always returns nil for this flag type since the struct is not exported -func (t *traceLocations) Get() any { return nil } - -func (t *traceLocations) Set(value string) error { - var locs []traceLocation - for _, s := range strings.Split(value, ",") { - if s == "" { - continue - } - loc, err := parseTraceLocation(s) - if err != nil { - return err - } - locs = append(locs, loc) - } - - t.mu.Lock() - defer t.mu.Unlock() - atomic.StoreInt32(&t.locsLen, int32(len(locs))) - t.locs = locs - return nil -} - -func (t *traceLocations) match(file string, line int) bool { - if atomic.LoadInt32(&t.locsLen) == 0 { - return false - } - - t.mu.Lock() - defer t.mu.Unlock() - for _, tl := range t.locs { - if tl.match(file, line) { - return true - } - } - return false -} - -// severityFlag is an atomic flag.Value implementation for logsink.Severity. -type severityFlag int32 - -func (s *severityFlag) get() logsink.Severity { - return logsink.Severity(atomic.LoadInt32((*int32)(s))) -} -func (s *severityFlag) String() string { return strconv.FormatInt(int64(*s), 10) } -func (s *severityFlag) Get() any { return s.get() } -func (s *severityFlag) Set(value string) error { - threshold, err := logsink.ParseSeverity(value) - if err != nil { - // Not a severity name. Try a raw number. - v, err := strconv.Atoi(value) - if err != nil { - return err - } - threshold = logsink.Severity(v) - if threshold < logsink.Info || threshold > logsink.Fatal { - return fmt.Errorf("Severity %d out of range (min %d, max %d).", v, logsink.Info, logsink.Fatal) - } - } - atomic.StoreInt32((*int32)(s), int32(threshold)) - return nil -} - -var ( - vflags verboseFlags // The -v and -vmodule flags. - - logBacktraceAt traceLocations // The -log_backtrace_at flag. - - // Boolean flags. Not handled atomically because the flag.Value interface - // does not let us avoid the =true, and that shorthand is necessary for - // compatibility. TODO: does this matter enough to fix? Seems unlikely. - toStderr bool // The -logtostderr flag. - alsoToStderr bool // The -alsologtostderr flag. - - stderrThreshold severityFlag // The -stderrthreshold flag. -) - -// verboseEnabled returns whether the caller at the given depth should emit -// verbose logs at the given level, with depth 0 identifying the caller of -// verboseEnabled. -func verboseEnabled(callerDepth int, level Level) bool { - return vflags.enabled(callerDepth+1, level) -} - -// backtraceAt returns whether the logging call at the given function and line -// should also emit a backtrace of the current call stack. 
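// Example of the -log_backtrace_at syntax handled by traceLocations above;
// note that, unlike -vmodule, the .go extension is part of each entry:
//
//	./binary -log_backtrace_at=recordio.go:234,sstable.go:456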
-func backtraceAt(file string, line int) bool { - return logBacktraceAt.match(file, line) -} - -func init() { - vflags.moduleLevelCache.Store(&sync.Map{}) - - flag.Var(&vflags.v, "v", "log level for V logs") - flag.Var(vModuleFlag{&vflags}, "vmodule", "comma-separated list of pattern=N settings for file-filtered logging") - - flag.Var(&logBacktraceAt, "log_backtrace_at", "when logging hits line file:N, emit a stack trace") - - stderrThreshold = severityFlag(logsink.Error) - - flag.BoolVar(&toStderr, "logtostderr", false, "log to standard error instead of files") - flag.BoolVar(&alsoToStderr, "alsologtostderr", false, "log to standard error as well as files") - flag.Var(&stderrThreshold, "stderrthreshold", "logs at or above this threshold go to stderr") -} diff --git a/vendor/github.com/golang/glog/internal/logsink/logsink.go b/vendor/github.com/golang/glog/internal/logsink/logsink.go deleted file mode 100644 index 28c38a6abc..0000000000 --- a/vendor/github.com/golang/glog/internal/logsink/logsink.go +++ /dev/null @@ -1,393 +0,0 @@ -// Copyright 2023 Google Inc. All Rights Reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package logsink - -import ( - "bytes" - "context" - "fmt" - "strconv" - "strings" - "sync" - "time" - - "github.com/golang/glog/internal/stackdump" -) - -// MaxLogMessageLen is the limit on length of a formatted log message, including -// the standard line prefix and trailing newline. -// -// Chosen to match C++ glog. -const MaxLogMessageLen = 15000 - -// A Severity is a severity at which a message can be logged. -type Severity int8 - -// These constants identify the log levels in order of increasing severity. -// A message written to a high-severity log file is also written to each -// lower-severity log file. -const ( - Info Severity = iota - Warning - Error - - // Fatal contains logs written immediately before the process terminates. - // - // Sink implementations should not terminate the process themselves: the log - // package will perform any necessary cleanup and terminate the process as - // appropriate. - Fatal -) - -func (s Severity) String() string { - switch s { - case Info: - return "INFO" - case Warning: - return "WARNING" - case Error: - return "ERROR" - case Fatal: - return "FATAL" - } - return fmt.Sprintf("%T(%d)", s, s) -} - -// ParseSeverity returns the case-insensitive Severity value for the given string. -func ParseSeverity(name string) (Severity, error) { - name = strings.ToUpper(name) - for s := Info; s <= Fatal; s++ { - if s.String() == name { - return s, nil - } - } - return -1, fmt.Errorf("logsink: invalid severity %q", name) -} - -// Meta is metadata about a logging call. -type Meta struct { - // The context with which the log call was made (or nil). If set, the context - // is only valid during the logsink.Structured.Printf call, it should not be - // retained. - Context context.Context - - // Time is the time at which the log call was made. - Time time.Time - - // File is the source file from which the log entry originates. 
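// Round-trip sketch for ParseSeverity above (matching is case-insensitive;
// usable only within this module, since the package is internal):
//
//	sev, err := logsink.ParseSeverity("warning")
//	// sev == logsink.Warning, err == nil
//	if _, err := logsink.ParseSeverity("TRACE"); err != nil {
//		// "TRACE" is not one of INFO, WARNING, ERROR, FATAL
//	}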
-	File string
-	// Line is the line offset within the source file.
-	Line int
-	// Depth is the number of stack frames between the logsink and the log call.
-	Depth int
-
-	Severity Severity
-
-	// Verbose indicates whether the call was made via "log.V". Log entries below
-	// the current verbosity threshold are not sent to the sink.
-	Verbose bool
-
-	// Thread ID. This can be populated with a thread ID from another source,
-	// such as a system we are importing logs from. In the normal case, this
-	// will be set to the process ID (PID), since Go doesn't have threads.
-	Thread int64
-
-	// Stack trace starting in the logging function. May be nil.
-	// A logsink should implement the StackWanter interface to request this.
-	//
-	// Even if WantStack returns false, this field may be set (e.g. if another
-	// sink wants a stack trace).
-	Stack *stackdump.Stack
-}
-
-// Structured is a logging destination that accepts structured data as input.
-type Structured interface {
-	// Printf formats according to a fmt.Printf format specifier and writes a log
-	// entry. The precise result of formatting depends on the sink, but should
-	// aim for consistency with fmt.Printf.
-	//
-	// Printf returns the number of bytes occupied by the log entry, which
-	// may not be equal to the total number of bytes written.
-	//
-	// Printf returns any error encountered *if* it is severe enough that the log
-	// package should terminate the process.
-	//
-	// The sink must not modify the *Meta parameter, nor reference it after
-	// Printf has returned: it may be reused in subsequent calls.
-	Printf(meta *Meta, format string, a ...any) (n int, err error)
-}
-
-// StackWanter can be implemented by a logsink.Structured to indicate that it
-// wants a stack trace to accompany at least some of the log messages it receives.
-type StackWanter interface {
-	// WantStack returns true if the sink requires a stack trace for a log message
-	// with this metadata.
-	//
-	// NOTE: Returning true implies that meta.Stack will be non-nil. Returning
-	// false does NOT imply that meta.Stack will be nil.
-	WantStack(meta *Meta) bool
-}
-
-// Text is a logging destination that accepts pre-formatted log lines (instead of
-// structured data).
-type Text interface {
-	// Enabled returns whether this sink should output messages for the given
-	// Meta. If the sink returns false for a given Meta, the Printf function will
-	// not call Emit on it for the corresponding log message.
-	Enabled(*Meta) bool
-
-	// Emit writes a pre-formatted text log entry (including any applicable
-	// header) to the log. It returns the number of bytes occupied by the entry
-	// (which may differ from the length of the passed-in slice).
-	//
-	// Emit returns any error encountered *if* it is severe enough that the log
-	// package should terminate the process.
-	//
-	// The sink must not modify the *Meta parameter, nor reference it after
-	// Printf has returned: it may be reused in subsequent calls.
-	//
-	// NOTE: When developing a text sink, keep in mind the surface in which the
-	// logs will be displayed, and whether it's important that the sink be
-	// resistant to tampering in the style of b/211428300. Standard text sinks
-	// (like `stderrSink`) do not protect against this (e.g. by escaping
-	// characters) because the cases where they would show user-influenced bytes
-	// are vanishingly small.
-	Emit(*Meta, []byte) (n int, err error)
-}
-
-// bufs is a pool of *bytes.Buffer used in formatting log entries.
-var bufs sync.Pool // Pool of *bytes.Buffer. - -// textPrintf formats a text log entry and emits it to all specified Text sinks. -// -// The returned n is the maximum across all Emit calls. -// The returned err is the first non-nil error encountered. -// Sinks that are disabled by configuration should return (0, nil). -func textPrintf(m *Meta, textSinks []Text, format string, args ...any) (n int, err error) { - // We expect at most file, stderr, and perhaps syslog. If there are more, - // we'll end up allocating - no big deal. - const maxExpectedTextSinks = 3 - var noAllocSinks [maxExpectedTextSinks]Text - - sinks := noAllocSinks[:0] - for _, s := range textSinks { - if s.Enabled(m) { - sinks = append(sinks, s) - } - } - if len(sinks) == 0 && m.Severity != Fatal { - return 0, nil // No TextSinks specified; don't bother formatting. - } - - bufi := bufs.Get() - var buf *bytes.Buffer - if bufi == nil { - buf = bytes.NewBuffer(nil) - bufi = buf - } else { - buf = bufi.(*bytes.Buffer) - buf.Reset() - } - - // Lmmdd hh:mm:ss.uuuuuu PID/GID file:line] - // - // The "PID" entry arguably ought to be TID for consistency with other - // environments, but TID is not meaningful in a Go program due to the - // multiplexing of goroutines across threads. - // - // Avoid Fprintf, for speed. The format is so simple that we can do it quickly by hand. - // It's worth about 3X. Fprintf is hard. - const severityChar = "IWEF" - buf.WriteByte(severityChar[m.Severity]) - - _, month, day := m.Time.Date() - hour, minute, second := m.Time.Clock() - twoDigits(buf, int(month)) - twoDigits(buf, day) - buf.WriteByte(' ') - twoDigits(buf, hour) - buf.WriteByte(':') - twoDigits(buf, minute) - buf.WriteByte(':') - twoDigits(buf, second) - buf.WriteByte('.') - nDigits(buf, 6, uint64(m.Time.Nanosecond()/1000), '0') - buf.WriteByte(' ') - - nDigits(buf, 7, uint64(m.Thread), ' ') - buf.WriteByte(' ') - - { - file := m.File - if i := strings.LastIndex(file, "/"); i >= 0 { - file = file[i+1:] - } - buf.WriteString(file) - } - - buf.WriteByte(':') - { - var tmp [19]byte - buf.Write(strconv.AppendInt(tmp[:0], int64(m.Line), 10)) - } - buf.WriteString("] ") - - msgStart := buf.Len() - fmt.Fprintf(buf, format, args...) - if buf.Len() > MaxLogMessageLen-1 { - buf.Truncate(MaxLogMessageLen - 1) - } - msgEnd := buf.Len() - if b := buf.Bytes(); b[len(b)-1] != '\n' { - buf.WriteByte('\n') - } - - for _, s := range sinks { - sn, sErr := s.Emit(m, buf.Bytes()) - if sn > n { - n = sn - } - if sErr != nil && err == nil { - err = sErr - } - } - - if m.Severity == Fatal { - savedM := *m - fatalMessageStore(savedEntry{ - meta: &savedM, - msg: buf.Bytes()[msgStart:msgEnd], - }) - } else { - bufs.Put(bufi) - } - return n, err -} - -const digits = "0123456789" - -// twoDigits formats a zero-prefixed two-digit integer to buf. -func twoDigits(buf *bytes.Buffer, d int) { - buf.WriteByte(digits[(d/10)%10]) - buf.WriteByte(digits[d%10]) -} - -// nDigits formats an n-digit integer to buf, padding with pad on the left. It -// assumes d != 0. -func nDigits(buf *bytes.Buffer, n int, d uint64, pad byte) { - var tmp [20]byte - - cutoff := len(tmp) - n - j := len(tmp) - 1 - for ; d > 0; j-- { - tmp[j] = digits[d%10] - d /= 10 - } - for ; j >= cutoff; j-- { - tmp[j] = pad - } - j++ - buf.Write(tmp[j:]) -} - -// Printf writes a log entry to all registered TextSinks in this package, then -// to all registered StructuredSinks. -// -// The returned n is the maximum across all Emit and Printf calls. -// The returned err is the first non-nil error encountered. 
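// For illustration, the hand-rolled header written by textPrintf above
// renders lines of the form (all values invented):
//
//	W0118 04:25:27.123456    4242 server.go:98] something worth noting
//
// that is: severity letter, mmdd, wall-clock time with microseconds, the
// 7-wide thread (PID) field, file basename and line, then "] " and the message.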
-// Sinks that are disabled by configuration should return (0, nil).
-func Printf(m *Meta, format string, args ...any) (n int, err error) {
-	m.Depth++
-	n, err = textPrintf(m, TextSinks, format, args...)
-
-	for _, sink := range StructuredSinks {
-		// TODO: Support TextSinks that implement StackWanter?
-		if sw, ok := sink.(StackWanter); ok && sw.WantStack(m) {
-			if m.Stack == nil {
-				// First, try to find a stacktrace in args, otherwise generate one.
-				for _, arg := range args {
-					if stack, ok := arg.(stackdump.Stack); ok {
-						m.Stack = &stack
-						break
-					}
-				}
-				if m.Stack == nil {
-					stack := stackdump.Caller( /* skipDepth = */ m.Depth)
-					m.Stack = &stack
-				}
-			}
-		}
-		sn, sErr := sink.Printf(m, format, args...)
-		if sn > n {
-			n = sn
-		}
-		if sErr != nil && err == nil {
-			err = sErr
-		}
-	}
-	return n, err
-}
-
-// The sets of sinks to which logs should be written.
-//
-// These must only be modified during package init, and are read-only thereafter.
-var (
-	// StructuredSinks is the set of Structured sink instances to which logs
-	// should be written.
-	StructuredSinks []Structured
-
-	// TextSinks is the set of Text sink instances to which logs should be
-	// written.
-	//
-	// These are registered separately from Structured sink implementations to
-	// avoid the need to repeat the work of formatting a message for each Text
-	// sink that writes it. The package-level Printf function writes to both sets
-	// independently, so a given log destination should only register a Structured
-	// *or* a Text sink (not both).
-	TextSinks []Text
-)
-
-type savedEntry struct {
-	meta *Meta
-	msg  []byte
-}
-
-// StructuredTextWrapper is a Structured sink which forwards logs to a set of Text sinks.
-//
-// The purpose of this sink is to allow applications to intercept logging calls before they are
-// serialized and sent to Text sinks. For example, if one needs to redact PII from logging
-// arguments before they reach STDERR, one solution would be to do the redacting in a Structured
-// sink that forwards logs to a StructuredTextWrapper instance, and make STDERR a child of that
-// StructuredTextWrapper instance. This is how one could set this up in their application:
-//
-//	func init() {
-//
-//		wrapper := logsink.StructuredTextWrapper{TextSinks: logsink.TextSinks}
-//		// sanitizersink will intercept logs and remove PII
-//		sanitizer := sanitizersink{Sink: &wrapper}
-//		logsink.StructuredSinks = append(logsink.StructuredSinks, &sanitizer)
-//		logsink.TextSinks = nil
-//
-//	}
-type StructuredTextWrapper struct {
-	// TextSinks is the set of Text sinks that should receive logs from this
-	// StructuredTextWrapper instance.
-	TextSinks []Text
-}
-
-// Printf forwards logs to all Text sinks registered in the StructuredTextWrapper.
-func (w *StructuredTextWrapper) Printf(meta *Meta, format string, args ...any) (n int, err error) {
-	return textPrintf(meta, w.TextSinks, format, args...)
-}
diff --git a/vendor/github.com/golang/glog/internal/logsink/logsink_fatal.go b/vendor/github.com/golang/glog/internal/logsink/logsink_fatal.go
deleted file mode 100644
index 3dc269abc2..0000000000
--- a/vendor/github.com/golang/glog/internal/logsink/logsink_fatal.go
+++ /dev/null
@@ -1,35 +0,0 @@
-package logsink
-
-import (
-	"sync/atomic"
-	"unsafe"
-)
-
-func fatalMessageStore(e savedEntry) {
-	// Only put a new one in if we haven't assigned before.
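// Minimal Text sink sketch against the interfaces above; errorOnlySink is
// hypothetical, and per the registry comment it must be appended to TextSinks
// during package init (note this package is internal to the glog module):
//
//	type errorOnlySink struct{}
//
//	func (errorOnlySink) Enabled(m *logsink.Meta) bool { return m.Severity >= logsink.Error }
//
//	func (errorOnlySink) Emit(m *logsink.Meta, b []byte) (int, error) {
//		return os.Stderr.Write(b)
//	}
//
//	func init() {
//		logsink.TextSinks = append(logsink.TextSinks, errorOnlySink{})
//	}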
- atomic.CompareAndSwapPointer(&fatalMessage, nil, unsafe.Pointer(&e)) -} - -var fatalMessage unsafe.Pointer // savedEntry stored with CompareAndSwapPointer - -// FatalMessage returns the Meta and message contents of the first message -// logged with Fatal severity, or false if none has occurred. -func FatalMessage() (*Meta, []byte, bool) { - e := (*savedEntry)(atomic.LoadPointer(&fatalMessage)) - if e == nil { - return nil, nil, false - } - return e.meta, e.msg, true -} - -// DoNotUseRacyFatalMessage is FatalMessage, but worse. -// -//go:norace -//go:nosplit -func DoNotUseRacyFatalMessage() (*Meta, []byte, bool) { - e := (*savedEntry)(fatalMessage) - if e == nil { - return nil, nil, false - } - return e.meta, e.msg, true -} diff --git a/vendor/github.com/golang/glog/internal/stackdump/stackdump.go b/vendor/github.com/golang/glog/internal/stackdump/stackdump.go deleted file mode 100644 index 3427c9d6bd..0000000000 --- a/vendor/github.com/golang/glog/internal/stackdump/stackdump.go +++ /dev/null @@ -1,127 +0,0 @@ -// Copyright 2023 Google Inc. All Rights Reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -// Package stackdump provides wrappers for runtime.Stack and runtime.Callers -// with uniform support for skipping caller frames. -// -// ⚠ Unlike the functions in the runtime package, these may allocate a -// non-trivial quantity of memory: use them with care. ⚠ -package stackdump - -import ( - "bytes" - "runtime" -) - -// runtimeStackSelfFrames is 1 if runtime.Stack includes the call to -// runtime.Stack itself or 0 if it does not. -// -// As of 2016-04-27, the gccgo compiler includes runtime.Stack but the gc -// compiler does not. -var runtimeStackSelfFrames = func() int { - for n := 1 << 10; n < 1<<20; n *= 2 { - buf := make([]byte, n) - n := runtime.Stack(buf, false) - if bytes.Contains(buf[:n], []byte("runtime.Stack")) { - return 1 - } else if n < len(buf) || bytes.Count(buf, []byte("\n")) >= 3 { - return 0 - } - } - return 0 -}() - -// Stack is a stack dump for a single goroutine. -type Stack struct { - // Text is a representation of the stack dump in a human-readable format. - Text []byte - - // PC is a representation of the stack dump using raw program counter values. - PC []uintptr -} - -func (s Stack) String() string { return string(s.Text) } - -// Caller returns the Stack dump for the calling goroutine, starting skipDepth -// frames before the caller of Caller. (Caller(0) provides a dump starting at -// the caller of this function.) -func Caller(skipDepth int) Stack { - return Stack{ - Text: CallerText(skipDepth + 1), - PC: CallerPC(skipDepth + 1), - } -} - -// CallerText returns a textual dump of the stack starting skipDepth frames before -// the caller. (CallerText(0) provides a dump starting at the caller of this -// function.) 
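// Usage sketch for the stackdump helpers above (internal to this module):
//
//	stack := stackdump.Caller(0) // dump starts at the function calling Caller
//	fmt.Print(stack)             // Stack's String method returns the Text form
//	_ = stack.PC                 // raw program counters, e.g. for symbolization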
-func CallerText(skipDepth int) []byte { - for n := 1 << 10; ; n *= 2 { - buf := make([]byte, n) - n := runtime.Stack(buf, false) - if n < len(buf) { - return pruneFrames(skipDepth+1+runtimeStackSelfFrames, buf[:n]) - } - } -} - -// CallerPC returns a dump of the program counters of the stack starting -// skipDepth frames before the caller. (CallerPC(0) provides a dump starting at -// the caller of this function.) -func CallerPC(skipDepth int) []uintptr { - for n := 1 << 8; ; n *= 2 { - buf := make([]uintptr, n) - n := runtime.Callers(skipDepth+2, buf) - if n < len(buf) { - return buf[:n] - } - } -} - -// pruneFrames removes the topmost skipDepth frames of the first goroutine in a -// textual stack dump. It overwrites the passed-in slice. -// -// If there are fewer than skipDepth frames in the first goroutine's stack, -// pruneFrames prunes it to an empty stack and leaves the remaining contents -// intact. -func pruneFrames(skipDepth int, stack []byte) []byte { - headerLen := 0 - for i, c := range stack { - if c == '\n' { - headerLen = i + 1 - break - } - } - if headerLen == 0 { - return stack // No header line - not a well-formed stack trace. - } - - skipLen := headerLen - skipNewlines := skipDepth * 2 - for ; skipLen < len(stack) && skipNewlines > 0; skipLen++ { - c := stack[skipLen] - if c != '\n' { - continue - } - skipNewlines-- - skipLen++ - if skipNewlines == 0 || skipLen == len(stack) || stack[skipLen] == '\n' { - break - } - } - - pruned := stack[skipLen-headerLen:] - copy(pruned, stack[:headerLen]) - return pruned -} diff --git a/vendor/github.com/golang/snappy/README b/vendor/github.com/golang/snappy/README deleted file mode 100644 index cea12879a0..0000000000 --- a/vendor/github.com/golang/snappy/README +++ /dev/null @@ -1,107 +0,0 @@ -The Snappy compression format in the Go programming language. - -To download and install from source: -$ go get github.com/golang/snappy - -Unless otherwise noted, the Snappy-Go source files are distributed -under the BSD-style license found in the LICENSE file. - - - -Benchmarks. - -The golang/snappy benchmarks include compressing (Z) and decompressing (U) ten -or so files, the same set used by the C++ Snappy code (github.com/google/snappy -and note the "google", not "golang"). On an "Intel(R) Core(TM) i7-3770 CPU @ -3.40GHz", Go's GOARCH=amd64 numbers as of 2016-05-29: - -"go test -test.bench=." - -_UFlat0-8 2.19GB/s ± 0% html -_UFlat1-8 1.41GB/s ± 0% urls -_UFlat2-8 23.5GB/s ± 2% jpg -_UFlat3-8 1.91GB/s ± 0% jpg_200 -_UFlat4-8 14.0GB/s ± 1% pdf -_UFlat5-8 1.97GB/s ± 0% html4 -_UFlat6-8 814MB/s ± 0% txt1 -_UFlat7-8 785MB/s ± 0% txt2 -_UFlat8-8 857MB/s ± 0% txt3 -_UFlat9-8 719MB/s ± 1% txt4 -_UFlat10-8 2.84GB/s ± 0% pb -_UFlat11-8 1.05GB/s ± 0% gaviota - -_ZFlat0-8 1.04GB/s ± 0% html -_ZFlat1-8 534MB/s ± 0% urls -_ZFlat2-8 15.7GB/s ± 1% jpg -_ZFlat3-8 740MB/s ± 3% jpg_200 -_ZFlat4-8 9.20GB/s ± 1% pdf -_ZFlat5-8 991MB/s ± 0% html4 -_ZFlat6-8 379MB/s ± 0% txt1 -_ZFlat7-8 352MB/s ± 0% txt2 -_ZFlat8-8 396MB/s ± 1% txt3 -_ZFlat9-8 327MB/s ± 1% txt4 -_ZFlat10-8 1.33GB/s ± 1% pb -_ZFlat11-8 605MB/s ± 1% gaviota - - - -"go test -test.bench=. 
-tags=noasm" - -_UFlat0-8 621MB/s ± 2% html -_UFlat1-8 494MB/s ± 1% urls -_UFlat2-8 23.2GB/s ± 1% jpg -_UFlat3-8 1.12GB/s ± 1% jpg_200 -_UFlat4-8 4.35GB/s ± 1% pdf -_UFlat5-8 609MB/s ± 0% html4 -_UFlat6-8 296MB/s ± 0% txt1 -_UFlat7-8 288MB/s ± 0% txt2 -_UFlat8-8 309MB/s ± 1% txt3 -_UFlat9-8 280MB/s ± 1% txt4 -_UFlat10-8 753MB/s ± 0% pb -_UFlat11-8 400MB/s ± 0% gaviota - -_ZFlat0-8 409MB/s ± 1% html -_ZFlat1-8 250MB/s ± 1% urls -_ZFlat2-8 12.3GB/s ± 1% jpg -_ZFlat3-8 132MB/s ± 0% jpg_200 -_ZFlat4-8 2.92GB/s ± 0% pdf -_ZFlat5-8 405MB/s ± 1% html4 -_ZFlat6-8 179MB/s ± 1% txt1 -_ZFlat7-8 170MB/s ± 1% txt2 -_ZFlat8-8 189MB/s ± 1% txt3 -_ZFlat9-8 164MB/s ± 1% txt4 -_ZFlat10-8 479MB/s ± 1% pb -_ZFlat11-8 270MB/s ± 1% gaviota - - - -For comparison (Go's encoded output is byte-for-byte identical to C++'s), here -are the numbers from C++ Snappy's - -make CXXFLAGS="-O2 -DNDEBUG -g" clean snappy_unittest.log && cat snappy_unittest.log - -BM_UFlat/0 2.4GB/s html -BM_UFlat/1 1.4GB/s urls -BM_UFlat/2 21.8GB/s jpg -BM_UFlat/3 1.5GB/s jpg_200 -BM_UFlat/4 13.3GB/s pdf -BM_UFlat/5 2.1GB/s html4 -BM_UFlat/6 1.0GB/s txt1 -BM_UFlat/7 959.4MB/s txt2 -BM_UFlat/8 1.0GB/s txt3 -BM_UFlat/9 864.5MB/s txt4 -BM_UFlat/10 2.9GB/s pb -BM_UFlat/11 1.2GB/s gaviota - -BM_ZFlat/0 944.3MB/s html (22.31 %) -BM_ZFlat/1 501.6MB/s urls (47.78 %) -BM_ZFlat/2 14.3GB/s jpg (99.95 %) -BM_ZFlat/3 538.3MB/s jpg_200 (73.00 %) -BM_ZFlat/4 8.3GB/s pdf (83.30 %) -BM_ZFlat/5 903.5MB/s html4 (22.52 %) -BM_ZFlat/6 336.0MB/s txt1 (57.88 %) -BM_ZFlat/7 312.3MB/s txt2 (61.91 %) -BM_ZFlat/8 353.1MB/s txt3 (54.99 %) -BM_ZFlat/9 289.9MB/s txt4 (66.26 %) -BM_ZFlat/10 1.2GB/s pb (19.68 %) -BM_ZFlat/11 527.4MB/s gaviota (37.72 %) diff --git a/vendor/github.com/golang/snappy/decode.go b/vendor/github.com/golang/snappy/decode.go deleted file mode 100644 index 23c6e26c6b..0000000000 --- a/vendor/github.com/golang/snappy/decode.go +++ /dev/null @@ -1,264 +0,0 @@ -// Copyright 2011 The Snappy-Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package snappy - -import ( - "encoding/binary" - "errors" - "io" -) - -var ( - // ErrCorrupt reports that the input is invalid. - ErrCorrupt = errors.New("snappy: corrupt input") - // ErrTooLarge reports that the uncompressed length is too large. - ErrTooLarge = errors.New("snappy: decoded block is too large") - // ErrUnsupported reports that the input isn't supported. - ErrUnsupported = errors.New("snappy: unsupported input") - - errUnsupportedLiteralLength = errors.New("snappy: unsupported literal length") -) - -// DecodedLen returns the length of the decoded block. -func DecodedLen(src []byte) (int, error) { - v, _, err := decodedLen(src) - return v, err -} - -// decodedLen returns the length of the decoded block and the number of bytes -// that the length header occupied. -func decodedLen(src []byte) (blockLen, headerLen int, err error) { - v, n := binary.Uvarint(src) - if n <= 0 || v > 0xffffffff { - return 0, 0, ErrCorrupt - } - - const wordSize = 32 << (^uint(0) >> 32 & 1) - if wordSize == 32 && v > 0x7fffffff { - return 0, 0, ErrTooLarge - } - return int(v), n, nil -} - -const ( - decodeErrCodeCorrupt = 1 - decodeErrCodeUnsupportedLiteralLength = 2 -) - -// Decode returns the decoded form of src. The returned slice may be a sub- -// slice of dst if dst was large enough to hold the entire decoded block. -// Otherwise, a newly allocated slice will be returned. -// -// The dst and src must not overlap. 
It is valid to pass a nil dst. -// -// Decode handles the Snappy block format, not the Snappy stream format. -func Decode(dst, src []byte) ([]byte, error) { - dLen, s, err := decodedLen(src) - if err != nil { - return nil, err - } - if dLen <= len(dst) { - dst = dst[:dLen] - } else { - dst = make([]byte, dLen) - } - switch decode(dst, src[s:]) { - case 0: - return dst, nil - case decodeErrCodeUnsupportedLiteralLength: - return nil, errUnsupportedLiteralLength - } - return nil, ErrCorrupt -} - -// NewReader returns a new Reader that decompresses from r, using the framing -// format described at -// https://github.com/google/snappy/blob/master/framing_format.txt -func NewReader(r io.Reader) *Reader { - return &Reader{ - r: r, - decoded: make([]byte, maxBlockSize), - buf: make([]byte, maxEncodedLenOfMaxBlockSize+checksumSize), - } -} - -// Reader is an io.Reader that can read Snappy-compressed bytes. -// -// Reader handles the Snappy stream format, not the Snappy block format. -type Reader struct { - r io.Reader - err error - decoded []byte - buf []byte - // decoded[i:j] contains decoded bytes that have not yet been passed on. - i, j int - readHeader bool -} - -// Reset discards any buffered data, resets all state, and switches the Snappy -// reader to read from r. This permits reusing a Reader rather than allocating -// a new one. -func (r *Reader) Reset(reader io.Reader) { - r.r = reader - r.err = nil - r.i = 0 - r.j = 0 - r.readHeader = false -} - -func (r *Reader) readFull(p []byte, allowEOF bool) (ok bool) { - if _, r.err = io.ReadFull(r.r, p); r.err != nil { - if r.err == io.ErrUnexpectedEOF || (r.err == io.EOF && !allowEOF) { - r.err = ErrCorrupt - } - return false - } - return true -} - -func (r *Reader) fill() error { - for r.i >= r.j { - if !r.readFull(r.buf[:4], true) { - return r.err - } - chunkType := r.buf[0] - if !r.readHeader { - if chunkType != chunkTypeStreamIdentifier { - r.err = ErrCorrupt - return r.err - } - r.readHeader = true - } - chunkLen := int(r.buf[1]) | int(r.buf[2])<<8 | int(r.buf[3])<<16 - if chunkLen > len(r.buf) { - r.err = ErrUnsupported - return r.err - } - - // The chunk types are specified at - // https://github.com/google/snappy/blob/master/framing_format.txt - switch chunkType { - case chunkTypeCompressedData: - // Section 4.2. Compressed data (chunk type 0x00). - if chunkLen < checksumSize { - r.err = ErrCorrupt - return r.err - } - buf := r.buf[:chunkLen] - if !r.readFull(buf, false) { - return r.err - } - checksum := uint32(buf[0]) | uint32(buf[1])<<8 | uint32(buf[2])<<16 | uint32(buf[3])<<24 - buf = buf[checksumSize:] - - n, err := DecodedLen(buf) - if err != nil { - r.err = err - return r.err - } - if n > len(r.decoded) { - r.err = ErrCorrupt - return r.err - } - if _, err := Decode(r.decoded, buf); err != nil { - r.err = err - return r.err - } - if crc(r.decoded[:n]) != checksum { - r.err = ErrCorrupt - return r.err - } - r.i, r.j = 0, n - continue - - case chunkTypeUncompressedData: - // Section 4.3. Uncompressed data (chunk type 0x01). - if chunkLen < checksumSize { - r.err = ErrCorrupt - return r.err - } - buf := r.buf[:checksumSize] - if !r.readFull(buf, false) { - return r.err - } - checksum := uint32(buf[0]) | uint32(buf[1])<<8 | uint32(buf[2])<<16 | uint32(buf[3])<<24 - // Read directly into r.decoded instead of via r.buf. 
- n := chunkLen - checksumSize - if n > len(r.decoded) { - r.err = ErrCorrupt - return r.err - } - if !r.readFull(r.decoded[:n], false) { - return r.err - } - if crc(r.decoded[:n]) != checksum { - r.err = ErrCorrupt - return r.err - } - r.i, r.j = 0, n - continue - - case chunkTypeStreamIdentifier: - // Section 4.1. Stream identifier (chunk type 0xff). - if chunkLen != len(magicBody) { - r.err = ErrCorrupt - return r.err - } - if !r.readFull(r.buf[:len(magicBody)], false) { - return r.err - } - for i := 0; i < len(magicBody); i++ { - if r.buf[i] != magicBody[i] { - r.err = ErrCorrupt - return r.err - } - } - continue - } - - if chunkType <= 0x7f { - // Section 4.5. Reserved unskippable chunks (chunk types 0x02-0x7f). - r.err = ErrUnsupported - return r.err - } - // Section 4.4 Padding (chunk type 0xfe). - // Section 4.6. Reserved skippable chunks (chunk types 0x80-0xfd). - if !r.readFull(r.buf[:chunkLen], false) { - return r.err - } - } - - return nil -} - -// Read satisfies the io.Reader interface. -func (r *Reader) Read(p []byte) (int, error) { - if r.err != nil { - return 0, r.err - } - - if err := r.fill(); err != nil { - return 0, err - } - - n := copy(p, r.decoded[r.i:r.j]) - r.i += n - return n, nil -} - -// ReadByte satisfies the io.ByteReader interface. -func (r *Reader) ReadByte() (byte, error) { - if r.err != nil { - return 0, r.err - } - - if err := r.fill(); err != nil { - return 0, err - } - - c := r.decoded[r.i] - r.i++ - return c, nil -} diff --git a/vendor/github.com/golang/snappy/decode_other.go b/vendor/github.com/golang/snappy/decode_other.go deleted file mode 100644 index 2f672be557..0000000000 --- a/vendor/github.com/golang/snappy/decode_other.go +++ /dev/null @@ -1,115 +0,0 @@ -// Copyright 2016 The Snappy-Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// +build !amd64,!arm64 appengine !gc noasm - -package snappy - -// decode writes the decoding of src to dst. It assumes that the varint-encoded -// length of the decompressed bytes has already been read, and that len(dst) -// equals that length. -// -// It returns 0 on success or a decodeErrCodeXxx error code on failure. -func decode(dst, src []byte) int { - var d, s, offset, length int - for s < len(src) { - switch src[s] & 0x03 { - case tagLiteral: - x := uint32(src[s] >> 2) - switch { - case x < 60: - s++ - case x == 60: - s += 2 - if uint(s) > uint(len(src)) { // The uint conversions catch overflow from the previous line. - return decodeErrCodeCorrupt - } - x = uint32(src[s-1]) - case x == 61: - s += 3 - if uint(s) > uint(len(src)) { // The uint conversions catch overflow from the previous line. - return decodeErrCodeCorrupt - } - x = uint32(src[s-2]) | uint32(src[s-1])<<8 - case x == 62: - s += 4 - if uint(s) > uint(len(src)) { // The uint conversions catch overflow from the previous line. - return decodeErrCodeCorrupt - } - x = uint32(src[s-3]) | uint32(src[s-2])<<8 | uint32(src[s-1])<<16 - case x == 63: - s += 5 - if uint(s) > uint(len(src)) { // The uint conversions catch overflow from the previous line. 
- return decodeErrCodeCorrupt - } - x = uint32(src[s-4]) | uint32(src[s-3])<<8 | uint32(src[s-2])<<16 | uint32(src[s-1])<<24 - } - length = int(x) + 1 - if length <= 0 { - return decodeErrCodeUnsupportedLiteralLength - } - if length > len(dst)-d || length > len(src)-s { - return decodeErrCodeCorrupt - } - copy(dst[d:], src[s:s+length]) - d += length - s += length - continue - - case tagCopy1: - s += 2 - if uint(s) > uint(len(src)) { // The uint conversions catch overflow from the previous line. - return decodeErrCodeCorrupt - } - length = 4 + int(src[s-2])>>2&0x7 - offset = int(uint32(src[s-2])&0xe0<<3 | uint32(src[s-1])) - - case tagCopy2: - s += 3 - if uint(s) > uint(len(src)) { // The uint conversions catch overflow from the previous line. - return decodeErrCodeCorrupt - } - length = 1 + int(src[s-3])>>2 - offset = int(uint32(src[s-2]) | uint32(src[s-1])<<8) - - case tagCopy4: - s += 5 - if uint(s) > uint(len(src)) { // The uint conversions catch overflow from the previous line. - return decodeErrCodeCorrupt - } - length = 1 + int(src[s-5])>>2 - offset = int(uint32(src[s-4]) | uint32(src[s-3])<<8 | uint32(src[s-2])<<16 | uint32(src[s-1])<<24) - } - - if offset <= 0 || d < offset || length > len(dst)-d { - return decodeErrCodeCorrupt - } - // Copy from an earlier sub-slice of dst to a later sub-slice. - // If no overlap, use the built-in copy: - if offset >= length { - copy(dst[d:d+length], dst[d-offset:]) - d += length - continue - } - - // Unlike the built-in copy function, this byte-by-byte copy always runs - // forwards, even if the slices overlap. Conceptually, this is: - // - // d += forwardCopy(dst[d:d+length], dst[d-offset:]) - // - // We align the slices into a and b and show the compiler they are the same size. - // This allows the loop to run without bounds checks. - a := dst[d : d+length] - b := dst[d-offset:] - b = b[:len(a)] - for i := range a { - a[i] = b[i] - } - d += length - } - if d != len(dst) { - return decodeErrCodeCorrupt - } - return 0 -} diff --git a/vendor/github.com/golang/snappy/encode.go b/vendor/github.com/golang/snappy/encode.go deleted file mode 100644 index 7f23657076..0000000000 --- a/vendor/github.com/golang/snappy/encode.go +++ /dev/null @@ -1,289 +0,0 @@ -// Copyright 2011 The Snappy-Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package snappy - -import ( - "encoding/binary" - "errors" - "io" -) - -// Encode returns the encoded form of src. The returned slice may be a sub- -// slice of dst if dst was large enough to hold the entire encoded block. -// Otherwise, a newly allocated slice will be returned. -// -// The dst and src must not overlap. It is valid to pass a nil dst. -// -// Encode handles the Snappy block format, not the Snappy stream format. -func Encode(dst, src []byte) []byte { - if n := MaxEncodedLen(len(src)); n < 0 { - panic(ErrTooLarge) - } else if len(dst) < n { - dst = make([]byte, n) - } - - // The block starts with the varint-encoded length of the decompressed bytes. - d := binary.PutUvarint(dst, uint64(len(src))) - - for len(src) > 0 { - p := src - src = nil - if len(p) > maxBlockSize { - p, src = p[:maxBlockSize], p[maxBlockSize:] - } - if len(p) < minNonLiteralBlockSize { - d += emitLiteral(dst[d:], p) - } else { - d += encodeBlock(dst[d:], p) - } - } - return dst[:d] -} - -// inputMargin is the minimum number of extra input bytes to keep, inside -// encodeBlock's inner loop. 
On some architectures, this margin lets us -// implement a fast path for emitLiteral, where the copy of short (<= 16 byte) -// literals can be implemented as a single load to and store from a 16-byte -// register. That literal's actual length can be as short as 1 byte, so this -// can copy up to 15 bytes too much, but that's OK as subsequent iterations of -// the encoding loop will fix up the copy overrun, and this inputMargin ensures -// that we don't overrun the dst and src buffers. -const inputMargin = 16 - 1 - -// minNonLiteralBlockSize is the minimum size of the input to encodeBlock that -// could be encoded with a copy tag. This is the minimum with respect to the -// algorithm used by encodeBlock, not a minimum enforced by the file format. -// -// The encoded output must start with at least a 1 byte literal, as there are -// no previous bytes to copy. A minimal (1 byte) copy after that, generated -// from an emitCopy call in encodeBlock's main loop, would require at least -// another inputMargin bytes, for the reason above: we want any emitLiteral -// calls inside encodeBlock's main loop to use the fast path if possible, which -// requires being able to overrun by inputMargin bytes. Thus, -// minNonLiteralBlockSize equals 1 + 1 + inputMargin. -// -// The C++ code doesn't use this exact threshold, but it could, as discussed at -// https://groups.google.com/d/topic/snappy-compression/oGbhsdIJSJ8/discussion -// The difference between Go (2+inputMargin) and C++ (inputMargin) is purely an -// optimization. It should not affect the encoded form. This is tested by -// TestSameEncodingAsCppShortCopies. -const minNonLiteralBlockSize = 1 + 1 + inputMargin - -// MaxEncodedLen returns the maximum length of a snappy block, given its -// uncompressed length. -// -// It will return a negative value if srcLen is too large to encode. -func MaxEncodedLen(srcLen int) int { - n := uint64(srcLen) - if n > 0xffffffff { - return -1 - } - // Compressed data can be defined as: - // compressed := item* literal* - // item := literal* copy - // - // The trailing literal sequence has a space blowup of at most 62/60 - // since a literal of length 60 needs one tag byte + one extra byte - // for length information. - // - // Item blowup is trickier to measure. Suppose the "copy" op copies - // 4 bytes of data. Because of a special check in the encoding code, - // we produce a 4-byte copy only if the offset is < 65536. Therefore - // the copy op takes 3 bytes to encode, and this type of item leads - // to at most the 62/60 blowup for representing literals. - // - // Suppose the "copy" op copies 5 bytes of data. If the offset is big - // enough, it will take 5 bytes to encode the copy op. Therefore the - // worst case here is a one-byte literal followed by a five-byte copy. - // That is, 6 bytes of input turn into 7 bytes of "compressed" data. - // - // This last factor dominates the blowup, so the final estimate is: - n = 32 + n + n/6 - if n > 0xffffffff { - return -1 - } - return int(n) -} - -var errClosed = errors.New("snappy: Writer is closed") - -// NewWriter returns a new Writer that compresses to w. -// -// The Writer returned does not buffer writes. There is no need to Flush or -// Close such a Writer. -// -// Deprecated: the Writer returned is not suitable for many small writes, only -// for few large writes. Use NewBufferedWriter instead, which is efficient -// regardless of the frequency and shape of the writes, and remember to Close -// that Writer when done. 
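(An aside for readers of this patch, not part of the diff: the deprecation comment above points callers at NewBufferedWriter plus Close. A minimal round trip through the Snappy stream format, assuming the upstream github.com/golang/snappy module that this vendored copy mirrors is available:

package main

import (
	"bytes"
	"fmt"
	"io"

	"github.com/golang/snappy"
)

func main() {
	var buf bytes.Buffer

	// NewBufferedWriter buffers incoming writes, so Close is required
	// to flush the final chunk of the framed stream.
	w := snappy.NewBufferedWriter(&buf)
	if _, err := w.Write([]byte("hello, snappy stream format")); err != nil {
		panic(err)
	}
	if err := w.Close(); err != nil {
		panic(err)
	}

	// NewReader consumes the framed stream produced above.
	out, err := io.ReadAll(snappy.NewReader(&buf))
	if err != nil {
		panic(err)
	}
	fmt.Printf("%s\n", out)
}

End of aside.)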
-func NewWriter(w io.Writer) *Writer { - return &Writer{ - w: w, - obuf: make([]byte, obufLen), - } -} - -// NewBufferedWriter returns a new Writer that compresses to w, using the -// framing format described at -// https://github.com/google/snappy/blob/master/framing_format.txt -// -// The Writer returned buffers writes. Users must call Close to guarantee all -// data has been forwarded to the underlying io.Writer. They may also call -// Flush zero or more times before calling Close. -func NewBufferedWriter(w io.Writer) *Writer { - return &Writer{ - w: w, - ibuf: make([]byte, 0, maxBlockSize), - obuf: make([]byte, obufLen), - } -} - -// Writer is an io.Writer that can write Snappy-compressed bytes. -// -// Writer handles the Snappy stream format, not the Snappy block format. -type Writer struct { - w io.Writer - err error - - // ibuf is a buffer for the incoming (uncompressed) bytes. - // - // Its use is optional. For backwards compatibility, Writers created by the - // NewWriter function have ibuf == nil, do not buffer incoming bytes, and - // therefore do not need to be Flush'ed or Close'd. - ibuf []byte - - // obuf is a buffer for the outgoing (compressed) bytes. - obuf []byte - - // wroteStreamHeader is whether we have written the stream header. - wroteStreamHeader bool -} - -// Reset discards the writer's state and switches the Snappy writer to write to -// w. This permits reusing a Writer rather than allocating a new one. -func (w *Writer) Reset(writer io.Writer) { - w.w = writer - w.err = nil - if w.ibuf != nil { - w.ibuf = w.ibuf[:0] - } - w.wroteStreamHeader = false -} - -// Write satisfies the io.Writer interface. -func (w *Writer) Write(p []byte) (nRet int, errRet error) { - if w.ibuf == nil { - // Do not buffer incoming bytes. This does not perform or compress well - // if the caller of Writer.Write writes many small slices. This - // behavior is therefore deprecated, but still supported for backwards - // compatibility with code that doesn't explicitly Flush or Close. - return w.write(p) - } - - // The remainder of this method is based on bufio.Writer.Write from the - // standard library. - - for len(p) > (cap(w.ibuf)-len(w.ibuf)) && w.err == nil { - var n int - if len(w.ibuf) == 0 { - // Large write, empty buffer. - // Write directly from p to avoid copy. - n, _ = w.write(p) - } else { - n = copy(w.ibuf[len(w.ibuf):cap(w.ibuf)], p) - w.ibuf = w.ibuf[:len(w.ibuf)+n] - w.Flush() - } - nRet += n - p = p[n:] - } - if w.err != nil { - return nRet, w.err - } - n := copy(w.ibuf[len(w.ibuf):cap(w.ibuf)], p) - w.ibuf = w.ibuf[:len(w.ibuf)+n] - nRet += n - return nRet, nil -} - -func (w *Writer) write(p []byte) (nRet int, errRet error) { - if w.err != nil { - return 0, w.err - } - for len(p) > 0 { - obufStart := len(magicChunk) - if !w.wroteStreamHeader { - w.wroteStreamHeader = true - copy(w.obuf, magicChunk) - obufStart = 0 - } - - var uncompressed []byte - if len(p) > maxBlockSize { - uncompressed, p = p[:maxBlockSize], p[maxBlockSize:] - } else { - uncompressed, p = p, nil - } - checksum := crc(uncompressed) - - // Compress the buffer, discarding the result if the improvement - // isn't at least 12.5%. 
- compressed := Encode(w.obuf[obufHeaderLen:], uncompressed) - chunkType := uint8(chunkTypeCompressedData) - chunkLen := 4 + len(compressed) - obufEnd := obufHeaderLen + len(compressed) - if len(compressed) >= len(uncompressed)-len(uncompressed)/8 { - chunkType = chunkTypeUncompressedData - chunkLen = 4 + len(uncompressed) - obufEnd = obufHeaderLen - } - - // Fill in the per-chunk header that comes before the body. - w.obuf[len(magicChunk)+0] = chunkType - w.obuf[len(magicChunk)+1] = uint8(chunkLen >> 0) - w.obuf[len(magicChunk)+2] = uint8(chunkLen >> 8) - w.obuf[len(magicChunk)+3] = uint8(chunkLen >> 16) - w.obuf[len(magicChunk)+4] = uint8(checksum >> 0) - w.obuf[len(magicChunk)+5] = uint8(checksum >> 8) - w.obuf[len(magicChunk)+6] = uint8(checksum >> 16) - w.obuf[len(magicChunk)+7] = uint8(checksum >> 24) - - if _, err := w.w.Write(w.obuf[obufStart:obufEnd]); err != nil { - w.err = err - return nRet, err - } - if chunkType == chunkTypeUncompressedData { - if _, err := w.w.Write(uncompressed); err != nil { - w.err = err - return nRet, err - } - } - nRet += len(uncompressed) - } - return nRet, nil -} - -// Flush flushes the Writer to its underlying io.Writer. -func (w *Writer) Flush() error { - if w.err != nil { - return w.err - } - if len(w.ibuf) == 0 { - return nil - } - w.write(w.ibuf) - w.ibuf = w.ibuf[:0] - return w.err -} - -// Close calls Flush and then closes the Writer. -func (w *Writer) Close() error { - w.Flush() - ret := w.err - if w.err == nil { - w.err = errClosed - } - return ret -} diff --git a/vendor/github.com/golang/snappy/encode_amd64.s b/vendor/github.com/golang/snappy/encode_amd64.s deleted file mode 100644 index adfd979fe2..0000000000 --- a/vendor/github.com/golang/snappy/encode_amd64.s +++ /dev/null @@ -1,730 +0,0 @@ -// Copyright 2016 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// +build !appengine -// +build gc -// +build !noasm - -#include "textflag.h" - -// The XXX lines assemble on Go 1.4, 1.5 and 1.7, but not 1.6, due to a -// Go toolchain regression. See https://github.com/golang/go/issues/15426 and -// https://github.com/golang/snappy/issues/29 -// -// As a workaround, the package was built with a known good assembler, and -// those instructions were disassembled by "objdump -d" to yield the -// 4e 0f b7 7c 5c 78 movzwq 0x78(%rsp,%r11,2),%r15 -// style comments, in AT&T asm syntax. Note that rsp here is a physical -// register, not Go/asm's SP pseudo-register (see https://golang.org/doc/asm). -// The instructions were then encoded as "BYTE $0x.." sequences, which assemble -// fine on Go 1.6. - -// The asm code generally follows the pure Go code in encode_other.go, except -// where marked with a "!!!". - -// ---------------------------------------------------------------------------- - -// func emitLiteral(dst, lit []byte) int -// -// All local variables fit into registers. The register allocation: -// - AX len(lit) -// - BX n -// - DX return value -// - DI &dst[i] -// - R10 &lit[0] -// -// The 24 bytes of stack space is to call runtime·memmove. -// -// The unusual register allocation of local variables, such as R10 for the -// source pointer, matches the allocation used at the call site in encodeBlock, -// which makes it easier to manually inline this function. 
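(An aside for readers of this patch, not part of the diff: the assembly below hand-implements emitLiteral, whose header layout is easiest to see in Go. A sketch of just the 1-, 2- and 3-byte literal headers, mirroring the pure-Go emitLiteral in encode_other.go further down; literalHeader is a hypothetical name introduced for illustration, and tagLiteral is 0x00 as in the upstream source:

package main

import "fmt"

const tagLiteral = 0x00 // low two bits of a literal tag byte

// literalHeader mirrors emitLiteral's header logic: literals of up to 60
// bytes encode length-1 directly in the tag byte; longer ones spill the
// length into one or two extra bytes (tag values 60 and 61, i.e. the
// 0xf0 and 0xf4 constants visible in the assembly below).
func literalHeader(litLen int) []byte {
	n := uint(litLen - 1)
	switch {
	case n < 60:
		return []byte{uint8(n)<<2 | tagLiteral}
	case n < 1<<8:
		return []byte{60<<2 | tagLiteral, uint8(n)}
	default:
		return []byte{61<<2 | tagLiteral, uint8(n), uint8(n >> 8)}
	}
}

func main() {
	for _, l := range []int{1, 60, 61, 1000} {
		fmt.Printf("len %4d -> header % x\n", l, literalHeader(l))
	}
}

End of aside.)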
-TEXT ·emitLiteral(SB), NOSPLIT, $24-56 - MOVQ dst_base+0(FP), DI - MOVQ lit_base+24(FP), R10 - MOVQ lit_len+32(FP), AX - MOVQ AX, DX - MOVL AX, BX - SUBL $1, BX - - CMPL BX, $60 - JLT oneByte - CMPL BX, $256 - JLT twoBytes - -threeBytes: - MOVB $0xf4, 0(DI) - MOVW BX, 1(DI) - ADDQ $3, DI - ADDQ $3, DX - JMP memmove - -twoBytes: - MOVB $0xf0, 0(DI) - MOVB BX, 1(DI) - ADDQ $2, DI - ADDQ $2, DX - JMP memmove - -oneByte: - SHLB $2, BX - MOVB BX, 0(DI) - ADDQ $1, DI - ADDQ $1, DX - -memmove: - MOVQ DX, ret+48(FP) - - // copy(dst[i:], lit) - // - // This means calling runtime·memmove(&dst[i], &lit[0], len(lit)), so we push - // DI, R10 and AX as arguments. - MOVQ DI, 0(SP) - MOVQ R10, 8(SP) - MOVQ AX, 16(SP) - CALL runtime·memmove(SB) - RET - -// ---------------------------------------------------------------------------- - -// func emitCopy(dst []byte, offset, length int) int -// -// All local variables fit into registers. The register allocation: -// - AX length -// - SI &dst[0] -// - DI &dst[i] -// - R11 offset -// -// The unusual register allocation of local variables, such as R11 for the -// offset, matches the allocation used at the call site in encodeBlock, which -// makes it easier to manually inline this function. -TEXT ·emitCopy(SB), NOSPLIT, $0-48 - MOVQ dst_base+0(FP), DI - MOVQ DI, SI - MOVQ offset+24(FP), R11 - MOVQ length+32(FP), AX - -loop0: - // for length >= 68 { etc } - CMPL AX, $68 - JLT step1 - - // Emit a length 64 copy, encoded as 3 bytes. - MOVB $0xfe, 0(DI) - MOVW R11, 1(DI) - ADDQ $3, DI - SUBL $64, AX - JMP loop0 - -step1: - // if length > 64 { etc } - CMPL AX, $64 - JLE step2 - - // Emit a length 60 copy, encoded as 3 bytes. - MOVB $0xee, 0(DI) - MOVW R11, 1(DI) - ADDQ $3, DI - SUBL $60, AX - -step2: - // if length >= 12 || offset >= 2048 { goto step3 } - CMPL AX, $12 - JGE step3 - CMPL R11, $2048 - JGE step3 - - // Emit the remaining copy, encoded as 2 bytes. - MOVB R11, 1(DI) - SHRL $8, R11 - SHLB $5, R11 - SUBB $4, AX - SHLB $2, AX - ORB AX, R11 - ORB $1, R11 - MOVB R11, 0(DI) - ADDQ $2, DI - - // Return the number of bytes written. - SUBQ SI, DI - MOVQ DI, ret+40(FP) - RET - -step3: - // Emit the remaining copy, encoded as 3 bytes. - SUBL $1, AX - SHLB $2, AX - ORB $2, AX - MOVB AX, 0(DI) - MOVW R11, 1(DI) - ADDQ $3, DI - - // Return the number of bytes written. - SUBQ SI, DI - MOVQ DI, ret+40(FP) - RET - -// ---------------------------------------------------------------------------- - -// func extendMatch(src []byte, i, j int) int -// -// All local variables fit into registers. The register allocation: -// - DX &src[0] -// - SI &src[j] -// - R13 &src[len(src) - 8] -// - R14 &src[len(src)] -// - R15 &src[i] -// -// The unusual register allocation of local variables, such as R15 for a source -// pointer, matches the allocation used at the call site in encodeBlock, which -// makes it easier to manually inline this function. -TEXT ·extendMatch(SB), NOSPLIT, $0-48 - MOVQ src_base+0(FP), DX - MOVQ src_len+8(FP), R14 - MOVQ i+24(FP), R15 - MOVQ j+32(FP), SI - ADDQ DX, R14 - ADDQ DX, R15 - ADDQ DX, SI - MOVQ R14, R13 - SUBQ $8, R13 - -cmp8: - // As long as we are 8 or more bytes before the end of src, we can load and - // compare 8 bytes at a time. If those 8 bytes are equal, repeat. - CMPQ SI, R13 - JA cmp1 - MOVQ (R15), AX - MOVQ (SI), BX - CMPQ AX, BX - JNE bsf - ADDQ $8, R15 - ADDQ $8, SI - JMP cmp8 - -bsf: - // If those 8 bytes were not equal, XOR the two 8 byte values, and return - // the index of the first byte that differs. 
The BSF instruction finds the - // least significant 1 bit, the amd64 architecture is little-endian, and - // the shift by 3 converts a bit index to a byte index. - XORQ AX, BX - BSFQ BX, BX - SHRQ $3, BX - ADDQ BX, SI - - // Convert from &src[ret] to ret. - SUBQ DX, SI - MOVQ SI, ret+40(FP) - RET - -cmp1: - // In src's tail, compare 1 byte at a time. - CMPQ SI, R14 - JAE extendMatchEnd - MOVB (R15), AX - MOVB (SI), BX - CMPB AX, BX - JNE extendMatchEnd - ADDQ $1, R15 - ADDQ $1, SI - JMP cmp1 - -extendMatchEnd: - // Convert from &src[ret] to ret. - SUBQ DX, SI - MOVQ SI, ret+40(FP) - RET - -// ---------------------------------------------------------------------------- - -// func encodeBlock(dst, src []byte) (d int) -// -// All local variables fit into registers, other than "var table". The register -// allocation: -// - AX . . -// - BX . . -// - CX 56 shift (note that amd64 shifts by non-immediates must use CX). -// - DX 64 &src[0], tableSize -// - SI 72 &src[s] -// - DI 80 &dst[d] -// - R9 88 sLimit -// - R10 . &src[nextEmit] -// - R11 96 prevHash, currHash, nextHash, offset -// - R12 104 &src[base], skip -// - R13 . &src[nextS], &src[len(src) - 8] -// - R14 . len(src), bytesBetweenHashLookups, &src[len(src)], x -// - R15 112 candidate -// -// The second column (56, 64, etc) is the stack offset to spill the registers -// when calling other functions. We could pack this slightly tighter, but it's -// simpler to have a dedicated spill map independent of the function called. -// -// "var table [maxTableSize]uint16" takes up 32768 bytes of stack space. An -// extra 56 bytes, to call other functions, and an extra 64 bytes, to spill -// local variables (registers) during calls gives 32768 + 56 + 64 = 32888. -TEXT ·encodeBlock(SB), 0, $32888-56 - MOVQ dst_base+0(FP), DI - MOVQ src_base+24(FP), SI - MOVQ src_len+32(FP), R14 - - // shift, tableSize := uint32(32-8), 1<<8 - MOVQ $24, CX - MOVQ $256, DX - -calcShift: - // for ; tableSize < maxTableSize && tableSize < len(src); tableSize *= 2 { - // shift-- - // } - CMPQ DX, $16384 - JGE varTable - CMPQ DX, R14 - JGE varTable - SUBQ $1, CX - SHLQ $1, DX - JMP calcShift - -varTable: - // var table [maxTableSize]uint16 - // - // In the asm code, unlike the Go code, we can zero-initialize only the - // first tableSize elements. Each uint16 element is 2 bytes and each MOVOU - // writes 16 bytes, so we can do only tableSize/8 writes instead of the - // 2048 writes that would zero-initialize all of table's 32768 bytes. - SHRQ $3, DX - LEAQ table-32768(SP), BX - PXOR X0, X0 - -memclr: - MOVOU X0, 0(BX) - ADDQ $16, BX - SUBQ $1, DX - JNZ memclr - - // !!! DX = &src[0] - MOVQ SI, DX - - // sLimit := len(src) - inputMargin - MOVQ R14, R9 - SUBQ $15, R9 - - // !!! Pre-emptively spill CX, DX and R9 to the stack. Their values don't - // change for the rest of the function. 
- MOVQ CX, 56(SP) - MOVQ DX, 64(SP) - MOVQ R9, 88(SP) - - // nextEmit := 0 - MOVQ DX, R10 - - // s := 1 - ADDQ $1, SI - - // nextHash := hash(load32(src, s), shift) - MOVL 0(SI), R11 - IMULL $0x1e35a7bd, R11 - SHRL CX, R11 - -outer: - // for { etc } - - // skip := 32 - MOVQ $32, R12 - - // nextS := s - MOVQ SI, R13 - - // candidate := 0 - MOVQ $0, R15 - -inner0: - // for { etc } - - // s := nextS - MOVQ R13, SI - - // bytesBetweenHashLookups := skip >> 5 - MOVQ R12, R14 - SHRQ $5, R14 - - // nextS = s + bytesBetweenHashLookups - ADDQ R14, R13 - - // skip += bytesBetweenHashLookups - ADDQ R14, R12 - - // if nextS > sLimit { goto emitRemainder } - MOVQ R13, AX - SUBQ DX, AX - CMPQ AX, R9 - JA emitRemainder - - // candidate = int(table[nextHash]) - // XXX: MOVWQZX table-32768(SP)(R11*2), R15 - // XXX: 4e 0f b7 7c 5c 78 movzwq 0x78(%rsp,%r11,2),%r15 - BYTE $0x4e - BYTE $0x0f - BYTE $0xb7 - BYTE $0x7c - BYTE $0x5c - BYTE $0x78 - - // table[nextHash] = uint16(s) - MOVQ SI, AX - SUBQ DX, AX - - // XXX: MOVW AX, table-32768(SP)(R11*2) - // XXX: 66 42 89 44 5c 78 mov %ax,0x78(%rsp,%r11,2) - BYTE $0x66 - BYTE $0x42 - BYTE $0x89 - BYTE $0x44 - BYTE $0x5c - BYTE $0x78 - - // nextHash = hash(load32(src, nextS), shift) - MOVL 0(R13), R11 - IMULL $0x1e35a7bd, R11 - SHRL CX, R11 - - // if load32(src, s) != load32(src, candidate) { continue } break - MOVL 0(SI), AX - MOVL (DX)(R15*1), BX - CMPL AX, BX - JNE inner0 - -fourByteMatch: - // As per the encode_other.go code: - // - // A 4-byte match has been found. We'll later see etc. - - // !!! Jump to a fast path for short (<= 16 byte) literals. See the comment - // on inputMargin in encode.go. - MOVQ SI, AX - SUBQ R10, AX - CMPQ AX, $16 - JLE emitLiteralFastPath - - // ---------------------------------------- - // Begin inline of the emitLiteral call. - // - // d += emitLiteral(dst[d:], src[nextEmit:s]) - - MOVL AX, BX - SUBL $1, BX - - CMPL BX, $60 - JLT inlineEmitLiteralOneByte - CMPL BX, $256 - JLT inlineEmitLiteralTwoBytes - -inlineEmitLiteralThreeBytes: - MOVB $0xf4, 0(DI) - MOVW BX, 1(DI) - ADDQ $3, DI - JMP inlineEmitLiteralMemmove - -inlineEmitLiteralTwoBytes: - MOVB $0xf0, 0(DI) - MOVB BX, 1(DI) - ADDQ $2, DI - JMP inlineEmitLiteralMemmove - -inlineEmitLiteralOneByte: - SHLB $2, BX - MOVB BX, 0(DI) - ADDQ $1, DI - -inlineEmitLiteralMemmove: - // Spill local variables (registers) onto the stack; call; unspill. - // - // copy(dst[i:], lit) - // - // This means calling runtime·memmove(&dst[i], &lit[0], len(lit)), so we push - // DI, R10 and AX as arguments. - MOVQ DI, 0(SP) - MOVQ R10, 8(SP) - MOVQ AX, 16(SP) - ADDQ AX, DI // Finish the "d +=" part of "d += emitLiteral(etc)". - MOVQ SI, 72(SP) - MOVQ DI, 80(SP) - MOVQ R15, 112(SP) - CALL runtime·memmove(SB) - MOVQ 56(SP), CX - MOVQ 64(SP), DX - MOVQ 72(SP), SI - MOVQ 80(SP), DI - MOVQ 88(SP), R9 - MOVQ 112(SP), R15 - JMP inner1 - -inlineEmitLiteralEnd: - // End inline of the emitLiteral call. - // ---------------------------------------- - -emitLiteralFastPath: - // !!! Emit the 1-byte encoding "uint8(len(lit)-1)<<2". - MOVB AX, BX - SUBB $1, BX - SHLB $2, BX - MOVB BX, (DI) - ADDQ $1, DI - - // !!! Implement the copy from lit to dst as a 16-byte load and store. - // (Encode's documentation says that dst and src must not overlap.) - // - // This always copies 16 bytes, instead of only len(lit) bytes, but that's - // OK. Subsequent iterations will fix up the overrun. - // - // Note that on amd64, it is legal and cheap to issue unaligned 8-byte or - // 16-byte loads and stores. 
This technique probably wouldn't be as - // effective on architectures that are fussier about alignment. - MOVOU 0(R10), X0 - MOVOU X0, 0(DI) - ADDQ AX, DI - -inner1: - // for { etc } - - // base := s - MOVQ SI, R12 - - // !!! offset := base - candidate - MOVQ R12, R11 - SUBQ R15, R11 - SUBQ DX, R11 - - // ---------------------------------------- - // Begin inline of the extendMatch call. - // - // s = extendMatch(src, candidate+4, s+4) - - // !!! R14 = &src[len(src)] - MOVQ src_len+32(FP), R14 - ADDQ DX, R14 - - // !!! R13 = &src[len(src) - 8] - MOVQ R14, R13 - SUBQ $8, R13 - - // !!! R15 = &src[candidate + 4] - ADDQ $4, R15 - ADDQ DX, R15 - - // !!! s += 4 - ADDQ $4, SI - -inlineExtendMatchCmp8: - // As long as we are 8 or more bytes before the end of src, we can load and - // compare 8 bytes at a time. If those 8 bytes are equal, repeat. - CMPQ SI, R13 - JA inlineExtendMatchCmp1 - MOVQ (R15), AX - MOVQ (SI), BX - CMPQ AX, BX - JNE inlineExtendMatchBSF - ADDQ $8, R15 - ADDQ $8, SI - JMP inlineExtendMatchCmp8 - -inlineExtendMatchBSF: - // If those 8 bytes were not equal, XOR the two 8 byte values, and return - // the index of the first byte that differs. The BSF instruction finds the - // least significant 1 bit, the amd64 architecture is little-endian, and - // the shift by 3 converts a bit index to a byte index. - XORQ AX, BX - BSFQ BX, BX - SHRQ $3, BX - ADDQ BX, SI - JMP inlineExtendMatchEnd - -inlineExtendMatchCmp1: - // In src's tail, compare 1 byte at a time. - CMPQ SI, R14 - JAE inlineExtendMatchEnd - MOVB (R15), AX - MOVB (SI), BX - CMPB AX, BX - JNE inlineExtendMatchEnd - ADDQ $1, R15 - ADDQ $1, SI - JMP inlineExtendMatchCmp1 - -inlineExtendMatchEnd: - // End inline of the extendMatch call. - // ---------------------------------------- - - // ---------------------------------------- - // Begin inline of the emitCopy call. - // - // d += emitCopy(dst[d:], base-candidate, s-base) - - // !!! length := s - base - MOVQ SI, AX - SUBQ R12, AX - -inlineEmitCopyLoop0: - // for length >= 68 { etc } - CMPL AX, $68 - JLT inlineEmitCopyStep1 - - // Emit a length 64 copy, encoded as 3 bytes. - MOVB $0xfe, 0(DI) - MOVW R11, 1(DI) - ADDQ $3, DI - SUBL $64, AX - JMP inlineEmitCopyLoop0 - -inlineEmitCopyStep1: - // if length > 64 { etc } - CMPL AX, $64 - JLE inlineEmitCopyStep2 - - // Emit a length 60 copy, encoded as 3 bytes. - MOVB $0xee, 0(DI) - MOVW R11, 1(DI) - ADDQ $3, DI - SUBL $60, AX - -inlineEmitCopyStep2: - // if length >= 12 || offset >= 2048 { goto inlineEmitCopyStep3 } - CMPL AX, $12 - JGE inlineEmitCopyStep3 - CMPL R11, $2048 - JGE inlineEmitCopyStep3 - - // Emit the remaining copy, encoded as 2 bytes. - MOVB R11, 1(DI) - SHRL $8, R11 - SHLB $5, R11 - SUBB $4, AX - SHLB $2, AX - ORB AX, R11 - ORB $1, R11 - MOVB R11, 0(DI) - ADDQ $2, DI - JMP inlineEmitCopyEnd - -inlineEmitCopyStep3: - // Emit the remaining copy, encoded as 3 bytes. - SUBL $1, AX - SHLB $2, AX - ORB $2, AX - MOVB AX, 0(DI) - MOVW R11, 1(DI) - ADDQ $3, DI - -inlineEmitCopyEnd: - // End inline of the emitCopy call. - // ---------------------------------------- - - // nextEmit = s - MOVQ SI, R10 - - // if s >= sLimit { goto emitRemainder } - MOVQ SI, AX - SUBQ DX, AX - CMPQ AX, R9 - JAE emitRemainder - - // As per the encode_other.go code: - // - // We could immediately etc. 
- - // x := load64(src, s-1) - MOVQ -1(SI), R14 - - // prevHash := hash(uint32(x>>0), shift) - MOVL R14, R11 - IMULL $0x1e35a7bd, R11 - SHRL CX, R11 - - // table[prevHash] = uint16(s-1) - MOVQ SI, AX - SUBQ DX, AX - SUBQ $1, AX - - // XXX: MOVW AX, table-32768(SP)(R11*2) - // XXX: 66 42 89 44 5c 78 mov %ax,0x78(%rsp,%r11,2) - BYTE $0x66 - BYTE $0x42 - BYTE $0x89 - BYTE $0x44 - BYTE $0x5c - BYTE $0x78 - - // currHash := hash(uint32(x>>8), shift) - SHRQ $8, R14 - MOVL R14, R11 - IMULL $0x1e35a7bd, R11 - SHRL CX, R11 - - // candidate = int(table[currHash]) - // XXX: MOVWQZX table-32768(SP)(R11*2), R15 - // XXX: 4e 0f b7 7c 5c 78 movzwq 0x78(%rsp,%r11,2),%r15 - BYTE $0x4e - BYTE $0x0f - BYTE $0xb7 - BYTE $0x7c - BYTE $0x5c - BYTE $0x78 - - // table[currHash] = uint16(s) - ADDQ $1, AX - - // XXX: MOVW AX, table-32768(SP)(R11*2) - // XXX: 66 42 89 44 5c 78 mov %ax,0x78(%rsp,%r11,2) - BYTE $0x66 - BYTE $0x42 - BYTE $0x89 - BYTE $0x44 - BYTE $0x5c - BYTE $0x78 - - // if uint32(x>>8) == load32(src, candidate) { continue } - MOVL (DX)(R15*1), BX - CMPL R14, BX - JEQ inner1 - - // nextHash = hash(uint32(x>>16), shift) - SHRQ $8, R14 - MOVL R14, R11 - IMULL $0x1e35a7bd, R11 - SHRL CX, R11 - - // s++ - ADDQ $1, SI - - // break out of the inner1 for loop, i.e. continue the outer loop. - JMP outer - -emitRemainder: - // if nextEmit < len(src) { etc } - MOVQ src_len+32(FP), AX - ADDQ DX, AX - CMPQ R10, AX - JEQ encodeBlockEnd - - // d += emitLiteral(dst[d:], src[nextEmit:]) - // - // Push args. - MOVQ DI, 0(SP) - MOVQ $0, 8(SP) // Unnecessary, as the callee ignores it, but conservative. - MOVQ $0, 16(SP) // Unnecessary, as the callee ignores it, but conservative. - MOVQ R10, 24(SP) - SUBQ R10, AX - MOVQ AX, 32(SP) - MOVQ AX, 40(SP) // Unnecessary, as the callee ignores it, but conservative. - - // Spill local variables (registers) onto the stack; call; unspill. - MOVQ DI, 80(SP) - CALL ·emitLiteral(SB) - MOVQ 80(SP), DI - - // Finish the "d +=" part of "d += emitLiteral(etc)". - ADDQ 48(SP), DI - -encodeBlockEnd: - MOVQ dst_base+0(FP), AX - SUBQ AX, DI - MOVQ DI, d+48(FP) - RET diff --git a/vendor/github.com/golang/snappy/encode_arm64.s b/vendor/github.com/golang/snappy/encode_arm64.s deleted file mode 100644 index f8d54adfc5..0000000000 --- a/vendor/github.com/golang/snappy/encode_arm64.s +++ /dev/null @@ -1,722 +0,0 @@ -// Copyright 2020 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// +build !appengine -// +build gc -// +build !noasm - -#include "textflag.h" - -// The asm code generally follows the pure Go code in encode_other.go, except -// where marked with a "!!!". - -// ---------------------------------------------------------------------------- - -// func emitLiteral(dst, lit []byte) int -// -// All local variables fit into registers. The register allocation: -// - R3 len(lit) -// - R4 n -// - R6 return value -// - R8 &dst[i] -// - R10 &lit[0] -// -// The 32 bytes of stack space is to call runtime·memmove. -// -// The unusual register allocation of local variables, such as R10 for the -// source pointer, matches the allocation used at the call site in encodeBlock, -// which makes it easier to manually inline this function. 
-TEXT ·emitLiteral(SB), NOSPLIT, $32-56 - MOVD dst_base+0(FP), R8 - MOVD lit_base+24(FP), R10 - MOVD lit_len+32(FP), R3 - MOVD R3, R6 - MOVW R3, R4 - SUBW $1, R4, R4 - - CMPW $60, R4 - BLT oneByte - CMPW $256, R4 - BLT twoBytes - -threeBytes: - MOVD $0xf4, R2 - MOVB R2, 0(R8) - MOVW R4, 1(R8) - ADD $3, R8, R8 - ADD $3, R6, R6 - B memmove - -twoBytes: - MOVD $0xf0, R2 - MOVB R2, 0(R8) - MOVB R4, 1(R8) - ADD $2, R8, R8 - ADD $2, R6, R6 - B memmove - -oneByte: - LSLW $2, R4, R4 - MOVB R4, 0(R8) - ADD $1, R8, R8 - ADD $1, R6, R6 - -memmove: - MOVD R6, ret+48(FP) - - // copy(dst[i:], lit) - // - // This means calling runtime·memmove(&dst[i], &lit[0], len(lit)), so we push - // R8, R10 and R3 as arguments. - MOVD R8, 8(RSP) - MOVD R10, 16(RSP) - MOVD R3, 24(RSP) - CALL runtime·memmove(SB) - RET - -// ---------------------------------------------------------------------------- - -// func emitCopy(dst []byte, offset, length int) int -// -// All local variables fit into registers. The register allocation: -// - R3 length -// - R7 &dst[0] -// - R8 &dst[i] -// - R11 offset -// -// The unusual register allocation of local variables, such as R11 for the -// offset, matches the allocation used at the call site in encodeBlock, which -// makes it easier to manually inline this function. -TEXT ·emitCopy(SB), NOSPLIT, $0-48 - MOVD dst_base+0(FP), R8 - MOVD R8, R7 - MOVD offset+24(FP), R11 - MOVD length+32(FP), R3 - -loop0: - // for length >= 68 { etc } - CMPW $68, R3 - BLT step1 - - // Emit a length 64 copy, encoded as 3 bytes. - MOVD $0xfe, R2 - MOVB R2, 0(R8) - MOVW R11, 1(R8) - ADD $3, R8, R8 - SUB $64, R3, R3 - B loop0 - -step1: - // if length > 64 { etc } - CMP $64, R3 - BLE step2 - - // Emit a length 60 copy, encoded as 3 bytes. - MOVD $0xee, R2 - MOVB R2, 0(R8) - MOVW R11, 1(R8) - ADD $3, R8, R8 - SUB $60, R3, R3 - -step2: - // if length >= 12 || offset >= 2048 { goto step3 } - CMP $12, R3 - BGE step3 - CMPW $2048, R11 - BGE step3 - - // Emit the remaining copy, encoded as 2 bytes. - MOVB R11, 1(R8) - LSRW $3, R11, R11 - AND $0xe0, R11, R11 - SUB $4, R3, R3 - LSLW $2, R3 - AND $0xff, R3, R3 - ORRW R3, R11, R11 - ORRW $1, R11, R11 - MOVB R11, 0(R8) - ADD $2, R8, R8 - - // Return the number of bytes written. - SUB R7, R8, R8 - MOVD R8, ret+40(FP) - RET - -step3: - // Emit the remaining copy, encoded as 3 bytes. - SUB $1, R3, R3 - AND $0xff, R3, R3 - LSLW $2, R3, R3 - ORRW $2, R3, R3 - MOVB R3, 0(R8) - MOVW R11, 1(R8) - ADD $3, R8, R8 - - // Return the number of bytes written. - SUB R7, R8, R8 - MOVD R8, ret+40(FP) - RET - -// ---------------------------------------------------------------------------- - -// func extendMatch(src []byte, i, j int) int -// -// All local variables fit into registers. The register allocation: -// - R6 &src[0] -// - R7 &src[j] -// - R13 &src[len(src) - 8] -// - R14 &src[len(src)] -// - R15 &src[i] -// -// The unusual register allocation of local variables, such as R15 for a source -// pointer, matches the allocation used at the call site in encodeBlock, which -// makes it easier to manually inline this function. -TEXT ·extendMatch(SB), NOSPLIT, $0-48 - MOVD src_base+0(FP), R6 - MOVD src_len+8(FP), R14 - MOVD i+24(FP), R15 - MOVD j+32(FP), R7 - ADD R6, R14, R14 - ADD R6, R15, R15 - ADD R6, R7, R7 - MOVD R14, R13 - SUB $8, R13, R13 - -cmp8: - // As long as we are 8 or more bytes before the end of src, we can load and - // compare 8 bytes at a time. If those 8 bytes are equal, repeat. 
-	CMP  R13, R7
-	BHI  cmp1
-	MOVD (R15), R3
-	MOVD (R7), R4
-	CMP  R4, R3
-	BNE  bsf
-	ADD  $8, R15, R15
-	ADD  $8, R7, R7
-	B    cmp8
-
-bsf:
-	// If those 8 bytes were not equal, XOR the two 8 byte values, and return
-	// the index of the first byte that differs.
-	// RBIT reverses the bit order, then CLZ counts the leading zeros, the
-	// combination of which finds the least significant bit which is set.
-	// The arm64 architecture is little-endian, and the shift by 3 converts
-	// a bit index to a byte index.
-	EOR  R3, R4, R4
-	RBIT R4, R4
-	CLZ  R4, R4
-	ADD  R4>>3, R7, R7
-
-	// Convert from &src[ret] to ret.
-	SUB  R6, R7, R7
-	MOVD R7, ret+40(FP)
-	RET
-
-cmp1:
-	// In src's tail, compare 1 byte at a time.
-	CMP  R7, R14
-	BLS  extendMatchEnd
-	MOVB (R15), R3
-	MOVB (R7), R4
-	CMP  R4, R3
-	BNE  extendMatchEnd
-	ADD  $1, R15, R15
-	ADD  $1, R7, R7
-	B    cmp1
-
-extendMatchEnd:
-	// Convert from &src[ret] to ret.
-	SUB  R6, R7, R7
-	MOVD R7, ret+40(FP)
-	RET
-
-// ----------------------------------------------------------------------------
-
-// func encodeBlock(dst, src []byte) (d int)
-//
-// All local variables fit into registers, other than "var table". The register
-// allocation:
-//	- R3	.	.
-//	- R4	.	.
-//	- R5	64	shift
-//	- R6	72	&src[0], tableSize
-//	- R7	80	&src[s]
-//	- R8	88	&dst[d]
-//	- R9	96	sLimit
-//	- R10	.	&src[nextEmit]
-//	- R11	104	prevHash, currHash, nextHash, offset
-//	- R12	112	&src[base], skip
-//	- R13	.	&src[nextS], &src[len(src) - 8]
-//	- R14	.	len(src), bytesBetweenHashLookups, &src[len(src)], x
-//	- R15	120	candidate
-//	- R16	.	hash constant, 0x1e35a7bd
-//	- R17	.	&table
-//	- .	128	table
-//
-// The second column (64, 72, etc) is the stack offset to spill the registers
-// when calling other functions. We could pack this slightly tighter, but it's
-// simpler to have a dedicated spill map independent of the function called.
-//
-// "var table [maxTableSize]uint16" takes up 32768 bytes of stack space. An
-// extra 64 bytes, to call other functions, and an extra 64 bytes, to spill
-// local variables (registers) during calls gives 32768 + 64 + 64 = 32896.
-TEXT ·encodeBlock(SB), 0, $32896-56
-	MOVD dst_base+0(FP), R8
-	MOVD src_base+24(FP), R7
-	MOVD src_len+32(FP), R14
-
-	// shift, tableSize := uint32(32-8), 1<<8
-	MOVD  $24, R5
-	MOVD  $256, R6
-	MOVW  $0xa7bd, R16
-	MOVKW $(0x1e35<<16), R16
-
-calcShift:
-	// for ; tableSize < maxTableSize && tableSize < len(src); tableSize *= 2 {
-	//	shift--
-	// }
-	MOVD $16384, R2
-	CMP  R2, R6
-	BGE  varTable
-	CMP  R14, R6
-	BGE  varTable
-	SUB  $1, R5, R5
-	LSL  $1, R6, R6
-	B    calcShift
-
-varTable:
-	// var table [maxTableSize]uint16
-	//
-	// In the asm code, unlike the Go code, we can zero-initialize only the
-	// first tableSize elements. Each uint16 element is 2 bytes and each
-	// iteration writes 64 bytes, so we can do only tableSize/32 writes
-	// instead of the 2048 writes that would zero-initialize all of table's
-	// 32768 bytes. This clear could overrun the first tableSize elements, but
-	// it won't overrun the allocated stack size.
-	ADD  $128, RSP, R17
-	MOVD R17, R4
-
-	// !!! R6 = &src[tableSize]
-	ADD R6<<1, R17, R6
-
-memclr:
-	STP.P (ZR, ZR), 64(R4)
-	STP   (ZR, ZR), -48(R4)
-	STP   (ZR, ZR), -32(R4)
-	STP   (ZR, ZR), -16(R4)
-	CMP   R4, R6
-	BHI   memclr
-
-	// !!! R6 = &src[0]
-	MOVD R7, R6
-
-	// sLimit := len(src) - inputMargin
-	MOVD R14, R9
-	SUB  $15, R9, R9
-
-	// !!! Pre-emptively spill R5, R6 and R9 to the stack. Their values don't
-	// change for the rest of the function.
- MOVD R5, 64(RSP) - MOVD R6, 72(RSP) - MOVD R9, 96(RSP) - - // nextEmit := 0 - MOVD R6, R10 - - // s := 1 - ADD $1, R7, R7 - - // nextHash := hash(load32(src, s), shift) - MOVW 0(R7), R11 - MULW R16, R11, R11 - LSRW R5, R11, R11 - -outer: - // for { etc } - - // skip := 32 - MOVD $32, R12 - - // nextS := s - MOVD R7, R13 - - // candidate := 0 - MOVD $0, R15 - -inner0: - // for { etc } - - // s := nextS - MOVD R13, R7 - - // bytesBetweenHashLookups := skip >> 5 - MOVD R12, R14 - LSR $5, R14, R14 - - // nextS = s + bytesBetweenHashLookups - ADD R14, R13, R13 - - // skip += bytesBetweenHashLookups - ADD R14, R12, R12 - - // if nextS > sLimit { goto emitRemainder } - MOVD R13, R3 - SUB R6, R3, R3 - CMP R9, R3 - BHI emitRemainder - - // candidate = int(table[nextHash]) - MOVHU 0(R17)(R11<<1), R15 - - // table[nextHash] = uint16(s) - MOVD R7, R3 - SUB R6, R3, R3 - - MOVH R3, 0(R17)(R11<<1) - - // nextHash = hash(load32(src, nextS), shift) - MOVW 0(R13), R11 - MULW R16, R11 - LSRW R5, R11, R11 - - // if load32(src, s) != load32(src, candidate) { continue } break - MOVW 0(R7), R3 - MOVW (R6)(R15), R4 - CMPW R4, R3 - BNE inner0 - -fourByteMatch: - // As per the encode_other.go code: - // - // A 4-byte match has been found. We'll later see etc. - - // !!! Jump to a fast path for short (<= 16 byte) literals. See the comment - // on inputMargin in encode.go. - MOVD R7, R3 - SUB R10, R3, R3 - CMP $16, R3 - BLE emitLiteralFastPath - - // ---------------------------------------- - // Begin inline of the emitLiteral call. - // - // d += emitLiteral(dst[d:], src[nextEmit:s]) - - MOVW R3, R4 - SUBW $1, R4, R4 - - MOVW $60, R2 - CMPW R2, R4 - BLT inlineEmitLiteralOneByte - MOVW $256, R2 - CMPW R2, R4 - BLT inlineEmitLiteralTwoBytes - -inlineEmitLiteralThreeBytes: - MOVD $0xf4, R1 - MOVB R1, 0(R8) - MOVW R4, 1(R8) - ADD $3, R8, R8 - B inlineEmitLiteralMemmove - -inlineEmitLiteralTwoBytes: - MOVD $0xf0, R1 - MOVB R1, 0(R8) - MOVB R4, 1(R8) - ADD $2, R8, R8 - B inlineEmitLiteralMemmove - -inlineEmitLiteralOneByte: - LSLW $2, R4, R4 - MOVB R4, 0(R8) - ADD $1, R8, R8 - -inlineEmitLiteralMemmove: - // Spill local variables (registers) onto the stack; call; unspill. - // - // copy(dst[i:], lit) - // - // This means calling runtime·memmove(&dst[i], &lit[0], len(lit)), so we push - // R8, R10 and R3 as arguments. - MOVD R8, 8(RSP) - MOVD R10, 16(RSP) - MOVD R3, 24(RSP) - - // Finish the "d +=" part of "d += emitLiteral(etc)". - ADD R3, R8, R8 - MOVD R7, 80(RSP) - MOVD R8, 88(RSP) - MOVD R15, 120(RSP) - CALL runtime·memmove(SB) - MOVD 64(RSP), R5 - MOVD 72(RSP), R6 - MOVD 80(RSP), R7 - MOVD 88(RSP), R8 - MOVD 96(RSP), R9 - MOVD 120(RSP), R15 - ADD $128, RSP, R17 - MOVW $0xa7bd, R16 - MOVKW $(0x1e35<<16), R16 - B inner1 - -inlineEmitLiteralEnd: - // End inline of the emitLiteral call. - // ---------------------------------------- - -emitLiteralFastPath: - // !!! Emit the 1-byte encoding "uint8(len(lit)-1)<<2". - MOVB R3, R4 - SUBW $1, R4, R4 - AND $0xff, R4, R4 - LSLW $2, R4, R4 - MOVB R4, (R8) - ADD $1, R8, R8 - - // !!! Implement the copy from lit to dst as a 16-byte load and store. - // (Encode's documentation says that dst and src must not overlap.) - // - // This always copies 16 bytes, instead of only len(lit) bytes, but that's - // OK. Subsequent iterations will fix up the overrun. - // - // Note that on arm64, it is legal and cheap to issue unaligned 8-byte or - // 16-byte loads and stores. This technique probably wouldn't be as - // effective on architectures that are fussier about alignment. 
- LDP 0(R10), (R0, R1) - STP (R0, R1), 0(R8) - ADD R3, R8, R8 - -inner1: - // for { etc } - - // base := s - MOVD R7, R12 - - // !!! offset := base - candidate - MOVD R12, R11 - SUB R15, R11, R11 - SUB R6, R11, R11 - - // ---------------------------------------- - // Begin inline of the extendMatch call. - // - // s = extendMatch(src, candidate+4, s+4) - - // !!! R14 = &src[len(src)] - MOVD src_len+32(FP), R14 - ADD R6, R14, R14 - - // !!! R13 = &src[len(src) - 8] - MOVD R14, R13 - SUB $8, R13, R13 - - // !!! R15 = &src[candidate + 4] - ADD $4, R15, R15 - ADD R6, R15, R15 - - // !!! s += 4 - ADD $4, R7, R7 - -inlineExtendMatchCmp8: - // As long as we are 8 or more bytes before the end of src, we can load and - // compare 8 bytes at a time. If those 8 bytes are equal, repeat. - CMP R13, R7 - BHI inlineExtendMatchCmp1 - MOVD (R15), R3 - MOVD (R7), R4 - CMP R4, R3 - BNE inlineExtendMatchBSF - ADD $8, R15, R15 - ADD $8, R7, R7 - B inlineExtendMatchCmp8 - -inlineExtendMatchBSF: - // If those 8 bytes were not equal, XOR the two 8 byte values, and return - // the index of the first byte that differs. - // RBIT reverses the bit order, then CLZ counts the leading zeros, the - // combination of which finds the least significant bit which is set. - // The arm64 architecture is little-endian, and the shift by 3 converts - // a bit index to a byte index. - EOR R3, R4, R4 - RBIT R4, R4 - CLZ R4, R4 - ADD R4>>3, R7, R7 - B inlineExtendMatchEnd - -inlineExtendMatchCmp1: - // In src's tail, compare 1 byte at a time. - CMP R7, R14 - BLS inlineExtendMatchEnd - MOVB (R15), R3 - MOVB (R7), R4 - CMP R4, R3 - BNE inlineExtendMatchEnd - ADD $1, R15, R15 - ADD $1, R7, R7 - B inlineExtendMatchCmp1 - -inlineExtendMatchEnd: - // End inline of the extendMatch call. - // ---------------------------------------- - - // ---------------------------------------- - // Begin inline of the emitCopy call. - // - // d += emitCopy(dst[d:], base-candidate, s-base) - - // !!! length := s - base - MOVD R7, R3 - SUB R12, R3, R3 - -inlineEmitCopyLoop0: - // for length >= 68 { etc } - MOVW $68, R2 - CMPW R2, R3 - BLT inlineEmitCopyStep1 - - // Emit a length 64 copy, encoded as 3 bytes. - MOVD $0xfe, R1 - MOVB R1, 0(R8) - MOVW R11, 1(R8) - ADD $3, R8, R8 - SUBW $64, R3, R3 - B inlineEmitCopyLoop0 - -inlineEmitCopyStep1: - // if length > 64 { etc } - MOVW $64, R2 - CMPW R2, R3 - BLE inlineEmitCopyStep2 - - // Emit a length 60 copy, encoded as 3 bytes. - MOVD $0xee, R1 - MOVB R1, 0(R8) - MOVW R11, 1(R8) - ADD $3, R8, R8 - SUBW $60, R3, R3 - -inlineEmitCopyStep2: - // if length >= 12 || offset >= 2048 { goto inlineEmitCopyStep3 } - MOVW $12, R2 - CMPW R2, R3 - BGE inlineEmitCopyStep3 - MOVW $2048, R2 - CMPW R2, R11 - BGE inlineEmitCopyStep3 - - // Emit the remaining copy, encoded as 2 bytes. - MOVB R11, 1(R8) - LSRW $8, R11, R11 - LSLW $5, R11, R11 - SUBW $4, R3, R3 - AND $0xff, R3, R3 - LSLW $2, R3, R3 - ORRW R3, R11, R11 - ORRW $1, R11, R11 - MOVB R11, 0(R8) - ADD $2, R8, R8 - B inlineEmitCopyEnd - -inlineEmitCopyStep3: - // Emit the remaining copy, encoded as 3 bytes. - SUBW $1, R3, R3 - LSLW $2, R3, R3 - ORRW $2, R3, R3 - MOVB R3, 0(R8) - MOVW R11, 1(R8) - ADD $3, R8, R8 - -inlineEmitCopyEnd: - // End inline of the emitCopy call. - // ---------------------------------------- - - // nextEmit = s - MOVD R7, R10 - - // if s >= sLimit { goto emitRemainder } - MOVD R7, R3 - SUB R6, R3, R3 - CMP R3, R9 - BLS emitRemainder - - // As per the encode_other.go code: - // - // We could immediately etc. 
- - // x := load64(src, s-1) - MOVD -1(R7), R14 - - // prevHash := hash(uint32(x>>0), shift) - MOVW R14, R11 - MULW R16, R11, R11 - LSRW R5, R11, R11 - - // table[prevHash] = uint16(s-1) - MOVD R7, R3 - SUB R6, R3, R3 - SUB $1, R3, R3 - - MOVHU R3, 0(R17)(R11<<1) - - // currHash := hash(uint32(x>>8), shift) - LSR $8, R14, R14 - MOVW R14, R11 - MULW R16, R11, R11 - LSRW R5, R11, R11 - - // candidate = int(table[currHash]) - MOVHU 0(R17)(R11<<1), R15 - - // table[currHash] = uint16(s) - ADD $1, R3, R3 - MOVHU R3, 0(R17)(R11<<1) - - // if uint32(x>>8) == load32(src, candidate) { continue } - MOVW (R6)(R15), R4 - CMPW R4, R14 - BEQ inner1 - - // nextHash = hash(uint32(x>>16), shift) - LSR $8, R14, R14 - MOVW R14, R11 - MULW R16, R11, R11 - LSRW R5, R11, R11 - - // s++ - ADD $1, R7, R7 - - // break out of the inner1 for loop, i.e. continue the outer loop. - B outer - -emitRemainder: - // if nextEmit < len(src) { etc } - MOVD src_len+32(FP), R3 - ADD R6, R3, R3 - CMP R3, R10 - BEQ encodeBlockEnd - - // d += emitLiteral(dst[d:], src[nextEmit:]) - // - // Push args. - MOVD R8, 8(RSP) - MOVD $0, 16(RSP) // Unnecessary, as the callee ignores it, but conservative. - MOVD $0, 24(RSP) // Unnecessary, as the callee ignores it, but conservative. - MOVD R10, 32(RSP) - SUB R10, R3, R3 - MOVD R3, 40(RSP) - MOVD R3, 48(RSP) // Unnecessary, as the callee ignores it, but conservative. - - // Spill local variables (registers) onto the stack; call; unspill. - MOVD R8, 88(RSP) - CALL ·emitLiteral(SB) - MOVD 88(RSP), R8 - - // Finish the "d +=" part of "d += emitLiteral(etc)". - MOVD 56(RSP), R1 - ADD R1, R8, R8 - -encodeBlockEnd: - MOVD dst_base+0(FP), R3 - SUB R3, R8, R8 - MOVD R8, d+48(FP) - RET diff --git a/vendor/github.com/golang/snappy/encode_asm.go b/vendor/github.com/golang/snappy/encode_asm.go deleted file mode 100644 index 107c1e7141..0000000000 --- a/vendor/github.com/golang/snappy/encode_asm.go +++ /dev/null @@ -1,30 +0,0 @@ -// Copyright 2016 The Snappy-Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// +build !appengine -// +build gc -// +build !noasm -// +build amd64 arm64 - -package snappy - -// emitLiteral has the same semantics as in encode_other.go. -// -//go:noescape -func emitLiteral(dst, lit []byte) int - -// emitCopy has the same semantics as in encode_other.go. -// -//go:noescape -func emitCopy(dst []byte, offset, length int) int - -// extendMatch has the same semantics as in encode_other.go. -// -//go:noescape -func extendMatch(src []byte, i, j int) int - -// encodeBlock has the same semantics as in encode_other.go. -// -//go:noescape -func encodeBlock(dst, src []byte) (d int) diff --git a/vendor/github.com/golang/snappy/encode_other.go b/vendor/github.com/golang/snappy/encode_other.go deleted file mode 100644 index 296d7f0beb..0000000000 --- a/vendor/github.com/golang/snappy/encode_other.go +++ /dev/null @@ -1,238 +0,0 @@ -// Copyright 2016 The Snappy-Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// +build !amd64,!arm64 appengine !gc noasm - -package snappy - -func load32(b []byte, i int) uint32 { - b = b[i : i+4 : len(b)] // Help the compiler eliminate bounds checks on the next line. - return uint32(b[0]) | uint32(b[1])<<8 | uint32(b[2])<<16 | uint32(b[3])<<24 -} - -func load64(b []byte, i int) uint64 { - b = b[i : i+8 : len(b)] // Help the compiler eliminate bounds checks on the next line. 
- return uint64(b[0]) | uint64(b[1])<<8 | uint64(b[2])<<16 | uint64(b[3])<<24 | - uint64(b[4])<<32 | uint64(b[5])<<40 | uint64(b[6])<<48 | uint64(b[7])<<56 -} - -// emitLiteral writes a literal chunk and returns the number of bytes written. -// -// It assumes that: -// dst is long enough to hold the encoded bytes -// 1 <= len(lit) && len(lit) <= 65536 -func emitLiteral(dst, lit []byte) int { - i, n := 0, uint(len(lit)-1) - switch { - case n < 60: - dst[0] = uint8(n)<<2 | tagLiteral - i = 1 - case n < 1<<8: - dst[0] = 60<<2 | tagLiteral - dst[1] = uint8(n) - i = 2 - default: - dst[0] = 61<<2 | tagLiteral - dst[1] = uint8(n) - dst[2] = uint8(n >> 8) - i = 3 - } - return i + copy(dst[i:], lit) -} - -// emitCopy writes a copy chunk and returns the number of bytes written. -// -// It assumes that: -// dst is long enough to hold the encoded bytes -// 1 <= offset && offset <= 65535 -// 4 <= length && length <= 65535 -func emitCopy(dst []byte, offset, length int) int { - i := 0 - // The maximum length for a single tagCopy1 or tagCopy2 op is 64 bytes. The - // threshold for this loop is a little higher (at 68 = 64 + 4), and the - // length emitted down below is is a little lower (at 60 = 64 - 4), because - // it's shorter to encode a length 67 copy as a length 60 tagCopy2 followed - // by a length 7 tagCopy1 (which encodes as 3+2 bytes) than to encode it as - // a length 64 tagCopy2 followed by a length 3 tagCopy2 (which encodes as - // 3+3 bytes). The magic 4 in the 64±4 is because the minimum length for a - // tagCopy1 op is 4 bytes, which is why a length 3 copy has to be an - // encodes-as-3-bytes tagCopy2 instead of an encodes-as-2-bytes tagCopy1. - for length >= 68 { - // Emit a length 64 copy, encoded as 3 bytes. - dst[i+0] = 63<<2 | tagCopy2 - dst[i+1] = uint8(offset) - dst[i+2] = uint8(offset >> 8) - i += 3 - length -= 64 - } - if length > 64 { - // Emit a length 60 copy, encoded as 3 bytes. - dst[i+0] = 59<<2 | tagCopy2 - dst[i+1] = uint8(offset) - dst[i+2] = uint8(offset >> 8) - i += 3 - length -= 60 - } - if length >= 12 || offset >= 2048 { - // Emit the remaining copy, encoded as 3 bytes. - dst[i+0] = uint8(length-1)<<2 | tagCopy2 - dst[i+1] = uint8(offset) - dst[i+2] = uint8(offset >> 8) - return i + 3 - } - // Emit the remaining copy, encoded as 2 bytes. - dst[i+0] = uint8(offset>>8)<<5 | uint8(length-4)<<2 | tagCopy1 - dst[i+1] = uint8(offset) - return i + 2 -} - -// extendMatch returns the largest k such that k <= len(src) and that -// src[i:i+k-j] and src[j:k] have the same contents. -// -// It assumes that: -// 0 <= i && i < j && j <= len(src) -func extendMatch(src []byte, i, j int) int { - for ; j < len(src) && src[i] == src[j]; i, j = i+1, j+1 { - } - return j -} - -func hash(u, shift uint32) uint32 { - return (u * 0x1e35a7bd) >> shift -} - -// encodeBlock encodes a non-empty src to a guaranteed-large-enough dst. It -// assumes that the varint-encoded length of the decompressed bytes has already -// been written. -// -// It also assumes that: -// len(dst) >= MaxEncodedLen(len(src)) && -// minNonLiteralBlockSize <= len(src) && len(src) <= maxBlockSize -func encodeBlock(dst, src []byte) (d int) { - // Initialize the hash table. Its size ranges from 1<<8 to 1<<14 inclusive. - // The table element type is uint16, as s < sLimit and sLimit < len(src) - // and len(src) <= maxBlockSize and maxBlockSize == 65536. - const ( - maxTableSize = 1 << 14 - // tableMask is redundant, but helps the compiler eliminate bounds - // checks. 
- tableMask = maxTableSize - 1 - ) - shift := uint32(32 - 8) - for tableSize := 1 << 8; tableSize < maxTableSize && tableSize < len(src); tableSize *= 2 { - shift-- - } - // In Go, all array elements are zero-initialized, so there is no advantage - // to a smaller tableSize per se. However, it matches the C++ algorithm, - // and in the asm versions of this code, we can get away with zeroing only - // the first tableSize elements. - var table [maxTableSize]uint16 - - // sLimit is when to stop looking for offset/length copies. The inputMargin - // lets us use a fast path for emitLiteral in the main loop, while we are - // looking for copies. - sLimit := len(src) - inputMargin - - // nextEmit is where in src the next emitLiteral should start from. - nextEmit := 0 - - // The encoded form must start with a literal, as there are no previous - // bytes to copy, so we start looking for hash matches at s == 1. - s := 1 - nextHash := hash(load32(src, s), shift) - - for { - // Copied from the C++ snappy implementation: - // - // Heuristic match skipping: If 32 bytes are scanned with no matches - // found, start looking only at every other byte. If 32 more bytes are - // scanned (or skipped), look at every third byte, etc.. When a match - // is found, immediately go back to looking at every byte. This is a - // small loss (~5% performance, ~0.1% density) for compressible data - // due to more bookkeeping, but for non-compressible data (such as - // JPEG) it's a huge win since the compressor quickly "realizes" the - // data is incompressible and doesn't bother looking for matches - // everywhere. - // - // The "skip" variable keeps track of how many bytes there are since - // the last match; dividing it by 32 (ie. right-shifting by five) gives - // the number of bytes to move ahead for each iteration. - skip := 32 - - nextS := s - candidate := 0 - for { - s = nextS - bytesBetweenHashLookups := skip >> 5 - nextS = s + bytesBetweenHashLookups - skip += bytesBetweenHashLookups - if nextS > sLimit { - goto emitRemainder - } - candidate = int(table[nextHash&tableMask]) - table[nextHash&tableMask] = uint16(s) - nextHash = hash(load32(src, nextS), shift) - if load32(src, s) == load32(src, candidate) { - break - } - } - - // A 4-byte match has been found. We'll later see if more than 4 bytes - // match. But, prior to the match, src[nextEmit:s] are unmatched. Emit - // them as literal bytes. - d += emitLiteral(dst[d:], src[nextEmit:s]) - - // Call emitCopy, and then see if another emitCopy could be our next - // move. Repeat until we find no match for the input immediately after - // what was consumed by the last emitCopy call. - // - // If we exit this loop normally then we need to call emitLiteral next, - // though we don't yet know how big the literal will be. We handle that - // by proceeding to the next iteration of the main loop. We also can - // exit this loop via goto if we get close to exhausting the input. - for { - // Invariant: we have a 4-byte match at s, and no need to emit any - // literal bytes prior to s. - base := s - - // Extend the 4-byte match as long as possible. - // - // This is an inlined version of: - // s = extendMatch(src, candidate+4, s+4) - s += 4 - for i := candidate + 4; s < len(src) && src[i] == src[s]; i, s = i+1, s+1 { - } - - d += emitCopy(dst[d:], base-candidate, s-base) - nextEmit = s - if s >= sLimit { - goto emitRemainder - } - - // We could immediately start working at s now, but to improve - // compression we first update the hash table at s-1 and at s. 
If - // another emitCopy is not our next move, also calculate nextHash - // at s+1. At least on GOARCH=amd64, these three hash calculations - // are faster as one load64 call (with some shifts) instead of - // three load32 calls. - x := load64(src, s-1) - prevHash := hash(uint32(x>>0), shift) - table[prevHash&tableMask] = uint16(s - 1) - currHash := hash(uint32(x>>8), shift) - candidate = int(table[currHash&tableMask]) - table[currHash&tableMask] = uint16(s) - if uint32(x>>8) != load32(src, candidate) { - nextHash = hash(uint32(x>>16), shift) - s++ - break - } - } - } - -emitRemainder: - if nextEmit < len(src) { - d += emitLiteral(dst[d:], src[nextEmit:]) - } - return d -} diff --git a/vendor/github.com/google/flatbuffers/LICENSE.txt b/vendor/github.com/google/flatbuffers/LICENSE similarity index 100% rename from vendor/github.com/google/flatbuffers/LICENSE.txt rename to vendor/github.com/google/flatbuffers/LICENSE diff --git a/vendor/github.com/google/flatbuffers/go/builder.go b/vendor/github.com/google/flatbuffers/go/builder.go index 0e763d7ac1..5d90e8ef98 100644 --- a/vendor/github.com/google/flatbuffers/go/builder.go +++ b/vendor/github.com/google/flatbuffers/go/builder.go @@ -1,5 +1,7 @@ package flatbuffers +import "sort" + // Builder is a state machine for creating FlatBuffer objects. // Use a Builder to construct object(s) starting from leaf nodes. // @@ -22,6 +24,7 @@ type Builder struct { } const fileIdentifierLength = 4 +const sizePrefixLength = 4 // NewBuilder initializes a Builder of size `initial_size`. // The internal buffer is grown as needed. @@ -53,6 +56,12 @@ func (b *Builder) Reset() { b.vtable = b.vtable[:0] } + if b.sharedStrings != nil { + for key := range b.sharedStrings { + delete(b.sharedStrings, key) + } + } + b.head = UOffsetT(len(b.Bytes)) b.minalign = 1 b.nested = false @@ -308,6 +317,25 @@ func (b *Builder) EndVector(vectorNumElems int) UOffsetT { return b.Offset() } +// CreateVectorOfTables serializes slice of table offsets into a vector. +func (b *Builder) CreateVectorOfTables(offsets []UOffsetT) UOffsetT { + b.assertNotNested() + b.StartVector(4, len(offsets), 4) + for i := len(offsets) - 1; i >= 0; i-- { + b.PrependUOffsetT(offsets[i]) + } + return b.EndVector(len(offsets)) +} + +type KeyCompare func(o1, o2 UOffsetT, buf []byte) bool + +func (b *Builder) CreateVectorOfSortedTables(offsets []UOffsetT, keyCompare KeyCompare) UOffsetT { + sort.Slice(offsets, func(i, j int) bool { + return keyCompare(offsets[i], offsets[j], b.Bytes) + }) + return b.CreateVectorOfTables(offsets) +} + // CreateSharedString Checks if the string is already written // to the buffer before calling CreateString func (b *Builder) CreateSharedString(s string) UOffsetT { @@ -574,11 +602,53 @@ func (b *Builder) FinishWithFileIdentifier(rootTable UOffsetT, fid []byte) { b.Finish(rootTable) } +// FinishSizePrefixed finalizes a buffer, pointing to the given `rootTable`. +// The buffer is prefixed with the size of the buffer, excluding the size +// of the prefix itself. +func (b *Builder) FinishSizePrefixed(rootTable UOffsetT) { + b.finish(rootTable, true) +} + +// FinishSizePrefixedWithFileIdentifier finalizes a buffer, pointing to the given `rootTable` +// and applies a file identifier. The buffer is prefixed with the size of the buffer, +// excluding the size of the prefix itself. 
+func (b *Builder) FinishSizePrefixedWithFileIdentifier(rootTable UOffsetT, fid []byte) { + if fid == nil || len(fid) != fileIdentifierLength { + panic("incorrect file identifier length") + } + // In order to add a file identifier and size prefix to the flatbuffer message, + // we need to prepare an alignment, a size prefix length, and file identifier length + b.Prep(b.minalign, SizeInt32+fileIdentifierLength+sizePrefixLength) + for i := fileIdentifierLength - 1; i >= 0; i-- { + // place the file identifier + b.PlaceByte(fid[i]) + } + // finish + b.finish(rootTable, true) +} + // Finish finalizes a buffer, pointing to the given `rootTable`. func (b *Builder) Finish(rootTable UOffsetT) { + b.finish(rootTable, false) +} + +// finish finalizes a buffer, pointing to the given `rootTable` +// with an optional size prefix. +func (b *Builder) finish(rootTable UOffsetT, sizePrefix bool) { b.assertNotNested() - b.Prep(b.minalign, SizeUOffsetT) + + if sizePrefix { + b.Prep(b.minalign, SizeUOffsetT+sizePrefixLength) + } else { + b.Prep(b.minalign, SizeUOffsetT) + } + b.PrependUOffsetT(rootTable) + + if sizePrefix { + b.PlaceUint32(uint32(b.Offset())) + } + b.finished = true } diff --git a/vendor/github.com/google/flatbuffers/go/encode.go b/vendor/github.com/google/flatbuffers/go/encode.go index 72d4f3a152..a2a5798125 100644 --- a/vendor/github.com/google/flatbuffers/go/encode.go +++ b/vendor/github.com/google/flatbuffers/go/encode.go @@ -118,7 +118,7 @@ func GetFloat64(buf []byte) float64 { // GetUOffsetT decodes a little-endian UOffsetT from a byte slice. func GetUOffsetT(buf []byte) UOffsetT { - return UOffsetT(GetInt32(buf)) + return UOffsetT(GetUint32(buf)) } // GetSOffsetT decodes a little-endian SOffsetT from a byte slice. diff --git a/vendor/github.com/google/flatbuffers/go/lib.go b/vendor/github.com/google/flatbuffers/go/lib.go index adfce52efe..a4e99de101 100644 --- a/vendor/github.com/google/flatbuffers/go/lib.go +++ b/vendor/github.com/google/flatbuffers/go/lib.go @@ -11,3 +11,40 @@ func GetRootAs(buf []byte, offset UOffsetT, fb FlatBuffer) { n := GetUOffsetT(buf[offset:]) fb.Init(buf, n+offset) } + +// GetSizePrefixedRootAs is a generic helper to initialize a FlatBuffer with the provided size-prefixed buffer +// bytes and its data offset +func GetSizePrefixedRootAs(buf []byte, offset UOffsetT, fb FlatBuffer) { + n := GetUOffsetT(buf[offset+sizePrefixLength:]) + fb.Init(buf, n+offset+sizePrefixLength) +} + +// GetSizePrefix reads the size from a size-prefixed flatbuffer +func GetSizePrefix(buf []byte, offset UOffsetT) uint32 { + return GetUint32(buf[offset:]) +} + +// GetIndirectOffset retrives the relative offset in the provided buffer stored at `offset`. 
+func GetIndirectOffset(buf []byte, offset UOffsetT) UOffsetT { + return offset + GetUOffsetT(buf[offset:]) +} + +// GetBufferIdentifier returns the file identifier as string +func GetBufferIdentifier(buf []byte) string { + return string(buf[SizeUOffsetT:][:fileIdentifierLength]) +} + +// GetBufferIdentifier returns the file identifier as string for a size-prefixed buffer +func GetSizePrefixedBufferIdentifier(buf []byte) string { + return string(buf[SizeUOffsetT+sizePrefixLength:][:fileIdentifierLength]) +} + +// BufferHasIdentifier checks if the identifier in a buffer has the expected value +func BufferHasIdentifier(buf []byte, identifier string) bool { + return GetBufferIdentifier(buf) == identifier +} + +// BufferHasIdentifier checks if the identifier in a buffer has the expected value for a size-prefixed buffer +func SizePrefixedBufferHasIdentifier(buf []byte, identifier string) bool { + return GetSizePrefixedBufferIdentifier(buf) == identifier +} diff --git a/vendor/github.com/klauspost/compress/.goreleaser.yml b/vendor/github.com/klauspost/compress/.goreleaser.yml index a22953805c..4528059ca6 100644 --- a/vendor/github.com/klauspost/compress/.goreleaser.yml +++ b/vendor/github.com/klauspost/compress/.goreleaser.yml @@ -1,5 +1,5 @@ -# This is an example goreleaser.yaml file with some sane defaults. -# Make sure to check the documentation at http://goreleaser.com +version: 2 + before: hooks: - ./gen.sh @@ -99,7 +99,7 @@ archives: checksum: name_template: 'checksums.txt' snapshot: - name_template: "{{ .Tag }}-next" + version_template: "{{ .Tag }}-next" changelog: sort: asc filters: diff --git a/vendor/github.com/klauspost/compress/README.md b/vendor/github.com/klauspost/compress/README.md index 05c7359e48..de264c85a5 100644 --- a/vendor/github.com/klauspost/compress/README.md +++ b/vendor/github.com/klauspost/compress/README.md @@ -16,6 +16,27 @@ This package provides various compression algorithms. # changelog +* Sep 23rd, 2024 - [1.17.10](https://github.com/klauspost/compress/releases/tag/v1.17.10) + * gzhttp: Add TransportAlwaysDecompress option. 
https://github.com/klauspost/compress/pull/978 + * gzhttp: Add supported decompress request body by @mirecl in https://github.com/klauspost/compress/pull/1002 + * s2: Add EncodeBuffer buffer recycling callback https://github.com/klauspost/compress/pull/982 + * zstd: Improve memory usage on small streaming encodes https://github.com/klauspost/compress/pull/1007 + * flate: read data written with partial flush by @vajexal in https://github.com/klauspost/compress/pull/996 + +* Jun 12th, 2024 - [1.17.9](https://github.com/klauspost/compress/releases/tag/v1.17.9) + * s2: Reduce ReadFrom temporary allocations https://github.com/klauspost/compress/pull/949 + * flate, zstd: Shave some bytes off amd64 matchLen by @greatroar in https://github.com/klauspost/compress/pull/963 + * Upgrade zip/zlib to 1.22.4 upstream https://github.com/klauspost/compress/pull/970 https://github.com/klauspost/compress/pull/971 + * zstd: BuildDict fails with RLE table https://github.com/klauspost/compress/pull/951 + +* Apr 9th, 2024 - [1.17.8](https://github.com/klauspost/compress/releases/tag/v1.17.8) + * zstd: Reject blocks where reserved values are not 0 https://github.com/klauspost/compress/pull/885 + * zstd: Add RLE detection+encoding https://github.com/klauspost/compress/pull/938 + +* Feb 21st, 2024 - [1.17.7](https://github.com/klauspost/compress/releases/tag/v1.17.7) + * s2: Add AsyncFlush method: Complete the block without flushing by @Jille in https://github.com/klauspost/compress/pull/927 + * s2: Fix literal+repeat exceeds dst crash https://github.com/klauspost/compress/pull/930 + * Feb 5th, 2024 - [1.17.6](https://github.com/klauspost/compress/releases/tag/v1.17.6) * zstd: Fix incorrect repeat coding in best mode https://github.com/klauspost/compress/pull/923 * s2: Fix DecodeConcurrent deadlock on errors https://github.com/klauspost/compress/pull/925 @@ -81,7 +102,7 @@ https://github.com/klauspost/compress/pull/919 https://github.com/klauspost/comp * zstd: Various minor improvements by @greatroar in https://github.com/klauspost/compress/pull/788 https://github.com/klauspost/compress/pull/794 https://github.com/klauspost/compress/pull/795 * s2: Fix huge block overflow https://github.com/klauspost/compress/pull/779 * s2: Allow CustomEncoder fallback https://github.com/klauspost/compress/pull/780 - * gzhttp: Suppport ResponseWriter Unwrap() in gzhttp handler by @jgimenez in https://github.com/klauspost/compress/pull/799 + * gzhttp: Support ResponseWriter Unwrap() in gzhttp handler by @jgimenez in https://github.com/klauspost/compress/pull/799 * Mar 13, 2023 - [v1.16.1](https://github.com/klauspost/compress/releases/tag/v1.16.1) * zstd: Speed up + improve best encoder by @greatroar in https://github.com/klauspost/compress/pull/776 @@ -136,7 +157,7 @@ https://github.com/klauspost/compress/pull/919 https://github.com/klauspost/comp * zstd: Add [WithDecodeAllCapLimit](https://pkg.go.dev/github.com/klauspost/compress@v1.15.10/zstd#WithDecodeAllCapLimit) https://github.com/klauspost/compress/pull/649 * Add Go 1.19 - deprecate Go 1.16 https://github.com/klauspost/compress/pull/651 * flate: Improve level 5+6 compression https://github.com/klauspost/compress/pull/656 - * zstd: Improve "better" compresssion https://github.com/klauspost/compress/pull/657 + * zstd: Improve "better" compression https://github.com/klauspost/compress/pull/657 * s2: Improve "best" compression https://github.com/klauspost/compress/pull/658 * s2: Improve "better" compression. 
https://github.com/klauspost/compress/pull/635 * s2: Slightly faster non-assembly decompression https://github.com/klauspost/compress/pull/646 @@ -339,7 +360,7 @@ While the release has been extensively tested, it is recommended to testing when * s2: Fix binaries. * Feb 25, 2021 (v1.11.8) - * s2: Fixed occational out-of-bounds write on amd64. Upgrade recommended. + * s2: Fixed occasional out-of-bounds write on amd64. Upgrade recommended. * s2: Add AMD64 assembly for better mode. 25-50% faster. [#315](https://github.com/klauspost/compress/pull/315) * s2: Less upfront decoder allocation. [#322](https://github.com/klauspost/compress/pull/322) * zstd: Faster "compression" of incompressible data. [#314](https://github.com/klauspost/compress/pull/314) @@ -518,7 +539,7 @@ While the release has been extensively tested, it is recommended to testing when * Feb 19, 2016: Faster bit writer, level -2 is 15% faster, level 1 is 4% faster. * Feb 19, 2016: Handle small payloads faster in level 1-3. * Feb 19, 2016: Added faster level 2 + 3 compression modes. -* Feb 19, 2016: [Rebalanced compression levels](https://blog.klauspost.com/rebalancing-deflate-compression-levels/), so there is a more even progresssion in terms of compression. New default level is 5. +* Feb 19, 2016: [Rebalanced compression levels](https://blog.klauspost.com/rebalancing-deflate-compression-levels/), so there is a more even progression in terms of compression. New default level is 5. * Feb 14, 2016: Snappy: Merge upstream changes. * Feb 14, 2016: Snappy: Fix aggressive skipping. * Feb 14, 2016: Snappy: Update benchmark. diff --git a/vendor/github.com/klauspost/compress/fse/decompress.go b/vendor/github.com/klauspost/compress/fse/decompress.go index cc05d0f7ea..0c7dd4ffef 100644 --- a/vendor/github.com/klauspost/compress/fse/decompress.go +++ b/vendor/github.com/klauspost/compress/fse/decompress.go @@ -15,7 +15,7 @@ const ( // It is possible, but by no way guaranteed that corrupt data will // return an error. // It is up to the caller to verify integrity of the returned data. -// Use a predefined Scrach to set maximum acceptable output size. +// Use a predefined Scratch to set maximum acceptable output size. func Decompress(b []byte, s *Scratch) ([]byte, error) { s, err := s.prepare(b) if err != nil { diff --git a/vendor/github.com/klauspost/compress/huff0/decompress.go b/vendor/github.com/klauspost/compress/huff0/decompress.go index 54bd08b25c..0f56b02d74 100644 --- a/vendor/github.com/klauspost/compress/huff0/decompress.go +++ b/vendor/github.com/klauspost/compress/huff0/decompress.go @@ -1136,7 +1136,7 @@ func (s *Scratch) matches(ct cTable, w io.Writer) { errs++ } if errs > 0 { - fmt.Fprintf(w, "%d errros in base, stopping\n", errs) + fmt.Fprintf(w, "%d errors in base, stopping\n", errs) continue } // Ensure that all combinations are covered. @@ -1152,7 +1152,7 @@ func (s *Scratch) matches(ct cTable, w io.Writer) { errs++ } if errs > 20 { - fmt.Fprintf(w, "%d errros, stopping\n", errs) + fmt.Fprintf(w, "%d errors, stopping\n", errs) break } } diff --git a/vendor/github.com/klauspost/compress/internal/race/norace.go b/vendor/github.com/klauspost/compress/internal/race/norace.go new file mode 100644 index 0000000000..affbbbb595 --- /dev/null +++ b/vendor/github.com/klauspost/compress/internal/race/norace.go @@ -0,0 +1,13 @@ +// Copyright 2015 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +//go:build !race + +package race + +func ReadSlice[T any](s []T) { +} + +func WriteSlice[T any](s []T) { +} diff --git a/vendor/github.com/klauspost/compress/internal/race/race.go b/vendor/github.com/klauspost/compress/internal/race/race.go new file mode 100644 index 0000000000..f5e240dcde --- /dev/null +++ b/vendor/github.com/klauspost/compress/internal/race/race.go @@ -0,0 +1,26 @@ +// Copyright 2015 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build race + +package race + +import ( + "runtime" + "unsafe" +) + +func ReadSlice[T any](s []T) { + if len(s) == 0 { + return + } + runtime.RaceReadRange(unsafe.Pointer(&s[0]), len(s)*int(unsafe.Sizeof(s[0]))) +} + +func WriteSlice[T any](s []T) { + if len(s) == 0 { + return + } + runtime.RaceWriteRange(unsafe.Pointer(&s[0]), len(s)*int(unsafe.Sizeof(s[0]))) +} diff --git a/vendor/github.com/klauspost/compress/s2/.gitignore b/vendor/github.com/klauspost/compress/s2/.gitignore new file mode 100644 index 0000000000..3a89c6e3e2 --- /dev/null +++ b/vendor/github.com/klauspost/compress/s2/.gitignore @@ -0,0 +1,15 @@ +testdata/bench + +# These explicitly listed benchmark data files are for an obsolete version of +# snappy_test.go. +testdata/alice29.txt +testdata/asyoulik.txt +testdata/fireworks.jpeg +testdata/geo.protodata +testdata/html +testdata/html_x_4 +testdata/kppkn.gtb +testdata/lcet10.txt +testdata/paper-100k.pdf +testdata/plrabn12.txt +testdata/urls.10K diff --git a/vendor/github.com/gogo/protobuf/LICENSE b/vendor/github.com/klauspost/compress/s2/LICENSE similarity index 73% rename from vendor/github.com/gogo/protobuf/LICENSE rename to vendor/github.com/klauspost/compress/s2/LICENSE index f57de90da8..1d2d645bd9 100644 --- a/vendor/github.com/gogo/protobuf/LICENSE +++ b/vendor/github.com/klauspost/compress/s2/LICENSE @@ -1,23 +1,17 @@ -Copyright (c) 2013, The GoGo Authors. All rights reserved. - -Protocol Buffers for Go with Gadgets - -Go support for Protocol Buffers - Google's data interchange format - -Copyright 2010 The Go Authors. All rights reserved. -https://github.com/golang/protobuf +Copyright (c) 2011 The Snappy-Go Authors. All rights reserved. +Copyright (c) 2019 Klaus Post. All rights reserved. Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: - * Redistributions of source code must retain the above copyright + * Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. - * Redistributions in binary form must reproduce the above + * Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. - * Neither the name of Google Inc. nor the names of its + * Neither the name of Google Inc. nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission. @@ -32,4 +26,3 @@ DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
- 
diff --git a/vendor/github.com/klauspost/compress/s2/README.md b/vendor/github.com/klauspost/compress/s2/README.md
new file mode 100644
index 0000000000..8284bb0810
--- /dev/null
+++ b/vendor/github.com/klauspost/compress/s2/README.md
@@ -0,0 +1,1120 @@
+# S2 Compression
+
+S2 is an extension of [Snappy](https://github.com/google/snappy).
+
+S2 is aimed at high throughput, which is why it features concurrent compression for bigger payloads.
+
+Decoding is compatible with Snappy compressed content, but content compressed with S2 cannot be decompressed by Snappy.
+This means that S2 can seamlessly replace Snappy without converting compressed content.
+
+S2 can produce Snappy compatible output, faster and better than Snappy.
+If you want the full benefit of the changes, you should use s2 without Snappy compatibility.
+
+S2 is designed to have high throughput on content that cannot be compressed.
+This is important, so you don't have to worry about spending CPU cycles on already compressed data.
+
+## Benefits over Snappy
+
+* Better compression
+* Adjustable compression (3 levels)
+* Concurrent stream compression
+* Faster decompression, even for Snappy compatible content
+* Concurrent Snappy/S2 stream decompression
+* Skip forward in compressed stream
+* Random seeking with indexes
+* Compatible with reading Snappy compressed content
+* Smaller block size overhead on incompressible blocks
+* Block concatenation
+* Block Dictionary support
+* Uncompressed stream mode
+* Automatic stream size padding
+* Snappy compatible block compression
+
+## Drawbacks over Snappy
+
+* Not optimized for 32 bit systems
+* Streams use slightly more memory due to larger blocks and concurrency (configurable)
+
+# Usage
+
+Installation: `go get -u github.com/klauspost/compress/s2`
+
+Full package documentation:
+
+[![godoc][1]][2]
+
+[1]: https://godoc.org/github.com/klauspost/compress?status.svg
+[2]: https://godoc.org/github.com/klauspost/compress/s2
+
+## Compression
+
+```Go
+func EncodeStream(src io.Reader, dst io.Writer) error {
+	enc := s2.NewWriter(dst)
+	_, err := io.Copy(enc, src)
+	if err != nil {
+		enc.Close()
+		return err
+	}
+	// Blocks until compression is done.
+	return enc.Close()
+}
+```
+
+You should always call `enc.Close()`; otherwise you will leak resources and your encode will be incomplete.
+
+For the best throughput, you should attempt to reuse the `Writer` using the `Reset()` method.
+
+The Writer in S2 is always buffered; therefore `NewBufferedWriter` in Snappy can be replaced with `NewWriter` in S2.
+It is possible to flush any buffered data using the `Flush()` method.
+This will block until all data sent to the encoder has been written to the output.
+
+S2 also supports the `io.ReaderFrom` interface, which will consume all input from a reader.
+
+As a final method to compress data, if you have a single block of data you would like to have encoded as a stream,
+a slightly more efficient method is to use the `EncodeBuffer` method.
+This will take ownership of the buffer until the stream is closed.
+
+```Go
+func EncodeStream(src []byte, dst io.Writer) error {
+	enc := s2.NewWriter(dst)
+	// The encoder owns the buffer until Flush or Close is called.
+	err := enc.EncodeBuffer(src)
+	if err != nil {
+		enc.Close()
+		return err
+	}
+	// Blocks until compression is done.
+	return enc.Close()
+}
+```
+
+Each call to `EncodeBuffer` will result in discrete blocks being created without buffering,
+so it should only be used a single time per stream.
+If you need to write several blocks, you should use the regular io.Writer interface.
+
+
+## Decompression
+
+```Go
+func DecodeStream(src io.Reader, dst io.Writer) error {
+	dec := s2.NewReader(src)
+	_, err := io.Copy(dst, dec)
+	return err
+}
+```
+
+Similar to the Writer, a Reader can be reused using the `Reset` method.
+
+For the best possible throughput, there is an `EncodeBuffer(buf []byte)` function available.
+However, it requires that the provided buffer isn't used after it is handed over to S2 and until the stream is flushed or closed.
+
+For smaller data blocks, there is also a non-streaming interface: `Encode()`, `EncodeBetter()` and `Decode()`.
+Do however note that these functions (similar to Snappy) do not provide validation of data,
+so data corruption may be undetected. Stream encoding provides CRC checks of data.
+
+It is possible to efficiently skip forward in a compressed stream using the `Skip()` method.
+For big skips, the decompressor is able to skip blocks without decompressing them.
+
+## Single Blocks
+
+Similar to Snappy, S2 offers single block compression.
+Blocks do not offer the same flexibility and safety as streams,
+but may be preferable for very small payloads, less than 100K.
+
+Using a simple `dst := s2.Encode(nil, src)` will compress `src` and return the compressed result.
+It is possible to provide a destination buffer.
+If the buffer has a capacity of `s2.MaxEncodedLen(len(src))` it will be used.
+If not, a new one will be allocated.
+
+Alternatively, `EncodeBetter`/`EncodeBest` can also be used for better, but slightly slower compression.
+
+Similarly, to decompress a block you can use `dst, err := s2.Decode(nil, src)`.
+Again, an optional destination buffer can be supplied.
+The `s2.DecodedLen(src)` can be used to get the minimum capacity needed.
+If that is not satisfied, a new buffer will be allocated.
+
+Block functions always operate on a single goroutine, since they should only be used for small payloads.
+
+# Commandline tools
+
+Some very simple commandline tools are provided; `s2c` for compression and `s2d` for decompression.
+
+Binaries can be downloaded on the [Releases Page](https://github.com/klauspost/compress/releases).
+
+Installing them requires Go to be installed. To install them, use:
+
+`go install github.com/klauspost/compress/s2/cmd/s2c@latest && go install github.com/klauspost/compress/s2/cmd/s2d@latest`
+
+To build binaries to the current folder, use:
+
+`go build github.com/klauspost/compress/s2/cmd/s2c && go build github.com/klauspost/compress/s2/cmd/s2d`
+
+
+## s2c
+
+```
+Usage: s2c [options] file1 file2
+
+Compresses all files supplied as input separately.
+Output files are written as 'filename.ext.s2' or 'filename.ext.snappy'.
+By default output files will be overwritten.
+Use - as the only file name to read from stdin and write to stdout.
+
+Wildcards are accepted: testdir/*.txt will compress all files in testdir ending with .txt
+Directories can be wildcards as well. testdir/*/*.txt will match testdir/subdir/b.txt
+
+File names beginning with 'http://' and 'https://' will be downloaded and compressed.
+Only http response code 200 is accepted.
+
+Options:
+  -bench int
+    	Run benchmark n times. No output will be written
+  -blocksize string
+    	Max block size. Examples: 64K, 256K, 1M, 4M. Must be power of two and <= 4MB (default "4M")
+  -c	Write all output to stdout. Multiple input files will be concatenated
+  -cpu int
+    	Compress using this amount of threads (default 32)
+  -faster
+    	Compress faster, but with a minor compression loss
+  -help
+    	Display help
+  -index
+    	Add seek index (default true)
+  -o string
+    	Write output to another file. Single input file only
+  -pad string
+    	Pad size to a multiple of this value, Examples: 500, 64K, 256K, 1M, 4M, etc (default "1")
+  -q	Don't write any output to terminal, except errors
+  -rm
+    	Delete source file(s) after successful compression
+  -safe
+    	Do not overwrite output files
+  -slower
+    	Compress more, but a lot slower
+  -snappy
+    	Generate Snappy compatible output stream
+  -verify
+    	Verify written files
+
+```
+
+## s2d
+
+```
+Usage: s2d [options] file1 file2
+
+Decompresses all files supplied as input. Input files must end with '.s2' or '.snappy'.
+Output file names have the extension removed. By default output files will be overwritten.
+Use - as the only file name to read from stdin and write to stdout.
+
+Wildcards are accepted: testdir/*.txt will decompress all files in testdir ending with .txt
+Directories can be wildcards as well. testdir/*/*.txt will match testdir/subdir/b.txt
+
+File names beginning with 'http://' and 'https://' will be downloaded and decompressed.
+Extensions on downloaded files are ignored. Only http response code 200 is accepted.
+
+Options:
+  -bench int
+    	Run benchmark n times. No output will be written
+  -c	Write all output to stdout. Multiple input files will be concatenated
+  -help
+    	Display help
+  -o string
+    	Write output to another file. Single input file only
+  -offset string
+    	Start at offset. Examples: 92, 64K, 256K, 1M, 4M. Requires Index
+  -q	Don't write any output to terminal, except errors
+  -rm
+    	Delete source file(s) after successful decompression
+  -safe
+    	Do not overwrite output files
+  -tail string
+    	Return last of compressed file. Examples: 92, 64K, 256K, 1M, 4M. Requires Index
+  -verify
+    	Verify files, but do not write output
+```
+
+## s2sx: self-extracting archives
+
+s2sx allows creating self-extracting archives with no dependencies.
+
+By default, executables are created for the same platforms as the host OS,
+but this can be overridden with `-os` and `-arch` parameters.
+
+Extracted files have 0666 permissions, except when the untar option is used.
+
+```
+Usage: s2sx [options] file1 file2
+
+Compresses all files supplied as input separately.
+If files have '.s2' extension they are assumed to be compressed already.
+Output files are written as 'filename.s2sx' and with '.exe' for windows targets.
+If output is big, an additional file with ".more" is written. This must be included as well.
+By default output files will be overwritten.
+
+Wildcards are accepted: testdir/*.txt will compress all files in testdir ending with .txt
+Directories can be wildcards as well. testdir/*/*.txt will match testdir/subdir/b.txt
+
+Options:
+  -arch string
+    	Destination architecture (default "amd64")
+  -c	Write all output to stdout. Multiple input files will be concatenated
+  -cpu int
+    	Compress using this amount of threads (default 32)
+  -help
+    	Display help
+  -max string
+    	Maximum executable size. Rest will be written to another file. (default "1G")
+  -os string
+    	Destination operating system (default "windows")
+  -q	Don't write any output to terminal, except errors
+  -rm
+    	Delete source file(s) after successful compression
+  -safe
+    	Do not overwrite output files
+  -untar
+    	Untar on destination
+```
+
+Available platforms are:
+
+ * darwin-amd64
+ * darwin-arm64
+ * linux-amd64
+ * linux-arm
+ * linux-arm64
+ * linux-mips64
+ * linux-ppc64le
+ * windows-386
+ * windows-amd64
+
+By default, there is a size limit of 1GB for the output executable.
+
+When this is exceeded, the remaining file content is written to a file called
+output+`.more`. This file must be included and placed alongside the executable
+for a successful extraction.
+
+This file *must* have the same name as the executable, so if the executable is renamed,
+so must the `.more` file.
+
+This functionality is disabled with stdin/stdout.
+
+### Self-extracting TAR files
+
+If you wrap a TAR file, you can specify `-untar` to make it untar on the destination host.
+
+Files are extracted to the current folder with the path specified in the tar file.
+
+Note that tar files are not validated before they are wrapped.
+
+For security reasons, files that move below the root folder are not allowed.
+
+# Performance
+
+This section will focus on comparisons to Snappy.
+This package is solely aimed at replacing Snappy as a high speed compression package.
+If you are mainly looking for better compression, [zstandard](https://github.com/klauspost/compress/tree/master/zstd#zstd)
+gives better compression, but typically at speeds slightly below "better" mode in this package.
+
+Compression is increased compared to Snappy, mostly around 5-20%, and throughput is typically 25-40% higher (single threaded) compared to the Snappy Go implementation.
+
+Streams are concurrently compressed. The stream will be distributed among all available CPU cores for the best possible throughput.
+
+A "better" compression mode is also available. This allows you to trade a bit of speed for a minor compression gain.
+The content compressed in this mode is fully compatible with the standard decoder.
+
+Snappy vs S2 **compression** speed on a 16 core (32 thread) computer, using all threads and a single thread (1 CPU):
+
+| File | S2 Speed | S2 Throughput | S2 % smaller | S2 "better" | "better" throughput | "better" % smaller |
+|------|----------|---------------|--------------|-------------|---------------------|--------------------|
+| [rawstudio-mint14.tar](https://files.klauspost.com/compress/rawstudio-mint14.7z) | 16.33x | 10556 MB/s | 8.0% | 6.04x | 5252 MB/s | 14.7% |
+| (1 CPU) | 1.08x | 940 MB/s | - | 0.46x | 400 MB/s | - |
+| [github-june-2days-2019.json](https://files.klauspost.com/compress/github-june-2days-2019.json.zst) | 16.51x | 15224 MB/s | 31.70% | 9.47x | 8734 MB/s | 37.71% |
+| (1 CPU) | 1.26x | 1157 MB/s | - | 0.60x | 556 MB/s | - |
+| [github-ranks-backup.bin](https://files.klauspost.com/compress/github-ranks-backup.bin.zst) | 15.14x | 12598 MB/s | -5.76% | 6.23x | 5675 MB/s | 3.62% |
+| (1 CPU) | 1.02x | 932 MB/s | - | 0.47x | 432 MB/s | - |
+| [consensus.db.10gb](https://files.klauspost.com/compress/consensus.db.10gb.zst) | 11.21x | 12116 MB/s | 15.95% | 3.24x | 3500 MB/s | 18.00% |
+| (1 CPU) | 1.05x | 1135 MB/s | - | 0.27x | 292 MB/s | - |
+| [apache.log](https://files.klauspost.com/compress/apache.log.zst) | 8.55x | 16673 MB/s | 20.54% | 5.85x | 11420 MB/s | 24.97% |
+| (1 CPU) | 1.91x | 1771 MB/s | - | 0.53x | 1041 MB/s | - |
+| [gob-stream](https://files.klauspost.com/compress/gob-stream.7z) | 15.76x | 14357 MB/s | 24.01% | 8.67x | 7891 MB/s | 33.68% |
+| (1 CPU) | 1.17x | 1064 MB/s | - | 0.65x | 595 MB/s | - |
+| [10gb.tar](http://mattmahoney.net/dc/10gb.html) | 13.33x | 9835 MB/s | 2.34% | 6.85x | 4863 MB/s | 9.96% |
+| (1 CPU) | 0.97x | 689 MB/s | - | 0.55x | 387 MB/s | - |
+| sharnd.out.2gb | 9.11x | 13213 MB/s | 0.01% | 1.49x | 9184 MB/s | 0.01% |
+| (1 CPU) | 0.88x | 5418 MB/s | - | 0.77x | 5417 MB/s | - |
+| [sofia-air-quality-dataset csv](https://files.klauspost.com/compress/sofia-air-quality-dataset.tar.zst) | 22.00x | 11477 MB/s | 18.73% | 11.15x | 5817 MB/s | 27.88% |
+| (1 CPU) | 1.23x | 642 MB/s | - | 0.71x | 642 MB/s | - |
+| [silesia.tar](http://sun.aei.polsl.pl/~sdeor/corpus/silesia.zip) | 11.23x | 6520 MB/s | 5.9% | 5.35x | 3109 MB/s | 15.88% |
+| (1 CPU) | 1.05x | 607 MB/s | - | 0.52x | 304 MB/s | - |
+| [enwik9](https://files.klauspost.com/compress/enwik9.zst) | 19.28x | 8440 MB/s | 4.04% | 9.31x | 4076 MB/s | 18.04% |
+| (1 CPU) | 1.12x | 488 MB/s | - | 0.57x | 250 MB/s | - |
+
+### Legend
+
+* `S2 Speed`: Speed of S2 compared to Snappy, using 16 cores and 1 core.
+* `S2 Throughput`: Throughput of S2 in MB/s.
+* `S2 % smaller`: How much smaller the S2 output is than Snappy's, in percent.
+* `S2 "better"`: Speed when enabling "better" compression mode in S2 compared to Snappy.
+* `"better" throughput`: Throughput of S2 "better" mode in MB/s.
+* `"better" % smaller`: How much smaller the S2 "better" output is than Snappy's, in percent.
+
+There is a good speedup across the board when using a single thread and a significant speedup when using multiple threads.
+
+Machine-generated data gets by far the biggest compression boost, with output up to 35% smaller than Snappy's.
+
+The "better" compression mode sees a good improvement in all cases, but usually at a performance cost.
+
+Incompressible content (`sharnd.out.2gb`, 2GB random data) sees the smallest speedup.
+This is likely dominated by synchronization overhead, which is confirmed by the fact that single threaded performance is higher (see above).
+
+## Decompression
+
+S2 attempts to create content that is also fast to decompress, except in "better" mode, where the smallest representation is used.
+
+S2 vs Snappy **decompression** speed. Both operating on a single core:
+
+| File | S2 Throughput | vs. Snappy | Better Throughput | vs. Snappy |
+|------|---------------|------------|-------------------|------------|
+| [rawstudio-mint14.tar](https://files.klauspost.com/compress/rawstudio-mint14.7z) | 2117 MB/s | 1.14x | 1738 MB/s | 0.94x |
+| [github-june-2days-2019.json](https://files.klauspost.com/compress/github-june-2days-2019.json.zst) | 2401 MB/s | 1.25x | 2307 MB/s | 1.20x |
+| [github-ranks-backup.bin](https://files.klauspost.com/compress/github-ranks-backup.bin.zst) | 2075 MB/s | 0.98x | 1764 MB/s | 0.83x |
+| [consensus.db.10gb](https://files.klauspost.com/compress/consensus.db.10gb.zst) | 2967 MB/s | 1.05x | 2885 MB/s | 1.02x |
+| [adresser.json](https://files.klauspost.com/compress/adresser.json.zst) | 4141 MB/s | 1.07x | 4184 MB/s | 1.08x |
+| [gob-stream](https://files.klauspost.com/compress/gob-stream.7z) | 2264 MB/s | 1.12x | 2185 MB/s | 1.08x |
+| [10gb.tar](http://mattmahoney.net/dc/10gb.html) | 1525 MB/s | 1.03x | 1347 MB/s | 0.91x |
+| sharnd.out.2gb | 3813 MB/s | 0.79x | 3900 MB/s | 0.81x |
+| [enwik9](http://mattmahoney.net/dc/textdata.html) | 1246 MB/s | 1.29x | 967 MB/s | 1.00x |
+| [silesia.tar](http://sun.aei.polsl.pl/~sdeor/corpus/silesia.zip) | 1433 MB/s | 1.12x | 1203 MB/s | 0.94x |
+| [enwik10](https://encode.su/threads/3315-enwik10-benchmark-results) | 1284 MB/s | 1.32x | 1010 MB/s | 1.04x |
+
+### Legend
+
+* `S2 Throughput`: Decompression speed of S2 encoded content.
+* `Better Throughput`: Decompression speed of S2 "better" encoded content.
+* `vs. Snappy`: Decompression speed relative to Snappy decompressing the same content (shown for both regular and "better" output).
+
+
+While the decompression code hasn't changed, S2-encoded content decompresses significantly faster.
+S2 prefers longer matches and will typically only find matches that are 6 bytes or longer.
+While this reduces compression a bit, it improves decompression speed.
+
+The "better" compression mode will actively look for shorter matches, which is why it has a decompression speed quite similar to Snappy.
+
+Decompression is also very fast without assembly. Single goroutine decompression speed, no assembly:
+
+| File | Speedup | S2 Throughput |
+|--------------------------------|---------|---------------|
+| consensus.db.10gb.s2 | 1.84x | 2289.8 MB/s |
+| 10gb.tar.s2 | 1.30x | 867.07 MB/s |
+| rawstudio-mint14.tar.s2 | 1.66x | 1329.65 MB/s |
+| github-june-2days-2019.json.s2 | 2.36x | 1831.59 MB/s |
+| github-ranks-backup.bin.s2 | 1.73x | 1390.7 MB/s |
+| enwik9.s2 | 1.67x | 681.53 MB/s |
+| adresser.json.s2 | 3.41x | 4230.53 MB/s |
+| silesia.tar.s2 | 1.52x | 811.58 MB/s |
+
+Even though S2 typically compresses better than Snappy, decompression speed without assembly is always higher.
+
+### Concurrent Stream Decompression
+
+For full stream decompression, S2 offers a [DecodeConcurrent](https://pkg.go.dev/github.com/klauspost/compress/s2#Reader.DecodeConcurrent)
+that will decode a full stream using multiple goroutines.
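+
+A minimal sketch of wiring this up (assuming `DecodeConcurrent` takes the destination writer and a goroutine count, as documented at the link above):
+
+```Go
+func DecodeStreamConcurrent(src io.Reader, dst io.Writer, goroutines int) (int64, error) {
+	dec := s2.NewReader(src)
+	// Decode the full stream, using up to 'goroutines' concurrent decoders.
+	return dec.DecodeConcurrent(dst, goroutines)
+}
+```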
+
+Example scaling, AMD Ryzen 3950X, 16 cores, decompression using `s2d -bench=3 `, best of 3:
+
+| Input | `-cpu=1` | `-cpu=2` | `-cpu=4` | `-cpu=8` | `-cpu=16` |
+|-------------------------------------------|------------|------------|------------|------------|-------------|
+| enwik10.snappy | 1098.6MB/s | 1819.8MB/s | 3625.6MB/s | 6910.6MB/s | 10818.2MB/s |
+| enwik10.s2 | 1303.5MB/s | 2606.1MB/s | 4847.9MB/s | 8878.4MB/s | 9592.1MB/s |
+| sofia-air-quality-dataset.tar.snappy | 1302.0MB/s | 2165.0MB/s | 4244.5MB/s | 8241.0MB/s | 12920.5MB/s |
+| sofia-air-quality-dataset.tar.s2 | 1399.2MB/s | 2463.2MB/s | 5196.5MB/s | 9639.8MB/s | 11439.5MB/s |
+| sofia-air-quality-dataset.tar.s2 (no asm) | 837.5MB/s | 1652.6MB/s | 3183.6MB/s | 5945.0MB/s | 9620.7MB/s |
+
+Scaling can be expected to be pretty linear until memory bandwidth is saturated.
+
+For now, `DecodeConcurrent` can only be used for full streams without seeking or combining with regular reads.
+
+## Block compression
+
+
+When compressing blocks, no concurrent compression is performed, just as with Snappy.
+This is because blocks are for smaller payloads and generally will not benefit from concurrent compression.
+
+An important change is that incompressible blocks will be at most 10 bytes bigger than the input.
+In rare worst-case scenarios, Snappy blocks could be significantly bigger than the input.
+
+### Mixed content blocks
+
+The most reliable benchmark is a wide dataset.
+For this we use [`webdevdata.org-2015-01-07-subset`](https://files.klauspost.com/compress/webdevdata.org-2015-01-07-4GB-subset.7z),
+53927 files, total input size: 4,014,735,833 bytes. Single goroutine used.
+
+| * | Input | Output | Reduction | MB/s |
+|-------------------|------------|------------|------------|------------|
+| S2 | 4014735833 | 1059723369 | 73.60% | **936.73** |
+| S2 Better | 4014735833 | 961580539 | 76.05% | 451.10 |
+| S2 Best | 4014735833 | 899182886 | **77.60%** | 46.84 |
+| Snappy | 4014735833 | 1128706759 | 71.89% | 790.15 |
+| S2, Snappy Output | 4014735833 | 1093823291 | 72.75% | 936.60 |
+| LZ4 | 4014735833 | 1063768713 | 73.50% | 452.02 |
+
+S2 delivers both the best single threaded throughput with regular mode and the best compression rate with "best".
+"Better" mode provides the same compression speed as LZ4 with a better compression ratio.
+
+When outputting Snappy compatible content, it still delivers better throughput (150MB/s more) and better compression.
+
+As can be seen from the other benchmarks, decompression should also be easier on the S2 generated output.
+
+Though they cannot be compared due to different decompression speeds, here are the speed/size comparisons for
+other Go compressors:
+
+| * | Input | Output | Reduction | MB/s |
+|-------------------|------------|------------|-----------|--------|
+| Zstd Fastest (Go) | 4014735833 | 794608518 | 80.21% | 236.04 |
+| Zstd Best (Go) | 4014735833 | 704603356 | 82.45% | 35.63 |
+| Deflate (Go) l1 | 4014735833 | 871294239 | 78.30% | 214.04 |
+| Deflate (Go) l9 | 4014735833 | 730389060 | 81.81% | 41.17 |
+
+### Standard block compression
+
+Benchmarking single block performance is subject to a lot more variation since it only tests a limited number of file patterns.
+So individual benchmarks should only be seen as a guideline, and the overall picture is more important.
+
+These micro-benchmarks are with data in cache and trained branch predictors. For a more realistic benchmark see the mixed content above.
+
+Block compression. Parallel benchmark running on 16 cores, 16 goroutines.
+
+AMD64 assembly is used for both S2 and Snappy.
+
+| Absolute Perf | Snappy size | S2 Size | Snappy Speed | S2 Speed | Snappy dec | S2 dec |
+|-----------------------|-------------|---------|--------------|-------------|-------------|-------------|
+| html | 22843 | 20868 | 16246 MB/s | 18617 MB/s | 40972 MB/s | 49263 MB/s |
+| urls.10K | 335492 | 286541 | 7943 MB/s | 10201 MB/s | 22523 MB/s | 26484 MB/s |
+| fireworks.jpeg | 123034 | 123100 | 349544 MB/s | 303228 MB/s | 718321 MB/s | 827552 MB/s |
+| fireworks.jpeg (200B) | 146 | 155 | 8869 MB/s | 20180 MB/s | 33691 MB/s | 52421 MB/s |
+| paper-100k.pdf | 85304 | 84202 | 167546 MB/s | 112988 MB/s | 326905 MB/s | 291944 MB/s |
+| html_x_4 | 92234 | 20870 | 15194 MB/s | 54457 MB/s | 30843 MB/s | 32217 MB/s |
+| alice29.txt | 88034 | 85934 | 5936 MB/s | 6540 MB/s | 12882 MB/s | 20044 MB/s |
+| asyoulik.txt | 77503 | 79575 | 5517 MB/s | 6657 MB/s | 12735 MB/s | 22806 MB/s |
+| lcet10.txt | 234661 | 220383 | 6235 MB/s | 6303 MB/s | 14519 MB/s | 18697 MB/s |
+| plrabn12.txt | 319267 | 318196 | 5159 MB/s | 6074 MB/s | 11923 MB/s | 19901 MB/s |
+| geo.protodata | 23335 | 18606 | 21220 MB/s | 25432 MB/s | 56271 MB/s | 62540 MB/s |
+| kppkn.gtb | 69526 | 65019 | 9732 MB/s | 8905 MB/s | 18491 MB/s | 18969 MB/s |
+| alice29.txt (128B) | 80 | 82 | 6691 MB/s | 17179 MB/s | 31883 MB/s | 38874 MB/s |
+| alice29.txt (1000B) | 774 | 774 | 12204 MB/s | 13273 MB/s | 48056 MB/s | 52341 MB/s |
+| alice29.txt (10000B) | 6648 | 6933 | 10044 MB/s | 12824 MB/s | 32378 MB/s | 46322 MB/s |
+| alice29.txt (20000B) | 12686 | 13516 | 7733 MB/s | 12160 MB/s | 30566 MB/s | 58969 MB/s |
+
+
+Speed is generally at or above Snappy. Small blocks get a significant speedup, although at the expense of size.
+
+Decompression speed is better than Snappy, except in one case.
+
+Since payloads are very small, the variance in size is rather big, so these numbers should only be seen as a general guideline.
+
+Size is on average around Snappy's, but varies with content type.
+In cases where compression is worse, it is usually compensated for by a speed boost.
+
+
+### Better compression
+
+Benchmarking single block performance is subject to a lot more variation since it only tests a limited number of file patterns.
+So individual benchmarks should only be seen as a guideline, and the overall picture is more important.
+ +| Absolute Perf | Snappy size | Better Size | Snappy Speed | Better Speed | Snappy dec | Better dec | +|-----------------------|-------------|-------------|--------------|--------------|-------------|-------------| +| html | 22843 | 18972 | 16246 MB/s | 8621 MB/s | 40972 MB/s | 40292 MB/s | +| urls.10K | 335492 | 248079 | 7943 MB/s | 5104 MB/s | 22523 MB/s | 20981 MB/s | +| fireworks.jpeg | 123034 | 123100 | 349544 MB/s | 84429 MB/s | 718321 MB/s | 823698 MB/s | +| fireworks.jpeg (200B) | 146 | 149 | 8869 MB/s | 7125 MB/s | 33691 MB/s | 30101 MB/s | +| paper-100k.pdf | 85304 | 82887 | 167546 MB/s | 11087 MB/s | 326905 MB/s | 198869 MB/s | +| html_x_4 | 92234 | 18982 | 15194 MB/s | 29316 MB/s | 30843 MB/s | 30937 MB/s | +| alice29.txt | 88034 | 71611 | 5936 MB/s | 3709 MB/s | 12882 MB/s | 16611 MB/s | +| asyoulik.txt | 77503 | 65941 | 5517 MB/s | 3380 MB/s | 12735 MB/s | 14975 MB/s | +| lcet10.txt | 234661 | 184939 | 6235 MB/s | 3537 MB/s | 14519 MB/s | 16634 MB/s | +| plrabn12.txt | 319267 | 264990 | 5159 MB/s | 2960 MB/s | 11923 MB/s | 13382 MB/s | +| geo.protodata | 23335 | 17689 | 21220 MB/s | 10859 MB/s | 56271 MB/s | 57961 MB/s | +| kppkn.gtb | 69526 | 55398 | 9732 MB/s | 5206 MB/s | 18491 MB/s | 16524 MB/s | +| alice29.txt (128B) | 80 | 78 | 6691 MB/s | 7422 MB/s | 31883 MB/s | 34225 MB/s | +| alice29.txt (1000B) | 774 | 746 | 12204 MB/s | 5734 MB/s | 48056 MB/s | 42068 MB/s | +| alice29.txt (10000B) | 6648 | 6218 | 10044 MB/s | 6055 MB/s | 32378 MB/s | 28813 MB/s | +| alice29.txt (20000B) | 12686 | 11492 | 7733 MB/s | 3143 MB/s | 30566 MB/s | 27315 MB/s | + + +Except for the mostly incompressible JPEG image compression is better and usually in the +double digits in terms of percentage reduction over Snappy. + +The PDF sample shows a significant slowdown compared to Snappy, as this mode tries harder +to compress the data. Very small blocks are also not favorable for better compression, so throughput is way down. + +This mode aims to provide better compression at the expense of performance and achieves that +without a huge performance penalty, except on very small blocks. + +Decompression speed suffers a little compared to the regular S2 mode, +but still manages to be close to Snappy in spite of increased compression. + +# Best compression mode + +S2 offers a "best" compression mode. + +This will compress as much as possible with little regard to CPU usage. + +Mainly for offline compression, but where decompression speed should still +be high and compatible with other S2 compressed data. + +Some examples compared on 16 core CPU, amd64 assembly used: + +``` +* enwik10 +Default... 10000000000 -> 4759950115 [47.60%]; 1.03s, 9263.0MB/s +Better... 10000000000 -> 4084706676 [40.85%]; 2.16s, 4415.4MB/s +Best... 10000000000 -> 3615520079 [36.16%]; 42.259s, 225.7MB/s + +* github-june-2days-2019.json +Default... 6273951764 -> 1041700255 [16.60%]; 431ms, 13882.3MB/s +Better... 6273951764 -> 945841238 [15.08%]; 547ms, 10938.4MB/s +Best... 6273951764 -> 826392576 [13.17%]; 9.455s, 632.8MB/s + +* nyc-taxi-data-10M.csv +Default... 3325605752 -> 1093516949 [32.88%]; 324ms, 9788.7MB/s +Better... 3325605752 -> 885394158 [26.62%]; 491ms, 6459.4MB/s +Best... 3325605752 -> 773681257 [23.26%]; 8.29s, 412.0MB/s + +* 10gb.tar +Default... 10065157632 -> 5915541066 [58.77%]; 1.028s, 9337.4MB/s +Better... 10065157632 -> 5453844650 [54.19%]; 1.597s, 4862.7MB/s +Best... 10065157632 -> 5192495021 [51.59%]; 32.78s, 308.2MB/ + +* consensus.db.10gb +Default... 
10737418240 -> 4549762344 [42.37%]; 882ms, 12118.4MB/s
+Better... 10737418240 -> 4438535064 [41.34%]; 1.533s, 3500.9MB/s
+Best... 10737418240 -> 4210602774 [39.21%]; 42.96s, 254.4MB/s
+```
+
+Decompression speed should be around the same as using the 'better' compression mode.
+
+## Dictionaries
+
+*Note: S2 dictionary compression is currently at an early implementation stage, with no assembly for
+either encoding or decoding. Performance improvements can be expected in the future.*
+
+Adding dictionaries allows providing a custom dictionary that will serve as a lookup at the beginning of blocks.
+
+The same dictionary *must* be used for both encoding and decoding.
+S2 does not keep track of whether the same dictionary is used,
+and using the wrong dictionary will most often not result in an error when decompressing.
+
+Blocks encoded *without* dictionaries can be decompressed seamlessly *with* a dictionary.
+This means it is possible to switch from an encoding without dictionaries to an encoding with dictionaries
+and treat the blocks similarly.
+
+Similar to [zStandard dictionaries](https://github.com/facebook/zstd#the-case-for-small-data-compression),
+the same usage scenario applies to S2 dictionaries.
+
+> Training works if there is some correlation in a family of small data samples. The more data-specific a dictionary is, the more efficient it is (there is no universal dictionary). Hence, deploying one dictionary per type of data will provide the greatest benefits. Dictionary gains are mostly effective in the first few KB. Then, the compression algorithm will gradually use previously decoded content to better compress the rest of the file.
+
+S2 further limits the dictionary to only be enabled on the first 64KB of a block.
+This removes any negative (speed) impact of the dictionaries on bigger blocks.
+
+### Compression
+
+Using the [github_users_sample_set](https://github.com/facebook/zstd/releases/download/v1.1.3/github_users_sample_set.tar.zst)
+and a 64KB dictionary trained with zStandard, the following sizes can be achieved.
+
+| | Default | Better | Best |
+|--------------------|------------------|------------------|-----------------------|
+| Without Dictionary | 3362023 (44.92%) | 3083163 (41.19%) | 3057944 (40.86%) |
+| With Dictionary | 921524 (12.31%) | 873154 (11.67%) | 785503 bytes (10.49%) |
+
+So for highly repetitive content, this case provides more than a 3x reduction in size.
+
+For less uniform data we will use the Go source code tree.
+Compressing the first 64KB of all `.go` files in `go/src`, Go 1.19.5, 8912 files, 51253563 bytes input:
+
+| | Default | Better | Best |
+|--------------------|-------------------|-------------------|-------------------|
+| Without Dictionary | 22955767 (44.79%) | 20189613 (39.39%) | 19482828 (38.01%) |
+| With Dictionary | 19654568 (38.35%) | 16289357 (31.78%) | 15184589 (29.63%) |
+| Saving/file | 362 bytes | 428 bytes | 472 bytes |
+
+
+### Creating Dictionaries
+
+There are no tools to create dictionaries in S2.
+However, there are multiple ways to create a useful dictionary:
+
+#### Using a Sample File
+
+If your input is very uniform, you can just use a sample file as the dictionary.
+
+For example in the `github_users_sample_set` above, the average compression only goes up from
+10.49% to 11.48% by using the first file as the dictionary compared to using a dedicated dictionary.
+
+```Go
+	// Read a sample
+	sample, err := os.ReadFile("sample.json")
+
+	// Create a dictionary.
+	dict := s2.MakeDict(sample, nil)
+
+	// b := dict.Bytes() will provide a dictionary that can be saved
+	// and reloaded with s2.NewDict(b).
+
+	// To encode:
+	encoded := dict.Encode(nil, file)
+
+	// To decode:
+	decoded, err := dict.Decode(nil, file)
+```
+
+#### Using Zstandard
+
+Zstandard dictionaries can easily be converted to S2 dictionaries.
+
+This can be helpful for generating dictionaries for files that don't have a fixed structure.
+
+
+For example, with training set files placed in `./training-set`:
+
+`λ zstd -r --train-fastcover training-set/* --maxdict=65536 -o name.dict`
+
+This will create a 64KB dictionary that can be converted to an S2 dictionary like this:
+
+```Go
+	// Decode the Zstandard dictionary.
+	insp, err := zstd.InspectDictionary(zdict)
+	if err != nil {
+		panic(err)
+	}
+
+	// We are only interested in the contents.
+	// Assume that files start with "// Copyright (c) 2023".
+	// Search for the longest match for that.
+	// This may save a few bytes.
+	dict := s2.MakeDict(insp.Content(), []byte("// Copyright (c) 2023"))
+
+	// b := dict.Bytes() will provide a dictionary that can be saved
+	// and reloaded with s2.NewDict(b).
+
+	// We can now encode using this dictionary
+	encodedWithDict := dict.Encode(nil, payload)
+
+	// To decode content:
+	decoded, err := dict.Decode(nil, encodedWithDict)
+```
+
+It is recommended to save the dictionary returned by `b := dict.Bytes()`, since that will contain only the S2 dictionary.
+
+This dictionary can later be loaded using `s2.NewDict(b)`. The dictionary then no longer requires `zstd` to be initialized.
+
+Also note how `s2.MakeDict` allows you to search for a common starting sequence in your files.
+This can be omitted, at the expense of a few bytes.
+
+# Snappy Compatibility
+
+S2 now offers full compatibility with Snappy.
+
+This means that the efficient encoders of S2 can be used to generate fully Snappy compatible output.
+
+There is a [snappy](https://github.com/klauspost/compress/tree/master/snappy) package that can be used by
+simply changing imports from `github.com/golang/snappy` to `github.com/klauspost/compress/snappy`.
+This uses "better" mode for all operations.
+If you would like more control, you can use the s2 package as described below:
+
+## Blocks
+
+Snappy compatible blocks can be generated with the S2 encoder.
+Compression and speed are typically a bit better, and `MaxEncodedLen` is also smaller, reducing memory usage. Replace:
+
+| Snappy | S2 replacement |
+|---------------------------|-----------------------|
+| snappy.Encode(...) | s2.EncodeSnappy(...) |
+| snappy.MaxEncodedLen(...) | s2.MaxEncodedLen(...) |
+
+`s2.EncodeSnappy` can be replaced with `s2.EncodeSnappyBetter` or `s2.EncodeSnappyBest` to get more efficiently compressed Snappy compatible output.
+
+`s2.ConcatBlocks` is compatible with Snappy blocks.
+
+Comparison of [`webdevdata.org-2015-01-07-subset`](https://files.klauspost.com/compress/webdevdata.org-2015-01-07-4GB-subset.7z),
+53927 files, total input size: 4,014,735,833 bytes. amd64, single goroutine used:
+
+| Encoder | Size | MB/s | Reduction |
+|-----------------------|------------|------------|------------|
+| snappy.Encode | 1128706759 | 725.59 | 71.89% |
+| s2.EncodeSnappy | 1093823291 | **899.16** | 72.75% |
+| s2.EncodeSnappyBetter | 1001158548 | 578.49 | 75.06% |
+| s2.EncodeSnappyBest | 944507998 | 66.00 | **76.47%** |
+
+## Streams
+
+For streams, replace `enc = snappy.NewBufferedWriter(w)` with `enc = s2.NewWriter(w, s2.WriterSnappyCompat())`.
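+
+For example (a minimal sketch; `w` is assumed to be an `io.Writer` receiving the compressed stream and `r` an `io.Reader` providing the input):
+
+```
+	// Write a stream that Snappy decoders can read...
+	enc := s2.NewWriter(w, s2.WriterSnappyCompat())
+	io.Copy(enc, r)
+	// Close to flush any buffered data.
+	enc.Close()
+```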
+All other options are available, but note that the block size limit is different for Snappy.
+
+Comparison of different streams, AMD Ryzen 3950x, 16 cores. Size and throughput:
+
+| File | snappy.NewWriter | S2 Snappy | S2 Snappy, Better | S2 Snappy, Best |
+|-----------------------------|--------------------------|---------------------------|--------------------------|-------------------------|
+| nyc-taxi-data-10M.csv | 1316042016 - 539.47MB/s | 1307003093 - 10132.73MB/s | 1174534014 - 5002.44MB/s | 1115904679 - 177.97MB/s |
+| enwik10 (xml) | 5088294643 - 451.13MB/s | 5175840939 - 9440.69MB/s | 4560784526 - 4487.21MB/s | 4340299103 - 158.92MB/s |
+| 10gb.tar (mixed) | 6056946612 - 729.73MB/s | 6208571995 - 9978.05MB/s | 5741646126 - 4919.98MB/s | 5548973895 - 180.44MB/s |
+| github-june-2days-2019.json | 1525176492 - 933.00MB/s | 1476519054 - 13150.12MB/s | 1400547532 - 5803.40MB/s | 1321887137 - 204.29MB/s |
+| consensus.db.10gb (db) | 5412897703 - 1102.14MB/s | 5354073487 - 13562.91MB/s | 5335069899 - 5294.73MB/s | 5201000954 - 175.72MB/s |
+
+# Decompression
+
+All decompression functions map directly to equivalent s2 functions.
+
+| Snappy | S2 replacement |
+|------------------------|--------------------|
+| snappy.Decode(...) | s2.Decode(...) |
+| snappy.DecodedLen(...) | s2.DecodedLen(...) |
+| snappy.NewReader(...) | s2.NewReader(...) |
+
+Features like [quick forward skipping without decompression](https://pkg.go.dev/github.com/klauspost/compress/s2#Reader.Skip)
+are also available for Snappy streams.
+
+If you know you are only decompressing snappy streams, setting [`ReaderMaxBlockSize(64<<10)`](https://pkg.go.dev/github.com/klauspost/compress/s2#ReaderMaxBlockSize)
+on your Reader will reduce memory consumption.
+
+# Concatenating blocks and streams.
+
+Concatenating streams will concatenate the output of both without recompressing them.
+While this is inefficient in terms of compression, it might be usable in certain scenarios.
+The 10 byte 'stream identifier' of the second stream can optionally be stripped, but it is not a requirement.
+
+Blocks can be concatenated using the `ConcatBlocks` function.
+
+Snappy blocks/streams can safely be concatenated with S2 blocks and streams.
+Streams with indexes (see below) will currently not work on concatenated streams.
+
+# Stream Seek Index
+
+S2 and Snappy streams can have indexes. These indexes will allow random seeking within the compressed data.
+
+The index can either be appended to the stream as a skippable block or returned for separate storage.
+
+When the index is appended to a stream it will be skipped by regular decoders,
+so the output remains compatible with other decoders.
+
+## Creating an Index
+
+To automatically add an index to a stream, add the `WriterAddIndex()` option to your writer.
+The index will then be added to the stream when `Close()` is called.
+
+```
+	// Add Index to stream...
+	enc := s2.NewWriter(w, s2.WriterAddIndex())
+	io.Copy(enc, r)
+	enc.Close()
+```
+
+If you want to store the index separately, you can use `CloseIndex()` instead of the regular `Close()`.
+This will return the index. Note that `CloseIndex()` should only be called once, and you shouldn't call `Close()`.
+
+```
+	// Get index for separate storage...
+	enc := s2.NewWriter(w)
+	io.Copy(enc, r)
+	index, err := enc.CloseIndex()
+```
+
+The `index` can then be used when reading from the stream.
+This means the index can be used without needing to seek to the end of the stream
+or for manually forwarding streams. See below.
+
+Finally, an existing S2/Snappy stream can be indexed using the `s2.IndexStream(r io.Reader)` function.
+
+## Using Indexes
+
+To use indexes, there is a `ReadSeeker(random bool, index []byte) (*ReadSeeker, error)` function available.
+
+Calling `ReadSeeker` will return an [io.ReadSeeker](https://pkg.go.dev/io#ReadSeeker) compatible version of the reader.
+
+If 'random' is specified, the returned io.Seeker can be used for random seeking; otherwise only forward seeking is supported.
+Enabling random seeking requires the original input to support the [io.Seeker](https://pkg.go.dev/io#Seeker) interface.
+
+```
+	dec := s2.NewReader(r)
+	rs, err := dec.ReadSeeker(false, nil)
+	rs.Seek(wantOffset, io.SeekStart)
+```
+
+This gets a seeker that can seek forward. Since no index is provided, the index is read from the stream.
+This requires that an index was added and that `r` supports the [io.Seeker](https://pkg.go.dev/io#Seeker) interface.
+
+A custom index can be specified, which will be used if supplied.
+When using a custom index, it will not be read from the input stream.
+
+```
+	dec := s2.NewReader(r)
+	rs, err := dec.ReadSeeker(false, index)
+	rs.Seek(wantOffset, io.SeekStart)
+```
+
+This will read the index from `index`. Since we specify non-random (forward-only) seeking, `r` does not have to be an io.Seeker.
+
+```
+	dec := s2.NewReader(r)
+	rs, err := dec.ReadSeeker(true, index)
+	rs.Seek(wantOffset, io.SeekStart)
+```
+
+Finally, since we specify that we want to do random seeking, `r` must be an io.Seeker.
+
+The returned [ReadSeeker](https://pkg.go.dev/github.com/klauspost/compress/s2#ReadSeeker) contains a shallow reference to the existing Reader,
+meaning changes performed to one are reflected in the other.
+
+To check if a stream contains an index at the end, the `(*Index).LoadStream(rs io.ReadSeeker) error` function can be used.
+
+## Manually Forwarding Streams
+
+Indexes can also be read outside the decoder using the [Index](https://pkg.go.dev/github.com/klauspost/compress/s2#Index) type.
+This can be used for parsing indexes, either separate or in streams.
+
+In some cases it may not be possible to serve a seekable stream.
+This can for instance be an HTTP stream, where the Range request
+is sent at the start of the stream.
+
+With a little bit of extra code it is still possible to use indexes
+to forward to a specific offset with a single forward skip.
+
+It is possible to load the index manually like this:
+```
+	var index s2.Index
+	_, err = index.Load(idxBytes)
+```
+
+This can be used to figure out how much to offset the compressed stream:
+
+```
+	compressedOffset, uncompressedOffset, err := index.Find(wantOffset)
+```
+
+The `compressedOffset` is the number of bytes that should be skipped
+from the beginning of the compressed file.
+
+The `uncompressedOffset` will then be the offset of the uncompressed bytes returned
+when decoding from that position. This will always be <= `wantOffset`.
+
+When creating a decoder it must be specified that it should *not* expect a stream identifier
+at the beginning of the stream. Assuming the io.Reader `r` has been forwarded to `compressedOffset`,
+we create the decoder like this:
+
+```
+	dec := s2.NewReader(r, s2.ReaderIgnoreStreamIdentifier())
+```
+
+We are not completely done. We still need to forward the stream past the uncompressed bytes we don't want.
+This is done using the regular "Skip" function:
+
+```
+	err = dec.Skip(wantOffset - uncompressedOffset)
+```
+
+This will ensure that we are at exactly the offset we want, and reading from `dec` will start at the requested offset.
+
+# Compact storage
+
+For compact storage [RemoveIndexHeaders](https://pkg.go.dev/github.com/klauspost/compress/s2#RemoveIndexHeaders) can be used to remove any redundant info from
+a serialized index. If you remove the header, it must be restored before [Loading](https://pkg.go.dev/github.com/klauspost/compress/s2#Index.Load).
+
+This is expected to save 20 bytes. The headers can be restored using [RestoreIndexHeaders](https://pkg.go.dev/github.com/klauspost/compress/s2#RestoreIndexHeaders),
+which returns nil if the headers contain errors. This removes a layer of security, but is the most compact representation.
+
+## Index Format:
+
+Each block is structured as a snappy skippable block, with the chunk ID 0x99.
+
+The block can be read from the front, but contains information so it can be read from the back as well.
+
+Numbers are stored as fixed-size little-endian values or [zigzag encoded](https://developers.google.com/protocol-buffers/docs/encoding#signed_integers) [base 128 varints](https://developers.google.com/protocol-buffers/docs/encoding),
+with an un-encoded value length of 64 bits, unless other limits are specified.
+
+| Content | Format |
+|--------------------------------------|-------------------------------------------------------------------------------------------------------------------------------|
+| ID, `[1]byte` | Always 0x99. |
+| Data Length, `[3]byte` | 3 byte little-endian length of the chunk in bytes, following this. |
+| Header `[6]byte` | Header, must be `[115, 50, 105, 100, 120, 0]` or in text: "s2idx\x00". |
+| UncompressedSize, Varint | Total Uncompressed size. |
+| CompressedSize, Varint | Total Compressed size if known. Should be -1 if unknown. |
+| EstBlockSize, Varint | Block Size, used for guessing uncompressed offsets. Must be >= 0. |
+| Entries, Varint | Number of Entries in index, must be < 65536 and >=0. |
+| HasUncompressedOffsets `byte` | 0 if no uncompressed offsets are present, 1 if present. Other values are invalid. |
+| UncompressedOffsets, [Entries]VarInt | Uncompressed offsets. See below how to decode. |
+| CompressedOffsets, [Entries]VarInt | Compressed offsets. See below how to decode. |
+| Block Size, `[4]byte` | Little Endian total encoded size (including header and trailer). Can be used for searching backwards to start of block. |
+| Trailer `[6]byte` | Trailer, must be `[0, 120, 100, 105, 50, 115]` or in text: "\x00xdi2s". Can be used for identifying block from end of stream. |
+
+For regular streams the uncompressed offsets are fully predictable,
+so `HasUncompressedOffsets` allows specifying that compressed blocks all have
+exactly `EstBlockSize` bytes of uncompressed content.
+
+Entries *must* be in order, starting with the lowest offset,
+and there *must* be no uncompressed offset duplicates.
+Entries *may* point to the start of a skippable block,
+but it is then not allowed to also have an entry for the next block since
+that would give an uncompressed offset duplicate.
+
+There is no requirement for all blocks to be represented in the index.
+In fact, there is a maximum of 65536 block entries in an index.
+
+The writer can use any method to reduce the number of entries.
+An implicit block start at 0,0 can be assumed.
+
+### Decoding entries:
+
+```
+// Read Uncompressed entries.
+// Each assumes EstBlockSize delta from previous.
+for each entry {
+    uOff = 0
+    if HasUncompressedOffsets == 1 {
+        uOff = ReadVarInt // Read value from stream
+    }
+
+    // Except for the first entry, use previous values.
+    if entryNum == 0 {
+        entry[entryNum].UncompressedOffset = uOff
+        continue
+    }
+
+    // Uncompressed uses previous offset and adds EstBlockSize
+    entry[entryNum].UncompressedOffset = entry[entryNum-1].UncompressedOffset + EstBlockSize + uOff
+}
+
+
+// Guess that the first block will be 50% of uncompressed size.
+// Integer truncating division must be used.
+CompressGuess := EstBlockSize / 2
+
+// Read Compressed entries.
+// Each assumes CompressGuess delta from previous.
+// CompressGuess is adjusted for each value.
+for each entry {
+    cOff = ReadVarInt // Read value from stream
+
+    // Except for the first entry, use previous values.
+    if entryNum == 0 {
+        entry[entryNum].CompressedOffset = cOff
+        continue
+    }
+
+    // Compressed uses previous and our estimate.
+    entry[entryNum].CompressedOffset = entry[entryNum-1].CompressedOffset + CompressGuess + cOff
+
+    // Adjust compressed offset for next loop, integer truncating division must be used.
+    CompressGuess += cOff/2
+}
+```
+
+To decode from any given uncompressed offset `(wantOffset)`:
+
+* Iterate entries until `entry[n].UncompressedOffset > wantOffset`.
+* Start decoding from `entry[n-1].CompressedOffset`.
+* Discard `wantOffset - entry[n-1].UncompressedOffset` bytes from the decoded stream.
+
+See [using indexes](https://github.com/klauspost/compress/tree/master/s2#using-indexes) for functions that perform the operations with a simpler interface.
+
+
+# Format Extensions
+
+* Frame [Stream identifier](https://github.com/google/snappy/blob/master/framing_format.txt#L68) changed from `sNaPpY` to `S2sTwO`.
+* [Framed compressed blocks](https://github.com/google/snappy/blob/master/format_description.txt) can be up to 4MB (up from 64KB).
+* Compressed blocks can have an offset of `0`, which indicates to repeat the last seen offset.
+
+Repeat offsets must be encoded as a [2.2.1. Copy with 1-byte offset (01)](https://github.com/google/snappy/blob/master/format_description.txt#L89), where the offset is 0.
+
+The length is specified by reading the 3-bit length specified in the tag and decoding it using this table:
+
+| Length | Actual Length |
+|--------|----------------------|
+| 0 | 4 |
+| 1 | 5 |
+| 2 | 6 |
+| 3 | 7 |
+| 4 | 8 |
+| 5 | 8 + read 1 byte |
+| 6 | 260 + read 2 bytes |
+| 7 | 65540 + read 3 bytes |
+
+This allows any repeat offset + length to be represented by 2 to 5 bytes.
+It also allows emitting matches longer than 64 bytes with one copy + one repeat instead of several 64 byte copies.
+
+Lengths are stored as little endian values.
+
+The first copy of a block cannot be a repeat offset and the offset is reset on every block in streams.
+
+Default streaming block size is 1MB.
+
+# Dictionary Encoding
+
+Adding dictionaries allows providing a custom dictionary that will serve as a lookup at the beginning of blocks.
+
+A dictionary provides an initial repeat value that can be used to point to a common header.
+
+Other than that, the dictionary contains values that can be used as back-references.
+
+Often-used data should be placed at the *end* of the dictionary, since offsets < 2048 bytes will be smaller.
+
+## Format
+
+Dictionary *content* must be at least 16 bytes and less than or equal to 64KiB (65536 bytes).
+
+Encoding: `[repeat value (uvarint)][dictionary content...]`
+
+Before the dictionary content comes an unsigned base-128 (uvarint) encoded value specifying the initial repeat offset.
+This value is an offset into the dictionary content and not a back-reference offset,
+so setting this to 0 will make the repeat value point to the first value of the dictionary.
+
+The value must be less than the dictionary length minus 8.
+
+## Encoding
+
+From the decoder's point of view, the dictionary content is seen as preceding the encoded content.
+
+`[dictionary content][decoded output]`
+
+Backreferences to the dictionary are encoded as ordinary backreferences that have an offset before the start of the decoded block.
+
+Matches copying from the dictionary are **not** allowed to cross from the dictionary into the decoded data.
+However, if a copy ends at the end of the dictionary, the next repeat will point to the start of the decoded buffer, which is allowed.
+
+The first match can be a repeat value, which will use the repeat offset stored in the dictionary.
+
+When 64KB (65536 bytes) has been en/decoded, it is no longer allowed to reference the dictionary,
+neither by copy nor repeat operations.
+If the boundary is crossed while copying from the dictionary, the operation should complete,
+but the next instruction is not allowed to reference the dictionary.
+
+Valid blocks encoded *without* a dictionary can be decoded with any dictionary.
+There are no checks whether the supplied dictionary is the correct one for a block.
+Because of this, there is no overhead to using a dictionary.
+
+## Example
+
+This is the dictionary content. Elements are separated by `[]`.
+
+Dictionary: `[0x0a][Yesterday 25 bananas were added to Benjamins brown bag]`.
+
+Initial repeat offset is set at 10, which is the letter `2`.
+
+Encoded: `[LIT "10"][REPEAT len=10][LIT "hich"][MATCH off=50 len=6][MATCH off=31 len=6][MATCH off=61 len=10]`
+
+Decoded: `[10][ bananas w][hich][ were ][brown ][were added]`
+
+Output: `10 bananas which were brown were added`
+
+
+## Streams
+
+For streams, each block can use the dictionary.
+
+The dictionary cannot currently be provided on the stream.
+
+
+# LICENSE
+
+This code is based on the [Snappy-Go](https://github.com/golang/snappy) implementation.
+
+Use of this source code is governed by a BSD-style license that can be found in the LICENSE file.
diff --git a/vendor/github.com/klauspost/compress/s2/decode.go b/vendor/github.com/klauspost/compress/s2/decode.go
new file mode 100644
index 0000000000..264ffd0a9b
--- /dev/null
+++ b/vendor/github.com/klauspost/compress/s2/decode.go
@@ -0,0 +1,443 @@
+// Copyright 2011 The Snappy-Go Authors. All rights reserved.
+// Copyright (c) 2019 Klaus Post. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package s2
+
+import (
+	"encoding/binary"
+	"errors"
+	"fmt"
+	"strconv"
+
+	"github.com/klauspost/compress/internal/race"
+)
+
+var (
+	// ErrCorrupt reports that the input is invalid.
+	ErrCorrupt = errors.New("s2: corrupt input")
+	// ErrCRC reports that the input failed CRC validation (streams only)
+	ErrCRC = errors.New("s2: corrupt input, crc mismatch")
+	// ErrTooLarge reports that the uncompressed length is too large.
+	ErrTooLarge = errors.New("s2: decoded block is too large")
+	// ErrUnsupported reports that the input isn't supported.
+	ErrUnsupported = errors.New("s2: unsupported input")
+)
+
+// DecodedLen returns the length of the decoded block.
+func DecodedLen(src []byte) (int, error) { + v, _, err := decodedLen(src) + return v, err +} + +// decodedLen returns the length of the decoded block and the number of bytes +// that the length header occupied. +func decodedLen(src []byte) (blockLen, headerLen int, err error) { + v, n := binary.Uvarint(src) + if n <= 0 || v > 0xffffffff { + return 0, 0, ErrCorrupt + } + + const wordSize = 32 << (^uint(0) >> 32 & 1) + if wordSize == 32 && v > 0x7fffffff { + return 0, 0, ErrTooLarge + } + return int(v), n, nil +} + +const ( + decodeErrCodeCorrupt = 1 +) + +// Decode returns the decoded form of src. The returned slice may be a sub- +// slice of dst if dst was large enough to hold the entire decoded block. +// Otherwise, a newly allocated slice will be returned. +// +// The dst and src must not overlap. It is valid to pass a nil dst. +func Decode(dst, src []byte) ([]byte, error) { + dLen, s, err := decodedLen(src) + if err != nil { + return nil, err + } + if dLen <= cap(dst) { + dst = dst[:dLen] + } else { + dst = make([]byte, dLen) + } + + race.WriteSlice(dst) + race.ReadSlice(src[s:]) + + if s2Decode(dst, src[s:]) != 0 { + return nil, ErrCorrupt + } + return dst, nil +} + +// s2DecodeDict writes the decoding of src to dst. It assumes that the varint-encoded +// length of the decompressed bytes has already been read, and that len(dst) +// equals that length. +// +// It returns 0 on success or a decodeErrCodeXxx error code on failure. +func s2DecodeDict(dst, src []byte, dict *Dict) int { + if dict == nil { + return s2Decode(dst, src) + } + const debug = false + const debugErrs = debug + + if debug { + fmt.Println("Starting decode, dst len:", len(dst)) + } + var d, s, length int + offset := len(dict.dict) - dict.repeat + + // As long as we can read at least 5 bytes... + for s < len(src)-5 { + // Removing bounds checks is SLOWER, when if doing + // in := src[s:s+5] + // Checked on Go 1.18 + switch src[s] & 0x03 { + case tagLiteral: + x := uint32(src[s] >> 2) + switch { + case x < 60: + s++ + case x == 60: + s += 2 + x = uint32(src[s-1]) + case x == 61: + in := src[s : s+3] + x = uint32(in[1]) | uint32(in[2])<<8 + s += 3 + case x == 62: + in := src[s : s+4] + // Load as 32 bit and shift down. 
+ x = uint32(in[0]) | uint32(in[1])<<8 | uint32(in[2])<<16 | uint32(in[3])<<24 + x >>= 8 + s += 4 + case x == 63: + in := src[s : s+5] + x = uint32(in[1]) | uint32(in[2])<<8 | uint32(in[3])<<16 | uint32(in[4])<<24 + s += 5 + } + length = int(x) + 1 + if debug { + fmt.Println("literals, length:", length, "d-after:", d+length) + } + if length > len(dst)-d || length > len(src)-s || (strconv.IntSize == 32 && length <= 0) { + if debugErrs { + fmt.Println("corrupt literal: length:", length, "d-left:", len(dst)-d, "src-left:", len(src)-s) + } + return decodeErrCodeCorrupt + } + + copy(dst[d:], src[s:s+length]) + d += length + s += length + continue + + case tagCopy1: + s += 2 + toffset := int(uint32(src[s-2])&0xe0<<3 | uint32(src[s-1])) + length = int(src[s-2]) >> 2 & 0x7 + if toffset == 0 { + if debug { + fmt.Print("(repeat) ") + } + // keep last offset + switch length { + case 5: + length = int(src[s]) + 4 + s += 1 + case 6: + in := src[s : s+2] + length = int(uint32(in[0])|(uint32(in[1])<<8)) + (1 << 8) + s += 2 + case 7: + in := src[s : s+3] + length = int((uint32(in[2])<<16)|(uint32(in[1])<<8)|uint32(in[0])) + (1 << 16) + s += 3 + default: // 0-> 4 + } + } else { + offset = toffset + } + length += 4 + case tagCopy2: + in := src[s : s+3] + offset = int(uint32(in[1]) | uint32(in[2])<<8) + length = 1 + int(in[0])>>2 + s += 3 + + case tagCopy4: + in := src[s : s+5] + offset = int(uint32(in[1]) | uint32(in[2])<<8 | uint32(in[3])<<16 | uint32(in[4])<<24) + length = 1 + int(in[0])>>2 + s += 5 + } + + if offset <= 0 || length > len(dst)-d { + if debugErrs { + fmt.Println("match error; offset:", offset, "length:", length, "dst-left:", len(dst)-d) + } + return decodeErrCodeCorrupt + } + + // copy from dict + if d < offset { + if d > MaxDictSrcOffset { + if debugErrs { + fmt.Println("dict after", MaxDictSrcOffset, "d:", d, "offset:", offset, "length:", length) + } + return decodeErrCodeCorrupt + } + startOff := len(dict.dict) - offset + d + if startOff < 0 || startOff+length > len(dict.dict) { + if debugErrs { + fmt.Printf("offset (%d) + length (%d) bigger than dict (%d)\n", offset, length, len(dict.dict)) + } + return decodeErrCodeCorrupt + } + if debug { + fmt.Println("dict copy, length:", length, "offset:", offset, "d-after:", d+length, "dict start offset:", startOff) + } + copy(dst[d:d+length], dict.dict[startOff:]) + d += length + continue + } + + if debug { + fmt.Println("copy, length:", length, "offset:", offset, "d-after:", d+length) + } + + // Copy from an earlier sub-slice of dst to a later sub-slice. + // If no overlap, use the built-in copy: + if offset > length { + copy(dst[d:d+length], dst[d-offset:]) + d += length + continue + } + + // Unlike the built-in copy function, this byte-by-byte copy always runs + // forwards, even if the slices overlap. Conceptually, this is: + // + // d += forwardCopy(dst[d:d+length], dst[d-offset:]) + // + // We align the slices into a and b and show the compiler they are the same size. + // This allows the loop to run without bounds checks. + a := dst[d : d+length] + b := dst[d-offset:] + b = b[:len(a)] + for i := range a { + a[i] = b[i] + } + d += length + } + + // Remaining with extra checks... + for s < len(src) { + switch src[s] & 0x03 { + case tagLiteral: + x := uint32(src[s] >> 2) + switch { + case x < 60: + s++ + case x == 60: + s += 2 + if uint(s) > uint(len(src)) { // The uint conversions catch overflow from the previous line. 
+ if debugErrs { + fmt.Println("src went oob") + } + return decodeErrCodeCorrupt + } + x = uint32(src[s-1]) + case x == 61: + s += 3 + if uint(s) > uint(len(src)) { // The uint conversions catch overflow from the previous line. + if debugErrs { + fmt.Println("src went oob") + } + return decodeErrCodeCorrupt + } + x = uint32(src[s-2]) | uint32(src[s-1])<<8 + case x == 62: + s += 4 + if uint(s) > uint(len(src)) { // The uint conversions catch overflow from the previous line. + if debugErrs { + fmt.Println("src went oob") + } + return decodeErrCodeCorrupt + } + x = uint32(src[s-3]) | uint32(src[s-2])<<8 | uint32(src[s-1])<<16 + case x == 63: + s += 5 + if uint(s) > uint(len(src)) { // The uint conversions catch overflow from the previous line. + if debugErrs { + fmt.Println("src went oob") + } + return decodeErrCodeCorrupt + } + x = uint32(src[s-4]) | uint32(src[s-3])<<8 | uint32(src[s-2])<<16 | uint32(src[s-1])<<24 + } + length = int(x) + 1 + if length > len(dst)-d || length > len(src)-s || (strconv.IntSize == 32 && length <= 0) { + if debugErrs { + fmt.Println("corrupt literal: length:", length, "d-left:", len(dst)-d, "src-left:", len(src)-s) + } + return decodeErrCodeCorrupt + } + if debug { + fmt.Println("literals, length:", length, "d-after:", d+length) + } + + copy(dst[d:], src[s:s+length]) + d += length + s += length + continue + + case tagCopy1: + s += 2 + if uint(s) > uint(len(src)) { // The uint conversions catch overflow from the previous line. + if debugErrs { + fmt.Println("src went oob") + } + return decodeErrCodeCorrupt + } + length = int(src[s-2]) >> 2 & 0x7 + toffset := int(uint32(src[s-2])&0xe0<<3 | uint32(src[s-1])) + if toffset == 0 { + if debug { + fmt.Print("(repeat) ") + } + // keep last offset + switch length { + case 5: + s += 1 + if uint(s) > uint(len(src)) { // The uint conversions catch overflow from the previous line. + if debugErrs { + fmt.Println("src went oob") + } + return decodeErrCodeCorrupt + } + length = int(uint32(src[s-1])) + 4 + case 6: + s += 2 + if uint(s) > uint(len(src)) { // The uint conversions catch overflow from the previous line. + if debugErrs { + fmt.Println("src went oob") + } + return decodeErrCodeCorrupt + } + length = int(uint32(src[s-2])|(uint32(src[s-1])<<8)) + (1 << 8) + case 7: + s += 3 + if uint(s) > uint(len(src)) { // The uint conversions catch overflow from the previous line. + if debugErrs { + fmt.Println("src went oob") + } + return decodeErrCodeCorrupt + } + length = int(uint32(src[s-3])|(uint32(src[s-2])<<8)|(uint32(src[s-1])<<16)) + (1 << 16) + default: // 0-> 4 + } + } else { + offset = toffset + } + length += 4 + case tagCopy2: + s += 3 + if uint(s) > uint(len(src)) { // The uint conversions catch overflow from the previous line. + if debugErrs { + fmt.Println("src went oob") + } + return decodeErrCodeCorrupt + } + length = 1 + int(src[s-3])>>2 + offset = int(uint32(src[s-2]) | uint32(src[s-1])<<8) + + case tagCopy4: + s += 5 + if uint(s) > uint(len(src)) { // The uint conversions catch overflow from the previous line. 
+ if debugErrs { + fmt.Println("src went oob") + } + return decodeErrCodeCorrupt + } + length = 1 + int(src[s-5])>>2 + offset = int(uint32(src[s-4]) | uint32(src[s-3])<<8 | uint32(src[s-2])<<16 | uint32(src[s-1])<<24) + } + + if offset <= 0 || length > len(dst)-d { + if debugErrs { + fmt.Println("match error; offset:", offset, "length:", length, "dst-left:", len(dst)-d) + } + return decodeErrCodeCorrupt + } + + // copy from dict + if d < offset { + if d > MaxDictSrcOffset { + if debugErrs { + fmt.Println("dict after", MaxDictSrcOffset, "d:", d, "offset:", offset, "length:", length) + } + return decodeErrCodeCorrupt + } + rOff := len(dict.dict) - (offset - d) + if debug { + fmt.Println("starting dict entry from dict offset", len(dict.dict)-rOff) + } + if rOff+length > len(dict.dict) { + if debugErrs { + fmt.Println("err: END offset", rOff+length, "bigger than dict", len(dict.dict), "dict offset:", rOff, "length:", length) + } + return decodeErrCodeCorrupt + } + if rOff < 0 { + if debugErrs { + fmt.Println("err: START offset", rOff, "less than 0", len(dict.dict), "dict offset:", rOff, "length:", length) + } + return decodeErrCodeCorrupt + } + copy(dst[d:d+length], dict.dict[rOff:]) + d += length + continue + } + + if debug { + fmt.Println("copy, length:", length, "offset:", offset, "d-after:", d+length) + } + + // Copy from an earlier sub-slice of dst to a later sub-slice. + // If no overlap, use the built-in copy: + if offset > length { + copy(dst[d:d+length], dst[d-offset:]) + d += length + continue + } + + // Unlike the built-in copy function, this byte-by-byte copy always runs + // forwards, even if the slices overlap. Conceptually, this is: + // + // d += forwardCopy(dst[d:d+length], dst[d-offset:]) + // + // We align the slices into a and b and show the compiler they are the same size. + // This allows the loop to run without bounds checks. + a := dst[d : d+length] + b := dst[d-offset:] + b = b[:len(a)] + for i := range a { + a[i] = b[i] + } + d += length + } + + if d != len(dst) { + if debugErrs { + fmt.Println("wanted length", len(dst), "got", d) + } + return decodeErrCodeCorrupt + } + return 0 +} diff --git a/vendor/github.com/golang/snappy/decode_amd64.s b/vendor/github.com/klauspost/compress/s2/decode_amd64.s similarity index 60% rename from vendor/github.com/golang/snappy/decode_amd64.s rename to vendor/github.com/klauspost/compress/s2/decode_amd64.s index e6179f65e3..9b105e03c5 100644 --- a/vendor/github.com/golang/snappy/decode_amd64.s +++ b/vendor/github.com/klauspost/compress/s2/decode_amd64.s @@ -1,4 +1,5 @@ // Copyright 2016 The Go Authors. All rights reserved. +// Copyright (c) 2019 Klaus Post. All rights reserved. // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. @@ -8,6 +9,21 @@ #include "textflag.h" +#define R_TMP0 AX +#define R_TMP1 BX +#define R_LEN CX +#define R_OFF DX +#define R_SRC SI +#define R_DST DI +#define R_DBASE R8 +#define R_DLEN R9 +#define R_DEND R10 +#define R_SBASE R11 +#define R_SLEN R12 +#define R_SEND R13 +#define R_TMP2 R14 +#define R_TMP3 R15 + // The asm code generally follows the pure Go code in decode_other.go, except // where marked with a "!!!". @@ -15,51 +31,52 @@ // // All local variables fit into registers. The non-zero stack size is only to // spill registers and push args when issuing a CALL. 
The register allocation: -// - AX scratch -// - BX scratch -// - CX length or x -// - DX offset -// - SI &src[s] -// - DI &dst[d] -// + R8 dst_base -// + R9 dst_len -// + R10 dst_base + dst_len -// + R11 src_base -// + R12 src_len -// + R13 src_base + src_len -// - R14 used by doCopy -// - R15 used by doCopy +// - R_TMP0 scratch +// - R_TMP1 scratch +// - R_LEN length or x (shared) +// - R_OFF offset +// - R_SRC &src[s] +// - R_DST &dst[d] +// + R_DBASE dst_base +// + R_DLEN dst_len +// + R_DEND dst_base + dst_len +// + R_SBASE src_base +// + R_SLEN src_len +// + R_SEND src_base + src_len +// - R_TMP2 used by doCopy +// - R_TMP3 used by doCopy // -// The registers R8-R13 (marked with a "+") are set at the start of the +// The registers R_DBASE-R_SEND (marked with a "+") are set at the start of the // function, and after a CALL returns, and are not otherwise modified. // -// The d variable is implicitly DI - R8, and len(dst)-d is R10 - DI. -// The s variable is implicitly SI - R11, and len(src)-s is R13 - SI. -TEXT ·decode(SB), NOSPLIT, $48-56 - // Initialize SI, DI and R8-R13. - MOVQ dst_base+0(FP), R8 - MOVQ dst_len+8(FP), R9 - MOVQ R8, DI - MOVQ R8, R10 - ADDQ R9, R10 - MOVQ src_base+24(FP), R11 - MOVQ src_len+32(FP), R12 - MOVQ R11, SI - MOVQ R11, R13 - ADDQ R12, R13 +// The d variable is implicitly R_DST - R_DBASE, and len(dst)-d is R_DEND - R_DST. +// The s variable is implicitly R_SRC - R_SBASE, and len(src)-s is R_SEND - R_SRC. +TEXT ·s2Decode(SB), NOSPLIT, $48-56 + // Initialize R_SRC, R_DST and R_DBASE-R_SEND. + MOVQ dst_base+0(FP), R_DBASE + MOVQ dst_len+8(FP), R_DLEN + MOVQ R_DBASE, R_DST + MOVQ R_DBASE, R_DEND + ADDQ R_DLEN, R_DEND + MOVQ src_base+24(FP), R_SBASE + MOVQ src_len+32(FP), R_SLEN + MOVQ R_SBASE, R_SRC + MOVQ R_SBASE, R_SEND + ADDQ R_SLEN, R_SEND + XORQ R_OFF, R_OFF loop: // for s < len(src) - CMPQ SI, R13 + CMPQ R_SRC, R_SEND JEQ end - // CX = uint32(src[s]) + // R_LEN = uint32(src[s]) // // switch src[s] & 0x03 - MOVBLZX (SI), CX - MOVL CX, BX - ANDL $3, BX - CMPL BX, $1 + MOVBLZX (R_SRC), R_LEN + MOVL R_LEN, R_TMP1 + ANDL $3, R_TMP1 + CMPL R_TMP1, $1 JAE tagCopy // ---------------------------------------- @@ -68,35 +85,35 @@ loop: // case tagLiteral: // x := uint32(src[s] >> 2) // switch - SHRL $2, CX - CMPL CX, $60 + SHRL $2, R_LEN + CMPL R_LEN, $60 JAE tagLit60Plus // case x < 60: // s++ - INCQ SI + INCQ R_SRC doLit: // This is the end of the inner "switch", when we have a literal tag. // - // We assume that CX == x and x fits in a uint32, where x is the variable + // We assume that R_LEN == x and x fits in a uint32, where x is the variable // used in the pure Go decode_other.go code. // length = int(x) + 1 // // Unlike the pure Go code, we don't need to check if length <= 0 because - // CX can hold 64 bits, so the increment cannot overflow. - INCQ CX + // R_LEN can hold 64 bits, so the increment cannot overflow. + INCQ R_LEN // Prepare to check if copying length bytes will run past the end of dst or // src. // - // AX = len(dst) - d - // BX = len(src) - s - MOVQ R10, AX - SUBQ DI, AX - MOVQ R13, BX - SUBQ SI, BX + // R_TMP0 = len(dst) - d + // R_TMP1 = len(src) - s + MOVQ R_DEND, R_TMP0 + SUBQ R_DST, R_TMP0 + MOVQ R_SEND, R_TMP1 + SUBQ R_SRC, R_TMP1 // !!! Try a faster technique for short (16 or fewer bytes) copies. // @@ -109,11 +126,11 @@ doLit: // is contiguous in memory and so it needs to leave enough source bytes to // read the next tag without refilling buffers, but Go's Decode assumes // contiguousness (the src argument is a []byte). 
- CMPQ CX, $16 + CMPQ R_LEN, $16 JGT callMemmove - CMPQ AX, $16 + CMPQ R_TMP0, $16 JLT callMemmove - CMPQ BX, $16 + CMPQ R_TMP1, $16 JLT callMemmove // !!! Implement the copy from src to dst as a 16-byte load and store. @@ -127,53 +144,55 @@ doLit: // Note that on amd64, it is legal and cheap to issue unaligned 8-byte or // 16-byte loads and stores. This technique probably wouldn't be as // effective on architectures that are fussier about alignment. - MOVOU 0(SI), X0 - MOVOU X0, 0(DI) + MOVOU 0(R_SRC), X0 + MOVOU X0, 0(R_DST) // d += length // s += length - ADDQ CX, DI - ADDQ CX, SI + ADDQ R_LEN, R_DST + ADDQ R_LEN, R_SRC JMP loop callMemmove: // if length > len(dst)-d || length > len(src)-s { etc } - CMPQ CX, AX + CMPQ R_LEN, R_TMP0 JGT errCorrupt - CMPQ CX, BX + CMPQ R_LEN, R_TMP1 JGT errCorrupt // copy(dst[d:], src[s:s+length]) // // This means calling runtime·memmove(&dst[d], &src[s], length), so we push - // DI, SI and CX as arguments. Coincidentally, we also need to spill those + // R_DST, R_SRC and R_LEN as arguments. Coincidentally, we also need to spill those // three registers to the stack, to save local variables across the CALL. - MOVQ DI, 0(SP) - MOVQ SI, 8(SP) - MOVQ CX, 16(SP) - MOVQ DI, 24(SP) - MOVQ SI, 32(SP) - MOVQ CX, 40(SP) + MOVQ R_DST, 0(SP) + MOVQ R_SRC, 8(SP) + MOVQ R_LEN, 16(SP) + MOVQ R_DST, 24(SP) + MOVQ R_SRC, 32(SP) + MOVQ R_LEN, 40(SP) + MOVQ R_OFF, 48(SP) CALL runtime·memmove(SB) // Restore local variables: unspill registers from the stack and - // re-calculate R8-R13. - MOVQ 24(SP), DI - MOVQ 32(SP), SI - MOVQ 40(SP), CX - MOVQ dst_base+0(FP), R8 - MOVQ dst_len+8(FP), R9 - MOVQ R8, R10 - ADDQ R9, R10 - MOVQ src_base+24(FP), R11 - MOVQ src_len+32(FP), R12 - MOVQ R11, R13 - ADDQ R12, R13 + // re-calculate R_DBASE-R_SEND. + MOVQ 24(SP), R_DST + MOVQ 32(SP), R_SRC + MOVQ 40(SP), R_LEN + MOVQ 48(SP), R_OFF + MOVQ dst_base+0(FP), R_DBASE + MOVQ dst_len+8(FP), R_DLEN + MOVQ R_DBASE, R_DEND + ADDQ R_DLEN, R_DEND + MOVQ src_base+24(FP), R_SBASE + MOVQ src_len+32(FP), R_SLEN + MOVQ R_SBASE, R_SEND + ADDQ R_SLEN, R_SEND // d += length // s += length - ADDQ CX, DI - ADDQ CX, SI + ADDQ R_LEN, R_DST + ADDQ R_LEN, R_SRC JMP loop tagLit60Plus: @@ -182,44 +201,42 @@ tagLit60Plus: // s += x - 58; if uint(s) > uint(len(src)) { etc } // // checks. In the asm version, we code it once instead of once per switch case. - ADDQ CX, SI - SUBQ $58, SI - MOVQ SI, BX - SUBQ R11, BX - CMPQ BX, R12 + ADDQ R_LEN, R_SRC + SUBQ $58, R_SRC + CMPQ R_SRC, R_SEND JA errCorrupt // case x == 60: - CMPL CX, $61 + CMPL R_LEN, $61 JEQ tagLit61 JA tagLit62Plus // x = uint32(src[s-1]) - MOVBLZX -1(SI), CX + MOVBLZX -1(R_SRC), R_LEN JMP doLit tagLit61: // case x == 61: // x = uint32(src[s-2]) | uint32(src[s-1])<<8 - MOVWLZX -2(SI), CX + MOVWLZX -2(R_SRC), R_LEN JMP doLit tagLit62Plus: - CMPL CX, $62 + CMPL R_LEN, $62 JA tagLit63 // case x == 62: // x = uint32(src[s-3]) | uint32(src[s-2])<<8 | uint32(src[s-1])<<16 - MOVWLZX -3(SI), CX - MOVBLZX -1(SI), BX - SHLL $16, BX - ORL BX, CX - JMP doLit + // We read one byte, safe to read one back, since we are just reading tag. + // x = binary.LittleEndian.Uint32(src[s-1:]) >> 8 + MOVL -4(R_SRC), R_LEN + SHRL $8, R_LEN + JMP doLit tagLit63: // case x == 63: // x = uint32(src[s-4]) | uint32(src[s-3])<<8 | uint32(src[s-2])<<16 | uint32(src[s-1])<<24 - MOVL -4(SI), CX + MOVL -4(R_SRC), R_LEN JMP doLit // The code above handles literal tags. 
@@ -229,103 +246,161 @@ tagLit63: tagCopy4: // case tagCopy4: // s += 5 - ADDQ $5, SI + ADDQ $5, R_SRC // if uint(s) > uint(len(src)) { etc } - MOVQ SI, BX - SUBQ R11, BX - CMPQ BX, R12 + CMPQ R_SRC, R_SEND JA errCorrupt // length = 1 + int(src[s-5])>>2 - SHRQ $2, CX - INCQ CX + SHRQ $2, R_LEN + INCQ R_LEN // offset = int(uint32(src[s-4]) | uint32(src[s-3])<<8 | uint32(src[s-2])<<16 | uint32(src[s-1])<<24) - MOVLQZX -4(SI), DX + MOVLQZX -4(R_SRC), R_OFF JMP doCopy tagCopy2: // case tagCopy2: // s += 3 - ADDQ $3, SI + ADDQ $3, R_SRC // if uint(s) > uint(len(src)) { etc } - MOVQ SI, BX - SUBQ R11, BX - CMPQ BX, R12 + CMPQ R_SRC, R_SEND JA errCorrupt // length = 1 + int(src[s-3])>>2 - SHRQ $2, CX - INCQ CX + SHRQ $2, R_LEN + INCQ R_LEN // offset = int(uint32(src[s-2]) | uint32(src[s-1])<<8) - MOVWQZX -2(SI), DX + MOVWQZX -2(R_SRC), R_OFF JMP doCopy tagCopy: // We have a copy tag. We assume that: - // - BX == src[s] & 0x03 - // - CX == src[s] - CMPQ BX, $2 + // - R_TMP1 == src[s] & 0x03 + // - R_LEN == src[s] + CMPQ R_TMP1, $2 JEQ tagCopy2 JA tagCopy4 // case tagCopy1: // s += 2 - ADDQ $2, SI + ADDQ $2, R_SRC // if uint(s) > uint(len(src)) { etc } - MOVQ SI, BX - SUBQ R11, BX - CMPQ BX, R12 + CMPQ R_SRC, R_SEND JA errCorrupt // offset = int(uint32(src[s-2])&0xe0<<3 | uint32(src[s-1])) - MOVQ CX, DX - ANDQ $0xe0, DX - SHLQ $3, DX - MOVBQZX -1(SI), BX - ORQ BX, DX - // length = 4 + int(src[s-2])>>2&0x7 - SHRQ $2, CX - ANDQ $7, CX - ADDQ $4, CX + MOVBQZX -1(R_SRC), R_TMP1 + MOVQ R_LEN, R_TMP0 + SHRQ $2, R_LEN + ANDQ $0xe0, R_TMP0 + ANDQ $7, R_LEN + SHLQ $3, R_TMP0 + ADDQ $4, R_LEN + ORQ R_TMP1, R_TMP0 + + // check if repeat code, ZF set by ORQ. + JZ repeatCode + + // This is a regular copy, transfer our temporary value to R_OFF (length) + MOVQ R_TMP0, R_OFF + JMP doCopy + +// This is a repeat code. +repeatCode: + // If length < 9, reuse last offset, with the length already calculated. + CMPQ R_LEN, $9 + JL doCopyRepeat + + // Read additional bytes for length. + JE repeatLen1 + + // Rare, so the extra branch shouldn't hurt too much. + CMPQ R_LEN, $10 + JE repeatLen2 + JMP repeatLen3 + +// Read repeat lengths. +repeatLen1: + // s ++ + ADDQ $1, R_SRC + + // if uint(s) > uint(len(src)) { etc } + CMPQ R_SRC, R_SEND + JA errCorrupt + + // length = src[s-1] + 8 + MOVBQZX -1(R_SRC), R_LEN + ADDL $8, R_LEN + JMP doCopyRepeat + +repeatLen2: + // s +=2 + ADDQ $2, R_SRC + + // if uint(s) > uint(len(src)) { etc } + CMPQ R_SRC, R_SEND + JA errCorrupt + + // length = uint32(src[s-2]) | (uint32(src[s-1])<<8) + (1 << 8) + MOVWQZX -2(R_SRC), R_LEN + ADDL $260, R_LEN + JMP doCopyRepeat + +repeatLen3: + // s +=3 + ADDQ $3, R_SRC + + // if uint(s) > uint(len(src)) { etc } + CMPQ R_SRC, R_SEND + JA errCorrupt + + // length = uint32(src[s-3]) | (uint32(src[s-2])<<8) | (uint32(src[s-1])<<16) + (1 << 16) + // Read one byte further back (just part of the tag, shifted out) + MOVL -4(R_SRC), R_LEN + SHRL $8, R_LEN + ADDL $65540, R_LEN + JMP doCopyRepeat doCopy: // This is the end of the outer "switch", when we have a copy tag. // // We assume that: - // - CX == length && CX > 0 - // - DX == offset - - // if offset <= 0 { etc } - CMPQ DX, $0 - JLE errCorrupt + // - R_LEN == length && R_LEN > 0 + // - R_OFF == offset // if d < offset { etc } - MOVQ DI, BX - SUBQ R8, BX - CMPQ BX, DX + MOVQ R_DST, R_TMP1 + SUBQ R_DBASE, R_TMP1 + CMPQ R_TMP1, R_OFF JLT errCorrupt + // Repeat values can skip the test above, since any offset > 0 will be in dst. 
+doCopyRepeat: + // if offset <= 0 { etc } + CMPQ R_OFF, $0 + JLE errCorrupt + // if length > len(dst)-d { etc } - MOVQ R10, BX - SUBQ DI, BX - CMPQ CX, BX + MOVQ R_DEND, R_TMP1 + SUBQ R_DST, R_TMP1 + CMPQ R_LEN, R_TMP1 JGT errCorrupt // forwardCopy(dst[d:d+length], dst[d-offset:]); d += length // // Set: - // - R14 = len(dst)-d - // - R15 = &dst[d-offset] - MOVQ R10, R14 - SUBQ DI, R14 - MOVQ DI, R15 - SUBQ DX, R15 + // - R_TMP2 = len(dst)-d + // - R_TMP3 = &dst[d-offset] + MOVQ R_DEND, R_TMP2 + SUBQ R_DST, R_TMP2 + MOVQ R_DST, R_TMP3 + SUBQ R_OFF, R_TMP3 // !!! Try a faster technique for short (16 or fewer bytes) forward copies. // @@ -340,17 +415,17 @@ doCopy: // } // copy 16 bytes // d += length - CMPQ CX, $16 + CMPQ R_LEN, $16 JGT slowForwardCopy - CMPQ DX, $8 + CMPQ R_OFF, $8 JLT slowForwardCopy - CMPQ R14, $16 + CMPQ R_TMP2, $16 JLT slowForwardCopy - MOVQ 0(R15), AX - MOVQ AX, 0(DI) - MOVQ 8(R15), BX - MOVQ BX, 8(DI) - ADDQ CX, DI + MOVQ 0(R_TMP3), R_TMP0 + MOVQ R_TMP0, 0(R_DST) + MOVQ 8(R_TMP3), R_TMP1 + MOVQ R_TMP1, 8(R_DST) + ADDQ R_LEN, R_DST JMP loop slowForwardCopy: @@ -402,10 +477,13 @@ slowForwardCopy: // if length > len(dst)-d-10 { // goto verySlowForwardCopy // } - SUBQ $10, R14 - CMPQ CX, R14 + SUBQ $10, R_TMP2 + CMPQ R_LEN, R_TMP2 JGT verySlowForwardCopy + // We want to keep the offset, so we use R_TMP2 from here. + MOVQ R_OFF, R_TMP2 + makeOffsetAtLeast8: // !!! As above, expand the pattern so that offset >= 8 and we can use // 8-byte load/stores. @@ -416,37 +494,37 @@ makeOffsetAtLeast8: // d += offset // offset += offset // // The two previous lines together means that d-offset, and therefore - // // R15, is unchanged. + // // R_TMP3, is unchanged. // } - CMPQ DX, $8 + CMPQ R_TMP2, $8 JGE fixUpSlowForwardCopy - MOVQ (R15), BX - MOVQ BX, (DI) - SUBQ DX, CX - ADDQ DX, DI - ADDQ DX, DX + MOVQ (R_TMP3), R_TMP1 + MOVQ R_TMP1, (R_DST) + SUBQ R_TMP2, R_LEN + ADDQ R_TMP2, R_DST + ADDQ R_TMP2, R_TMP2 JMP makeOffsetAtLeast8 fixUpSlowForwardCopy: - // !!! Add length (which might be negative now) to d (implied by DI being + // !!! Add length (which might be negative now) to d (implied by R_DST being // &dst[d]) so that d ends up at the right place when we jump back to the - // top of the loop. Before we do that, though, we save DI to AX so that, if + // top of the loop. Before we do that, though, we save R_DST to R_TMP0 so that, if // length is positive, copying the remaining length bytes will write to the // right place. - MOVQ DI, AX - ADDQ CX, DI + MOVQ R_DST, R_TMP0 + ADDQ R_LEN, R_DST finishSlowForwardCopy: // !!! Repeat 8-byte load/stores until length <= 0. Ending with a negative // length means that we overrun, but as above, that will be fixed up by // subsequent iterations of the outermost loop. - CMPQ CX, $0 + CMPQ R_LEN, $0 JLE loop - MOVQ (R15), BX - MOVQ BX, (AX) - ADDQ $8, R15 - ADDQ $8, AX - SUBQ $8, CX + MOVQ (R_TMP3), R_TMP1 + MOVQ R_TMP1, (R_TMP0) + ADDQ $8, R_TMP3 + ADDQ $8, R_TMP0 + SUBQ $8, R_LEN JMP finishSlowForwardCopy verySlowForwardCopy: @@ -462,11 +540,11 @@ verySlowForwardCopy: // break // } // } - MOVB (R15), BX - MOVB BX, (DI) - INCQ R15 - INCQ DI - DECQ CX + MOVB (R_TMP3), R_TMP1 + MOVB R_TMP1, (R_DST) + INCQ R_TMP3 + INCQ R_DST + DECQ R_LEN JNZ verySlowForwardCopy JMP loop @@ -477,7 +555,7 @@ end: // This is the end of the "for s < len(src)". 
// // if d != len(dst) { etc } - CMPQ DI, R10 + CMPQ R_DST, R_DEND JNE errCorrupt // return 0 diff --git a/vendor/github.com/golang/snappy/decode_arm64.s b/vendor/github.com/klauspost/compress/s2/decode_arm64.s similarity index 59% rename from vendor/github.com/golang/snappy/decode_arm64.s rename to vendor/github.com/klauspost/compress/s2/decode_arm64.s index 7a3ead17ea..78e463f342 100644 --- a/vendor/github.com/golang/snappy/decode_arm64.s +++ b/vendor/github.com/klauspost/compress/s2/decode_arm64.s @@ -8,6 +8,31 @@ #include "textflag.h" +#define R_TMP0 R2 +#define R_TMP1 R3 +#define R_LEN R4 +#define R_OFF R5 +#define R_SRC R6 +#define R_DST R7 +#define R_DBASE R8 +#define R_DLEN R9 +#define R_DEND R10 +#define R_SBASE R11 +#define R_SLEN R12 +#define R_SEND R13 +#define R_TMP2 R14 +#define R_TMP3 R15 + +// TEST_SRC will check if R_SRC is <= SRC_END +#define TEST_SRC() \ + CMP R_SEND, R_SRC \ + BGT errCorrupt + +// MOVD R_SRC, R_TMP1 +// SUB R_SBASE, R_TMP1, R_TMP1 +// CMP R_SLEN, R_TMP1 +// BGT errCorrupt + // The asm code generally follows the pure Go code in decode_other.go, except // where marked with a "!!!". @@ -15,52 +40,53 @@ // // All local variables fit into registers. The non-zero stack size is only to // spill registers and push args when issuing a CALL. The register allocation: -// - R2 scratch -// - R3 scratch -// - R4 length or x -// - R5 offset -// - R6 &src[s] -// - R7 &dst[d] -// + R8 dst_base -// + R9 dst_len -// + R10 dst_base + dst_len -// + R11 src_base -// + R12 src_len -// + R13 src_base + src_len -// - R14 used by doCopy -// - R15 used by doCopy +// - R_TMP0 scratch +// - R_TMP1 scratch +// - R_LEN length or x +// - R_OFF offset +// - R_SRC &src[s] +// - R_DST &dst[d] +// + R_DBASE dst_base +// + R_DLEN dst_len +// + R_DEND dst_base + dst_len +// + R_SBASE src_base +// + R_SLEN src_len +// + R_SEND src_base + src_len +// - R_TMP2 used by doCopy +// - R_TMP3 used by doCopy // -// The registers R8-R13 (marked with a "+") are set at the start of the +// The registers R_DBASE-R_SEND (marked with a "+") are set at the start of the // function, and after a CALL returns, and are not otherwise modified. // -// The d variable is implicitly R7 - R8, and len(dst)-d is R10 - R7. -// The s variable is implicitly R6 - R11, and len(src)-s is R13 - R6. -TEXT ·decode(SB), NOSPLIT, $56-56 - // Initialize R6, R7 and R8-R13. - MOVD dst_base+0(FP), R8 - MOVD dst_len+8(FP), R9 - MOVD R8, R7 - MOVD R8, R10 - ADD R9, R10, R10 - MOVD src_base+24(FP), R11 - MOVD src_len+32(FP), R12 - MOVD R11, R6 - MOVD R11, R13 - ADD R12, R13, R13 +// The d variable is implicitly R_DST - R_DBASE, and len(dst)-d is R_DEND - R_DST. +// The s variable is implicitly R_SRC - R_SBASE, and len(src)-s is R_SEND - R_SRC. +TEXT ·s2Decode(SB), NOSPLIT, $56-56 + // Initialize R_SRC, R_DST and R_DBASE-R_SEND. 
+ MOVD dst_base+0(FP), R_DBASE + MOVD dst_len+8(FP), R_DLEN + MOVD R_DBASE, R_DST + MOVD R_DBASE, R_DEND + ADD R_DLEN, R_DEND, R_DEND + MOVD src_base+24(FP), R_SBASE + MOVD src_len+32(FP), R_SLEN + MOVD R_SBASE, R_SRC + MOVD R_SBASE, R_SEND + ADD R_SLEN, R_SEND, R_SEND + MOVD $0, R_OFF loop: // for s < len(src) - CMP R13, R6 + CMP R_SEND, R_SRC BEQ end - // R4 = uint32(src[s]) + // R_LEN = uint32(src[s]) // // switch src[s] & 0x03 - MOVBU (R6), R4 - MOVW R4, R3 - ANDW $3, R3 + MOVBU (R_SRC), R_LEN + MOVW R_LEN, R_TMP1 + ANDW $3, R_TMP1 MOVW $1, R1 - CMPW R1, R3 + CMPW R1, R_TMP1 BGE tagCopy // ---------------------------------------- @@ -70,35 +96,35 @@ loop: // x := uint32(src[s] >> 2) // switch MOVW $60, R1 - LSRW $2, R4, R4 - CMPW R4, R1 + LSRW $2, R_LEN, R_LEN + CMPW R_LEN, R1 BLS tagLit60Plus // case x < 60: // s++ - ADD $1, R6, R6 + ADD $1, R_SRC, R_SRC doLit: // This is the end of the inner "switch", when we have a literal tag. // - // We assume that R4 == x and x fits in a uint32, where x is the variable + // We assume that R_LEN == x and x fits in a uint32, where x is the variable // used in the pure Go decode_other.go code. // length = int(x) + 1 // // Unlike the pure Go code, we don't need to check if length <= 0 because - // R4 can hold 64 bits, so the increment cannot overflow. - ADD $1, R4, R4 + // R_LEN can hold 64 bits, so the increment cannot overflow. + ADD $1, R_LEN, R_LEN // Prepare to check if copying length bytes will run past the end of dst or // src. // - // R2 = len(dst) - d - // R3 = len(src) - s - MOVD R10, R2 - SUB R7, R2, R2 - MOVD R13, R3 - SUB R6, R3, R3 + // R_TMP0 = len(dst) - d + // R_TMP1 = len(src) - s + MOVD R_DEND, R_TMP0 + SUB R_DST, R_TMP0, R_TMP0 + MOVD R_SEND, R_TMP1 + SUB R_SRC, R_TMP1, R_TMP1 // !!! Try a faster technique for short (16 or fewer bytes) copies. // @@ -111,11 +137,11 @@ doLit: // is contiguous in memory and so it needs to leave enough source bytes to // read the next tag without refilling buffers, but Go's Decode assumes // contiguousness (the src argument is a []byte). - CMP $16, R4 + CMP $16, R_LEN BGT callMemmove - CMP $16, R2 + CMP $16, R_TMP0 BLT callMemmove - CMP $16, R3 + CMP $16, R_TMP1 BLT callMemmove // !!! Implement the copy from src to dst as a 16-byte load and store. @@ -129,53 +155,55 @@ doLit: // Note that on arm64, it is legal and cheap to issue unaligned 8-byte or // 16-byte loads and stores. This technique probably wouldn't be as // effective on architectures that are fussier about alignment. - LDP 0(R6), (R14, R15) - STP (R14, R15), 0(R7) + LDP 0(R_SRC), (R_TMP2, R_TMP3) + STP (R_TMP2, R_TMP3), 0(R_DST) // d += length // s += length - ADD R4, R7, R7 - ADD R4, R6, R6 + ADD R_LEN, R_DST, R_DST + ADD R_LEN, R_SRC, R_SRC B loop callMemmove: // if length > len(dst)-d || length > len(src)-s { etc } - CMP R2, R4 + CMP R_TMP0, R_LEN BGT errCorrupt - CMP R3, R4 + CMP R_TMP1, R_LEN BGT errCorrupt // copy(dst[d:], src[s:s+length]) // // This means calling runtime·memmove(&dst[d], &src[s], length), so we push - // R7, R6 and R4 as arguments. Coincidentally, we also need to spill those + // R_DST, R_SRC and R_LEN as arguments. Coincidentally, we also need to spill those // three registers to the stack, to save local variables across the CALL. 
- MOVD R7, 8(RSP) - MOVD R6, 16(RSP) - MOVD R4, 24(RSP) - MOVD R7, 32(RSP) - MOVD R6, 40(RSP) - MOVD R4, 48(RSP) + MOVD R_DST, 8(RSP) + MOVD R_SRC, 16(RSP) + MOVD R_LEN, 24(RSP) + MOVD R_DST, 32(RSP) + MOVD R_SRC, 40(RSP) + MOVD R_LEN, 48(RSP) + MOVD R_OFF, 56(RSP) CALL runtime·memmove(SB) // Restore local variables: unspill registers from the stack and - // re-calculate R8-R13. - MOVD 32(RSP), R7 - MOVD 40(RSP), R6 - MOVD 48(RSP), R4 - MOVD dst_base+0(FP), R8 - MOVD dst_len+8(FP), R9 - MOVD R8, R10 - ADD R9, R10, R10 - MOVD src_base+24(FP), R11 - MOVD src_len+32(FP), R12 - MOVD R11, R13 - ADD R12, R13, R13 + // re-calculate R_DBASE-R_SEND. + MOVD 32(RSP), R_DST + MOVD 40(RSP), R_SRC + MOVD 48(RSP), R_LEN + MOVD 56(RSP), R_OFF + MOVD dst_base+0(FP), R_DBASE + MOVD dst_len+8(FP), R_DLEN + MOVD R_DBASE, R_DEND + ADD R_DLEN, R_DEND, R_DEND + MOVD src_base+24(FP), R_SBASE + MOVD src_len+32(FP), R_SLEN + MOVD R_SBASE, R_SEND + ADD R_SLEN, R_SEND, R_SEND // d += length // s += length - ADD R4, R7, R7 - ADD R4, R6, R6 + ADD R_LEN, R_DST, R_DST + ADD R_LEN, R_SRC, R_SRC B loop tagLit60Plus: @@ -184,44 +212,41 @@ tagLit60Plus: // s += x - 58; if uint(s) > uint(len(src)) { etc } // // checks. In the asm version, we code it once instead of once per switch case. - ADD R4, R6, R6 - SUB $58, R6, R6 - MOVD R6, R3 - SUB R11, R3, R3 - CMP R12, R3 - BGT errCorrupt + ADD R_LEN, R_SRC, R_SRC + SUB $58, R_SRC, R_SRC + TEST_SRC() // case x == 60: MOVW $61, R1 - CMPW R1, R4 + CMPW R1, R_LEN BEQ tagLit61 BGT tagLit62Plus // x = uint32(src[s-1]) - MOVBU -1(R6), R4 + MOVBU -1(R_SRC), R_LEN B doLit tagLit61: // case x == 61: // x = uint32(src[s-2]) | uint32(src[s-1])<<8 - MOVHU -2(R6), R4 + MOVHU -2(R_SRC), R_LEN B doLit tagLit62Plus: - CMPW $62, R4 + CMPW $62, R_LEN BHI tagLit63 // case x == 62: // x = uint32(src[s-3]) | uint32(src[s-2])<<8 | uint32(src[s-1])<<16 - MOVHU -3(R6), R4 - MOVBU -1(R6), R3 - ORR R3<<16, R4 + MOVHU -3(R_SRC), R_LEN + MOVBU -1(R_SRC), R_TMP1 + ORR R_TMP1<<16, R_LEN B doLit tagLit63: // case x == 63: // x = uint32(src[s-4]) | uint32(src[s-3])<<8 | uint32(src[s-2])<<16 | uint32(src[s-1])<<24 - MOVWU -4(R6), R4 + MOVWU -4(R_SRC), R_LEN B doLit // The code above handles literal tags. @@ -231,103 +256,155 @@ tagLit63: tagCopy4: // case tagCopy4: // s += 5 - ADD $5, R6, R6 + ADD $5, R_SRC, R_SRC // if uint(s) > uint(len(src)) { etc } - MOVD R6, R3 - SUB R11, R3, R3 - CMP R12, R3 + MOVD R_SRC, R_TMP1 + SUB R_SBASE, R_TMP1, R_TMP1 + CMP R_SLEN, R_TMP1 BGT errCorrupt // length = 1 + int(src[s-5])>>2 MOVD $1, R1 - ADD R4>>2, R1, R4 + ADD R_LEN>>2, R1, R_LEN // offset = int(uint32(src[s-4]) | uint32(src[s-3])<<8 | uint32(src[s-2])<<16 | uint32(src[s-1])<<24) - MOVWU -4(R6), R5 + MOVWU -4(R_SRC), R_OFF B doCopy tagCopy2: // case tagCopy2: // s += 3 - ADD $3, R6, R6 + ADD $3, R_SRC, R_SRC // if uint(s) > uint(len(src)) { etc } - MOVD R6, R3 - SUB R11, R3, R3 - CMP R12, R3 - BGT errCorrupt + TEST_SRC() // length = 1 + int(src[s-3])>>2 MOVD $1, R1 - ADD R4>>2, R1, R4 + ADD R_LEN>>2, R1, R_LEN // offset = int(uint32(src[s-2]) | uint32(src[s-1])<<8) - MOVHU -2(R6), R5 + MOVHU -2(R_SRC), R_OFF B doCopy tagCopy: // We have a copy tag. 
We assume that: - // - R3 == src[s] & 0x03 - // - R4 == src[s] - CMP $2, R3 + // - R_TMP1 == src[s] & 0x03 + // - R_LEN == src[s] + CMP $2, R_TMP1 BEQ tagCopy2 BGT tagCopy4 // case tagCopy1: // s += 2 - ADD $2, R6, R6 + ADD $2, R_SRC, R_SRC // if uint(s) > uint(len(src)) { etc } - MOVD R6, R3 - SUB R11, R3, R3 - CMP R12, R3 - BGT errCorrupt + TEST_SRC() // offset = int(uint32(src[s-2])&0xe0<<3 | uint32(src[s-1])) - MOVD R4, R5 - AND $0xe0, R5 - MOVBU -1(R6), R3 - ORR R5<<3, R3, R5 + // Calculate offset in R_TMP0 in case it is a repeat. + MOVD R_LEN, R_TMP0 + AND $0xe0, R_TMP0 + MOVBU -1(R_SRC), R_TMP1 + ORR R_TMP0<<3, R_TMP1, R_TMP0 // length = 4 + int(src[s-2])>>2&0x7 MOVD $7, R1 - AND R4>>2, R1, R4 - ADD $4, R4, R4 + AND R_LEN>>2, R1, R_LEN + ADD $4, R_LEN, R_LEN + + // check if repeat code with offset 0. + CMP $0, R_TMP0 + BEQ repeatCode + + // This is a regular copy, transfer our temporary value to R_OFF (offset) + MOVD R_TMP0, R_OFF + B doCopy + + // This is a repeat code. +repeatCode: + // If length < 9, reuse last offset, with the length already calculated. + CMP $9, R_LEN + BLT doCopyRepeat + BEQ repeatLen1 + CMP $10, R_LEN + BEQ repeatLen2 + +repeatLen3: + // s +=3 + ADD $3, R_SRC, R_SRC + + // if uint(s) > uint(len(src)) { etc } + TEST_SRC() + + // length = uint32(src[s-3]) | (uint32(src[s-2])<<8) | (uint32(src[s-1])<<16) + 65540 + MOVBU -1(R_SRC), R_TMP0 + MOVHU -3(R_SRC), R_LEN + ORR R_TMP0<<16, R_LEN, R_LEN + ADD $65540, R_LEN, R_LEN + B doCopyRepeat + +repeatLen2: + // s +=2 + ADD $2, R_SRC, R_SRC + + // if uint(s) > uint(len(src)) { etc } + TEST_SRC() + + // length = uint32(src[s-2]) | (uint32(src[s-1])<<8) + 260 + MOVHU -2(R_SRC), R_LEN + ADD $260, R_LEN, R_LEN + B doCopyRepeat + +repeatLen1: + // s +=1 + ADD $1, R_SRC, R_SRC + + // if uint(s) > uint(len(src)) { etc } + TEST_SRC() + + // length = src[s-1] + 8 + MOVBU -1(R_SRC), R_LEN + ADD $8, R_LEN, R_LEN + B doCopyRepeat doCopy: // This is the end of the outer "switch", when we have a copy tag. // // We assume that: - // - R4 == length && R4 > 0 - // - R5 == offset - - // if offset <= 0 { etc } - MOVD $0, R1 - CMP R1, R5 - BLE errCorrupt + // - R_LEN == length && R_LEN > 0 + // - R_OFF == offset // if d < offset { etc } - MOVD R7, R3 - SUB R8, R3, R3 - CMP R5, R3 + MOVD R_DST, R_TMP1 + SUB R_DBASE, R_TMP1, R_TMP1 + CMP R_OFF, R_TMP1 BLT errCorrupt + // Repeat values can skip the test above, since any offset > 0 will be in dst. +doCopyRepeat: + + // if offset <= 0 { etc } + CMP $0, R_OFF + BLE errCorrupt + // if length > len(dst)-d { etc } - MOVD R10, R3 - SUB R7, R3, R3 - CMP R3, R4 + MOVD R_DEND, R_TMP1 + SUB R_DST, R_TMP1, R_TMP1 + CMP R_TMP1, R_LEN BGT errCorrupt // forwardCopy(dst[d:d+length], dst[d-offset:]); d += length // // Set: - // - R14 = len(dst)-d - // - R15 = &dst[d-offset] - MOVD R10, R14 - SUB R7, R14, R14 - MOVD R7, R15 - SUB R5, R15, R15 + // - R_TMP2 = len(dst)-d + // - R_TMP3 = &dst[d-offset] + MOVD R_DEND, R_TMP2 + SUB R_DST, R_TMP2, R_TMP2 + MOVD R_DST, R_TMP3 + SUB R_OFF, R_TMP3, R_TMP3 // !!! Try a faster technique for short (16 or fewer bytes) forward copies. 
// @@ -342,17 +419,17 @@ doCopy: // } // copy 16 bytes // d += length - CMP $16, R4 + CMP $16, R_LEN BGT slowForwardCopy - CMP $8, R5 + CMP $8, R_OFF BLT slowForwardCopy - CMP $16, R14 + CMP $16, R_TMP2 BLT slowForwardCopy - MOVD 0(R15), R2 - MOVD R2, 0(R7) - MOVD 8(R15), R3 - MOVD R3, 8(R7) - ADD R4, R7, R7 + MOVD 0(R_TMP3), R_TMP0 + MOVD R_TMP0, 0(R_DST) + MOVD 8(R_TMP3), R_TMP1 + MOVD R_TMP1, 8(R_DST) + ADD R_LEN, R_DST, R_DST B loop slowForwardCopy: @@ -404,10 +481,13 @@ slowForwardCopy: // if length > len(dst)-d-10 { // goto verySlowForwardCopy // } - SUB $10, R14, R14 - CMP R14, R4 + SUB $10, R_TMP2, R_TMP2 + CMP R_TMP2, R_LEN BGT verySlowForwardCopy + // We want to keep the offset, so we use R_TMP2 from here. + MOVD R_OFF, R_TMP2 + makeOffsetAtLeast8: // !!! As above, expand the pattern so that offset >= 8 and we can use // 8-byte load/stores. @@ -418,38 +498,38 @@ makeOffsetAtLeast8: // d += offset // offset += offset // // The two previous lines together means that d-offset, and therefore - // // R15, is unchanged. + // // R_TMP3, is unchanged. // } - CMP $8, R5 + CMP $8, R_TMP2 BGE fixUpSlowForwardCopy - MOVD (R15), R3 - MOVD R3, (R7) - SUB R5, R4, R4 - ADD R5, R7, R7 - ADD R5, R5, R5 + MOVD (R_TMP3), R_TMP1 + MOVD R_TMP1, (R_DST) + SUB R_TMP2, R_LEN, R_LEN + ADD R_TMP2, R_DST, R_DST + ADD R_TMP2, R_TMP2, R_TMP2 B makeOffsetAtLeast8 fixUpSlowForwardCopy: - // !!! Add length (which might be negative now) to d (implied by R7 being + // !!! Add length (which might be negative now) to d (implied by R_DST being // &dst[d]) so that d ends up at the right place when we jump back to the - // top of the loop. Before we do that, though, we save R7 to R2 so that, if + // top of the loop. Before we do that, though, we save R_DST to R_TMP0 so that, if // length is positive, copying the remaining length bytes will write to the // right place. - MOVD R7, R2 - ADD R4, R7, R7 + MOVD R_DST, R_TMP0 + ADD R_LEN, R_DST, R_DST finishSlowForwardCopy: // !!! Repeat 8-byte load/stores until length <= 0. Ending with a negative // length means that we overrun, but as above, that will be fixed up by // subsequent iterations of the outermost loop. MOVD $0, R1 - CMP R1, R4 + CMP R1, R_LEN BLE loop - MOVD (R15), R3 - MOVD R3, (R2) - ADD $8, R15, R15 - ADD $8, R2, R2 - SUB $8, R4, R4 + MOVD (R_TMP3), R_TMP1 + MOVD R_TMP1, (R_TMP0) + ADD $8, R_TMP3, R_TMP3 + ADD $8, R_TMP0, R_TMP0 + SUB $8, R_LEN, R_LEN B finishSlowForwardCopy verySlowForwardCopy: @@ -465,12 +545,12 @@ verySlowForwardCopy: // break // } // } - MOVB (R15), R3 - MOVB R3, (R7) - ADD $1, R15, R15 - ADD $1, R7, R7 - SUB $1, R4, R4 - CBNZ R4, verySlowForwardCopy + MOVB (R_TMP3), R_TMP1 + MOVB R_TMP1, (R_DST) + ADD $1, R_TMP3, R_TMP3 + ADD $1, R_DST, R_DST + SUB $1, R_LEN, R_LEN + CBNZ R_LEN, verySlowForwardCopy B loop // The code above handles copy tags. @@ -480,7 +560,7 @@ end: // This is the end of the "for s < len(src)". 
// // if d != len(dst) { etc } - CMP R10, R7 + CMP R_DEND, R_DST BNE errCorrupt // return 0 @@ -489,6 +569,6 @@ end: errCorrupt: // return decodeErrCodeCorrupt - MOVD $1, R2 - MOVD R2, ret+48(FP) + MOVD $1, R_TMP0 + MOVD R_TMP0, ret+48(FP) RET diff --git a/vendor/github.com/golang/snappy/decode_asm.go b/vendor/github.com/klauspost/compress/s2/decode_asm.go similarity index 66% rename from vendor/github.com/golang/snappy/decode_asm.go rename to vendor/github.com/klauspost/compress/s2/decode_asm.go index 7082b34919..cb3576edd4 100644 --- a/vendor/github.com/golang/snappy/decode_asm.go +++ b/vendor/github.com/klauspost/compress/s2/decode_asm.go @@ -1,15 +1,17 @@ // Copyright 2016 The Snappy-Go Authors. All rights reserved. +// Copyright (c) 2019 Klaus Post. All rights reserved. // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. +//go:build (amd64 || arm64) && !appengine && gc && !noasm +// +build amd64 arm64 // +build !appengine // +build gc // +build !noasm -// +build amd64 arm64 -package snappy +package s2 // decode has the same semantics as in decode_other.go. // //go:noescape -func decode(dst, src []byte) int +func s2Decode(dst, src []byte) int diff --git a/vendor/github.com/klauspost/compress/s2/decode_other.go b/vendor/github.com/klauspost/compress/s2/decode_other.go new file mode 100644 index 0000000000..2cb55c2c77 --- /dev/null +++ b/vendor/github.com/klauspost/compress/s2/decode_other.go @@ -0,0 +1,292 @@ +// Copyright 2016 The Snappy-Go Authors. All rights reserved. +// Copyright (c) 2019 Klaus Post. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build (!amd64 && !arm64) || appengine || !gc || noasm +// +build !amd64,!arm64 appengine !gc noasm + +package s2 + +import ( + "fmt" + "strconv" +) + +// decode writes the decoding of src to dst. It assumes that the varint-encoded +// length of the decompressed bytes has already been read, and that len(dst) +// equals that length. +// +// It returns 0 on success or a decodeErrCodeXxx error code on failure. +func s2Decode(dst, src []byte) int { + const debug = false + if debug { + fmt.Println("Starting decode, dst len:", len(dst)) + } + var d, s, length int + offset := 0 + + // As long as we can read at least 5 bytes... + for s < len(src)-5 { + // Removing bounds checks is SLOWER, when if doing + // in := src[s:s+5] + // Checked on Go 1.18 + switch src[s] & 0x03 { + case tagLiteral: + x := uint32(src[s] >> 2) + switch { + case x < 60: + s++ + case x == 60: + s += 2 + x = uint32(src[s-1]) + case x == 61: + in := src[s : s+3] + x = uint32(in[1]) | uint32(in[2])<<8 + s += 3 + case x == 62: + in := src[s : s+4] + // Load as 32 bit and shift down. 
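
(A worked expansion of the two lines that follow: the four-byte little-endian load deliberately includes the tag byte in[0], and the subsequent x >>= 8 drops it, leaving uint32(in[1]) | uint32(in[2])<<8 | uint32(in[3])<<16, the same three-byte length that the slower tail loop's x == 62 case assembles from three separate byte loads.)
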
+ x = uint32(in[0]) | uint32(in[1])<<8 | uint32(in[2])<<16 | uint32(in[3])<<24 + x >>= 8 + s += 4 + case x == 63: + in := src[s : s+5] + x = uint32(in[1]) | uint32(in[2])<<8 | uint32(in[3])<<16 | uint32(in[4])<<24 + s += 5 + } + length = int(x) + 1 + if length > len(dst)-d || length > len(src)-s || (strconv.IntSize == 32 && length <= 0) { + if debug { + fmt.Println("corrupt: lit size", length) + } + return decodeErrCodeCorrupt + } + if debug { + fmt.Println("literals, length:", length, "d-after:", d+length) + } + + copy(dst[d:], src[s:s+length]) + d += length + s += length + continue + + case tagCopy1: + s += 2 + toffset := int(uint32(src[s-2])&0xe0<<3 | uint32(src[s-1])) + length = int(src[s-2]) >> 2 & 0x7 + if toffset == 0 { + if debug { + fmt.Print("(repeat) ") + } + // keep last offset + switch length { + case 5: + length = int(src[s]) + 4 + s += 1 + case 6: + in := src[s : s+2] + length = int(uint32(in[0])|(uint32(in[1])<<8)) + (1 << 8) + s += 2 + case 7: + in := src[s : s+3] + length = int((uint32(in[2])<<16)|(uint32(in[1])<<8)|uint32(in[0])) + (1 << 16) + s += 3 + default: // 0-> 4 + } + } else { + offset = toffset + } + length += 4 + case tagCopy2: + in := src[s : s+3] + offset = int(uint32(in[1]) | uint32(in[2])<<8) + length = 1 + int(in[0])>>2 + s += 3 + + case tagCopy4: + in := src[s : s+5] + offset = int(uint32(in[1]) | uint32(in[2])<<8 | uint32(in[3])<<16 | uint32(in[4])<<24) + length = 1 + int(in[0])>>2 + s += 5 + } + + if offset <= 0 || d < offset || length > len(dst)-d { + if debug { + fmt.Println("corrupt: match, length", length, "offset:", offset, "dst avail:", len(dst)-d, "dst pos:", d) + } + + return decodeErrCodeCorrupt + } + + if debug { + fmt.Println("copy, length:", length, "offset:", offset, "d-after:", d+length) + } + + // Copy from an earlier sub-slice of dst to a later sub-slice. + // If no overlap, use the built-in copy: + if offset > length { + copy(dst[d:d+length], dst[d-offset:]) + d += length + continue + } + + // Unlike the built-in copy function, this byte-by-byte copy always runs + // forwards, even if the slices overlap. Conceptually, this is: + // + // d += forwardCopy(dst[d:d+length], dst[d-offset:]) + // + // We align the slices into a and b and show the compiler they are the same size. + // This allows the loop to run without bounds checks. + a := dst[d : d+length] + b := dst[d-offset:] + b = b[:len(a)] + for i := range a { + a[i] = b[i] + } + d += length + } + + // Remaining with extra checks... + for s < len(src) { + switch src[s] & 0x03 { + case tagLiteral: + x := uint32(src[s] >> 2) + switch { + case x < 60: + s++ + case x == 60: + s += 2 + if uint(s) > uint(len(src)) { // The uint conversions catch overflow from the previous line. + return decodeErrCodeCorrupt + } + x = uint32(src[s-1]) + case x == 61: + s += 3 + if uint(s) > uint(len(src)) { // The uint conversions catch overflow from the previous line. + return decodeErrCodeCorrupt + } + x = uint32(src[s-2]) | uint32(src[s-1])<<8 + case x == 62: + s += 4 + if uint(s) > uint(len(src)) { // The uint conversions catch overflow from the previous line. + return decodeErrCodeCorrupt + } + x = uint32(src[s-3]) | uint32(src[s-2])<<8 | uint32(src[s-1])<<16 + case x == 63: + s += 5 + if uint(s) > uint(len(src)) { // The uint conversions catch overflow from the previous line. 
+ return decodeErrCodeCorrupt + } + x = uint32(src[s-4]) | uint32(src[s-3])<<8 | uint32(src[s-2])<<16 | uint32(src[s-1])<<24 + } + length = int(x) + 1 + if length > len(dst)-d || length > len(src)-s || (strconv.IntSize == 32 && length <= 0) { + if debug { + fmt.Println("corrupt: lit size", length) + } + return decodeErrCodeCorrupt + } + if debug { + fmt.Println("literals, length:", length, "d-after:", d+length) + } + + copy(dst[d:], src[s:s+length]) + d += length + s += length + continue + + case tagCopy1: + s += 2 + if uint(s) > uint(len(src)) { // The uint conversions catch overflow from the previous line. + return decodeErrCodeCorrupt + } + length = int(src[s-2]) >> 2 & 0x7 + toffset := int(uint32(src[s-2])&0xe0<<3 | uint32(src[s-1])) + if toffset == 0 { + if debug { + fmt.Print("(repeat) ") + } + // keep last offset + switch length { + case 5: + s += 1 + if uint(s) > uint(len(src)) { // The uint conversions catch overflow from the previous line. + return decodeErrCodeCorrupt + } + length = int(uint32(src[s-1])) + 4 + case 6: + s += 2 + if uint(s) > uint(len(src)) { // The uint conversions catch overflow from the previous line. + return decodeErrCodeCorrupt + } + length = int(uint32(src[s-2])|(uint32(src[s-1])<<8)) + (1 << 8) + case 7: + s += 3 + if uint(s) > uint(len(src)) { // The uint conversions catch overflow from the previous line. + return decodeErrCodeCorrupt + } + length = int(uint32(src[s-3])|(uint32(src[s-2])<<8)|(uint32(src[s-1])<<16)) + (1 << 16) + default: // 0-> 4 + } + } else { + offset = toffset + } + length += 4 + case tagCopy2: + s += 3 + if uint(s) > uint(len(src)) { // The uint conversions catch overflow from the previous line. + return decodeErrCodeCorrupt + } + length = 1 + int(src[s-3])>>2 + offset = int(uint32(src[s-2]) | uint32(src[s-1])<<8) + + case tagCopy4: + s += 5 + if uint(s) > uint(len(src)) { // The uint conversions catch overflow from the previous line. + return decodeErrCodeCorrupt + } + length = 1 + int(src[s-5])>>2 + offset = int(uint32(src[s-4]) | uint32(src[s-3])<<8 | uint32(src[s-2])<<16 | uint32(src[s-1])<<24) + } + + if offset <= 0 || d < offset || length > len(dst)-d { + if debug { + fmt.Println("corrupt: match, length", length, "offset:", offset, "dst avail:", len(dst)-d, "dst pos:", d) + } + return decodeErrCodeCorrupt + } + + if debug { + fmt.Println("copy, length:", length, "offset:", offset, "d-after:", d+length) + } + + // Copy from an earlier sub-slice of dst to a later sub-slice. + // If no overlap, use the built-in copy: + if offset > length { + copy(dst[d:d+length], dst[d-offset:]) + d += length + continue + } + + // Unlike the built-in copy function, this byte-by-byte copy always runs + // forwards, even if the slices overlap. Conceptually, this is: + // + // d += forwardCopy(dst[d:d+length], dst[d-offset:]) + // + // We align the slices into a and b and show the compiler they are the same size. + // This allows the loop to run without bounds checks. + a := dst[d : d+length] + b := dst[d-offset:] + b = b[:len(a)] + for i := range a { + a[i] = b[i] + } + d += length + } + + if d != len(dst) { + return decodeErrCodeCorrupt + } + return 0 +} diff --git a/vendor/github.com/klauspost/compress/s2/dict.go b/vendor/github.com/klauspost/compress/s2/dict.go new file mode 100644 index 0000000000..f125ad0963 --- /dev/null +++ b/vendor/github.com/klauspost/compress/s2/dict.go @@ -0,0 +1,350 @@ +// Copyright (c) 2022+ Klaus Post. All rights reserved. 
+// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package s2 + +import ( + "bytes" + "encoding/binary" + "sync" +) + +const ( + // MinDictSize is the minimum dictionary size when repeat has been read. + MinDictSize = 16 + + // MaxDictSize is the maximum dictionary size when repeat has been read. + MaxDictSize = 65536 + + // MaxDictSrcOffset is the maximum offset where a dictionary entry can start. + MaxDictSrcOffset = 65535 +) + +// Dict contains a dictionary that can be used for encoding and decoding s2 +type Dict struct { + dict []byte + repeat int // Repeat as index of dict + + fast, better, best sync.Once + fastTable *[1 << 14]uint16 + + betterTableShort *[1 << 14]uint16 + betterTableLong *[1 << 17]uint16 + + bestTableShort *[1 << 16]uint32 + bestTableLong *[1 << 19]uint32 +} + +// NewDict will read a dictionary. +// It will return nil if the dictionary is invalid. +func NewDict(dict []byte) *Dict { + if len(dict) == 0 { + return nil + } + var d Dict + // Repeat is the first value of the dict + r, n := binary.Uvarint(dict) + if n <= 0 { + return nil + } + dict = dict[n:] + d.dict = dict + if cap(d.dict) < len(d.dict)+16 { + d.dict = append(make([]byte, 0, len(d.dict)+16), d.dict...) + } + if len(dict) < MinDictSize || len(dict) > MaxDictSize { + return nil + } + d.repeat = int(r) + if d.repeat > len(dict) { + return nil + } + return &d +} + +// Bytes will return a serialized version of the dictionary. +// The output can be sent to NewDict. +func (d *Dict) Bytes() []byte { + dst := make([]byte, binary.MaxVarintLen16+len(d.dict)) + return append(dst[:binary.PutUvarint(dst, uint64(d.repeat))], d.dict...) +} + +// MakeDict will create a dictionary. +// 'data' must be at least MinDictSize. +// If data is longer than MaxDictSize only the last MaxDictSize bytes will be used. +// If searchStart is set the start repeat value will be set to the last +// match of this content. +// If no matches are found, it will attempt to find shorter matches. +// This content should match the typical start of a block. +// If at least 4 bytes cannot be matched, repeat is set to start of block. +func MakeDict(data []byte, searchStart []byte) *Dict { + if len(data) == 0 { + return nil + } + if len(data) > MaxDictSize { + data = data[len(data)-MaxDictSize:] + } + var d Dict + dict := data + d.dict = dict + if cap(d.dict) < len(d.dict)+16 { + d.dict = append(make([]byte, 0, len(d.dict)+16), d.dict...) + } + if len(dict) < MinDictSize { + return nil + } + + // Find the longest match possible, last entry if multiple. + for s := len(searchStart); s > 4; s-- { + if idx := bytes.LastIndex(data, searchStart[:s]); idx >= 0 && idx <= len(data)-8 { + d.repeat = idx + break + } + } + + return &d +} + +// MakeDictManual will create a dictionary. +// 'data' must be at least MinDictSize and less than or equal to MaxDictSize. +// A manual first repeat index into data must be provided. +// It must be less than len(data)-8. +func MakeDictManual(data []byte, firstIdx uint16) *Dict { + if len(data) < MinDictSize || int(firstIdx) >= len(data)-8 || len(data) > MaxDictSize { + return nil + } + var d Dict + dict := data + d.dict = dict + if cap(d.dict) < len(d.dict)+16 { + d.dict = append(make([]byte, 0, len(d.dict)+16), d.dict...) + } + + d.repeat = int(firstIdx) + return &d +} + +// Encode returns the encoded form of src. The returned slice may be a sub- +// slice of dst if dst was large enough to hold the entire encoded block. 
+// Otherwise, a newly allocated slice will be returned. +// +// The dst and src must not overlap. It is valid to pass a nil dst. +// +// The blocks will require the same amount of memory to decode as encoding, +// and does not make for concurrent decoding. +// Also note that blocks do not contain CRC information, so corruption may be undetected. +// +// If you need to encode larger amounts of data, consider using +// the streaming interface which gives all of these features. +func (d *Dict) Encode(dst, src []byte) []byte { + if n := MaxEncodedLen(len(src)); n < 0 { + panic(ErrTooLarge) + } else if cap(dst) < n { + dst = make([]byte, n) + } else { + dst = dst[:n] + } + + // The block starts with the varint-encoded length of the decompressed bytes. + dstP := binary.PutUvarint(dst, uint64(len(src))) + + if len(src) == 0 { + return dst[:dstP] + } + if len(src) < minNonLiteralBlockSize { + dstP += emitLiteral(dst[dstP:], src) + return dst[:dstP] + } + n := encodeBlockDictGo(dst[dstP:], src, d) + if n > 0 { + dstP += n + return dst[:dstP] + } + // Not compressible + dstP += emitLiteral(dst[dstP:], src) + return dst[:dstP] +} + +// EncodeBetter returns the encoded form of src. The returned slice may be a sub- +// slice of dst if dst was large enough to hold the entire encoded block. +// Otherwise, a newly allocated slice will be returned. +// +// EncodeBetter compresses better than Encode but typically with a +// 10-40% speed decrease on both compression and decompression. +// +// The dst and src must not overlap. It is valid to pass a nil dst. +// +// The blocks will require the same amount of memory to decode as encoding, +// and does not make for concurrent decoding. +// Also note that blocks do not contain CRC information, so corruption may be undetected. +// +// If you need to encode larger amounts of data, consider using +// the streaming interface which gives all of these features. +func (d *Dict) EncodeBetter(dst, src []byte) []byte { + if n := MaxEncodedLen(len(src)); n < 0 { + panic(ErrTooLarge) + } else if len(dst) < n { + dst = make([]byte, n) + } + + // The block starts with the varint-encoded length of the decompressed bytes. + dstP := binary.PutUvarint(dst, uint64(len(src))) + + if len(src) == 0 { + return dst[:dstP] + } + if len(src) < minNonLiteralBlockSize { + dstP += emitLiteral(dst[dstP:], src) + return dst[:dstP] + } + n := encodeBlockBetterDict(dst[dstP:], src, d) + if n > 0 { + dstP += n + return dst[:dstP] + } + // Not compressible + dstP += emitLiteral(dst[dstP:], src) + return dst[:dstP] +} + +// EncodeBest returns the encoded form of src. The returned slice may be a sub- +// slice of dst if dst was large enough to hold the entire encoded block. +// Otherwise, a newly allocated slice will be returned. +// +// EncodeBest compresses as good as reasonably possible but with a +// big speed decrease. +// +// The dst and src must not overlap. It is valid to pass a nil dst. +// +// The blocks will require the same amount of memory to decode as encoding, +// and does not make for concurrent decoding. +// Also note that blocks do not contain CRC information, so corruption may be undetected. +// +// If you need to encode larger amounts of data, consider using +// the streaming interface which gives all of these features. +func (d *Dict) EncodeBest(dst, src []byte) []byte { + if n := MaxEncodedLen(len(src)); n < 0 { + panic(ErrTooLarge) + } else if len(dst) < n { + dst = make([]byte, n) + } + + // The block starts with the varint-encoded length of the decompressed bytes. 
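
As an aside on the framing shared by every Encode* variant here: the header is a plain unsigned varint holding the decompressed size, which the decode path recovers via the package's decodedLen helper before walking the tag stream. A minimal read-side sketch (hypothetical helper, assuming encoding/binary is imported; not the vendored code):

    // headerLen reports a block's decompressed size and how many header
    // bytes precede the tag stream; ok is false on a malformed varint.
    func headerLen(block []byte) (dLen, hdr int, ok bool) {
        v, n := binary.Uvarint(block)
        return int(v), n, n > 0
    }
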
+ dstP := binary.PutUvarint(dst, uint64(len(src))) + + if len(src) == 0 { + return dst[:dstP] + } + if len(src) < minNonLiteralBlockSize { + dstP += emitLiteral(dst[dstP:], src) + return dst[:dstP] + } + n := encodeBlockBest(dst[dstP:], src, d) + if n > 0 { + dstP += n + return dst[:dstP] + } + // Not compressible + dstP += emitLiteral(dst[dstP:], src) + return dst[:dstP] +} + +// Decode returns the decoded form of src. The returned slice may be a sub- +// slice of dst if dst was large enough to hold the entire decoded block. +// Otherwise, a newly allocated slice will be returned. +// +// The dst and src must not overlap. It is valid to pass a nil dst. +func (d *Dict) Decode(dst, src []byte) ([]byte, error) { + dLen, s, err := decodedLen(src) + if err != nil { + return nil, err + } + if dLen <= cap(dst) { + dst = dst[:dLen] + } else { + dst = make([]byte, dLen) + } + if s2DecodeDict(dst, src[s:], d) != 0 { + return nil, ErrCorrupt + } + return dst, nil +} + +func (d *Dict) initFast() { + d.fast.Do(func() { + const ( + tableBits = 14 + maxTableSize = 1 << tableBits + ) + + var table [maxTableSize]uint16 + // We stop so any entry of length 8 can always be read. + for i := 0; i < len(d.dict)-8-2; i += 3 { + x0 := load64(d.dict, i) + h0 := hash6(x0, tableBits) + h1 := hash6(x0>>8, tableBits) + h2 := hash6(x0>>16, tableBits) + table[h0] = uint16(i) + table[h1] = uint16(i + 1) + table[h2] = uint16(i + 2) + } + d.fastTable = &table + }) +} + +func (d *Dict) initBetter() { + d.better.Do(func() { + const ( + // Long hash matches. + lTableBits = 17 + maxLTableSize = 1 << lTableBits + + // Short hash matches. + sTableBits = 14 + maxSTableSize = 1 << sTableBits + ) + + var lTable [maxLTableSize]uint16 + var sTable [maxSTableSize]uint16 + + // We stop so any entry of length 8 can always be read. + for i := 0; i < len(d.dict)-8; i++ { + cv := load64(d.dict, i) + lTable[hash7(cv, lTableBits)] = uint16(i) + sTable[hash4(cv, sTableBits)] = uint16(i) + } + d.betterTableShort = &sTable + d.betterTableLong = &lTable + }) +} + +func (d *Dict) initBest() { + d.best.Do(func() { + const ( + // Long hash matches. + lTableBits = 19 + maxLTableSize = 1 << lTableBits + + // Short hash matches. + sTableBits = 16 + maxSTableSize = 1 << sTableBits + ) + + var lTable [maxLTableSize]uint32 + var sTable [maxSTableSize]uint32 + + // We stop so any entry of length 8 can always be read. + for i := 0; i < len(d.dict)-8; i++ { + cv := load64(d.dict, i) + hashL := hash8(cv, lTableBits) + hashS := hash4(cv, sTableBits) + candidateL := lTable[hashL] + candidateS := sTable[hashS] + lTable[hashL] = uint32(i) | candidateL<<16 + sTable[hashS] = uint32(i) | candidateS<<16 + } + d.bestTableShort = &sTable + d.bestTableLong = &lTable + }) +} diff --git a/vendor/github.com/klauspost/compress/s2/encode.go b/vendor/github.com/klauspost/compress/s2/encode.go new file mode 100644 index 0000000000..20b802270a --- /dev/null +++ b/vendor/github.com/klauspost/compress/s2/encode.go @@ -0,0 +1,414 @@ +// Copyright 2011 The Snappy-Go Authors. All rights reserved. +// Copyright (c) 2019 Klaus Post. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package s2 + +import ( + "encoding/binary" + "math" + "math/bits" + "sync" + + "github.com/klauspost/compress/internal/race" +) + +// Encode returns the encoded form of src. The returned slice may be a sub- +// slice of dst if dst was large enough to hold the entire encoded block. 
+// Otherwise, a newly allocated slice will be returned. +// +// The dst and src must not overlap. It is valid to pass a nil dst. +// +// The blocks will require the same amount of memory to decode as encoding, +// and does not make for concurrent decoding. +// Also note that blocks do not contain CRC information, so corruption may be undetected. +// +// If you need to encode larger amounts of data, consider using +// the streaming interface which gives all of these features. +func Encode(dst, src []byte) []byte { + if n := MaxEncodedLen(len(src)); n < 0 { + panic(ErrTooLarge) + } else if cap(dst) < n { + dst = make([]byte, n) + } else { + dst = dst[:n] + } + + // The block starts with the varint-encoded length of the decompressed bytes. + d := binary.PutUvarint(dst, uint64(len(src))) + + if len(src) == 0 { + return dst[:d] + } + if len(src) < minNonLiteralBlockSize { + d += emitLiteral(dst[d:], src) + return dst[:d] + } + n := encodeBlock(dst[d:], src) + if n > 0 { + d += n + return dst[:d] + } + // Not compressible + d += emitLiteral(dst[d:], src) + return dst[:d] +} + +var estblockPool [2]sync.Pool + +// EstimateBlockSize will perform a very fast compression +// without outputting the result and return the compressed output size. +// The function returns -1 if no improvement could be achieved. +// Using actual compression will most often produce better compression than the estimate. +func EstimateBlockSize(src []byte) (d int) { + if len(src) <= inputMargin || int64(len(src)) > 0xffffffff { + return -1 + } + if len(src) <= 1024 { + const sz, pool = 2048, 0 + tmp, ok := estblockPool[pool].Get().(*[sz]byte) + if !ok { + tmp = &[sz]byte{} + } + race.WriteSlice(tmp[:]) + defer estblockPool[pool].Put(tmp) + + d = calcBlockSizeSmall(src, tmp) + } else { + const sz, pool = 32768, 1 + tmp, ok := estblockPool[pool].Get().(*[sz]byte) + if !ok { + tmp = &[sz]byte{} + } + race.WriteSlice(tmp[:]) + defer estblockPool[pool].Put(tmp) + + d = calcBlockSize(src, tmp) + } + + if d == 0 { + return -1 + } + // Size of the varint encoded block size. + d += (bits.Len64(uint64(len(src))) + 7) / 7 + + if d >= len(src) { + return -1 + } + return d +} + +// EncodeBetter returns the encoded form of src. The returned slice may be a sub- +// slice of dst if dst was large enough to hold the entire encoded block. +// Otherwise, a newly allocated slice will be returned. +// +// EncodeBetter compresses better than Encode but typically with a +// 10-40% speed decrease on both compression and decompression. +// +// The dst and src must not overlap. It is valid to pass a nil dst. +// +// The blocks will require the same amount of memory to decode as encoding, +// and does not make for concurrent decoding. +// Also note that blocks do not contain CRC information, so corruption may be undetected. +// +// If you need to encode larger amounts of data, consider using +// the streaming interface which gives all of these features. +func EncodeBetter(dst, src []byte) []byte { + if n := MaxEncodedLen(len(src)); n < 0 { + panic(ErrTooLarge) + } else if len(dst) < n { + dst = make([]byte, n) + } + + // The block starts with the varint-encoded length of the decompressed bytes. 
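
For orientation, the block API added in this file is symmetric with the package's Decode: compress with any Encode* variant, decompress with Decode. A usage sketch under that assumption (assuming the bytes package is imported; error handling abbreviated):

    comp := s2.EncodeBetter(nil, data) // nil dst: the encoder allocates
    back, err := s2.Decode(nil, comp)
    if err != nil || !bytes.Equal(back, data) {
        panic("round trip failed")
    }
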
+ d := binary.PutUvarint(dst, uint64(len(src))) + + if len(src) == 0 { + return dst[:d] + } + if len(src) < minNonLiteralBlockSize { + d += emitLiteral(dst[d:], src) + return dst[:d] + } + n := encodeBlockBetter(dst[d:], src) + if n > 0 { + d += n + return dst[:d] + } + // Not compressible + d += emitLiteral(dst[d:], src) + return dst[:d] +} + +// EncodeBest returns the encoded form of src. The returned slice may be a sub- +// slice of dst if dst was large enough to hold the entire encoded block. +// Otherwise, a newly allocated slice will be returned. +// +// EncodeBest compresses as good as reasonably possible but with a +// big speed decrease. +// +// The dst and src must not overlap. It is valid to pass a nil dst. +// +// The blocks will require the same amount of memory to decode as encoding, +// and does not make for concurrent decoding. +// Also note that blocks do not contain CRC information, so corruption may be undetected. +// +// If you need to encode larger amounts of data, consider using +// the streaming interface which gives all of these features. +func EncodeBest(dst, src []byte) []byte { + if n := MaxEncodedLen(len(src)); n < 0 { + panic(ErrTooLarge) + } else if len(dst) < n { + dst = make([]byte, n) + } + + // The block starts with the varint-encoded length of the decompressed bytes. + d := binary.PutUvarint(dst, uint64(len(src))) + + if len(src) == 0 { + return dst[:d] + } + if len(src) < minNonLiteralBlockSize { + d += emitLiteral(dst[d:], src) + return dst[:d] + } + n := encodeBlockBest(dst[d:], src, nil) + if n > 0 { + d += n + return dst[:d] + } + // Not compressible + d += emitLiteral(dst[d:], src) + return dst[:d] +} + +// EncodeSnappy returns the encoded form of src. The returned slice may be a sub- +// slice of dst if dst was large enough to hold the entire encoded block. +// Otherwise, a newly allocated slice will be returned. +// +// The output is Snappy compatible and will likely decompress faster. +// +// The dst and src must not overlap. It is valid to pass a nil dst. +// +// The blocks will require the same amount of memory to decode as encoding, +// and does not make for concurrent decoding. +// Also note that blocks do not contain CRC information, so corruption may be undetected. +// +// If you need to encode larger amounts of data, consider using +// the streaming interface which gives all of these features. +func EncodeSnappy(dst, src []byte) []byte { + if n := MaxEncodedLen(len(src)); n < 0 { + panic(ErrTooLarge) + } else if cap(dst) < n { + dst = make([]byte, n) + } else { + dst = dst[:n] + } + + // The block starts with the varint-encoded length of the decompressed bytes. + d := binary.PutUvarint(dst, uint64(len(src))) + + if len(src) == 0 { + return dst[:d] + } + if len(src) < minNonLiteralBlockSize { + d += emitLiteral(dst[d:], src) + return dst[:d] + } + + n := encodeBlockSnappy(dst[d:], src) + if n > 0 { + d += n + return dst[:d] + } + // Not compressible + d += emitLiteral(dst[d:], src) + return dst[:d] +} + +// EncodeSnappyBetter returns the encoded form of src. The returned slice may be a sub- +// slice of dst if dst was large enough to hold the entire encoded block. +// Otherwise, a newly allocated slice will be returned. +// +// The output is Snappy compatible and will likely decompress faster. +// +// The dst and src must not overlap. It is valid to pass a nil dst. +// +// The blocks will require the same amount of memory to decode as encoding, +// and does not make for concurrent decoding. 
+// Also note that blocks do not contain CRC information, so corruption may be undetected. +// +// If you need to encode larger amounts of data, consider using +// the streaming interface which gives all of these features. +func EncodeSnappyBetter(dst, src []byte) []byte { + if n := MaxEncodedLen(len(src)); n < 0 { + panic(ErrTooLarge) + } else if cap(dst) < n { + dst = make([]byte, n) + } else { + dst = dst[:n] + } + + // The block starts with the varint-encoded length of the decompressed bytes. + d := binary.PutUvarint(dst, uint64(len(src))) + + if len(src) == 0 { + return dst[:d] + } + if len(src) < minNonLiteralBlockSize { + d += emitLiteral(dst[d:], src) + return dst[:d] + } + + n := encodeBlockBetterSnappy(dst[d:], src) + if n > 0 { + d += n + return dst[:d] + } + // Not compressible + d += emitLiteral(dst[d:], src) + return dst[:d] +} + +// EncodeSnappyBest returns the encoded form of src. The returned slice may be a sub- +// slice of dst if dst was large enough to hold the entire encoded block. +// Otherwise, a newly allocated slice will be returned. +// +// The output is Snappy compatible and will likely decompress faster. +// +// The dst and src must not overlap. It is valid to pass a nil dst. +// +// The blocks will require the same amount of memory to decode as encoding, +// and does not make for concurrent decoding. +// Also note that blocks do not contain CRC information, so corruption may be undetected. +// +// If you need to encode larger amounts of data, consider using +// the streaming interface which gives all of these features. +func EncodeSnappyBest(dst, src []byte) []byte { + if n := MaxEncodedLen(len(src)); n < 0 { + panic(ErrTooLarge) + } else if cap(dst) < n { + dst = make([]byte, n) + } else { + dst = dst[:n] + } + + // The block starts with the varint-encoded length of the decompressed bytes. + d := binary.PutUvarint(dst, uint64(len(src))) + + if len(src) == 0 { + return dst[:d] + } + if len(src) < minNonLiteralBlockSize { + d += emitLiteral(dst[d:], src) + return dst[:d] + } + + n := encodeBlockBestSnappy(dst[d:], src) + if n > 0 { + d += n + return dst[:d] + } + // Not compressible + d += emitLiteral(dst[d:], src) + return dst[:d] +} + +// ConcatBlocks will concatenate the supplied blocks and append them to the supplied destination. +// If the destination is nil or too small, a new will be allocated. +// The blocks are not validated, so garbage in = garbage out. +// dst may not overlap block data. +// Any data in dst is preserved as is, so it will not be considered a block. +func ConcatBlocks(dst []byte, blocks ...[]byte) ([]byte, error) { + totalSize := uint64(0) + compSize := 0 + for _, b := range blocks { + l, hdr, err := decodedLen(b) + if err != nil { + return nil, err + } + totalSize += uint64(l) + compSize += len(b) - hdr + } + if totalSize == 0 { + dst = append(dst, 0) + return dst, nil + } + if totalSize > math.MaxUint32 { + return nil, ErrTooLarge + } + var tmp [binary.MaxVarintLen32]byte + hdrSize := binary.PutUvarint(tmp[:], totalSize) + wantSize := hdrSize + compSize + + if cap(dst)-len(dst) < wantSize { + dst = append(make([]byte, 0, wantSize+len(dst)), dst...) + } + dst = append(dst, tmp[:hdrSize]...) + for _, b := range blocks { + _, hdr, err := decodedLen(b) + if err != nil { + return nil, err + } + dst = append(dst, b[hdr:]...) + } + return dst, nil +} + +// inputMargin is the minimum number of extra input bytes to keep, inside +// encodeBlock's inner loop. 
On some architectures, this margin lets us +// implement a fast path for emitLiteral, where the copy of short (<= 16 byte) +// literals can be implemented as a single load to and store from a 16-byte +// register. That literal's actual length can be as short as 1 byte, so this +// can copy up to 15 bytes too much, but that's OK as subsequent iterations of +// the encoding loop will fix up the copy overrun, and this inputMargin ensures +// that we don't overrun the dst and src buffers. +const inputMargin = 8 + +// minNonLiteralBlockSize is the minimum size of the input to encodeBlock that +// will be accepted by the encoder. +const minNonLiteralBlockSize = 32 + +const intReduction = 2 - (1 << (^uint(0) >> 63)) // 1 (32 bits) or 0 (64 bits) + +// MaxBlockSize is the maximum value where MaxEncodedLen will return a valid block size. +// Blocks this big are highly discouraged, though. +// Half the size on 32 bit systems. +const MaxBlockSize = (1<<(32-intReduction) - 1) - binary.MaxVarintLen32 - 5 + +// MaxEncodedLen returns the maximum length of a snappy block, given its +// uncompressed length. +// +// It will return a negative value if srcLen is too large to encode. +// 32 bit platforms will have lower thresholds for rejecting big content. +func MaxEncodedLen(srcLen int) int { + n := uint64(srcLen) + if intReduction == 1 { + // 32 bits + if n > math.MaxInt32 { + // Also includes negative. + return -1 + } + } else if n > 0xffffffff { + // 64 bits + // Also includes negative. + return -1 + } + // Size of the varint encoded block size. + n = n + uint64((bits.Len64(n)+7)/7) + + // Add maximum size of encoding block as literals. + n += uint64(literalExtraSize(int64(srcLen))) + if intReduction == 1 { + // 32 bits + if n > math.MaxInt32 { + return -1 + } + } else if n > 0xffffffff { + // 64 bits + // Also includes negative. + return -1 + } + return int(n) +} diff --git a/vendor/github.com/klauspost/compress/s2/encode_all.go b/vendor/github.com/klauspost/compress/s2/encode_all.go new file mode 100644 index 0000000000..9977045696 --- /dev/null +++ b/vendor/github.com/klauspost/compress/s2/encode_all.go @@ -0,0 +1,1068 @@ +// Copyright 2016 The Snappy-Go Authors. All rights reserved. +// Copyright (c) 2019 Klaus Post. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package s2 + +import ( + "bytes" + "encoding/binary" + "fmt" + "math/bits" +) + +func load32(b []byte, i int) uint32 { + return binary.LittleEndian.Uint32(b[i:]) +} + +func load64(b []byte, i int) uint64 { + return binary.LittleEndian.Uint64(b[i:]) +} + +// hash6 returns the hash of the lowest 6 bytes of u to fit in a hash table with h bits. +// Preferably h should be a constant and should always be <64. +func hash6(u uint64, h uint8) uint32 { + const prime6bytes = 227718039650203 + return uint32(((u << (64 - 48)) * prime6bytes) >> ((64 - h) & 63)) +} + +func encodeGo(dst, src []byte) []byte { + if n := MaxEncodedLen(len(src)); n < 0 { + panic(ErrTooLarge) + } else if len(dst) < n { + dst = make([]byte, n) + } + + // The block starts with the varint-encoded length of the decompressed bytes. 
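
One idiom worth calling out before the block encoders below: matches are extended eight bytes at a time by XORing two little-endian loads; for the first differing word, bits.TrailingZeros64(diff)>>3 is exactly the count of leading bytes that still matched. The same idea as a standalone sketch (hypothetical helper, assuming encoding/binary and math/bits are imported; not part of the vendored file):

    // matchLen counts the leading bytes at which a and b agree.
    func matchLen(a, b []byte) (n int) {
        for len(a) >= 8 && len(b) >= 8 {
            if diff := binary.LittleEndian.Uint64(a) ^ binary.LittleEndian.Uint64(b); diff != 0 {
                // Equal leading bytes zero out the low-order bytes of diff,
                // so the trailing zero bits count the matching prefix.
                return n + bits.TrailingZeros64(diff)>>3
            }
            a, b, n = a[8:], b[8:], n+8
        }
        // Finish byte by byte once fewer than 8 bytes remain.
        for i := 0; i < len(a) && i < len(b) && a[i] == b[i]; i++ {
            n++
        }
        return n
    }
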
+ d := binary.PutUvarint(dst, uint64(len(src))) + + if len(src) == 0 { + return dst[:d] + } + if len(src) < minNonLiteralBlockSize { + d += emitLiteral(dst[d:], src) + return dst[:d] + } + n := encodeBlockGo(dst[d:], src) + if n > 0 { + d += n + return dst[:d] + } + // Not compressible + d += emitLiteral(dst[d:], src) + return dst[:d] +} + +// encodeBlockGo encodes a non-empty src to a guaranteed-large-enough dst. It +// assumes that the varint-encoded length of the decompressed bytes has already +// been written. +// +// It also assumes that: +// +// len(dst) >= MaxEncodedLen(len(src)) && +// minNonLiteralBlockSize <= len(src) && len(src) <= maxBlockSize +func encodeBlockGo(dst, src []byte) (d int) { + // Initialize the hash table. + const ( + tableBits = 14 + maxTableSize = 1 << tableBits + + debug = false + ) + + var table [maxTableSize]uint32 + + // sLimit is when to stop looking for offset/length copies. The inputMargin + // lets us use a fast path for emitLiteral in the main loop, while we are + // looking for copies. + sLimit := len(src) - inputMargin + + // Bail if we can't compress to at least this. + dstLimit := len(src) - len(src)>>5 - 5 + + // nextEmit is where in src the next emitLiteral should start from. + nextEmit := 0 + + // The encoded form must start with a literal, as there are no previous + // bytes to copy, so we start looking for hash matches at s == 1. + s := 1 + cv := load64(src, s) + + // We search for a repeat at -1, but don't output repeats when nextEmit == 0 + repeat := 1 + + for { + candidate := 0 + for { + // Next src position to check + nextS := s + (s-nextEmit)>>6 + 4 + if nextS > sLimit { + goto emitRemainder + } + hash0 := hash6(cv, tableBits) + hash1 := hash6(cv>>8, tableBits) + candidate = int(table[hash0]) + candidate2 := int(table[hash1]) + table[hash0] = uint32(s) + table[hash1] = uint32(s + 1) + hash2 := hash6(cv>>16, tableBits) + + // Check repeat at offset checkRep. + const checkRep = 1 + if uint32(cv>>(checkRep*8)) == load32(src, s-repeat+checkRep) { + base := s + checkRep + // Extend back + for i := base - repeat; base > nextEmit && i > 0 && src[i-1] == src[base-1]; { + i-- + base-- + } + + // Bail if we exceed the maximum size. + if d+(base-nextEmit) > dstLimit { + return 0 + } + + d += emitLiteral(dst[d:], src[nextEmit:base]) + + // Extend forward + candidate := s - repeat + 4 + checkRep + s += 4 + checkRep + for s <= sLimit { + if diff := load64(src, s) ^ load64(src, candidate); diff != 0 { + s += bits.TrailingZeros64(diff) >> 3 + break + } + s += 8 + candidate += 8 + } + if debug { + // Validate match. + if s <= candidate { + panic("s <= candidate") + } + a := src[base:s] + b := src[base-repeat : base-repeat+(s-base)] + if !bytes.Equal(a, b) { + panic("mismatch") + } + } + if nextEmit > 0 { + // same as `add := emitCopy(dst[d:], repeat, s-base)` but skips storing offset. + d += emitRepeat(dst[d:], repeat, s-base) + } else { + // First match, cannot be repeat. + d += emitCopy(dst[d:], repeat, s-base) + } + nextEmit = s + if s >= sLimit { + goto emitRemainder + } + cv = load64(src, s) + continue + } + + if uint32(cv) == load32(src, candidate) { + break + } + candidate = int(table[hash2]) + if uint32(cv>>8) == load32(src, candidate2) { + table[hash2] = uint32(s + 2) + candidate = candidate2 + s++ + break + } + table[hash2] = uint32(s + 2) + if uint32(cv>>16) == load32(src, candidate) { + s += 2 + break + } + + cv = load64(src, nextS) + s = nextS + } + + // Extend backwards. + // The top bytes will be rechecked to get the full match. 
+ for candidate > 0 && s > nextEmit && src[candidate-1] == src[s-1] { + candidate-- + s-- + } + + // Bail if we exceed the maximum size. + if d+(s-nextEmit) > dstLimit { + return 0 + } + + // A 4-byte match has been found. We'll later see if more than 4 bytes + // match. But, prior to the match, src[nextEmit:s] are unmatched. Emit + // them as literal bytes. + + d += emitLiteral(dst[d:], src[nextEmit:s]) + + // Call emitCopy, and then see if another emitCopy could be our next + // move. Repeat until we find no match for the input immediately after + // what was consumed by the last emitCopy call. + // + // If we exit this loop normally then we need to call emitLiteral next, + // though we don't yet know how big the literal will be. We handle that + // by proceeding to the next iteration of the main loop. We also can + // exit this loop via goto if we get close to exhausting the input. + for { + // Invariant: we have a 4-byte match at s, and no need to emit any + // literal bytes prior to s. + base := s + repeat = base - candidate + + // Extend the 4-byte match as long as possible. + s += 4 + candidate += 4 + for s <= len(src)-8 { + if diff := load64(src, s) ^ load64(src, candidate); diff != 0 { + s += bits.TrailingZeros64(diff) >> 3 + break + } + s += 8 + candidate += 8 + } + + d += emitCopy(dst[d:], repeat, s-base) + if debug { + // Validate match. + if s <= candidate { + panic("s <= candidate") + } + a := src[base:s] + b := src[base-repeat : base-repeat+(s-base)] + if !bytes.Equal(a, b) { + panic("mismatch") + } + } + + nextEmit = s + if s >= sLimit { + goto emitRemainder + } + + if d > dstLimit { + // Do we have space for more, if not bail. + return 0 + } + // Check for an immediate match, otherwise start search at s+1 + x := load64(src, s-2) + m2Hash := hash6(x, tableBits) + currHash := hash6(x>>16, tableBits) + candidate = int(table[currHash]) + table[m2Hash] = uint32(s - 2) + table[currHash] = uint32(s) + if debug && s == candidate { + panic("s == candidate") + } + if uint32(x>>16) != load32(src, candidate) { + cv = load64(src, s+1) + s++ + break + } + } + } + +emitRemainder: + if nextEmit < len(src) { + // Bail if we exceed the maximum size. + if d+len(src)-nextEmit > dstLimit { + return 0 + } + d += emitLiteral(dst[d:], src[nextEmit:]) + } + return d +} + +func encodeBlockSnappyGo(dst, src []byte) (d int) { + // Initialize the hash table. + const ( + tableBits = 14 + maxTableSize = 1 << tableBits + ) + + var table [maxTableSize]uint32 + + // sLimit is when to stop looking for offset/length copies. The inputMargin + // lets us use a fast path for emitLiteral in the main loop, while we are + // looking for copies. + sLimit := len(src) - inputMargin + + // Bail if we can't compress to at least this. + dstLimit := len(src) - len(src)>>5 - 5 + + // nextEmit is where in src the next emitLiteral should start from. + nextEmit := 0 + + // The encoded form must start with a literal, as there are no previous + // bytes to copy, so we start looking for hash matches at s == 1. 
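
A note on the probe spacing used by the search loop below: nextS advances by (s-nextEmit)>>6 + 4, so the gap between probes grows as unmatched bytes accumulate. Worked out: with fewer than 64 pending literal bytes each failed probe skips 4 bytes ahead, from 64 it skips 5, from 128 it skips 6, and so on, letting the encoder sprint across incompressible regions instead of probing every position.
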
+ s := 1 + cv := load64(src, s) + + // We search for a repeat at -1, but don't output repeats when nextEmit == 0 + repeat := 1 + + for { + candidate := 0 + for { + // Next src position to check + nextS := s + (s-nextEmit)>>6 + 4 + if nextS > sLimit { + goto emitRemainder + } + hash0 := hash6(cv, tableBits) + hash1 := hash6(cv>>8, tableBits) + candidate = int(table[hash0]) + candidate2 := int(table[hash1]) + table[hash0] = uint32(s) + table[hash1] = uint32(s + 1) + hash2 := hash6(cv>>16, tableBits) + + // Check repeat at offset checkRep. + const checkRep = 1 + if uint32(cv>>(checkRep*8)) == load32(src, s-repeat+checkRep) { + base := s + checkRep + // Extend back + for i := base - repeat; base > nextEmit && i > 0 && src[i-1] == src[base-1]; { + i-- + base-- + } + // Bail if we exceed the maximum size. + if d+(base-nextEmit) > dstLimit { + return 0 + } + + d += emitLiteral(dst[d:], src[nextEmit:base]) + + // Extend forward + candidate := s - repeat + 4 + checkRep + s += 4 + checkRep + for s <= sLimit { + if diff := load64(src, s) ^ load64(src, candidate); diff != 0 { + s += bits.TrailingZeros64(diff) >> 3 + break + } + s += 8 + candidate += 8 + } + + d += emitCopyNoRepeat(dst[d:], repeat, s-base) + nextEmit = s + if s >= sLimit { + goto emitRemainder + } + + cv = load64(src, s) + continue + } + + if uint32(cv) == load32(src, candidate) { + break + } + candidate = int(table[hash2]) + if uint32(cv>>8) == load32(src, candidate2) { + table[hash2] = uint32(s + 2) + candidate = candidate2 + s++ + break + } + table[hash2] = uint32(s + 2) + if uint32(cv>>16) == load32(src, candidate) { + s += 2 + break + } + + cv = load64(src, nextS) + s = nextS + } + + // Extend backwards + for candidate > 0 && s > nextEmit && src[candidate-1] == src[s-1] { + candidate-- + s-- + } + + // Bail if we exceed the maximum size. + if d+(s-nextEmit) > dstLimit { + return 0 + } + + // A 4-byte match has been found. We'll later see if more than 4 bytes + // match. But, prior to the match, src[nextEmit:s] are unmatched. Emit + // them as literal bytes. + + d += emitLiteral(dst[d:], src[nextEmit:s]) + + // Call emitCopy, and then see if another emitCopy could be our next + // move. Repeat until we find no match for the input immediately after + // what was consumed by the last emitCopy call. + // + // If we exit this loop normally then we need to call emitLiteral next, + // though we don't yet know how big the literal will be. We handle that + // by proceeding to the next iteration of the main loop. We also can + // exit this loop via goto if we get close to exhausting the input. + for { + // Invariant: we have a 4-byte match at s, and no need to emit any + // literal bytes prior to s. + base := s + repeat = base - candidate + + // Extend the 4-byte match as long as possible. + s += 4 + candidate += 4 + for s <= len(src)-8 { + if diff := load64(src, s) ^ load64(src, candidate); diff != 0 { + s += bits.TrailingZeros64(diff) >> 3 + break + } + s += 8 + candidate += 8 + } + + d += emitCopyNoRepeat(dst[d:], repeat, s-base) + if false { + // Validate match. + a := src[base:s] + b := src[base-repeat : base-repeat+(s-base)] + if !bytes.Equal(a, b) { + panic("mismatch") + } + } + + nextEmit = s + if s >= sLimit { + goto emitRemainder + } + + if d > dstLimit { + // Do we have space for more, if not bail. 
+ return 0 + } + // Check for an immediate match, otherwise start search at s+1 + x := load64(src, s-2) + m2Hash := hash6(x, tableBits) + currHash := hash6(x>>16, tableBits) + candidate = int(table[currHash]) + table[m2Hash] = uint32(s - 2) + table[currHash] = uint32(s) + if uint32(x>>16) != load32(src, candidate) { + cv = load64(src, s+1) + s++ + break + } + } + } + +emitRemainder: + if nextEmit < len(src) { + // Bail if we exceed the maximum size. + if d+len(src)-nextEmit > dstLimit { + return 0 + } + d += emitLiteral(dst[d:], src[nextEmit:]) + } + return d +} + +// encodeBlockGo encodes a non-empty src to a guaranteed-large-enough dst. It +// assumes that the varint-encoded length of the decompressed bytes has already +// been written. +// +// It also assumes that: +// +// len(dst) >= MaxEncodedLen(len(src)) && +// minNonLiteralBlockSize <= len(src) && len(src) <= maxBlockSize +func encodeBlockDictGo(dst, src []byte, dict *Dict) (d int) { + // Initialize the hash table. + const ( + tableBits = 14 + maxTableSize = 1 << tableBits + maxAhead = 8 // maximum bytes ahead without checking sLimit + + debug = false + ) + dict.initFast() + + var table [maxTableSize]uint32 + + // sLimit is when to stop looking for offset/length copies. The inputMargin + // lets us use a fast path for emitLiteral in the main loop, while we are + // looking for copies. + sLimit := len(src) - inputMargin + if sLimit > MaxDictSrcOffset-maxAhead { + sLimit = MaxDictSrcOffset - maxAhead + } + + // Bail if we can't compress to at least this. + dstLimit := len(src) - len(src)>>5 - 5 + + // nextEmit is where in src the next emitLiteral should start from. + nextEmit := 0 + + // The encoded form can start with a dict entry (copy or repeat). + s := 0 + + // Convert dict repeat to offset + repeat := len(dict.dict) - dict.repeat + cv := load64(src, 0) + + // While in dict +searchDict: + for { + // Next src position to check + nextS := s + (s-nextEmit)>>6 + 4 + hash0 := hash6(cv, tableBits) + hash1 := hash6(cv>>8, tableBits) + if nextS > sLimit { + if debug { + fmt.Println("slimit reached", s, nextS) + } + break searchDict + } + candidateDict := int(dict.fastTable[hash0]) + candidateDict2 := int(dict.fastTable[hash1]) + candidate2 := int(table[hash1]) + candidate := int(table[hash0]) + table[hash0] = uint32(s) + table[hash1] = uint32(s + 1) + hash2 := hash6(cv>>16, tableBits) + + // Check repeat at offset checkRep. + const checkRep = 1 + + if repeat > s { + candidate := len(dict.dict) - repeat + s + if repeat-s >= 4 && uint32(cv) == load32(dict.dict, candidate) { + // Extend back + base := s + for i := candidate; base > nextEmit && i > 0 && dict.dict[i-1] == src[base-1]; { + i-- + base-- + } + // Bail if we exceed the maximum size. 
+ if d+(base-nextEmit) > dstLimit { + return 0 + } + + d += emitLiteral(dst[d:], src[nextEmit:base]) + if debug && nextEmit != base { + fmt.Println("emitted ", base-nextEmit, "literals") + } + s += 4 + candidate += 4 + for candidate < len(dict.dict)-8 && s <= len(src)-8 { + if diff := load64(src, s) ^ load64(dict.dict, candidate); diff != 0 { + s += bits.TrailingZeros64(diff) >> 3 + break + } + s += 8 + candidate += 8 + } + d += emitRepeat(dst[d:], repeat, s-base) + if debug { + fmt.Println("emitted dict repeat length", s-base, "offset:", repeat, "s:", s) + } + nextEmit = s + if s >= sLimit { + break searchDict + } + cv = load64(src, s) + continue + } + } else if uint32(cv>>(checkRep*8)) == load32(src, s-repeat+checkRep) { + base := s + checkRep + // Extend back + for i := base - repeat; base > nextEmit && i > 0 && src[i-1] == src[base-1]; { + i-- + base-- + } + d += emitLiteral(dst[d:], src[nextEmit:base]) + if debug && nextEmit != base { + fmt.Println("emitted ", base-nextEmit, "literals") + } + + // Extend forward + candidate := s - repeat + 4 + checkRep + s += 4 + checkRep + for s <= sLimit { + if diff := load64(src, s) ^ load64(src, candidate); diff != 0 { + s += bits.TrailingZeros64(diff) >> 3 + break + } + s += 8 + candidate += 8 + } + if debug { + // Validate match. + if s <= candidate { + panic("s <= candidate") + } + a := src[base:s] + b := src[base-repeat : base-repeat+(s-base)] + if !bytes.Equal(a, b) { + panic("mismatch") + } + } + + if nextEmit > 0 { + // same as `add := emitCopy(dst[d:], repeat, s-base)` but skips storing offset. + d += emitRepeat(dst[d:], repeat, s-base) + } else { + // First match, cannot be repeat. + d += emitCopy(dst[d:], repeat, s-base) + } + + nextEmit = s + if s >= sLimit { + break searchDict + } + if debug { + fmt.Println("emitted reg repeat", s-base, "s:", s) + } + cv = load64(src, s) + continue searchDict + } + if s == 0 { + cv = load64(src, nextS) + s = nextS + continue searchDict + } + // Start with table. These matches will always be closer. + if uint32(cv) == load32(src, candidate) { + goto emitMatch + } + candidate = int(table[hash2]) + if uint32(cv>>8) == load32(src, candidate2) { + table[hash2] = uint32(s + 2) + candidate = candidate2 + s++ + goto emitMatch + } + + // Check dict. Dicts have longer offsets, so we want longer matches. + if cv == load64(dict.dict, candidateDict) { + table[hash2] = uint32(s + 2) + goto emitDict + } + + candidateDict = int(dict.fastTable[hash2]) + // Check if upper 7 bytes match + if candidateDict2 >= 1 { + if cv^load64(dict.dict, candidateDict2-1) < (1 << 8) { + table[hash2] = uint32(s + 2) + candidateDict = candidateDict2 + s++ + goto emitDict + } + } + + table[hash2] = uint32(s + 2) + if uint32(cv>>16) == load32(src, candidate) { + s += 2 + goto emitMatch + } + if candidateDict >= 2 { + // Check if upper 6 bytes match + if cv^load64(dict.dict, candidateDict-2) < (1 << 16) { + s += 2 + goto emitDict + } + } + + cv = load64(src, nextS) + s = nextS + continue searchDict + + emitDict: + { + if debug { + if load32(dict.dict, candidateDict) != load32(src, s) { + panic("dict emit mismatch") + } + } + // Extend backwards. + // The top bytes will be rechecked to get the full match. + for candidateDict > 0 && s > nextEmit && dict.dict[candidateDict-1] == src[s-1] { + candidateDict-- + s-- + } + + // Bail if we exceed the maximum size. + if d+(s-nextEmit) > dstLimit { + return 0 + } + + // A 4-byte match has been found. We'll later see if more than 4 bytes + // match. 
But, prior to the match, src[nextEmit:s] are unmatched. Emit + // them as literal bytes. + + d += emitLiteral(dst[d:], src[nextEmit:s]) + if debug && nextEmit != s { + fmt.Println("emitted ", s-nextEmit, "literals") + } + { + // Invariant: we have a 4-byte match at s, and no need to emit any + // literal bytes prior to s. + base := s + repeat = s + (len(dict.dict)) - candidateDict + + // Extend the 4-byte match as long as possible. + s += 4 + candidateDict += 4 + for s <= len(src)-8 && len(dict.dict)-candidateDict >= 8 { + if diff := load64(src, s) ^ load64(dict.dict, candidateDict); diff != 0 { + s += bits.TrailingZeros64(diff) >> 3 + break + } + s += 8 + candidateDict += 8 + } + + // Matches longer than 64 are split. + if s <= sLimit || s-base < 8 { + d += emitCopy(dst[d:], repeat, s-base) + } else { + // Split to ensure we don't start a copy within next block + d += emitCopy(dst[d:], repeat, 4) + d += emitRepeat(dst[d:], repeat, s-base-4) + } + if false { + // Validate match. + if s <= candidate { + panic("s <= candidate") + } + a := src[base:s] + b := dict.dict[base-repeat : base-repeat+(s-base)] + if !bytes.Equal(a, b) { + panic("mismatch") + } + } + if debug { + fmt.Println("emitted dict copy, length", s-base, "offset:", repeat, "s:", s) + } + nextEmit = s + if s >= sLimit { + break searchDict + } + + if d > dstLimit { + // Do we have space for more, if not bail. + return 0 + } + + // Index and continue loop to try new candidate. + x := load64(src, s-2) + m2Hash := hash6(x, tableBits) + currHash := hash6(x>>8, tableBits) + table[m2Hash] = uint32(s - 2) + table[currHash] = uint32(s - 1) + cv = load64(src, s) + } + continue + } + emitMatch: + + // Extend backwards. + // The top bytes will be rechecked to get the full match. + for candidate > 0 && s > nextEmit && src[candidate-1] == src[s-1] { + candidate-- + s-- + } + + // Bail if we exceed the maximum size. + if d+(s-nextEmit) > dstLimit { + return 0 + } + + // A 4-byte match has been found. We'll later see if more than 4 bytes + // match. But, prior to the match, src[nextEmit:s] are unmatched. Emit + // them as literal bytes. + + d += emitLiteral(dst[d:], src[nextEmit:s]) + if debug && nextEmit != s { + fmt.Println("emitted ", s-nextEmit, "literals") + } + // Call emitCopy, and then see if another emitCopy could be our next + // move. Repeat until we find no match for the input immediately after + // what was consumed by the last emitCopy call. + // + // If we exit this loop normally then we need to call emitLiteral next, + // though we don't yet know how big the literal will be. We handle that + // by proceeding to the next iteration of the main loop. We also can + // exit this loop via goto if we get close to exhausting the input. + for { + // Invariant: we have a 4-byte match at s, and no need to emit any + // literal bytes prior to s. + base := s + repeat = base - candidate + + // Extend the 4-byte match as long as possible. + s += 4 + candidate += 4 + for s <= len(src)-8 { + if diff := load64(src, s) ^ load64(src, candidate); diff != 0 { + s += bits.TrailingZeros64(diff) >> 3 + break + } + s += 8 + candidate += 8 + } + + d += emitCopy(dst[d:], repeat, s-base) + if debug { + // Validate match. 
+ if s <= candidate { + panic("s <= candidate") + } + a := src[base:s] + b := src[base-repeat : base-repeat+(s-base)] + if !bytes.Equal(a, b) { + panic("mismatch") + } + } + if debug { + fmt.Println("emitted src copy, length", s-base, "offset:", repeat, "s:", s) + } + nextEmit = s + if s >= sLimit { + break searchDict + } + + if d > dstLimit { + // Do we have space for more, if not bail. + return 0 + } + // Check for an immediate match, otherwise start search at s+1 + x := load64(src, s-2) + m2Hash := hash6(x, tableBits) + currHash := hash6(x>>16, tableBits) + candidate = int(table[currHash]) + table[m2Hash] = uint32(s - 2) + table[currHash] = uint32(s) + if debug && s == candidate { + panic("s == candidate") + } + if uint32(x>>16) != load32(src, candidate) { + cv = load64(src, s+1) + s++ + break + } + } + } + + // Search without dict: + if repeat > s { + repeat = 0 + } + + // No more dict + sLimit = len(src) - inputMargin + if s >= sLimit { + goto emitRemainder + } + if debug { + fmt.Println("non-dict matching at", s, "repeat:", repeat) + } + cv = load64(src, s) + if debug { + fmt.Println("now", s, "->", sLimit, "out:", d, "left:", len(src)-s, "nextemit:", nextEmit, "dstLimit:", dstLimit, "s:", s) + } + for { + candidate := 0 + for { + // Next src position to check + nextS := s + (s-nextEmit)>>6 + 4 + if nextS > sLimit { + goto emitRemainder + } + hash0 := hash6(cv, tableBits) + hash1 := hash6(cv>>8, tableBits) + candidate = int(table[hash0]) + candidate2 := int(table[hash1]) + table[hash0] = uint32(s) + table[hash1] = uint32(s + 1) + hash2 := hash6(cv>>16, tableBits) + + // Check repeat at offset checkRep. + const checkRep = 1 + if repeat > 0 && uint32(cv>>(checkRep*8)) == load32(src, s-repeat+checkRep) { + base := s + checkRep + // Extend back + for i := base - repeat; base > nextEmit && i > 0 && src[i-1] == src[base-1]; { + i-- + base-- + } + // Bail if we exceed the maximum size. + if d+(base-nextEmit) > dstLimit { + return 0 + } + + d += emitLiteral(dst[d:], src[nextEmit:base]) + if debug && nextEmit != base { + fmt.Println("emitted ", base-nextEmit, "literals") + } + // Extend forward + candidate := s - repeat + 4 + checkRep + s += 4 + checkRep + for s <= sLimit { + if diff := load64(src, s) ^ load64(src, candidate); diff != 0 { + s += bits.TrailingZeros64(diff) >> 3 + break + } + s += 8 + candidate += 8 + } + if debug { + // Validate match. + if s <= candidate { + panic("s <= candidate") + } + a := src[base:s] + b := src[base-repeat : base-repeat+(s-base)] + if !bytes.Equal(a, b) { + panic("mismatch") + } + } + if nextEmit > 0 { + // same as `add := emitCopy(dst[d:], repeat, s-base)` but skips storing offset. + d += emitRepeat(dst[d:], repeat, s-base) + } else { + // First match, cannot be repeat. + d += emitCopy(dst[d:], repeat, s-base) + } + if debug { + fmt.Println("emitted src repeat length", s-base, "offset:", repeat, "s:", s) + } + nextEmit = s + if s >= sLimit { + goto emitRemainder + } + + cv = load64(src, s) + continue + } + + if uint32(cv) == load32(src, candidate) { + break + } + candidate = int(table[hash2]) + if uint32(cv>>8) == load32(src, candidate2) { + table[hash2] = uint32(s + 2) + candidate = candidate2 + s++ + break + } + table[hash2] = uint32(s + 2) + if uint32(cv>>16) == load32(src, candidate) { + s += 2 + break + } + + cv = load64(src, nextS) + s = nextS + } + + // Extend backwards. + // The top bytes will be rechecked to get the full match. 
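+ // The hash probe only verified 4 bytes; equal bytes just before s can be folded into the copy, shrinking the pending literal run.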
+ for candidate > 0 && s > nextEmit && src[candidate-1] == src[s-1] { + candidate-- + s-- + } + + // Bail if we exceed the maximum size. + if d+(s-nextEmit) > dstLimit { + return 0 + } + + // A 4-byte match has been found. We'll later see if more than 4 bytes + // match. But, prior to the match, src[nextEmit:s] are unmatched. Emit + // them as literal bytes. + + d += emitLiteral(dst[d:], src[nextEmit:s]) + if debug && nextEmit != s { + fmt.Println("emitted ", s-nextEmit, "literals") + } + // Call emitCopy, and then see if another emitCopy could be our next + // move. Repeat until we find no match for the input immediately after + // what was consumed by the last emitCopy call. + // + // If we exit this loop normally then we need to call emitLiteral next, + // though we don't yet know how big the literal will be. We handle that + // by proceeding to the next iteration of the main loop. We also can + // exit this loop via goto if we get close to exhausting the input. + for { + // Invariant: we have a 4-byte match at s, and no need to emit any + // literal bytes prior to s. + base := s + repeat = base - candidate + + // Extend the 4-byte match as long as possible. + s += 4 + candidate += 4 + for s <= len(src)-8 { + if diff := load64(src, s) ^ load64(src, candidate); diff != 0 { + s += bits.TrailingZeros64(diff) >> 3 + break + } + s += 8 + candidate += 8 + } + + d += emitCopy(dst[d:], repeat, s-base) + if debug { + // Validate match. + if s <= candidate { + panic("s <= candidate") + } + a := src[base:s] + b := src[base-repeat : base-repeat+(s-base)] + if !bytes.Equal(a, b) { + panic("mismatch") + } + } + if debug { + fmt.Println("emitted src copy, length", s-base, "offset:", repeat, "s:", s) + } + nextEmit = s + if s >= sLimit { + goto emitRemainder + } + + if d > dstLimit { + // Do we have space for more, if not bail. + return 0 + } + // Check for an immediate match, otherwise start search at s+1 + x := load64(src, s-2) + m2Hash := hash6(x, tableBits) + currHash := hash6(x>>16, tableBits) + candidate = int(table[currHash]) + table[m2Hash] = uint32(s - 2) + table[currHash] = uint32(s) + if debug && s == candidate { + panic("s == candidate") + } + if uint32(x>>16) != load32(src, candidate) { + cv = load64(src, s+1) + s++ + break + } + } + } + +emitRemainder: + if nextEmit < len(src) { + // Bail if we exceed the maximum size. + if d+len(src)-nextEmit > dstLimit { + return 0 + } + d += emitLiteral(dst[d:], src[nextEmit:]) + if debug && nextEmit != s { + fmt.Println("emitted ", len(src)-nextEmit, "literals") + } + } + return d +} diff --git a/vendor/github.com/klauspost/compress/s2/encode_amd64.go b/vendor/github.com/klauspost/compress/s2/encode_amd64.go new file mode 100644 index 0000000000..7aadd255fe --- /dev/null +++ b/vendor/github.com/klauspost/compress/s2/encode_amd64.go @@ -0,0 +1,317 @@ +//go:build !appengine && !noasm && gc +// +build !appengine,!noasm,gc + +package s2 + +import ( + "sync" + + "github.com/klauspost/compress/internal/race" +) + +const hasAmd64Asm = true + +var encPools [4]sync.Pool + +// encodeBlock encodes a non-empty src to a guaranteed-large-enough dst. It +// assumes that the varint-encoded length of the decompressed bytes has already +// been written. +// +// It also assumes that: +// +// len(dst) >= MaxEncodedLen(len(src)) && +// minNonLiteralBlockSize <= len(src) && len(src) <= maxBlockSize +func encodeBlock(dst, src []byte) (d int) { + race.ReadSlice(src) + race.WriteSlice(dst) + + const ( + // Use 12 bit table when less than... 
+ limit12B = 16 << 10 + // Use 10 bit table when less than... + limit10B = 4 << 10 + // Use 8 bit table when less than... + limit8B = 512 + ) + + if len(src) >= 4<<20 { + const sz, pool = 65536, 0 + tmp, ok := encPools[pool].Get().(*[sz]byte) + if !ok { + tmp = &[sz]byte{} + } + race.WriteSlice(tmp[:]) + defer encPools[pool].Put(tmp) + return encodeBlockAsm(dst, src, tmp) + } + if len(src) >= limit12B { + const sz, pool = 65536, 0 + tmp, ok := encPools[pool].Get().(*[sz]byte) + if !ok { + tmp = &[sz]byte{} + } + race.WriteSlice(tmp[:]) + defer encPools[pool].Put(tmp) + return encodeBlockAsm4MB(dst, src, tmp) + } + if len(src) >= limit10B { + const sz, pool = 16384, 1 + tmp, ok := encPools[pool].Get().(*[sz]byte) + if !ok { + tmp = &[sz]byte{} + } + race.WriteSlice(tmp[:]) + defer encPools[pool].Put(tmp) + return encodeBlockAsm12B(dst, src, tmp) + } + if len(src) >= limit8B { + const sz, pool = 4096, 2 + tmp, ok := encPools[pool].Get().(*[sz]byte) + if !ok { + tmp = &[sz]byte{} + } + race.WriteSlice(tmp[:]) + defer encPools[pool].Put(tmp) + return encodeBlockAsm10B(dst, src, tmp) + } + if len(src) < minNonLiteralBlockSize { + return 0 + } + const sz, pool = 1024, 3 + tmp, ok := encPools[pool].Get().(*[sz]byte) + if !ok { + tmp = &[sz]byte{} + } + race.WriteSlice(tmp[:]) + defer encPools[pool].Put(tmp) + return encodeBlockAsm8B(dst, src, tmp) +} + +var encBetterPools [5]sync.Pool + +// encodeBlockBetter encodes a non-empty src to a guaranteed-large-enough dst. It +// assumes that the varint-encoded length of the decompressed bytes has already +// been written. +// +// It also assumes that: +// +// len(dst) >= MaxEncodedLen(len(src)) && +// minNonLiteralBlockSize <= len(src) && len(src) <= maxBlockSize +func encodeBlockBetter(dst, src []byte) (d int) { + race.ReadSlice(src) + race.WriteSlice(dst) + + const ( + // Use 12 bit table when less than... + limit12B = 16 << 10 + // Use 10 bit table when less than... + limit10B = 4 << 10 + // Use 8 bit table when less than... + limit8B = 512 + ) + + if len(src) > 4<<20 { + const sz, pool = 589824, 0 + tmp, ok := encBetterPools[pool].Get().(*[sz]byte) + if !ok { + tmp = &[sz]byte{} + } + race.WriteSlice(tmp[:]) + defer encBetterPools[pool].Put(tmp) + return encodeBetterBlockAsm(dst, src, tmp) + } + if len(src) >= limit12B { + const sz, pool = 589824, 0 + tmp, ok := encBetterPools[pool].Get().(*[sz]byte) + if !ok { + tmp = &[sz]byte{} + } + race.WriteSlice(tmp[:]) + defer encBetterPools[pool].Put(tmp) + + return encodeBetterBlockAsm4MB(dst, src, tmp) + } + if len(src) >= limit10B { + const sz, pool = 81920, 0 + tmp, ok := encBetterPools[pool].Get().(*[sz]byte) + if !ok { + tmp = &[sz]byte{} + } + race.WriteSlice(tmp[:]) + defer encBetterPools[pool].Put(tmp) + + return encodeBetterBlockAsm12B(dst, src, tmp) + } + if len(src) >= limit8B { + const sz, pool = 20480, 1 + tmp, ok := encBetterPools[pool].Get().(*[sz]byte) + if !ok { + tmp = &[sz]byte{} + } + race.WriteSlice(tmp[:]) + defer encBetterPools[pool].Put(tmp) + return encodeBetterBlockAsm10B(dst, src, tmp) + } + if len(src) < minNonLiteralBlockSize { + return 0 + } + + const sz, pool = 5120, 2 + tmp, ok := encBetterPools[pool].Get().(*[sz]byte) + if !ok { + tmp = &[sz]byte{} + } + race.WriteSlice(tmp[:]) + defer encBetterPools[pool].Put(tmp) + return encodeBetterBlockAsm8B(dst, src, tmp) +} + +// encodeBlockSnappy encodes a non-empty src to a guaranteed-large-enough dst. It +// assumes that the varint-encoded length of the decompressed bytes has already +// been written. 
+//
+// It also assumes that:
+//
+// len(dst) >= MaxEncodedLen(len(src)) &&
+// minNonLiteralBlockSize <= len(src) && len(src) <= maxBlockSize
+func encodeBlockSnappy(dst, src []byte) (d int) {
+ race.ReadSlice(src)
+ race.WriteSlice(dst)
+
+ const (
+ // Use 12 bit table when less than...
+ limit12B = 16 << 10
+ // Use 10 bit table when less than...
+ limit10B = 4 << 10
+ // Use 8 bit table when less than...
+ limit8B = 512
+ )
+ if len(src) > 65536 {
+ const sz, pool = 65536, 0
+ tmp, ok := encPools[pool].Get().(*[sz]byte)
+ if !ok {
+ tmp = &[sz]byte{}
+ }
+ race.WriteSlice(tmp[:])
+ defer encPools[pool].Put(tmp)
+ return encodeSnappyBlockAsm(dst, src, tmp)
+ }
+ if len(src) >= limit12B {
+ const sz, pool = 65536, 0
+ tmp, ok := encPools[pool].Get().(*[sz]byte)
+ if !ok {
+ tmp = &[sz]byte{}
+ }
+ race.WriteSlice(tmp[:])
+ defer encPools[pool].Put(tmp)
+ return encodeSnappyBlockAsm64K(dst, src, tmp)
+ }
+ if len(src) >= limit10B {
+ const sz, pool = 16384, 1
+ tmp, ok := encPools[pool].Get().(*[sz]byte)
+ if !ok {
+ tmp = &[sz]byte{}
+ }
+ race.WriteSlice(tmp[:])
+ defer encPools[pool].Put(tmp)
+ return encodeSnappyBlockAsm12B(dst, src, tmp)
+ }
+ if len(src) >= limit8B {
+ const sz, pool = 4096, 2
+ tmp, ok := encPools[pool].Get().(*[sz]byte)
+ if !ok {
+ tmp = &[sz]byte{}
+ }
+ race.WriteSlice(tmp[:])
+ defer encPools[pool].Put(tmp)
+ return encodeSnappyBlockAsm10B(dst, src, tmp)
+ }
+ if len(src) < minNonLiteralBlockSize {
+ return 0
+ }
+ const sz, pool = 1024, 3
+ tmp, ok := encPools[pool].Get().(*[sz]byte)
+ if !ok {
+ tmp = &[sz]byte{}
+ }
+ race.WriteSlice(tmp[:])
+ defer encPools[pool].Put(tmp)
+ return encodeSnappyBlockAsm8B(dst, src, tmp)
+}
+
+// encodeBlockBetterSnappy encodes a non-empty src to a guaranteed-large-enough dst. It
+// assumes that the varint-encoded length of the decompressed bytes has already
+// been written.
+//
+// It also assumes that:
+//
+// len(dst) >= MaxEncodedLen(len(src)) &&
+// minNonLiteralBlockSize <= len(src) && len(src) <= maxBlockSize
+func encodeBlockBetterSnappy(dst, src []byte) (d int) {
+ race.ReadSlice(src)
+ race.WriteSlice(dst)
+
+ const (
+ // Use 12 bit table when less than...
+ limit12B = 16 << 10
+ // Use 10 bit table when less than...
+ limit10B = 4 << 10
+ // Use 8 bit table when less than...
+ limit8B = 512 + ) + if len(src) > 65536 { + const sz, pool = 589824, 0 + tmp, ok := encBetterPools[pool].Get().(*[sz]byte) + if !ok { + tmp = &[sz]byte{} + } + race.WriteSlice(tmp[:]) + defer encBetterPools[pool].Put(tmp) + return encodeSnappyBetterBlockAsm(dst, src, tmp) + } + + if len(src) >= limit12B { + const sz, pool = 294912, 4 + tmp, ok := encBetterPools[pool].Get().(*[sz]byte) + if !ok { + tmp = &[sz]byte{} + } + race.WriteSlice(tmp[:]) + defer encBetterPools[pool].Put(tmp) + + return encodeSnappyBetterBlockAsm64K(dst, src, tmp) + } + if len(src) >= limit10B { + const sz, pool = 81920, 0 + tmp, ok := encBetterPools[pool].Get().(*[sz]byte) + if !ok { + tmp = &[sz]byte{} + } + race.WriteSlice(tmp[:]) + defer encBetterPools[pool].Put(tmp) + + return encodeSnappyBetterBlockAsm12B(dst, src, tmp) + } + if len(src) >= limit8B { + const sz, pool = 20480, 1 + tmp, ok := encBetterPools[pool].Get().(*[sz]byte) + if !ok { + tmp = &[sz]byte{} + } + race.WriteSlice(tmp[:]) + defer encBetterPools[pool].Put(tmp) + return encodeSnappyBetterBlockAsm10B(dst, src, tmp) + } + if len(src) < minNonLiteralBlockSize { + return 0 + } + + const sz, pool = 5120, 2 + tmp, ok := encBetterPools[pool].Get().(*[sz]byte) + if !ok { + tmp = &[sz]byte{} + } + race.WriteSlice(tmp[:]) + defer encBetterPools[pool].Put(tmp) + return encodeSnappyBetterBlockAsm8B(dst, src, tmp) +} diff --git a/vendor/github.com/klauspost/compress/s2/encode_best.go b/vendor/github.com/klauspost/compress/s2/encode_best.go new file mode 100644 index 0000000000..47bac74234 --- /dev/null +++ b/vendor/github.com/klauspost/compress/s2/encode_best.go @@ -0,0 +1,796 @@ +// Copyright 2016 The Snappy-Go Authors. All rights reserved. +// Copyright (c) 2019 Klaus Post. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package s2 + +import ( + "fmt" + "math" + "math/bits" +) + +// encodeBlockBest encodes a non-empty src to a guaranteed-large-enough dst. It +// assumes that the varint-encoded length of the decompressed bytes has already +// been written. +// +// It also assumes that: +// +// len(dst) >= MaxEncodedLen(len(src)) && +// minNonLiteralBlockSize <= len(src) && len(src) <= maxBlockSize +func encodeBlockBest(dst, src []byte, dict *Dict) (d int) { + // Initialize the hash tables. + const ( + // Long hash matches. + lTableBits = 19 + maxLTableSize = 1 << lTableBits + + // Short hash matches. + sTableBits = 16 + maxSTableSize = 1 << sTableBits + + inputMargin = 8 + 2 + + debug = false + ) + + // sLimit is when to stop looking for offset/length copies. The inputMargin + // lets us use a fast path for emitLiteral in the main loop, while we are + // looking for copies. + sLimit := len(src) - inputMargin + if len(src) < minNonLiteralBlockSize { + return 0 + } + sLimitDict := len(src) - inputMargin + if sLimitDict > MaxDictSrcOffset-inputMargin { + sLimitDict = MaxDictSrcOffset - inputMargin + } + + var lTable [maxLTableSize]uint64 + var sTable [maxSTableSize]uint64 + + // Bail if we can't compress to at least this. + dstLimit := len(src) - 5 + + // nextEmit is where in src the next emitLiteral should start from. + nextEmit := 0 + + // The encoded form must start with a literal, as there are no previous + // bytes to copy, so we start looking for hash matches at s == 1. 
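+ // With a dictionary, s starts at 0 instead, since the first bytes may match dictionary content rather than requiring a literal.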
+ s := 1 + repeat := 1 + if dict != nil { + dict.initBest() + s = 0 + repeat = len(dict.dict) - dict.repeat + } + cv := load64(src, s) + + // We search for a repeat at -1, but don't output repeats when nextEmit == 0 + const lowbitMask = 0xffffffff + getCur := func(x uint64) int { + return int(x & lowbitMask) + } + getPrev := func(x uint64) int { + return int(x >> 32) + } + const maxSkip = 64 + + for { + type match struct { + offset int + s int + length int + score int + rep, dict bool + } + var best match + for { + // Next src position to check + nextS := (s-nextEmit)>>8 + 1 + if nextS > maxSkip { + nextS = s + maxSkip + } else { + nextS += s + } + if nextS > sLimit { + goto emitRemainder + } + if dict != nil && s >= MaxDictSrcOffset { + dict = nil + if repeat > s { + repeat = math.MinInt32 + } + } + hashL := hash8(cv, lTableBits) + hashS := hash4(cv, sTableBits) + candidateL := lTable[hashL] + candidateS := sTable[hashS] + + score := func(m match) int { + // Matches that are longer forward are penalized since we must emit it as a literal. + score := m.length - m.s + if nextEmit == m.s { + // If we do not have to emit literals, we save 1 byte + score++ + } + offset := m.s - m.offset + if m.rep { + return score - emitRepeatSize(offset, m.length) + } + return score - emitCopySize(offset, m.length) + } + + matchAt := func(offset, s int, first uint32, rep bool) match { + if best.length != 0 && best.s-best.offset == s-offset { + // Don't retest if we have the same offset. + return match{offset: offset, s: s} + } + if load32(src, offset) != first { + return match{offset: offset, s: s} + } + m := match{offset: offset, s: s, length: 4 + offset, rep: rep} + s += 4 + for s < len(src) { + if len(src)-s < 8 { + if src[s] == src[m.length] { + m.length++ + s++ + continue + } + break + } + if diff := load64(src, s) ^ load64(src, m.length); diff != 0 { + m.length += bits.TrailingZeros64(diff) >> 3 + break + } + s += 8 + m.length += 8 + } + m.length -= offset + m.score = score(m) + if m.score <= -m.s { + // Eliminate if no savings, we might find a better one. + m.length = 0 + } + return m + } + matchDict := func(candidate, s int, first uint32, rep bool) match { + if s >= MaxDictSrcOffset { + return match{offset: candidate, s: s} + } + // Calculate offset as if in continuous array with s + offset := -len(dict.dict) + candidate + if best.length != 0 && best.s-best.offset == s-offset && !rep { + // Don't retest if we have the same offset. + return match{offset: offset, s: s} + } + + if load32(dict.dict, candidate) != first { + return match{offset: offset, s: s} + } + m := match{offset: offset, s: s, length: 4 + candidate, rep: rep, dict: true} + s += 4 + if !rep { + for s < sLimitDict && m.length < len(dict.dict) { + if len(src)-s < 8 || len(dict.dict)-m.length < 8 { + if src[s] == dict.dict[m.length] { + m.length++ + s++ + continue + } + break + } + if diff := load64(src, s) ^ load64(dict.dict, m.length); diff != 0 { + m.length += bits.TrailingZeros64(diff) >> 3 + break + } + s += 8 + m.length += 8 + } + } else { + for s < len(src) && m.length < len(dict.dict) { + if len(src)-s < 8 || len(dict.dict)-m.length < 8 { + if src[s] == dict.dict[m.length] { + m.length++ + s++ + continue + } + break + } + if diff := load64(src, s) ^ load64(dict.dict, m.length); diff != 0 { + m.length += bits.TrailingZeros64(diff) >> 3 + break + } + s += 8 + m.length += 8 + } + } + m.length -= candidate + m.score = score(m) + if m.score <= -m.s { + // Eliminate if no savings, we might find a better one. 
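+ // Zero length makes bestOf discard this candidate.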
+ m.length = 0 + } + return m + } + + bestOf := func(a, b match) match { + if b.length == 0 { + return a + } + if a.length == 0 { + return b + } + as := a.score + b.s + bs := b.score + a.s + if as >= bs { + return a + } + return b + } + + if s > 0 { + best = bestOf(matchAt(getCur(candidateL), s, uint32(cv), false), matchAt(getPrev(candidateL), s, uint32(cv), false)) + best = bestOf(best, matchAt(getCur(candidateS), s, uint32(cv), false)) + best = bestOf(best, matchAt(getPrev(candidateS), s, uint32(cv), false)) + } + if dict != nil { + candidateL := dict.bestTableLong[hashL] + candidateS := dict.bestTableShort[hashS] + best = bestOf(best, matchDict(int(candidateL&0xffff), s, uint32(cv), false)) + best = bestOf(best, matchDict(int(candidateL>>16), s, uint32(cv), false)) + best = bestOf(best, matchDict(int(candidateS&0xffff), s, uint32(cv), false)) + best = bestOf(best, matchDict(int(candidateS>>16), s, uint32(cv), false)) + } + { + if (dict == nil || repeat <= s) && repeat > 0 { + best = bestOf(best, matchAt(s-repeat+1, s+1, uint32(cv>>8), true)) + } else if s-repeat < -4 && dict != nil { + candidate := len(dict.dict) - (repeat - s) + best = bestOf(best, matchDict(candidate, s, uint32(cv), true)) + candidate++ + best = bestOf(best, matchDict(candidate, s+1, uint32(cv>>8), true)) + } + + if best.length > 0 { + hashS := hash4(cv>>8, sTableBits) + // s+1 + nextShort := sTable[hashS] + s := s + 1 + cv := load64(src, s) + hashL := hash8(cv, lTableBits) + nextLong := lTable[hashL] + best = bestOf(best, matchAt(getCur(nextShort), s, uint32(cv), false)) + best = bestOf(best, matchAt(getPrev(nextShort), s, uint32(cv), false)) + best = bestOf(best, matchAt(getCur(nextLong), s, uint32(cv), false)) + best = bestOf(best, matchAt(getPrev(nextLong), s, uint32(cv), false)) + + // Dict at + 1 + if dict != nil { + candidateL := dict.bestTableLong[hashL] + candidateS := dict.bestTableShort[hashS] + + best = bestOf(best, matchDict(int(candidateL&0xffff), s, uint32(cv), false)) + best = bestOf(best, matchDict(int(candidateS&0xffff), s, uint32(cv), false)) + } + + // s+2 + if true { + hashS := hash4(cv>>8, sTableBits) + + nextShort = sTable[hashS] + s++ + cv = load64(src, s) + hashL := hash8(cv, lTableBits) + nextLong = lTable[hashL] + + if (dict == nil || repeat <= s) && repeat > 0 { + // Repeat at + 2 + best = bestOf(best, matchAt(s-repeat, s, uint32(cv), true)) + } else if repeat-s > 4 && dict != nil { + candidate := len(dict.dict) - (repeat - s) + best = bestOf(best, matchDict(candidate, s, uint32(cv), true)) + } + best = bestOf(best, matchAt(getCur(nextShort), s, uint32(cv), false)) + best = bestOf(best, matchAt(getPrev(nextShort), s, uint32(cv), false)) + best = bestOf(best, matchAt(getCur(nextLong), s, uint32(cv), false)) + best = bestOf(best, matchAt(getPrev(nextLong), s, uint32(cv), false)) + + // Dict at +2 + // Very small gain + if dict != nil { + candidateL := dict.bestTableLong[hashL] + candidateS := dict.bestTableShort[hashS] + + best = bestOf(best, matchDict(int(candidateL&0xffff), s, uint32(cv), false)) + best = bestOf(best, matchDict(int(candidateS&0xffff), s, uint32(cv), false)) + } + } + // Search for a match at best match end, see if that is better. + // Allow some bytes at the beginning to mismatch. + // Sweet spot is around 1-2 bytes, but depends on input. + // The skipped bytes are tested in Extend backwards, + // and still picked up as part of the match if they do. 
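+ // skipBeginning allows the first bytes of a new candidate to mismatch; skipEnd probes just before the current match end.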
+ const skipBeginning = 2 + const skipEnd = 1 + if sAt := best.s + best.length - skipEnd; sAt < sLimit { + + sBack := best.s + skipBeginning - skipEnd + backL := best.length - skipBeginning + // Load initial values + cv = load64(src, sBack) + + // Grab candidates... + next := lTable[hash8(load64(src, sAt), lTableBits)] + + if checkAt := getCur(next) - backL; checkAt > 0 { + best = bestOf(best, matchAt(checkAt, sBack, uint32(cv), false)) + } + if checkAt := getPrev(next) - backL; checkAt > 0 { + best = bestOf(best, matchAt(checkAt, sBack, uint32(cv), false)) + } + // Disabled: Extremely small gain + if false { + next = sTable[hash4(load64(src, sAt), sTableBits)] + if checkAt := getCur(next) - backL; checkAt > 0 { + best = bestOf(best, matchAt(checkAt, sBack, uint32(cv), false)) + } + if checkAt := getPrev(next) - backL; checkAt > 0 { + best = bestOf(best, matchAt(checkAt, sBack, uint32(cv), false)) + } + } + } + } + } + + // Update table + lTable[hashL] = uint64(s) | candidateL<<32 + sTable[hashS] = uint64(s) | candidateS<<32 + + if best.length > 0 { + break + } + + cv = load64(src, nextS) + s = nextS + } + + // Extend backwards, not needed for repeats... + s = best.s + if !best.rep && !best.dict { + for best.offset > 0 && s > nextEmit && src[best.offset-1] == src[s-1] { + best.offset-- + best.length++ + s-- + } + } + if false && best.offset >= s { + panic(fmt.Errorf("t %d >= s %d", best.offset, s)) + } + // Bail if we exceed the maximum size. + if d+(s-nextEmit) > dstLimit { + return 0 + } + + base := s + offset := s - best.offset + s += best.length + + if offset > 65535 && s-base <= 5 && !best.rep { + // Bail if the match is equal or worse to the encoding. + s = best.s + 1 + if s >= sLimit { + goto emitRemainder + } + cv = load64(src, s) + continue + } + if debug && nextEmit != base { + fmt.Println("EMIT", base-nextEmit, "literals. base-after:", base) + } + d += emitLiteral(dst[d:], src[nextEmit:base]) + if best.rep { + if nextEmit > 0 || best.dict { + if debug { + fmt.Println("REPEAT, length", best.length, "offset:", offset, "s-after:", s, "dict:", best.dict, "best:", best) + } + // same as `add := emitCopy(dst[d:], repeat, s-base)` but skips storing offset. + d += emitRepeat(dst[d:], offset, best.length) + } else { + // First match without dict cannot be a repeat. + if debug { + fmt.Println("COPY, length", best.length, "offset:", offset, "s-after:", s, "dict:", best.dict, "best:", best) + } + d += emitCopy(dst[d:], offset, best.length) + } + } else { + if debug { + fmt.Println("COPY, length", best.length, "offset:", offset, "s-after:", s, "dict:", best.dict, "best:", best) + } + d += emitCopy(dst[d:], offset, best.length) + } + repeat = offset + + nextEmit = s + if s >= sLimit { + goto emitRemainder + } + + if d > dstLimit { + // Do we have space for more, if not bail. + return 0 + } + // Fill tables... + for i := best.s + 1; i < s; i++ { + cv0 := load64(src, i) + long0 := hash8(cv0, lTableBits) + short0 := hash4(cv0, sTableBits) + lTable[long0] = uint64(i) | lTable[long0]<<32 + sTable[short0] = uint64(i) | sTable[short0]<<32 + } + cv = load64(src, s) + } + +emitRemainder: + if nextEmit < len(src) { + // Bail if we exceed the maximum size. + if d+len(src)-nextEmit > dstLimit { + return 0 + } + if debug && nextEmit != s { + fmt.Println("emitted ", len(src)-nextEmit, "literals") + } + d += emitLiteral(dst[d:], src[nextEmit:]) + } + return d +} + +// encodeBlockBestSnappy encodes a non-empty src to a guaranteed-large-enough dst. 
It +// assumes that the varint-encoded length of the decompressed bytes has already +// been written. +// +// It also assumes that: +// +// len(dst) >= MaxEncodedLen(len(src)) && +// minNonLiteralBlockSize <= len(src) && len(src) <= maxBlockSize +func encodeBlockBestSnappy(dst, src []byte) (d int) { + // Initialize the hash tables. + const ( + // Long hash matches. + lTableBits = 19 + maxLTableSize = 1 << lTableBits + + // Short hash matches. + sTableBits = 16 + maxSTableSize = 1 << sTableBits + + inputMargin = 8 + 2 + ) + + // sLimit is when to stop looking for offset/length copies. The inputMargin + // lets us use a fast path for emitLiteral in the main loop, while we are + // looking for copies. + sLimit := len(src) - inputMargin + if len(src) < minNonLiteralBlockSize { + return 0 + } + + var lTable [maxLTableSize]uint64 + var sTable [maxSTableSize]uint64 + + // Bail if we can't compress to at least this. + dstLimit := len(src) - 5 + + // nextEmit is where in src the next emitLiteral should start from. + nextEmit := 0 + + // The encoded form must start with a literal, as there are no previous + // bytes to copy, so we start looking for hash matches at s == 1. + s := 1 + cv := load64(src, s) + + // We search for a repeat at -1, but don't output repeats when nextEmit == 0 + repeat := 1 + const lowbitMask = 0xffffffff + getCur := func(x uint64) int { + return int(x & lowbitMask) + } + getPrev := func(x uint64) int { + return int(x >> 32) + } + const maxSkip = 64 + + for { + type match struct { + offset int + s int + length int + score int + } + var best match + for { + // Next src position to check + nextS := (s-nextEmit)>>8 + 1 + if nextS > maxSkip { + nextS = s + maxSkip + } else { + nextS += s + } + if nextS > sLimit { + goto emitRemainder + } + hashL := hash8(cv, lTableBits) + hashS := hash4(cv, sTableBits) + candidateL := lTable[hashL] + candidateS := sTable[hashS] + + score := func(m match) int { + // Matches that are longer forward are penalized since we must emit it as a literal. + score := m.length - m.s + if nextEmit == m.s { + // If we do not have to emit literals, we save 1 byte + score++ + } + offset := m.s - m.offset + + return score - emitCopyNoRepeatSize(offset, m.length) + } + + matchAt := func(offset, s int, first uint32) match { + if best.length != 0 && best.s-best.offset == s-offset { + // Don't retest if we have the same offset. + return match{offset: offset, s: s} + } + if load32(src, offset) != first { + return match{offset: offset, s: s} + } + m := match{offset: offset, s: s, length: 4 + offset} + s += 4 + for s <= sLimit { + if diff := load64(src, s) ^ load64(src, m.length); diff != 0 { + m.length += bits.TrailingZeros64(diff) >> 3 + break + } + s += 8 + m.length += 8 + } + m.length -= offset + m.score = score(m) + if m.score <= -m.s { + // Eliminate if no savings, we might find a better one. 
+ m.length = 0 + } + return m + } + + bestOf := func(a, b match) match { + if b.length == 0 { + return a + } + if a.length == 0 { + return b + } + as := a.score + b.s + bs := b.score + a.s + if as >= bs { + return a + } + return b + } + + best = bestOf(matchAt(getCur(candidateL), s, uint32(cv)), matchAt(getPrev(candidateL), s, uint32(cv))) + best = bestOf(best, matchAt(getCur(candidateS), s, uint32(cv))) + best = bestOf(best, matchAt(getPrev(candidateS), s, uint32(cv))) + + { + best = bestOf(best, matchAt(s-repeat+1, s+1, uint32(cv>>8))) + if best.length > 0 { + // s+1 + nextShort := sTable[hash4(cv>>8, sTableBits)] + s := s + 1 + cv := load64(src, s) + nextLong := lTable[hash8(cv, lTableBits)] + best = bestOf(best, matchAt(getCur(nextShort), s, uint32(cv))) + best = bestOf(best, matchAt(getPrev(nextShort), s, uint32(cv))) + best = bestOf(best, matchAt(getCur(nextLong), s, uint32(cv))) + best = bestOf(best, matchAt(getPrev(nextLong), s, uint32(cv))) + // Repeat at + 2 + best = bestOf(best, matchAt(s-repeat+1, s+1, uint32(cv>>8))) + + // s+2 + if true { + nextShort = sTable[hash4(cv>>8, sTableBits)] + s++ + cv = load64(src, s) + nextLong = lTable[hash8(cv, lTableBits)] + best = bestOf(best, matchAt(getCur(nextShort), s, uint32(cv))) + best = bestOf(best, matchAt(getPrev(nextShort), s, uint32(cv))) + best = bestOf(best, matchAt(getCur(nextLong), s, uint32(cv))) + best = bestOf(best, matchAt(getPrev(nextLong), s, uint32(cv))) + } + // Search for a match at best match end, see if that is better. + if sAt := best.s + best.length; sAt < sLimit { + sBack := best.s + backL := best.length + // Load initial values + cv = load64(src, sBack) + // Search for mismatch + next := lTable[hash8(load64(src, sAt), lTableBits)] + //next := sTable[hash4(load64(src, sAt), sTableBits)] + + if checkAt := getCur(next) - backL; checkAt > 0 { + best = bestOf(best, matchAt(checkAt, sBack, uint32(cv))) + } + if checkAt := getPrev(next) - backL; checkAt > 0 { + best = bestOf(best, matchAt(checkAt, sBack, uint32(cv))) + } + } + } + } + + // Update table + lTable[hashL] = uint64(s) | candidateL<<32 + sTable[hashS] = uint64(s) | candidateS<<32 + + if best.length > 0 { + break + } + + cv = load64(src, nextS) + s = nextS + } + + // Extend backwards, not needed for repeats... + s = best.s + if true { + for best.offset > 0 && s > nextEmit && src[best.offset-1] == src[s-1] { + best.offset-- + best.length++ + s-- + } + } + if false && best.offset >= s { + panic(fmt.Errorf("t %d >= s %d", best.offset, s)) + } + // Bail if we exceed the maximum size. + if d+(s-nextEmit) > dstLimit { + return 0 + } + + base := s + offset := s - best.offset + + s += best.length + + if offset > 65535 && s-base <= 5 { + // Bail if the match is equal or worse to the encoding. + s = best.s + 1 + if s >= sLimit { + goto emitRemainder + } + cv = load64(src, s) + continue + } + d += emitLiteral(dst[d:], src[nextEmit:base]) + d += emitCopyNoRepeat(dst[d:], offset, best.length) + repeat = offset + + nextEmit = s + if s >= sLimit { + goto emitRemainder + } + + if d > dstLimit { + // Do we have space for more, if not bail. + return 0 + } + // Fill tables... + for i := best.s + 1; i < s; i++ { + cv0 := load64(src, i) + long0 := hash8(cv0, lTableBits) + short0 := hash4(cv0, sTableBits) + lTable[long0] = uint64(i) | lTable[long0]<<32 + sTable[short0] = uint64(i) | sTable[short0]<<32 + } + cv = load64(src, s) + } + +emitRemainder: + if nextEmit < len(src) { + // Bail if we exceed the maximum size. 
+ if d+len(src)-nextEmit > dstLimit { + return 0 + } + d += emitLiteral(dst[d:], src[nextEmit:]) + } + return d +} + +// emitCopySize returns the size to encode the offset+length +// +// It assumes that: +// +// 1 <= offset && offset <= math.MaxUint32 +// 4 <= length && length <= 1 << 24 +func emitCopySize(offset, length int) int { + if offset >= 65536 { + i := 0 + if length > 64 { + length -= 64 + if length >= 4 { + // Emit remaining as repeats + return 5 + emitRepeatSize(offset, length) + } + i = 5 + } + if length == 0 { + return i + } + return i + 5 + } + + // Offset no more than 2 bytes. + if length > 64 { + if offset < 2048 { + // Emit 8 bytes, then rest as repeats... + return 2 + emitRepeatSize(offset, length-8) + } + // Emit remaining as repeats, at least 4 bytes remain. + return 3 + emitRepeatSize(offset, length-60) + } + if length >= 12 || offset >= 2048 { + return 3 + } + // Emit the remaining copy, encoded as 2 bytes. + return 2 +} + +// emitCopyNoRepeatSize returns the size to encode the offset+length +// +// It assumes that: +// +// 1 <= offset && offset <= math.MaxUint32 +// 4 <= length && length <= 1 << 24 +func emitCopyNoRepeatSize(offset, length int) int { + if offset >= 65536 { + return 5 + 5*(length/64) + } + + // Offset no more than 2 bytes. + if length > 64 { + // Emit remaining as repeats, at least 4 bytes remain. + return 3 + 3*(length/60) + } + if length >= 12 || offset >= 2048 { + return 3 + } + // Emit the remaining copy, encoded as 2 bytes. + return 2 +} + +// emitRepeatSize returns the number of bytes required to encode a repeat. +// Length must be at least 4 and < 1<<24 +func emitRepeatSize(offset, length int) int { + // Repeat offset, make length cheaper + if length <= 4+4 || (length < 8+4 && offset < 2048) { + return 2 + } + if length < (1<<8)+4+4 { + return 3 + } + if length < (1<<16)+(1<<8)+4 { + return 4 + } + const maxRepeat = (1 << 24) - 1 + length -= (1 << 16) - 4 + left := 0 + if length > maxRepeat { + left = length - maxRepeat + 4 + } + if left > 0 { + return 5 + emitRepeatSize(offset, left) + } + return 5 +} diff --git a/vendor/github.com/klauspost/compress/s2/encode_better.go b/vendor/github.com/klauspost/compress/s2/encode_better.go new file mode 100644 index 0000000000..544cb1e17b --- /dev/null +++ b/vendor/github.com/klauspost/compress/s2/encode_better.go @@ -0,0 +1,1106 @@ +// Copyright 2016 The Snappy-Go Authors. All rights reserved. +// Copyright (c) 2019 Klaus Post. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package s2 + +import ( + "bytes" + "fmt" + "math/bits" +) + +// hash4 returns the hash of the lowest 4 bytes of u to fit in a hash table with h bits. +// Preferably h should be a constant and should always be <32. +func hash4(u uint64, h uint8) uint32 { + const prime4bytes = 2654435761 + return (uint32(u) * prime4bytes) >> ((32 - h) & 31) +} + +// hash5 returns the hash of the lowest 5 bytes of u to fit in a hash table with h bits. +// Preferably h should be a constant and should always be <64. +func hash5(u uint64, h uint8) uint32 { + const prime5bytes = 889523592379 + return uint32(((u << (64 - 40)) * prime5bytes) >> ((64 - h) & 63)) +} + +// hash7 returns the hash of the lowest 7 bytes of u to fit in a hash table with h bits. +// Preferably h should be a constant and should always be <64. 
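+// It keys on the low 7 bytes of u: the shift drops the top byte, and the top h bits of the product select the slot.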
+func hash7(u uint64, h uint8) uint32 { + const prime7bytes = 58295818150454627 + return uint32(((u << (64 - 56)) * prime7bytes) >> ((64 - h) & 63)) +} + +// hash8 returns the hash of u to fit in a hash table with h bits. +// Preferably h should be a constant and should always be <64. +func hash8(u uint64, h uint8) uint32 { + const prime8bytes = 0xcf1bbcdcb7a56463 + return uint32((u * prime8bytes) >> ((64 - h) & 63)) +} + +// encodeBlockBetter encodes a non-empty src to a guaranteed-large-enough dst. It +// assumes that the varint-encoded length of the decompressed bytes has already +// been written. +// +// It also assumes that: +// +// len(dst) >= MaxEncodedLen(len(src)) && +// minNonLiteralBlockSize <= len(src) && len(src) <= maxBlockSize +func encodeBlockBetterGo(dst, src []byte) (d int) { + // sLimit is when to stop looking for offset/length copies. The inputMargin + // lets us use a fast path for emitLiteral in the main loop, while we are + // looking for copies. + sLimit := len(src) - inputMargin + if len(src) < minNonLiteralBlockSize { + return 0 + } + + // Initialize the hash tables. + const ( + // Long hash matches. + lTableBits = 17 + maxLTableSize = 1 << lTableBits + + // Short hash matches. + sTableBits = 14 + maxSTableSize = 1 << sTableBits + ) + + var lTable [maxLTableSize]uint32 + var sTable [maxSTableSize]uint32 + + // Bail if we can't compress to at least this. + dstLimit := len(src) - len(src)>>5 - 6 + + // nextEmit is where in src the next emitLiteral should start from. + nextEmit := 0 + + // The encoded form must start with a literal, as there are no previous + // bytes to copy, so we start looking for hash matches at s == 1. + s := 1 + cv := load64(src, s) + + // We initialize repeat to 0, so we never match on first attempt + repeat := 0 + + for { + candidateL := 0 + nextS := 0 + for { + // Next src position to check + nextS = s + (s-nextEmit)>>7 + 1 + if nextS > sLimit { + goto emitRemainder + } + hashL := hash7(cv, lTableBits) + hashS := hash4(cv, sTableBits) + candidateL = int(lTable[hashL]) + candidateS := int(sTable[hashS]) + lTable[hashL] = uint32(s) + sTable[hashS] = uint32(s) + + valLong := load64(src, candidateL) + valShort := load64(src, candidateS) + + // If long matches at least 8 bytes, use that. + if cv == valLong { + break + } + if cv == valShort { + candidateL = candidateS + break + } + + // Check repeat at offset checkRep. + const checkRep = 1 + // Minimum length of a repeat. Tested with various values. + // While 4-5 offers improvements in some, 6 reduces + // regressions significantly. + const wantRepeatBytes = 6 + const repeatMask = ((1 << (wantRepeatBytes * 8)) - 1) << (8 * checkRep) + if false && repeat > 0 && cv&repeatMask == load64(src, s-repeat)&repeatMask { + base := s + checkRep + // Extend back + for i := base - repeat; base > nextEmit && i > 0 && src[i-1] == src[base-1]; { + i-- + base-- + } + d += emitLiteral(dst[d:], src[nextEmit:base]) + + // Extend forward + candidate := s - repeat + wantRepeatBytes + checkRep + s += wantRepeatBytes + checkRep + for s < len(src) { + if len(src)-s < 8 { + if src[s] == src[candidate] { + s++ + candidate++ + continue + } + break + } + if diff := load64(src, s) ^ load64(src, candidate); diff != 0 { + s += bits.TrailingZeros64(diff) >> 3 + break + } + s += 8 + candidate += 8 + } + // same as `add := emitCopy(dst[d:], repeat, s-base)` but skips storing offset. 
+ d += emitRepeat(dst[d:], repeat, s-base) + nextEmit = s + if s >= sLimit { + goto emitRemainder + } + // Index in-between + index0 := base + 1 + index1 := s - 2 + + for index0 < index1 { + cv0 := load64(src, index0) + cv1 := load64(src, index1) + lTable[hash7(cv0, lTableBits)] = uint32(index0) + sTable[hash4(cv0>>8, sTableBits)] = uint32(index0 + 1) + + lTable[hash7(cv1, lTableBits)] = uint32(index1) + sTable[hash4(cv1>>8, sTableBits)] = uint32(index1 + 1) + index0 += 2 + index1 -= 2 + } + + cv = load64(src, s) + continue + } + + // Long likely matches 7, so take that. + if uint32(cv) == uint32(valLong) { + break + } + + // Check our short candidate + if uint32(cv) == uint32(valShort) { + // Try a long candidate at s+1 + hashL = hash7(cv>>8, lTableBits) + candidateL = int(lTable[hashL]) + lTable[hashL] = uint32(s + 1) + if uint32(cv>>8) == load32(src, candidateL) { + s++ + break + } + // Use our short candidate. + candidateL = candidateS + break + } + + cv = load64(src, nextS) + s = nextS + } + + // Extend backwards + for candidateL > 0 && s > nextEmit && src[candidateL-1] == src[s-1] { + candidateL-- + s-- + } + + // Bail if we exceed the maximum size. + if d+(s-nextEmit) > dstLimit { + return 0 + } + + base := s + offset := base - candidateL + + // Extend the 4-byte match as long as possible. + s += 4 + candidateL += 4 + for s < len(src) { + if len(src)-s < 8 { + if src[s] == src[candidateL] { + s++ + candidateL++ + continue + } + break + } + if diff := load64(src, s) ^ load64(src, candidateL); diff != 0 { + s += bits.TrailingZeros64(diff) >> 3 + break + } + s += 8 + candidateL += 8 + } + + if offset > 65535 && s-base <= 5 && repeat != offset { + // Bail if the match is equal or worse to the encoding. + s = nextS + 1 + if s >= sLimit { + goto emitRemainder + } + cv = load64(src, s) + continue + } + + d += emitLiteral(dst[d:], src[nextEmit:base]) + if repeat == offset { + d += emitRepeat(dst[d:], offset, s-base) + } else { + d += emitCopy(dst[d:], offset, s-base) + repeat = offset + } + + nextEmit = s + if s >= sLimit { + goto emitRemainder + } + + if d > dstLimit { + // Do we have space for more, if not bail. + return 0 + } + + // Index short & long + index0 := base + 1 + index1 := s - 2 + + cv0 := load64(src, index0) + cv1 := load64(src, index1) + lTable[hash7(cv0, lTableBits)] = uint32(index0) + sTable[hash4(cv0>>8, sTableBits)] = uint32(index0 + 1) + + // lTable could be postponed, but very minor difference. + lTable[hash7(cv1, lTableBits)] = uint32(index1) + sTable[hash4(cv1>>8, sTableBits)] = uint32(index1 + 1) + index0 += 1 + index1 -= 1 + cv = load64(src, s) + + // Index large values sparsely in between. + // We do two starting from different offsets for speed. + index2 := (index0 + index1 + 1) >> 1 + for index2 < index1 { + lTable[hash7(load64(src, index0), lTableBits)] = uint32(index0) + lTable[hash7(load64(src, index2), lTableBits)] = uint32(index2) + index0 += 2 + index2 += 2 + } + } + +emitRemainder: + if nextEmit < len(src) { + // Bail if we exceed the maximum size. + if d+len(src)-nextEmit > dstLimit { + return 0 + } + d += emitLiteral(dst[d:], src[nextEmit:]) + } + return d +} + +// encodeBlockBetterSnappyGo encodes a non-empty src to a guaranteed-large-enough dst. It +// assumes that the varint-encoded length of the decompressed bytes has already +// been written. 
+// +// It also assumes that: +// +// len(dst) >= MaxEncodedLen(len(src)) && +// minNonLiteralBlockSize <= len(src) && len(src) <= maxBlockSize +func encodeBlockBetterSnappyGo(dst, src []byte) (d int) { + // sLimit is when to stop looking for offset/length copies. The inputMargin + // lets us use a fast path for emitLiteral in the main loop, while we are + // looking for copies. + sLimit := len(src) - inputMargin + if len(src) < minNonLiteralBlockSize { + return 0 + } + + // Initialize the hash tables. + const ( + // Long hash matches. + lTableBits = 16 + maxLTableSize = 1 << lTableBits + + // Short hash matches. + sTableBits = 14 + maxSTableSize = 1 << sTableBits + ) + + var lTable [maxLTableSize]uint32 + var sTable [maxSTableSize]uint32 + + // Bail if we can't compress to at least this. + dstLimit := len(src) - len(src)>>5 - 6 + + // nextEmit is where in src the next emitLiteral should start from. + nextEmit := 0 + + // The encoded form must start with a literal, as there are no previous + // bytes to copy, so we start looking for hash matches at s == 1. + s := 1 + cv := load64(src, s) + + // We initialize repeat to 0, so we never match on first attempt + repeat := 0 + const maxSkip = 100 + + for { + candidateL := 0 + nextS := 0 + for { + // Next src position to check + nextS = (s-nextEmit)>>7 + 1 + if nextS > maxSkip { + nextS = s + maxSkip + } else { + nextS += s + } + + if nextS > sLimit { + goto emitRemainder + } + hashL := hash7(cv, lTableBits) + hashS := hash4(cv, sTableBits) + candidateL = int(lTable[hashL]) + candidateS := int(sTable[hashS]) + lTable[hashL] = uint32(s) + sTable[hashS] = uint32(s) + + if uint32(cv) == load32(src, candidateL) { + break + } + + // Check our short candidate + if uint32(cv) == load32(src, candidateS) { + // Try a long candidate at s+1 + hashL = hash7(cv>>8, lTableBits) + candidateL = int(lTable[hashL]) + lTable[hashL] = uint32(s + 1) + if uint32(cv>>8) == load32(src, candidateL) { + s++ + break + } + // Use our short candidate. + candidateL = candidateS + break + } + + cv = load64(src, nextS) + s = nextS + } + + // Extend backwards + for candidateL > 0 && s > nextEmit && src[candidateL-1] == src[s-1] { + candidateL-- + s-- + } + + // Bail if we exceed the maximum size. + if d+(s-nextEmit) > dstLimit { + return 0 + } + + base := s + offset := base - candidateL + + // Extend the 4-byte match as long as possible. + s += 4 + candidateL += 4 + for s < len(src) { + if len(src)-s < 8 { + if src[s] == src[candidateL] { + s++ + candidateL++ + continue + } + break + } + if diff := load64(src, s) ^ load64(src, candidateL); diff != 0 { + s += bits.TrailingZeros64(diff) >> 3 + break + } + s += 8 + candidateL += 8 + } + + if offset > 65535 && s-base <= 5 && repeat != offset { + // Bail if the match is equal or worse to the encoding. + s = nextS + 1 + if s >= sLimit { + goto emitRemainder + } + cv = load64(src, s) + continue + } + + d += emitLiteral(dst[d:], src[nextEmit:base]) + d += emitCopyNoRepeat(dst[d:], offset, s-base) + repeat = offset + + nextEmit = s + if s >= sLimit { + goto emitRemainder + } + + if d > dstLimit { + // Do we have space for more, if not bail. 
+ return 0 + } + + // Index short & long + index0 := base + 1 + index1 := s - 2 + + cv0 := load64(src, index0) + cv1 := load64(src, index1) + lTable[hash7(cv0, lTableBits)] = uint32(index0) + sTable[hash4(cv0>>8, sTableBits)] = uint32(index0 + 1) + + lTable[hash7(cv1, lTableBits)] = uint32(index1) + sTable[hash4(cv1>>8, sTableBits)] = uint32(index1 + 1) + index0 += 1 + index1 -= 1 + cv = load64(src, s) + + // Index large values sparsely in between. + // We do two starting from different offsets for speed. + index2 := (index0 + index1 + 1) >> 1 + for index2 < index1 { + lTable[hash7(load64(src, index0), lTableBits)] = uint32(index0) + lTable[hash7(load64(src, index2), lTableBits)] = uint32(index2) + index0 += 2 + index2 += 2 + } + } + +emitRemainder: + if nextEmit < len(src) { + // Bail if we exceed the maximum size. + if d+len(src)-nextEmit > dstLimit { + return 0 + } + d += emitLiteral(dst[d:], src[nextEmit:]) + } + return d +} + +// encodeBlockBetterDict encodes a non-empty src to a guaranteed-large-enough dst. It +// assumes that the varint-encoded length of the decompressed bytes has already +// been written. +// +// It also assumes that: +// +// len(dst) >= MaxEncodedLen(len(src)) && +// minNonLiteralBlockSize <= len(src) && len(src) <= maxBlockSize +func encodeBlockBetterDict(dst, src []byte, dict *Dict) (d int) { + // sLimit is when to stop looking for offset/length copies. The inputMargin + // lets us use a fast path for emitLiteral in the main loop, while we are + // looking for copies. + // Initialize the hash tables. + const ( + // Long hash matches. + lTableBits = 17 + maxLTableSize = 1 << lTableBits + + // Short hash matches. + sTableBits = 14 + maxSTableSize = 1 << sTableBits + + maxAhead = 8 // maximum bytes ahead without checking sLimit + + debug = false + ) + + sLimit := len(src) - inputMargin + if sLimit > MaxDictSrcOffset-maxAhead { + sLimit = MaxDictSrcOffset - maxAhead + } + if len(src) < minNonLiteralBlockSize { + return 0 + } + + dict.initBetter() + + var lTable [maxLTableSize]uint32 + var sTable [maxSTableSize]uint32 + + // Bail if we can't compress to at least this. + dstLimit := len(src) - len(src)>>5 - 6 + + // nextEmit is where in src the next emitLiteral should start from. + nextEmit := 0 + + // The encoded form must start with a literal, as there are no previous + // bytes to copy, so we start looking for hash matches at s == 1. + s := 0 + cv := load64(src, s) + + // We initialize repeat to 0, so we never match on first attempt + repeat := len(dict.dict) - dict.repeat + + // While in dict +searchDict: + for { + candidateL := 0 + nextS := 0 + for { + // Next src position to check + nextS = s + (s-nextEmit)>>7 + 1 + if nextS > sLimit { + break searchDict + } + hashL := hash7(cv, lTableBits) + hashS := hash4(cv, sTableBits) + candidateL = int(lTable[hashL]) + candidateS := int(sTable[hashS]) + dictL := int(dict.betterTableLong[hashL]) + dictS := int(dict.betterTableShort[hashS]) + lTable[hashL] = uint32(s) + sTable[hashS] = uint32(s) + + valLong := load64(src, candidateL) + valShort := load64(src, candidateS) + + // If long matches at least 8 bytes, use that. + if s != 0 { + if cv == valLong { + goto emitMatch + } + if cv == valShort { + candidateL = candidateS + goto emitMatch + } + } + + // Check dict repeat. 
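+ // repeat >= s+4 keeps the candidate at least 4 bytes inside the dictionary, so the 32-bit probe below stays in range.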
+ if repeat >= s+4 { + candidate := len(dict.dict) - repeat + s + if candidate > 0 && uint32(cv) == load32(dict.dict, candidate) { + // Extend back + base := s + for i := candidate; base > nextEmit && i > 0 && dict.dict[i-1] == src[base-1]; { + i-- + base-- + } + d += emitLiteral(dst[d:], src[nextEmit:base]) + if debug && nextEmit != base { + fmt.Println("emitted ", base-nextEmit, "literals") + } + s += 4 + candidate += 4 + for candidate < len(dict.dict)-8 && s <= len(src)-8 { + if diff := load64(src, s) ^ load64(dict.dict, candidate); diff != 0 { + s += bits.TrailingZeros64(diff) >> 3 + break + } + s += 8 + candidate += 8 + } + d += emitRepeat(dst[d:], repeat, s-base) + if debug { + fmt.Println("emitted dict repeat length", s-base, "offset:", repeat, "s:", s) + } + nextEmit = s + if s >= sLimit { + break searchDict + } + // Index in-between + index0 := base + 1 + index1 := s - 2 + + cv = load64(src, s) + for index0 < index1 { + cv0 := load64(src, index0) + cv1 := load64(src, index1) + lTable[hash7(cv0, lTableBits)] = uint32(index0) + sTable[hash4(cv0>>8, sTableBits)] = uint32(index0 + 1) + + lTable[hash7(cv1, lTableBits)] = uint32(index1) + sTable[hash4(cv1>>8, sTableBits)] = uint32(index1 + 1) + index0 += 2 + index1 -= 2 + } + continue + } + } + // Don't try to find match at s==0 + if s == 0 { + cv = load64(src, nextS) + s = nextS + continue + } + + // Long likely matches 7, so take that. + if uint32(cv) == uint32(valLong) { + goto emitMatch + } + + // Long dict... + if uint32(cv) == load32(dict.dict, dictL) { + candidateL = dictL + goto emitDict + } + + // Check our short candidate + if uint32(cv) == uint32(valShort) { + // Try a long candidate at s+1 + hashL = hash7(cv>>8, lTableBits) + candidateL = int(lTable[hashL]) + lTable[hashL] = uint32(s + 1) + if uint32(cv>>8) == load32(src, candidateL) { + s++ + goto emitMatch + } + // Use our short candidate. + candidateL = candidateS + goto emitMatch + } + if uint32(cv) == load32(dict.dict, dictS) { + // Try a long candidate at s+1 + hashL = hash7(cv>>8, lTableBits) + candidateL = int(lTable[hashL]) + lTable[hashL] = uint32(s + 1) + if uint32(cv>>8) == load32(src, candidateL) { + s++ + goto emitMatch + } + candidateL = dictS + goto emitDict + } + cv = load64(src, nextS) + s = nextS + } + emitDict: + { + if debug { + if load32(dict.dict, candidateL) != load32(src, s) { + panic("dict emit mismatch") + } + } + // Extend backwards. + // The top bytes will be rechecked to get the full match. + for candidateL > 0 && s > nextEmit && dict.dict[candidateL-1] == src[s-1] { + candidateL-- + s-- + } + + // Bail if we exceed the maximum size. + if d+(s-nextEmit) > dstLimit { + return 0 + } + + // A 4-byte match has been found. We'll later see if more than 4 bytes + // match. But, prior to the match, src[nextEmit:s] are unmatched. Emit + // them as literal bytes. + + d += emitLiteral(dst[d:], src[nextEmit:s]) + if debug && nextEmit != s { + fmt.Println("emitted ", s-nextEmit, "literals") + } + { + // Invariant: we have a 4-byte match at s, and no need to emit any + // literal bytes prior to s. + base := s + offset := s + (len(dict.dict)) - candidateL + + // Extend the 4-byte match as long as possible. 
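+ // The first 4 bytes were already verified by the probe, so extension starts past them.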
+ s += 4 + candidateL += 4 + for s <= len(src)-8 && len(dict.dict)-candidateL >= 8 { + if diff := load64(src, s) ^ load64(dict.dict, candidateL); diff != 0 { + s += bits.TrailingZeros64(diff) >> 3 + break + } + s += 8 + candidateL += 8 + } + + if repeat == offset { + if debug { + fmt.Println("emitted dict repeat, length", s-base, "offset:", offset, "s:", s, "dict offset:", candidateL) + } + d += emitRepeat(dst[d:], offset, s-base) + } else { + if debug { + fmt.Println("emitted dict copy, length", s-base, "offset:", offset, "s:", s, "dict offset:", candidateL) + } + // Matches longer than 64 are split. + if s <= sLimit || s-base < 8 { + d += emitCopy(dst[d:], offset, s-base) + } else { + // Split to ensure we don't start a copy within next block. + d += emitCopy(dst[d:], offset, 4) + d += emitRepeat(dst[d:], offset, s-base-4) + } + repeat = offset + } + if false { + // Validate match. + if s <= candidateL { + panic("s <= candidate") + } + a := src[base:s] + b := dict.dict[base-repeat : base-repeat+(s-base)] + if !bytes.Equal(a, b) { + panic("mismatch") + } + } + + nextEmit = s + if s >= sLimit { + break searchDict + } + + if d > dstLimit { + // Do we have space for more, if not bail. + return 0 + } + + // Index short & long + index0 := base + 1 + index1 := s - 2 + + cv0 := load64(src, index0) + cv1 := load64(src, index1) + lTable[hash7(cv0, lTableBits)] = uint32(index0) + sTable[hash4(cv0>>8, sTableBits)] = uint32(index0 + 1) + + lTable[hash7(cv1, lTableBits)] = uint32(index1) + sTable[hash4(cv1>>8, sTableBits)] = uint32(index1 + 1) + index0 += 1 + index1 -= 1 + cv = load64(src, s) + + // index every second long in between. + for index0 < index1 { + lTable[hash7(load64(src, index0), lTableBits)] = uint32(index0) + lTable[hash7(load64(src, index1), lTableBits)] = uint32(index1) + index0 += 2 + index1 -= 2 + } + } + continue + } + emitMatch: + + // Extend backwards + for candidateL > 0 && s > nextEmit && src[candidateL-1] == src[s-1] { + candidateL-- + s-- + } + + // Bail if we exceed the maximum size. + if d+(s-nextEmit) > dstLimit { + return 0 + } + + base := s + offset := base - candidateL + + // Extend the 4-byte match as long as possible. + s += 4 + candidateL += 4 + for s < len(src) { + if len(src)-s < 8 { + if src[s] == src[candidateL] { + s++ + candidateL++ + continue + } + break + } + if diff := load64(src, s) ^ load64(src, candidateL); diff != 0 { + s += bits.TrailingZeros64(diff) >> 3 + break + } + s += 8 + candidateL += 8 + } + + if offset > 65535 && s-base <= 5 && repeat != offset { + // Bail if the match is equal or worse to the encoding. + s = nextS + 1 + if s >= sLimit { + goto emitRemainder + } + cv = load64(src, s) + continue + } + + d += emitLiteral(dst[d:], src[nextEmit:base]) + if debug && nextEmit != s { + fmt.Println("emitted ", s-nextEmit, "literals") + } + if repeat == offset { + if debug { + fmt.Println("emitted match repeat, length", s-base, "offset:", offset, "s:", s) + } + d += emitRepeat(dst[d:], offset, s-base) + } else { + if debug { + fmt.Println("emitted match copy, length", s-base, "offset:", offset, "s:", s) + } + d += emitCopy(dst[d:], offset, s-base) + repeat = offset + } + + nextEmit = s + if s >= sLimit { + goto emitRemainder + } + + if d > dstLimit { + // Do we have space for more, if not bail. 
+ return 0 + } + + // Index short & long + index0 := base + 1 + index1 := s - 2 + + cv0 := load64(src, index0) + cv1 := load64(src, index1) + lTable[hash7(cv0, lTableBits)] = uint32(index0) + sTable[hash4(cv0>>8, sTableBits)] = uint32(index0 + 1) + + lTable[hash7(cv1, lTableBits)] = uint32(index1) + sTable[hash4(cv1>>8, sTableBits)] = uint32(index1 + 1) + index0 += 1 + index1 -= 1 + cv = load64(src, s) + + // Index large values sparsely in between. + // We do two starting from different offsets for speed. + index2 := (index0 + index1 + 1) >> 1 + for index2 < index1 { + lTable[hash7(load64(src, index0), lTableBits)] = uint32(index0) + lTable[hash7(load64(src, index2), lTableBits)] = uint32(index2) + index0 += 2 + index2 += 2 + } + } + + // Search without dict: + if repeat > s { + repeat = 0 + } + + // No more dict + sLimit = len(src) - inputMargin + if s >= sLimit { + goto emitRemainder + } + cv = load64(src, s) + if debug { + fmt.Println("now", s, "->", sLimit, "out:", d, "left:", len(src)-s, "nextemit:", nextEmit, "dstLimit:", dstLimit, "s:", s) + } + for { + candidateL := 0 + nextS := 0 + for { + // Next src position to check + nextS = s + (s-nextEmit)>>7 + 1 + if nextS > sLimit { + goto emitRemainder + } + hashL := hash7(cv, lTableBits) + hashS := hash4(cv, sTableBits) + candidateL = int(lTable[hashL]) + candidateS := int(sTable[hashS]) + lTable[hashL] = uint32(s) + sTable[hashS] = uint32(s) + + valLong := load64(src, candidateL) + valShort := load64(src, candidateS) + + // If long matches at least 8 bytes, use that. + if cv == valLong { + break + } + if cv == valShort { + candidateL = candidateS + break + } + + // Check repeat at offset checkRep. + const checkRep = 1 + // Minimum length of a repeat. Tested with various values. + // While 4-5 offers improvements in some, 6 reduces + // regressions significantly. + const wantRepeatBytes = 6 + const repeatMask = ((1 << (wantRepeatBytes * 8)) - 1) << (8 * checkRep) + if false && repeat > 0 && cv&repeatMask == load64(src, s-repeat)&repeatMask { + base := s + checkRep + // Extend back + for i := base - repeat; base > nextEmit && i > 0 && src[i-1] == src[base-1]; { + i-- + base-- + } + d += emitLiteral(dst[d:], src[nextEmit:base]) + + // Extend forward + candidate := s - repeat + wantRepeatBytes + checkRep + s += wantRepeatBytes + checkRep + for s < len(src) { + if len(src)-s < 8 { + if src[s] == src[candidate] { + s++ + candidate++ + continue + } + break + } + if diff := load64(src, s) ^ load64(src, candidate); diff != 0 { + s += bits.TrailingZeros64(diff) >> 3 + break + } + s += 8 + candidate += 8 + } + // same as `add := emitCopy(dst[d:], repeat, s-base)` but skips storing offset. + d += emitRepeat(dst[d:], repeat, s-base) + nextEmit = s + if s >= sLimit { + goto emitRemainder + } + // Index in-between + index0 := base + 1 + index1 := s - 2 + + for index0 < index1 { + cv0 := load64(src, index0) + cv1 := load64(src, index1) + lTable[hash7(cv0, lTableBits)] = uint32(index0) + sTable[hash4(cv0>>8, sTableBits)] = uint32(index0 + 1) + + lTable[hash7(cv1, lTableBits)] = uint32(index1) + sTable[hash4(cv1>>8, sTableBits)] = uint32(index1 + 1) + index0 += 2 + index1 -= 2 + } + + cv = load64(src, s) + continue + } + + // Long likely matches 7, so take that. 
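+ // hash7 was keyed on 7 bytes, so a 32-bit hit on the long candidate tends to extend further than a short-table hit.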
+ if uint32(cv) == uint32(valLong) { + break + } + + // Check our short candidate + if uint32(cv) == uint32(valShort) { + // Try a long candidate at s+1 + hashL = hash7(cv>>8, lTableBits) + candidateL = int(lTable[hashL]) + lTable[hashL] = uint32(s + 1) + if uint32(cv>>8) == load32(src, candidateL) { + s++ + break + } + // Use our short candidate. + candidateL = candidateS + break + } + + cv = load64(src, nextS) + s = nextS + } + + // Extend backwards + for candidateL > 0 && s > nextEmit && src[candidateL-1] == src[s-1] { + candidateL-- + s-- + } + + // Bail if we exceed the maximum size. + if d+(s-nextEmit) > dstLimit { + return 0 + } + + base := s + offset := base - candidateL + + // Extend the 4-byte match as long as possible. + s += 4 + candidateL += 4 + for s < len(src) { + if len(src)-s < 8 { + if src[s] == src[candidateL] { + s++ + candidateL++ + continue + } + break + } + if diff := load64(src, s) ^ load64(src, candidateL); diff != 0 { + s += bits.TrailingZeros64(diff) >> 3 + break + } + s += 8 + candidateL += 8 + } + + if offset > 65535 && s-base <= 5 && repeat != offset { + // Bail if the match is equal or worse to the encoding. + s = nextS + 1 + if s >= sLimit { + goto emitRemainder + } + cv = load64(src, s) + continue + } + + d += emitLiteral(dst[d:], src[nextEmit:base]) + if repeat == offset { + d += emitRepeat(dst[d:], offset, s-base) + } else { + d += emitCopy(dst[d:], offset, s-base) + repeat = offset + } + + nextEmit = s + if s >= sLimit { + goto emitRemainder + } + + if d > dstLimit { + // Do we have space for more, if not bail. + return 0 + } + + // Index short & long + index0 := base + 1 + index1 := s - 2 + + cv0 := load64(src, index0) + cv1 := load64(src, index1) + lTable[hash7(cv0, lTableBits)] = uint32(index0) + sTable[hash4(cv0>>8, sTableBits)] = uint32(index0 + 1) + + lTable[hash7(cv1, lTableBits)] = uint32(index1) + sTable[hash4(cv1>>8, sTableBits)] = uint32(index1 + 1) + index0 += 1 + index1 -= 1 + cv = load64(src, s) + + // Index large values sparsely in between. + // We do two starting from different offsets for speed. + index2 := (index0 + index1 + 1) >> 1 + for index2 < index1 { + lTable[hash7(load64(src, index0), lTableBits)] = uint32(index0) + lTable[hash7(load64(src, index2), lTableBits)] = uint32(index2) + index0 += 2 + index2 += 2 + } + } + +emitRemainder: + if nextEmit < len(src) { + // Bail if we exceed the maximum size. + if d+len(src)-nextEmit > dstLimit { + return 0 + } + d += emitLiteral(dst[d:], src[nextEmit:]) + } + return d +} diff --git a/vendor/github.com/klauspost/compress/s2/encode_go.go b/vendor/github.com/klauspost/compress/s2/encode_go.go new file mode 100644 index 0000000000..dd1c973ca5 --- /dev/null +++ b/vendor/github.com/klauspost/compress/s2/encode_go.go @@ -0,0 +1,729 @@ +//go:build !amd64 || appengine || !gc || noasm +// +build !amd64 appengine !gc noasm + +package s2 + +import ( + "bytes" + "math/bits" +) + +const hasAmd64Asm = false + +// encodeBlock encodes a non-empty src to a guaranteed-large-enough dst. It +// assumes that the varint-encoded length of the decompressed bytes has already +// been written. +// +// It also assumes that: +// +// len(dst) >= MaxEncodedLen(len(src)) +func encodeBlock(dst, src []byte) (d int) { + if len(src) < minNonLiteralBlockSize { + return 0 + } + return encodeBlockGo(dst, src) +} + +// encodeBlockBetter encodes a non-empty src to a guaranteed-large-enough dst. It +// assumes that the varint-encoded length of the decompressed bytes has already +// been written. 
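Note: none of these encodeBlock* entry points are called directly by users; the exported API sizes the destination with MaxEncodedLen before dispatching here. A minimal usage sketch of the package's public entry points (Encode, Decode and MaxEncodedLen are the real s2 API; the sample input is arbitrary):

package main

import (
	"fmt"

	"github.com/klauspost/compress/s2"
)

func main() {
	src := []byte("hello hello hello hello hello hello")
	// Encode requires dst to be at least MaxEncodedLen(len(src)) bytes.
	dst := make([]byte, s2.MaxEncodedLen(len(src)))
	enc := s2.Encode(dst, src)
	fmt.Printf("%d -> %d bytes\n", len(src), len(enc))

	dec, err := s2.Decode(nil, enc)
	fmt.Println(err == nil && string(dec) == string(src)) // true
}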
+// +// It also assumes that: +// +// len(dst) >= MaxEncodedLen(len(src)) +func encodeBlockBetter(dst, src []byte) (d int) { + return encodeBlockBetterGo(dst, src) +} + +// encodeBlockBetter encodes a non-empty src to a guaranteed-large-enough dst. It +// assumes that the varint-encoded length of the decompressed bytes has already +// been written. +// +// It also assumes that: +// +// len(dst) >= MaxEncodedLen(len(src)) +func encodeBlockBetterSnappy(dst, src []byte) (d int) { + return encodeBlockBetterSnappyGo(dst, src) +} + +// encodeBlock encodes a non-empty src to a guaranteed-large-enough dst. It +// assumes that the varint-encoded length of the decompressed bytes has already +// been written. +// +// It also assumes that: +// +// len(dst) >= MaxEncodedLen(len(src)) +func encodeBlockSnappy(dst, src []byte) (d int) { + if len(src) < minNonLiteralBlockSize { + return 0 + } + return encodeBlockSnappyGo(dst, src) +} + +// emitLiteral writes a literal chunk and returns the number of bytes written. +// +// It assumes that: +// +// dst is long enough to hold the encoded bytes +// 0 <= len(lit) && len(lit) <= math.MaxUint32 +func emitLiteral(dst, lit []byte) int { + if len(lit) == 0 { + return 0 + } + const num = 63<<2 | tagLiteral + i, n := 0, uint(len(lit)-1) + switch { + case n < 60: + dst[0] = uint8(n)<<2 | tagLiteral + i = 1 + case n < 1<<8: + dst[1] = uint8(n) + dst[0] = 60<<2 | tagLiteral + i = 2 + case n < 1<<16: + dst[2] = uint8(n >> 8) + dst[1] = uint8(n) + dst[0] = 61<<2 | tagLiteral + i = 3 + case n < 1<<24: + dst[3] = uint8(n >> 16) + dst[2] = uint8(n >> 8) + dst[1] = uint8(n) + dst[0] = 62<<2 | tagLiteral + i = 4 + default: + dst[4] = uint8(n >> 24) + dst[3] = uint8(n >> 16) + dst[2] = uint8(n >> 8) + dst[1] = uint8(n) + dst[0] = 63<<2 | tagLiteral + i = 5 + } + return i + copy(dst[i:], lit) +} + +// emitRepeat writes a repeat chunk and returns the number of bytes written. +// Length must be at least 4 and < 1<<24 +func emitRepeat(dst []byte, offset, length int) int { + // Repeat offset, make length cheaper + length -= 4 + if length <= 4 { + dst[0] = uint8(length)<<2 | tagCopy1 + dst[1] = 0 + return 2 + } + if length < 8 && offset < 2048 { + // Encode WITH offset + dst[1] = uint8(offset) + dst[0] = uint8(offset>>8)<<5 | uint8(length)<<2 | tagCopy1 + return 2 + } + if length < (1<<8)+4 { + length -= 4 + dst[2] = uint8(length) + dst[1] = 0 + dst[0] = 5<<2 | tagCopy1 + return 3 + } + if length < (1<<16)+(1<<8) { + length -= 1 << 8 + dst[3] = uint8(length >> 8) + dst[2] = uint8(length >> 0) + dst[1] = 0 + dst[0] = 6<<2 | tagCopy1 + return 4 + } + const maxRepeat = (1 << 24) - 1 + length -= 1 << 16 + left := 0 + if length > maxRepeat { + left = length - maxRepeat + 4 + length = maxRepeat - 4 + } + dst[4] = uint8(length >> 16) + dst[3] = uint8(length >> 8) + dst[2] = uint8(length >> 0) + dst[1] = 0 + dst[0] = 7<<2 | tagCopy1 + if left > 0 { + return 5 + emitRepeat(dst[5:], offset, left) + } + return 5 +} + +// emitCopy writes a copy chunk and returns the number of bytes written. +// +// It assumes that: +// +// dst is long enough to hold the encoded bytes +// 1 <= offset && offset <= math.MaxUint32 +// 4 <= length && length <= 1 << 24 +func emitCopy(dst []byte, offset, length int) int { + if offset >= 65536 { + i := 0 + if length > 64 { + // Emit a length 64 copy, encoded as 5 bytes. 
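Note: the branch structure of emitRepeat above determines how many bytes each repeat costs. A standalone sketch of just the size tiers, with the thresholds read directly off the branches above (the offset < 2048 carve-out mirrors the "Encode WITH offset" case):

// repeatSize mirrors emitRepeat's output size without writing anything.
func repeatSize(offset, length int) int {
	length -= 4
	switch {
	case length <= 4:
		return 2 // tag byte plus a zero offset byte
	case length < 8 && offset < 2048:
		return 2 // length and offset packed into one 2-byte tag
	case length < (1<<8)+4:
		return 3
	case length < (1<<16)+(1<<8):
		return 4
	default:
		return 5 // lengths beyond maxRepeat chain a second repeat
	}
}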
+ dst[4] = uint8(offset >> 24) + dst[3] = uint8(offset >> 16) + dst[2] = uint8(offset >> 8) + dst[1] = uint8(offset) + dst[0] = 63<<2 | tagCopy4 + length -= 64 + if length >= 4 { + // Emit remaining as repeats + return 5 + emitRepeat(dst[5:], offset, length) + } + i = 5 + } + if length == 0 { + return i + } + // Emit a copy, offset encoded as 4 bytes. + dst[i+0] = uint8(length-1)<<2 | tagCopy4 + dst[i+1] = uint8(offset) + dst[i+2] = uint8(offset >> 8) + dst[i+3] = uint8(offset >> 16) + dst[i+4] = uint8(offset >> 24) + return i + 5 + } + + // Offset no more than 2 bytes. + if length > 64 { + off := 3 + if offset < 2048 { + // emit 8 bytes as tagCopy1, rest as repeats. + dst[1] = uint8(offset) + dst[0] = uint8(offset>>8)<<5 | uint8(8-4)<<2 | tagCopy1 + length -= 8 + off = 2 + } else { + // Emit a length 60 copy, encoded as 3 bytes. + // Emit remaining as repeat value (minimum 4 bytes). + dst[2] = uint8(offset >> 8) + dst[1] = uint8(offset) + dst[0] = 59<<2 | tagCopy2 + length -= 60 + } + // Emit remaining as repeats, at least 4 bytes remain. + return off + emitRepeat(dst[off:], offset, length) + } + if length >= 12 || offset >= 2048 { + // Emit the remaining copy, encoded as 3 bytes. + dst[2] = uint8(offset >> 8) + dst[1] = uint8(offset) + dst[0] = uint8(length-1)<<2 | tagCopy2 + return 3 + } + // Emit the remaining copy, encoded as 2 bytes. + dst[1] = uint8(offset) + dst[0] = uint8(offset>>8)<<5 | uint8(length-4)<<2 | tagCopy1 + return 2 +} + +// emitCopyNoRepeat writes a copy chunk and returns the number of bytes written. +// +// It assumes that: +// +// dst is long enough to hold the encoded bytes +// 1 <= offset && offset <= math.MaxUint32 +// 4 <= length && length <= 1 << 24 +func emitCopyNoRepeat(dst []byte, offset, length int) int { + if offset >= 65536 { + i := 0 + if length > 64 { + // Emit a length 64 copy, encoded as 5 bytes. + dst[4] = uint8(offset >> 24) + dst[3] = uint8(offset >> 16) + dst[2] = uint8(offset >> 8) + dst[1] = uint8(offset) + dst[0] = 63<<2 | tagCopy4 + length -= 64 + if length >= 4 { + // Emit remaining as repeats + return 5 + emitCopyNoRepeat(dst[5:], offset, length) + } + i = 5 + } + if length == 0 { + return i + } + // Emit a copy, offset encoded as 4 bytes. + dst[i+0] = uint8(length-1)<<2 | tagCopy4 + dst[i+1] = uint8(offset) + dst[i+2] = uint8(offset >> 8) + dst[i+3] = uint8(offset >> 16) + dst[i+4] = uint8(offset >> 24) + return i + 5 + } + + // Offset no more than 2 bytes. + if length > 64 { + // Emit a length 60 copy, encoded as 3 bytes. + // Emit remaining as repeat value (minimum 4 bytes). + dst[2] = uint8(offset >> 8) + dst[1] = uint8(offset) + dst[0] = 59<<2 | tagCopy2 + length -= 60 + // Emit remaining as repeats, at least 4 bytes remain. + return 3 + emitCopyNoRepeat(dst[3:], offset, length) + } + if length >= 12 || offset >= 2048 { + // Emit the remaining copy, encoded as 3 bytes. + dst[2] = uint8(offset >> 8) + dst[1] = uint8(offset) + dst[0] = uint8(length-1)<<2 | tagCopy2 + return 3 + } + // Emit the remaining copy, encoded as 2 bytes. + dst[1] = uint8(offset) + dst[0] = uint8(offset>>8)<<5 | uint8(length-4)<<2 | tagCopy1 + return 2 +} + +// matchLen returns how many bytes match in a and b +// +// It assumes that: +// +// len(a) <= len(b) +func matchLen(a []byte, b []byte) int { + b = b[:len(a)] + var checked int + if len(a) > 4 { + // Try 4 bytes first + if diff := load32(a, 0) ^ load32(b, 0); diff != 0 { + return bits.TrailingZeros32(diff) >> 3 + } + // Switch to 8 byte matching. 
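Note: the 2-byte copy emitted at the bottom of emitCopy packs the offset and length into eleven and three bits respectively. A sketch of that layout, with the tag constant assumed from the Snappy/S2 wire format (tagCopy1 = 0x01):

// encodeCopy1 shows the 2-byte tagCopy1 layout used above:
// byte0 = (offset>>8)<<5 | (length-4)<<2 | tagCopy1, byte1 = offset & 0xff.
// Valid only for 4 <= length <= 11 and offset < 2048.
func encodeCopy1(offset, length int) [2]byte {
	const tagCopy1 = 0x01
	return [2]byte{
		byte(offset>>8)<<5 | byte(length-4)<<2 | tagCopy1,
		byte(offset),
	}
}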
+ checked = 4 + a = a[4:] + b = b[4:] + for len(a) >= 8 { + b = b[:len(a)] + if diff := load64(a, 0) ^ load64(b, 0); diff != 0 { + return checked + (bits.TrailingZeros64(diff) >> 3) + } + checked += 8 + a = a[8:] + b = b[8:] + } + } + b = b[:len(a)] + for i := range a { + if a[i] != b[i] { + return int(i) + checked + } + } + return len(a) + checked +} + +// input must be > inputMargin +func calcBlockSize(src []byte, _ *[32768]byte) (d int) { + // Initialize the hash table. + const ( + tableBits = 13 + maxTableSize = 1 << tableBits + ) + + var table [maxTableSize]uint32 + + // sLimit is when to stop looking for offset/length copies. The inputMargin + // lets us use a fast path for emitLiteral in the main loop, while we are + // looking for copies. + sLimit := len(src) - inputMargin + + // Bail if we can't compress to at least this. + dstLimit := len(src) - len(src)>>5 - 5 + + // nextEmit is where in src the next emitLiteral should start from. + nextEmit := 0 + + // The encoded form must start with a literal, as there are no previous + // bytes to copy, so we start looking for hash matches at s == 1. + s := 1 + cv := load64(src, s) + + // We search for a repeat at -1, but don't output repeats when nextEmit == 0 + repeat := 1 + + for { + candidate := 0 + for { + // Next src position to check + nextS := s + (s-nextEmit)>>6 + 4 + if nextS > sLimit { + goto emitRemainder + } + hash0 := hash6(cv, tableBits) + hash1 := hash6(cv>>8, tableBits) + candidate = int(table[hash0]) + candidate2 := int(table[hash1]) + table[hash0] = uint32(s) + table[hash1] = uint32(s + 1) + hash2 := hash6(cv>>16, tableBits) + + // Check repeat at offset checkRep. + const checkRep = 1 + if uint32(cv>>(checkRep*8)) == load32(src, s-repeat+checkRep) { + base := s + checkRep + // Extend back + for i := base - repeat; base > nextEmit && i > 0 && src[i-1] == src[base-1]; { + i-- + base-- + } + d += emitLiteralSize(src[nextEmit:base]) + + // Extend forward + candidate := s - repeat + 4 + checkRep + s += 4 + checkRep + for s <= sLimit { + if diff := load64(src, s) ^ load64(src, candidate); diff != 0 { + s += bits.TrailingZeros64(diff) >> 3 + break + } + s += 8 + candidate += 8 + } + + d += emitCopyNoRepeatSize(repeat, s-base) + nextEmit = s + if s >= sLimit { + goto emitRemainder + } + + cv = load64(src, s) + continue + } + + if uint32(cv) == load32(src, candidate) { + break + } + candidate = int(table[hash2]) + if uint32(cv>>8) == load32(src, candidate2) { + table[hash2] = uint32(s + 2) + candidate = candidate2 + s++ + break + } + table[hash2] = uint32(s + 2) + if uint32(cv>>16) == load32(src, candidate) { + s += 2 + break + } + + cv = load64(src, nextS) + s = nextS + } + + // Extend backwards + for candidate > 0 && s > nextEmit && src[candidate-1] == src[s-1] { + candidate-- + s-- + } + + // Bail if we exceed the maximum size. + if d+(s-nextEmit) > dstLimit { + return 0 + } + + // A 4-byte match has been found. We'll later see if more than 4 bytes + // match. But, prior to the match, src[nextEmit:s] are unmatched. Emit + // them as literal bytes. + + d += emitLiteralSize(src[nextEmit:s]) + + // Call emitCopy, and then see if another emitCopy could be our next + // move. Repeat until we find no match for the input immediately after + // what was consumed by the last emitCopy call. + // + // If we exit this loop normally then we need to call emitLiteral next, + // though we don't yet know how big the literal will be. We handle that + // by proceeding to the next iteration of the main loop. 
We also can + // exit this loop via goto if we get close to exhausting the input. + for { + // Invariant: we have a 4-byte match at s, and no need to emit any + // literal bytes prior to s. + base := s + repeat = base - candidate + + // Extend the 4-byte match as long as possible. + s += 4 + candidate += 4 + for s <= len(src)-8 { + if diff := load64(src, s) ^ load64(src, candidate); diff != 0 { + s += bits.TrailingZeros64(diff) >> 3 + break + } + s += 8 + candidate += 8 + } + + d += emitCopyNoRepeatSize(repeat, s-base) + if false { + // Validate match. + a := src[base:s] + b := src[base-repeat : base-repeat+(s-base)] + if !bytes.Equal(a, b) { + panic("mismatch") + } + } + + nextEmit = s + if s >= sLimit { + goto emitRemainder + } + + if d > dstLimit { + // Do we have space for more, if not bail. + return 0 + } + // Check for an immediate match, otherwise start search at s+1 + x := load64(src, s-2) + m2Hash := hash6(x, tableBits) + currHash := hash6(x>>16, tableBits) + candidate = int(table[currHash]) + table[m2Hash] = uint32(s - 2) + table[currHash] = uint32(s) + if uint32(x>>16) != load32(src, candidate) { + cv = load64(src, s+1) + s++ + break + } + } + } + +emitRemainder: + if nextEmit < len(src) { + // Bail if we exceed the maximum size. + if d+len(src)-nextEmit > dstLimit { + return 0 + } + d += emitLiteralSize(src[nextEmit:]) + } + return d +} + +// length must be > inputMargin. +func calcBlockSizeSmall(src []byte, _ *[2048]byte) (d int) { + // Initialize the hash table. + const ( + tableBits = 9 + maxTableSize = 1 << tableBits + ) + + var table [maxTableSize]uint32 + + // sLimit is when to stop looking for offset/length copies. The inputMargin + // lets us use a fast path for emitLiteral in the main loop, while we are + // looking for copies. + sLimit := len(src) - inputMargin + + // Bail if we can't compress to at least this. + dstLimit := len(src) - len(src)>>5 - 5 + + // nextEmit is where in src the next emitLiteral should start from. + nextEmit := 0 + + // The encoded form must start with a literal, as there are no previous + // bytes to copy, so we start looking for hash matches at s == 1. + s := 1 + cv := load64(src, s) + + // We search for a repeat at -1, but don't output repeats when nextEmit == 0 + repeat := 1 + + for { + candidate := 0 + for { + // Next src position to check + nextS := s + (s-nextEmit)>>6 + 4 + if nextS > sLimit { + goto emitRemainder + } + hash0 := hash6(cv, tableBits) + hash1 := hash6(cv>>8, tableBits) + candidate = int(table[hash0]) + candidate2 := int(table[hash1]) + table[hash0] = uint32(s) + table[hash1] = uint32(s + 1) + hash2 := hash6(cv>>16, tableBits) + + // Check repeat at offset checkRep. 
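Note: the repeat probe above compares four bytes at s+checkRep against the same position one repeat-offset back. load32 and load64 are the package's little-endian word readers; a sketch of their behavior (equivalent spelled out via encoding/binary, consistent with the x86 loads in the generated assembly):

import "encoding/binary"

// Little-endian word reads, as used by the hash and match probes above.
func load32(b []byte, i int) uint32 { return binary.LittleEndian.Uint32(b[i:]) }
func load64(b []byte, i int) uint64 { return binary.LittleEndian.Uint64(b[i:]) }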
+ const checkRep = 1 + if uint32(cv>>(checkRep*8)) == load32(src, s-repeat+checkRep) { + base := s + checkRep + // Extend back + for i := base - repeat; base > nextEmit && i > 0 && src[i-1] == src[base-1]; { + i-- + base-- + } + d += emitLiteralSize(src[nextEmit:base]) + + // Extend forward + candidate := s - repeat + 4 + checkRep + s += 4 + checkRep + for s <= sLimit { + if diff := load64(src, s) ^ load64(src, candidate); diff != 0 { + s += bits.TrailingZeros64(diff) >> 3 + break + } + s += 8 + candidate += 8 + } + + d += emitCopyNoRepeatSize(repeat, s-base) + nextEmit = s + if s >= sLimit { + goto emitRemainder + } + + cv = load64(src, s) + continue + } + + if uint32(cv) == load32(src, candidate) { + break + } + candidate = int(table[hash2]) + if uint32(cv>>8) == load32(src, candidate2) { + table[hash2] = uint32(s + 2) + candidate = candidate2 + s++ + break + } + table[hash2] = uint32(s + 2) + if uint32(cv>>16) == load32(src, candidate) { + s += 2 + break + } + + cv = load64(src, nextS) + s = nextS + } + + // Extend backwards + for candidate > 0 && s > nextEmit && src[candidate-1] == src[s-1] { + candidate-- + s-- + } + + // Bail if we exceed the maximum size. + if d+(s-nextEmit) > dstLimit { + return 0 + } + + // A 4-byte match has been found. We'll later see if more than 4 bytes + // match. But, prior to the match, src[nextEmit:s] are unmatched. Emit + // them as literal bytes. + + d += emitLiteralSize(src[nextEmit:s]) + + // Call emitCopy, and then see if another emitCopy could be our next + // move. Repeat until we find no match for the input immediately after + // what was consumed by the last emitCopy call. + // + // If we exit this loop normally then we need to call emitLiteral next, + // though we don't yet know how big the literal will be. We handle that + // by proceeding to the next iteration of the main loop. We also can + // exit this loop via goto if we get close to exhausting the input. + for { + // Invariant: we have a 4-byte match at s, and no need to emit any + // literal bytes prior to s. + base := s + repeat = base - candidate + + // Extend the 4-byte match as long as possible. + s += 4 + candidate += 4 + for s <= len(src)-8 { + if diff := load64(src, s) ^ load64(src, candidate); diff != 0 { + s += bits.TrailingZeros64(diff) >> 3 + break + } + s += 8 + candidate += 8 + } + + d += emitCopyNoRepeatSize(repeat, s-base) + if false { + // Validate match. + a := src[base:s] + b := src[base-repeat : base-repeat+(s-base)] + if !bytes.Equal(a, b) { + panic("mismatch") + } + } + + nextEmit = s + if s >= sLimit { + goto emitRemainder + } + + if d > dstLimit { + // Do we have space for more, if not bail. + return 0 + } + // Check for an immediate match, otherwise start search at s+1 + x := load64(src, s-2) + m2Hash := hash6(x, tableBits) + currHash := hash6(x>>16, tableBits) + candidate = int(table[currHash]) + table[m2Hash] = uint32(s - 2) + table[currHash] = uint32(s) + if uint32(x>>16) != load32(src, candidate) { + cv = load64(src, s+1) + s++ + break + } + } + } + +emitRemainder: + if nextEmit < len(src) { + // Bail if we exceed the maximum size. + if d+len(src)-nextEmit > dstLimit { + return 0 + } + d += emitLiteralSize(src[nextEmit:]) + } + return d +} + +// emitLiteral writes a literal chunk and returns the number of bytes written. 
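Note: both size calculators bail out through the same threshold, dstLimit = len(src) - len(src)>>5 - 5, meaning the block must shrink by at least 1/32 of the input plus five bytes, otherwise the caller can treat the block as incompressible. A one-line check with a worked value:

// For a 1000-byte input the budget is 1000 - 31 - 5 = 964 bytes.
func worthCompressing(srcLen, encodedLen int) bool {
	return encodedLen <= srcLen-srcLen>>5-5
}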
+// +// It assumes that: +// +// dst is long enough to hold the encoded bytes +// 0 <= len(lit) && len(lit) <= math.MaxUint32 +func emitLiteralSize(lit []byte) int { + if len(lit) == 0 { + return 0 + } + switch { + case len(lit) <= 60: + return len(lit) + 1 + case len(lit) <= 1<<8: + return len(lit) + 2 + case len(lit) <= 1<<16: + return len(lit) + 3 + case len(lit) <= 1<<24: + return len(lit) + 4 + default: + return len(lit) + 5 + } +} + +func cvtLZ4BlockAsm(dst []byte, src []byte) (uncompressed int, dstUsed int) { + panic("cvtLZ4BlockAsm should be unreachable") +} + +func cvtLZ4BlockSnappyAsm(dst []byte, src []byte) (uncompressed int, dstUsed int) { + panic("cvtLZ4BlockSnappyAsm should be unreachable") +} + +func cvtLZ4sBlockAsm(dst []byte, src []byte) (uncompressed int, dstUsed int) { + panic("cvtLZ4sBlockAsm should be unreachable") +} + +func cvtLZ4sBlockSnappyAsm(dst []byte, src []byte) (uncompressed int, dstUsed int) { + panic("cvtLZ4sBlockSnappyAsm should be unreachable") +} diff --git a/vendor/github.com/klauspost/compress/s2/encodeblock_amd64.go b/vendor/github.com/klauspost/compress/s2/encodeblock_amd64.go new file mode 100644 index 0000000000..f43aa81543 --- /dev/null +++ b/vendor/github.com/klauspost/compress/s2/encodeblock_amd64.go @@ -0,0 +1,228 @@ +// Code generated by command: go run gen.go -out ../encodeblock_amd64.s -stubs ../encodeblock_amd64.go -pkg=s2. DO NOT EDIT. + +//go:build !appengine && !noasm && gc && !noasm + +package s2 + +func _dummy_() + +// encodeBlockAsm encodes a non-empty src to a guaranteed-large-enough dst. +// Maximum input 4294967295 bytes. +// It assumes that the varint-encoded length of the decompressed bytes has already been written. +// +//go:noescape +func encodeBlockAsm(dst []byte, src []byte, tmp *[65536]byte) int + +// encodeBlockAsm4MB encodes a non-empty src to a guaranteed-large-enough dst. +// Maximum input 4194304 bytes. +// It assumes that the varint-encoded length of the decompressed bytes has already been written. +// +//go:noescape +func encodeBlockAsm4MB(dst []byte, src []byte, tmp *[65536]byte) int + +// encodeBlockAsm12B encodes a non-empty src to a guaranteed-large-enough dst. +// Maximum input 16383 bytes. +// It assumes that the varint-encoded length of the decompressed bytes has already been written. +// +//go:noescape +func encodeBlockAsm12B(dst []byte, src []byte, tmp *[16384]byte) int + +// encodeBlockAsm10B encodes a non-empty src to a guaranteed-large-enough dst. +// Maximum input 4095 bytes. +// It assumes that the varint-encoded length of the decompressed bytes has already been written. +// +//go:noescape +func encodeBlockAsm10B(dst []byte, src []byte, tmp *[4096]byte) int + +// encodeBlockAsm8B encodes a non-empty src to a guaranteed-large-enough dst. +// Maximum input 511 bytes. +// It assumes that the varint-encoded length of the decompressed bytes has already been written. +// +//go:noescape +func encodeBlockAsm8B(dst []byte, src []byte, tmp *[1024]byte) int + +// encodeBetterBlockAsm encodes a non-empty src to a guaranteed-large-enough dst. +// Maximum input 4294967295 bytes. +// It assumes that the varint-encoded length of the decompressed bytes has already been written. +// +//go:noescape +func encodeBetterBlockAsm(dst []byte, src []byte, tmp *[589824]byte) int + +// encodeBetterBlockAsm4MB encodes a non-empty src to a guaranteed-large-enough dst. +// Maximum input 4194304 bytes. +// It assumes that the varint-encoded length of the decompressed bytes has already been written. 
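Note: in the stub signatures below, the tmp parameter is each variant's hash-table scratch space, sized to the input class. Reading the sizes as 4 bytes per uint32 slot, the *B suffix appears to be the table bits used for smaller inputs (an assumed mapping, consistent with the array sizes: 65536 = 4<<14, 16384 = 4<<12, 4096 = 4<<10, 1024 = 4<<8; the Better variants carry two tables in one array):

// 4 bytes per uint32 slot.
func tableBytes(tableBits uint) int { return 4 << tableBits }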
+// +//go:noescape +func encodeBetterBlockAsm4MB(dst []byte, src []byte, tmp *[589824]byte) int + +// encodeBetterBlockAsm12B encodes a non-empty src to a guaranteed-large-enough dst. +// Maximum input 16383 bytes. +// It assumes that the varint-encoded length of the decompressed bytes has already been written. +// +//go:noescape +func encodeBetterBlockAsm12B(dst []byte, src []byte, tmp *[81920]byte) int + +// encodeBetterBlockAsm10B encodes a non-empty src to a guaranteed-large-enough dst. +// Maximum input 4095 bytes. +// It assumes that the varint-encoded length of the decompressed bytes has already been written. +// +//go:noescape +func encodeBetterBlockAsm10B(dst []byte, src []byte, tmp *[20480]byte) int + +// encodeBetterBlockAsm8B encodes a non-empty src to a guaranteed-large-enough dst. +// Maximum input 511 bytes. +// It assumes that the varint-encoded length of the decompressed bytes has already been written. +// +//go:noescape +func encodeBetterBlockAsm8B(dst []byte, src []byte, tmp *[5120]byte) int + +// encodeSnappyBlockAsm encodes a non-empty src to a guaranteed-large-enough dst. +// Maximum input 4294967295 bytes. +// It assumes that the varint-encoded length of the decompressed bytes has already been written. +// +//go:noescape +func encodeSnappyBlockAsm(dst []byte, src []byte, tmp *[65536]byte) int + +// encodeSnappyBlockAsm64K encodes a non-empty src to a guaranteed-large-enough dst. +// Maximum input 65535 bytes. +// It assumes that the varint-encoded length of the decompressed bytes has already been written. +// +//go:noescape +func encodeSnappyBlockAsm64K(dst []byte, src []byte, tmp *[65536]byte) int + +// encodeSnappyBlockAsm12B encodes a non-empty src to a guaranteed-large-enough dst. +// Maximum input 16383 bytes. +// It assumes that the varint-encoded length of the decompressed bytes has already been written. +// +//go:noescape +func encodeSnappyBlockAsm12B(dst []byte, src []byte, tmp *[16384]byte) int + +// encodeSnappyBlockAsm10B encodes a non-empty src to a guaranteed-large-enough dst. +// Maximum input 4095 bytes. +// It assumes that the varint-encoded length of the decompressed bytes has already been written. +// +//go:noescape +func encodeSnappyBlockAsm10B(dst []byte, src []byte, tmp *[4096]byte) int + +// encodeSnappyBlockAsm8B encodes a non-empty src to a guaranteed-large-enough dst. +// Maximum input 511 bytes. +// It assumes that the varint-encoded length of the decompressed bytes has already been written. +// +//go:noescape +func encodeSnappyBlockAsm8B(dst []byte, src []byte, tmp *[1024]byte) int + +// encodeSnappyBetterBlockAsm encodes a non-empty src to a guaranteed-large-enough dst. +// Maximum input 4294967295 bytes. +// It assumes that the varint-encoded length of the decompressed bytes has already been written. +// +//go:noescape +func encodeSnappyBetterBlockAsm(dst []byte, src []byte, tmp *[589824]byte) int + +// encodeSnappyBetterBlockAsm64K encodes a non-empty src to a guaranteed-large-enough dst. +// Maximum input 65535 bytes. +// It assumes that the varint-encoded length of the decompressed bytes has already been written. +// +//go:noescape +func encodeSnappyBetterBlockAsm64K(dst []byte, src []byte, tmp *[294912]byte) int + +// encodeSnappyBetterBlockAsm12B encodes a non-empty src to a guaranteed-large-enough dst. +// Maximum input 16383 bytes. +// It assumes that the varint-encoded length of the decompressed bytes has already been written. 
+// +//go:noescape +func encodeSnappyBetterBlockAsm12B(dst []byte, src []byte, tmp *[81920]byte) int + +// encodeSnappyBetterBlockAsm10B encodes a non-empty src to a guaranteed-large-enough dst. +// Maximum input 4095 bytes. +// It assumes that the varint-encoded length of the decompressed bytes has already been written. +// +//go:noescape +func encodeSnappyBetterBlockAsm10B(dst []byte, src []byte, tmp *[20480]byte) int + +// encodeSnappyBetterBlockAsm8B encodes a non-empty src to a guaranteed-large-enough dst. +// Maximum input 511 bytes. +// It assumes that the varint-encoded length of the decompressed bytes has already been written. +// +//go:noescape +func encodeSnappyBetterBlockAsm8B(dst []byte, src []byte, tmp *[5120]byte) int + +// calcBlockSize encodes a non-empty src to a guaranteed-large-enough dst. +// Maximum input 4294967295 bytes. +// It assumes that the varint-encoded length of the decompressed bytes has already been written. +// +//go:noescape +func calcBlockSize(src []byte, tmp *[32768]byte) int + +// calcBlockSizeSmall encodes a non-empty src to a guaranteed-large-enough dst. +// Maximum input 1024 bytes. +// It assumes that the varint-encoded length of the decompressed bytes has already been written. +// +//go:noescape +func calcBlockSizeSmall(src []byte, tmp *[2048]byte) int + +// emitLiteral writes a literal chunk and returns the number of bytes written. +// +// It assumes that: +// +// dst is long enough to hold the encoded bytes with margin of 0 bytes +// 0 <= len(lit) && len(lit) <= math.MaxUint32 +// +//go:noescape +func emitLiteral(dst []byte, lit []byte) int + +// emitRepeat writes a repeat chunk and returns the number of bytes written. +// Length must be at least 4 and < 1<<32 +// +//go:noescape +func emitRepeat(dst []byte, offset int, length int) int + +// emitCopy writes a copy chunk and returns the number of bytes written. +// +// It assumes that: +// +// dst is long enough to hold the encoded bytes +// 1 <= offset && offset <= math.MaxUint32 +// 4 <= length && length <= 1 << 24 +// +//go:noescape +func emitCopy(dst []byte, offset int, length int) int + +// emitCopyNoRepeat writes a copy chunk and returns the number of bytes written. 
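Note: the *Snappy variants (and emitCopyNoRepeat above) exist because vanilla Snappy has no repeat opcode, so those code paths restrict output to standard Snappy tags. At the public API this is the difference between s2.Encode and s2.EncodeSnappy, both real entry points:

import "github.com/klauspost/compress/s2"

// Output of EncodeSnappy is decodable by any standard Snappy decoder,
// at some cost in ratio compared to Encode.
func toSnappy(src []byte) []byte {
	return s2.EncodeSnappy(make([]byte, s2.MaxEncodedLen(len(src))), src)
}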
+// +// It assumes that: +// +// dst is long enough to hold the encoded bytes +// 1 <= offset && offset <= math.MaxUint32 +// 4 <= length && length <= 1 << 24 +// +//go:noescape +func emitCopyNoRepeat(dst []byte, offset int, length int) int + +// matchLen returns how many bytes match in a and b +// +// It assumes that: +// +// len(a) <= len(b) +// +//go:noescape +func matchLen(a []byte, b []byte) int + +// cvtLZ4Block converts an LZ4 block to S2 +// +//go:noescape +func cvtLZ4BlockAsm(dst []byte, src []byte) (uncompressed int, dstUsed int) + +// cvtLZ4sBlock converts an LZ4s block to S2 +// +//go:noescape +func cvtLZ4sBlockAsm(dst []byte, src []byte) (uncompressed int, dstUsed int) + +// cvtLZ4Block converts an LZ4 block to Snappy +// +//go:noescape +func cvtLZ4BlockSnappyAsm(dst []byte, src []byte) (uncompressed int, dstUsed int) + +// cvtLZ4sBlock converts an LZ4s block to Snappy +// +//go:noescape +func cvtLZ4sBlockSnappyAsm(dst []byte, src []byte) (uncompressed int, dstUsed int) diff --git a/vendor/github.com/klauspost/compress/s2/encodeblock_amd64.s b/vendor/github.com/klauspost/compress/s2/encodeblock_amd64.s new file mode 100644 index 0000000000..df9be687be --- /dev/null +++ b/vendor/github.com/klauspost/compress/s2/encodeblock_amd64.s @@ -0,0 +1,21303 @@ +// Code generated by command: go run gen.go -out ../encodeblock_amd64.s -stubs ../encodeblock_amd64.go -pkg=s2. DO NOT EDIT. + +//go:build !appengine && !noasm && gc && !noasm + +#include "textflag.h" + +// func _dummy_() +TEXT ·_dummy_(SB), $0 +#ifdef GOAMD64_v4 +#ifndef GOAMD64_v3 +#define GOAMD64_v3 +#endif +#endif + RET + +// func encodeBlockAsm(dst []byte, src []byte, tmp *[65536]byte) int +// Requires: BMI, SSE2 +TEXT ·encodeBlockAsm(SB), $24-64 + MOVQ tmp+48(FP), AX + MOVQ dst_base+0(FP), CX + MOVQ $0x00000200, DX + MOVQ AX, BX + PXOR X0, X0 + +zero_loop_encodeBlockAsm: + MOVOU X0, (BX) + MOVOU X0, 16(BX) + MOVOU X0, 32(BX) + MOVOU X0, 48(BX) + MOVOU X0, 64(BX) + MOVOU X0, 80(BX) + MOVOU X0, 96(BX) + MOVOU X0, 112(BX) + ADDQ $0x80, BX + DECQ DX + JNZ zero_loop_encodeBlockAsm + MOVL $0x00000000, 12(SP) + MOVQ src_len+32(FP), DX + LEAQ -9(DX), BX + LEAQ -8(DX), SI + MOVL SI, 8(SP) + SHRQ $0x05, DX + SUBL DX, BX + LEAQ (CX)(BX*1), BX + MOVQ BX, (SP) + MOVL $0x00000001, DX + MOVL DX, 16(SP) + MOVQ src_base+24(FP), BX + +search_loop_encodeBlockAsm: + MOVL DX, SI + SUBL 12(SP), SI + SHRL $0x06, SI + LEAL 4(DX)(SI*1), SI + CMPL SI, 8(SP) + JAE emit_remainder_encodeBlockAsm + MOVQ (BX)(DX*1), DI + MOVL SI, 20(SP) + MOVQ $0x0000cf1bbcdcbf9b, R9 + MOVQ DI, R10 + MOVQ DI, R11 + SHRQ $0x08, R11 + SHLQ $0x10, R10 + IMULQ R9, R10 + SHRQ $0x32, R10 + SHLQ $0x10, R11 + IMULQ R9, R11 + SHRQ $0x32, R11 + MOVL (AX)(R10*4), SI + MOVL (AX)(R11*4), R8 + MOVL DX, (AX)(R10*4) + LEAL 1(DX), R10 + MOVL R10, (AX)(R11*4) + MOVQ DI, R10 + SHRQ $0x10, R10 + SHLQ $0x10, R10 + IMULQ R9, R10 + SHRQ $0x32, R10 + MOVL DX, R9 + SUBL 16(SP), R9 + MOVL 1(BX)(R9*1), R11 + MOVQ DI, R9 + SHRQ $0x08, R9 + CMPL R9, R11 + JNE no_repeat_found_encodeBlockAsm + LEAL 1(DX), DI + MOVL 12(SP), R8 + MOVL DI, SI + SUBL 16(SP), SI + JZ repeat_extend_back_end_encodeBlockAsm + +repeat_extend_back_loop_encodeBlockAsm: + CMPL DI, R8 + JBE repeat_extend_back_end_encodeBlockAsm + MOVB -1(BX)(SI*1), R9 + MOVB -1(BX)(DI*1), R10 + CMPB R9, R10 + JNE repeat_extend_back_end_encodeBlockAsm + LEAL -1(DI), DI + DECL SI + JNZ repeat_extend_back_loop_encodeBlockAsm + +repeat_extend_back_end_encodeBlockAsm: + MOVL DI, SI + SUBL 12(SP), SI + LEAQ 5(CX)(SI*1), SI + CMPQ SI, (SP) + JB 
repeat_dst_size_check_encodeBlockAsm + MOVQ $0x00000000, ret+56(FP) + RET + +repeat_dst_size_check_encodeBlockAsm: + MOVL 12(SP), SI + CMPL SI, DI + JEQ emit_literal_done_repeat_emit_encodeBlockAsm + MOVL DI, R9 + MOVL DI, 12(SP) + LEAQ (BX)(SI*1), R10 + SUBL SI, R9 + LEAL -1(R9), SI + CMPL SI, $0x3c + JB one_byte_repeat_emit_encodeBlockAsm + CMPL SI, $0x00000100 + JB two_bytes_repeat_emit_encodeBlockAsm + CMPL SI, $0x00010000 + JB three_bytes_repeat_emit_encodeBlockAsm + CMPL SI, $0x01000000 + JB four_bytes_repeat_emit_encodeBlockAsm + MOVB $0xfc, (CX) + MOVL SI, 1(CX) + ADDQ $0x05, CX + JMP memmove_long_repeat_emit_encodeBlockAsm + +four_bytes_repeat_emit_encodeBlockAsm: + MOVL SI, R11 + SHRL $0x10, R11 + MOVB $0xf8, (CX) + MOVW SI, 1(CX) + MOVB R11, 3(CX) + ADDQ $0x04, CX + JMP memmove_long_repeat_emit_encodeBlockAsm + +three_bytes_repeat_emit_encodeBlockAsm: + MOVB $0xf4, (CX) + MOVW SI, 1(CX) + ADDQ $0x03, CX + JMP memmove_long_repeat_emit_encodeBlockAsm + +two_bytes_repeat_emit_encodeBlockAsm: + MOVB $0xf0, (CX) + MOVB SI, 1(CX) + ADDQ $0x02, CX + CMPL SI, $0x40 + JB memmove_repeat_emit_encodeBlockAsm + JMP memmove_long_repeat_emit_encodeBlockAsm + +one_byte_repeat_emit_encodeBlockAsm: + SHLB $0x02, SI + MOVB SI, (CX) + ADDQ $0x01, CX + +memmove_repeat_emit_encodeBlockAsm: + LEAQ (CX)(R9*1), SI + + // genMemMoveShort + CMPQ R9, $0x08 + JBE emit_lit_memmove_repeat_emit_encodeBlockAsm_memmove_move_8 + CMPQ R9, $0x10 + JBE emit_lit_memmove_repeat_emit_encodeBlockAsm_memmove_move_8through16 + CMPQ R9, $0x20 + JBE emit_lit_memmove_repeat_emit_encodeBlockAsm_memmove_move_17through32 + JMP emit_lit_memmove_repeat_emit_encodeBlockAsm_memmove_move_33through64 + +emit_lit_memmove_repeat_emit_encodeBlockAsm_memmove_move_8: + MOVQ (R10), R11 + MOVQ R11, (CX) + JMP memmove_end_copy_repeat_emit_encodeBlockAsm + +emit_lit_memmove_repeat_emit_encodeBlockAsm_memmove_move_8through16: + MOVQ (R10), R11 + MOVQ -8(R10)(R9*1), R10 + MOVQ R11, (CX) + MOVQ R10, -8(CX)(R9*1) + JMP memmove_end_copy_repeat_emit_encodeBlockAsm + +emit_lit_memmove_repeat_emit_encodeBlockAsm_memmove_move_17through32: + MOVOU (R10), X0 + MOVOU -16(R10)(R9*1), X1 + MOVOU X0, (CX) + MOVOU X1, -16(CX)(R9*1) + JMP memmove_end_copy_repeat_emit_encodeBlockAsm + +emit_lit_memmove_repeat_emit_encodeBlockAsm_memmove_move_33through64: + MOVOU (R10), X0 + MOVOU 16(R10), X1 + MOVOU -32(R10)(R9*1), X2 + MOVOU -16(R10)(R9*1), X3 + MOVOU X0, (CX) + MOVOU X1, 16(CX) + MOVOU X2, -32(CX)(R9*1) + MOVOU X3, -16(CX)(R9*1) + +memmove_end_copy_repeat_emit_encodeBlockAsm: + MOVQ SI, CX + JMP emit_literal_done_repeat_emit_encodeBlockAsm + +memmove_long_repeat_emit_encodeBlockAsm: + LEAQ (CX)(R9*1), SI + + // genMemMoveLong + MOVOU (R10), X0 + MOVOU 16(R10), X1 + MOVOU -32(R10)(R9*1), X2 + MOVOU -16(R10)(R9*1), X3 + MOVQ R9, R12 + SHRQ $0x05, R12 + MOVQ CX, R11 + ANDL $0x0000001f, R11 + MOVQ $0x00000040, R13 + SUBQ R11, R13 + DECQ R12 + JA emit_lit_memmove_long_repeat_emit_encodeBlockAsmlarge_forward_sse_loop_32 + LEAQ -32(R10)(R13*1), R11 + LEAQ -32(CX)(R13*1), R14 + +emit_lit_memmove_long_repeat_emit_encodeBlockAsmlarge_big_loop_back: + MOVOU (R11), X4 + MOVOU 16(R11), X5 + MOVOA X4, (R14) + MOVOA X5, 16(R14) + ADDQ $0x20, R14 + ADDQ $0x20, R11 + ADDQ $0x20, R13 + DECQ R12 + JNA emit_lit_memmove_long_repeat_emit_encodeBlockAsmlarge_big_loop_back + +emit_lit_memmove_long_repeat_emit_encodeBlockAsmlarge_forward_sse_loop_32: + MOVOU -32(R10)(R13*1), X4 + MOVOU -16(R10)(R13*1), X5 + MOVOA X4, -32(CX)(R13*1) + MOVOA X5, -16(CX)(R13*1) + ADDQ $0x20, R13 + CMPQ 
R9, R13 + JAE emit_lit_memmove_long_repeat_emit_encodeBlockAsmlarge_forward_sse_loop_32 + MOVOU X0, (CX) + MOVOU X1, 16(CX) + MOVOU X2, -32(CX)(R9*1) + MOVOU X3, -16(CX)(R9*1) + MOVQ SI, CX + +emit_literal_done_repeat_emit_encodeBlockAsm: + ADDL $0x05, DX + MOVL DX, SI + SUBL 16(SP), SI + MOVQ src_len+32(FP), R9 + SUBL DX, R9 + LEAQ (BX)(DX*1), R10 + LEAQ (BX)(SI*1), SI + + // matchLen + XORL R12, R12 + +matchlen_loopback_16_repeat_extend_encodeBlockAsm: + CMPL R9, $0x10 + JB matchlen_match8_repeat_extend_encodeBlockAsm + MOVQ (R10)(R12*1), R11 + MOVQ 8(R10)(R12*1), R13 + XORQ (SI)(R12*1), R11 + JNZ matchlen_bsf_8_repeat_extend_encodeBlockAsm + XORQ 8(SI)(R12*1), R13 + JNZ matchlen_bsf_16repeat_extend_encodeBlockAsm + LEAL -16(R9), R9 + LEAL 16(R12), R12 + JMP matchlen_loopback_16_repeat_extend_encodeBlockAsm + +matchlen_bsf_16repeat_extend_encodeBlockAsm: +#ifdef GOAMD64_v3 + TZCNTQ R13, R13 + +#else + BSFQ R13, R13 + +#endif + SARQ $0x03, R13 + LEAL 8(R12)(R13*1), R12 + JMP repeat_extend_forward_end_encodeBlockAsm + +matchlen_match8_repeat_extend_encodeBlockAsm: + CMPL R9, $0x08 + JB matchlen_match4_repeat_extend_encodeBlockAsm + MOVQ (R10)(R12*1), R11 + XORQ (SI)(R12*1), R11 + JNZ matchlen_bsf_8_repeat_extend_encodeBlockAsm + LEAL -8(R9), R9 + LEAL 8(R12), R12 + JMP matchlen_match4_repeat_extend_encodeBlockAsm + +matchlen_bsf_8_repeat_extend_encodeBlockAsm: +#ifdef GOAMD64_v3 + TZCNTQ R11, R11 + +#else + BSFQ R11, R11 + +#endif + SARQ $0x03, R11 + LEAL (R12)(R11*1), R12 + JMP repeat_extend_forward_end_encodeBlockAsm + +matchlen_match4_repeat_extend_encodeBlockAsm: + CMPL R9, $0x04 + JB matchlen_match2_repeat_extend_encodeBlockAsm + MOVL (R10)(R12*1), R11 + CMPL (SI)(R12*1), R11 + JNE matchlen_match2_repeat_extend_encodeBlockAsm + LEAL -4(R9), R9 + LEAL 4(R12), R12 + +matchlen_match2_repeat_extend_encodeBlockAsm: + CMPL R9, $0x01 + JE matchlen_match1_repeat_extend_encodeBlockAsm + JB repeat_extend_forward_end_encodeBlockAsm + MOVW (R10)(R12*1), R11 + CMPW (SI)(R12*1), R11 + JNE matchlen_match1_repeat_extend_encodeBlockAsm + LEAL 2(R12), R12 + SUBL $0x02, R9 + JZ repeat_extend_forward_end_encodeBlockAsm + +matchlen_match1_repeat_extend_encodeBlockAsm: + MOVB (R10)(R12*1), R11 + CMPB (SI)(R12*1), R11 + JNE repeat_extend_forward_end_encodeBlockAsm + LEAL 1(R12), R12 + +repeat_extend_forward_end_encodeBlockAsm: + ADDL R12, DX + MOVL DX, SI + SUBL DI, SI + MOVL 16(SP), DI + TESTL R8, R8 + JZ repeat_as_copy_encodeBlockAsm + + // emitRepeat +emit_repeat_again_match_repeat_encodeBlockAsm: + MOVL SI, R8 + LEAL -4(SI), SI + CMPL R8, $0x08 + JBE repeat_two_match_repeat_encodeBlockAsm + CMPL R8, $0x0c + JAE cant_repeat_two_offset_match_repeat_encodeBlockAsm + CMPL DI, $0x00000800 + JB repeat_two_offset_match_repeat_encodeBlockAsm + +cant_repeat_two_offset_match_repeat_encodeBlockAsm: + CMPL SI, $0x00000104 + JB repeat_three_match_repeat_encodeBlockAsm + CMPL SI, $0x00010100 + JB repeat_four_match_repeat_encodeBlockAsm + CMPL SI, $0x0100ffff + JB repeat_five_match_repeat_encodeBlockAsm + LEAL -16842747(SI), SI + MOVL $0xfffb001d, (CX) + MOVB $0xff, 4(CX) + ADDQ $0x05, CX + JMP emit_repeat_again_match_repeat_encodeBlockAsm + +repeat_five_match_repeat_encodeBlockAsm: + LEAL -65536(SI), SI + MOVL SI, DI + MOVW $0x001d, (CX) + MOVW SI, 2(CX) + SARL $0x10, DI + MOVB DI, 4(CX) + ADDQ $0x05, CX + JMP repeat_end_emit_encodeBlockAsm + +repeat_four_match_repeat_encodeBlockAsm: + LEAL -256(SI), SI + MOVW $0x0019, (CX) + MOVW SI, 2(CX) + ADDQ $0x04, CX + JMP repeat_end_emit_encodeBlockAsm + 
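Note: the magic constant 0x0000cf1bbcdcbf9b loaded at the top of this routine is a 6-byte multiplicative-hash prime (227718039650203), and the SHLQ $0x10 / IMULQ / SHRQ $0x32 sequence is the usual (u<<16)*prime >> (64-14) fold, i.e. a 14-bit table index over the low six input bytes. A Go sketch consistent with those shifts (the helper name matches the hash6 calls in the portable code path; the exact definition is inferred):

// hash6 folds the low 6 bytes of u into h table bits.
func hash6(u uint64, h uint8) uint32 {
	const prime6bytes = 227718039650203 // 0x0000cf1bbcdcbf9b
	return uint32(((u << (64 - 48)) * prime6bytes) >> ((64 - h) & 63))
}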
+repeat_three_match_repeat_encodeBlockAsm: + LEAL -4(SI), SI + MOVW $0x0015, (CX) + MOVB SI, 2(CX) + ADDQ $0x03, CX + JMP repeat_end_emit_encodeBlockAsm + +repeat_two_match_repeat_encodeBlockAsm: + SHLL $0x02, SI + ORL $0x01, SI + MOVW SI, (CX) + ADDQ $0x02, CX + JMP repeat_end_emit_encodeBlockAsm + +repeat_two_offset_match_repeat_encodeBlockAsm: + XORQ R8, R8 + LEAL 1(R8)(SI*4), SI + MOVB DI, 1(CX) + SARL $0x08, DI + SHLL $0x05, DI + ORL DI, SI + MOVB SI, (CX) + ADDQ $0x02, CX + JMP repeat_end_emit_encodeBlockAsm + +repeat_as_copy_encodeBlockAsm: + // emitCopy + CMPL DI, $0x00010000 + JB two_byte_offset_repeat_as_copy_encodeBlockAsm + CMPL SI, $0x40 + JBE four_bytes_remain_repeat_as_copy_encodeBlockAsm + MOVB $0xff, (CX) + MOVL DI, 1(CX) + LEAL -64(SI), SI + ADDQ $0x05, CX + CMPL SI, $0x04 + JB four_bytes_remain_repeat_as_copy_encodeBlockAsm + + // emitRepeat +emit_repeat_again_repeat_as_copy_encodeBlockAsm_emit_copy: + MOVL SI, R8 + LEAL -4(SI), SI + CMPL R8, $0x08 + JBE repeat_two_repeat_as_copy_encodeBlockAsm_emit_copy + CMPL R8, $0x0c + JAE cant_repeat_two_offset_repeat_as_copy_encodeBlockAsm_emit_copy + CMPL DI, $0x00000800 + JB repeat_two_offset_repeat_as_copy_encodeBlockAsm_emit_copy + +cant_repeat_two_offset_repeat_as_copy_encodeBlockAsm_emit_copy: + CMPL SI, $0x00000104 + JB repeat_three_repeat_as_copy_encodeBlockAsm_emit_copy + CMPL SI, $0x00010100 + JB repeat_four_repeat_as_copy_encodeBlockAsm_emit_copy + CMPL SI, $0x0100ffff + JB repeat_five_repeat_as_copy_encodeBlockAsm_emit_copy + LEAL -16842747(SI), SI + MOVL $0xfffb001d, (CX) + MOVB $0xff, 4(CX) + ADDQ $0x05, CX + JMP emit_repeat_again_repeat_as_copy_encodeBlockAsm_emit_copy + +repeat_five_repeat_as_copy_encodeBlockAsm_emit_copy: + LEAL -65536(SI), SI + MOVL SI, DI + MOVW $0x001d, (CX) + MOVW SI, 2(CX) + SARL $0x10, DI + MOVB DI, 4(CX) + ADDQ $0x05, CX + JMP repeat_end_emit_encodeBlockAsm + +repeat_four_repeat_as_copy_encodeBlockAsm_emit_copy: + LEAL -256(SI), SI + MOVW $0x0019, (CX) + MOVW SI, 2(CX) + ADDQ $0x04, CX + JMP repeat_end_emit_encodeBlockAsm + +repeat_three_repeat_as_copy_encodeBlockAsm_emit_copy: + LEAL -4(SI), SI + MOVW $0x0015, (CX) + MOVB SI, 2(CX) + ADDQ $0x03, CX + JMP repeat_end_emit_encodeBlockAsm + +repeat_two_repeat_as_copy_encodeBlockAsm_emit_copy: + SHLL $0x02, SI + ORL $0x01, SI + MOVW SI, (CX) + ADDQ $0x02, CX + JMP repeat_end_emit_encodeBlockAsm + +repeat_two_offset_repeat_as_copy_encodeBlockAsm_emit_copy: + XORQ R8, R8 + LEAL 1(R8)(SI*4), SI + MOVB DI, 1(CX) + SARL $0x08, DI + SHLL $0x05, DI + ORL DI, SI + MOVB SI, (CX) + ADDQ $0x02, CX + JMP repeat_end_emit_encodeBlockAsm + +four_bytes_remain_repeat_as_copy_encodeBlockAsm: + TESTL SI, SI + JZ repeat_end_emit_encodeBlockAsm + XORL R8, R8 + LEAL -1(R8)(SI*4), SI + MOVB SI, (CX) + MOVL DI, 1(CX) + ADDQ $0x05, CX + JMP repeat_end_emit_encodeBlockAsm + +two_byte_offset_repeat_as_copy_encodeBlockAsm: + CMPL SI, $0x40 + JBE two_byte_offset_short_repeat_as_copy_encodeBlockAsm + CMPL DI, $0x00000800 + JAE long_offset_short_repeat_as_copy_encodeBlockAsm + MOVL $0x00000001, R8 + LEAL 16(R8), R8 + MOVB DI, 1(CX) + MOVL DI, R9 + SHRL $0x08, R9 + SHLL $0x05, R9 + ORL R9, R8 + MOVB R8, (CX) + ADDQ $0x02, CX + SUBL $0x08, SI + + // emitRepeat + LEAL -4(SI), SI + JMP cant_repeat_two_offset_repeat_as_copy_encodeBlockAsm_emit_copy_short_2b + +emit_repeat_again_repeat_as_copy_encodeBlockAsm_emit_copy_short_2b: + MOVL SI, R8 + LEAL -4(SI), SI + CMPL R8, $0x08 + JBE repeat_two_repeat_as_copy_encodeBlockAsm_emit_copy_short_2b + CMPL R8, $0x0c + JAE 
cant_repeat_two_offset_repeat_as_copy_encodeBlockAsm_emit_copy_short_2b + CMPL DI, $0x00000800 + JB repeat_two_offset_repeat_as_copy_encodeBlockAsm_emit_copy_short_2b + +cant_repeat_two_offset_repeat_as_copy_encodeBlockAsm_emit_copy_short_2b: + CMPL SI, $0x00000104 + JB repeat_three_repeat_as_copy_encodeBlockAsm_emit_copy_short_2b + CMPL SI, $0x00010100 + JB repeat_four_repeat_as_copy_encodeBlockAsm_emit_copy_short_2b + CMPL SI, $0x0100ffff + JB repeat_five_repeat_as_copy_encodeBlockAsm_emit_copy_short_2b + LEAL -16842747(SI), SI + MOVL $0xfffb001d, (CX) + MOVB $0xff, 4(CX) + ADDQ $0x05, CX + JMP emit_repeat_again_repeat_as_copy_encodeBlockAsm_emit_copy_short_2b + +repeat_five_repeat_as_copy_encodeBlockAsm_emit_copy_short_2b: + LEAL -65536(SI), SI + MOVL SI, DI + MOVW $0x001d, (CX) + MOVW SI, 2(CX) + SARL $0x10, DI + MOVB DI, 4(CX) + ADDQ $0x05, CX + JMP repeat_end_emit_encodeBlockAsm + +repeat_four_repeat_as_copy_encodeBlockAsm_emit_copy_short_2b: + LEAL -256(SI), SI + MOVW $0x0019, (CX) + MOVW SI, 2(CX) + ADDQ $0x04, CX + JMP repeat_end_emit_encodeBlockAsm + +repeat_three_repeat_as_copy_encodeBlockAsm_emit_copy_short_2b: + LEAL -4(SI), SI + MOVW $0x0015, (CX) + MOVB SI, 2(CX) + ADDQ $0x03, CX + JMP repeat_end_emit_encodeBlockAsm + +repeat_two_repeat_as_copy_encodeBlockAsm_emit_copy_short_2b: + SHLL $0x02, SI + ORL $0x01, SI + MOVW SI, (CX) + ADDQ $0x02, CX + JMP repeat_end_emit_encodeBlockAsm + +repeat_two_offset_repeat_as_copy_encodeBlockAsm_emit_copy_short_2b: + XORQ R8, R8 + LEAL 1(R8)(SI*4), SI + MOVB DI, 1(CX) + SARL $0x08, DI + SHLL $0x05, DI + ORL DI, SI + MOVB SI, (CX) + ADDQ $0x02, CX + JMP repeat_end_emit_encodeBlockAsm + +long_offset_short_repeat_as_copy_encodeBlockAsm: + MOVB $0xee, (CX) + MOVW DI, 1(CX) + LEAL -60(SI), SI + ADDQ $0x03, CX + + // emitRepeat +emit_repeat_again_repeat_as_copy_encodeBlockAsm_emit_copy_short: + MOVL SI, R8 + LEAL -4(SI), SI + CMPL R8, $0x08 + JBE repeat_two_repeat_as_copy_encodeBlockAsm_emit_copy_short + CMPL R8, $0x0c + JAE cant_repeat_two_offset_repeat_as_copy_encodeBlockAsm_emit_copy_short + CMPL DI, $0x00000800 + JB repeat_two_offset_repeat_as_copy_encodeBlockAsm_emit_copy_short + +cant_repeat_two_offset_repeat_as_copy_encodeBlockAsm_emit_copy_short: + CMPL SI, $0x00000104 + JB repeat_three_repeat_as_copy_encodeBlockAsm_emit_copy_short + CMPL SI, $0x00010100 + JB repeat_four_repeat_as_copy_encodeBlockAsm_emit_copy_short + CMPL SI, $0x0100ffff + JB repeat_five_repeat_as_copy_encodeBlockAsm_emit_copy_short + LEAL -16842747(SI), SI + MOVL $0xfffb001d, (CX) + MOVB $0xff, 4(CX) + ADDQ $0x05, CX + JMP emit_repeat_again_repeat_as_copy_encodeBlockAsm_emit_copy_short + +repeat_five_repeat_as_copy_encodeBlockAsm_emit_copy_short: + LEAL -65536(SI), SI + MOVL SI, DI + MOVW $0x001d, (CX) + MOVW SI, 2(CX) + SARL $0x10, DI + MOVB DI, 4(CX) + ADDQ $0x05, CX + JMP repeat_end_emit_encodeBlockAsm + +repeat_four_repeat_as_copy_encodeBlockAsm_emit_copy_short: + LEAL -256(SI), SI + MOVW $0x0019, (CX) + MOVW SI, 2(CX) + ADDQ $0x04, CX + JMP repeat_end_emit_encodeBlockAsm + +repeat_three_repeat_as_copy_encodeBlockAsm_emit_copy_short: + LEAL -4(SI), SI + MOVW $0x0015, (CX) + MOVB SI, 2(CX) + ADDQ $0x03, CX + JMP repeat_end_emit_encodeBlockAsm + +repeat_two_repeat_as_copy_encodeBlockAsm_emit_copy_short: + SHLL $0x02, SI + ORL $0x01, SI + MOVW SI, (CX) + ADDQ $0x02, CX + JMP repeat_end_emit_encodeBlockAsm + +repeat_two_offset_repeat_as_copy_encodeBlockAsm_emit_copy_short: + XORQ R8, R8 + LEAL 1(R8)(SI*4), SI + MOVB DI, 1(CX) + SARL $0x08, DI + SHLL $0x05, DI + ORL DI, 
SI + MOVB SI, (CX) + ADDQ $0x02, CX + JMP repeat_end_emit_encodeBlockAsm + +two_byte_offset_short_repeat_as_copy_encodeBlockAsm: + MOVL SI, R8 + SHLL $0x02, R8 + CMPL SI, $0x0c + JAE emit_copy_three_repeat_as_copy_encodeBlockAsm + CMPL DI, $0x00000800 + JAE emit_copy_three_repeat_as_copy_encodeBlockAsm + LEAL -15(R8), R8 + MOVB DI, 1(CX) + SHRL $0x08, DI + SHLL $0x05, DI + ORL DI, R8 + MOVB R8, (CX) + ADDQ $0x02, CX + JMP repeat_end_emit_encodeBlockAsm + +emit_copy_three_repeat_as_copy_encodeBlockAsm: + LEAL -2(R8), R8 + MOVB R8, (CX) + MOVW DI, 1(CX) + ADDQ $0x03, CX + +repeat_end_emit_encodeBlockAsm: + MOVL DX, 12(SP) + JMP search_loop_encodeBlockAsm + +no_repeat_found_encodeBlockAsm: + CMPL (BX)(SI*1), DI + JEQ candidate_match_encodeBlockAsm + SHRQ $0x08, DI + MOVL (AX)(R10*4), SI + LEAL 2(DX), R9 + CMPL (BX)(R8*1), DI + JEQ candidate2_match_encodeBlockAsm + MOVL R9, (AX)(R10*4) + SHRQ $0x08, DI + CMPL (BX)(SI*1), DI + JEQ candidate3_match_encodeBlockAsm + MOVL 20(SP), DX + JMP search_loop_encodeBlockAsm + +candidate3_match_encodeBlockAsm: + ADDL $0x02, DX + JMP candidate_match_encodeBlockAsm + +candidate2_match_encodeBlockAsm: + MOVL R9, (AX)(R10*4) + INCL DX + MOVL R8, SI + +candidate_match_encodeBlockAsm: + MOVL 12(SP), DI + TESTL SI, SI + JZ match_extend_back_end_encodeBlockAsm + +match_extend_back_loop_encodeBlockAsm: + CMPL DX, DI + JBE match_extend_back_end_encodeBlockAsm + MOVB -1(BX)(SI*1), R8 + MOVB -1(BX)(DX*1), R9 + CMPB R8, R9 + JNE match_extend_back_end_encodeBlockAsm + LEAL -1(DX), DX + DECL SI + JZ match_extend_back_end_encodeBlockAsm + JMP match_extend_back_loop_encodeBlockAsm + +match_extend_back_end_encodeBlockAsm: + MOVL DX, DI + SUBL 12(SP), DI + LEAQ 5(CX)(DI*1), DI + CMPQ DI, (SP) + JB match_dst_size_check_encodeBlockAsm + MOVQ $0x00000000, ret+56(FP) + RET + +match_dst_size_check_encodeBlockAsm: + MOVL DX, DI + MOVL 12(SP), R8 + CMPL R8, DI + JEQ emit_literal_done_match_emit_encodeBlockAsm + MOVL DI, R9 + MOVL DI, 12(SP) + LEAQ (BX)(R8*1), DI + SUBL R8, R9 + LEAL -1(R9), R8 + CMPL R8, $0x3c + JB one_byte_match_emit_encodeBlockAsm + CMPL R8, $0x00000100 + JB two_bytes_match_emit_encodeBlockAsm + CMPL R8, $0x00010000 + JB three_bytes_match_emit_encodeBlockAsm + CMPL R8, $0x01000000 + JB four_bytes_match_emit_encodeBlockAsm + MOVB $0xfc, (CX) + MOVL R8, 1(CX) + ADDQ $0x05, CX + JMP memmove_long_match_emit_encodeBlockAsm + +four_bytes_match_emit_encodeBlockAsm: + MOVL R8, R10 + SHRL $0x10, R10 + MOVB $0xf8, (CX) + MOVW R8, 1(CX) + MOVB R10, 3(CX) + ADDQ $0x04, CX + JMP memmove_long_match_emit_encodeBlockAsm + +three_bytes_match_emit_encodeBlockAsm: + MOVB $0xf4, (CX) + MOVW R8, 1(CX) + ADDQ $0x03, CX + JMP memmove_long_match_emit_encodeBlockAsm + +two_bytes_match_emit_encodeBlockAsm: + MOVB $0xf0, (CX) + MOVB R8, 1(CX) + ADDQ $0x02, CX + CMPL R8, $0x40 + JB memmove_match_emit_encodeBlockAsm + JMP memmove_long_match_emit_encodeBlockAsm + +one_byte_match_emit_encodeBlockAsm: + SHLB $0x02, R8 + MOVB R8, (CX) + ADDQ $0x01, CX + +memmove_match_emit_encodeBlockAsm: + LEAQ (CX)(R9*1), R8 + + // genMemMoveShort + CMPQ R9, $0x08 + JBE emit_lit_memmove_match_emit_encodeBlockAsm_memmove_move_8 + CMPQ R9, $0x10 + JBE emit_lit_memmove_match_emit_encodeBlockAsm_memmove_move_8through16 + CMPQ R9, $0x20 + JBE emit_lit_memmove_match_emit_encodeBlockAsm_memmove_move_17through32 + JMP emit_lit_memmove_match_emit_encodeBlockAsm_memmove_move_33through64 + +emit_lit_memmove_match_emit_encodeBlockAsm_memmove_move_8: + MOVQ (DI), R10 + MOVQ R10, (CX) + JMP 
memmove_end_copy_match_emit_encodeBlockAsm + +emit_lit_memmove_match_emit_encodeBlockAsm_memmove_move_8through16: + MOVQ (DI), R10 + MOVQ -8(DI)(R9*1), DI + MOVQ R10, (CX) + MOVQ DI, -8(CX)(R9*1) + JMP memmove_end_copy_match_emit_encodeBlockAsm + +emit_lit_memmove_match_emit_encodeBlockAsm_memmove_move_17through32: + MOVOU (DI), X0 + MOVOU -16(DI)(R9*1), X1 + MOVOU X0, (CX) + MOVOU X1, -16(CX)(R9*1) + JMP memmove_end_copy_match_emit_encodeBlockAsm + +emit_lit_memmove_match_emit_encodeBlockAsm_memmove_move_33through64: + MOVOU (DI), X0 + MOVOU 16(DI), X1 + MOVOU -32(DI)(R9*1), X2 + MOVOU -16(DI)(R9*1), X3 + MOVOU X0, (CX) + MOVOU X1, 16(CX) + MOVOU X2, -32(CX)(R9*1) + MOVOU X3, -16(CX)(R9*1) + +memmove_end_copy_match_emit_encodeBlockAsm: + MOVQ R8, CX + JMP emit_literal_done_match_emit_encodeBlockAsm + +memmove_long_match_emit_encodeBlockAsm: + LEAQ (CX)(R9*1), R8 + + // genMemMoveLong + MOVOU (DI), X0 + MOVOU 16(DI), X1 + MOVOU -32(DI)(R9*1), X2 + MOVOU -16(DI)(R9*1), X3 + MOVQ R9, R11 + SHRQ $0x05, R11 + MOVQ CX, R10 + ANDL $0x0000001f, R10 + MOVQ $0x00000040, R12 + SUBQ R10, R12 + DECQ R11 + JA emit_lit_memmove_long_match_emit_encodeBlockAsmlarge_forward_sse_loop_32 + LEAQ -32(DI)(R12*1), R10 + LEAQ -32(CX)(R12*1), R13 + +emit_lit_memmove_long_match_emit_encodeBlockAsmlarge_big_loop_back: + MOVOU (R10), X4 + MOVOU 16(R10), X5 + MOVOA X4, (R13) + MOVOA X5, 16(R13) + ADDQ $0x20, R13 + ADDQ $0x20, R10 + ADDQ $0x20, R12 + DECQ R11 + JNA emit_lit_memmove_long_match_emit_encodeBlockAsmlarge_big_loop_back + +emit_lit_memmove_long_match_emit_encodeBlockAsmlarge_forward_sse_loop_32: + MOVOU -32(DI)(R12*1), X4 + MOVOU -16(DI)(R12*1), X5 + MOVOA X4, -32(CX)(R12*1) + MOVOA X5, -16(CX)(R12*1) + ADDQ $0x20, R12 + CMPQ R9, R12 + JAE emit_lit_memmove_long_match_emit_encodeBlockAsmlarge_forward_sse_loop_32 + MOVOU X0, (CX) + MOVOU X1, 16(CX) + MOVOU X2, -32(CX)(R9*1) + MOVOU X3, -16(CX)(R9*1) + MOVQ R8, CX + +emit_literal_done_match_emit_encodeBlockAsm: +match_nolit_loop_encodeBlockAsm: + MOVL DX, DI + SUBL SI, DI + MOVL DI, 16(SP) + ADDL $0x04, DX + ADDL $0x04, SI + MOVQ src_len+32(FP), DI + SUBL DX, DI + LEAQ (BX)(DX*1), R8 + LEAQ (BX)(SI*1), SI + + // matchLen + XORL R10, R10 + +matchlen_loopback_16_match_nolit_encodeBlockAsm: + CMPL DI, $0x10 + JB matchlen_match8_match_nolit_encodeBlockAsm + MOVQ (R8)(R10*1), R9 + MOVQ 8(R8)(R10*1), R11 + XORQ (SI)(R10*1), R9 + JNZ matchlen_bsf_8_match_nolit_encodeBlockAsm + XORQ 8(SI)(R10*1), R11 + JNZ matchlen_bsf_16match_nolit_encodeBlockAsm + LEAL -16(DI), DI + LEAL 16(R10), R10 + JMP matchlen_loopback_16_match_nolit_encodeBlockAsm + +matchlen_bsf_16match_nolit_encodeBlockAsm: +#ifdef GOAMD64_v3 + TZCNTQ R11, R11 + +#else + BSFQ R11, R11 + +#endif + SARQ $0x03, R11 + LEAL 8(R10)(R11*1), R10 + JMP match_nolit_end_encodeBlockAsm + +matchlen_match8_match_nolit_encodeBlockAsm: + CMPL DI, $0x08 + JB matchlen_match4_match_nolit_encodeBlockAsm + MOVQ (R8)(R10*1), R9 + XORQ (SI)(R10*1), R9 + JNZ matchlen_bsf_8_match_nolit_encodeBlockAsm + LEAL -8(DI), DI + LEAL 8(R10), R10 + JMP matchlen_match4_match_nolit_encodeBlockAsm + +matchlen_bsf_8_match_nolit_encodeBlockAsm: +#ifdef GOAMD64_v3 + TZCNTQ R9, R9 + +#else + BSFQ R9, R9 + +#endif + SARQ $0x03, R9 + LEAL (R10)(R9*1), R10 + JMP match_nolit_end_encodeBlockAsm + +matchlen_match4_match_nolit_encodeBlockAsm: + CMPL DI, $0x04 + JB matchlen_match2_match_nolit_encodeBlockAsm + MOVL (R8)(R10*1), R9 + CMPL (SI)(R10*1), R9 + JNE matchlen_match2_match_nolit_encodeBlockAsm + LEAL -4(DI), DI + LEAL 4(R10), R10 + 
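Note: the #ifdef GOAMD64_v3 blocks in the matchLen sequences above select TZCNT where the build target guarantees BMI, and fall back to BSF otherwise; both count trailing zero bits of the XOR of two words, and the SARQ $0x03 converts bits to bytes. The portable equivalent is simply math/bits, which the Go compiler lowers to the same instructions:

import "math/bits"

// Index of the first differing byte between two 8-byte words (a != b).
func firstDiffByte(a, b uint64) int {
	return bits.TrailingZeros64(a^b) >> 3
}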
+matchlen_match2_match_nolit_encodeBlockAsm: + CMPL DI, $0x01 + JE matchlen_match1_match_nolit_encodeBlockAsm + JB match_nolit_end_encodeBlockAsm + MOVW (R8)(R10*1), R9 + CMPW (SI)(R10*1), R9 + JNE matchlen_match1_match_nolit_encodeBlockAsm + LEAL 2(R10), R10 + SUBL $0x02, DI + JZ match_nolit_end_encodeBlockAsm + +matchlen_match1_match_nolit_encodeBlockAsm: + MOVB (R8)(R10*1), R9 + CMPB (SI)(R10*1), R9 + JNE match_nolit_end_encodeBlockAsm + LEAL 1(R10), R10 + +match_nolit_end_encodeBlockAsm: + ADDL R10, DX + MOVL 16(SP), SI + ADDL $0x04, R10 + MOVL DX, 12(SP) + + // emitCopy + CMPL SI, $0x00010000 + JB two_byte_offset_match_nolit_encodeBlockAsm + CMPL R10, $0x40 + JBE four_bytes_remain_match_nolit_encodeBlockAsm + MOVB $0xff, (CX) + MOVL SI, 1(CX) + LEAL -64(R10), R10 + ADDQ $0x05, CX + CMPL R10, $0x04 + JB four_bytes_remain_match_nolit_encodeBlockAsm + + // emitRepeat +emit_repeat_again_match_nolit_encodeBlockAsm_emit_copy: + MOVL R10, DI + LEAL -4(R10), R10 + CMPL DI, $0x08 + JBE repeat_two_match_nolit_encodeBlockAsm_emit_copy + CMPL DI, $0x0c + JAE cant_repeat_two_offset_match_nolit_encodeBlockAsm_emit_copy + CMPL SI, $0x00000800 + JB repeat_two_offset_match_nolit_encodeBlockAsm_emit_copy + +cant_repeat_two_offset_match_nolit_encodeBlockAsm_emit_copy: + CMPL R10, $0x00000104 + JB repeat_three_match_nolit_encodeBlockAsm_emit_copy + CMPL R10, $0x00010100 + JB repeat_four_match_nolit_encodeBlockAsm_emit_copy + CMPL R10, $0x0100ffff + JB repeat_five_match_nolit_encodeBlockAsm_emit_copy + LEAL -16842747(R10), R10 + MOVL $0xfffb001d, (CX) + MOVB $0xff, 4(CX) + ADDQ $0x05, CX + JMP emit_repeat_again_match_nolit_encodeBlockAsm_emit_copy + +repeat_five_match_nolit_encodeBlockAsm_emit_copy: + LEAL -65536(R10), R10 + MOVL R10, SI + MOVW $0x001d, (CX) + MOVW R10, 2(CX) + SARL $0x10, SI + MOVB SI, 4(CX) + ADDQ $0x05, CX + JMP match_nolit_emitcopy_end_encodeBlockAsm + +repeat_four_match_nolit_encodeBlockAsm_emit_copy: + LEAL -256(R10), R10 + MOVW $0x0019, (CX) + MOVW R10, 2(CX) + ADDQ $0x04, CX + JMP match_nolit_emitcopy_end_encodeBlockAsm + +repeat_three_match_nolit_encodeBlockAsm_emit_copy: + LEAL -4(R10), R10 + MOVW $0x0015, (CX) + MOVB R10, 2(CX) + ADDQ $0x03, CX + JMP match_nolit_emitcopy_end_encodeBlockAsm + +repeat_two_match_nolit_encodeBlockAsm_emit_copy: + SHLL $0x02, R10 + ORL $0x01, R10 + MOVW R10, (CX) + ADDQ $0x02, CX + JMP match_nolit_emitcopy_end_encodeBlockAsm + +repeat_two_offset_match_nolit_encodeBlockAsm_emit_copy: + XORQ DI, DI + LEAL 1(DI)(R10*4), R10 + MOVB SI, 1(CX) + SARL $0x08, SI + SHLL $0x05, SI + ORL SI, R10 + MOVB R10, (CX) + ADDQ $0x02, CX + JMP match_nolit_emitcopy_end_encodeBlockAsm + +four_bytes_remain_match_nolit_encodeBlockAsm: + TESTL R10, R10 + JZ match_nolit_emitcopy_end_encodeBlockAsm + XORL DI, DI + LEAL -1(DI)(R10*4), R10 + MOVB R10, (CX) + MOVL SI, 1(CX) + ADDQ $0x05, CX + JMP match_nolit_emitcopy_end_encodeBlockAsm + +two_byte_offset_match_nolit_encodeBlockAsm: + CMPL R10, $0x40 + JBE two_byte_offset_short_match_nolit_encodeBlockAsm + CMPL SI, $0x00000800 + JAE long_offset_short_match_nolit_encodeBlockAsm + MOVL $0x00000001, DI + LEAL 16(DI), DI + MOVB SI, 1(CX) + MOVL SI, R8 + SHRL $0x08, R8 + SHLL $0x05, R8 + ORL R8, DI + MOVB DI, (CX) + ADDQ $0x02, CX + SUBL $0x08, R10 + + // emitRepeat + LEAL -4(R10), R10 + JMP cant_repeat_two_offset_match_nolit_encodeBlockAsm_emit_copy_short_2b + +emit_repeat_again_match_nolit_encodeBlockAsm_emit_copy_short_2b: + MOVL R10, DI + LEAL -4(R10), R10 + CMPL DI, $0x08 + JBE 
repeat_two_match_nolit_encodeBlockAsm_emit_copy_short_2b + CMPL DI, $0x0c + JAE cant_repeat_two_offset_match_nolit_encodeBlockAsm_emit_copy_short_2b + CMPL SI, $0x00000800 + JB repeat_two_offset_match_nolit_encodeBlockAsm_emit_copy_short_2b + +cant_repeat_two_offset_match_nolit_encodeBlockAsm_emit_copy_short_2b: + CMPL R10, $0x00000104 + JB repeat_three_match_nolit_encodeBlockAsm_emit_copy_short_2b + CMPL R10, $0x00010100 + JB repeat_four_match_nolit_encodeBlockAsm_emit_copy_short_2b + CMPL R10, $0x0100ffff + JB repeat_five_match_nolit_encodeBlockAsm_emit_copy_short_2b + LEAL -16842747(R10), R10 + MOVL $0xfffb001d, (CX) + MOVB $0xff, 4(CX) + ADDQ $0x05, CX + JMP emit_repeat_again_match_nolit_encodeBlockAsm_emit_copy_short_2b + +repeat_five_match_nolit_encodeBlockAsm_emit_copy_short_2b: + LEAL -65536(R10), R10 + MOVL R10, SI + MOVW $0x001d, (CX) + MOVW R10, 2(CX) + SARL $0x10, SI + MOVB SI, 4(CX) + ADDQ $0x05, CX + JMP match_nolit_emitcopy_end_encodeBlockAsm + +repeat_four_match_nolit_encodeBlockAsm_emit_copy_short_2b: + LEAL -256(R10), R10 + MOVW $0x0019, (CX) + MOVW R10, 2(CX) + ADDQ $0x04, CX + JMP match_nolit_emitcopy_end_encodeBlockAsm + +repeat_three_match_nolit_encodeBlockAsm_emit_copy_short_2b: + LEAL -4(R10), R10 + MOVW $0x0015, (CX) + MOVB R10, 2(CX) + ADDQ $0x03, CX + JMP match_nolit_emitcopy_end_encodeBlockAsm + +repeat_two_match_nolit_encodeBlockAsm_emit_copy_short_2b: + SHLL $0x02, R10 + ORL $0x01, R10 + MOVW R10, (CX) + ADDQ $0x02, CX + JMP match_nolit_emitcopy_end_encodeBlockAsm + +repeat_two_offset_match_nolit_encodeBlockAsm_emit_copy_short_2b: + XORQ DI, DI + LEAL 1(DI)(R10*4), R10 + MOVB SI, 1(CX) + SARL $0x08, SI + SHLL $0x05, SI + ORL SI, R10 + MOVB R10, (CX) + ADDQ $0x02, CX + JMP match_nolit_emitcopy_end_encodeBlockAsm + +long_offset_short_match_nolit_encodeBlockAsm: + MOVB $0xee, (CX) + MOVW SI, 1(CX) + LEAL -60(R10), R10 + ADDQ $0x03, CX + + // emitRepeat +emit_repeat_again_match_nolit_encodeBlockAsm_emit_copy_short: + MOVL R10, DI + LEAL -4(R10), R10 + CMPL DI, $0x08 + JBE repeat_two_match_nolit_encodeBlockAsm_emit_copy_short + CMPL DI, $0x0c + JAE cant_repeat_two_offset_match_nolit_encodeBlockAsm_emit_copy_short + CMPL SI, $0x00000800 + JB repeat_two_offset_match_nolit_encodeBlockAsm_emit_copy_short + +cant_repeat_two_offset_match_nolit_encodeBlockAsm_emit_copy_short: + CMPL R10, $0x00000104 + JB repeat_three_match_nolit_encodeBlockAsm_emit_copy_short + CMPL R10, $0x00010100 + JB repeat_four_match_nolit_encodeBlockAsm_emit_copy_short + CMPL R10, $0x0100ffff + JB repeat_five_match_nolit_encodeBlockAsm_emit_copy_short + LEAL -16842747(R10), R10 + MOVL $0xfffb001d, (CX) + MOVB $0xff, 4(CX) + ADDQ $0x05, CX + JMP emit_repeat_again_match_nolit_encodeBlockAsm_emit_copy_short + +repeat_five_match_nolit_encodeBlockAsm_emit_copy_short: + LEAL -65536(R10), R10 + MOVL R10, SI + MOVW $0x001d, (CX) + MOVW R10, 2(CX) + SARL $0x10, SI + MOVB SI, 4(CX) + ADDQ $0x05, CX + JMP match_nolit_emitcopy_end_encodeBlockAsm + +repeat_four_match_nolit_encodeBlockAsm_emit_copy_short: + LEAL -256(R10), R10 + MOVW $0x0019, (CX) + MOVW R10, 2(CX) + ADDQ $0x04, CX + JMP match_nolit_emitcopy_end_encodeBlockAsm + +repeat_three_match_nolit_encodeBlockAsm_emit_copy_short: + LEAL -4(R10), R10 + MOVW $0x0015, (CX) + MOVB R10, 2(CX) + ADDQ $0x03, CX + JMP match_nolit_emitcopy_end_encodeBlockAsm + +repeat_two_match_nolit_encodeBlockAsm_emit_copy_short: + SHLL $0x02, R10 + ORL $0x01, R10 + MOVW R10, (CX) + ADDQ $0x02, CX + JMP match_nolit_emitcopy_end_encodeBlockAsm + 
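+	// repeat_two_offset below packs a two-byte code: tag 0b01 in bits 0-1,
+	// the short length in bits 2-4, offset>>8 in bits 5-7, and the low
+	// offset byte in the second byte.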
+repeat_two_offset_match_nolit_encodeBlockAsm_emit_copy_short: + XORQ DI, DI + LEAL 1(DI)(R10*4), R10 + MOVB SI, 1(CX) + SARL $0x08, SI + SHLL $0x05, SI + ORL SI, R10 + MOVB R10, (CX) + ADDQ $0x02, CX + JMP match_nolit_emitcopy_end_encodeBlockAsm + +two_byte_offset_short_match_nolit_encodeBlockAsm: + MOVL R10, DI + SHLL $0x02, DI + CMPL R10, $0x0c + JAE emit_copy_three_match_nolit_encodeBlockAsm + CMPL SI, $0x00000800 + JAE emit_copy_three_match_nolit_encodeBlockAsm + LEAL -15(DI), DI + MOVB SI, 1(CX) + SHRL $0x08, SI + SHLL $0x05, SI + ORL SI, DI + MOVB DI, (CX) + ADDQ $0x02, CX + JMP match_nolit_emitcopy_end_encodeBlockAsm + +emit_copy_three_match_nolit_encodeBlockAsm: + LEAL -2(DI), DI + MOVB DI, (CX) + MOVW SI, 1(CX) + ADDQ $0x03, CX + +match_nolit_emitcopy_end_encodeBlockAsm: + CMPL DX, 8(SP) + JAE emit_remainder_encodeBlockAsm + MOVQ -2(BX)(DX*1), DI + CMPQ CX, (SP) + JB match_nolit_dst_ok_encodeBlockAsm + MOVQ $0x00000000, ret+56(FP) + RET + +match_nolit_dst_ok_encodeBlockAsm: + MOVQ $0x0000cf1bbcdcbf9b, R9 + MOVQ DI, R8 + SHRQ $0x10, DI + MOVQ DI, SI + SHLQ $0x10, R8 + IMULQ R9, R8 + SHRQ $0x32, R8 + SHLQ $0x10, SI + IMULQ R9, SI + SHRQ $0x32, SI + LEAL -2(DX), R9 + LEAQ (AX)(SI*4), R10 + MOVL (R10), SI + MOVL R9, (AX)(R8*4) + MOVL DX, (R10) + CMPL (BX)(SI*1), DI + JEQ match_nolit_loop_encodeBlockAsm + INCL DX + JMP search_loop_encodeBlockAsm + +emit_remainder_encodeBlockAsm: + MOVQ src_len+32(FP), AX + SUBL 12(SP), AX + LEAQ 5(CX)(AX*1), AX + CMPQ AX, (SP) + JB emit_remainder_ok_encodeBlockAsm + MOVQ $0x00000000, ret+56(FP) + RET + +emit_remainder_ok_encodeBlockAsm: + MOVQ src_len+32(FP), AX + MOVL 12(SP), DX + CMPL DX, AX + JEQ emit_literal_done_emit_remainder_encodeBlockAsm + MOVL AX, SI + MOVL AX, 12(SP) + LEAQ (BX)(DX*1), AX + SUBL DX, SI + LEAL -1(SI), DX + CMPL DX, $0x3c + JB one_byte_emit_remainder_encodeBlockAsm + CMPL DX, $0x00000100 + JB two_bytes_emit_remainder_encodeBlockAsm + CMPL DX, $0x00010000 + JB three_bytes_emit_remainder_encodeBlockAsm + CMPL DX, $0x01000000 + JB four_bytes_emit_remainder_encodeBlockAsm + MOVB $0xfc, (CX) + MOVL DX, 1(CX) + ADDQ $0x05, CX + JMP memmove_long_emit_remainder_encodeBlockAsm + +four_bytes_emit_remainder_encodeBlockAsm: + MOVL DX, BX + SHRL $0x10, BX + MOVB $0xf8, (CX) + MOVW DX, 1(CX) + MOVB BL, 3(CX) + ADDQ $0x04, CX + JMP memmove_long_emit_remainder_encodeBlockAsm + +three_bytes_emit_remainder_encodeBlockAsm: + MOVB $0xf4, (CX) + MOVW DX, 1(CX) + ADDQ $0x03, CX + JMP memmove_long_emit_remainder_encodeBlockAsm + +two_bytes_emit_remainder_encodeBlockAsm: + MOVB $0xf0, (CX) + MOVB DL, 1(CX) + ADDQ $0x02, CX + CMPL DX, $0x40 + JB memmove_emit_remainder_encodeBlockAsm + JMP memmove_long_emit_remainder_encodeBlockAsm + +one_byte_emit_remainder_encodeBlockAsm: + SHLB $0x02, DL + MOVB DL, (CX) + ADDQ $0x01, CX + +memmove_emit_remainder_encodeBlockAsm: + LEAQ (CX)(SI*1), DX + MOVL SI, BX + + // genMemMoveShort + CMPQ BX, $0x03 + JB emit_lit_memmove_emit_remainder_encodeBlockAsm_memmove_move_1or2 + JE emit_lit_memmove_emit_remainder_encodeBlockAsm_memmove_move_3 + CMPQ BX, $0x08 + JB emit_lit_memmove_emit_remainder_encodeBlockAsm_memmove_move_4through7 + CMPQ BX, $0x10 + JBE emit_lit_memmove_emit_remainder_encodeBlockAsm_memmove_move_8through16 + CMPQ BX, $0x20 + JBE emit_lit_memmove_emit_remainder_encodeBlockAsm_memmove_move_17through32 + JMP emit_lit_memmove_emit_remainder_encodeBlockAsm_memmove_move_33through64 + +emit_lit_memmove_emit_remainder_encodeBlockAsm_memmove_move_1or2: + MOVB (AX), SI + MOVB -1(AX)(BX*1), AL + MOVB SI, (CX) + 
MOVB AL, -1(CX)(BX*1) + JMP memmove_end_copy_emit_remainder_encodeBlockAsm + +emit_lit_memmove_emit_remainder_encodeBlockAsm_memmove_move_3: + MOVW (AX), SI + MOVB 2(AX), AL + MOVW SI, (CX) + MOVB AL, 2(CX) + JMP memmove_end_copy_emit_remainder_encodeBlockAsm + +emit_lit_memmove_emit_remainder_encodeBlockAsm_memmove_move_4through7: + MOVL (AX), SI + MOVL -4(AX)(BX*1), AX + MOVL SI, (CX) + MOVL AX, -4(CX)(BX*1) + JMP memmove_end_copy_emit_remainder_encodeBlockAsm + +emit_lit_memmove_emit_remainder_encodeBlockAsm_memmove_move_8through16: + MOVQ (AX), SI + MOVQ -8(AX)(BX*1), AX + MOVQ SI, (CX) + MOVQ AX, -8(CX)(BX*1) + JMP memmove_end_copy_emit_remainder_encodeBlockAsm + +emit_lit_memmove_emit_remainder_encodeBlockAsm_memmove_move_17through32: + MOVOU (AX), X0 + MOVOU -16(AX)(BX*1), X1 + MOVOU X0, (CX) + MOVOU X1, -16(CX)(BX*1) + JMP memmove_end_copy_emit_remainder_encodeBlockAsm + +emit_lit_memmove_emit_remainder_encodeBlockAsm_memmove_move_33through64: + MOVOU (AX), X0 + MOVOU 16(AX), X1 + MOVOU -32(AX)(BX*1), X2 + MOVOU -16(AX)(BX*1), X3 + MOVOU X0, (CX) + MOVOU X1, 16(CX) + MOVOU X2, -32(CX)(BX*1) + MOVOU X3, -16(CX)(BX*1) + +memmove_end_copy_emit_remainder_encodeBlockAsm: + MOVQ DX, CX + JMP emit_literal_done_emit_remainder_encodeBlockAsm + +memmove_long_emit_remainder_encodeBlockAsm: + LEAQ (CX)(SI*1), DX + MOVL SI, BX + + // genMemMoveLong + MOVOU (AX), X0 + MOVOU 16(AX), X1 + MOVOU -32(AX)(BX*1), X2 + MOVOU -16(AX)(BX*1), X3 + MOVQ BX, DI + SHRQ $0x05, DI + MOVQ CX, SI + ANDL $0x0000001f, SI + MOVQ $0x00000040, R8 + SUBQ SI, R8 + DECQ DI + JA emit_lit_memmove_long_emit_remainder_encodeBlockAsmlarge_forward_sse_loop_32 + LEAQ -32(AX)(R8*1), SI + LEAQ -32(CX)(R8*1), R9 + +emit_lit_memmove_long_emit_remainder_encodeBlockAsmlarge_big_loop_back: + MOVOU (SI), X4 + MOVOU 16(SI), X5 + MOVOA X4, (R9) + MOVOA X5, 16(R9) + ADDQ $0x20, R9 + ADDQ $0x20, SI + ADDQ $0x20, R8 + DECQ DI + JNA emit_lit_memmove_long_emit_remainder_encodeBlockAsmlarge_big_loop_back + +emit_lit_memmove_long_emit_remainder_encodeBlockAsmlarge_forward_sse_loop_32: + MOVOU -32(AX)(R8*1), X4 + MOVOU -16(AX)(R8*1), X5 + MOVOA X4, -32(CX)(R8*1) + MOVOA X5, -16(CX)(R8*1) + ADDQ $0x20, R8 + CMPQ BX, R8 + JAE emit_lit_memmove_long_emit_remainder_encodeBlockAsmlarge_forward_sse_loop_32 + MOVOU X0, (CX) + MOVOU X1, 16(CX) + MOVOU X2, -32(CX)(BX*1) + MOVOU X3, -16(CX)(BX*1) + MOVQ DX, CX + +emit_literal_done_emit_remainder_encodeBlockAsm: + MOVQ dst_base+0(FP), AX + SUBQ AX, CX + MOVQ CX, ret+56(FP) + RET + +// func encodeBlockAsm4MB(dst []byte, src []byte, tmp *[65536]byte) int +// Requires: BMI, SSE2 +TEXT ·encodeBlockAsm4MB(SB), $24-64 + MOVQ tmp+48(FP), AX + MOVQ dst_base+0(FP), CX + MOVQ $0x00000200, DX + MOVQ AX, BX + PXOR X0, X0 + +zero_loop_encodeBlockAsm4MB: + MOVOU X0, (BX) + MOVOU X0, 16(BX) + MOVOU X0, 32(BX) + MOVOU X0, 48(BX) + MOVOU X0, 64(BX) + MOVOU X0, 80(BX) + MOVOU X0, 96(BX) + MOVOU X0, 112(BX) + ADDQ $0x80, BX + DECQ DX + JNZ zero_loop_encodeBlockAsm4MB + MOVL $0x00000000, 12(SP) + MOVQ src_len+32(FP), DX + LEAQ -9(DX), BX + LEAQ -8(DX), SI + MOVL SI, 8(SP) + SHRQ $0x05, DX + SUBL DX, BX + LEAQ (CX)(BX*1), BX + MOVQ BX, (SP) + MOVL $0x00000001, DX + MOVL DX, 16(SP) + MOVQ src_base+24(FP), BX + +search_loop_encodeBlockAsm4MB: + MOVL DX, SI + SUBL 12(SP), SI + SHRL $0x06, SI + LEAL 4(DX)(SI*1), SI + CMPL SI, 8(SP) + JAE emit_remainder_encodeBlockAsm4MB + MOVQ (BX)(DX*1), DI + MOVL SI, 20(SP) + MOVQ $0x0000cf1bbcdcbf9b, R9 + MOVQ DI, R10 + MOVQ DI, R11 + SHRQ $0x08, R11 + SHLQ $0x10, R10 + IMULQ R9, R10 + SHRQ 
$0x32, R10 + SHLQ $0x10, R11 + IMULQ R9, R11 + SHRQ $0x32, R11 + MOVL (AX)(R10*4), SI + MOVL (AX)(R11*4), R8 + MOVL DX, (AX)(R10*4) + LEAL 1(DX), R10 + MOVL R10, (AX)(R11*4) + MOVQ DI, R10 + SHRQ $0x10, R10 + SHLQ $0x10, R10 + IMULQ R9, R10 + SHRQ $0x32, R10 + MOVL DX, R9 + SUBL 16(SP), R9 + MOVL 1(BX)(R9*1), R11 + MOVQ DI, R9 + SHRQ $0x08, R9 + CMPL R9, R11 + JNE no_repeat_found_encodeBlockAsm4MB + LEAL 1(DX), DI + MOVL 12(SP), R8 + MOVL DI, SI + SUBL 16(SP), SI + JZ repeat_extend_back_end_encodeBlockAsm4MB + +repeat_extend_back_loop_encodeBlockAsm4MB: + CMPL DI, R8 + JBE repeat_extend_back_end_encodeBlockAsm4MB + MOVB -1(BX)(SI*1), R9 + MOVB -1(BX)(DI*1), R10 + CMPB R9, R10 + JNE repeat_extend_back_end_encodeBlockAsm4MB + LEAL -1(DI), DI + DECL SI + JNZ repeat_extend_back_loop_encodeBlockAsm4MB + +repeat_extend_back_end_encodeBlockAsm4MB: + MOVL DI, SI + SUBL 12(SP), SI + LEAQ 4(CX)(SI*1), SI + CMPQ SI, (SP) + JB repeat_dst_size_check_encodeBlockAsm4MB + MOVQ $0x00000000, ret+56(FP) + RET + +repeat_dst_size_check_encodeBlockAsm4MB: + MOVL 12(SP), SI + CMPL SI, DI + JEQ emit_literal_done_repeat_emit_encodeBlockAsm4MB + MOVL DI, R9 + MOVL DI, 12(SP) + LEAQ (BX)(SI*1), R10 + SUBL SI, R9 + LEAL -1(R9), SI + CMPL SI, $0x3c + JB one_byte_repeat_emit_encodeBlockAsm4MB + CMPL SI, $0x00000100 + JB two_bytes_repeat_emit_encodeBlockAsm4MB + CMPL SI, $0x00010000 + JB three_bytes_repeat_emit_encodeBlockAsm4MB + MOVL SI, R11 + SHRL $0x10, R11 + MOVB $0xf8, (CX) + MOVW SI, 1(CX) + MOVB R11, 3(CX) + ADDQ $0x04, CX + JMP memmove_long_repeat_emit_encodeBlockAsm4MB + +three_bytes_repeat_emit_encodeBlockAsm4MB: + MOVB $0xf4, (CX) + MOVW SI, 1(CX) + ADDQ $0x03, CX + JMP memmove_long_repeat_emit_encodeBlockAsm4MB + +two_bytes_repeat_emit_encodeBlockAsm4MB: + MOVB $0xf0, (CX) + MOVB SI, 1(CX) + ADDQ $0x02, CX + CMPL SI, $0x40 + JB memmove_repeat_emit_encodeBlockAsm4MB + JMP memmove_long_repeat_emit_encodeBlockAsm4MB + +one_byte_repeat_emit_encodeBlockAsm4MB: + SHLB $0x02, SI + MOVB SI, (CX) + ADDQ $0x01, CX + +memmove_repeat_emit_encodeBlockAsm4MB: + LEAQ (CX)(R9*1), SI + + // genMemMoveShort + CMPQ R9, $0x08 + JBE emit_lit_memmove_repeat_emit_encodeBlockAsm4MB_memmove_move_8 + CMPQ R9, $0x10 + JBE emit_lit_memmove_repeat_emit_encodeBlockAsm4MB_memmove_move_8through16 + CMPQ R9, $0x20 + JBE emit_lit_memmove_repeat_emit_encodeBlockAsm4MB_memmove_move_17through32 + JMP emit_lit_memmove_repeat_emit_encodeBlockAsm4MB_memmove_move_33through64 + +emit_lit_memmove_repeat_emit_encodeBlockAsm4MB_memmove_move_8: + MOVQ (R10), R11 + MOVQ R11, (CX) + JMP memmove_end_copy_repeat_emit_encodeBlockAsm4MB + +emit_lit_memmove_repeat_emit_encodeBlockAsm4MB_memmove_move_8through16: + MOVQ (R10), R11 + MOVQ -8(R10)(R9*1), R10 + MOVQ R11, (CX) + MOVQ R10, -8(CX)(R9*1) + JMP memmove_end_copy_repeat_emit_encodeBlockAsm4MB + +emit_lit_memmove_repeat_emit_encodeBlockAsm4MB_memmove_move_17through32: + MOVOU (R10), X0 + MOVOU -16(R10)(R9*1), X1 + MOVOU X0, (CX) + MOVOU X1, -16(CX)(R9*1) + JMP memmove_end_copy_repeat_emit_encodeBlockAsm4MB + +emit_lit_memmove_repeat_emit_encodeBlockAsm4MB_memmove_move_33through64: + MOVOU (R10), X0 + MOVOU 16(R10), X1 + MOVOU -32(R10)(R9*1), X2 + MOVOU -16(R10)(R9*1), X3 + MOVOU X0, (CX) + MOVOU X1, 16(CX) + MOVOU X2, -32(CX)(R9*1) + MOVOU X3, -16(CX)(R9*1) + +memmove_end_copy_repeat_emit_encodeBlockAsm4MB: + MOVQ SI, CX + JMP emit_literal_done_repeat_emit_encodeBlockAsm4MB + +memmove_long_repeat_emit_encodeBlockAsm4MB: + LEAQ (CX)(R9*1), SI + + // genMemMoveLong + MOVOU (R10), X0 + MOVOU 16(R10), X1 + 
MOVOU -32(R10)(R9*1), X2 + MOVOU -16(R10)(R9*1), X3 + MOVQ R9, R12 + SHRQ $0x05, R12 + MOVQ CX, R11 + ANDL $0x0000001f, R11 + MOVQ $0x00000040, R13 + SUBQ R11, R13 + DECQ R12 + JA emit_lit_memmove_long_repeat_emit_encodeBlockAsm4MBlarge_forward_sse_loop_32 + LEAQ -32(R10)(R13*1), R11 + LEAQ -32(CX)(R13*1), R14 + +emit_lit_memmove_long_repeat_emit_encodeBlockAsm4MBlarge_big_loop_back: + MOVOU (R11), X4 + MOVOU 16(R11), X5 + MOVOA X4, (R14) + MOVOA X5, 16(R14) + ADDQ $0x20, R14 + ADDQ $0x20, R11 + ADDQ $0x20, R13 + DECQ R12 + JNA emit_lit_memmove_long_repeat_emit_encodeBlockAsm4MBlarge_big_loop_back + +emit_lit_memmove_long_repeat_emit_encodeBlockAsm4MBlarge_forward_sse_loop_32: + MOVOU -32(R10)(R13*1), X4 + MOVOU -16(R10)(R13*1), X5 + MOVOA X4, -32(CX)(R13*1) + MOVOA X5, -16(CX)(R13*1) + ADDQ $0x20, R13 + CMPQ R9, R13 + JAE emit_lit_memmove_long_repeat_emit_encodeBlockAsm4MBlarge_forward_sse_loop_32 + MOVOU X0, (CX) + MOVOU X1, 16(CX) + MOVOU X2, -32(CX)(R9*1) + MOVOU X3, -16(CX)(R9*1) + MOVQ SI, CX + +emit_literal_done_repeat_emit_encodeBlockAsm4MB: + ADDL $0x05, DX + MOVL DX, SI + SUBL 16(SP), SI + MOVQ src_len+32(FP), R9 + SUBL DX, R9 + LEAQ (BX)(DX*1), R10 + LEAQ (BX)(SI*1), SI + + // matchLen + XORL R12, R12 + +matchlen_loopback_16_repeat_extend_encodeBlockAsm4MB: + CMPL R9, $0x10 + JB matchlen_match8_repeat_extend_encodeBlockAsm4MB + MOVQ (R10)(R12*1), R11 + MOVQ 8(R10)(R12*1), R13 + XORQ (SI)(R12*1), R11 + JNZ matchlen_bsf_8_repeat_extend_encodeBlockAsm4MB + XORQ 8(SI)(R12*1), R13 + JNZ matchlen_bsf_16repeat_extend_encodeBlockAsm4MB + LEAL -16(R9), R9 + LEAL 16(R12), R12 + JMP matchlen_loopback_16_repeat_extend_encodeBlockAsm4MB + +matchlen_bsf_16repeat_extend_encodeBlockAsm4MB: +#ifdef GOAMD64_v3 + TZCNTQ R13, R13 + +#else + BSFQ R13, R13 + +#endif + SARQ $0x03, R13 + LEAL 8(R12)(R13*1), R12 + JMP repeat_extend_forward_end_encodeBlockAsm4MB + +matchlen_match8_repeat_extend_encodeBlockAsm4MB: + CMPL R9, $0x08 + JB matchlen_match4_repeat_extend_encodeBlockAsm4MB + MOVQ (R10)(R12*1), R11 + XORQ (SI)(R12*1), R11 + JNZ matchlen_bsf_8_repeat_extend_encodeBlockAsm4MB + LEAL -8(R9), R9 + LEAL 8(R12), R12 + JMP matchlen_match4_repeat_extend_encodeBlockAsm4MB + +matchlen_bsf_8_repeat_extend_encodeBlockAsm4MB: +#ifdef GOAMD64_v3 + TZCNTQ R11, R11 + +#else + BSFQ R11, R11 + +#endif + SARQ $0x03, R11 + LEAL (R12)(R11*1), R12 + JMP repeat_extend_forward_end_encodeBlockAsm4MB + +matchlen_match4_repeat_extend_encodeBlockAsm4MB: + CMPL R9, $0x04 + JB matchlen_match2_repeat_extend_encodeBlockAsm4MB + MOVL (R10)(R12*1), R11 + CMPL (SI)(R12*1), R11 + JNE matchlen_match2_repeat_extend_encodeBlockAsm4MB + LEAL -4(R9), R9 + LEAL 4(R12), R12 + +matchlen_match2_repeat_extend_encodeBlockAsm4MB: + CMPL R9, $0x01 + JE matchlen_match1_repeat_extend_encodeBlockAsm4MB + JB repeat_extend_forward_end_encodeBlockAsm4MB + MOVW (R10)(R12*1), R11 + CMPW (SI)(R12*1), R11 + JNE matchlen_match1_repeat_extend_encodeBlockAsm4MB + LEAL 2(R12), R12 + SUBL $0x02, R9 + JZ repeat_extend_forward_end_encodeBlockAsm4MB + +matchlen_match1_repeat_extend_encodeBlockAsm4MB: + MOVB (R10)(R12*1), R11 + CMPB (SI)(R12*1), R11 + JNE repeat_extend_forward_end_encodeBlockAsm4MB + LEAL 1(R12), R12 + +repeat_extend_forward_end_encodeBlockAsm4MB: + ADDL R12, DX + MOVL DX, SI + SUBL DI, SI + MOVL 16(SP), DI + TESTL R8, R8 + JZ repeat_as_copy_encodeBlockAsm4MB + + // emitRepeat + MOVL SI, R8 + LEAL -4(SI), SI + CMPL R8, $0x08 + JBE repeat_two_match_repeat_encodeBlockAsm4MB + CMPL R8, $0x0c + JAE 
cant_repeat_two_offset_match_repeat_encodeBlockAsm4MB + CMPL DI, $0x00000800 + JB repeat_two_offset_match_repeat_encodeBlockAsm4MB + +cant_repeat_two_offset_match_repeat_encodeBlockAsm4MB: + CMPL SI, $0x00000104 + JB repeat_three_match_repeat_encodeBlockAsm4MB + CMPL SI, $0x00010100 + JB repeat_four_match_repeat_encodeBlockAsm4MB + LEAL -65536(SI), SI + MOVL SI, DI + MOVW $0x001d, (CX) + MOVW SI, 2(CX) + SARL $0x10, DI + MOVB DI, 4(CX) + ADDQ $0x05, CX + JMP repeat_end_emit_encodeBlockAsm4MB + +repeat_four_match_repeat_encodeBlockAsm4MB: + LEAL -256(SI), SI + MOVW $0x0019, (CX) + MOVW SI, 2(CX) + ADDQ $0x04, CX + JMP repeat_end_emit_encodeBlockAsm4MB + +repeat_three_match_repeat_encodeBlockAsm4MB: + LEAL -4(SI), SI + MOVW $0x0015, (CX) + MOVB SI, 2(CX) + ADDQ $0x03, CX + JMP repeat_end_emit_encodeBlockAsm4MB + +repeat_two_match_repeat_encodeBlockAsm4MB: + SHLL $0x02, SI + ORL $0x01, SI + MOVW SI, (CX) + ADDQ $0x02, CX + JMP repeat_end_emit_encodeBlockAsm4MB + +repeat_two_offset_match_repeat_encodeBlockAsm4MB: + XORQ R8, R8 + LEAL 1(R8)(SI*4), SI + MOVB DI, 1(CX) + SARL $0x08, DI + SHLL $0x05, DI + ORL DI, SI + MOVB SI, (CX) + ADDQ $0x02, CX + JMP repeat_end_emit_encodeBlockAsm4MB + +repeat_as_copy_encodeBlockAsm4MB: + // emitCopy + CMPL DI, $0x00010000 + JB two_byte_offset_repeat_as_copy_encodeBlockAsm4MB + CMPL SI, $0x40 + JBE four_bytes_remain_repeat_as_copy_encodeBlockAsm4MB + MOVB $0xff, (CX) + MOVL DI, 1(CX) + LEAL -64(SI), SI + ADDQ $0x05, CX + CMPL SI, $0x04 + JB four_bytes_remain_repeat_as_copy_encodeBlockAsm4MB + + // emitRepeat + MOVL SI, R8 + LEAL -4(SI), SI + CMPL R8, $0x08 + JBE repeat_two_repeat_as_copy_encodeBlockAsm4MB_emit_copy + CMPL R8, $0x0c + JAE cant_repeat_two_offset_repeat_as_copy_encodeBlockAsm4MB_emit_copy + CMPL DI, $0x00000800 + JB repeat_two_offset_repeat_as_copy_encodeBlockAsm4MB_emit_copy + +cant_repeat_two_offset_repeat_as_copy_encodeBlockAsm4MB_emit_copy: + CMPL SI, $0x00000104 + JB repeat_three_repeat_as_copy_encodeBlockAsm4MB_emit_copy + CMPL SI, $0x00010100 + JB repeat_four_repeat_as_copy_encodeBlockAsm4MB_emit_copy + LEAL -65536(SI), SI + MOVL SI, DI + MOVW $0x001d, (CX) + MOVW SI, 2(CX) + SARL $0x10, DI + MOVB DI, 4(CX) + ADDQ $0x05, CX + JMP repeat_end_emit_encodeBlockAsm4MB + +repeat_four_repeat_as_copy_encodeBlockAsm4MB_emit_copy: + LEAL -256(SI), SI + MOVW $0x0019, (CX) + MOVW SI, 2(CX) + ADDQ $0x04, CX + JMP repeat_end_emit_encodeBlockAsm4MB + +repeat_three_repeat_as_copy_encodeBlockAsm4MB_emit_copy: + LEAL -4(SI), SI + MOVW $0x0015, (CX) + MOVB SI, 2(CX) + ADDQ $0x03, CX + JMP repeat_end_emit_encodeBlockAsm4MB + +repeat_two_repeat_as_copy_encodeBlockAsm4MB_emit_copy: + SHLL $0x02, SI + ORL $0x01, SI + MOVW SI, (CX) + ADDQ $0x02, CX + JMP repeat_end_emit_encodeBlockAsm4MB + +repeat_two_offset_repeat_as_copy_encodeBlockAsm4MB_emit_copy: + XORQ R8, R8 + LEAL 1(R8)(SI*4), SI + MOVB DI, 1(CX) + SARL $0x08, DI + SHLL $0x05, DI + ORL DI, SI + MOVB SI, (CX) + ADDQ $0x02, CX + JMP repeat_end_emit_encodeBlockAsm4MB + +four_bytes_remain_repeat_as_copy_encodeBlockAsm4MB: + TESTL SI, SI + JZ repeat_end_emit_encodeBlockAsm4MB + XORL R8, R8 + LEAL -1(R8)(SI*4), SI + MOVB SI, (CX) + MOVL DI, 1(CX) + ADDQ $0x05, CX + JMP repeat_end_emit_encodeBlockAsm4MB + +two_byte_offset_repeat_as_copy_encodeBlockAsm4MB: + CMPL SI, $0x40 + JBE two_byte_offset_short_repeat_as_copy_encodeBlockAsm4MB + CMPL DI, $0x00000800 + JAE long_offset_short_repeat_as_copy_encodeBlockAsm4MB + MOVL $0x00000001, R8 + LEAL 16(R8), R8 + MOVB DI, 1(CX) + SHRL $0x08, DI + SHLL $0x05, DI + ORL DI, 
R8 + MOVB R8, (CX) + ADDQ $0x02, CX + SUBL $0x08, SI + + // emitRepeat + LEAL -4(SI), SI + JMP cant_repeat_two_offset_repeat_as_copy_encodeBlockAsm4MB_emit_copy_short_2b + MOVL SI, R8 + LEAL -4(SI), SI + CMPL R8, $0x08 + JBE repeat_two_repeat_as_copy_encodeBlockAsm4MB_emit_copy_short_2b + CMPL R8, $0x0c + JAE cant_repeat_two_offset_repeat_as_copy_encodeBlockAsm4MB_emit_copy_short_2b + CMPL DI, $0x00000800 + JB repeat_two_offset_repeat_as_copy_encodeBlockAsm4MB_emit_copy_short_2b + +cant_repeat_two_offset_repeat_as_copy_encodeBlockAsm4MB_emit_copy_short_2b: + CMPL SI, $0x00000104 + JB repeat_three_repeat_as_copy_encodeBlockAsm4MB_emit_copy_short_2b + CMPL SI, $0x00010100 + JB repeat_four_repeat_as_copy_encodeBlockAsm4MB_emit_copy_short_2b + LEAL -65536(SI), SI + MOVL SI, DI + MOVW $0x001d, (CX) + MOVW SI, 2(CX) + SARL $0x10, DI + MOVB DI, 4(CX) + ADDQ $0x05, CX + JMP repeat_end_emit_encodeBlockAsm4MB + +repeat_four_repeat_as_copy_encodeBlockAsm4MB_emit_copy_short_2b: + LEAL -256(SI), SI + MOVW $0x0019, (CX) + MOVW SI, 2(CX) + ADDQ $0x04, CX + JMP repeat_end_emit_encodeBlockAsm4MB + +repeat_three_repeat_as_copy_encodeBlockAsm4MB_emit_copy_short_2b: + LEAL -4(SI), SI + MOVW $0x0015, (CX) + MOVB SI, 2(CX) + ADDQ $0x03, CX + JMP repeat_end_emit_encodeBlockAsm4MB + +repeat_two_repeat_as_copy_encodeBlockAsm4MB_emit_copy_short_2b: + SHLL $0x02, SI + ORL $0x01, SI + MOVW SI, (CX) + ADDQ $0x02, CX + JMP repeat_end_emit_encodeBlockAsm4MB + +repeat_two_offset_repeat_as_copy_encodeBlockAsm4MB_emit_copy_short_2b: + XORQ R8, R8 + LEAL 1(R8)(SI*4), SI + MOVB DI, 1(CX) + SARL $0x08, DI + SHLL $0x05, DI + ORL DI, SI + MOVB SI, (CX) + ADDQ $0x02, CX + JMP repeat_end_emit_encodeBlockAsm4MB + +long_offset_short_repeat_as_copy_encodeBlockAsm4MB: + MOVB $0xee, (CX) + MOVW DI, 1(CX) + LEAL -60(SI), SI + ADDQ $0x03, CX + + // emitRepeat + MOVL SI, R8 + LEAL -4(SI), SI + CMPL R8, $0x08 + JBE repeat_two_repeat_as_copy_encodeBlockAsm4MB_emit_copy_short + CMPL R8, $0x0c + JAE cant_repeat_two_offset_repeat_as_copy_encodeBlockAsm4MB_emit_copy_short + CMPL DI, $0x00000800 + JB repeat_two_offset_repeat_as_copy_encodeBlockAsm4MB_emit_copy_short + +cant_repeat_two_offset_repeat_as_copy_encodeBlockAsm4MB_emit_copy_short: + CMPL SI, $0x00000104 + JB repeat_three_repeat_as_copy_encodeBlockAsm4MB_emit_copy_short + CMPL SI, $0x00010100 + JB repeat_four_repeat_as_copy_encodeBlockAsm4MB_emit_copy_short + LEAL -65536(SI), SI + MOVL SI, DI + MOVW $0x001d, (CX) + MOVW SI, 2(CX) + SARL $0x10, DI + MOVB DI, 4(CX) + ADDQ $0x05, CX + JMP repeat_end_emit_encodeBlockAsm4MB + +repeat_four_repeat_as_copy_encodeBlockAsm4MB_emit_copy_short: + LEAL -256(SI), SI + MOVW $0x0019, (CX) + MOVW SI, 2(CX) + ADDQ $0x04, CX + JMP repeat_end_emit_encodeBlockAsm4MB + +repeat_three_repeat_as_copy_encodeBlockAsm4MB_emit_copy_short: + LEAL -4(SI), SI + MOVW $0x0015, (CX) + MOVB SI, 2(CX) + ADDQ $0x03, CX + JMP repeat_end_emit_encodeBlockAsm4MB + +repeat_two_repeat_as_copy_encodeBlockAsm4MB_emit_copy_short: + SHLL $0x02, SI + ORL $0x01, SI + MOVW SI, (CX) + ADDQ $0x02, CX + JMP repeat_end_emit_encodeBlockAsm4MB + +repeat_two_offset_repeat_as_copy_encodeBlockAsm4MB_emit_copy_short: + XORQ R8, R8 + LEAL 1(R8)(SI*4), SI + MOVB DI, 1(CX) + SARL $0x08, DI + SHLL $0x05, DI + ORL DI, SI + MOVB SI, (CX) + ADDQ $0x02, CX + JMP repeat_end_emit_encodeBlockAsm4MB + +two_byte_offset_short_repeat_as_copy_encodeBlockAsm4MB: + MOVL SI, R8 + SHLL $0x02, R8 + CMPL SI, $0x0c + JAE emit_copy_three_repeat_as_copy_encodeBlockAsm4MB + CMPL DI, $0x00000800 + JAE 
emit_copy_three_repeat_as_copy_encodeBlockAsm4MB + LEAL -15(R8), R8 + MOVB DI, 1(CX) + SHRL $0x08, DI + SHLL $0x05, DI + ORL DI, R8 + MOVB R8, (CX) + ADDQ $0x02, CX + JMP repeat_end_emit_encodeBlockAsm4MB + +emit_copy_three_repeat_as_copy_encodeBlockAsm4MB: + LEAL -2(R8), R8 + MOVB R8, (CX) + MOVW DI, 1(CX) + ADDQ $0x03, CX + +repeat_end_emit_encodeBlockAsm4MB: + MOVL DX, 12(SP) + JMP search_loop_encodeBlockAsm4MB + +no_repeat_found_encodeBlockAsm4MB: + CMPL (BX)(SI*1), DI + JEQ candidate_match_encodeBlockAsm4MB + SHRQ $0x08, DI + MOVL (AX)(R10*4), SI + LEAL 2(DX), R9 + CMPL (BX)(R8*1), DI + JEQ candidate2_match_encodeBlockAsm4MB + MOVL R9, (AX)(R10*4) + SHRQ $0x08, DI + CMPL (BX)(SI*1), DI + JEQ candidate3_match_encodeBlockAsm4MB + MOVL 20(SP), DX + JMP search_loop_encodeBlockAsm4MB + +candidate3_match_encodeBlockAsm4MB: + ADDL $0x02, DX + JMP candidate_match_encodeBlockAsm4MB + +candidate2_match_encodeBlockAsm4MB: + MOVL R9, (AX)(R10*4) + INCL DX + MOVL R8, SI + +candidate_match_encodeBlockAsm4MB: + MOVL 12(SP), DI + TESTL SI, SI + JZ match_extend_back_end_encodeBlockAsm4MB + +match_extend_back_loop_encodeBlockAsm4MB: + CMPL DX, DI + JBE match_extend_back_end_encodeBlockAsm4MB + MOVB -1(BX)(SI*1), R8 + MOVB -1(BX)(DX*1), R9 + CMPB R8, R9 + JNE match_extend_back_end_encodeBlockAsm4MB + LEAL -1(DX), DX + DECL SI + JZ match_extend_back_end_encodeBlockAsm4MB + JMP match_extend_back_loop_encodeBlockAsm4MB + +match_extend_back_end_encodeBlockAsm4MB: + MOVL DX, DI + SUBL 12(SP), DI + LEAQ 4(CX)(DI*1), DI + CMPQ DI, (SP) + JB match_dst_size_check_encodeBlockAsm4MB + MOVQ $0x00000000, ret+56(FP) + RET + +match_dst_size_check_encodeBlockAsm4MB: + MOVL DX, DI + MOVL 12(SP), R8 + CMPL R8, DI + JEQ emit_literal_done_match_emit_encodeBlockAsm4MB + MOVL DI, R9 + MOVL DI, 12(SP) + LEAQ (BX)(R8*1), DI + SUBL R8, R9 + LEAL -1(R9), R8 + CMPL R8, $0x3c + JB one_byte_match_emit_encodeBlockAsm4MB + CMPL R8, $0x00000100 + JB two_bytes_match_emit_encodeBlockAsm4MB + CMPL R8, $0x00010000 + JB three_bytes_match_emit_encodeBlockAsm4MB + MOVL R8, R10 + SHRL $0x10, R10 + MOVB $0xf8, (CX) + MOVW R8, 1(CX) + MOVB R10, 3(CX) + ADDQ $0x04, CX + JMP memmove_long_match_emit_encodeBlockAsm4MB + +three_bytes_match_emit_encodeBlockAsm4MB: + MOVB $0xf4, (CX) + MOVW R8, 1(CX) + ADDQ $0x03, CX + JMP memmove_long_match_emit_encodeBlockAsm4MB + +two_bytes_match_emit_encodeBlockAsm4MB: + MOVB $0xf0, (CX) + MOVB R8, 1(CX) + ADDQ $0x02, CX + CMPL R8, $0x40 + JB memmove_match_emit_encodeBlockAsm4MB + JMP memmove_long_match_emit_encodeBlockAsm4MB + +one_byte_match_emit_encodeBlockAsm4MB: + SHLB $0x02, R8 + MOVB R8, (CX) + ADDQ $0x01, CX + +memmove_match_emit_encodeBlockAsm4MB: + LEAQ (CX)(R9*1), R8 + + // genMemMoveShort + CMPQ R9, $0x08 + JBE emit_lit_memmove_match_emit_encodeBlockAsm4MB_memmove_move_8 + CMPQ R9, $0x10 + JBE emit_lit_memmove_match_emit_encodeBlockAsm4MB_memmove_move_8through16 + CMPQ R9, $0x20 + JBE emit_lit_memmove_match_emit_encodeBlockAsm4MB_memmove_move_17through32 + JMP emit_lit_memmove_match_emit_encodeBlockAsm4MB_memmove_move_33through64 + +emit_lit_memmove_match_emit_encodeBlockAsm4MB_memmove_move_8: + MOVQ (DI), R10 + MOVQ R10, (CX) + JMP memmove_end_copy_match_emit_encodeBlockAsm4MB + +emit_lit_memmove_match_emit_encodeBlockAsm4MB_memmove_move_8through16: + MOVQ (DI), R10 + MOVQ -8(DI)(R9*1), DI + MOVQ R10, (CX) + MOVQ DI, -8(CX)(R9*1) + JMP memmove_end_copy_match_emit_encodeBlockAsm4MB + +emit_lit_memmove_match_emit_encodeBlockAsm4MB_memmove_move_17through32: + MOVOU (DI), X0 + MOVOU -16(DI)(R9*1), X1 + 
MOVOU X0, (CX) + MOVOU X1, -16(CX)(R9*1) + JMP memmove_end_copy_match_emit_encodeBlockAsm4MB + +emit_lit_memmove_match_emit_encodeBlockAsm4MB_memmove_move_33through64: + MOVOU (DI), X0 + MOVOU 16(DI), X1 + MOVOU -32(DI)(R9*1), X2 + MOVOU -16(DI)(R9*1), X3 + MOVOU X0, (CX) + MOVOU X1, 16(CX) + MOVOU X2, -32(CX)(R9*1) + MOVOU X3, -16(CX)(R9*1) + +memmove_end_copy_match_emit_encodeBlockAsm4MB: + MOVQ R8, CX + JMP emit_literal_done_match_emit_encodeBlockAsm4MB + +memmove_long_match_emit_encodeBlockAsm4MB: + LEAQ (CX)(R9*1), R8 + + // genMemMoveLong + MOVOU (DI), X0 + MOVOU 16(DI), X1 + MOVOU -32(DI)(R9*1), X2 + MOVOU -16(DI)(R9*1), X3 + MOVQ R9, R11 + SHRQ $0x05, R11 + MOVQ CX, R10 + ANDL $0x0000001f, R10 + MOVQ $0x00000040, R12 + SUBQ R10, R12 + DECQ R11 + JA emit_lit_memmove_long_match_emit_encodeBlockAsm4MBlarge_forward_sse_loop_32 + LEAQ -32(DI)(R12*1), R10 + LEAQ -32(CX)(R12*1), R13 + +emit_lit_memmove_long_match_emit_encodeBlockAsm4MBlarge_big_loop_back: + MOVOU (R10), X4 + MOVOU 16(R10), X5 + MOVOA X4, (R13) + MOVOA X5, 16(R13) + ADDQ $0x20, R13 + ADDQ $0x20, R10 + ADDQ $0x20, R12 + DECQ R11 + JNA emit_lit_memmove_long_match_emit_encodeBlockAsm4MBlarge_big_loop_back + +emit_lit_memmove_long_match_emit_encodeBlockAsm4MBlarge_forward_sse_loop_32: + MOVOU -32(DI)(R12*1), X4 + MOVOU -16(DI)(R12*1), X5 + MOVOA X4, -32(CX)(R12*1) + MOVOA X5, -16(CX)(R12*1) + ADDQ $0x20, R12 + CMPQ R9, R12 + JAE emit_lit_memmove_long_match_emit_encodeBlockAsm4MBlarge_forward_sse_loop_32 + MOVOU X0, (CX) + MOVOU X1, 16(CX) + MOVOU X2, -32(CX)(R9*1) + MOVOU X3, -16(CX)(R9*1) + MOVQ R8, CX + +emit_literal_done_match_emit_encodeBlockAsm4MB: +match_nolit_loop_encodeBlockAsm4MB: + MOVL DX, DI + SUBL SI, DI + MOVL DI, 16(SP) + ADDL $0x04, DX + ADDL $0x04, SI + MOVQ src_len+32(FP), DI + SUBL DX, DI + LEAQ (BX)(DX*1), R8 + LEAQ (BX)(SI*1), SI + + // matchLen + XORL R10, R10 + +matchlen_loopback_16_match_nolit_encodeBlockAsm4MB: + CMPL DI, $0x10 + JB matchlen_match8_match_nolit_encodeBlockAsm4MB + MOVQ (R8)(R10*1), R9 + MOVQ 8(R8)(R10*1), R11 + XORQ (SI)(R10*1), R9 + JNZ matchlen_bsf_8_match_nolit_encodeBlockAsm4MB + XORQ 8(SI)(R10*1), R11 + JNZ matchlen_bsf_16match_nolit_encodeBlockAsm4MB + LEAL -16(DI), DI + LEAL 16(R10), R10 + JMP matchlen_loopback_16_match_nolit_encodeBlockAsm4MB + +matchlen_bsf_16match_nolit_encodeBlockAsm4MB: +#ifdef GOAMD64_v3 + TZCNTQ R11, R11 + +#else + BSFQ R11, R11 + +#endif + SARQ $0x03, R11 + LEAL 8(R10)(R11*1), R10 + JMP match_nolit_end_encodeBlockAsm4MB + +matchlen_match8_match_nolit_encodeBlockAsm4MB: + CMPL DI, $0x08 + JB matchlen_match4_match_nolit_encodeBlockAsm4MB + MOVQ (R8)(R10*1), R9 + XORQ (SI)(R10*1), R9 + JNZ matchlen_bsf_8_match_nolit_encodeBlockAsm4MB + LEAL -8(DI), DI + LEAL 8(R10), R10 + JMP matchlen_match4_match_nolit_encodeBlockAsm4MB + +matchlen_bsf_8_match_nolit_encodeBlockAsm4MB: +#ifdef GOAMD64_v3 + TZCNTQ R9, R9 + +#else + BSFQ R9, R9 + +#endif + SARQ $0x03, R9 + LEAL (R10)(R9*1), R10 + JMP match_nolit_end_encodeBlockAsm4MB + +matchlen_match4_match_nolit_encodeBlockAsm4MB: + CMPL DI, $0x04 + JB matchlen_match2_match_nolit_encodeBlockAsm4MB + MOVL (R8)(R10*1), R9 + CMPL (SI)(R10*1), R9 + JNE matchlen_match2_match_nolit_encodeBlockAsm4MB + LEAL -4(DI), DI + LEAL 4(R10), R10 + +matchlen_match2_match_nolit_encodeBlockAsm4MB: + CMPL DI, $0x01 + JE matchlen_match1_match_nolit_encodeBlockAsm4MB + JB match_nolit_end_encodeBlockAsm4MB + MOVW (R8)(R10*1), R9 + CMPW (SI)(R10*1), R9 + JNE matchlen_match1_match_nolit_encodeBlockAsm4MB + LEAL 2(R10), R10 + SUBL $0x02, DI + JZ 
match_nolit_end_encodeBlockAsm4MB + +matchlen_match1_match_nolit_encodeBlockAsm4MB: + MOVB (R8)(R10*1), R9 + CMPB (SI)(R10*1), R9 + JNE match_nolit_end_encodeBlockAsm4MB + LEAL 1(R10), R10 + +match_nolit_end_encodeBlockAsm4MB: + ADDL R10, DX + MOVL 16(SP), SI + ADDL $0x04, R10 + MOVL DX, 12(SP) + + // emitCopy + CMPL SI, $0x00010000 + JB two_byte_offset_match_nolit_encodeBlockAsm4MB + CMPL R10, $0x40 + JBE four_bytes_remain_match_nolit_encodeBlockAsm4MB + MOVB $0xff, (CX) + MOVL SI, 1(CX) + LEAL -64(R10), R10 + ADDQ $0x05, CX + CMPL R10, $0x04 + JB four_bytes_remain_match_nolit_encodeBlockAsm4MB + + // emitRepeat + MOVL R10, DI + LEAL -4(R10), R10 + CMPL DI, $0x08 + JBE repeat_two_match_nolit_encodeBlockAsm4MB_emit_copy + CMPL DI, $0x0c + JAE cant_repeat_two_offset_match_nolit_encodeBlockAsm4MB_emit_copy + CMPL SI, $0x00000800 + JB repeat_two_offset_match_nolit_encodeBlockAsm4MB_emit_copy + +cant_repeat_two_offset_match_nolit_encodeBlockAsm4MB_emit_copy: + CMPL R10, $0x00000104 + JB repeat_three_match_nolit_encodeBlockAsm4MB_emit_copy + CMPL R10, $0x00010100 + JB repeat_four_match_nolit_encodeBlockAsm4MB_emit_copy + LEAL -65536(R10), R10 + MOVL R10, SI + MOVW $0x001d, (CX) + MOVW R10, 2(CX) + SARL $0x10, SI + MOVB SI, 4(CX) + ADDQ $0x05, CX + JMP match_nolit_emitcopy_end_encodeBlockAsm4MB + +repeat_four_match_nolit_encodeBlockAsm4MB_emit_copy: + LEAL -256(R10), R10 + MOVW $0x0019, (CX) + MOVW R10, 2(CX) + ADDQ $0x04, CX + JMP match_nolit_emitcopy_end_encodeBlockAsm4MB + +repeat_three_match_nolit_encodeBlockAsm4MB_emit_copy: + LEAL -4(R10), R10 + MOVW $0x0015, (CX) + MOVB R10, 2(CX) + ADDQ $0x03, CX + JMP match_nolit_emitcopy_end_encodeBlockAsm4MB + +repeat_two_match_nolit_encodeBlockAsm4MB_emit_copy: + SHLL $0x02, R10 + ORL $0x01, R10 + MOVW R10, (CX) + ADDQ $0x02, CX + JMP match_nolit_emitcopy_end_encodeBlockAsm4MB + +repeat_two_offset_match_nolit_encodeBlockAsm4MB_emit_copy: + XORQ DI, DI + LEAL 1(DI)(R10*4), R10 + MOVB SI, 1(CX) + SARL $0x08, SI + SHLL $0x05, SI + ORL SI, R10 + MOVB R10, (CX) + ADDQ $0x02, CX + JMP match_nolit_emitcopy_end_encodeBlockAsm4MB + +four_bytes_remain_match_nolit_encodeBlockAsm4MB: + TESTL R10, R10 + JZ match_nolit_emitcopy_end_encodeBlockAsm4MB + XORL DI, DI + LEAL -1(DI)(R10*4), R10 + MOVB R10, (CX) + MOVL SI, 1(CX) + ADDQ $0x05, CX + JMP match_nolit_emitcopy_end_encodeBlockAsm4MB + +two_byte_offset_match_nolit_encodeBlockAsm4MB: + CMPL R10, $0x40 + JBE two_byte_offset_short_match_nolit_encodeBlockAsm4MB + CMPL SI, $0x00000800 + JAE long_offset_short_match_nolit_encodeBlockAsm4MB + MOVL $0x00000001, DI + LEAL 16(DI), DI + MOVB SI, 1(CX) + SHRL $0x08, SI + SHLL $0x05, SI + ORL SI, DI + MOVB DI, (CX) + ADDQ $0x02, CX + SUBL $0x08, R10 + + // emitRepeat + LEAL -4(R10), R10 + JMP cant_repeat_two_offset_match_nolit_encodeBlockAsm4MB_emit_copy_short_2b + MOVL R10, DI + LEAL -4(R10), R10 + CMPL DI, $0x08 + JBE repeat_two_match_nolit_encodeBlockAsm4MB_emit_copy_short_2b + CMPL DI, $0x0c + JAE cant_repeat_two_offset_match_nolit_encodeBlockAsm4MB_emit_copy_short_2b + CMPL SI, $0x00000800 + JB repeat_two_offset_match_nolit_encodeBlockAsm4MB_emit_copy_short_2b + +cant_repeat_two_offset_match_nolit_encodeBlockAsm4MB_emit_copy_short_2b: + CMPL R10, $0x00000104 + JB repeat_three_match_nolit_encodeBlockAsm4MB_emit_copy_short_2b + CMPL R10, $0x00010100 + JB repeat_four_match_nolit_encodeBlockAsm4MB_emit_copy_short_2b + LEAL -65536(R10), R10 + MOVL R10, SI + MOVW $0x001d, (CX) + MOVW R10, 2(CX) + SARL $0x10, SI + MOVB SI, 4(CX) + ADDQ $0x05, CX + JMP 
match_nolit_emitcopy_end_encodeBlockAsm4MB + +repeat_four_match_nolit_encodeBlockAsm4MB_emit_copy_short_2b: + LEAL -256(R10), R10 + MOVW $0x0019, (CX) + MOVW R10, 2(CX) + ADDQ $0x04, CX + JMP match_nolit_emitcopy_end_encodeBlockAsm4MB + +repeat_three_match_nolit_encodeBlockAsm4MB_emit_copy_short_2b: + LEAL -4(R10), R10 + MOVW $0x0015, (CX) + MOVB R10, 2(CX) + ADDQ $0x03, CX + JMP match_nolit_emitcopy_end_encodeBlockAsm4MB + +repeat_two_match_nolit_encodeBlockAsm4MB_emit_copy_short_2b: + SHLL $0x02, R10 + ORL $0x01, R10 + MOVW R10, (CX) + ADDQ $0x02, CX + JMP match_nolit_emitcopy_end_encodeBlockAsm4MB + +repeat_two_offset_match_nolit_encodeBlockAsm4MB_emit_copy_short_2b: + XORQ DI, DI + LEAL 1(DI)(R10*4), R10 + MOVB SI, 1(CX) + SARL $0x08, SI + SHLL $0x05, SI + ORL SI, R10 + MOVB R10, (CX) + ADDQ $0x02, CX + JMP match_nolit_emitcopy_end_encodeBlockAsm4MB + +long_offset_short_match_nolit_encodeBlockAsm4MB: + MOVB $0xee, (CX) + MOVW SI, 1(CX) + LEAL -60(R10), R10 + ADDQ $0x03, CX + + // emitRepeat + MOVL R10, DI + LEAL -4(R10), R10 + CMPL DI, $0x08 + JBE repeat_two_match_nolit_encodeBlockAsm4MB_emit_copy_short + CMPL DI, $0x0c + JAE cant_repeat_two_offset_match_nolit_encodeBlockAsm4MB_emit_copy_short + CMPL SI, $0x00000800 + JB repeat_two_offset_match_nolit_encodeBlockAsm4MB_emit_copy_short + +cant_repeat_two_offset_match_nolit_encodeBlockAsm4MB_emit_copy_short: + CMPL R10, $0x00000104 + JB repeat_three_match_nolit_encodeBlockAsm4MB_emit_copy_short + CMPL R10, $0x00010100 + JB repeat_four_match_nolit_encodeBlockAsm4MB_emit_copy_short + LEAL -65536(R10), R10 + MOVL R10, SI + MOVW $0x001d, (CX) + MOVW R10, 2(CX) + SARL $0x10, SI + MOVB SI, 4(CX) + ADDQ $0x05, CX + JMP match_nolit_emitcopy_end_encodeBlockAsm4MB + +repeat_four_match_nolit_encodeBlockAsm4MB_emit_copy_short: + LEAL -256(R10), R10 + MOVW $0x0019, (CX) + MOVW R10, 2(CX) + ADDQ $0x04, CX + JMP match_nolit_emitcopy_end_encodeBlockAsm4MB + +repeat_three_match_nolit_encodeBlockAsm4MB_emit_copy_short: + LEAL -4(R10), R10 + MOVW $0x0015, (CX) + MOVB R10, 2(CX) + ADDQ $0x03, CX + JMP match_nolit_emitcopy_end_encodeBlockAsm4MB + +repeat_two_match_nolit_encodeBlockAsm4MB_emit_copy_short: + SHLL $0x02, R10 + ORL $0x01, R10 + MOVW R10, (CX) + ADDQ $0x02, CX + JMP match_nolit_emitcopy_end_encodeBlockAsm4MB + +repeat_two_offset_match_nolit_encodeBlockAsm4MB_emit_copy_short: + XORQ DI, DI + LEAL 1(DI)(R10*4), R10 + MOVB SI, 1(CX) + SARL $0x08, SI + SHLL $0x05, SI + ORL SI, R10 + MOVB R10, (CX) + ADDQ $0x02, CX + JMP match_nolit_emitcopy_end_encodeBlockAsm4MB + +two_byte_offset_short_match_nolit_encodeBlockAsm4MB: + MOVL R10, DI + SHLL $0x02, DI + CMPL R10, $0x0c + JAE emit_copy_three_match_nolit_encodeBlockAsm4MB + CMPL SI, $0x00000800 + JAE emit_copy_three_match_nolit_encodeBlockAsm4MB + LEAL -15(DI), DI + MOVB SI, 1(CX) + SHRL $0x08, SI + SHLL $0x05, SI + ORL SI, DI + MOVB DI, (CX) + ADDQ $0x02, CX + JMP match_nolit_emitcopy_end_encodeBlockAsm4MB + +emit_copy_three_match_nolit_encodeBlockAsm4MB: + LEAL -2(DI), DI + MOVB DI, (CX) + MOVW SI, 1(CX) + ADDQ $0x03, CX + +match_nolit_emitcopy_end_encodeBlockAsm4MB: + CMPL DX, 8(SP) + JAE emit_remainder_encodeBlockAsm4MB + MOVQ -2(BX)(DX*1), DI + CMPQ CX, (SP) + JB match_nolit_dst_ok_encodeBlockAsm4MB + MOVQ $0x00000000, ret+56(FP) + RET + +match_nolit_dst_ok_encodeBlockAsm4MB: + MOVQ $0x0000cf1bbcdcbf9b, R9 + MOVQ DI, R8 + SHRQ $0x10, DI + MOVQ DI, SI + SHLQ $0x10, R8 + IMULQ R9, R8 + SHRQ $0x32, R8 + SHLQ $0x10, SI + IMULQ R9, SI + SHRQ $0x32, SI + LEAL -2(DX), R9 + LEAQ (AX)(SI*4), R10 + MOVL (R10), 
SI + MOVL R9, (AX)(R8*4) + MOVL DX, (R10) + CMPL (BX)(SI*1), DI + JEQ match_nolit_loop_encodeBlockAsm4MB + INCL DX + JMP search_loop_encodeBlockAsm4MB + +emit_remainder_encodeBlockAsm4MB: + MOVQ src_len+32(FP), AX + SUBL 12(SP), AX + LEAQ 4(CX)(AX*1), AX + CMPQ AX, (SP) + JB emit_remainder_ok_encodeBlockAsm4MB + MOVQ $0x00000000, ret+56(FP) + RET + +emit_remainder_ok_encodeBlockAsm4MB: + MOVQ src_len+32(FP), AX + MOVL 12(SP), DX + CMPL DX, AX + JEQ emit_literal_done_emit_remainder_encodeBlockAsm4MB + MOVL AX, SI + MOVL AX, 12(SP) + LEAQ (BX)(DX*1), AX + SUBL DX, SI + LEAL -1(SI), DX + CMPL DX, $0x3c + JB one_byte_emit_remainder_encodeBlockAsm4MB + CMPL DX, $0x00000100 + JB two_bytes_emit_remainder_encodeBlockAsm4MB + CMPL DX, $0x00010000 + JB three_bytes_emit_remainder_encodeBlockAsm4MB + MOVL DX, BX + SHRL $0x10, BX + MOVB $0xf8, (CX) + MOVW DX, 1(CX) + MOVB BL, 3(CX) + ADDQ $0x04, CX + JMP memmove_long_emit_remainder_encodeBlockAsm4MB + +three_bytes_emit_remainder_encodeBlockAsm4MB: + MOVB $0xf4, (CX) + MOVW DX, 1(CX) + ADDQ $0x03, CX + JMP memmove_long_emit_remainder_encodeBlockAsm4MB + +two_bytes_emit_remainder_encodeBlockAsm4MB: + MOVB $0xf0, (CX) + MOVB DL, 1(CX) + ADDQ $0x02, CX + CMPL DX, $0x40 + JB memmove_emit_remainder_encodeBlockAsm4MB + JMP memmove_long_emit_remainder_encodeBlockAsm4MB + +one_byte_emit_remainder_encodeBlockAsm4MB: + SHLB $0x02, DL + MOVB DL, (CX) + ADDQ $0x01, CX + +memmove_emit_remainder_encodeBlockAsm4MB: + LEAQ (CX)(SI*1), DX + MOVL SI, BX + + // genMemMoveShort + CMPQ BX, $0x03 + JB emit_lit_memmove_emit_remainder_encodeBlockAsm4MB_memmove_move_1or2 + JE emit_lit_memmove_emit_remainder_encodeBlockAsm4MB_memmove_move_3 + CMPQ BX, $0x08 + JB emit_lit_memmove_emit_remainder_encodeBlockAsm4MB_memmove_move_4through7 + CMPQ BX, $0x10 + JBE emit_lit_memmove_emit_remainder_encodeBlockAsm4MB_memmove_move_8through16 + CMPQ BX, $0x20 + JBE emit_lit_memmove_emit_remainder_encodeBlockAsm4MB_memmove_move_17through32 + JMP emit_lit_memmove_emit_remainder_encodeBlockAsm4MB_memmove_move_33through64 + +emit_lit_memmove_emit_remainder_encodeBlockAsm4MB_memmove_move_1or2: + MOVB (AX), SI + MOVB -1(AX)(BX*1), AL + MOVB SI, (CX) + MOVB AL, -1(CX)(BX*1) + JMP memmove_end_copy_emit_remainder_encodeBlockAsm4MB + +emit_lit_memmove_emit_remainder_encodeBlockAsm4MB_memmove_move_3: + MOVW (AX), SI + MOVB 2(AX), AL + MOVW SI, (CX) + MOVB AL, 2(CX) + JMP memmove_end_copy_emit_remainder_encodeBlockAsm4MB + +emit_lit_memmove_emit_remainder_encodeBlockAsm4MB_memmove_move_4through7: + MOVL (AX), SI + MOVL -4(AX)(BX*1), AX + MOVL SI, (CX) + MOVL AX, -4(CX)(BX*1) + JMP memmove_end_copy_emit_remainder_encodeBlockAsm4MB + +emit_lit_memmove_emit_remainder_encodeBlockAsm4MB_memmove_move_8through16: + MOVQ (AX), SI + MOVQ -8(AX)(BX*1), AX + MOVQ SI, (CX) + MOVQ AX, -8(CX)(BX*1) + JMP memmove_end_copy_emit_remainder_encodeBlockAsm4MB + +emit_lit_memmove_emit_remainder_encodeBlockAsm4MB_memmove_move_17through32: + MOVOU (AX), X0 + MOVOU -16(AX)(BX*1), X1 + MOVOU X0, (CX) + MOVOU X1, -16(CX)(BX*1) + JMP memmove_end_copy_emit_remainder_encodeBlockAsm4MB + +emit_lit_memmove_emit_remainder_encodeBlockAsm4MB_memmove_move_33through64: + MOVOU (AX), X0 + MOVOU 16(AX), X1 + MOVOU -32(AX)(BX*1), X2 + MOVOU -16(AX)(BX*1), X3 + MOVOU X0, (CX) + MOVOU X1, 16(CX) + MOVOU X2, -32(CX)(BX*1) + MOVOU X3, -16(CX)(BX*1) + +memmove_end_copy_emit_remainder_encodeBlockAsm4MB: + MOVQ DX, CX + JMP emit_literal_done_emit_remainder_encodeBlockAsm4MB + +memmove_long_emit_remainder_encodeBlockAsm4MB: + LEAQ (CX)(SI*1), DX + 
MOVL SI, BX + + // genMemMoveLong + MOVOU (AX), X0 + MOVOU 16(AX), X1 + MOVOU -32(AX)(BX*1), X2 + MOVOU -16(AX)(BX*1), X3 + MOVQ BX, DI + SHRQ $0x05, DI + MOVQ CX, SI + ANDL $0x0000001f, SI + MOVQ $0x00000040, R8 + SUBQ SI, R8 + DECQ DI + JA emit_lit_memmove_long_emit_remainder_encodeBlockAsm4MBlarge_forward_sse_loop_32 + LEAQ -32(AX)(R8*1), SI + LEAQ -32(CX)(R8*1), R9 + +emit_lit_memmove_long_emit_remainder_encodeBlockAsm4MBlarge_big_loop_back: + MOVOU (SI), X4 + MOVOU 16(SI), X5 + MOVOA X4, (R9) + MOVOA X5, 16(R9) + ADDQ $0x20, R9 + ADDQ $0x20, SI + ADDQ $0x20, R8 + DECQ DI + JNA emit_lit_memmove_long_emit_remainder_encodeBlockAsm4MBlarge_big_loop_back + +emit_lit_memmove_long_emit_remainder_encodeBlockAsm4MBlarge_forward_sse_loop_32: + MOVOU -32(AX)(R8*1), X4 + MOVOU -16(AX)(R8*1), X5 + MOVOA X4, -32(CX)(R8*1) + MOVOA X5, -16(CX)(R8*1) + ADDQ $0x20, R8 + CMPQ BX, R8 + JAE emit_lit_memmove_long_emit_remainder_encodeBlockAsm4MBlarge_forward_sse_loop_32 + MOVOU X0, (CX) + MOVOU X1, 16(CX) + MOVOU X2, -32(CX)(BX*1) + MOVOU X3, -16(CX)(BX*1) + MOVQ DX, CX + +emit_literal_done_emit_remainder_encodeBlockAsm4MB: + MOVQ dst_base+0(FP), AX + SUBQ AX, CX + MOVQ CX, ret+56(FP) + RET + +// func encodeBlockAsm12B(dst []byte, src []byte, tmp *[16384]byte) int +// Requires: BMI, SSE2 +TEXT ·encodeBlockAsm12B(SB), $24-64 + MOVQ tmp+48(FP), AX + MOVQ dst_base+0(FP), CX + MOVQ $0x00000080, DX + MOVQ AX, BX + PXOR X0, X0 + +zero_loop_encodeBlockAsm12B: + MOVOU X0, (BX) + MOVOU X0, 16(BX) + MOVOU X0, 32(BX) + MOVOU X0, 48(BX) + MOVOU X0, 64(BX) + MOVOU X0, 80(BX) + MOVOU X0, 96(BX) + MOVOU X0, 112(BX) + ADDQ $0x80, BX + DECQ DX + JNZ zero_loop_encodeBlockAsm12B + MOVL $0x00000000, 12(SP) + MOVQ src_len+32(FP), DX + LEAQ -9(DX), BX + LEAQ -8(DX), SI + MOVL SI, 8(SP) + SHRQ $0x05, DX + SUBL DX, BX + LEAQ (CX)(BX*1), BX + MOVQ BX, (SP) + MOVL $0x00000001, DX + MOVL DX, 16(SP) + MOVQ src_base+24(FP), BX + +search_loop_encodeBlockAsm12B: + MOVL DX, SI + SUBL 12(SP), SI + SHRL $0x05, SI + LEAL 4(DX)(SI*1), SI + CMPL SI, 8(SP) + JAE emit_remainder_encodeBlockAsm12B + MOVQ (BX)(DX*1), DI + MOVL SI, 20(SP) + MOVQ $0x000000cf1bbcdcbb, R9 + MOVQ DI, R10 + MOVQ DI, R11 + SHRQ $0x08, R11 + SHLQ $0x18, R10 + IMULQ R9, R10 + SHRQ $0x34, R10 + SHLQ $0x18, R11 + IMULQ R9, R11 + SHRQ $0x34, R11 + MOVL (AX)(R10*4), SI + MOVL (AX)(R11*4), R8 + MOVL DX, (AX)(R10*4) + LEAL 1(DX), R10 + MOVL R10, (AX)(R11*4) + MOVQ DI, R10 + SHRQ $0x10, R10 + SHLQ $0x18, R10 + IMULQ R9, R10 + SHRQ $0x34, R10 + MOVL DX, R9 + SUBL 16(SP), R9 + MOVL 1(BX)(R9*1), R11 + MOVQ DI, R9 + SHRQ $0x08, R9 + CMPL R9, R11 + JNE no_repeat_found_encodeBlockAsm12B + LEAL 1(DX), DI + MOVL 12(SP), R8 + MOVL DI, SI + SUBL 16(SP), SI + JZ repeat_extend_back_end_encodeBlockAsm12B + +repeat_extend_back_loop_encodeBlockAsm12B: + CMPL DI, R8 + JBE repeat_extend_back_end_encodeBlockAsm12B + MOVB -1(BX)(SI*1), R9 + MOVB -1(BX)(DI*1), R10 + CMPB R9, R10 + JNE repeat_extend_back_end_encodeBlockAsm12B + LEAL -1(DI), DI + DECL SI + JNZ repeat_extend_back_loop_encodeBlockAsm12B + +repeat_extend_back_end_encodeBlockAsm12B: + MOVL DI, SI + SUBL 12(SP), SI + LEAQ 3(CX)(SI*1), SI + CMPQ SI, (SP) + JB repeat_dst_size_check_encodeBlockAsm12B + MOVQ $0x00000000, ret+56(FP) + RET + +repeat_dst_size_check_encodeBlockAsm12B: + MOVL 12(SP), SI + CMPL SI, DI + JEQ emit_literal_done_repeat_emit_encodeBlockAsm12B + MOVL DI, R9 + MOVL DI, 12(SP) + LEAQ (BX)(SI*1), R10 + SUBL SI, R9 + LEAL -1(R9), SI + CMPL SI, $0x3c + JB one_byte_repeat_emit_encodeBlockAsm12B + CMPL SI, $0x00000100 + JB 
two_bytes_repeat_emit_encodeBlockAsm12B + JB three_bytes_repeat_emit_encodeBlockAsm12B + +three_bytes_repeat_emit_encodeBlockAsm12B: + MOVB $0xf4, (CX) + MOVW SI, 1(CX) + ADDQ $0x03, CX + JMP memmove_long_repeat_emit_encodeBlockAsm12B + +two_bytes_repeat_emit_encodeBlockAsm12B: + MOVB $0xf0, (CX) + MOVB SI, 1(CX) + ADDQ $0x02, CX + CMPL SI, $0x40 + JB memmove_repeat_emit_encodeBlockAsm12B + JMP memmove_long_repeat_emit_encodeBlockAsm12B + +one_byte_repeat_emit_encodeBlockAsm12B: + SHLB $0x02, SI + MOVB SI, (CX) + ADDQ $0x01, CX + +memmove_repeat_emit_encodeBlockAsm12B: + LEAQ (CX)(R9*1), SI + + // genMemMoveShort + CMPQ R9, $0x08 + JBE emit_lit_memmove_repeat_emit_encodeBlockAsm12B_memmove_move_8 + CMPQ R9, $0x10 + JBE emit_lit_memmove_repeat_emit_encodeBlockAsm12B_memmove_move_8through16 + CMPQ R9, $0x20 + JBE emit_lit_memmove_repeat_emit_encodeBlockAsm12B_memmove_move_17through32 + JMP emit_lit_memmove_repeat_emit_encodeBlockAsm12B_memmove_move_33through64 + +emit_lit_memmove_repeat_emit_encodeBlockAsm12B_memmove_move_8: + MOVQ (R10), R11 + MOVQ R11, (CX) + JMP memmove_end_copy_repeat_emit_encodeBlockAsm12B + +emit_lit_memmove_repeat_emit_encodeBlockAsm12B_memmove_move_8through16: + MOVQ (R10), R11 + MOVQ -8(R10)(R9*1), R10 + MOVQ R11, (CX) + MOVQ R10, -8(CX)(R9*1) + JMP memmove_end_copy_repeat_emit_encodeBlockAsm12B + +emit_lit_memmove_repeat_emit_encodeBlockAsm12B_memmove_move_17through32: + MOVOU (R10), X0 + MOVOU -16(R10)(R9*1), X1 + MOVOU X0, (CX) + MOVOU X1, -16(CX)(R9*1) + JMP memmove_end_copy_repeat_emit_encodeBlockAsm12B + +emit_lit_memmove_repeat_emit_encodeBlockAsm12B_memmove_move_33through64: + MOVOU (R10), X0 + MOVOU 16(R10), X1 + MOVOU -32(R10)(R9*1), X2 + MOVOU -16(R10)(R9*1), X3 + MOVOU X0, (CX) + MOVOU X1, 16(CX) + MOVOU X2, -32(CX)(R9*1) + MOVOU X3, -16(CX)(R9*1) + +memmove_end_copy_repeat_emit_encodeBlockAsm12B: + MOVQ SI, CX + JMP emit_literal_done_repeat_emit_encodeBlockAsm12B + +memmove_long_repeat_emit_encodeBlockAsm12B: + LEAQ (CX)(R9*1), SI + + // genMemMoveLong + MOVOU (R10), X0 + MOVOU 16(R10), X1 + MOVOU -32(R10)(R9*1), X2 + MOVOU -16(R10)(R9*1), X3 + MOVQ R9, R12 + SHRQ $0x05, R12 + MOVQ CX, R11 + ANDL $0x0000001f, R11 + MOVQ $0x00000040, R13 + SUBQ R11, R13 + DECQ R12 + JA emit_lit_memmove_long_repeat_emit_encodeBlockAsm12Blarge_forward_sse_loop_32 + LEAQ -32(R10)(R13*1), R11 + LEAQ -32(CX)(R13*1), R14 + +emit_lit_memmove_long_repeat_emit_encodeBlockAsm12Blarge_big_loop_back: + MOVOU (R11), X4 + MOVOU 16(R11), X5 + MOVOA X4, (R14) + MOVOA X5, 16(R14) + ADDQ $0x20, R14 + ADDQ $0x20, R11 + ADDQ $0x20, R13 + DECQ R12 + JNA emit_lit_memmove_long_repeat_emit_encodeBlockAsm12Blarge_big_loop_back + +emit_lit_memmove_long_repeat_emit_encodeBlockAsm12Blarge_forward_sse_loop_32: + MOVOU -32(R10)(R13*1), X4 + MOVOU -16(R10)(R13*1), X5 + MOVOA X4, -32(CX)(R13*1) + MOVOA X5, -16(CX)(R13*1) + ADDQ $0x20, R13 + CMPQ R9, R13 + JAE emit_lit_memmove_long_repeat_emit_encodeBlockAsm12Blarge_forward_sse_loop_32 + MOVOU X0, (CX) + MOVOU X1, 16(CX) + MOVOU X2, -32(CX)(R9*1) + MOVOU X3, -16(CX)(R9*1) + MOVQ SI, CX + +emit_literal_done_repeat_emit_encodeBlockAsm12B: + ADDL $0x05, DX + MOVL DX, SI + SUBL 16(SP), SI + MOVQ src_len+32(FP), R9 + SUBL DX, R9 + LEAQ (BX)(DX*1), R10 + LEAQ (BX)(SI*1), SI + + // matchLen + XORL R12, R12 + +matchlen_loopback_16_repeat_extend_encodeBlockAsm12B: + CMPL R9, $0x10 + JB matchlen_match8_repeat_extend_encodeBlockAsm12B + MOVQ (R10)(R12*1), R11 + MOVQ 8(R10)(R12*1), R13 + XORQ (SI)(R12*1), R11 + JNZ matchlen_bsf_8_repeat_extend_encodeBlockAsm12B + 
XORQ 8(SI)(R12*1), R13 + JNZ matchlen_bsf_16repeat_extend_encodeBlockAsm12B + LEAL -16(R9), R9 + LEAL 16(R12), R12 + JMP matchlen_loopback_16_repeat_extend_encodeBlockAsm12B + +matchlen_bsf_16repeat_extend_encodeBlockAsm12B: +#ifdef GOAMD64_v3 + TZCNTQ R13, R13 + +#else + BSFQ R13, R13 + +#endif + SARQ $0x03, R13 + LEAL 8(R12)(R13*1), R12 + JMP repeat_extend_forward_end_encodeBlockAsm12B + +matchlen_match8_repeat_extend_encodeBlockAsm12B: + CMPL R9, $0x08 + JB matchlen_match4_repeat_extend_encodeBlockAsm12B + MOVQ (R10)(R12*1), R11 + XORQ (SI)(R12*1), R11 + JNZ matchlen_bsf_8_repeat_extend_encodeBlockAsm12B + LEAL -8(R9), R9 + LEAL 8(R12), R12 + JMP matchlen_match4_repeat_extend_encodeBlockAsm12B + +matchlen_bsf_8_repeat_extend_encodeBlockAsm12B: +#ifdef GOAMD64_v3 + TZCNTQ R11, R11 + +#else + BSFQ R11, R11 + +#endif + SARQ $0x03, R11 + LEAL (R12)(R11*1), R12 + JMP repeat_extend_forward_end_encodeBlockAsm12B + +matchlen_match4_repeat_extend_encodeBlockAsm12B: + CMPL R9, $0x04 + JB matchlen_match2_repeat_extend_encodeBlockAsm12B + MOVL (R10)(R12*1), R11 + CMPL (SI)(R12*1), R11 + JNE matchlen_match2_repeat_extend_encodeBlockAsm12B + LEAL -4(R9), R9 + LEAL 4(R12), R12 + +matchlen_match2_repeat_extend_encodeBlockAsm12B: + CMPL R9, $0x01 + JE matchlen_match1_repeat_extend_encodeBlockAsm12B + JB repeat_extend_forward_end_encodeBlockAsm12B + MOVW (R10)(R12*1), R11 + CMPW (SI)(R12*1), R11 + JNE matchlen_match1_repeat_extend_encodeBlockAsm12B + LEAL 2(R12), R12 + SUBL $0x02, R9 + JZ repeat_extend_forward_end_encodeBlockAsm12B + +matchlen_match1_repeat_extend_encodeBlockAsm12B: + MOVB (R10)(R12*1), R11 + CMPB (SI)(R12*1), R11 + JNE repeat_extend_forward_end_encodeBlockAsm12B + LEAL 1(R12), R12 + +repeat_extend_forward_end_encodeBlockAsm12B: + ADDL R12, DX + MOVL DX, SI + SUBL DI, SI + MOVL 16(SP), DI + TESTL R8, R8 + JZ repeat_as_copy_encodeBlockAsm12B + + // emitRepeat + MOVL SI, R8 + LEAL -4(SI), SI + CMPL R8, $0x08 + JBE repeat_two_match_repeat_encodeBlockAsm12B + CMPL R8, $0x0c + JAE cant_repeat_two_offset_match_repeat_encodeBlockAsm12B + CMPL DI, $0x00000800 + JB repeat_two_offset_match_repeat_encodeBlockAsm12B + +cant_repeat_two_offset_match_repeat_encodeBlockAsm12B: + CMPL SI, $0x00000104 + JB repeat_three_match_repeat_encodeBlockAsm12B + LEAL -256(SI), SI + MOVW $0x0019, (CX) + MOVW SI, 2(CX) + ADDQ $0x04, CX + JMP repeat_end_emit_encodeBlockAsm12B + +repeat_three_match_repeat_encodeBlockAsm12B: + LEAL -4(SI), SI + MOVW $0x0015, (CX) + MOVB SI, 2(CX) + ADDQ $0x03, CX + JMP repeat_end_emit_encodeBlockAsm12B + +repeat_two_match_repeat_encodeBlockAsm12B: + SHLL $0x02, SI + ORL $0x01, SI + MOVW SI, (CX) + ADDQ $0x02, CX + JMP repeat_end_emit_encodeBlockAsm12B + +repeat_two_offset_match_repeat_encodeBlockAsm12B: + XORQ R8, R8 + LEAL 1(R8)(SI*4), SI + MOVB DI, 1(CX) + SARL $0x08, DI + SHLL $0x05, DI + ORL DI, SI + MOVB SI, (CX) + ADDQ $0x02, CX + JMP repeat_end_emit_encodeBlockAsm12B + +repeat_as_copy_encodeBlockAsm12B: + // emitCopy + CMPL SI, $0x40 + JBE two_byte_offset_short_repeat_as_copy_encodeBlockAsm12B + CMPL DI, $0x00000800 + JAE long_offset_short_repeat_as_copy_encodeBlockAsm12B + MOVL $0x00000001, R8 + LEAL 16(R8), R8 + MOVB DI, 1(CX) + SHRL $0x08, DI + SHLL $0x05, DI + ORL DI, R8 + MOVB R8, (CX) + ADDQ $0x02, CX + SUBL $0x08, SI + + // emitRepeat + LEAL -4(SI), SI + JMP cant_repeat_two_offset_repeat_as_copy_encodeBlockAsm12B_emit_copy_short_2b + MOVL SI, R8 + LEAL -4(SI), SI + CMPL R8, $0x08 + JBE repeat_two_repeat_as_copy_encodeBlockAsm12B_emit_copy_short_2b + CMPL R8, $0x0c + JAE 
cant_repeat_two_offset_repeat_as_copy_encodeBlockAsm12B_emit_copy_short_2b + CMPL DI, $0x00000800 + JB repeat_two_offset_repeat_as_copy_encodeBlockAsm12B_emit_copy_short_2b + +cant_repeat_two_offset_repeat_as_copy_encodeBlockAsm12B_emit_copy_short_2b: + CMPL SI, $0x00000104 + JB repeat_three_repeat_as_copy_encodeBlockAsm12B_emit_copy_short_2b + LEAL -256(SI), SI + MOVW $0x0019, (CX) + MOVW SI, 2(CX) + ADDQ $0x04, CX + JMP repeat_end_emit_encodeBlockAsm12B + +repeat_three_repeat_as_copy_encodeBlockAsm12B_emit_copy_short_2b: + LEAL -4(SI), SI + MOVW $0x0015, (CX) + MOVB SI, 2(CX) + ADDQ $0x03, CX + JMP repeat_end_emit_encodeBlockAsm12B + +repeat_two_repeat_as_copy_encodeBlockAsm12B_emit_copy_short_2b: + SHLL $0x02, SI + ORL $0x01, SI + MOVW SI, (CX) + ADDQ $0x02, CX + JMP repeat_end_emit_encodeBlockAsm12B + +repeat_two_offset_repeat_as_copy_encodeBlockAsm12B_emit_copy_short_2b: + XORQ R8, R8 + LEAL 1(R8)(SI*4), SI + MOVB DI, 1(CX) + SARL $0x08, DI + SHLL $0x05, DI + ORL DI, SI + MOVB SI, (CX) + ADDQ $0x02, CX + JMP repeat_end_emit_encodeBlockAsm12B + +long_offset_short_repeat_as_copy_encodeBlockAsm12B: + MOVB $0xee, (CX) + MOVW DI, 1(CX) + LEAL -60(SI), SI + ADDQ $0x03, CX + + // emitRepeat + MOVL SI, R8 + LEAL -4(SI), SI + CMPL R8, $0x08 + JBE repeat_two_repeat_as_copy_encodeBlockAsm12B_emit_copy_short + CMPL R8, $0x0c + JAE cant_repeat_two_offset_repeat_as_copy_encodeBlockAsm12B_emit_copy_short + CMPL DI, $0x00000800 + JB repeat_two_offset_repeat_as_copy_encodeBlockAsm12B_emit_copy_short + +cant_repeat_two_offset_repeat_as_copy_encodeBlockAsm12B_emit_copy_short: + CMPL SI, $0x00000104 + JB repeat_three_repeat_as_copy_encodeBlockAsm12B_emit_copy_short + LEAL -256(SI), SI + MOVW $0x0019, (CX) + MOVW SI, 2(CX) + ADDQ $0x04, CX + JMP repeat_end_emit_encodeBlockAsm12B + +repeat_three_repeat_as_copy_encodeBlockAsm12B_emit_copy_short: + LEAL -4(SI), SI + MOVW $0x0015, (CX) + MOVB SI, 2(CX) + ADDQ $0x03, CX + JMP repeat_end_emit_encodeBlockAsm12B + +repeat_two_repeat_as_copy_encodeBlockAsm12B_emit_copy_short: + SHLL $0x02, SI + ORL $0x01, SI + MOVW SI, (CX) + ADDQ $0x02, CX + JMP repeat_end_emit_encodeBlockAsm12B + +repeat_two_offset_repeat_as_copy_encodeBlockAsm12B_emit_copy_short: + XORQ R8, R8 + LEAL 1(R8)(SI*4), SI + MOVB DI, 1(CX) + SARL $0x08, DI + SHLL $0x05, DI + ORL DI, SI + MOVB SI, (CX) + ADDQ $0x02, CX + JMP repeat_end_emit_encodeBlockAsm12B + +two_byte_offset_short_repeat_as_copy_encodeBlockAsm12B: + MOVL SI, R8 + SHLL $0x02, R8 + CMPL SI, $0x0c + JAE emit_copy_three_repeat_as_copy_encodeBlockAsm12B + CMPL DI, $0x00000800 + JAE emit_copy_three_repeat_as_copy_encodeBlockAsm12B + LEAL -15(R8), R8 + MOVB DI, 1(CX) + SHRL $0x08, DI + SHLL $0x05, DI + ORL DI, R8 + MOVB R8, (CX) + ADDQ $0x02, CX + JMP repeat_end_emit_encodeBlockAsm12B + +emit_copy_three_repeat_as_copy_encodeBlockAsm12B: + LEAL -2(R8), R8 + MOVB R8, (CX) + MOVW DI, 1(CX) + ADDQ $0x03, CX + +repeat_end_emit_encodeBlockAsm12B: + MOVL DX, 12(SP) + JMP search_loop_encodeBlockAsm12B + +no_repeat_found_encodeBlockAsm12B: + CMPL (BX)(SI*1), DI + JEQ candidate_match_encodeBlockAsm12B + SHRQ $0x08, DI + MOVL (AX)(R10*4), SI + LEAL 2(DX), R9 + CMPL (BX)(R8*1), DI + JEQ candidate2_match_encodeBlockAsm12B + MOVL R9, (AX)(R10*4) + SHRQ $0x08, DI + CMPL (BX)(SI*1), DI + JEQ candidate3_match_encodeBlockAsm12B + MOVL 20(SP), DX + JMP search_loop_encodeBlockAsm12B + +candidate3_match_encodeBlockAsm12B: + ADDL $0x02, DX + JMP candidate_match_encodeBlockAsm12B + +candidate2_match_encodeBlockAsm12B: + MOVL R9, (AX)(R10*4) + INCL DX + MOVL R8, 
SI + +candidate_match_encodeBlockAsm12B: + MOVL 12(SP), DI + TESTL SI, SI + JZ match_extend_back_end_encodeBlockAsm12B + +match_extend_back_loop_encodeBlockAsm12B: + CMPL DX, DI + JBE match_extend_back_end_encodeBlockAsm12B + MOVB -1(BX)(SI*1), R8 + MOVB -1(BX)(DX*1), R9 + CMPB R8, R9 + JNE match_extend_back_end_encodeBlockAsm12B + LEAL -1(DX), DX + DECL SI + JZ match_extend_back_end_encodeBlockAsm12B + JMP match_extend_back_loop_encodeBlockAsm12B + +match_extend_back_end_encodeBlockAsm12B: + MOVL DX, DI + SUBL 12(SP), DI + LEAQ 3(CX)(DI*1), DI + CMPQ DI, (SP) + JB match_dst_size_check_encodeBlockAsm12B + MOVQ $0x00000000, ret+56(FP) + RET + +match_dst_size_check_encodeBlockAsm12B: + MOVL DX, DI + MOVL 12(SP), R8 + CMPL R8, DI + JEQ emit_literal_done_match_emit_encodeBlockAsm12B + MOVL DI, R9 + MOVL DI, 12(SP) + LEAQ (BX)(R8*1), DI + SUBL R8, R9 + LEAL -1(R9), R8 + CMPL R8, $0x3c + JB one_byte_match_emit_encodeBlockAsm12B + CMPL R8, $0x00000100 + JB two_bytes_match_emit_encodeBlockAsm12B + JB three_bytes_match_emit_encodeBlockAsm12B + +three_bytes_match_emit_encodeBlockAsm12B: + MOVB $0xf4, (CX) + MOVW R8, 1(CX) + ADDQ $0x03, CX + JMP memmove_long_match_emit_encodeBlockAsm12B + +two_bytes_match_emit_encodeBlockAsm12B: + MOVB $0xf0, (CX) + MOVB R8, 1(CX) + ADDQ $0x02, CX + CMPL R8, $0x40 + JB memmove_match_emit_encodeBlockAsm12B + JMP memmove_long_match_emit_encodeBlockAsm12B + +one_byte_match_emit_encodeBlockAsm12B: + SHLB $0x02, R8 + MOVB R8, (CX) + ADDQ $0x01, CX + +memmove_match_emit_encodeBlockAsm12B: + LEAQ (CX)(R9*1), R8 + + // genMemMoveShort + CMPQ R9, $0x08 + JBE emit_lit_memmove_match_emit_encodeBlockAsm12B_memmove_move_8 + CMPQ R9, $0x10 + JBE emit_lit_memmove_match_emit_encodeBlockAsm12B_memmove_move_8through16 + CMPQ R9, $0x20 + JBE emit_lit_memmove_match_emit_encodeBlockAsm12B_memmove_move_17through32 + JMP emit_lit_memmove_match_emit_encodeBlockAsm12B_memmove_move_33through64 + +emit_lit_memmove_match_emit_encodeBlockAsm12B_memmove_move_8: + MOVQ (DI), R10 + MOVQ R10, (CX) + JMP memmove_end_copy_match_emit_encodeBlockAsm12B + +emit_lit_memmove_match_emit_encodeBlockAsm12B_memmove_move_8through16: + MOVQ (DI), R10 + MOVQ -8(DI)(R9*1), DI + MOVQ R10, (CX) + MOVQ DI, -8(CX)(R9*1) + JMP memmove_end_copy_match_emit_encodeBlockAsm12B + +emit_lit_memmove_match_emit_encodeBlockAsm12B_memmove_move_17through32: + MOVOU (DI), X0 + MOVOU -16(DI)(R9*1), X1 + MOVOU X0, (CX) + MOVOU X1, -16(CX)(R9*1) + JMP memmove_end_copy_match_emit_encodeBlockAsm12B + +emit_lit_memmove_match_emit_encodeBlockAsm12B_memmove_move_33through64: + MOVOU (DI), X0 + MOVOU 16(DI), X1 + MOVOU -32(DI)(R9*1), X2 + MOVOU -16(DI)(R9*1), X3 + MOVOU X0, (CX) + MOVOU X1, 16(CX) + MOVOU X2, -32(CX)(R9*1) + MOVOU X3, -16(CX)(R9*1) + +memmove_end_copy_match_emit_encodeBlockAsm12B: + MOVQ R8, CX + JMP emit_literal_done_match_emit_encodeBlockAsm12B + +memmove_long_match_emit_encodeBlockAsm12B: + LEAQ (CX)(R9*1), R8 + + // genMemMoveLong + MOVOU (DI), X0 + MOVOU 16(DI), X1 + MOVOU -32(DI)(R9*1), X2 + MOVOU -16(DI)(R9*1), X3 + MOVQ R9, R11 + SHRQ $0x05, R11 + MOVQ CX, R10 + ANDL $0x0000001f, R10 + MOVQ $0x00000040, R12 + SUBQ R10, R12 + DECQ R11 + JA emit_lit_memmove_long_match_emit_encodeBlockAsm12Blarge_forward_sse_loop_32 + LEAQ -32(DI)(R12*1), R10 + LEAQ -32(CX)(R12*1), R13 + +emit_lit_memmove_long_match_emit_encodeBlockAsm12Blarge_big_loop_back: + MOVOU (R10), X4 + MOVOU 16(R10), X5 + MOVOA X4, (R13) + MOVOA X5, 16(R13) + ADDQ $0x20, R13 + ADDQ $0x20, R10 + ADDQ $0x20, R12 + DECQ R11 + JNA 
emit_lit_memmove_long_match_emit_encodeBlockAsm12Blarge_big_loop_back + +emit_lit_memmove_long_match_emit_encodeBlockAsm12Blarge_forward_sse_loop_32: + MOVOU -32(DI)(R12*1), X4 + MOVOU -16(DI)(R12*1), X5 + MOVOA X4, -32(CX)(R12*1) + MOVOA X5, -16(CX)(R12*1) + ADDQ $0x20, R12 + CMPQ R9, R12 + JAE emit_lit_memmove_long_match_emit_encodeBlockAsm12Blarge_forward_sse_loop_32 + MOVOU X0, (CX) + MOVOU X1, 16(CX) + MOVOU X2, -32(CX)(R9*1) + MOVOU X3, -16(CX)(R9*1) + MOVQ R8, CX + +emit_literal_done_match_emit_encodeBlockAsm12B: +match_nolit_loop_encodeBlockAsm12B: + MOVL DX, DI + SUBL SI, DI + MOVL DI, 16(SP) + ADDL $0x04, DX + ADDL $0x04, SI + MOVQ src_len+32(FP), DI + SUBL DX, DI + LEAQ (BX)(DX*1), R8 + LEAQ (BX)(SI*1), SI + + // matchLen + XORL R10, R10 + +matchlen_loopback_16_match_nolit_encodeBlockAsm12B: + CMPL DI, $0x10 + JB matchlen_match8_match_nolit_encodeBlockAsm12B + MOVQ (R8)(R10*1), R9 + MOVQ 8(R8)(R10*1), R11 + XORQ (SI)(R10*1), R9 + JNZ matchlen_bsf_8_match_nolit_encodeBlockAsm12B + XORQ 8(SI)(R10*1), R11 + JNZ matchlen_bsf_16match_nolit_encodeBlockAsm12B + LEAL -16(DI), DI + LEAL 16(R10), R10 + JMP matchlen_loopback_16_match_nolit_encodeBlockAsm12B + +matchlen_bsf_16match_nolit_encodeBlockAsm12B: +#ifdef GOAMD64_v3 + TZCNTQ R11, R11 + +#else + BSFQ R11, R11 + +#endif + SARQ $0x03, R11 + LEAL 8(R10)(R11*1), R10 + JMP match_nolit_end_encodeBlockAsm12B + +matchlen_match8_match_nolit_encodeBlockAsm12B: + CMPL DI, $0x08 + JB matchlen_match4_match_nolit_encodeBlockAsm12B + MOVQ (R8)(R10*1), R9 + XORQ (SI)(R10*1), R9 + JNZ matchlen_bsf_8_match_nolit_encodeBlockAsm12B + LEAL -8(DI), DI + LEAL 8(R10), R10 + JMP matchlen_match4_match_nolit_encodeBlockAsm12B + +matchlen_bsf_8_match_nolit_encodeBlockAsm12B: +#ifdef GOAMD64_v3 + TZCNTQ R9, R9 + +#else + BSFQ R9, R9 + +#endif + SARQ $0x03, R9 + LEAL (R10)(R9*1), R10 + JMP match_nolit_end_encodeBlockAsm12B + +matchlen_match4_match_nolit_encodeBlockAsm12B: + CMPL DI, $0x04 + JB matchlen_match2_match_nolit_encodeBlockAsm12B + MOVL (R8)(R10*1), R9 + CMPL (SI)(R10*1), R9 + JNE matchlen_match2_match_nolit_encodeBlockAsm12B + LEAL -4(DI), DI + LEAL 4(R10), R10 + +matchlen_match2_match_nolit_encodeBlockAsm12B: + CMPL DI, $0x01 + JE matchlen_match1_match_nolit_encodeBlockAsm12B + JB match_nolit_end_encodeBlockAsm12B + MOVW (R8)(R10*1), R9 + CMPW (SI)(R10*1), R9 + JNE matchlen_match1_match_nolit_encodeBlockAsm12B + LEAL 2(R10), R10 + SUBL $0x02, DI + JZ match_nolit_end_encodeBlockAsm12B + +matchlen_match1_match_nolit_encodeBlockAsm12B: + MOVB (R8)(R10*1), R9 + CMPB (SI)(R10*1), R9 + JNE match_nolit_end_encodeBlockAsm12B + LEAL 1(R10), R10 + +match_nolit_end_encodeBlockAsm12B: + ADDL R10, DX + MOVL 16(SP), SI + ADDL $0x04, R10 + MOVL DX, 12(SP) + + // emitCopy + CMPL R10, $0x40 + JBE two_byte_offset_short_match_nolit_encodeBlockAsm12B + CMPL SI, $0x00000800 + JAE long_offset_short_match_nolit_encodeBlockAsm12B + MOVL $0x00000001, DI + LEAL 16(DI), DI + MOVB SI, 1(CX) + SHRL $0x08, SI + SHLL $0x05, SI + ORL SI, DI + MOVB DI, (CX) + ADDQ $0x02, CX + SUBL $0x08, R10 + + // emitRepeat + LEAL -4(R10), R10 + JMP cant_repeat_two_offset_match_nolit_encodeBlockAsm12B_emit_copy_short_2b + MOVL R10, DI + LEAL -4(R10), R10 + CMPL DI, $0x08 + JBE repeat_two_match_nolit_encodeBlockAsm12B_emit_copy_short_2b + CMPL DI, $0x0c + JAE cant_repeat_two_offset_match_nolit_encodeBlockAsm12B_emit_copy_short_2b + CMPL SI, $0x00000800 + JB repeat_two_offset_match_nolit_encodeBlockAsm12B_emit_copy_short_2b + +cant_repeat_two_offset_match_nolit_encodeBlockAsm12B_emit_copy_short_2b: + 
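+	// 0x104 = 260 = (1<<8)+4: below it the remaining length fits the
+	// three-byte repeat form (tag 0x15, one length byte); otherwise 256 is
+	// subtracted first and the four-byte form (tag 0x19, two length bytes)
+	// is used.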
CMPL R10, $0x00000104 + JB repeat_three_match_nolit_encodeBlockAsm12B_emit_copy_short_2b + LEAL -256(R10), R10 + MOVW $0x0019, (CX) + MOVW R10, 2(CX) + ADDQ $0x04, CX + JMP match_nolit_emitcopy_end_encodeBlockAsm12B + +repeat_three_match_nolit_encodeBlockAsm12B_emit_copy_short_2b: + LEAL -4(R10), R10 + MOVW $0x0015, (CX) + MOVB R10, 2(CX) + ADDQ $0x03, CX + JMP match_nolit_emitcopy_end_encodeBlockAsm12B + +repeat_two_match_nolit_encodeBlockAsm12B_emit_copy_short_2b: + SHLL $0x02, R10 + ORL $0x01, R10 + MOVW R10, (CX) + ADDQ $0x02, CX + JMP match_nolit_emitcopy_end_encodeBlockAsm12B + +repeat_two_offset_match_nolit_encodeBlockAsm12B_emit_copy_short_2b: + XORQ DI, DI + LEAL 1(DI)(R10*4), R10 + MOVB SI, 1(CX) + SARL $0x08, SI + SHLL $0x05, SI + ORL SI, R10 + MOVB R10, (CX) + ADDQ $0x02, CX + JMP match_nolit_emitcopy_end_encodeBlockAsm12B + +long_offset_short_match_nolit_encodeBlockAsm12B: + MOVB $0xee, (CX) + MOVW SI, 1(CX) + LEAL -60(R10), R10 + ADDQ $0x03, CX + + // emitRepeat + MOVL R10, DI + LEAL -4(R10), R10 + CMPL DI, $0x08 + JBE repeat_two_match_nolit_encodeBlockAsm12B_emit_copy_short + CMPL DI, $0x0c + JAE cant_repeat_two_offset_match_nolit_encodeBlockAsm12B_emit_copy_short + CMPL SI, $0x00000800 + JB repeat_two_offset_match_nolit_encodeBlockAsm12B_emit_copy_short + +cant_repeat_two_offset_match_nolit_encodeBlockAsm12B_emit_copy_short: + CMPL R10, $0x00000104 + JB repeat_three_match_nolit_encodeBlockAsm12B_emit_copy_short + LEAL -256(R10), R10 + MOVW $0x0019, (CX) + MOVW R10, 2(CX) + ADDQ $0x04, CX + JMP match_nolit_emitcopy_end_encodeBlockAsm12B + +repeat_three_match_nolit_encodeBlockAsm12B_emit_copy_short: + LEAL -4(R10), R10 + MOVW $0x0015, (CX) + MOVB R10, 2(CX) + ADDQ $0x03, CX + JMP match_nolit_emitcopy_end_encodeBlockAsm12B + +repeat_two_match_nolit_encodeBlockAsm12B_emit_copy_short: + SHLL $0x02, R10 + ORL $0x01, R10 + MOVW R10, (CX) + ADDQ $0x02, CX + JMP match_nolit_emitcopy_end_encodeBlockAsm12B + +repeat_two_offset_match_nolit_encodeBlockAsm12B_emit_copy_short: + XORQ DI, DI + LEAL 1(DI)(R10*4), R10 + MOVB SI, 1(CX) + SARL $0x08, SI + SHLL $0x05, SI + ORL SI, R10 + MOVB R10, (CX) + ADDQ $0x02, CX + JMP match_nolit_emitcopy_end_encodeBlockAsm12B + +two_byte_offset_short_match_nolit_encodeBlockAsm12B: + MOVL R10, DI + SHLL $0x02, DI + CMPL R10, $0x0c + JAE emit_copy_three_match_nolit_encodeBlockAsm12B + CMPL SI, $0x00000800 + JAE emit_copy_three_match_nolit_encodeBlockAsm12B + LEAL -15(DI), DI + MOVB SI, 1(CX) + SHRL $0x08, SI + SHLL $0x05, SI + ORL SI, DI + MOVB DI, (CX) + ADDQ $0x02, CX + JMP match_nolit_emitcopy_end_encodeBlockAsm12B + +emit_copy_three_match_nolit_encodeBlockAsm12B: + LEAL -2(DI), DI + MOVB DI, (CX) + MOVW SI, 1(CX) + ADDQ $0x03, CX + +match_nolit_emitcopy_end_encodeBlockAsm12B: + CMPL DX, 8(SP) + JAE emit_remainder_encodeBlockAsm12B + MOVQ -2(BX)(DX*1), DI + CMPQ CX, (SP) + JB match_nolit_dst_ok_encodeBlockAsm12B + MOVQ $0x00000000, ret+56(FP) + RET + +match_nolit_dst_ok_encodeBlockAsm12B: + MOVQ $0x000000cf1bbcdcbb, R9 + MOVQ DI, R8 + SHRQ $0x10, DI + MOVQ DI, SI + SHLQ $0x18, R8 + IMULQ R9, R8 + SHRQ $0x34, R8 + SHLQ $0x18, SI + IMULQ R9, SI + SHRQ $0x34, SI + LEAL -2(DX), R9 + LEAQ (AX)(SI*4), R10 + MOVL (R10), SI + MOVL R9, (AX)(R8*4) + MOVL DX, (R10) + CMPL (BX)(SI*1), DI + JEQ match_nolit_loop_encodeBlockAsm12B + INCL DX + JMP search_loop_encodeBlockAsm12B + +emit_remainder_encodeBlockAsm12B: + MOVQ src_len+32(FP), AX + SUBL 12(SP), AX + LEAQ 3(CX)(AX*1), AX + CMPQ AX, (SP) + JB emit_remainder_ok_encodeBlockAsm12B + MOVQ $0x00000000, ret+56(FP) + 
RET + +emit_remainder_ok_encodeBlockAsm12B: + MOVQ src_len+32(FP), AX + MOVL 12(SP), DX + CMPL DX, AX + JEQ emit_literal_done_emit_remainder_encodeBlockAsm12B + MOVL AX, SI + MOVL AX, 12(SP) + LEAQ (BX)(DX*1), AX + SUBL DX, SI + LEAL -1(SI), DX + CMPL DX, $0x3c + JB one_byte_emit_remainder_encodeBlockAsm12B + CMPL DX, $0x00000100 + JB two_bytes_emit_remainder_encodeBlockAsm12B + JB three_bytes_emit_remainder_encodeBlockAsm12B + +three_bytes_emit_remainder_encodeBlockAsm12B: + MOVB $0xf4, (CX) + MOVW DX, 1(CX) + ADDQ $0x03, CX + JMP memmove_long_emit_remainder_encodeBlockAsm12B + +two_bytes_emit_remainder_encodeBlockAsm12B: + MOVB $0xf0, (CX) + MOVB DL, 1(CX) + ADDQ $0x02, CX + CMPL DX, $0x40 + JB memmove_emit_remainder_encodeBlockAsm12B + JMP memmove_long_emit_remainder_encodeBlockAsm12B + +one_byte_emit_remainder_encodeBlockAsm12B: + SHLB $0x02, DL + MOVB DL, (CX) + ADDQ $0x01, CX + +memmove_emit_remainder_encodeBlockAsm12B: + LEAQ (CX)(SI*1), DX + MOVL SI, BX + + // genMemMoveShort + CMPQ BX, $0x03 + JB emit_lit_memmove_emit_remainder_encodeBlockAsm12B_memmove_move_1or2 + JE emit_lit_memmove_emit_remainder_encodeBlockAsm12B_memmove_move_3 + CMPQ BX, $0x08 + JB emit_lit_memmove_emit_remainder_encodeBlockAsm12B_memmove_move_4through7 + CMPQ BX, $0x10 + JBE emit_lit_memmove_emit_remainder_encodeBlockAsm12B_memmove_move_8through16 + CMPQ BX, $0x20 + JBE emit_lit_memmove_emit_remainder_encodeBlockAsm12B_memmove_move_17through32 + JMP emit_lit_memmove_emit_remainder_encodeBlockAsm12B_memmove_move_33through64 + +emit_lit_memmove_emit_remainder_encodeBlockAsm12B_memmove_move_1or2: + MOVB (AX), SI + MOVB -1(AX)(BX*1), AL + MOVB SI, (CX) + MOVB AL, -1(CX)(BX*1) + JMP memmove_end_copy_emit_remainder_encodeBlockAsm12B + +emit_lit_memmove_emit_remainder_encodeBlockAsm12B_memmove_move_3: + MOVW (AX), SI + MOVB 2(AX), AL + MOVW SI, (CX) + MOVB AL, 2(CX) + JMP memmove_end_copy_emit_remainder_encodeBlockAsm12B + +emit_lit_memmove_emit_remainder_encodeBlockAsm12B_memmove_move_4through7: + MOVL (AX), SI + MOVL -4(AX)(BX*1), AX + MOVL SI, (CX) + MOVL AX, -4(CX)(BX*1) + JMP memmove_end_copy_emit_remainder_encodeBlockAsm12B + +emit_lit_memmove_emit_remainder_encodeBlockAsm12B_memmove_move_8through16: + MOVQ (AX), SI + MOVQ -8(AX)(BX*1), AX + MOVQ SI, (CX) + MOVQ AX, -8(CX)(BX*1) + JMP memmove_end_copy_emit_remainder_encodeBlockAsm12B + +emit_lit_memmove_emit_remainder_encodeBlockAsm12B_memmove_move_17through32: + MOVOU (AX), X0 + MOVOU -16(AX)(BX*1), X1 + MOVOU X0, (CX) + MOVOU X1, -16(CX)(BX*1) + JMP memmove_end_copy_emit_remainder_encodeBlockAsm12B + +emit_lit_memmove_emit_remainder_encodeBlockAsm12B_memmove_move_33through64: + MOVOU (AX), X0 + MOVOU 16(AX), X1 + MOVOU -32(AX)(BX*1), X2 + MOVOU -16(AX)(BX*1), X3 + MOVOU X0, (CX) + MOVOU X1, 16(CX) + MOVOU X2, -32(CX)(BX*1) + MOVOU X3, -16(CX)(BX*1) + +memmove_end_copy_emit_remainder_encodeBlockAsm12B: + MOVQ DX, CX + JMP emit_literal_done_emit_remainder_encodeBlockAsm12B + +memmove_long_emit_remainder_encodeBlockAsm12B: + LEAQ (CX)(SI*1), DX + MOVL SI, BX + + // genMemMoveLong + MOVOU (AX), X0 + MOVOU 16(AX), X1 + MOVOU -32(AX)(BX*1), X2 + MOVOU -16(AX)(BX*1), X3 + MOVQ BX, DI + SHRQ $0x05, DI + MOVQ CX, SI + ANDL $0x0000001f, SI + MOVQ $0x00000040, R8 + SUBQ SI, R8 + DECQ DI + JA emit_lit_memmove_long_emit_remainder_encodeBlockAsm12Blarge_forward_sse_loop_32 + LEAQ -32(AX)(R8*1), SI + LEAQ -32(CX)(R8*1), R9 + +emit_lit_memmove_long_emit_remainder_encodeBlockAsm12Blarge_big_loop_back: + MOVOU (SI), X4 + MOVOU 16(SI), X5 + MOVOA X4, (R9) + MOVOA X5, 16(R9) + 
ADDQ $0x20, R9 + ADDQ $0x20, SI + ADDQ $0x20, R8 + DECQ DI + JNA emit_lit_memmove_long_emit_remainder_encodeBlockAsm12Blarge_big_loop_back + +emit_lit_memmove_long_emit_remainder_encodeBlockAsm12Blarge_forward_sse_loop_32: + MOVOU -32(AX)(R8*1), X4 + MOVOU -16(AX)(R8*1), X5 + MOVOA X4, -32(CX)(R8*1) + MOVOA X5, -16(CX)(R8*1) + ADDQ $0x20, R8 + CMPQ BX, R8 + JAE emit_lit_memmove_long_emit_remainder_encodeBlockAsm12Blarge_forward_sse_loop_32 + MOVOU X0, (CX) + MOVOU X1, 16(CX) + MOVOU X2, -32(CX)(BX*1) + MOVOU X3, -16(CX)(BX*1) + MOVQ DX, CX + +emit_literal_done_emit_remainder_encodeBlockAsm12B: + MOVQ dst_base+0(FP), AX + SUBQ AX, CX + MOVQ CX, ret+56(FP) + RET + +// func encodeBlockAsm10B(dst []byte, src []byte, tmp *[4096]byte) int +// Requires: BMI, SSE2 +TEXT ·encodeBlockAsm10B(SB), $24-64 + MOVQ tmp+48(FP), AX + MOVQ dst_base+0(FP), CX + MOVQ $0x00000020, DX + MOVQ AX, BX + PXOR X0, X0 + +zero_loop_encodeBlockAsm10B: + MOVOU X0, (BX) + MOVOU X0, 16(BX) + MOVOU X0, 32(BX) + MOVOU X0, 48(BX) + MOVOU X0, 64(BX) + MOVOU X0, 80(BX) + MOVOU X0, 96(BX) + MOVOU X0, 112(BX) + ADDQ $0x80, BX + DECQ DX + JNZ zero_loop_encodeBlockAsm10B + MOVL $0x00000000, 12(SP) + MOVQ src_len+32(FP), DX + LEAQ -9(DX), BX + LEAQ -8(DX), SI + MOVL SI, 8(SP) + SHRQ $0x05, DX + SUBL DX, BX + LEAQ (CX)(BX*1), BX + MOVQ BX, (SP) + MOVL $0x00000001, DX + MOVL DX, 16(SP) + MOVQ src_base+24(FP), BX + +search_loop_encodeBlockAsm10B: + MOVL DX, SI + SUBL 12(SP), SI + SHRL $0x05, SI + LEAL 4(DX)(SI*1), SI + CMPL SI, 8(SP) + JAE emit_remainder_encodeBlockAsm10B + MOVQ (BX)(DX*1), DI + MOVL SI, 20(SP) + MOVQ $0x9e3779b1, R9 + MOVQ DI, R10 + MOVQ DI, R11 + SHRQ $0x08, R11 + SHLQ $0x20, R10 + IMULQ R9, R10 + SHRQ $0x36, R10 + SHLQ $0x20, R11 + IMULQ R9, R11 + SHRQ $0x36, R11 + MOVL (AX)(R10*4), SI + MOVL (AX)(R11*4), R8 + MOVL DX, (AX)(R10*4) + LEAL 1(DX), R10 + MOVL R10, (AX)(R11*4) + MOVQ DI, R10 + SHRQ $0x10, R10 + SHLQ $0x20, R10 + IMULQ R9, R10 + SHRQ $0x36, R10 + MOVL DX, R9 + SUBL 16(SP), R9 + MOVL 1(BX)(R9*1), R11 + MOVQ DI, R9 + SHRQ $0x08, R9 + CMPL R9, R11 + JNE no_repeat_found_encodeBlockAsm10B + LEAL 1(DX), DI + MOVL 12(SP), R8 + MOVL DI, SI + SUBL 16(SP), SI + JZ repeat_extend_back_end_encodeBlockAsm10B + +repeat_extend_back_loop_encodeBlockAsm10B: + CMPL DI, R8 + JBE repeat_extend_back_end_encodeBlockAsm10B + MOVB -1(BX)(SI*1), R9 + MOVB -1(BX)(DI*1), R10 + CMPB R9, R10 + JNE repeat_extend_back_end_encodeBlockAsm10B + LEAL -1(DI), DI + DECL SI + JNZ repeat_extend_back_loop_encodeBlockAsm10B + +repeat_extend_back_end_encodeBlockAsm10B: + MOVL DI, SI + SUBL 12(SP), SI + LEAQ 3(CX)(SI*1), SI + CMPQ SI, (SP) + JB repeat_dst_size_check_encodeBlockAsm10B + MOVQ $0x00000000, ret+56(FP) + RET + +repeat_dst_size_check_encodeBlockAsm10B: + MOVL 12(SP), SI + CMPL SI, DI + JEQ emit_literal_done_repeat_emit_encodeBlockAsm10B + MOVL DI, R9 + MOVL DI, 12(SP) + LEAQ (BX)(SI*1), R10 + SUBL SI, R9 + LEAL -1(R9), SI + CMPL SI, $0x3c + JB one_byte_repeat_emit_encodeBlockAsm10B + CMPL SI, $0x00000100 + JB two_bytes_repeat_emit_encodeBlockAsm10B + JB three_bytes_repeat_emit_encodeBlockAsm10B + +three_bytes_repeat_emit_encodeBlockAsm10B: + MOVB $0xf4, (CX) + MOVW SI, 1(CX) + ADDQ $0x03, CX + JMP memmove_long_repeat_emit_encodeBlockAsm10B + +two_bytes_repeat_emit_encodeBlockAsm10B: + MOVB $0xf0, (CX) + MOVB SI, 1(CX) + ADDQ $0x02, CX + CMPL SI, $0x40 + JB memmove_repeat_emit_encodeBlockAsm10B + JMP memmove_long_repeat_emit_encodeBlockAsm10B + +one_byte_repeat_emit_encodeBlockAsm10B: + SHLB $0x02, SI + MOVB SI, (CX) + ADDQ $0x01, CX + 
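+	// The blocks above emit the Snappy-style literal header: lengths up to
+	// 60 pack (len-1)<<2 into a single tag byte, 0xf0 (60<<2) prefixes a
+	// one-byte length, and 0xf4 (61<<2) a two-byte length. The literal
+	// bytes themselves are copied by the memmove blocks below.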
+memmove_repeat_emit_encodeBlockAsm10B: + LEAQ (CX)(R9*1), SI + + // genMemMoveShort + CMPQ R9, $0x08 + JBE emit_lit_memmove_repeat_emit_encodeBlockAsm10B_memmove_move_8 + CMPQ R9, $0x10 + JBE emit_lit_memmove_repeat_emit_encodeBlockAsm10B_memmove_move_8through16 + CMPQ R9, $0x20 + JBE emit_lit_memmove_repeat_emit_encodeBlockAsm10B_memmove_move_17through32 + JMP emit_lit_memmove_repeat_emit_encodeBlockAsm10B_memmove_move_33through64 + +emit_lit_memmove_repeat_emit_encodeBlockAsm10B_memmove_move_8: + MOVQ (R10), R11 + MOVQ R11, (CX) + JMP memmove_end_copy_repeat_emit_encodeBlockAsm10B + +emit_lit_memmove_repeat_emit_encodeBlockAsm10B_memmove_move_8through16: + MOVQ (R10), R11 + MOVQ -8(R10)(R9*1), R10 + MOVQ R11, (CX) + MOVQ R10, -8(CX)(R9*1) + JMP memmove_end_copy_repeat_emit_encodeBlockAsm10B + +emit_lit_memmove_repeat_emit_encodeBlockAsm10B_memmove_move_17through32: + MOVOU (R10), X0 + MOVOU -16(R10)(R9*1), X1 + MOVOU X0, (CX) + MOVOU X1, -16(CX)(R9*1) + JMP memmove_end_copy_repeat_emit_encodeBlockAsm10B + +emit_lit_memmove_repeat_emit_encodeBlockAsm10B_memmove_move_33through64: + MOVOU (R10), X0 + MOVOU 16(R10), X1 + MOVOU -32(R10)(R9*1), X2 + MOVOU -16(R10)(R9*1), X3 + MOVOU X0, (CX) + MOVOU X1, 16(CX) + MOVOU X2, -32(CX)(R9*1) + MOVOU X3, -16(CX)(R9*1) + +memmove_end_copy_repeat_emit_encodeBlockAsm10B: + MOVQ SI, CX + JMP emit_literal_done_repeat_emit_encodeBlockAsm10B + +memmove_long_repeat_emit_encodeBlockAsm10B: + LEAQ (CX)(R9*1), SI + + // genMemMoveLong + MOVOU (R10), X0 + MOVOU 16(R10), X1 + MOVOU -32(R10)(R9*1), X2 + MOVOU -16(R10)(R9*1), X3 + MOVQ R9, R12 + SHRQ $0x05, R12 + MOVQ CX, R11 + ANDL $0x0000001f, R11 + MOVQ $0x00000040, R13 + SUBQ R11, R13 + DECQ R12 + JA emit_lit_memmove_long_repeat_emit_encodeBlockAsm10Blarge_forward_sse_loop_32 + LEAQ -32(R10)(R13*1), R11 + LEAQ -32(CX)(R13*1), R14 + +emit_lit_memmove_long_repeat_emit_encodeBlockAsm10Blarge_big_loop_back: + MOVOU (R11), X4 + MOVOU 16(R11), X5 + MOVOA X4, (R14) + MOVOA X5, 16(R14) + ADDQ $0x20, R14 + ADDQ $0x20, R11 + ADDQ $0x20, R13 + DECQ R12 + JNA emit_lit_memmove_long_repeat_emit_encodeBlockAsm10Blarge_big_loop_back + +emit_lit_memmove_long_repeat_emit_encodeBlockAsm10Blarge_forward_sse_loop_32: + MOVOU -32(R10)(R13*1), X4 + MOVOU -16(R10)(R13*1), X5 + MOVOA X4, -32(CX)(R13*1) + MOVOA X5, -16(CX)(R13*1) + ADDQ $0x20, R13 + CMPQ R9, R13 + JAE emit_lit_memmove_long_repeat_emit_encodeBlockAsm10Blarge_forward_sse_loop_32 + MOVOU X0, (CX) + MOVOU X1, 16(CX) + MOVOU X2, -32(CX)(R9*1) + MOVOU X3, -16(CX)(R9*1) + MOVQ SI, CX + +emit_literal_done_repeat_emit_encodeBlockAsm10B: + ADDL $0x05, DX + MOVL DX, SI + SUBL 16(SP), SI + MOVQ src_len+32(FP), R9 + SUBL DX, R9 + LEAQ (BX)(DX*1), R10 + LEAQ (BX)(SI*1), SI + + // matchLen + XORL R12, R12 + +matchlen_loopback_16_repeat_extend_encodeBlockAsm10B: + CMPL R9, $0x10 + JB matchlen_match8_repeat_extend_encodeBlockAsm10B + MOVQ (R10)(R12*1), R11 + MOVQ 8(R10)(R12*1), R13 + XORQ (SI)(R12*1), R11 + JNZ matchlen_bsf_8_repeat_extend_encodeBlockAsm10B + XORQ 8(SI)(R12*1), R13 + JNZ matchlen_bsf_16repeat_extend_encodeBlockAsm10B + LEAL -16(R9), R9 + LEAL 16(R12), R12 + JMP matchlen_loopback_16_repeat_extend_encodeBlockAsm10B + +matchlen_bsf_16repeat_extend_encodeBlockAsm10B: +#ifdef GOAMD64_v3 + TZCNTQ R13, R13 + +#else + BSFQ R13, R13 + +#endif + SARQ $0x03, R13 + LEAL 8(R12)(R13*1), R12 + JMP repeat_extend_forward_end_encodeBlockAsm10B + +matchlen_match8_repeat_extend_encodeBlockAsm10B: + CMPL R9, $0x08 + JB matchlen_match4_repeat_extend_encodeBlockAsm10B + MOVQ (R10)(R12*1), R11 
+ XORQ (SI)(R12*1), R11 + JNZ matchlen_bsf_8_repeat_extend_encodeBlockAsm10B + LEAL -8(R9), R9 + LEAL 8(R12), R12 + JMP matchlen_match4_repeat_extend_encodeBlockAsm10B + +matchlen_bsf_8_repeat_extend_encodeBlockAsm10B: +#ifdef GOAMD64_v3 + TZCNTQ R11, R11 + +#else + BSFQ R11, R11 + +#endif + SARQ $0x03, R11 + LEAL (R12)(R11*1), R12 + JMP repeat_extend_forward_end_encodeBlockAsm10B + +matchlen_match4_repeat_extend_encodeBlockAsm10B: + CMPL R9, $0x04 + JB matchlen_match2_repeat_extend_encodeBlockAsm10B + MOVL (R10)(R12*1), R11 + CMPL (SI)(R12*1), R11 + JNE matchlen_match2_repeat_extend_encodeBlockAsm10B + LEAL -4(R9), R9 + LEAL 4(R12), R12 + +matchlen_match2_repeat_extend_encodeBlockAsm10B: + CMPL R9, $0x01 + JE matchlen_match1_repeat_extend_encodeBlockAsm10B + JB repeat_extend_forward_end_encodeBlockAsm10B + MOVW (R10)(R12*1), R11 + CMPW (SI)(R12*1), R11 + JNE matchlen_match1_repeat_extend_encodeBlockAsm10B + LEAL 2(R12), R12 + SUBL $0x02, R9 + JZ repeat_extend_forward_end_encodeBlockAsm10B + +matchlen_match1_repeat_extend_encodeBlockAsm10B: + MOVB (R10)(R12*1), R11 + CMPB (SI)(R12*1), R11 + JNE repeat_extend_forward_end_encodeBlockAsm10B + LEAL 1(R12), R12 + +repeat_extend_forward_end_encodeBlockAsm10B: + ADDL R12, DX + MOVL DX, SI + SUBL DI, SI + MOVL 16(SP), DI + TESTL R8, R8 + JZ repeat_as_copy_encodeBlockAsm10B + + // emitRepeat + MOVL SI, R8 + LEAL -4(SI), SI + CMPL R8, $0x08 + JBE repeat_two_match_repeat_encodeBlockAsm10B + CMPL R8, $0x0c + JAE cant_repeat_two_offset_match_repeat_encodeBlockAsm10B + CMPL DI, $0x00000800 + JB repeat_two_offset_match_repeat_encodeBlockAsm10B + +cant_repeat_two_offset_match_repeat_encodeBlockAsm10B: + CMPL SI, $0x00000104 + JB repeat_three_match_repeat_encodeBlockAsm10B + LEAL -256(SI), SI + MOVW $0x0019, (CX) + MOVW SI, 2(CX) + ADDQ $0x04, CX + JMP repeat_end_emit_encodeBlockAsm10B + +repeat_three_match_repeat_encodeBlockAsm10B: + LEAL -4(SI), SI + MOVW $0x0015, (CX) + MOVB SI, 2(CX) + ADDQ $0x03, CX + JMP repeat_end_emit_encodeBlockAsm10B + +repeat_two_match_repeat_encodeBlockAsm10B: + SHLL $0x02, SI + ORL $0x01, SI + MOVW SI, (CX) + ADDQ $0x02, CX + JMP repeat_end_emit_encodeBlockAsm10B + +repeat_two_offset_match_repeat_encodeBlockAsm10B: + XORQ R8, R8 + LEAL 1(R8)(SI*4), SI + MOVB DI, 1(CX) + SARL $0x08, DI + SHLL $0x05, DI + ORL DI, SI + MOVB SI, (CX) + ADDQ $0x02, CX + JMP repeat_end_emit_encodeBlockAsm10B + +repeat_as_copy_encodeBlockAsm10B: + // emitCopy + CMPL SI, $0x40 + JBE two_byte_offset_short_repeat_as_copy_encodeBlockAsm10B + CMPL DI, $0x00000800 + JAE long_offset_short_repeat_as_copy_encodeBlockAsm10B + MOVL $0x00000001, R8 + LEAL 16(R8), R8 + MOVB DI, 1(CX) + SHRL $0x08, DI + SHLL $0x05, DI + ORL DI, R8 + MOVB R8, (CX) + ADDQ $0x02, CX + SUBL $0x08, SI + + // emitRepeat + LEAL -4(SI), SI + JMP cant_repeat_two_offset_repeat_as_copy_encodeBlockAsm10B_emit_copy_short_2b + MOVL SI, R8 + LEAL -4(SI), SI + CMPL R8, $0x08 + JBE repeat_two_repeat_as_copy_encodeBlockAsm10B_emit_copy_short_2b + CMPL R8, $0x0c + JAE cant_repeat_two_offset_repeat_as_copy_encodeBlockAsm10B_emit_copy_short_2b + CMPL DI, $0x00000800 + JB repeat_two_offset_repeat_as_copy_encodeBlockAsm10B_emit_copy_short_2b + +cant_repeat_two_offset_repeat_as_copy_encodeBlockAsm10B_emit_copy_short_2b: + CMPL SI, $0x00000104 + JB repeat_three_repeat_as_copy_encodeBlockAsm10B_emit_copy_short_2b + LEAL -256(SI), SI + MOVW $0x0019, (CX) + MOVW SI, 2(CX) + ADDQ $0x04, CX + JMP repeat_end_emit_encodeBlockAsm10B + +repeat_three_repeat_as_copy_encodeBlockAsm10B_emit_copy_short_2b: + LEAL 
-4(SI), SI + MOVW $0x0015, (CX) + MOVB SI, 2(CX) + ADDQ $0x03, CX + JMP repeat_end_emit_encodeBlockAsm10B + +repeat_two_repeat_as_copy_encodeBlockAsm10B_emit_copy_short_2b: + SHLL $0x02, SI + ORL $0x01, SI + MOVW SI, (CX) + ADDQ $0x02, CX + JMP repeat_end_emit_encodeBlockAsm10B + +repeat_two_offset_repeat_as_copy_encodeBlockAsm10B_emit_copy_short_2b: + XORQ R8, R8 + LEAL 1(R8)(SI*4), SI + MOVB DI, 1(CX) + SARL $0x08, DI + SHLL $0x05, DI + ORL DI, SI + MOVB SI, (CX) + ADDQ $0x02, CX + JMP repeat_end_emit_encodeBlockAsm10B + +long_offset_short_repeat_as_copy_encodeBlockAsm10B: + MOVB $0xee, (CX) + MOVW DI, 1(CX) + LEAL -60(SI), SI + ADDQ $0x03, CX + + // emitRepeat + MOVL SI, R8 + LEAL -4(SI), SI + CMPL R8, $0x08 + JBE repeat_two_repeat_as_copy_encodeBlockAsm10B_emit_copy_short + CMPL R8, $0x0c + JAE cant_repeat_two_offset_repeat_as_copy_encodeBlockAsm10B_emit_copy_short + CMPL DI, $0x00000800 + JB repeat_two_offset_repeat_as_copy_encodeBlockAsm10B_emit_copy_short + +cant_repeat_two_offset_repeat_as_copy_encodeBlockAsm10B_emit_copy_short: + CMPL SI, $0x00000104 + JB repeat_three_repeat_as_copy_encodeBlockAsm10B_emit_copy_short + LEAL -256(SI), SI + MOVW $0x0019, (CX) + MOVW SI, 2(CX) + ADDQ $0x04, CX + JMP repeat_end_emit_encodeBlockAsm10B + +repeat_three_repeat_as_copy_encodeBlockAsm10B_emit_copy_short: + LEAL -4(SI), SI + MOVW $0x0015, (CX) + MOVB SI, 2(CX) + ADDQ $0x03, CX + JMP repeat_end_emit_encodeBlockAsm10B + +repeat_two_repeat_as_copy_encodeBlockAsm10B_emit_copy_short: + SHLL $0x02, SI + ORL $0x01, SI + MOVW SI, (CX) + ADDQ $0x02, CX + JMP repeat_end_emit_encodeBlockAsm10B + +repeat_two_offset_repeat_as_copy_encodeBlockAsm10B_emit_copy_short: + XORQ R8, R8 + LEAL 1(R8)(SI*4), SI + MOVB DI, 1(CX) + SARL $0x08, DI + SHLL $0x05, DI + ORL DI, SI + MOVB SI, (CX) + ADDQ $0x02, CX + JMP repeat_end_emit_encodeBlockAsm10B + +two_byte_offset_short_repeat_as_copy_encodeBlockAsm10B: + MOVL SI, R8 + SHLL $0x02, R8 + CMPL SI, $0x0c + JAE emit_copy_three_repeat_as_copy_encodeBlockAsm10B + CMPL DI, $0x00000800 + JAE emit_copy_three_repeat_as_copy_encodeBlockAsm10B + LEAL -15(R8), R8 + MOVB DI, 1(CX) + SHRL $0x08, DI + SHLL $0x05, DI + ORL DI, R8 + MOVB R8, (CX) + ADDQ $0x02, CX + JMP repeat_end_emit_encodeBlockAsm10B + +emit_copy_three_repeat_as_copy_encodeBlockAsm10B: + LEAL -2(R8), R8 + MOVB R8, (CX) + MOVW DI, 1(CX) + ADDQ $0x03, CX + +repeat_end_emit_encodeBlockAsm10B: + MOVL DX, 12(SP) + JMP search_loop_encodeBlockAsm10B + +no_repeat_found_encodeBlockAsm10B: + CMPL (BX)(SI*1), DI + JEQ candidate_match_encodeBlockAsm10B + SHRQ $0x08, DI + MOVL (AX)(R10*4), SI + LEAL 2(DX), R9 + CMPL (BX)(R8*1), DI + JEQ candidate2_match_encodeBlockAsm10B + MOVL R9, (AX)(R10*4) + SHRQ $0x08, DI + CMPL (BX)(SI*1), DI + JEQ candidate3_match_encodeBlockAsm10B + MOVL 20(SP), DX + JMP search_loop_encodeBlockAsm10B + +candidate3_match_encodeBlockAsm10B: + ADDL $0x02, DX + JMP candidate_match_encodeBlockAsm10B + +candidate2_match_encodeBlockAsm10B: + MOVL R9, (AX)(R10*4) + INCL DX + MOVL R8, SI + +candidate_match_encodeBlockAsm10B: + MOVL 12(SP), DI + TESTL SI, SI + JZ match_extend_back_end_encodeBlockAsm10B + +match_extend_back_loop_encodeBlockAsm10B: + CMPL DX, DI + JBE match_extend_back_end_encodeBlockAsm10B + MOVB -1(BX)(SI*1), R8 + MOVB -1(BX)(DX*1), R9 + CMPB R8, R9 + JNE match_extend_back_end_encodeBlockAsm10B + LEAL -1(DX), DX + DECL SI + JZ match_extend_back_end_encodeBlockAsm10B + JMP match_extend_back_loop_encodeBlockAsm10B + +match_extend_back_end_encodeBlockAsm10B: + MOVL DX, DI + SUBL 12(SP), DI + LEAQ 
3(CX)(DI*1), DI + CMPQ DI, (SP) + JB match_dst_size_check_encodeBlockAsm10B + MOVQ $0x00000000, ret+56(FP) + RET + +match_dst_size_check_encodeBlockAsm10B: + MOVL DX, DI + MOVL 12(SP), R8 + CMPL R8, DI + JEQ emit_literal_done_match_emit_encodeBlockAsm10B + MOVL DI, R9 + MOVL DI, 12(SP) + LEAQ (BX)(R8*1), DI + SUBL R8, R9 + LEAL -1(R9), R8 + CMPL R8, $0x3c + JB one_byte_match_emit_encodeBlockAsm10B + CMPL R8, $0x00000100 + JB two_bytes_match_emit_encodeBlockAsm10B + JB three_bytes_match_emit_encodeBlockAsm10B + +three_bytes_match_emit_encodeBlockAsm10B: + MOVB $0xf4, (CX) + MOVW R8, 1(CX) + ADDQ $0x03, CX + JMP memmove_long_match_emit_encodeBlockAsm10B + +two_bytes_match_emit_encodeBlockAsm10B: + MOVB $0xf0, (CX) + MOVB R8, 1(CX) + ADDQ $0x02, CX + CMPL R8, $0x40 + JB memmove_match_emit_encodeBlockAsm10B + JMP memmove_long_match_emit_encodeBlockAsm10B + +one_byte_match_emit_encodeBlockAsm10B: + SHLB $0x02, R8 + MOVB R8, (CX) + ADDQ $0x01, CX + +memmove_match_emit_encodeBlockAsm10B: + LEAQ (CX)(R9*1), R8 + + // genMemMoveShort + CMPQ R9, $0x08 + JBE emit_lit_memmove_match_emit_encodeBlockAsm10B_memmove_move_8 + CMPQ R9, $0x10 + JBE emit_lit_memmove_match_emit_encodeBlockAsm10B_memmove_move_8through16 + CMPQ R9, $0x20 + JBE emit_lit_memmove_match_emit_encodeBlockAsm10B_memmove_move_17through32 + JMP emit_lit_memmove_match_emit_encodeBlockAsm10B_memmove_move_33through64 + +emit_lit_memmove_match_emit_encodeBlockAsm10B_memmove_move_8: + MOVQ (DI), R10 + MOVQ R10, (CX) + JMP memmove_end_copy_match_emit_encodeBlockAsm10B + +emit_lit_memmove_match_emit_encodeBlockAsm10B_memmove_move_8through16: + MOVQ (DI), R10 + MOVQ -8(DI)(R9*1), DI + MOVQ R10, (CX) + MOVQ DI, -8(CX)(R9*1) + JMP memmove_end_copy_match_emit_encodeBlockAsm10B + +emit_lit_memmove_match_emit_encodeBlockAsm10B_memmove_move_17through32: + MOVOU (DI), X0 + MOVOU -16(DI)(R9*1), X1 + MOVOU X0, (CX) + MOVOU X1, -16(CX)(R9*1) + JMP memmove_end_copy_match_emit_encodeBlockAsm10B + +emit_lit_memmove_match_emit_encodeBlockAsm10B_memmove_move_33through64: + MOVOU (DI), X0 + MOVOU 16(DI), X1 + MOVOU -32(DI)(R9*1), X2 + MOVOU -16(DI)(R9*1), X3 + MOVOU X0, (CX) + MOVOU X1, 16(CX) + MOVOU X2, -32(CX)(R9*1) + MOVOU X3, -16(CX)(R9*1) + +memmove_end_copy_match_emit_encodeBlockAsm10B: + MOVQ R8, CX + JMP emit_literal_done_match_emit_encodeBlockAsm10B + +memmove_long_match_emit_encodeBlockAsm10B: + LEAQ (CX)(R9*1), R8 + + // genMemMoveLong + MOVOU (DI), X0 + MOVOU 16(DI), X1 + MOVOU -32(DI)(R9*1), X2 + MOVOU -16(DI)(R9*1), X3 + MOVQ R9, R11 + SHRQ $0x05, R11 + MOVQ CX, R10 + ANDL $0x0000001f, R10 + MOVQ $0x00000040, R12 + SUBQ R10, R12 + DECQ R11 + JA emit_lit_memmove_long_match_emit_encodeBlockAsm10Blarge_forward_sse_loop_32 + LEAQ -32(DI)(R12*1), R10 + LEAQ -32(CX)(R12*1), R13 + +emit_lit_memmove_long_match_emit_encodeBlockAsm10Blarge_big_loop_back: + MOVOU (R10), X4 + MOVOU 16(R10), X5 + MOVOA X4, (R13) + MOVOA X5, 16(R13) + ADDQ $0x20, R13 + ADDQ $0x20, R10 + ADDQ $0x20, R12 + DECQ R11 + JNA emit_lit_memmove_long_match_emit_encodeBlockAsm10Blarge_big_loop_back + +emit_lit_memmove_long_match_emit_encodeBlockAsm10Blarge_forward_sse_loop_32: + MOVOU -32(DI)(R12*1), X4 + MOVOU -16(DI)(R12*1), X5 + MOVOA X4, -32(CX)(R12*1) + MOVOA X5, -16(CX)(R12*1) + ADDQ $0x20, R12 + CMPQ R9, R12 + JAE emit_lit_memmove_long_match_emit_encodeBlockAsm10Blarge_forward_sse_loop_32 + MOVOU X0, (CX) + MOVOU X1, 16(CX) + MOVOU X2, -32(CX)(R9*1) + MOVOU X3, -16(CX)(R9*1) + MOVQ R8, CX + +emit_literal_done_match_emit_encodeBlockAsm10B: +match_nolit_loop_encodeBlockAsm10B: + 
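+	// matchLen below compares 16 bytes per iteration with XORQ; the first
+	// nonzero difference is located with TZCNTQ when built for GOAMD64_v3
+	// (BMI) or BSFQ otherwise, and SARQ $0x03 converts the bit index of
+	// the mismatch into a byte count.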
MOVL DX, DI + SUBL SI, DI + MOVL DI, 16(SP) + ADDL $0x04, DX + ADDL $0x04, SI + MOVQ src_len+32(FP), DI + SUBL DX, DI + LEAQ (BX)(DX*1), R8 + LEAQ (BX)(SI*1), SI + + // matchLen + XORL R10, R10 + +matchlen_loopback_16_match_nolit_encodeBlockAsm10B: + CMPL DI, $0x10 + JB matchlen_match8_match_nolit_encodeBlockAsm10B + MOVQ (R8)(R10*1), R9 + MOVQ 8(R8)(R10*1), R11 + XORQ (SI)(R10*1), R9 + JNZ matchlen_bsf_8_match_nolit_encodeBlockAsm10B + XORQ 8(SI)(R10*1), R11 + JNZ matchlen_bsf_16match_nolit_encodeBlockAsm10B + LEAL -16(DI), DI + LEAL 16(R10), R10 + JMP matchlen_loopback_16_match_nolit_encodeBlockAsm10B + +matchlen_bsf_16match_nolit_encodeBlockAsm10B: +#ifdef GOAMD64_v3 + TZCNTQ R11, R11 + +#else + BSFQ R11, R11 + +#endif + SARQ $0x03, R11 + LEAL 8(R10)(R11*1), R10 + JMP match_nolit_end_encodeBlockAsm10B + +matchlen_match8_match_nolit_encodeBlockAsm10B: + CMPL DI, $0x08 + JB matchlen_match4_match_nolit_encodeBlockAsm10B + MOVQ (R8)(R10*1), R9 + XORQ (SI)(R10*1), R9 + JNZ matchlen_bsf_8_match_nolit_encodeBlockAsm10B + LEAL -8(DI), DI + LEAL 8(R10), R10 + JMP matchlen_match4_match_nolit_encodeBlockAsm10B + +matchlen_bsf_8_match_nolit_encodeBlockAsm10B: +#ifdef GOAMD64_v3 + TZCNTQ R9, R9 + +#else + BSFQ R9, R9 + +#endif + SARQ $0x03, R9 + LEAL (R10)(R9*1), R10 + JMP match_nolit_end_encodeBlockAsm10B + +matchlen_match4_match_nolit_encodeBlockAsm10B: + CMPL DI, $0x04 + JB matchlen_match2_match_nolit_encodeBlockAsm10B + MOVL (R8)(R10*1), R9 + CMPL (SI)(R10*1), R9 + JNE matchlen_match2_match_nolit_encodeBlockAsm10B + LEAL -4(DI), DI + LEAL 4(R10), R10 + +matchlen_match2_match_nolit_encodeBlockAsm10B: + CMPL DI, $0x01 + JE matchlen_match1_match_nolit_encodeBlockAsm10B + JB match_nolit_end_encodeBlockAsm10B + MOVW (R8)(R10*1), R9 + CMPW (SI)(R10*1), R9 + JNE matchlen_match1_match_nolit_encodeBlockAsm10B + LEAL 2(R10), R10 + SUBL $0x02, DI + JZ match_nolit_end_encodeBlockAsm10B + +matchlen_match1_match_nolit_encodeBlockAsm10B: + MOVB (R8)(R10*1), R9 + CMPB (SI)(R10*1), R9 + JNE match_nolit_end_encodeBlockAsm10B + LEAL 1(R10), R10 + +match_nolit_end_encodeBlockAsm10B: + ADDL R10, DX + MOVL 16(SP), SI + ADDL $0x04, R10 + MOVL DX, 12(SP) + + // emitCopy + CMPL R10, $0x40 + JBE two_byte_offset_short_match_nolit_encodeBlockAsm10B + CMPL SI, $0x00000800 + JAE long_offset_short_match_nolit_encodeBlockAsm10B + MOVL $0x00000001, DI + LEAL 16(DI), DI + MOVB SI, 1(CX) + SHRL $0x08, SI + SHLL $0x05, SI + ORL SI, DI + MOVB DI, (CX) + ADDQ $0x02, CX + SUBL $0x08, R10 + + // emitRepeat + LEAL -4(R10), R10 + JMP cant_repeat_two_offset_match_nolit_encodeBlockAsm10B_emit_copy_short_2b + MOVL R10, DI + LEAL -4(R10), R10 + CMPL DI, $0x08 + JBE repeat_two_match_nolit_encodeBlockAsm10B_emit_copy_short_2b + CMPL DI, $0x0c + JAE cant_repeat_two_offset_match_nolit_encodeBlockAsm10B_emit_copy_short_2b + CMPL SI, $0x00000800 + JB repeat_two_offset_match_nolit_encodeBlockAsm10B_emit_copy_short_2b + +cant_repeat_two_offset_match_nolit_encodeBlockAsm10B_emit_copy_short_2b: + CMPL R10, $0x00000104 + JB repeat_three_match_nolit_encodeBlockAsm10B_emit_copy_short_2b + LEAL -256(R10), R10 + MOVW $0x0019, (CX) + MOVW R10, 2(CX) + ADDQ $0x04, CX + JMP match_nolit_emitcopy_end_encodeBlockAsm10B + +repeat_three_match_nolit_encodeBlockAsm10B_emit_copy_short_2b: + LEAL -4(R10), R10 + MOVW $0x0015, (CX) + MOVB R10, 2(CX) + ADDQ $0x03, CX + JMP match_nolit_emitcopy_end_encodeBlockAsm10B + +repeat_two_match_nolit_encodeBlockAsm10B_emit_copy_short_2b: + SHLL $0x02, R10 + ORL $0x01, R10 + MOVW R10, (CX) + ADDQ $0x02, CX + JMP 
match_nolit_emitcopy_end_encodeBlockAsm10B + +repeat_two_offset_match_nolit_encodeBlockAsm10B_emit_copy_short_2b: + XORQ DI, DI + LEAL 1(DI)(R10*4), R10 + MOVB SI, 1(CX) + SARL $0x08, SI + SHLL $0x05, SI + ORL SI, R10 + MOVB R10, (CX) + ADDQ $0x02, CX + JMP match_nolit_emitcopy_end_encodeBlockAsm10B + +long_offset_short_match_nolit_encodeBlockAsm10B: + MOVB $0xee, (CX) + MOVW SI, 1(CX) + LEAL -60(R10), R10 + ADDQ $0x03, CX + + // emitRepeat + MOVL R10, DI + LEAL -4(R10), R10 + CMPL DI, $0x08 + JBE repeat_two_match_nolit_encodeBlockAsm10B_emit_copy_short + CMPL DI, $0x0c + JAE cant_repeat_two_offset_match_nolit_encodeBlockAsm10B_emit_copy_short + CMPL SI, $0x00000800 + JB repeat_two_offset_match_nolit_encodeBlockAsm10B_emit_copy_short + +cant_repeat_two_offset_match_nolit_encodeBlockAsm10B_emit_copy_short: + CMPL R10, $0x00000104 + JB repeat_three_match_nolit_encodeBlockAsm10B_emit_copy_short + LEAL -256(R10), R10 + MOVW $0x0019, (CX) + MOVW R10, 2(CX) + ADDQ $0x04, CX + JMP match_nolit_emitcopy_end_encodeBlockAsm10B + +repeat_three_match_nolit_encodeBlockAsm10B_emit_copy_short: + LEAL -4(R10), R10 + MOVW $0x0015, (CX) + MOVB R10, 2(CX) + ADDQ $0x03, CX + JMP match_nolit_emitcopy_end_encodeBlockAsm10B + +repeat_two_match_nolit_encodeBlockAsm10B_emit_copy_short: + SHLL $0x02, R10 + ORL $0x01, R10 + MOVW R10, (CX) + ADDQ $0x02, CX + JMP match_nolit_emitcopy_end_encodeBlockAsm10B + +repeat_two_offset_match_nolit_encodeBlockAsm10B_emit_copy_short: + XORQ DI, DI + LEAL 1(DI)(R10*4), R10 + MOVB SI, 1(CX) + SARL $0x08, SI + SHLL $0x05, SI + ORL SI, R10 + MOVB R10, (CX) + ADDQ $0x02, CX + JMP match_nolit_emitcopy_end_encodeBlockAsm10B + +two_byte_offset_short_match_nolit_encodeBlockAsm10B: + MOVL R10, DI + SHLL $0x02, DI + CMPL R10, $0x0c + JAE emit_copy_three_match_nolit_encodeBlockAsm10B + CMPL SI, $0x00000800 + JAE emit_copy_three_match_nolit_encodeBlockAsm10B + LEAL -15(DI), DI + MOVB SI, 1(CX) + SHRL $0x08, SI + SHLL $0x05, SI + ORL SI, DI + MOVB DI, (CX) + ADDQ $0x02, CX + JMP match_nolit_emitcopy_end_encodeBlockAsm10B + +emit_copy_three_match_nolit_encodeBlockAsm10B: + LEAL -2(DI), DI + MOVB DI, (CX) + MOVW SI, 1(CX) + ADDQ $0x03, CX + +match_nolit_emitcopy_end_encodeBlockAsm10B: + CMPL DX, 8(SP) + JAE emit_remainder_encodeBlockAsm10B + MOVQ -2(BX)(DX*1), DI + CMPQ CX, (SP) + JB match_nolit_dst_ok_encodeBlockAsm10B + MOVQ $0x00000000, ret+56(FP) + RET + +match_nolit_dst_ok_encodeBlockAsm10B: + MOVQ $0x9e3779b1, R9 + MOVQ DI, R8 + SHRQ $0x10, DI + MOVQ DI, SI + SHLQ $0x20, R8 + IMULQ R9, R8 + SHRQ $0x36, R8 + SHLQ $0x20, SI + IMULQ R9, SI + SHRQ $0x36, SI + LEAL -2(DX), R9 + LEAQ (AX)(SI*4), R10 + MOVL (R10), SI + MOVL R9, (AX)(R8*4) + MOVL DX, (R10) + CMPL (BX)(SI*1), DI + JEQ match_nolit_loop_encodeBlockAsm10B + INCL DX + JMP search_loop_encodeBlockAsm10B + +emit_remainder_encodeBlockAsm10B: + MOVQ src_len+32(FP), AX + SUBL 12(SP), AX + LEAQ 3(CX)(AX*1), AX + CMPQ AX, (SP) + JB emit_remainder_ok_encodeBlockAsm10B + MOVQ $0x00000000, ret+56(FP) + RET + +emit_remainder_ok_encodeBlockAsm10B: + MOVQ src_len+32(FP), AX + MOVL 12(SP), DX + CMPL DX, AX + JEQ emit_literal_done_emit_remainder_encodeBlockAsm10B + MOVL AX, SI + MOVL AX, 12(SP) + LEAQ (BX)(DX*1), AX + SUBL DX, SI + LEAL -1(SI), DX + CMPL DX, $0x3c + JB one_byte_emit_remainder_encodeBlockAsm10B + CMPL DX, $0x00000100 + JB two_bytes_emit_remainder_encodeBlockAsm10B + JB three_bytes_emit_remainder_encodeBlockAsm10B + +three_bytes_emit_remainder_encodeBlockAsm10B: + MOVB $0xf4, (CX) + MOVW DX, 1(CX) + ADDQ $0x03, CX + JMP 
memmove_long_emit_remainder_encodeBlockAsm10B + +two_bytes_emit_remainder_encodeBlockAsm10B: + MOVB $0xf0, (CX) + MOVB DL, 1(CX) + ADDQ $0x02, CX + CMPL DX, $0x40 + JB memmove_emit_remainder_encodeBlockAsm10B + JMP memmove_long_emit_remainder_encodeBlockAsm10B + +one_byte_emit_remainder_encodeBlockAsm10B: + SHLB $0x02, DL + MOVB DL, (CX) + ADDQ $0x01, CX + +memmove_emit_remainder_encodeBlockAsm10B: + LEAQ (CX)(SI*1), DX + MOVL SI, BX + + // genMemMoveShort + CMPQ BX, $0x03 + JB emit_lit_memmove_emit_remainder_encodeBlockAsm10B_memmove_move_1or2 + JE emit_lit_memmove_emit_remainder_encodeBlockAsm10B_memmove_move_3 + CMPQ BX, $0x08 + JB emit_lit_memmove_emit_remainder_encodeBlockAsm10B_memmove_move_4through7 + CMPQ BX, $0x10 + JBE emit_lit_memmove_emit_remainder_encodeBlockAsm10B_memmove_move_8through16 + CMPQ BX, $0x20 + JBE emit_lit_memmove_emit_remainder_encodeBlockAsm10B_memmove_move_17through32 + JMP emit_lit_memmove_emit_remainder_encodeBlockAsm10B_memmove_move_33through64 + +emit_lit_memmove_emit_remainder_encodeBlockAsm10B_memmove_move_1or2: + MOVB (AX), SI + MOVB -1(AX)(BX*1), AL + MOVB SI, (CX) + MOVB AL, -1(CX)(BX*1) + JMP memmove_end_copy_emit_remainder_encodeBlockAsm10B + +emit_lit_memmove_emit_remainder_encodeBlockAsm10B_memmove_move_3: + MOVW (AX), SI + MOVB 2(AX), AL + MOVW SI, (CX) + MOVB AL, 2(CX) + JMP memmove_end_copy_emit_remainder_encodeBlockAsm10B + +emit_lit_memmove_emit_remainder_encodeBlockAsm10B_memmove_move_4through7: + MOVL (AX), SI + MOVL -4(AX)(BX*1), AX + MOVL SI, (CX) + MOVL AX, -4(CX)(BX*1) + JMP memmove_end_copy_emit_remainder_encodeBlockAsm10B + +emit_lit_memmove_emit_remainder_encodeBlockAsm10B_memmove_move_8through16: + MOVQ (AX), SI + MOVQ -8(AX)(BX*1), AX + MOVQ SI, (CX) + MOVQ AX, -8(CX)(BX*1) + JMP memmove_end_copy_emit_remainder_encodeBlockAsm10B + +emit_lit_memmove_emit_remainder_encodeBlockAsm10B_memmove_move_17through32: + MOVOU (AX), X0 + MOVOU -16(AX)(BX*1), X1 + MOVOU X0, (CX) + MOVOU X1, -16(CX)(BX*1) + JMP memmove_end_copy_emit_remainder_encodeBlockAsm10B + +emit_lit_memmove_emit_remainder_encodeBlockAsm10B_memmove_move_33through64: + MOVOU (AX), X0 + MOVOU 16(AX), X1 + MOVOU -32(AX)(BX*1), X2 + MOVOU -16(AX)(BX*1), X3 + MOVOU X0, (CX) + MOVOU X1, 16(CX) + MOVOU X2, -32(CX)(BX*1) + MOVOU X3, -16(CX)(BX*1) + +memmove_end_copy_emit_remainder_encodeBlockAsm10B: + MOVQ DX, CX + JMP emit_literal_done_emit_remainder_encodeBlockAsm10B + +memmove_long_emit_remainder_encodeBlockAsm10B: + LEAQ (CX)(SI*1), DX + MOVL SI, BX + + // genMemMoveLong + MOVOU (AX), X0 + MOVOU 16(AX), X1 + MOVOU -32(AX)(BX*1), X2 + MOVOU -16(AX)(BX*1), X3 + MOVQ BX, DI + SHRQ $0x05, DI + MOVQ CX, SI + ANDL $0x0000001f, SI + MOVQ $0x00000040, R8 + SUBQ SI, R8 + DECQ DI + JA emit_lit_memmove_long_emit_remainder_encodeBlockAsm10Blarge_forward_sse_loop_32 + LEAQ -32(AX)(R8*1), SI + LEAQ -32(CX)(R8*1), R9 + +emit_lit_memmove_long_emit_remainder_encodeBlockAsm10Blarge_big_loop_back: + MOVOU (SI), X4 + MOVOU 16(SI), X5 + MOVOA X4, (R9) + MOVOA X5, 16(R9) + ADDQ $0x20, R9 + ADDQ $0x20, SI + ADDQ $0x20, R8 + DECQ DI + JNA emit_lit_memmove_long_emit_remainder_encodeBlockAsm10Blarge_big_loop_back + +emit_lit_memmove_long_emit_remainder_encodeBlockAsm10Blarge_forward_sse_loop_32: + MOVOU -32(AX)(R8*1), X4 + MOVOU -16(AX)(R8*1), X5 + MOVOA X4, -32(CX)(R8*1) + MOVOA X5, -16(CX)(R8*1) + ADDQ $0x20, R8 + CMPQ BX, R8 + JAE emit_lit_memmove_long_emit_remainder_encodeBlockAsm10Blarge_forward_sse_loop_32 + MOVOU X0, (CX) + MOVOU X1, 16(CX) + MOVOU X2, -32(CX)(BX*1) + MOVOU X3, -16(CX)(BX*1) + 
MOVQ DX, CX + +emit_literal_done_emit_remainder_encodeBlockAsm10B: + MOVQ dst_base+0(FP), AX + SUBQ AX, CX + MOVQ CX, ret+56(FP) + RET + +// func encodeBlockAsm8B(dst []byte, src []byte, tmp *[1024]byte) int +// Requires: BMI, SSE2 +TEXT ·encodeBlockAsm8B(SB), $24-64 + MOVQ tmp+48(FP), AX + MOVQ dst_base+0(FP), CX + MOVQ $0x00000008, DX + MOVQ AX, BX + PXOR X0, X0 + +zero_loop_encodeBlockAsm8B: + MOVOU X0, (BX) + MOVOU X0, 16(BX) + MOVOU X0, 32(BX) + MOVOU X0, 48(BX) + MOVOU X0, 64(BX) + MOVOU X0, 80(BX) + MOVOU X0, 96(BX) + MOVOU X0, 112(BX) + ADDQ $0x80, BX + DECQ DX + JNZ zero_loop_encodeBlockAsm8B + MOVL $0x00000000, 12(SP) + MOVQ src_len+32(FP), DX + LEAQ -9(DX), BX + LEAQ -8(DX), SI + MOVL SI, 8(SP) + SHRQ $0x05, DX + SUBL DX, BX + LEAQ (CX)(BX*1), BX + MOVQ BX, (SP) + MOVL $0x00000001, DX + MOVL DX, 16(SP) + MOVQ src_base+24(FP), BX + +search_loop_encodeBlockAsm8B: + MOVL DX, SI + SUBL 12(SP), SI + SHRL $0x04, SI + LEAL 4(DX)(SI*1), SI + CMPL SI, 8(SP) + JAE emit_remainder_encodeBlockAsm8B + MOVQ (BX)(DX*1), DI + MOVL SI, 20(SP) + MOVQ $0x9e3779b1, R9 + MOVQ DI, R10 + MOVQ DI, R11 + SHRQ $0x08, R11 + SHLQ $0x20, R10 + IMULQ R9, R10 + SHRQ $0x38, R10 + SHLQ $0x20, R11 + IMULQ R9, R11 + SHRQ $0x38, R11 + MOVL (AX)(R10*4), SI + MOVL (AX)(R11*4), R8 + MOVL DX, (AX)(R10*4) + LEAL 1(DX), R10 + MOVL R10, (AX)(R11*4) + MOVQ DI, R10 + SHRQ $0x10, R10 + SHLQ $0x20, R10 + IMULQ R9, R10 + SHRQ $0x38, R10 + MOVL DX, R9 + SUBL 16(SP), R9 + MOVL 1(BX)(R9*1), R11 + MOVQ DI, R9 + SHRQ $0x08, R9 + CMPL R9, R11 + JNE no_repeat_found_encodeBlockAsm8B + LEAL 1(DX), DI + MOVL 12(SP), R8 + MOVL DI, SI + SUBL 16(SP), SI + JZ repeat_extend_back_end_encodeBlockAsm8B + +repeat_extend_back_loop_encodeBlockAsm8B: + CMPL DI, R8 + JBE repeat_extend_back_end_encodeBlockAsm8B + MOVB -1(BX)(SI*1), R9 + MOVB -1(BX)(DI*1), R10 + CMPB R9, R10 + JNE repeat_extend_back_end_encodeBlockAsm8B + LEAL -1(DI), DI + DECL SI + JNZ repeat_extend_back_loop_encodeBlockAsm8B + +repeat_extend_back_end_encodeBlockAsm8B: + MOVL DI, SI + SUBL 12(SP), SI + LEAQ 3(CX)(SI*1), SI + CMPQ SI, (SP) + JB repeat_dst_size_check_encodeBlockAsm8B + MOVQ $0x00000000, ret+56(FP) + RET + +repeat_dst_size_check_encodeBlockAsm8B: + MOVL 12(SP), SI + CMPL SI, DI + JEQ emit_literal_done_repeat_emit_encodeBlockAsm8B + MOVL DI, R9 + MOVL DI, 12(SP) + LEAQ (BX)(SI*1), R10 + SUBL SI, R9 + LEAL -1(R9), SI + CMPL SI, $0x3c + JB one_byte_repeat_emit_encodeBlockAsm8B + CMPL SI, $0x00000100 + JB two_bytes_repeat_emit_encodeBlockAsm8B + JB three_bytes_repeat_emit_encodeBlockAsm8B + +three_bytes_repeat_emit_encodeBlockAsm8B: + MOVB $0xf4, (CX) + MOVW SI, 1(CX) + ADDQ $0x03, CX + JMP memmove_long_repeat_emit_encodeBlockAsm8B + +two_bytes_repeat_emit_encodeBlockAsm8B: + MOVB $0xf0, (CX) + MOVB SI, 1(CX) + ADDQ $0x02, CX + CMPL SI, $0x40 + JB memmove_repeat_emit_encodeBlockAsm8B + JMP memmove_long_repeat_emit_encodeBlockAsm8B + +one_byte_repeat_emit_encodeBlockAsm8B: + SHLB $0x02, SI + MOVB SI, (CX) + ADDQ $0x01, CX + +memmove_repeat_emit_encodeBlockAsm8B: + LEAQ (CX)(R9*1), SI + + // genMemMoveShort + CMPQ R9, $0x08 + JBE emit_lit_memmove_repeat_emit_encodeBlockAsm8B_memmove_move_8 + CMPQ R9, $0x10 + JBE emit_lit_memmove_repeat_emit_encodeBlockAsm8B_memmove_move_8through16 + CMPQ R9, $0x20 + JBE emit_lit_memmove_repeat_emit_encodeBlockAsm8B_memmove_move_17through32 + JMP emit_lit_memmove_repeat_emit_encodeBlockAsm8B_memmove_move_33through64 + +emit_lit_memmove_repeat_emit_encodeBlockAsm8B_memmove_move_8: + MOVQ (R10), R11 + MOVQ R11, (CX) + JMP 
memmove_end_copy_repeat_emit_encodeBlockAsm8B + +emit_lit_memmove_repeat_emit_encodeBlockAsm8B_memmove_move_8through16: + MOVQ (R10), R11 + MOVQ -8(R10)(R9*1), R10 + MOVQ R11, (CX) + MOVQ R10, -8(CX)(R9*1) + JMP memmove_end_copy_repeat_emit_encodeBlockAsm8B + +emit_lit_memmove_repeat_emit_encodeBlockAsm8B_memmove_move_17through32: + MOVOU (R10), X0 + MOVOU -16(R10)(R9*1), X1 + MOVOU X0, (CX) + MOVOU X1, -16(CX)(R9*1) + JMP memmove_end_copy_repeat_emit_encodeBlockAsm8B + +emit_lit_memmove_repeat_emit_encodeBlockAsm8B_memmove_move_33through64: + MOVOU (R10), X0 + MOVOU 16(R10), X1 + MOVOU -32(R10)(R9*1), X2 + MOVOU -16(R10)(R9*1), X3 + MOVOU X0, (CX) + MOVOU X1, 16(CX) + MOVOU X2, -32(CX)(R9*1) + MOVOU X3, -16(CX)(R9*1) + +memmove_end_copy_repeat_emit_encodeBlockAsm8B: + MOVQ SI, CX + JMP emit_literal_done_repeat_emit_encodeBlockAsm8B + +memmove_long_repeat_emit_encodeBlockAsm8B: + LEAQ (CX)(R9*1), SI + + // genMemMoveLong + MOVOU (R10), X0 + MOVOU 16(R10), X1 + MOVOU -32(R10)(R9*1), X2 + MOVOU -16(R10)(R9*1), X3 + MOVQ R9, R12 + SHRQ $0x05, R12 + MOVQ CX, R11 + ANDL $0x0000001f, R11 + MOVQ $0x00000040, R13 + SUBQ R11, R13 + DECQ R12 + JA emit_lit_memmove_long_repeat_emit_encodeBlockAsm8Blarge_forward_sse_loop_32 + LEAQ -32(R10)(R13*1), R11 + LEAQ -32(CX)(R13*1), R14 + +emit_lit_memmove_long_repeat_emit_encodeBlockAsm8Blarge_big_loop_back: + MOVOU (R11), X4 + MOVOU 16(R11), X5 + MOVOA X4, (R14) + MOVOA X5, 16(R14) + ADDQ $0x20, R14 + ADDQ $0x20, R11 + ADDQ $0x20, R13 + DECQ R12 + JNA emit_lit_memmove_long_repeat_emit_encodeBlockAsm8Blarge_big_loop_back + +emit_lit_memmove_long_repeat_emit_encodeBlockAsm8Blarge_forward_sse_loop_32: + MOVOU -32(R10)(R13*1), X4 + MOVOU -16(R10)(R13*1), X5 + MOVOA X4, -32(CX)(R13*1) + MOVOA X5, -16(CX)(R13*1) + ADDQ $0x20, R13 + CMPQ R9, R13 + JAE emit_lit_memmove_long_repeat_emit_encodeBlockAsm8Blarge_forward_sse_loop_32 + MOVOU X0, (CX) + MOVOU X1, 16(CX) + MOVOU X2, -32(CX)(R9*1) + MOVOU X3, -16(CX)(R9*1) + MOVQ SI, CX + +emit_literal_done_repeat_emit_encodeBlockAsm8B: + ADDL $0x05, DX + MOVL DX, SI + SUBL 16(SP), SI + MOVQ src_len+32(FP), R9 + SUBL DX, R9 + LEAQ (BX)(DX*1), R10 + LEAQ (BX)(SI*1), SI + + // matchLen + XORL R12, R12 + +matchlen_loopback_16_repeat_extend_encodeBlockAsm8B: + CMPL R9, $0x10 + JB matchlen_match8_repeat_extend_encodeBlockAsm8B + MOVQ (R10)(R12*1), R11 + MOVQ 8(R10)(R12*1), R13 + XORQ (SI)(R12*1), R11 + JNZ matchlen_bsf_8_repeat_extend_encodeBlockAsm8B + XORQ 8(SI)(R12*1), R13 + JNZ matchlen_bsf_16repeat_extend_encodeBlockAsm8B + LEAL -16(R9), R9 + LEAL 16(R12), R12 + JMP matchlen_loopback_16_repeat_extend_encodeBlockAsm8B + +matchlen_bsf_16repeat_extend_encodeBlockAsm8B: +#ifdef GOAMD64_v3 + TZCNTQ R13, R13 + +#else + BSFQ R13, R13 + +#endif + SARQ $0x03, R13 + LEAL 8(R12)(R13*1), R12 + JMP repeat_extend_forward_end_encodeBlockAsm8B + +matchlen_match8_repeat_extend_encodeBlockAsm8B: + CMPL R9, $0x08 + JB matchlen_match4_repeat_extend_encodeBlockAsm8B + MOVQ (R10)(R12*1), R11 + XORQ (SI)(R12*1), R11 + JNZ matchlen_bsf_8_repeat_extend_encodeBlockAsm8B + LEAL -8(R9), R9 + LEAL 8(R12), R12 + JMP matchlen_match4_repeat_extend_encodeBlockAsm8B + +matchlen_bsf_8_repeat_extend_encodeBlockAsm8B: +#ifdef GOAMD64_v3 + TZCNTQ R11, R11 + +#else + BSFQ R11, R11 + +#endif + SARQ $0x03, R11 + LEAL (R12)(R11*1), R12 + JMP repeat_extend_forward_end_encodeBlockAsm8B + +matchlen_match4_repeat_extend_encodeBlockAsm8B: + CMPL R9, $0x04 + JB matchlen_match2_repeat_extend_encodeBlockAsm8B + MOVL (R10)(R12*1), R11 + CMPL (SI)(R12*1), R11 + JNE 
matchlen_match2_repeat_extend_encodeBlockAsm8B + LEAL -4(R9), R9 + LEAL 4(R12), R12 + +matchlen_match2_repeat_extend_encodeBlockAsm8B: + CMPL R9, $0x01 + JE matchlen_match1_repeat_extend_encodeBlockAsm8B + JB repeat_extend_forward_end_encodeBlockAsm8B + MOVW (R10)(R12*1), R11 + CMPW (SI)(R12*1), R11 + JNE matchlen_match1_repeat_extend_encodeBlockAsm8B + LEAL 2(R12), R12 + SUBL $0x02, R9 + JZ repeat_extend_forward_end_encodeBlockAsm8B + +matchlen_match1_repeat_extend_encodeBlockAsm8B: + MOVB (R10)(R12*1), R11 + CMPB (SI)(R12*1), R11 + JNE repeat_extend_forward_end_encodeBlockAsm8B + LEAL 1(R12), R12 + +repeat_extend_forward_end_encodeBlockAsm8B: + ADDL R12, DX + MOVL DX, SI + SUBL DI, SI + MOVL 16(SP), DI + TESTL R8, R8 + JZ repeat_as_copy_encodeBlockAsm8B + + // emitRepeat + MOVL SI, DI + LEAL -4(SI), SI + CMPL DI, $0x08 + JBE repeat_two_match_repeat_encodeBlockAsm8B + CMPL DI, $0x0c + JAE cant_repeat_two_offset_match_repeat_encodeBlockAsm8B + +cant_repeat_two_offset_match_repeat_encodeBlockAsm8B: + CMPL SI, $0x00000104 + JB repeat_three_match_repeat_encodeBlockAsm8B + LEAL -256(SI), SI + MOVW $0x0019, (CX) + MOVW SI, 2(CX) + ADDQ $0x04, CX + JMP repeat_end_emit_encodeBlockAsm8B + +repeat_three_match_repeat_encodeBlockAsm8B: + LEAL -4(SI), SI + MOVW $0x0015, (CX) + MOVB SI, 2(CX) + ADDQ $0x03, CX + JMP repeat_end_emit_encodeBlockAsm8B + +repeat_two_match_repeat_encodeBlockAsm8B: + SHLL $0x02, SI + ORL $0x01, SI + MOVW SI, (CX) + ADDQ $0x02, CX + JMP repeat_end_emit_encodeBlockAsm8B + XORQ R8, R8 + LEAL 1(R8)(SI*4), SI + MOVB DI, 1(CX) + SARL $0x08, DI + SHLL $0x05, DI + ORL DI, SI + MOVB SI, (CX) + ADDQ $0x02, CX + JMP repeat_end_emit_encodeBlockAsm8B + +repeat_as_copy_encodeBlockAsm8B: + // emitCopy + CMPL SI, $0x40 + JBE two_byte_offset_short_repeat_as_copy_encodeBlockAsm8B + CMPL DI, $0x00000800 + JAE long_offset_short_repeat_as_copy_encodeBlockAsm8B + MOVL $0x00000001, R8 + LEAL 16(R8), R8 + MOVB DI, 1(CX) + SHRL $0x08, DI + SHLL $0x05, DI + ORL DI, R8 + MOVB R8, (CX) + ADDQ $0x02, CX + SUBL $0x08, SI + + // emitRepeat + LEAL -4(SI), SI + JMP cant_repeat_two_offset_repeat_as_copy_encodeBlockAsm8B_emit_copy_short_2b + MOVL SI, DI + LEAL -4(SI), SI + CMPL DI, $0x08 + JBE repeat_two_repeat_as_copy_encodeBlockAsm8B_emit_copy_short_2b + CMPL DI, $0x0c + JAE cant_repeat_two_offset_repeat_as_copy_encodeBlockAsm8B_emit_copy_short_2b + +cant_repeat_two_offset_repeat_as_copy_encodeBlockAsm8B_emit_copy_short_2b: + CMPL SI, $0x00000104 + JB repeat_three_repeat_as_copy_encodeBlockAsm8B_emit_copy_short_2b + LEAL -256(SI), SI + MOVW $0x0019, (CX) + MOVW SI, 2(CX) + ADDQ $0x04, CX + JMP repeat_end_emit_encodeBlockAsm8B + +repeat_three_repeat_as_copy_encodeBlockAsm8B_emit_copy_short_2b: + LEAL -4(SI), SI + MOVW $0x0015, (CX) + MOVB SI, 2(CX) + ADDQ $0x03, CX + JMP repeat_end_emit_encodeBlockAsm8B + +repeat_two_repeat_as_copy_encodeBlockAsm8B_emit_copy_short_2b: + SHLL $0x02, SI + ORL $0x01, SI + MOVW SI, (CX) + ADDQ $0x02, CX + JMP repeat_end_emit_encodeBlockAsm8B + XORQ R8, R8 + LEAL 1(R8)(SI*4), SI + MOVB DI, 1(CX) + SARL $0x08, DI + SHLL $0x05, DI + ORL DI, SI + MOVB SI, (CX) + ADDQ $0x02, CX + JMP repeat_end_emit_encodeBlockAsm8B + +long_offset_short_repeat_as_copy_encodeBlockAsm8B: + MOVB $0xee, (CX) + MOVW DI, 1(CX) + LEAL -60(SI), SI + ADDQ $0x03, CX + + // emitRepeat + MOVL SI, DI + LEAL -4(SI), SI + CMPL DI, $0x08 + JBE repeat_two_repeat_as_copy_encodeBlockAsm8B_emit_copy_short + CMPL DI, $0x0c + JAE cant_repeat_two_offset_repeat_as_copy_encodeBlockAsm8B_emit_copy_short + 
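+	// emitRepeat tags (an s2 extension over Snappy): short repeats pack
+	// (len-4)<<2|0x01 into two bytes, 0x15 (5<<2|1) prefixes a one-byte
+	// length, and 0x19 (6<<2|1) a two-byte length.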
+cant_repeat_two_offset_repeat_as_copy_encodeBlockAsm8B_emit_copy_short: + CMPL SI, $0x00000104 + JB repeat_three_repeat_as_copy_encodeBlockAsm8B_emit_copy_short + LEAL -256(SI), SI + MOVW $0x0019, (CX) + MOVW SI, 2(CX) + ADDQ $0x04, CX + JMP repeat_end_emit_encodeBlockAsm8B + +repeat_three_repeat_as_copy_encodeBlockAsm8B_emit_copy_short: + LEAL -4(SI), SI + MOVW $0x0015, (CX) + MOVB SI, 2(CX) + ADDQ $0x03, CX + JMP repeat_end_emit_encodeBlockAsm8B + +repeat_two_repeat_as_copy_encodeBlockAsm8B_emit_copy_short: + SHLL $0x02, SI + ORL $0x01, SI + MOVW SI, (CX) + ADDQ $0x02, CX + JMP repeat_end_emit_encodeBlockAsm8B + XORQ R8, R8 + LEAL 1(R8)(SI*4), SI + MOVB DI, 1(CX) + SARL $0x08, DI + SHLL $0x05, DI + ORL DI, SI + MOVB SI, (CX) + ADDQ $0x02, CX + JMP repeat_end_emit_encodeBlockAsm8B + +two_byte_offset_short_repeat_as_copy_encodeBlockAsm8B: + MOVL SI, R8 + SHLL $0x02, R8 + CMPL SI, $0x0c + JAE emit_copy_three_repeat_as_copy_encodeBlockAsm8B + LEAL -15(R8), R8 + MOVB DI, 1(CX) + SHRL $0x08, DI + SHLL $0x05, DI + ORL DI, R8 + MOVB R8, (CX) + ADDQ $0x02, CX + JMP repeat_end_emit_encodeBlockAsm8B + +emit_copy_three_repeat_as_copy_encodeBlockAsm8B: + LEAL -2(R8), R8 + MOVB R8, (CX) + MOVW DI, 1(CX) + ADDQ $0x03, CX + +repeat_end_emit_encodeBlockAsm8B: + MOVL DX, 12(SP) + JMP search_loop_encodeBlockAsm8B + +no_repeat_found_encodeBlockAsm8B: + CMPL (BX)(SI*1), DI + JEQ candidate_match_encodeBlockAsm8B + SHRQ $0x08, DI + MOVL (AX)(R10*4), SI + LEAL 2(DX), R9 + CMPL (BX)(R8*1), DI + JEQ candidate2_match_encodeBlockAsm8B + MOVL R9, (AX)(R10*4) + SHRQ $0x08, DI + CMPL (BX)(SI*1), DI + JEQ candidate3_match_encodeBlockAsm8B + MOVL 20(SP), DX + JMP search_loop_encodeBlockAsm8B + +candidate3_match_encodeBlockAsm8B: + ADDL $0x02, DX + JMP candidate_match_encodeBlockAsm8B + +candidate2_match_encodeBlockAsm8B: + MOVL R9, (AX)(R10*4) + INCL DX + MOVL R8, SI + +candidate_match_encodeBlockAsm8B: + MOVL 12(SP), DI + TESTL SI, SI + JZ match_extend_back_end_encodeBlockAsm8B + +match_extend_back_loop_encodeBlockAsm8B: + CMPL DX, DI + JBE match_extend_back_end_encodeBlockAsm8B + MOVB -1(BX)(SI*1), R8 + MOVB -1(BX)(DX*1), R9 + CMPB R8, R9 + JNE match_extend_back_end_encodeBlockAsm8B + LEAL -1(DX), DX + DECL SI + JZ match_extend_back_end_encodeBlockAsm8B + JMP match_extend_back_loop_encodeBlockAsm8B + +match_extend_back_end_encodeBlockAsm8B: + MOVL DX, DI + SUBL 12(SP), DI + LEAQ 3(CX)(DI*1), DI + CMPQ DI, (SP) + JB match_dst_size_check_encodeBlockAsm8B + MOVQ $0x00000000, ret+56(FP) + RET + +match_dst_size_check_encodeBlockAsm8B: + MOVL DX, DI + MOVL 12(SP), R8 + CMPL R8, DI + JEQ emit_literal_done_match_emit_encodeBlockAsm8B + MOVL DI, R9 + MOVL DI, 12(SP) + LEAQ (BX)(R8*1), DI + SUBL R8, R9 + LEAL -1(R9), R8 + CMPL R8, $0x3c + JB one_byte_match_emit_encodeBlockAsm8B + CMPL R8, $0x00000100 + JB two_bytes_match_emit_encodeBlockAsm8B + JB three_bytes_match_emit_encodeBlockAsm8B + +three_bytes_match_emit_encodeBlockAsm8B: + MOVB $0xf4, (CX) + MOVW R8, 1(CX) + ADDQ $0x03, CX + JMP memmove_long_match_emit_encodeBlockAsm8B + +two_bytes_match_emit_encodeBlockAsm8B: + MOVB $0xf0, (CX) + MOVB R8, 1(CX) + ADDQ $0x02, CX + CMPL R8, $0x40 + JB memmove_match_emit_encodeBlockAsm8B + JMP memmove_long_match_emit_encodeBlockAsm8B + +one_byte_match_emit_encodeBlockAsm8B: + SHLB $0x02, R8 + MOVB R8, (CX) + ADDQ $0x01, CX + +memmove_match_emit_encodeBlockAsm8B: + LEAQ (CX)(R9*1), R8 + + // genMemMoveShort + CMPQ R9, $0x08 + JBE emit_lit_memmove_match_emit_encodeBlockAsm8B_memmove_move_8 + CMPQ R9, $0x10 + JBE 
emit_lit_memmove_match_emit_encodeBlockAsm8B_memmove_move_8through16 + CMPQ R9, $0x20 + JBE emit_lit_memmove_match_emit_encodeBlockAsm8B_memmove_move_17through32 + JMP emit_lit_memmove_match_emit_encodeBlockAsm8B_memmove_move_33through64 + +emit_lit_memmove_match_emit_encodeBlockAsm8B_memmove_move_8: + MOVQ (DI), R10 + MOVQ R10, (CX) + JMP memmove_end_copy_match_emit_encodeBlockAsm8B + +emit_lit_memmove_match_emit_encodeBlockAsm8B_memmove_move_8through16: + MOVQ (DI), R10 + MOVQ -8(DI)(R9*1), DI + MOVQ R10, (CX) + MOVQ DI, -8(CX)(R9*1) + JMP memmove_end_copy_match_emit_encodeBlockAsm8B + +emit_lit_memmove_match_emit_encodeBlockAsm8B_memmove_move_17through32: + MOVOU (DI), X0 + MOVOU -16(DI)(R9*1), X1 + MOVOU X0, (CX) + MOVOU X1, -16(CX)(R9*1) + JMP memmove_end_copy_match_emit_encodeBlockAsm8B + +emit_lit_memmove_match_emit_encodeBlockAsm8B_memmove_move_33through64: + MOVOU (DI), X0 + MOVOU 16(DI), X1 + MOVOU -32(DI)(R9*1), X2 + MOVOU -16(DI)(R9*1), X3 + MOVOU X0, (CX) + MOVOU X1, 16(CX) + MOVOU X2, -32(CX)(R9*1) + MOVOU X3, -16(CX)(R9*1) + +memmove_end_copy_match_emit_encodeBlockAsm8B: + MOVQ R8, CX + JMP emit_literal_done_match_emit_encodeBlockAsm8B + +memmove_long_match_emit_encodeBlockAsm8B: + LEAQ (CX)(R9*1), R8 + + // genMemMoveLong + MOVOU (DI), X0 + MOVOU 16(DI), X1 + MOVOU -32(DI)(R9*1), X2 + MOVOU -16(DI)(R9*1), X3 + MOVQ R9, R11 + SHRQ $0x05, R11 + MOVQ CX, R10 + ANDL $0x0000001f, R10 + MOVQ $0x00000040, R12 + SUBQ R10, R12 + DECQ R11 + JA emit_lit_memmove_long_match_emit_encodeBlockAsm8Blarge_forward_sse_loop_32 + LEAQ -32(DI)(R12*1), R10 + LEAQ -32(CX)(R12*1), R13 + +emit_lit_memmove_long_match_emit_encodeBlockAsm8Blarge_big_loop_back: + MOVOU (R10), X4 + MOVOU 16(R10), X5 + MOVOA X4, (R13) + MOVOA X5, 16(R13) + ADDQ $0x20, R13 + ADDQ $0x20, R10 + ADDQ $0x20, R12 + DECQ R11 + JNA emit_lit_memmove_long_match_emit_encodeBlockAsm8Blarge_big_loop_back + +emit_lit_memmove_long_match_emit_encodeBlockAsm8Blarge_forward_sse_loop_32: + MOVOU -32(DI)(R12*1), X4 + MOVOU -16(DI)(R12*1), X5 + MOVOA X4, -32(CX)(R12*1) + MOVOA X5, -16(CX)(R12*1) + ADDQ $0x20, R12 + CMPQ R9, R12 + JAE emit_lit_memmove_long_match_emit_encodeBlockAsm8Blarge_forward_sse_loop_32 + MOVOU X0, (CX) + MOVOU X1, 16(CX) + MOVOU X2, -32(CX)(R9*1) + MOVOU X3, -16(CX)(R9*1) + MOVQ R8, CX + +emit_literal_done_match_emit_encodeBlockAsm8B: +match_nolit_loop_encodeBlockAsm8B: + MOVL DX, DI + SUBL SI, DI + MOVL DI, 16(SP) + ADDL $0x04, DX + ADDL $0x04, SI + MOVQ src_len+32(FP), DI + SUBL DX, DI + LEAQ (BX)(DX*1), R8 + LEAQ (BX)(SI*1), SI + + // matchLen + XORL R10, R10 + +matchlen_loopback_16_match_nolit_encodeBlockAsm8B: + CMPL DI, $0x10 + JB matchlen_match8_match_nolit_encodeBlockAsm8B + MOVQ (R8)(R10*1), R9 + MOVQ 8(R8)(R10*1), R11 + XORQ (SI)(R10*1), R9 + JNZ matchlen_bsf_8_match_nolit_encodeBlockAsm8B + XORQ 8(SI)(R10*1), R11 + JNZ matchlen_bsf_16match_nolit_encodeBlockAsm8B + LEAL -16(DI), DI + LEAL 16(R10), R10 + JMP matchlen_loopback_16_match_nolit_encodeBlockAsm8B + +matchlen_bsf_16match_nolit_encodeBlockAsm8B: +#ifdef GOAMD64_v3 + TZCNTQ R11, R11 + +#else + BSFQ R11, R11 + +#endif + SARQ $0x03, R11 + LEAL 8(R10)(R11*1), R10 + JMP match_nolit_end_encodeBlockAsm8B + +matchlen_match8_match_nolit_encodeBlockAsm8B: + CMPL DI, $0x08 + JB matchlen_match4_match_nolit_encodeBlockAsm8B + MOVQ (R8)(R10*1), R9 + XORQ (SI)(R10*1), R9 + JNZ matchlen_bsf_8_match_nolit_encodeBlockAsm8B + LEAL -8(DI), DI + LEAL 8(R10), R10 + JMP matchlen_match4_match_nolit_encodeBlockAsm8B + +matchlen_bsf_8_match_nolit_encodeBlockAsm8B: +#ifdef 
GOAMD64_v3
+ TZCNTQ R9, R9
+
+#else
+ BSFQ R9, R9
+
+#endif
+ SARQ $0x03, R9
+ LEAL (R10)(R9*1), R10
+ JMP match_nolit_end_encodeBlockAsm8B
+
+matchlen_match4_match_nolit_encodeBlockAsm8B:
+ CMPL DI, $0x04
+ JB matchlen_match2_match_nolit_encodeBlockAsm8B
+ MOVL (R8)(R10*1), R9
+ CMPL (SI)(R10*1), R9
+ JNE matchlen_match2_match_nolit_encodeBlockAsm8B
+ LEAL -4(DI), DI
+ LEAL 4(R10), R10
+
+matchlen_match2_match_nolit_encodeBlockAsm8B:
+ CMPL DI, $0x01
+ JE matchlen_match1_match_nolit_encodeBlockAsm8B
+ JB match_nolit_end_encodeBlockAsm8B
+ MOVW (R8)(R10*1), R9
+ CMPW (SI)(R10*1), R9
+ JNE matchlen_match1_match_nolit_encodeBlockAsm8B
+ LEAL 2(R10), R10
+ SUBL $0x02, DI
+ JZ match_nolit_end_encodeBlockAsm8B
+
+matchlen_match1_match_nolit_encodeBlockAsm8B:
+ MOVB (R8)(R10*1), R9
+ CMPB (SI)(R10*1), R9
+ JNE match_nolit_end_encodeBlockAsm8B
+ LEAL 1(R10), R10
+
+match_nolit_end_encodeBlockAsm8B:
+ ADDL R10, DX
+ MOVL 16(SP), SI
+ ADDL $0x04, R10
+ MOVL DX, 12(SP)
+
+ // emitCopy
+ CMPL R10, $0x40
+ JBE two_byte_offset_short_match_nolit_encodeBlockAsm8B
+ CMPL SI, $0x00000800
+ JAE long_offset_short_match_nolit_encodeBlockAsm8B
+ MOVL $0x00000001, DI
+ LEAL 16(DI), DI
+ MOVB SI, 1(CX)
+ SHRL $0x08, SI
+ SHLL $0x05, SI
+ ORL SI, DI
+ MOVB DI, (CX)
+ ADDQ $0x02, CX
+ SUBL $0x08, R10
+
+ // emitRepeat
+ LEAL -4(R10), R10
+ JMP cant_repeat_two_offset_match_nolit_encodeBlockAsm8B_emit_copy_short_2b
+ MOVL R10, SI
+ LEAL -4(R10), R10
+ CMPL SI, $0x08
+ JBE repeat_two_match_nolit_encodeBlockAsm8B_emit_copy_short_2b
+ CMPL SI, $0x0c
+ JAE cant_repeat_two_offset_match_nolit_encodeBlockAsm8B_emit_copy_short_2b
+
+cant_repeat_two_offset_match_nolit_encodeBlockAsm8B_emit_copy_short_2b:
+ CMPL R10, $0x00000104
+ JB repeat_three_match_nolit_encodeBlockAsm8B_emit_copy_short_2b
+ LEAL -256(R10), R10
+ MOVW $0x0019, (CX)
+ MOVW R10, 2(CX)
+ ADDQ $0x04, CX
+ JMP match_nolit_emitcopy_end_encodeBlockAsm8B
+
+repeat_three_match_nolit_encodeBlockAsm8B_emit_copy_short_2b:
+ LEAL -4(R10), R10
+ MOVW $0x0015, (CX)
+ MOVB R10, 2(CX)
+ ADDQ $0x03, CX
+ JMP match_nolit_emitcopy_end_encodeBlockAsm8B
+
+repeat_two_match_nolit_encodeBlockAsm8B_emit_copy_short_2b:
+ SHLL $0x02, R10
+ ORL $0x01, R10
+ MOVW R10, (CX)
+ ADDQ $0x02, CX
+ JMP match_nolit_emitcopy_end_encodeBlockAsm8B
+ XORQ DI, DI
+ LEAL 1(DI)(R10*4), R10
+ MOVB SI, 1(CX)
+ SARL $0x08, SI
+ SHLL $0x05, SI
+ ORL SI, R10
+ MOVB R10, (CX)
+ ADDQ $0x02, CX
+ JMP match_nolit_emitcopy_end_encodeBlockAsm8B
+
+long_offset_short_match_nolit_encodeBlockAsm8B:
+ MOVB $0xee, (CX)
+ MOVW SI, 1(CX)
+ LEAL -60(R10), R10
+ ADDQ $0x03, CX
+
+ // emitRepeat
+ MOVL R10, SI
+ LEAL -4(R10), R10
+ CMPL SI, $0x08
+ JBE repeat_two_match_nolit_encodeBlockAsm8B_emit_copy_short
+ CMPL SI, $0x0c
+ JAE cant_repeat_two_offset_match_nolit_encodeBlockAsm8B_emit_copy_short
+
+cant_repeat_two_offset_match_nolit_encodeBlockAsm8B_emit_copy_short:
+ CMPL R10, $0x00000104
+ JB repeat_three_match_nolit_encodeBlockAsm8B_emit_copy_short
+ LEAL -256(R10), R10
+ MOVW $0x0019, (CX)
+ MOVW R10, 2(CX)
+ ADDQ $0x04, CX
+ JMP match_nolit_emitcopy_end_encodeBlockAsm8B
+
+repeat_three_match_nolit_encodeBlockAsm8B_emit_copy_short:
+ LEAL -4(R10), R10
+ MOVW $0x0015, (CX)
+ MOVB R10, 2(CX)
+ ADDQ $0x03, CX
+ JMP match_nolit_emitcopy_end_encodeBlockAsm8B
+
+repeat_two_match_nolit_encodeBlockAsm8B_emit_copy_short:
+ SHLL $0x02, R10
+ ORL $0x01, R10
+ MOVW R10, (CX)
+ ADDQ $0x02, CX
+ JMP match_nolit_emitcopy_end_encodeBlockAsm8B
+ XORQ DI, DI
+ LEAL 1(DI)(R10*4), R10
+ MOVB SI, 1(CX)
+ SARL $0x08, SI
+ SHLL $0x05, SI
+ ORL SI, R10
+ MOVB R10, (CX)
+ ADDQ $0x02, CX
+ JMP match_nolit_emitcopy_end_encodeBlockAsm8B
+
+two_byte_offset_short_match_nolit_encodeBlockAsm8B:
+ MOVL R10, DI
+ SHLL $0x02, DI
+ CMPL R10, $0x0c
+ JAE emit_copy_three_match_nolit_encodeBlockAsm8B
+ LEAL -15(DI), DI
+ MOVB SI, 1(CX)
+ SHRL $0x08, SI
+ SHLL $0x05, SI
+ ORL SI, DI
+ MOVB DI, (CX)
+ ADDQ $0x02, CX
+ JMP match_nolit_emitcopy_end_encodeBlockAsm8B
+
+emit_copy_three_match_nolit_encodeBlockAsm8B:
+ LEAL -2(DI), DI
+ MOVB DI, (CX)
+ MOVW SI, 1(CX)
+ ADDQ $0x03, CX
+
+match_nolit_emitcopy_end_encodeBlockAsm8B:
+ CMPL DX, 8(SP)
+ JAE emit_remainder_encodeBlockAsm8B
+ MOVQ -2(BX)(DX*1), DI
+ CMPQ CX, (SP)
+ JB match_nolit_dst_ok_encodeBlockAsm8B
+ MOVQ $0x00000000, ret+56(FP)
+ RET
+
+match_nolit_dst_ok_encodeBlockAsm8B:
+ MOVQ $0x9e3779b1, R9
+ MOVQ DI, R8
+ SHRQ $0x10, DI
+ MOVQ DI, SI
+ SHLQ $0x20, R8
+ IMULQ R9, R8
+ SHRQ $0x38, R8
+ SHLQ $0x20, SI
+ IMULQ R9, SI
+ SHRQ $0x38, SI
+ LEAL -2(DX), R9
+ LEAQ (AX)(SI*4), R10
+ MOVL (R10), SI
+ MOVL R9, (AX)(R8*4)
+ MOVL DX, (R10)
+ CMPL (BX)(SI*1), DI
+ JEQ match_nolit_loop_encodeBlockAsm8B
+ INCL DX
+ JMP search_loop_encodeBlockAsm8B
+
+emit_remainder_encodeBlockAsm8B:
+ MOVQ src_len+32(FP), AX
+ SUBL 12(SP), AX
+ LEAQ 3(CX)(AX*1), AX
+ CMPQ AX, (SP)
+ JB emit_remainder_ok_encodeBlockAsm8B
+ MOVQ $0x00000000, ret+56(FP)
+ RET
+
+emit_remainder_ok_encodeBlockAsm8B:
+ MOVQ src_len+32(FP), AX
+ MOVL 12(SP), DX
+ CMPL DX, AX
+ JEQ emit_literal_done_emit_remainder_encodeBlockAsm8B
+ MOVL AX, SI
+ MOVL AX, 12(SP)
+ LEAQ (BX)(DX*1), AX
+ SUBL DX, SI
+ LEAL -1(SI), DX
+ CMPL DX, $0x3c
+ JB one_byte_emit_remainder_encodeBlockAsm8B
+ CMPL DX, $0x00000100
+ JB two_bytes_emit_remainder_encodeBlockAsm8B
+ JB three_bytes_emit_remainder_encodeBlockAsm8B
+
+three_bytes_emit_remainder_encodeBlockAsm8B:
+ MOVB $0xf4, (CX)
+ MOVW DX, 1(CX)
+ ADDQ $0x03, CX
+ JMP memmove_long_emit_remainder_encodeBlockAsm8B
+
+two_bytes_emit_remainder_encodeBlockAsm8B:
+ MOVB $0xf0, (CX)
+ MOVB DL, 1(CX)
+ ADDQ $0x02, CX
+ CMPL DX, $0x40
+ JB memmove_emit_remainder_encodeBlockAsm8B
+ JMP memmove_long_emit_remainder_encodeBlockAsm8B
+
+one_byte_emit_remainder_encodeBlockAsm8B:
+ SHLB $0x02, DL
+ MOVB DL, (CX)
+ ADDQ $0x01, CX
+
+memmove_emit_remainder_encodeBlockAsm8B:
+ LEAQ (CX)(SI*1), DX
+ MOVL SI, BX
+
+ // genMemMoveShort
+ CMPQ BX, $0x03
+ JB emit_lit_memmove_emit_remainder_encodeBlockAsm8B_memmove_move_1or2
+ JE emit_lit_memmove_emit_remainder_encodeBlockAsm8B_memmove_move_3
+ CMPQ BX, $0x08
+ JB emit_lit_memmove_emit_remainder_encodeBlockAsm8B_memmove_move_4through7
+ CMPQ BX, $0x10
+ JBE emit_lit_memmove_emit_remainder_encodeBlockAsm8B_memmove_move_8through16
+ CMPQ BX, $0x20
+ JBE emit_lit_memmove_emit_remainder_encodeBlockAsm8B_memmove_move_17through32
+ JMP emit_lit_memmove_emit_remainder_encodeBlockAsm8B_memmove_move_33through64
+
+emit_lit_memmove_emit_remainder_encodeBlockAsm8B_memmove_move_1or2:
+ MOVB (AX), SI
+ MOVB -1(AX)(BX*1), AL
+ MOVB SI, (CX)
+ MOVB AL, -1(CX)(BX*1)
+ JMP memmove_end_copy_emit_remainder_encodeBlockAsm8B
+
+emit_lit_memmove_emit_remainder_encodeBlockAsm8B_memmove_move_3:
+ MOVW (AX), SI
+ MOVB 2(AX), AL
+ MOVW SI, (CX)
+ MOVB AL, 2(CX)
+ JMP memmove_end_copy_emit_remainder_encodeBlockAsm8B
+
+emit_lit_memmove_emit_remainder_encodeBlockAsm8B_memmove_move_4through7:
+ MOVL (AX), SI
+ MOVL -4(AX)(BX*1), AX
+ MOVL SI, (CX)
+ MOVL AX, -4(CX)(BX*1)
+ JMP memmove_end_copy_emit_remainder_encodeBlockAsm8B
+
+emit_lit_memmove_emit_remainder_encodeBlockAsm8B_memmove_move_8through16:
+ MOVQ (AX), SI
+ MOVQ -8(AX)(BX*1), AX
+ MOVQ SI, (CX)
+ MOVQ AX, -8(CX)(BX*1)
+ JMP memmove_end_copy_emit_remainder_encodeBlockAsm8B
+
+emit_lit_memmove_emit_remainder_encodeBlockAsm8B_memmove_move_17through32:
+ MOVOU (AX), X0
+ MOVOU -16(AX)(BX*1), X1
+ MOVOU X0, (CX)
+ MOVOU X1, -16(CX)(BX*1)
+ JMP memmove_end_copy_emit_remainder_encodeBlockAsm8B
+
+emit_lit_memmove_emit_remainder_encodeBlockAsm8B_memmove_move_33through64:
+ MOVOU (AX), X0
+ MOVOU 16(AX), X1
+ MOVOU -32(AX)(BX*1), X2
+ MOVOU -16(AX)(BX*1), X3
+ MOVOU X0, (CX)
+ MOVOU X1, 16(CX)
+ MOVOU X2, -32(CX)(BX*1)
+ MOVOU X3, -16(CX)(BX*1)
+
+memmove_end_copy_emit_remainder_encodeBlockAsm8B:
+ MOVQ DX, CX
+ JMP emit_literal_done_emit_remainder_encodeBlockAsm8B
+
+memmove_long_emit_remainder_encodeBlockAsm8B:
+ LEAQ (CX)(SI*1), DX
+ MOVL SI, BX
+
+ // genMemMoveLong
+ MOVOU (AX), X0
+ MOVOU 16(AX), X1
+ MOVOU -32(AX)(BX*1), X2
+ MOVOU -16(AX)(BX*1), X3
+ MOVQ BX, DI
+ SHRQ $0x05, DI
+ MOVQ CX, SI
+ ANDL $0x0000001f, SI
+ MOVQ $0x00000040, R8
+ SUBQ SI, R8
+ DECQ DI
+ JA emit_lit_memmove_long_emit_remainder_encodeBlockAsm8Blarge_forward_sse_loop_32
+ LEAQ -32(AX)(R8*1), SI
+ LEAQ -32(CX)(R8*1), R9
+
+emit_lit_memmove_long_emit_remainder_encodeBlockAsm8Blarge_big_loop_back:
+ MOVOU (SI), X4
+ MOVOU 16(SI), X5
+ MOVOA X4, (R9)
+ MOVOA X5, 16(R9)
+ ADDQ $0x20, R9
+ ADDQ $0x20, SI
+ ADDQ $0x20, R8
+ DECQ DI
+ JNA emit_lit_memmove_long_emit_remainder_encodeBlockAsm8Blarge_big_loop_back
+
+emit_lit_memmove_long_emit_remainder_encodeBlockAsm8Blarge_forward_sse_loop_32:
+ MOVOU -32(AX)(R8*1), X4
+ MOVOU -16(AX)(R8*1), X5
+ MOVOA X4, -32(CX)(R8*1)
+ MOVOA X5, -16(CX)(R8*1)
+ ADDQ $0x20, R8
+ CMPQ BX, R8
+ JAE emit_lit_memmove_long_emit_remainder_encodeBlockAsm8Blarge_forward_sse_loop_32
+ MOVOU X0, (CX)
+ MOVOU X1, 16(CX)
+ MOVOU X2, -32(CX)(BX*1)
+ MOVOU X3, -16(CX)(BX*1)
+ MOVQ DX, CX
+
+emit_literal_done_emit_remainder_encodeBlockAsm8B:
+ MOVQ dst_base+0(FP), AX
+ SUBQ AX, CX
+ MOVQ CX, ret+56(FP)
+ RET
+
+// func encodeBetterBlockAsm(dst []byte, src []byte, tmp *[589824]byte) int
+// Requires: BMI, SSE2
+TEXT ·encodeBetterBlockAsm(SB), $24-64
+ MOVQ tmp+48(FP), AX
+ MOVQ dst_base+0(FP), CX
+ MOVQ $0x00001200, DX
+ MOVQ AX, BX
+ PXOR X0, X0
+
+zero_loop_encodeBetterBlockAsm:
+ MOVOU X0, (BX)
+ MOVOU X0, 16(BX)
+ MOVOU X0, 32(BX)
+ MOVOU X0, 48(BX)
+ MOVOU X0, 64(BX)
+ MOVOU X0, 80(BX)
+ MOVOU X0, 96(BX)
+ MOVOU X0, 112(BX)
+ ADDQ $0x80, BX
+ DECQ DX
+ JNZ zero_loop_encodeBetterBlockAsm
+ MOVL $0x00000000, 12(SP)
+ MOVQ src_len+32(FP), DX
+ LEAQ -6(DX), BX
+ LEAQ -8(DX), SI
+ MOVL SI, 8(SP)
+ SHRQ $0x05, DX
+ SUBL DX, BX
+ LEAQ (CX)(BX*1), BX
+ MOVQ BX, (SP)
+ MOVL $0x00000001, DX
+ MOVL $0x00000000, 16(SP)
+ MOVQ src_base+24(FP), BX
+
+search_loop_encodeBetterBlockAsm:
+ MOVL DX, SI
+ SUBL 12(SP), SI
+ SHRL $0x07, SI
+ CMPL SI, $0x63
+ JBE check_maxskip_ok_encodeBetterBlockAsm
+ LEAL 100(DX), SI
+ JMP check_maxskip_cont_encodeBetterBlockAsm
+
+check_maxskip_ok_encodeBetterBlockAsm:
+ LEAL 1(DX)(SI*1), SI
+
+check_maxskip_cont_encodeBetterBlockAsm:
+ CMPL SI, 8(SP)
+ JAE emit_remainder_encodeBetterBlockAsm
+ MOVQ (BX)(DX*1), DI
+ MOVL SI, 20(SP)
+ MOVQ $0x00cf1bbcdcbfa563, R9
+ MOVQ $0x9e3779b1, SI
+ MOVQ DI, R10
+ MOVQ DI, R11
+ SHLQ $0x08, R10
+ IMULQ R9, R10
+ SHRQ $0x2f, R10
+ SHLQ $0x20, R11
+ IMULQ SI, R11
+ SHRQ $0x32, R11
+ MOVL (AX)(R10*4), SI
+ MOVL 524288(AX)(R11*4), R8
+ MOVL DX, (AX)(R10*4)
+ MOVL DX, 524288(AX)(R11*4)
+ MOVQ (BX)(SI*1), R10
+ MOVQ (BX)(R8*1), R11
+ CMPQ R10, DI
+ JEQ candidate_match_encodeBetterBlockAsm
+ CMPQ R11, DI
+ JNE no_short_found_encodeBetterBlockAsm
+ MOVL R8, SI
+ JMP candidate_match_encodeBetterBlockAsm
+
+no_short_found_encodeBetterBlockAsm:
+ CMPL R10, DI
+ JEQ candidate_match_encodeBetterBlockAsm
+ CMPL R11, DI
+ JEQ candidateS_match_encodeBetterBlockAsm
+ MOVL 20(SP), DX
+ JMP search_loop_encodeBetterBlockAsm
+
+candidateS_match_encodeBetterBlockAsm:
+ SHRQ $0x08, DI
+ MOVQ DI, R10
+ SHLQ $0x08, R10
+ IMULQ R9, R10
+ SHRQ $0x2f, R10
+ MOVL (AX)(R10*4), SI
+ INCL DX
+ MOVL DX, (AX)(R10*4)
+ CMPL (BX)(SI*1), DI
+ JEQ candidate_match_encodeBetterBlockAsm
+ DECL DX
+ MOVL R8, SI
+
+candidate_match_encodeBetterBlockAsm:
+ MOVL 12(SP), DI
+ TESTL SI, SI
+ JZ match_extend_back_end_encodeBetterBlockAsm
+
+match_extend_back_loop_encodeBetterBlockAsm:
+ CMPL DX, DI
+ JBE match_extend_back_end_encodeBetterBlockAsm
+ MOVB -1(BX)(SI*1), R8
+ MOVB -1(BX)(DX*1), R9
+ CMPB R8, R9
+ JNE match_extend_back_end_encodeBetterBlockAsm
+ LEAL -1(DX), DX
+ DECL SI
+ JZ match_extend_back_end_encodeBetterBlockAsm
+ JMP match_extend_back_loop_encodeBetterBlockAsm
+
+match_extend_back_end_encodeBetterBlockAsm:
+ MOVL DX, DI
+ SUBL 12(SP), DI
+ LEAQ 5(CX)(DI*1), DI
+ CMPQ DI, (SP)
+ JB match_dst_size_check_encodeBetterBlockAsm
+ MOVQ $0x00000000, ret+56(FP)
+ RET
+
+match_dst_size_check_encodeBetterBlockAsm:
+ MOVL DX, DI
+ ADDL $0x04, DX
+ ADDL $0x04, SI
+ MOVQ src_len+32(FP), R8
+ SUBL DX, R8
+ LEAQ (BX)(DX*1), R9
+ LEAQ (BX)(SI*1), R10
+
+ // matchLen
+ XORL R12, R12
+
+matchlen_loopback_16_match_nolit_encodeBetterBlockAsm:
+ CMPL R8, $0x10
+ JB matchlen_match8_match_nolit_encodeBetterBlockAsm
+ MOVQ (R9)(R12*1), R11
+ MOVQ 8(R9)(R12*1), R13
+ XORQ (R10)(R12*1), R11
+ JNZ matchlen_bsf_8_match_nolit_encodeBetterBlockAsm
+ XORQ 8(R10)(R12*1), R13
+ JNZ matchlen_bsf_16match_nolit_encodeBetterBlockAsm
+ LEAL -16(R8), R8
+ LEAL 16(R12), R12
+ JMP matchlen_loopback_16_match_nolit_encodeBetterBlockAsm
+
+matchlen_bsf_16match_nolit_encodeBetterBlockAsm:
+#ifdef GOAMD64_v3
+ TZCNTQ R13, R13
+
+#else
+ BSFQ R13, R13
+
+#endif
+ SARQ $0x03, R13
+ LEAL 8(R12)(R13*1), R12
+ JMP match_nolit_end_encodeBetterBlockAsm
+
+matchlen_match8_match_nolit_encodeBetterBlockAsm:
+ CMPL R8, $0x08
+ JB matchlen_match4_match_nolit_encodeBetterBlockAsm
+ MOVQ (R9)(R12*1), R11
+ XORQ (R10)(R12*1), R11
+ JNZ matchlen_bsf_8_match_nolit_encodeBetterBlockAsm
+ LEAL -8(R8), R8
+ LEAL 8(R12), R12
+ JMP matchlen_match4_match_nolit_encodeBetterBlockAsm
+
+matchlen_bsf_8_match_nolit_encodeBetterBlockAsm:
+#ifdef GOAMD64_v3
+ TZCNTQ R11, R11
+
+#else
+ BSFQ R11, R11
+
+#endif
+ SARQ $0x03, R11
+ LEAL (R12)(R11*1), R12
+ JMP match_nolit_end_encodeBetterBlockAsm
+
+matchlen_match4_match_nolit_encodeBetterBlockAsm:
+ CMPL R8, $0x04
+ JB matchlen_match2_match_nolit_encodeBetterBlockAsm
+ MOVL (R9)(R12*1), R11
+ CMPL (R10)(R12*1), R11
+ JNE matchlen_match2_match_nolit_encodeBetterBlockAsm
+ LEAL -4(R8), R8
+ LEAL 4(R12), R12
+
+matchlen_match2_match_nolit_encodeBetterBlockAsm:
+ CMPL R8, $0x01
+ JE matchlen_match1_match_nolit_encodeBetterBlockAsm
+ JB match_nolit_end_encodeBetterBlockAsm
+ MOVW (R9)(R12*1), R11
+ CMPW (R10)(R12*1), R11
+ JNE matchlen_match1_match_nolit_encodeBetterBlockAsm
+ LEAL 2(R12), R12
+ SUBL $0x02, R8
+ JZ match_nolit_end_encodeBetterBlockAsm
+
+matchlen_match1_match_nolit_encodeBetterBlockAsm:
+ MOVB (R9)(R12*1), R11
+ CMPB (R10)(R12*1), R11
+ JNE match_nolit_end_encodeBetterBlockAsm
+ LEAL 1(R12), R12
+
+match_nolit_end_encodeBetterBlockAsm:
+ MOVL DX, R8
+ SUBL SI, R8
+
+ // Check if repeat
+ CMPL 16(SP), R8
+ JEQ match_is_repeat_encodeBetterBlockAsm
+ CMPL R12, $0x01
+ JA match_length_ok_encodeBetterBlockAsm
+ CMPL R8, $0x0000ffff
+ JBE match_length_ok_encodeBetterBlockAsm
+ MOVL 20(SP), DX
+ INCL DX
+ JMP search_loop_encodeBetterBlockAsm
+
+match_length_ok_encodeBetterBlockAsm:
+ MOVL R8, 16(SP)
+ MOVL 12(SP), SI
+ CMPL SI, DI
+ JEQ emit_literal_done_match_emit_encodeBetterBlockAsm
+ MOVL DI, R9
+ MOVL DI, 12(SP)
+ LEAQ (BX)(SI*1), R10
+ SUBL SI, R9
+ LEAL -1(R9), SI
+ CMPL SI, $0x3c
+ JB one_byte_match_emit_encodeBetterBlockAsm
+ CMPL SI, $0x00000100
+ JB two_bytes_match_emit_encodeBetterBlockAsm
+ CMPL SI, $0x00010000
+ JB three_bytes_match_emit_encodeBetterBlockAsm
+ CMPL SI, $0x01000000
+ JB four_bytes_match_emit_encodeBetterBlockAsm
+ MOVB $0xfc, (CX)
+ MOVL SI, 1(CX)
+ ADDQ $0x05, CX
+ JMP memmove_long_match_emit_encodeBetterBlockAsm
+
+four_bytes_match_emit_encodeBetterBlockAsm:
+ MOVL SI, R11
+ SHRL $0x10, R11
+ MOVB $0xf8, (CX)
+ MOVW SI, 1(CX)
+ MOVB R11, 3(CX)
+ ADDQ $0x04, CX
+ JMP memmove_long_match_emit_encodeBetterBlockAsm
+
+three_bytes_match_emit_encodeBetterBlockAsm:
+ MOVB $0xf4, (CX)
+ MOVW SI, 1(CX)
+ ADDQ $0x03, CX
+ JMP memmove_long_match_emit_encodeBetterBlockAsm
+
+two_bytes_match_emit_encodeBetterBlockAsm:
+ MOVB $0xf0, (CX)
+ MOVB SI, 1(CX)
+ ADDQ $0x02, CX
+ CMPL SI, $0x40
+ JB memmove_match_emit_encodeBetterBlockAsm
+ JMP memmove_long_match_emit_encodeBetterBlockAsm
+
+one_byte_match_emit_encodeBetterBlockAsm:
+ SHLB $0x02, SI
+ MOVB SI, (CX)
+ ADDQ $0x01, CX
+
+memmove_match_emit_encodeBetterBlockAsm:
+ LEAQ (CX)(R9*1), SI
+
+ // genMemMoveShort
+ CMPQ R9, $0x04
+ JBE emit_lit_memmove_match_emit_encodeBetterBlockAsm_memmove_move_4
+ CMPQ R9, $0x08
+ JB emit_lit_memmove_match_emit_encodeBetterBlockAsm_memmove_move_4through7
+ CMPQ R9, $0x10
+ JBE emit_lit_memmove_match_emit_encodeBetterBlockAsm_memmove_move_8through16
+ CMPQ R9, $0x20
+ JBE emit_lit_memmove_match_emit_encodeBetterBlockAsm_memmove_move_17through32
+ JMP emit_lit_memmove_match_emit_encodeBetterBlockAsm_memmove_move_33through64
+
+emit_lit_memmove_match_emit_encodeBetterBlockAsm_memmove_move_4:
+ MOVL (R10), R11
+ MOVL R11, (CX)
+ JMP memmove_end_copy_match_emit_encodeBetterBlockAsm
+
+emit_lit_memmove_match_emit_encodeBetterBlockAsm_memmove_move_4through7:
+ MOVL (R10), R11
+ MOVL -4(R10)(R9*1), R10
+ MOVL R11, (CX)
+ MOVL R10, -4(CX)(R9*1)
+ JMP memmove_end_copy_match_emit_encodeBetterBlockAsm
+
+emit_lit_memmove_match_emit_encodeBetterBlockAsm_memmove_move_8through16:
+ MOVQ (R10), R11
+ MOVQ -8(R10)(R9*1), R10
+ MOVQ R11, (CX)
+ MOVQ R10, -8(CX)(R9*1)
+ JMP memmove_end_copy_match_emit_encodeBetterBlockAsm
+
+emit_lit_memmove_match_emit_encodeBetterBlockAsm_memmove_move_17through32:
+ MOVOU (R10), X0
+ MOVOU -16(R10)(R9*1), X1
+ MOVOU X0, (CX)
+ MOVOU X1, -16(CX)(R9*1)
+ JMP memmove_end_copy_match_emit_encodeBetterBlockAsm
+
+emit_lit_memmove_match_emit_encodeBetterBlockAsm_memmove_move_33through64:
+ MOVOU (R10), X0
+ MOVOU 16(R10), X1
+ MOVOU -32(R10)(R9*1), X2
+ MOVOU -16(R10)(R9*1), X3
+ MOVOU X0, (CX)
+ MOVOU X1, 16(CX)
+ MOVOU X2, -32(CX)(R9*1)
+ MOVOU X3, -16(CX)(R9*1)
+
+memmove_end_copy_match_emit_encodeBetterBlockAsm:
+ MOVQ SI, CX
+ JMP emit_literal_done_match_emit_encodeBetterBlockAsm
+
+memmove_long_match_emit_encodeBetterBlockAsm:
+ LEAQ (CX)(R9*1), SI
+
+ // genMemMoveLong
+ MOVOU (R10), X0
+ MOVOU 16(R10), X1
+ MOVOU -32(R10)(R9*1), X2
+ MOVOU -16(R10)(R9*1), X3
+ MOVQ R9, R13
+ SHRQ $0x05, R13
+ MOVQ CX, R11
+ ANDL $0x0000001f, R11
+ MOVQ $0x00000040, R14
+ SUBQ R11, R14
+ DECQ R13
+ JA emit_lit_memmove_long_match_emit_encodeBetterBlockAsmlarge_forward_sse_loop_32
+ LEAQ -32(R10)(R14*1), R11
+ LEAQ -32(CX)(R14*1), R15
+
+emit_lit_memmove_long_match_emit_encodeBetterBlockAsmlarge_big_loop_back:
+ MOVOU (R11), X4
+ MOVOU 16(R11), X5
+ MOVOA X4, (R15)
+ MOVOA X5, 16(R15)
+ ADDQ $0x20, R15
+ ADDQ $0x20, R11
+ ADDQ $0x20, R14
+ DECQ R13
+ JNA emit_lit_memmove_long_match_emit_encodeBetterBlockAsmlarge_big_loop_back
+
+emit_lit_memmove_long_match_emit_encodeBetterBlockAsmlarge_forward_sse_loop_32:
+ MOVOU -32(R10)(R14*1), X4
+ MOVOU -16(R10)(R14*1), X5
+ MOVOA X4, -32(CX)(R14*1)
+ MOVOA X5, -16(CX)(R14*1)
+ ADDQ $0x20, R14
+ CMPQ R9, R14
+ JAE emit_lit_memmove_long_match_emit_encodeBetterBlockAsmlarge_forward_sse_loop_32
+ MOVOU X0, (CX)
+ MOVOU X1, 16(CX)
+ MOVOU X2, -32(CX)(R9*1)
+ MOVOU X3, -16(CX)(R9*1)
+ MOVQ SI, CX
+
+emit_literal_done_match_emit_encodeBetterBlockAsm:
+ ADDL R12, DX
+ ADDL $0x04, R12
+ MOVL DX, 12(SP)
+
+ // emitCopy
+ CMPL R8, $0x00010000
+ JB two_byte_offset_match_nolit_encodeBetterBlockAsm
+ CMPL R12, $0x40
+ JBE four_bytes_remain_match_nolit_encodeBetterBlockAsm
+ MOVB $0xff, (CX)
+ MOVL R8, 1(CX)
+ LEAL -64(R12), R12
+ ADDQ $0x05, CX
+ CMPL R12, $0x04
+ JB four_bytes_remain_match_nolit_encodeBetterBlockAsm
+
+ // emitRepeat
+emit_repeat_again_match_nolit_encodeBetterBlockAsm_emit_copy:
+ MOVL R12, SI
+ LEAL -4(R12), R12
+ CMPL SI, $0x08
+ JBE repeat_two_match_nolit_encodeBetterBlockAsm_emit_copy
+ CMPL SI, $0x0c
+ JAE cant_repeat_two_offset_match_nolit_encodeBetterBlockAsm_emit_copy
+ CMPL R8, $0x00000800
+ JB repeat_two_offset_match_nolit_encodeBetterBlockAsm_emit_copy
+
+cant_repeat_two_offset_match_nolit_encodeBetterBlockAsm_emit_copy:
+ CMPL R12, $0x00000104
+ JB repeat_three_match_nolit_encodeBetterBlockAsm_emit_copy
+ CMPL R12, $0x00010100
+ JB repeat_four_match_nolit_encodeBetterBlockAsm_emit_copy
+ CMPL R12, $0x0100ffff
+ JB repeat_five_match_nolit_encodeBetterBlockAsm_emit_copy
+ LEAL -16842747(R12), R12
+ MOVL $0xfffb001d, (CX)
+ MOVB $0xff, 4(CX)
+ ADDQ $0x05, CX
+ JMP emit_repeat_again_match_nolit_encodeBetterBlockAsm_emit_copy
+
+repeat_five_match_nolit_encodeBetterBlockAsm_emit_copy:
+ LEAL -65536(R12), R12
+ MOVL R12, R8
+ MOVW $0x001d, (CX)
+ MOVW R12, 2(CX)
+ SARL $0x10, R8
+ MOVB R8, 4(CX)
+ ADDQ $0x05, CX
+ JMP match_nolit_emitcopy_end_encodeBetterBlockAsm
+
+repeat_four_match_nolit_encodeBetterBlockAsm_emit_copy:
+ LEAL -256(R12), R12
+ MOVW $0x0019, (CX)
+ MOVW R12, 2(CX)
+ ADDQ $0x04, CX
+ JMP match_nolit_emitcopy_end_encodeBetterBlockAsm
+
+repeat_three_match_nolit_encodeBetterBlockAsm_emit_copy:
+ LEAL -4(R12), R12
+ MOVW $0x0015, (CX)
+ MOVB R12, 2(CX)
+ ADDQ $0x03, CX
+ JMP match_nolit_emitcopy_end_encodeBetterBlockAsm
+
+repeat_two_match_nolit_encodeBetterBlockAsm_emit_copy:
+ SHLL $0x02, R12
+ ORL $0x01, R12
+ MOVW R12, (CX)
+ ADDQ $0x02, CX
+ JMP match_nolit_emitcopy_end_encodeBetterBlockAsm
+
+repeat_two_offset_match_nolit_encodeBetterBlockAsm_emit_copy:
+ XORQ SI, SI
+ LEAL 1(SI)(R12*4), R12
+ MOVB R8, 1(CX)
+ SARL $0x08, R8
+ SHLL $0x05, R8
+ ORL R8, R12
+ MOVB R12, (CX)
+ ADDQ $0x02, CX
+ JMP match_nolit_emitcopy_end_encodeBetterBlockAsm
+
+four_bytes_remain_match_nolit_encodeBetterBlockAsm:
+ TESTL R12, R12
+ JZ match_nolit_emitcopy_end_encodeBetterBlockAsm
+ XORL SI, SI
+ LEAL -1(SI)(R12*4), R12
+ MOVB R12, (CX)
+ MOVL R8, 1(CX)
+ ADDQ $0x05, CX
+ JMP match_nolit_emitcopy_end_encodeBetterBlockAsm
+
+two_byte_offset_match_nolit_encodeBetterBlockAsm:
+ CMPL R12, $0x40
+ JBE two_byte_offset_short_match_nolit_encodeBetterBlockAsm
+ CMPL R8, $0x00000800
+ JAE long_offset_short_match_nolit_encodeBetterBlockAsm
+ MOVL $0x00000001, SI
+ LEAL 16(SI), SI
+ MOVB R8, 1(CX)
+ MOVL R8, R9
+ SHRL $0x08, R9
+ SHLL $0x05, R9
+ ORL R9, SI
+ MOVB SI, (CX)
+ ADDQ $0x02, CX
+ SUBL $0x08, R12
+
+ // emitRepeat
+ LEAL -4(R12), R12
+ JMP cant_repeat_two_offset_match_nolit_encodeBetterBlockAsm_emit_copy_short_2b
+
+emit_repeat_again_match_nolit_encodeBetterBlockAsm_emit_copy_short_2b:
+ MOVL R12, SI
+ LEAL -4(R12), R12
+ CMPL SI, $0x08
+ JBE repeat_two_match_nolit_encodeBetterBlockAsm_emit_copy_short_2b
+ CMPL SI, $0x0c
+ JAE cant_repeat_two_offset_match_nolit_encodeBetterBlockAsm_emit_copy_short_2b
+ CMPL R8, $0x00000800
+ JB repeat_two_offset_match_nolit_encodeBetterBlockAsm_emit_copy_short_2b
+
+cant_repeat_two_offset_match_nolit_encodeBetterBlockAsm_emit_copy_short_2b:
+ CMPL R12, $0x00000104
+ JB repeat_three_match_nolit_encodeBetterBlockAsm_emit_copy_short_2b
+ CMPL R12, $0x00010100
+ JB repeat_four_match_nolit_encodeBetterBlockAsm_emit_copy_short_2b
+ CMPL R12, $0x0100ffff
+ JB repeat_five_match_nolit_encodeBetterBlockAsm_emit_copy_short_2b
+ LEAL -16842747(R12), R12
+ MOVL $0xfffb001d, (CX)
+ MOVB $0xff, 4(CX)
+ ADDQ $0x05, CX
+ JMP emit_repeat_again_match_nolit_encodeBetterBlockAsm_emit_copy_short_2b
+
+repeat_five_match_nolit_encodeBetterBlockAsm_emit_copy_short_2b:
+ LEAL -65536(R12), R12
+ MOVL R12, R8
+ MOVW $0x001d, (CX)
+ MOVW R12, 2(CX)
+ SARL $0x10, R8
+ MOVB R8, 4(CX)
+ ADDQ $0x05, CX
+ JMP match_nolit_emitcopy_end_encodeBetterBlockAsm
+
+repeat_four_match_nolit_encodeBetterBlockAsm_emit_copy_short_2b:
+ LEAL -256(R12), R12
+ MOVW $0x0019, (CX)
+ MOVW R12, 2(CX)
+ ADDQ $0x04, CX
+ JMP match_nolit_emitcopy_end_encodeBetterBlockAsm
+
+repeat_three_match_nolit_encodeBetterBlockAsm_emit_copy_short_2b:
+ LEAL -4(R12), R12
+ MOVW $0x0015, (CX)
+ MOVB R12, 2(CX)
+ ADDQ $0x03, CX
+ JMP match_nolit_emitcopy_end_encodeBetterBlockAsm
+
+repeat_two_match_nolit_encodeBetterBlockAsm_emit_copy_short_2b:
+ SHLL $0x02, R12
+ ORL $0x01, R12
+ MOVW R12, (CX)
+ ADDQ $0x02, CX
+ JMP match_nolit_emitcopy_end_encodeBetterBlockAsm
+
+repeat_two_offset_match_nolit_encodeBetterBlockAsm_emit_copy_short_2b:
+ XORQ SI, SI
+ LEAL 1(SI)(R12*4), R12
+ MOVB R8, 1(CX)
+ SARL $0x08, R8
+ SHLL $0x05, R8
+ ORL R8, R12
+ MOVB R12, (CX)
+ ADDQ $0x02, CX
+ JMP match_nolit_emitcopy_end_encodeBetterBlockAsm
+
+long_offset_short_match_nolit_encodeBetterBlockAsm:
+ MOVB $0xee, (CX)
+ MOVW R8, 1(CX)
+ LEAL -60(R12), R12
+ ADDQ $0x03, CX
+
+ // emitRepeat
+emit_repeat_again_match_nolit_encodeBetterBlockAsm_emit_copy_short:
+ MOVL R12, SI
+ LEAL -4(R12), R12
+ CMPL SI, $0x08
+ JBE repeat_two_match_nolit_encodeBetterBlockAsm_emit_copy_short
+ CMPL SI, $0x0c
+ JAE cant_repeat_two_offset_match_nolit_encodeBetterBlockAsm_emit_copy_short
+ CMPL R8, $0x00000800
+ JB repeat_two_offset_match_nolit_encodeBetterBlockAsm_emit_copy_short
+
+cant_repeat_two_offset_match_nolit_encodeBetterBlockAsm_emit_copy_short:
+ CMPL R12, $0x00000104
+ JB repeat_three_match_nolit_encodeBetterBlockAsm_emit_copy_short
+ CMPL R12, $0x00010100
+ JB repeat_four_match_nolit_encodeBetterBlockAsm_emit_copy_short
+ CMPL R12, $0x0100ffff
+ JB repeat_five_match_nolit_encodeBetterBlockAsm_emit_copy_short
+ LEAL -16842747(R12), R12
+ MOVL $0xfffb001d, (CX)
+ MOVB $0xff, 4(CX)
+ ADDQ $0x05, CX
+ JMP emit_repeat_again_match_nolit_encodeBetterBlockAsm_emit_copy_short
+
+repeat_five_match_nolit_encodeBetterBlockAsm_emit_copy_short:
+ LEAL -65536(R12), R12
+ MOVL R12, R8
+ MOVW $0x001d, (CX)
+ MOVW R12, 2(CX)
+ SARL $0x10, R8
+ MOVB R8, 4(CX)
+ ADDQ $0x05, CX
+ JMP match_nolit_emitcopy_end_encodeBetterBlockAsm
+
+repeat_four_match_nolit_encodeBetterBlockAsm_emit_copy_short:
+ LEAL -256(R12), R12
+ MOVW $0x0019, (CX)
+ MOVW R12, 2(CX)
+ ADDQ $0x04, CX
+ JMP match_nolit_emitcopy_end_encodeBetterBlockAsm
+
+repeat_three_match_nolit_encodeBetterBlockAsm_emit_copy_short:
+ LEAL -4(R12), R12
+ MOVW $0x0015, (CX)
+ MOVB R12, 2(CX)
+ ADDQ $0x03, CX
+ JMP match_nolit_emitcopy_end_encodeBetterBlockAsm
+
+repeat_two_match_nolit_encodeBetterBlockAsm_emit_copy_short:
+ SHLL $0x02, R12
+ ORL $0x01, R12
+ MOVW R12, (CX)
+ ADDQ $0x02, CX
+ JMP match_nolit_emitcopy_end_encodeBetterBlockAsm
+
+repeat_two_offset_match_nolit_encodeBetterBlockAsm_emit_copy_short:
+ XORQ SI, SI
+ LEAL 1(SI)(R12*4), R12
+ MOVB R8, 1(CX)
+ SARL $0x08, R8
+ SHLL $0x05, R8
+ ORL R8, R12
+ MOVB R12, (CX)
+ ADDQ $0x02, CX
+ JMP match_nolit_emitcopy_end_encodeBetterBlockAsm
+
+two_byte_offset_short_match_nolit_encodeBetterBlockAsm:
+ MOVL R12, SI
+ SHLL $0x02, SI
+ CMPL R12, $0x0c
+ JAE emit_copy_three_match_nolit_encodeBetterBlockAsm
+ CMPL R8, $0x00000800
+ JAE emit_copy_three_match_nolit_encodeBetterBlockAsm
+ LEAL -15(SI), SI
+ MOVB R8, 1(CX)
+ SHRL $0x08, R8
+ SHLL $0x05, R8
+ ORL R8, SI
+ MOVB SI, (CX)
+ ADDQ $0x02, CX
+ JMP match_nolit_emitcopy_end_encodeBetterBlockAsm
+
+emit_copy_three_match_nolit_encodeBetterBlockAsm:
+ LEAL -2(SI), SI
+ MOVB SI, (CX)
+ MOVW R8, 1(CX)
+ ADDQ $0x03, CX
+ JMP match_nolit_emitcopy_end_encodeBetterBlockAsm
+
+match_is_repeat_encodeBetterBlockAsm:
+ MOVL 12(SP), SI
+ CMPL SI, DI
+ JEQ emit_literal_done_match_emit_repeat_encodeBetterBlockAsm
+ MOVL DI, R9
+ MOVL DI, 12(SP)
+ LEAQ (BX)(SI*1), R10
+ SUBL SI, R9
+ LEAL -1(R9), SI
+ CMPL SI, $0x3c
+ JB one_byte_match_emit_repeat_encodeBetterBlockAsm
+ CMPL SI, $0x00000100
+ JB two_bytes_match_emit_repeat_encodeBetterBlockAsm
+ CMPL SI, $0x00010000
+ JB three_bytes_match_emit_repeat_encodeBetterBlockAsm
+ CMPL SI, $0x01000000
+ JB four_bytes_match_emit_repeat_encodeBetterBlockAsm
+ MOVB $0xfc, (CX)
+ MOVL SI, 1(CX)
+ ADDQ $0x05, CX
+ JMP memmove_long_match_emit_repeat_encodeBetterBlockAsm
+
+four_bytes_match_emit_repeat_encodeBetterBlockAsm:
+ MOVL SI, R11
+ SHRL $0x10, R11
+ MOVB $0xf8, (CX)
+ MOVW SI, 1(CX)
+ MOVB R11, 3(CX)
+ ADDQ $0x04, CX
+ JMP memmove_long_match_emit_repeat_encodeBetterBlockAsm
+
+three_bytes_match_emit_repeat_encodeBetterBlockAsm:
+ MOVB $0xf4, (CX)
+ MOVW SI, 1(CX)
+ ADDQ $0x03, CX
+ JMP memmove_long_match_emit_repeat_encodeBetterBlockAsm
+
+two_bytes_match_emit_repeat_encodeBetterBlockAsm:
+ MOVB $0xf0, (CX)
+ MOVB SI, 1(CX)
+ ADDQ $0x02, CX
+ CMPL SI, $0x40
+ JB memmove_match_emit_repeat_encodeBetterBlockAsm
+ JMP memmove_long_match_emit_repeat_encodeBetterBlockAsm
+
+one_byte_match_emit_repeat_encodeBetterBlockAsm:
+ SHLB $0x02, SI
+ MOVB SI, (CX)
+ ADDQ $0x01, CX
+
+memmove_match_emit_repeat_encodeBetterBlockAsm:
+ LEAQ (CX)(R9*1), SI
+
+ // genMemMoveShort
+ CMPQ R9, $0x04
+ JBE emit_lit_memmove_match_emit_repeat_encodeBetterBlockAsm_memmove_move_4
+ CMPQ R9, $0x08
+ JB emit_lit_memmove_match_emit_repeat_encodeBetterBlockAsm_memmove_move_4through7
+ CMPQ R9, $0x10
+ JBE emit_lit_memmove_match_emit_repeat_encodeBetterBlockAsm_memmove_move_8through16
+ CMPQ R9, $0x20
+ JBE emit_lit_memmove_match_emit_repeat_encodeBetterBlockAsm_memmove_move_17through32
+ JMP emit_lit_memmove_match_emit_repeat_encodeBetterBlockAsm_memmove_move_33through64
+
+emit_lit_memmove_match_emit_repeat_encodeBetterBlockAsm_memmove_move_4:
+ MOVL (R10), R11
+ MOVL R11, (CX)
+ JMP memmove_end_copy_match_emit_repeat_encodeBetterBlockAsm
+
+emit_lit_memmove_match_emit_repeat_encodeBetterBlockAsm_memmove_move_4through7:
+ MOVL (R10), R11
+ MOVL -4(R10)(R9*1), R10
+ MOVL R11, (CX)
+ MOVL R10, -4(CX)(R9*1)
+ JMP memmove_end_copy_match_emit_repeat_encodeBetterBlockAsm
+
+emit_lit_memmove_match_emit_repeat_encodeBetterBlockAsm_memmove_move_8through16:
+ MOVQ (R10), R11
+ MOVQ -8(R10)(R9*1), R10
+ MOVQ R11, (CX)
+ MOVQ R10, -8(CX)(R9*1)
+ JMP memmove_end_copy_match_emit_repeat_encodeBetterBlockAsm
+
+emit_lit_memmove_match_emit_repeat_encodeBetterBlockAsm_memmove_move_17through32:
+ MOVOU (R10), X0
+ MOVOU -16(R10)(R9*1), X1
+ MOVOU X0, (CX)
+ MOVOU X1, -16(CX)(R9*1)
+ JMP memmove_end_copy_match_emit_repeat_encodeBetterBlockAsm
+
+emit_lit_memmove_match_emit_repeat_encodeBetterBlockAsm_memmove_move_33through64:
+ MOVOU (R10), X0
+ MOVOU 16(R10), X1
+ MOVOU -32(R10)(R9*1), X2
+ MOVOU -16(R10)(R9*1), X3
+ MOVOU X0, (CX)
+ MOVOU X1, 16(CX)
+ MOVOU X2, -32(CX)(R9*1)
+ MOVOU X3, -16(CX)(R9*1)
+
+memmove_end_copy_match_emit_repeat_encodeBetterBlockAsm:
+ MOVQ SI, CX
+ JMP emit_literal_done_match_emit_repeat_encodeBetterBlockAsm
+
+memmove_long_match_emit_repeat_encodeBetterBlockAsm:
+ LEAQ (CX)(R9*1), SI
+
+ // genMemMoveLong
+ MOVOU (R10), X0
+ MOVOU 16(R10), X1
+ MOVOU -32(R10)(R9*1), X2
+ MOVOU -16(R10)(R9*1), X3
+ MOVQ R9, R13
+ SHRQ $0x05, R13
+ MOVQ CX, R11
+ ANDL $0x0000001f, R11
+ MOVQ $0x00000040, R14
+ SUBQ R11, R14
+ DECQ R13
+ JA emit_lit_memmove_long_match_emit_repeat_encodeBetterBlockAsmlarge_forward_sse_loop_32
+ LEAQ -32(R10)(R14*1), R11
+ LEAQ -32(CX)(R14*1), R15
+
+emit_lit_memmove_long_match_emit_repeat_encodeBetterBlockAsmlarge_big_loop_back:
+ MOVOU (R11), X4
+ MOVOU 16(R11), X5
+ MOVOA X4, (R15)
+ MOVOA X5, 16(R15)
+ ADDQ $0x20, R15
+ ADDQ $0x20, R11
+ ADDQ $0x20, R14
+ DECQ R13
+ JNA emit_lit_memmove_long_match_emit_repeat_encodeBetterBlockAsmlarge_big_loop_back
+
+emit_lit_memmove_long_match_emit_repeat_encodeBetterBlockAsmlarge_forward_sse_loop_32:
+ MOVOU -32(R10)(R14*1), X4
+ MOVOU -16(R10)(R14*1), X5
+ MOVOA X4, -32(CX)(R14*1)
+ MOVOA X5, -16(CX)(R14*1)
+ ADDQ $0x20, R14
+ CMPQ R9, R14
+ JAE emit_lit_memmove_long_match_emit_repeat_encodeBetterBlockAsmlarge_forward_sse_loop_32
+ MOVOU X0, (CX)
+ MOVOU X1, 16(CX)
+ MOVOU X2, -32(CX)(R9*1)
+ MOVOU X3, -16(CX)(R9*1)
+ MOVQ SI, CX
+
+emit_literal_done_match_emit_repeat_encodeBetterBlockAsm:
+ ADDL R12, DX
+ ADDL $0x04, R12
+ MOVL DX, 12(SP)
+
+ // emitRepeat
+emit_repeat_again_match_nolit_repeat_encodeBetterBlockAsm:
+ MOVL R12, SI
+ LEAL -4(R12), R12
+ CMPL SI, $0x08
+ JBE repeat_two_match_nolit_repeat_encodeBetterBlockAsm
+ CMPL SI, $0x0c
+ JAE cant_repeat_two_offset_match_nolit_repeat_encodeBetterBlockAsm
+ CMPL R8, $0x00000800
+ JB repeat_two_offset_match_nolit_repeat_encodeBetterBlockAsm
+
+cant_repeat_two_offset_match_nolit_repeat_encodeBetterBlockAsm:
+ CMPL R12, $0x00000104
+ JB repeat_three_match_nolit_repeat_encodeBetterBlockAsm
+ CMPL R12, $0x00010100
+ JB repeat_four_match_nolit_repeat_encodeBetterBlockAsm
+ CMPL R12, $0x0100ffff
+ JB repeat_five_match_nolit_repeat_encodeBetterBlockAsm
+ LEAL -16842747(R12), R12
+ MOVL $0xfffb001d, (CX)
+ MOVB $0xff, 4(CX)
+ ADDQ $0x05, CX
+ JMP emit_repeat_again_match_nolit_repeat_encodeBetterBlockAsm
+
+repeat_five_match_nolit_repeat_encodeBetterBlockAsm:
+ LEAL -65536(R12), R12
+ MOVL R12, R8
+ MOVW $0x001d, (CX)
+ MOVW R12, 2(CX)
+ SARL $0x10, R8
+ MOVB R8, 4(CX)
+ ADDQ $0x05, CX
+ JMP match_nolit_emitcopy_end_encodeBetterBlockAsm
+
+repeat_four_match_nolit_repeat_encodeBetterBlockAsm:
+ LEAL -256(R12), R12
+ MOVW $0x0019, (CX)
+ MOVW R12, 2(CX)
+ ADDQ $0x04, CX
+ JMP match_nolit_emitcopy_end_encodeBetterBlockAsm
+
+repeat_three_match_nolit_repeat_encodeBetterBlockAsm:
+ LEAL -4(R12), R12
+ MOVW $0x0015, (CX)
+ MOVB R12, 2(CX)
+ ADDQ $0x03, CX
+ JMP match_nolit_emitcopy_end_encodeBetterBlockAsm
+
+repeat_two_match_nolit_repeat_encodeBetterBlockAsm:
+ SHLL $0x02, R12
+ ORL $0x01, R12
+ MOVW R12, (CX)
+ ADDQ $0x02, CX
+ JMP match_nolit_emitcopy_end_encodeBetterBlockAsm
+
+repeat_two_offset_match_nolit_repeat_encodeBetterBlockAsm:
+ XORQ SI, SI
+ LEAL 1(SI)(R12*4), R12
+ MOVB R8, 1(CX)
+ SARL $0x08, R8
+ SHLL $0x05, R8
+ ORL R8, R12
+ MOVB R12, (CX)
+ ADDQ $0x02, CX
+
+match_nolit_emitcopy_end_encodeBetterBlockAsm:
+ CMPL DX, 8(SP)
+ JAE emit_remainder_encodeBetterBlockAsm
+ CMPQ CX, (SP)
+ JB match_nolit_dst_ok_encodeBetterBlockAsm
+ MOVQ $0x00000000, ret+56(FP)
+ RET
+
+match_nolit_dst_ok_encodeBetterBlockAsm:
+ MOVQ $0x00cf1bbcdcbfa563, SI
+ MOVQ $0x9e3779b1, R8
+ LEAQ 1(DI), DI
+ LEAQ -2(DX), R9
+ MOVQ (BX)(DI*1), R10
+ MOVQ 1(BX)(DI*1), R11
+ MOVQ (BX)(R9*1), R12
+ MOVQ 1(BX)(R9*1), R13
+ SHLQ $0x08, R10
+ IMULQ SI, R10
+ SHRQ $0x2f, R10
+ SHLQ $0x20, R11
+ IMULQ R8, R11
+ SHRQ $0x32, R11
+ SHLQ $0x08, R12
+ IMULQ SI, R12
+ SHRQ $0x2f, R12
+ SHLQ $0x20, R13
+ IMULQ R8, R13
+ SHRQ $0x32, R13
+ LEAQ 1(DI), R8
+ LEAQ 1(R9), R14
+ MOVL DI, (AX)(R10*4)
+ MOVL R9, (AX)(R12*4)
+ MOVL R8, 524288(AX)(R11*4)
+ MOVL R14, 524288(AX)(R13*4)
+ LEAQ 1(R9)(DI*1), R8
+ SHRQ $0x01, R8
+ ADDQ $0x01, DI
+ SUBQ $0x01, R9
+
+index_loop_encodeBetterBlockAsm:
+ CMPQ R8, R9
+ JAE search_loop_encodeBetterBlockAsm
+ MOVQ (BX)(DI*1), R10
+ MOVQ (BX)(R8*1), R11
+ SHLQ $0x08, R10
+ IMULQ SI, R10
+ SHRQ $0x2f, R10
+ SHLQ $0x08, R11
+ IMULQ SI, R11
+ SHRQ $0x2f, R11
+ MOVL DI, (AX)(R10*4)
+ MOVL R8, (AX)(R11*4)
+ ADDQ $0x02, DI
+ ADDQ $0x02, R8
+ JMP index_loop_encodeBetterBlockAsm
+
+emit_remainder_encodeBetterBlockAsm:
+ MOVQ src_len+32(FP), AX
+ SUBL 12(SP), AX
+ LEAQ 5(CX)(AX*1), AX
+ CMPQ AX, (SP)
+ JB emit_remainder_ok_encodeBetterBlockAsm
+ MOVQ $0x00000000, ret+56(FP)
+ RET
+
+emit_remainder_ok_encodeBetterBlockAsm:
+ MOVQ src_len+32(FP), AX
+ MOVL 12(SP), DX
+ CMPL DX, AX
+ JEQ emit_literal_done_emit_remainder_encodeBetterBlockAsm
+ MOVL AX, SI
+ MOVL AX, 12(SP)
+ LEAQ (BX)(DX*1), AX
+ SUBL DX, SI
+ LEAL -1(SI), DX
+ CMPL DX, $0x3c
+ JB one_byte_emit_remainder_encodeBetterBlockAsm
+ CMPL DX, $0x00000100
+ JB two_bytes_emit_remainder_encodeBetterBlockAsm
+ CMPL DX, $0x00010000
+ JB three_bytes_emit_remainder_encodeBetterBlockAsm
+ CMPL DX, $0x01000000
+ JB four_bytes_emit_remainder_encodeBetterBlockAsm
+ MOVB $0xfc, (CX)
+ MOVL DX, 1(CX)
+ ADDQ $0x05, CX
+ JMP memmove_long_emit_remainder_encodeBetterBlockAsm
+
+four_bytes_emit_remainder_encodeBetterBlockAsm:
+ MOVL DX, BX
+ SHRL $0x10, BX
+ MOVB $0xf8, (CX)
+ MOVW DX, 1(CX)
+ MOVB BL, 3(CX)
+ ADDQ $0x04, CX
+ JMP memmove_long_emit_remainder_encodeBetterBlockAsm
+
+three_bytes_emit_remainder_encodeBetterBlockAsm:
+ MOVB $0xf4, (CX)
+ MOVW DX, 1(CX)
+ ADDQ $0x03, CX
+ JMP memmove_long_emit_remainder_encodeBetterBlockAsm
+
+two_bytes_emit_remainder_encodeBetterBlockAsm:
+ MOVB $0xf0, (CX)
+ MOVB DL, 1(CX)
+ ADDQ $0x02, CX
+ CMPL DX, $0x40
+ JB memmove_emit_remainder_encodeBetterBlockAsm
+ JMP memmove_long_emit_remainder_encodeBetterBlockAsm
+
+one_byte_emit_remainder_encodeBetterBlockAsm:
+ SHLB $0x02, DL
+ MOVB DL, (CX)
+ ADDQ $0x01, CX
+
+memmove_emit_remainder_encodeBetterBlockAsm:
+ LEAQ (CX)(SI*1), DX
+ MOVL SI, BX
+
+ // genMemMoveShort
+ CMPQ BX, $0x03
+ JB emit_lit_memmove_emit_remainder_encodeBetterBlockAsm_memmove_move_1or2
+ JE emit_lit_memmove_emit_remainder_encodeBetterBlockAsm_memmove_move_3
+ CMPQ BX, $0x08
+ JB emit_lit_memmove_emit_remainder_encodeBetterBlockAsm_memmove_move_4through7
+ CMPQ BX, $0x10
+ JBE emit_lit_memmove_emit_remainder_encodeBetterBlockAsm_memmove_move_8through16
+ CMPQ BX, $0x20
+ JBE emit_lit_memmove_emit_remainder_encodeBetterBlockAsm_memmove_move_17through32
+ JMP emit_lit_memmove_emit_remainder_encodeBetterBlockAsm_memmove_move_33through64
+
+emit_lit_memmove_emit_remainder_encodeBetterBlockAsm_memmove_move_1or2:
+ MOVB (AX), SI
+ MOVB -1(AX)(BX*1), AL
+ MOVB SI, (CX)
+ MOVB AL, -1(CX)(BX*1)
+ JMP memmove_end_copy_emit_remainder_encodeBetterBlockAsm
+
+emit_lit_memmove_emit_remainder_encodeBetterBlockAsm_memmove_move_3:
+ MOVW (AX), SI
+ MOVB 2(AX), AL
+ MOVW SI, (CX)
+ MOVB AL, 2(CX)
+ JMP memmove_end_copy_emit_remainder_encodeBetterBlockAsm
+
+emit_lit_memmove_emit_remainder_encodeBetterBlockAsm_memmove_move_4through7:
+ MOVL (AX), SI
+ MOVL -4(AX)(BX*1), AX
+ MOVL SI, (CX)
+ MOVL AX, -4(CX)(BX*1)
+ JMP memmove_end_copy_emit_remainder_encodeBetterBlockAsm
+
+emit_lit_memmove_emit_remainder_encodeBetterBlockAsm_memmove_move_8through16:
+ MOVQ (AX), SI
+ MOVQ -8(AX)(BX*1), AX
+ MOVQ SI, (CX)
+ MOVQ AX, -8(CX)(BX*1)
+ JMP memmove_end_copy_emit_remainder_encodeBetterBlockAsm
+
+emit_lit_memmove_emit_remainder_encodeBetterBlockAsm_memmove_move_17through32:
+ MOVOU (AX), X0
+ MOVOU -16(AX)(BX*1), X1
+ MOVOU X0, (CX)
+ MOVOU X1, -16(CX)(BX*1)
+ JMP memmove_end_copy_emit_remainder_encodeBetterBlockAsm
+
+emit_lit_memmove_emit_remainder_encodeBetterBlockAsm_memmove_move_33through64:
+ MOVOU (AX), X0
+ MOVOU 16(AX), X1
+ MOVOU -32(AX)(BX*1), X2
+ MOVOU -16(AX)(BX*1), X3
+ MOVOU X0, (CX)
+ MOVOU X1, 16(CX)
+ MOVOU X2, -32(CX)(BX*1)
+ MOVOU X3, -16(CX)(BX*1)
+
+memmove_end_copy_emit_remainder_encodeBetterBlockAsm:
+ MOVQ DX, CX
+ JMP emit_literal_done_emit_remainder_encodeBetterBlockAsm
+
+memmove_long_emit_remainder_encodeBetterBlockAsm:
+ LEAQ (CX)(SI*1), DX
+ MOVL SI, BX
+
+ // genMemMoveLong
+ MOVOU (AX), X0
+ MOVOU 16(AX), X1
+ MOVOU -32(AX)(BX*1), X2
+ MOVOU -16(AX)(BX*1), X3
+ MOVQ BX, DI
+ SHRQ $0x05, DI
+ MOVQ CX, SI
+ ANDL $0x0000001f, SI
+ MOVQ $0x00000040, R8
+ SUBQ SI, R8
+ DECQ DI
+ JA emit_lit_memmove_long_emit_remainder_encodeBetterBlockAsmlarge_forward_sse_loop_32
+ LEAQ -32(AX)(R8*1), SI
+ LEAQ -32(CX)(R8*1), R9
+
+emit_lit_memmove_long_emit_remainder_encodeBetterBlockAsmlarge_big_loop_back:
+ MOVOU (SI), X4
+ MOVOU 16(SI), X5
+ MOVOA X4, (R9)
+ MOVOA X5, 16(R9)
+ ADDQ $0x20, R9
+ ADDQ $0x20, SI
+ ADDQ $0x20, R8
+ DECQ DI
+ JNA emit_lit_memmove_long_emit_remainder_encodeBetterBlockAsmlarge_big_loop_back
+
+emit_lit_memmove_long_emit_remainder_encodeBetterBlockAsmlarge_forward_sse_loop_32:
+ MOVOU -32(AX)(R8*1), X4
+ MOVOU -16(AX)(R8*1), X5
+ MOVOA X4, -32(CX)(R8*1)
+ MOVOA X5, -16(CX)(R8*1)
+ ADDQ $0x20, R8
+ CMPQ BX, R8
+ JAE emit_lit_memmove_long_emit_remainder_encodeBetterBlockAsmlarge_forward_sse_loop_32
+ MOVOU X0, (CX)
+ MOVOU X1, 16(CX)
+ MOVOU X2, -32(CX)(BX*1)
+ MOVOU X3, -16(CX)(BX*1)
+ MOVQ DX, CX
+
+emit_literal_done_emit_remainder_encodeBetterBlockAsm:
+ MOVQ dst_base+0(FP), AX
+ SUBQ AX, CX
+ MOVQ CX, ret+56(FP)
+ RET
+
+// func encodeBetterBlockAsm4MB(dst []byte, src []byte, tmp *[589824]byte) int
+// Requires: BMI, SSE2
+TEXT ·encodeBetterBlockAsm4MB(SB), $24-64
+ MOVQ tmp+48(FP), AX
+ MOVQ dst_base+0(FP), CX
+ MOVQ $0x00001200, DX
+ MOVQ AX, BX
+ PXOR X0, X0
+
+zero_loop_encodeBetterBlockAsm4MB:
+ MOVOU X0, (BX)
+ MOVOU X0, 16(BX)
+ MOVOU X0, 32(BX)
+ MOVOU X0, 48(BX)
+ MOVOU X0, 64(BX)
+ MOVOU X0, 80(BX)
+ MOVOU X0, 96(BX)
+ MOVOU X0, 112(BX)
+ ADDQ $0x80, BX
+ DECQ DX
+ JNZ zero_loop_encodeBetterBlockAsm4MB
+ MOVL $0x00000000, 12(SP)
+ MOVQ src_len+32(FP), DX
+ LEAQ -6(DX), BX
+ LEAQ -8(DX), SI
+ MOVL SI, 8(SP)
+ SHRQ $0x05, DX
+ SUBL DX, BX
+ LEAQ (CX)(BX*1), BX
+ MOVQ BX, (SP)
+ MOVL $0x00000001, DX
+ MOVL $0x00000000, 16(SP)
+ MOVQ src_base+24(FP), BX
+
+search_loop_encodeBetterBlockAsm4MB:
+ MOVL DX, SI
+ SUBL 12(SP), SI
+ SHRL $0x07, SI
+ CMPL SI, $0x63
+ JBE check_maxskip_ok_encodeBetterBlockAsm4MB
+ LEAL 100(DX), SI
+ JMP check_maxskip_cont_encodeBetterBlockAsm4MB
+
+check_maxskip_ok_encodeBetterBlockAsm4MB:
+ LEAL 1(DX)(SI*1), SI
+
+check_maxskip_cont_encodeBetterBlockAsm4MB:
+ CMPL SI, 8(SP)
+ JAE emit_remainder_encodeBetterBlockAsm4MB
+ MOVQ (BX)(DX*1), DI
+ MOVL SI, 20(SP)
+ MOVQ $0x00cf1bbcdcbfa563, R9
+ MOVQ $0x9e3779b1, SI
+ MOVQ DI, R10
+ MOVQ DI, R11
+ SHLQ $0x08, R10
+ IMULQ R9, R10
+ SHRQ $0x2f, R10
+ SHLQ $0x20, R11
+ IMULQ SI, R11
+ SHRQ $0x32, R11
+ MOVL (AX)(R10*4), SI
+ MOVL 524288(AX)(R11*4), R8
+ MOVL DX, (AX)(R10*4)
+ MOVL DX, 524288(AX)(R11*4)
+ MOVQ (BX)(SI*1), R10
+ MOVQ (BX)(R8*1), R11
+ CMPQ R10, DI
+ JEQ candidate_match_encodeBetterBlockAsm4MB
+ CMPQ R11, DI
+ JNE no_short_found_encodeBetterBlockAsm4MB
+ MOVL R8, SI
+ JMP candidate_match_encodeBetterBlockAsm4MB
+
+no_short_found_encodeBetterBlockAsm4MB:
+ CMPL R10, DI
+ JEQ candidate_match_encodeBetterBlockAsm4MB
+ CMPL R11, DI
+ JEQ candidateS_match_encodeBetterBlockAsm4MB
+ MOVL 20(SP), DX
+ JMP search_loop_encodeBetterBlockAsm4MB
+
+candidateS_match_encodeBetterBlockAsm4MB:
+ SHRQ $0x08, DI
+ MOVQ DI, R10
+ SHLQ $0x08, R10
+ IMULQ R9, R10
+ SHRQ $0x2f, R10
+ MOVL (AX)(R10*4), SI
+ INCL DX
+ MOVL DX, (AX)(R10*4)
+ CMPL (BX)(SI*1), DI
+ JEQ candidate_match_encodeBetterBlockAsm4MB
+ DECL DX
+ MOVL R8, SI
+
+candidate_match_encodeBetterBlockAsm4MB:
+ MOVL 12(SP), DI
+ TESTL SI, SI
+ JZ match_extend_back_end_encodeBetterBlockAsm4MB
+
+match_extend_back_loop_encodeBetterBlockAsm4MB:
+ CMPL DX, DI
+ JBE match_extend_back_end_encodeBetterBlockAsm4MB
+ MOVB -1(BX)(SI*1), R8
+ MOVB -1(BX)(DX*1), R9
+ CMPB R8, R9
+ JNE match_extend_back_end_encodeBetterBlockAsm4MB
+ LEAL -1(DX), DX
+ DECL SI
+ JZ match_extend_back_end_encodeBetterBlockAsm4MB
+ JMP match_extend_back_loop_encodeBetterBlockAsm4MB
+
+match_extend_back_end_encodeBetterBlockAsm4MB:
+ MOVL DX, DI
+ SUBL 12(SP), DI
+ LEAQ 4(CX)(DI*1), DI
+ CMPQ DI, (SP)
+ JB match_dst_size_check_encodeBetterBlockAsm4MB
+ MOVQ $0x00000000, ret+56(FP)
+ RET
+
+match_dst_size_check_encodeBetterBlockAsm4MB:
+ MOVL DX, DI
+ ADDL $0x04, DX
+ ADDL $0x04, SI
+ MOVQ src_len+32(FP), R8
+ SUBL DX, R8
+ LEAQ (BX)(DX*1), R9
+ LEAQ (BX)(SI*1), R10
+
+ // matchLen
+ XORL R12, R12
+
+matchlen_loopback_16_match_nolit_encodeBetterBlockAsm4MB:
+ CMPL R8, $0x10
+ JB matchlen_match8_match_nolit_encodeBetterBlockAsm4MB
+ MOVQ (R9)(R12*1), R11
+ MOVQ 8(R9)(R12*1), R13
+ XORQ (R10)(R12*1), R11
+ JNZ matchlen_bsf_8_match_nolit_encodeBetterBlockAsm4MB
+ XORQ 8(R10)(R12*1), R13
+ JNZ matchlen_bsf_16match_nolit_encodeBetterBlockAsm4MB
+ LEAL -16(R8), R8
+ LEAL 16(R12), R12
+ JMP matchlen_loopback_16_match_nolit_encodeBetterBlockAsm4MB
+
+matchlen_bsf_16match_nolit_encodeBetterBlockAsm4MB:
+#ifdef GOAMD64_v3
+ TZCNTQ R13, R13
+
+#else
+ BSFQ R13, R13
+
+#endif
+ SARQ $0x03, R13
+ LEAL 8(R12)(R13*1), R12
+ JMP match_nolit_end_encodeBetterBlockAsm4MB
+
+matchlen_match8_match_nolit_encodeBetterBlockAsm4MB:
+ CMPL R8, $0x08
+ JB matchlen_match4_match_nolit_encodeBetterBlockAsm4MB
+ MOVQ (R9)(R12*1), R11
+ XORQ (R10)(R12*1), R11
+ JNZ matchlen_bsf_8_match_nolit_encodeBetterBlockAsm4MB
+ LEAL -8(R8), R8
+ LEAL 8(R12), R12
+ JMP matchlen_match4_match_nolit_encodeBetterBlockAsm4MB
+
+matchlen_bsf_8_match_nolit_encodeBetterBlockAsm4MB:
+#ifdef GOAMD64_v3
+ TZCNTQ R11, R11
+
+#else
+ BSFQ R11, R11
+
+#endif
+ SARQ $0x03, R11
+ LEAL (R12)(R11*1), R12
+ JMP match_nolit_end_encodeBetterBlockAsm4MB
+
+matchlen_match4_match_nolit_encodeBetterBlockAsm4MB:
+ CMPL R8, $0x04
+ JB matchlen_match2_match_nolit_encodeBetterBlockAsm4MB
+ MOVL (R9)(R12*1), R11
+ CMPL (R10)(R12*1), R11
+ JNE matchlen_match2_match_nolit_encodeBetterBlockAsm4MB
+ LEAL -4(R8), R8
+ LEAL 4(R12), R12
+
+matchlen_match2_match_nolit_encodeBetterBlockAsm4MB:
+ CMPL R8, $0x01
+ JE matchlen_match1_match_nolit_encodeBetterBlockAsm4MB
+ JB match_nolit_end_encodeBetterBlockAsm4MB
+ MOVW (R9)(R12*1), R11
+ CMPW (R10)(R12*1), R11
+ JNE matchlen_match1_match_nolit_encodeBetterBlockAsm4MB
+ LEAL 2(R12), R12
+ SUBL $0x02, R8
+ JZ match_nolit_end_encodeBetterBlockAsm4MB
+
+matchlen_match1_match_nolit_encodeBetterBlockAsm4MB:
+ MOVB (R9)(R12*1), R11
+ CMPB (R10)(R12*1), R11
+ JNE match_nolit_end_encodeBetterBlockAsm4MB
+ LEAL 1(R12), R12
+
+match_nolit_end_encodeBetterBlockAsm4MB:
+ MOVL DX, R8
+ SUBL SI, R8
+
+ // Check if repeat
+ CMPL 16(SP), R8
+ JEQ match_is_repeat_encodeBetterBlockAsm4MB
+ CMPL R12, $0x01
+ JA match_length_ok_encodeBetterBlockAsm4MB
+ CMPL R8, $0x0000ffff
+ JBE match_length_ok_encodeBetterBlockAsm4MB
+ MOVL 20(SP), DX
+ INCL DX
+ JMP search_loop_encodeBetterBlockAsm4MB
+
+match_length_ok_encodeBetterBlockAsm4MB:
+ MOVL R8, 16(SP)
+ MOVL 12(SP), SI
+ CMPL SI, DI
+ JEQ emit_literal_done_match_emit_encodeBetterBlockAsm4MB
+ MOVL DI, R9
+ MOVL DI, 12(SP)
+ LEAQ (BX)(SI*1), R10
+ SUBL SI, R9
+ LEAL -1(R9), SI
+ CMPL SI, $0x3c
+ JB one_byte_match_emit_encodeBetterBlockAsm4MB
+ CMPL SI, $0x00000100
+ JB two_bytes_match_emit_encodeBetterBlockAsm4MB
+ CMPL SI, $0x00010000
+ JB three_bytes_match_emit_encodeBetterBlockAsm4MB
+ MOVL SI, R11
+ SHRL $0x10, R11
+ MOVB $0xf8, (CX)
+ MOVW SI, 1(CX)
+ MOVB R11, 3(CX)
+ ADDQ $0x04, CX
+ JMP memmove_long_match_emit_encodeBetterBlockAsm4MB
+
+three_bytes_match_emit_encodeBetterBlockAsm4MB:
+ MOVB $0xf4, (CX)
+ MOVW SI, 1(CX)
+ ADDQ $0x03, CX
+ JMP memmove_long_match_emit_encodeBetterBlockAsm4MB
+
+two_bytes_match_emit_encodeBetterBlockAsm4MB:
+ MOVB $0xf0, (CX)
+ MOVB SI, 1(CX)
+ ADDQ $0x02, CX
+ CMPL SI, $0x40
+ JB memmove_match_emit_encodeBetterBlockAsm4MB
+ JMP memmove_long_match_emit_encodeBetterBlockAsm4MB
+
+one_byte_match_emit_encodeBetterBlockAsm4MB:
+ SHLB $0x02, SI
+ MOVB SI, (CX)
+ ADDQ $0x01, CX
+
+memmove_match_emit_encodeBetterBlockAsm4MB:
+ LEAQ (CX)(R9*1), SI
+
+ // genMemMoveShort
+ CMPQ R9, $0x04
+ JBE emit_lit_memmove_match_emit_encodeBetterBlockAsm4MB_memmove_move_4
+ CMPQ R9, $0x08
+ JB emit_lit_memmove_match_emit_encodeBetterBlockAsm4MB_memmove_move_4through7
+ CMPQ R9, $0x10
+ JBE emit_lit_memmove_match_emit_encodeBetterBlockAsm4MB_memmove_move_8through16
+ CMPQ R9, $0x20
+ JBE emit_lit_memmove_match_emit_encodeBetterBlockAsm4MB_memmove_move_17through32
+ JMP emit_lit_memmove_match_emit_encodeBetterBlockAsm4MB_memmove_move_33through64
+
+emit_lit_memmove_match_emit_encodeBetterBlockAsm4MB_memmove_move_4:
+ MOVL (R10), R11
+ MOVL R11, (CX)
+ JMP memmove_end_copy_match_emit_encodeBetterBlockAsm4MB
+
+emit_lit_memmove_match_emit_encodeBetterBlockAsm4MB_memmove_move_4through7:
+ MOVL (R10), R11
+ MOVL -4(R10)(R9*1), R10
+ MOVL R11, (CX)
+ MOVL R10, -4(CX)(R9*1)
+ JMP memmove_end_copy_match_emit_encodeBetterBlockAsm4MB
+
+emit_lit_memmove_match_emit_encodeBetterBlockAsm4MB_memmove_move_8through16:
+ MOVQ (R10), R11
+ MOVQ -8(R10)(R9*1), R10
+ MOVQ R11, (CX)
+ MOVQ R10, -8(CX)(R9*1)
+ JMP memmove_end_copy_match_emit_encodeBetterBlockAsm4MB
+
+emit_lit_memmove_match_emit_encodeBetterBlockAsm4MB_memmove_move_17through32:
+ MOVOU (R10), X0
+ MOVOU -16(R10)(R9*1), X1
+ MOVOU X0, (CX)
+ MOVOU X1, -16(CX)(R9*1)
+ JMP memmove_end_copy_match_emit_encodeBetterBlockAsm4MB
+
+emit_lit_memmove_match_emit_encodeBetterBlockAsm4MB_memmove_move_33through64:
+ MOVOU (R10), X0
+ MOVOU 16(R10), X1
+ MOVOU -32(R10)(R9*1), X2
+ MOVOU -16(R10)(R9*1), X3
+ MOVOU X0, (CX)
+ MOVOU X1, 16(CX)
+ MOVOU X2, -32(CX)(R9*1)
+ MOVOU X3, -16(CX)(R9*1)
+
+memmove_end_copy_match_emit_encodeBetterBlockAsm4MB:
+ MOVQ SI, CX
+ JMP emit_literal_done_match_emit_encodeBetterBlockAsm4MB
+
+memmove_long_match_emit_encodeBetterBlockAsm4MB:
+ LEAQ (CX)(R9*1), SI
+
+ // genMemMoveLong
+ MOVOU (R10), X0
+ MOVOU 16(R10), X1
+ MOVOU -32(R10)(R9*1), X2
+ MOVOU -16(R10)(R9*1), X3
+ MOVQ R9, R13
+ SHRQ $0x05, R13
+ MOVQ CX, R11
+ ANDL $0x0000001f, R11
+ MOVQ $0x00000040, R14
+ SUBQ R11, R14
+ DECQ R13
+ JA emit_lit_memmove_long_match_emit_encodeBetterBlockAsm4MBlarge_forward_sse_loop_32
+ LEAQ -32(R10)(R14*1), R11
+ LEAQ -32(CX)(R14*1), R15
+
+emit_lit_memmove_long_match_emit_encodeBetterBlockAsm4MBlarge_big_loop_back:
+ MOVOU (R11), X4
+ MOVOU 16(R11), X5
+ MOVOA X4, (R15)
+ MOVOA X5, 16(R15)
+ ADDQ $0x20, R15
+ ADDQ $0x20, R11
+ ADDQ $0x20, R14
+ DECQ R13
+ JNA emit_lit_memmove_long_match_emit_encodeBetterBlockAsm4MBlarge_big_loop_back
+
+emit_lit_memmove_long_match_emit_encodeBetterBlockAsm4MBlarge_forward_sse_loop_32:
+ MOVOU -32(R10)(R14*1), X4
+ MOVOU -16(R10)(R14*1), X5
+ MOVOA X4, -32(CX)(R14*1)
+ MOVOA X5, -16(CX)(R14*1)
+ ADDQ $0x20, R14
+ CMPQ R9, R14
+ JAE emit_lit_memmove_long_match_emit_encodeBetterBlockAsm4MBlarge_forward_sse_loop_32
+ MOVOU X0, (CX)
+ MOVOU X1, 16(CX)
+ MOVOU X2, -32(CX)(R9*1)
+ MOVOU X3, -16(CX)(R9*1)
+ MOVQ SI, CX
+
+emit_literal_done_match_emit_encodeBetterBlockAsm4MB:
+ ADDL R12, DX
+ ADDL $0x04, R12
+ MOVL DX, 12(SP)
+
+ // emitCopy
+ CMPL R8, $0x00010000
+ JB two_byte_offset_match_nolit_encodeBetterBlockAsm4MB
+ CMPL R12, $0x40
+ JBE four_bytes_remain_match_nolit_encodeBetterBlockAsm4MB
+ MOVB $0xff, (CX)
+ MOVL R8, 1(CX)
+ LEAL -64(R12), R12
+ ADDQ $0x05, CX
+ CMPL R12, $0x04
+ JB four_bytes_remain_match_nolit_encodeBetterBlockAsm4MB
+
+ // emitRepeat
+ MOVL R12, SI
+ LEAL -4(R12), R12
+ CMPL SI, $0x08
+ JBE repeat_two_match_nolit_encodeBetterBlockAsm4MB_emit_copy
+ CMPL SI, $0x0c
+ JAE cant_repeat_two_offset_match_nolit_encodeBetterBlockAsm4MB_emit_copy
+ CMPL R8, $0x00000800
+ JB repeat_two_offset_match_nolit_encodeBetterBlockAsm4MB_emit_copy
+
+cant_repeat_two_offset_match_nolit_encodeBetterBlockAsm4MB_emit_copy:
+ CMPL R12, $0x00000104
+ JB repeat_three_match_nolit_encodeBetterBlockAsm4MB_emit_copy
+ CMPL R12, $0x00010100
+ JB repeat_four_match_nolit_encodeBetterBlockAsm4MB_emit_copy
+ LEAL -65536(R12), R12
+ MOVL R12, R8
+ MOVW $0x001d, (CX)
+ MOVW R12, 2(CX)
+ SARL $0x10, R8
+ MOVB R8, 4(CX)
+ ADDQ $0x05, CX
+ JMP match_nolit_emitcopy_end_encodeBetterBlockAsm4MB
+
+repeat_four_match_nolit_encodeBetterBlockAsm4MB_emit_copy:
+ LEAL -256(R12), R12
+ MOVW $0x0019, (CX)
+ MOVW R12, 2(CX)
+ ADDQ $0x04, CX
+ JMP match_nolit_emitcopy_end_encodeBetterBlockAsm4MB
+
+repeat_three_match_nolit_encodeBetterBlockAsm4MB_emit_copy:
+ LEAL -4(R12), R12
+ MOVW $0x0015, (CX)
+ MOVB R12, 2(CX)
+ ADDQ $0x03, CX
+ JMP match_nolit_emitcopy_end_encodeBetterBlockAsm4MB
+
+repeat_two_match_nolit_encodeBetterBlockAsm4MB_emit_copy:
+ SHLL $0x02, R12
+ ORL $0x01, R12
+ MOVW R12, (CX)
+ ADDQ $0x02, CX
+ JMP match_nolit_emitcopy_end_encodeBetterBlockAsm4MB
+
+repeat_two_offset_match_nolit_encodeBetterBlockAsm4MB_emit_copy:
+ XORQ SI, SI
+ LEAL 1(SI)(R12*4), R12
+ MOVB R8, 1(CX)
+ SARL $0x08, R8
+ SHLL $0x05, R8
+ ORL R8, R12
+ MOVB R12, (CX)
+ ADDQ $0x02, CX
+ JMP match_nolit_emitcopy_end_encodeBetterBlockAsm4MB
+
+four_bytes_remain_match_nolit_encodeBetterBlockAsm4MB:
+ TESTL R12, R12
+ JZ match_nolit_emitcopy_end_encodeBetterBlockAsm4MB
+ XORL SI, SI
+ LEAL -1(SI)(R12*4), R12
+ MOVB R12, (CX)
+ MOVL R8, 1(CX)
+ ADDQ $0x05, CX
+ JMP match_nolit_emitcopy_end_encodeBetterBlockAsm4MB
+
+two_byte_offset_match_nolit_encodeBetterBlockAsm4MB:
+ CMPL R12, $0x40
+ JBE two_byte_offset_short_match_nolit_encodeBetterBlockAsm4MB
+ CMPL R8, $0x00000800
+ JAE long_offset_short_match_nolit_encodeBetterBlockAsm4MB
+ MOVL $0x00000001, SI
+ LEAL 16(SI), SI
+ MOVB R8, 1(CX)
+ SHRL $0x08, R8
+ SHLL $0x05, R8
+ ORL R8, SI
+ MOVB SI, (CX)
+ ADDQ $0x02, CX
+ SUBL $0x08, R12
+
+ // emitRepeat
+ LEAL -4(R12), R12
+ JMP cant_repeat_two_offset_match_nolit_encodeBetterBlockAsm4MB_emit_copy_short_2b
+ MOVL R12, SI
+ LEAL -4(R12), R12
+ CMPL SI, $0x08
+ JBE repeat_two_match_nolit_encodeBetterBlockAsm4MB_emit_copy_short_2b
+ CMPL SI, $0x0c
+ JAE cant_repeat_two_offset_match_nolit_encodeBetterBlockAsm4MB_emit_copy_short_2b
+ CMPL R8, $0x00000800
+ JB repeat_two_offset_match_nolit_encodeBetterBlockAsm4MB_emit_copy_short_2b
+
+cant_repeat_two_offset_match_nolit_encodeBetterBlockAsm4MB_emit_copy_short_2b:
+ CMPL R12, $0x00000104
+ JB repeat_three_match_nolit_encodeBetterBlockAsm4MB_emit_copy_short_2b
+ CMPL R12, $0x00010100
+ JB repeat_four_match_nolit_encodeBetterBlockAsm4MB_emit_copy_short_2b
+ LEAL -65536(R12), R12
+ MOVL R12, R8
+ MOVW $0x001d, (CX)
+ MOVW R12, 2(CX)
+ SARL $0x10, R8
+ MOVB R8, 4(CX)
+ ADDQ $0x05, CX
+ JMP match_nolit_emitcopy_end_encodeBetterBlockAsm4MB
+
+repeat_four_match_nolit_encodeBetterBlockAsm4MB_emit_copy_short_2b:
+ LEAL -256(R12), R12
+ MOVW $0x0019, (CX)
+ MOVW R12, 2(CX)
+ ADDQ $0x04, CX
+ JMP match_nolit_emitcopy_end_encodeBetterBlockAsm4MB
+
+repeat_three_match_nolit_encodeBetterBlockAsm4MB_emit_copy_short_2b:
+ LEAL -4(R12), R12
+ MOVW $0x0015, (CX)
+ MOVB R12, 2(CX)
+ ADDQ $0x03, CX
+ JMP match_nolit_emitcopy_end_encodeBetterBlockAsm4MB
+
+repeat_two_match_nolit_encodeBetterBlockAsm4MB_emit_copy_short_2b:
+ SHLL $0x02, R12
+ ORL $0x01, R12
+ MOVW R12, (CX)
+ ADDQ $0x02, CX
+ JMP match_nolit_emitcopy_end_encodeBetterBlockAsm4MB
+
+repeat_two_offset_match_nolit_encodeBetterBlockAsm4MB_emit_copy_short_2b:
+ XORQ SI, SI
+ LEAL 1(SI)(R12*4), R12
+ MOVB R8, 1(CX)
+ SARL $0x08, R8
+ SHLL $0x05, R8
+ ORL R8, R12
+ MOVB R12, (CX)
+ ADDQ $0x02, CX
+ JMP match_nolit_emitcopy_end_encodeBetterBlockAsm4MB
+
+long_offset_short_match_nolit_encodeBetterBlockAsm4MB:
+ MOVB $0xee, (CX)
+ MOVW R8, 1(CX)
+ LEAL -60(R12), R12
+ ADDQ $0x03, CX
+
+ // emitRepeat
+ MOVL R12, SI
+ LEAL -4(R12), R12
+ CMPL SI, $0x08
+ JBE repeat_two_match_nolit_encodeBetterBlockAsm4MB_emit_copy_short
+ CMPL SI, $0x0c
+ JAE cant_repeat_two_offset_match_nolit_encodeBetterBlockAsm4MB_emit_copy_short
+ CMPL R8, $0x00000800
+ JB repeat_two_offset_match_nolit_encodeBetterBlockAsm4MB_emit_copy_short
+
+cant_repeat_two_offset_match_nolit_encodeBetterBlockAsm4MB_emit_copy_short:
+ CMPL R12, $0x00000104
+ JB repeat_three_match_nolit_encodeBetterBlockAsm4MB_emit_copy_short
+ CMPL R12, $0x00010100
+ JB repeat_four_match_nolit_encodeBetterBlockAsm4MB_emit_copy_short
+ LEAL -65536(R12), R12
+ MOVL R12, R8
+ MOVW $0x001d, (CX)
+ MOVW R12, 2(CX)
+ SARL $0x10, R8
+ MOVB R8, 4(CX)
+ ADDQ $0x05, CX
+ JMP match_nolit_emitcopy_end_encodeBetterBlockAsm4MB
+
+repeat_four_match_nolit_encodeBetterBlockAsm4MB_emit_copy_short:
+ LEAL -256(R12), R12
+ MOVW $0x0019, (CX)
+ MOVW R12, 2(CX)
+ ADDQ $0x04, CX
+ JMP match_nolit_emitcopy_end_encodeBetterBlockAsm4MB
+
+repeat_three_match_nolit_encodeBetterBlockAsm4MB_emit_copy_short:
+ LEAL -4(R12), R12
+ MOVW $0x0015, (CX)
+ MOVB R12, 2(CX)
+ ADDQ $0x03, CX
+ JMP match_nolit_emitcopy_end_encodeBetterBlockAsm4MB
+
+repeat_two_match_nolit_encodeBetterBlockAsm4MB_emit_copy_short:
+ SHLL $0x02, R12
+ ORL $0x01, R12
+ MOVW R12, (CX)
+ ADDQ $0x02, CX
+ JMP match_nolit_emitcopy_end_encodeBetterBlockAsm4MB
+
+repeat_two_offset_match_nolit_encodeBetterBlockAsm4MB_emit_copy_short:
+ XORQ SI, SI
+ LEAL 1(SI)(R12*4), R12
+ MOVB R8, 1(CX)
+ SARL $0x08, R8
+ SHLL $0x05, R8
+ ORL R8, R12
+ MOVB R12, (CX)
+ ADDQ $0x02, CX
+ JMP match_nolit_emitcopy_end_encodeBetterBlockAsm4MB
+
+two_byte_offset_short_match_nolit_encodeBetterBlockAsm4MB:
+ MOVL R12, SI
+ SHLL $0x02, SI
+ CMPL R12, $0x0c
+ JAE emit_copy_three_match_nolit_encodeBetterBlockAsm4MB
+ CMPL R8, $0x00000800
+ JAE emit_copy_three_match_nolit_encodeBetterBlockAsm4MB
+ LEAL -15(SI), SI
+ MOVB R8, 1(CX)
+ SHRL $0x08, R8
+ SHLL $0x05, R8
+ ORL R8, SI
+ MOVB SI, (CX)
+ ADDQ $0x02, CX
+ JMP match_nolit_emitcopy_end_encodeBetterBlockAsm4MB
+
+emit_copy_three_match_nolit_encodeBetterBlockAsm4MB:
+ LEAL -2(SI), SI
+ MOVB SI, (CX)
+ MOVW R8, 1(CX)
+ ADDQ $0x03, CX
+ JMP match_nolit_emitcopy_end_encodeBetterBlockAsm4MB
+
+match_is_repeat_encodeBetterBlockAsm4MB:
+ MOVL 12(SP), SI
+ CMPL SI, DI
+ JEQ emit_literal_done_match_emit_repeat_encodeBetterBlockAsm4MB
+ MOVL DI, R9
+ MOVL DI, 12(SP)
+ LEAQ (BX)(SI*1), R10
+ SUBL SI, R9
+ LEAL -1(R9), SI
+ CMPL SI, $0x3c
+ JB one_byte_match_emit_repeat_encodeBetterBlockAsm4MB
+ CMPL SI, $0x00000100
+ JB two_bytes_match_emit_repeat_encodeBetterBlockAsm4MB
+ CMPL SI, $0x00010000
+ JB three_bytes_match_emit_repeat_encodeBetterBlockAsm4MB
+ MOVL SI, R11
+ SHRL $0x10, R11
+ MOVB $0xf8, (CX)
+ MOVW SI, 1(CX)
+ MOVB R11, 3(CX)
+ ADDQ $0x04, CX
+ JMP memmove_long_match_emit_repeat_encodeBetterBlockAsm4MB
+
+three_bytes_match_emit_repeat_encodeBetterBlockAsm4MB:
+ MOVB $0xf4, (CX)
+ MOVW SI, 1(CX)
+ ADDQ $0x03, CX
+ JMP memmove_long_match_emit_repeat_encodeBetterBlockAsm4MB
+
+two_bytes_match_emit_repeat_encodeBetterBlockAsm4MB:
+ MOVB $0xf0, (CX)
+ MOVB SI, 1(CX)
+ ADDQ $0x02, CX
+ CMPL SI, $0x40
+ JB memmove_match_emit_repeat_encodeBetterBlockAsm4MB
+ JMP memmove_long_match_emit_repeat_encodeBetterBlockAsm4MB
+
+one_byte_match_emit_repeat_encodeBetterBlockAsm4MB:
+ SHLB $0x02, SI
+ MOVB SI, (CX)
+ ADDQ $0x01, CX
+
+memmove_match_emit_repeat_encodeBetterBlockAsm4MB:
+ LEAQ (CX)(R9*1), SI
+
+ // genMemMoveShort
+ CMPQ R9, $0x04
+ JBE emit_lit_memmove_match_emit_repeat_encodeBetterBlockAsm4MB_memmove_move_4
+ CMPQ R9, $0x08
+ JB emit_lit_memmove_match_emit_repeat_encodeBetterBlockAsm4MB_memmove_move_4through7
+ CMPQ R9, $0x10
+ JBE emit_lit_memmove_match_emit_repeat_encodeBetterBlockAsm4MB_memmove_move_8through16
+ CMPQ R9, $0x20
+ JBE emit_lit_memmove_match_emit_repeat_encodeBetterBlockAsm4MB_memmove_move_17through32
+ JMP emit_lit_memmove_match_emit_repeat_encodeBetterBlockAsm4MB_memmove_move_33through64
+
+emit_lit_memmove_match_emit_repeat_encodeBetterBlockAsm4MB_memmove_move_4:
+ MOVL (R10), R11
+ MOVL R11, (CX)
+ JMP memmove_end_copy_match_emit_repeat_encodeBetterBlockAsm4MB
+
+emit_lit_memmove_match_emit_repeat_encodeBetterBlockAsm4MB_memmove_move_4through7:
+ MOVL (R10), R11
+ MOVL -4(R10)(R9*1), R10
+ MOVL R11, (CX)
+ MOVL R10, -4(CX)(R9*1)
+ JMP memmove_end_copy_match_emit_repeat_encodeBetterBlockAsm4MB
+
+emit_lit_memmove_match_emit_repeat_encodeBetterBlockAsm4MB_memmove_move_8through16:
+ MOVQ (R10), R11
+ MOVQ -8(R10)(R9*1), R10
+ MOVQ R11, (CX)
+ MOVQ R10, -8(CX)(R9*1)
+ JMP memmove_end_copy_match_emit_repeat_encodeBetterBlockAsm4MB
+
+emit_lit_memmove_match_emit_repeat_encodeBetterBlockAsm4MB_memmove_move_17through32:
+ MOVOU (R10), X0
+ MOVOU -16(R10)(R9*1), X1
+ MOVOU X0, (CX)
+ MOVOU X1, -16(CX)(R9*1)
+ JMP memmove_end_copy_match_emit_repeat_encodeBetterBlockAsm4MB
+
+emit_lit_memmove_match_emit_repeat_encodeBetterBlockAsm4MB_memmove_move_33through64:
+ MOVOU (R10), X0
+ MOVOU 16(R10), X1
+ MOVOU -32(R10)(R9*1), X2
+ MOVOU -16(R10)(R9*1), X3
+ MOVOU X0, (CX)
+ MOVOU X1, 16(CX)
+ MOVOU X2, -32(CX)(R9*1)
+ MOVOU X3, -16(CX)(R9*1)
+
+memmove_end_copy_match_emit_repeat_encodeBetterBlockAsm4MB:
+ MOVQ SI, CX
+ JMP emit_literal_done_match_emit_repeat_encodeBetterBlockAsm4MB
+
+memmove_long_match_emit_repeat_encodeBetterBlockAsm4MB:
+ LEAQ (CX)(R9*1), SI
+
+ // genMemMoveLong
+ MOVOU (R10), X0
+ MOVOU 16(R10), X1
+ MOVOU -32(R10)(R9*1), X2
+ MOVOU -16(R10)(R9*1), X3
+ MOVQ R9, R13
+ SHRQ $0x05, R13
+ MOVQ CX, R11
+ ANDL $0x0000001f, R11
+ MOVQ $0x00000040, R14
+ SUBQ R11, R14
+ DECQ R13
+ JA emit_lit_memmove_long_match_emit_repeat_encodeBetterBlockAsm4MBlarge_forward_sse_loop_32
+ LEAQ -32(R10)(R14*1), R11
+ LEAQ -32(CX)(R14*1), R15
+
+emit_lit_memmove_long_match_emit_repeat_encodeBetterBlockAsm4MBlarge_big_loop_back:
+ MOVOU (R11), X4
+ MOVOU 16(R11), X5
+ MOVOA X4, (R15)
+ MOVOA X5, 16(R15)
+ ADDQ $0x20, R15
+ ADDQ $0x20, R11
+ ADDQ $0x20, R14
+ DECQ R13
+ JNA emit_lit_memmove_long_match_emit_repeat_encodeBetterBlockAsm4MBlarge_big_loop_back
+
+emit_lit_memmove_long_match_emit_repeat_encodeBetterBlockAsm4MBlarge_forward_sse_loop_32:
+ MOVOU -32(R10)(R14*1), X4
+ MOVOU -16(R10)(R14*1), X5
+ MOVOA X4, -32(CX)(R14*1)
+ MOVOA X5, -16(CX)(R14*1)
+ ADDQ $0x20, R14
+ CMPQ R9, R14
+ JAE emit_lit_memmove_long_match_emit_repeat_encodeBetterBlockAsm4MBlarge_forward_sse_loop_32
+ MOVOU X0, (CX)
+ MOVOU X1, 16(CX)
+ MOVOU X2, -32(CX)(R9*1)
+ MOVOU X3, -16(CX)(R9*1)
+ MOVQ SI, CX
+
+emit_literal_done_match_emit_repeat_encodeBetterBlockAsm4MB:
+ ADDL R12, DX
+ ADDL $0x04, R12
+ MOVL DX, 12(SP)
+
+ // emitRepeat
+ MOVL R12, SI
+ LEAL -4(R12), R12
+ CMPL SI, $0x08
+ JBE repeat_two_match_nolit_repeat_encodeBetterBlockAsm4MB
+ CMPL SI, $0x0c
+ JAE cant_repeat_two_offset_match_nolit_repeat_encodeBetterBlockAsm4MB
+ CMPL R8, $0x00000800
+ JB repeat_two_offset_match_nolit_repeat_encodeBetterBlockAsm4MB
+
+cant_repeat_two_offset_match_nolit_repeat_encodeBetterBlockAsm4MB:
+ CMPL R12, $0x00000104
+ JB repeat_three_match_nolit_repeat_encodeBetterBlockAsm4MB
+ CMPL R12, $0x00010100
+ JB repeat_four_match_nolit_repeat_encodeBetterBlockAsm4MB
+ LEAL -65536(R12), R12
+ MOVL R12, R8
+ MOVW $0x001d, (CX)
+ MOVW R12, 2(CX)
+ SARL $0x10, R8
+ MOVB R8, 4(CX)
+ ADDQ $0x05, CX
+ JMP match_nolit_emitcopy_end_encodeBetterBlockAsm4MB
+
+repeat_four_match_nolit_repeat_encodeBetterBlockAsm4MB:
+ LEAL -256(R12), R12
+ MOVW $0x0019, (CX)
+ MOVW R12, 2(CX)
+ ADDQ $0x04, CX
+ JMP match_nolit_emitcopy_end_encodeBetterBlockAsm4MB
+
+repeat_three_match_nolit_repeat_encodeBetterBlockAsm4MB:
+ LEAL -4(R12), R12
+ MOVW $0x0015, (CX)
+ MOVB R12, 2(CX)
+ ADDQ $0x03, CX
+ JMP match_nolit_emitcopy_end_encodeBetterBlockAsm4MB
+
+repeat_two_match_nolit_repeat_encodeBetterBlockAsm4MB:
+ SHLL $0x02, R12
+ ORL $0x01, R12
+ MOVW R12, (CX)
+ ADDQ $0x02, CX
+ JMP match_nolit_emitcopy_end_encodeBetterBlockAsm4MB
+
+repeat_two_offset_match_nolit_repeat_encodeBetterBlockAsm4MB:
+ XORQ SI, SI
+ LEAL 1(SI)(R12*4), R12
+ MOVB R8, 1(CX)
+ SARL $0x08, R8
+ SHLL $0x05, R8
+ ORL R8, R12
+ MOVB R12, (CX)
+ ADDQ $0x02, CX
+
+match_nolit_emitcopy_end_encodeBetterBlockAsm4MB:
+ CMPL DX, 8(SP)
+ JAE emit_remainder_encodeBetterBlockAsm4MB
+ CMPQ CX, (SP)
+ JB match_nolit_dst_ok_encodeBetterBlockAsm4MB
+ MOVQ $0x00000000, ret+56(FP)
+ RET
+
+match_nolit_dst_ok_encodeBetterBlockAsm4MB:
+ MOVQ $0x00cf1bbcdcbfa563, SI
+ MOVQ $0x9e3779b1, R8
+ LEAQ 1(DI), DI
+ LEAQ -2(DX), R9
+ MOVQ (BX)(DI*1), R10
+ MOVQ 1(BX)(DI*1), R11
+ MOVQ (BX)(R9*1), R12
+ MOVQ 1(BX)(R9*1), R13
+ SHLQ $0x08, R10
+ IMULQ SI, R10
+ SHRQ $0x2f, R10
+ SHLQ $0x20, R11
+ IMULQ R8, R11
+ SHRQ $0x32, R11
+ SHLQ $0x08, R12
+ IMULQ SI, R12
+ SHRQ $0x2f, R12
+ SHLQ $0x20, R13
+ IMULQ R8, R13
+ SHRQ $0x32, R13
+ LEAQ 1(DI), R8
+ LEAQ 1(R9), R14
+ MOVL DI, (AX)(R10*4)
+ MOVL R9, (AX)(R12*4)
+ MOVL R8, 524288(AX)(R11*4)
+ MOVL R14, 524288(AX)(R13*4)
+ LEAQ 1(R9)(DI*1), R8
+ SHRQ $0x01, R8
+ ADDQ $0x01, DI
+ SUBQ $0x01, R9
+
+index_loop_encodeBetterBlockAsm4MB:
+ CMPQ R8, R9
+ JAE search_loop_encodeBetterBlockAsm4MB
+ MOVQ (BX)(DI*1), R10
+ MOVQ (BX)(R8*1), R11
+ SHLQ $0x08, R10
+ IMULQ SI, R10
+ SHRQ $0x2f, R10
+ SHLQ $0x08, R11
+ IMULQ SI, R11
+ SHRQ $0x2f, R11
+ MOVL DI, (AX)(R10*4)
+ MOVL R8, (AX)(R11*4)
+ ADDQ $0x02, DI
+ ADDQ $0x02, R8
+ JMP index_loop_encodeBetterBlockAsm4MB
+
+emit_remainder_encodeBetterBlockAsm4MB:
+ MOVQ src_len+32(FP), AX
+ SUBL 12(SP), AX
+ LEAQ 4(CX)(AX*1), AX
+ CMPQ AX, (SP)
+ JB emit_remainder_ok_encodeBetterBlockAsm4MB
+ MOVQ $0x00000000, ret+56(FP)
+ RET
+
+emit_remainder_ok_encodeBetterBlockAsm4MB:
+ MOVQ src_len+32(FP), AX
+ MOVL 12(SP), DX
+ CMPL DX, AX
+ JEQ emit_literal_done_emit_remainder_encodeBetterBlockAsm4MB
+ MOVL AX, SI
+ MOVL AX, 12(SP)
+ LEAQ (BX)(DX*1), AX
+ SUBL DX, SI
+ LEAL -1(SI), DX
+ CMPL DX, $0x3c
+ JB one_byte_emit_remainder_encodeBetterBlockAsm4MB
+ CMPL DX, $0x00000100
+ JB two_bytes_emit_remainder_encodeBetterBlockAsm4MB
+ CMPL DX, $0x00010000
+ JB three_bytes_emit_remainder_encodeBetterBlockAsm4MB
+ MOVL DX, BX
+ SHRL $0x10, BX
+ MOVB $0xf8, (CX)
+ MOVW DX, 1(CX)
+ MOVB BL, 3(CX)
+ ADDQ $0x04, CX
+ JMP memmove_long_emit_remainder_encodeBetterBlockAsm4MB
+
+three_bytes_emit_remainder_encodeBetterBlockAsm4MB:
+ MOVB $0xf4, (CX)
+ MOVW DX, 1(CX)
+ ADDQ $0x03, CX
+ JMP memmove_long_emit_remainder_encodeBetterBlockAsm4MB
+
+two_bytes_emit_remainder_encodeBetterBlockAsm4MB:
+ MOVB $0xf0, (CX)
+ MOVB DL, 1(CX)
+ ADDQ $0x02, CX
+ CMPL DX, $0x40
+ JB memmove_emit_remainder_encodeBetterBlockAsm4MB
+ JMP memmove_long_emit_remainder_encodeBetterBlockAsm4MB
+
+one_byte_emit_remainder_encodeBetterBlockAsm4MB:
+ SHLB $0x02, DL
+ MOVB DL, (CX)
+ ADDQ $0x01, CX
+
+memmove_emit_remainder_encodeBetterBlockAsm4MB:
+ LEAQ (CX)(SI*1), DX
+ MOVL SI, BX
+
+ // genMemMoveShort
+ CMPQ BX, $0x03
+ JB emit_lit_memmove_emit_remainder_encodeBetterBlockAsm4MB_memmove_move_1or2
+ JE emit_lit_memmove_emit_remainder_encodeBetterBlockAsm4MB_memmove_move_3
+ CMPQ BX, $0x08
+ JB emit_lit_memmove_emit_remainder_encodeBetterBlockAsm4MB_memmove_move_4through7
+ CMPQ BX, $0x10
+ JBE emit_lit_memmove_emit_remainder_encodeBetterBlockAsm4MB_memmove_move_8through16
+ CMPQ BX, $0x20
+ JBE emit_lit_memmove_emit_remainder_encodeBetterBlockAsm4MB_memmove_move_17through32
+ JMP emit_lit_memmove_emit_remainder_encodeBetterBlockAsm4MB_memmove_move_33through64
+
+emit_lit_memmove_emit_remainder_encodeBetterBlockAsm4MB_memmove_move_1or2:
+ MOVB (AX), SI
+ MOVB -1(AX)(BX*1), AL
+ MOVB SI, (CX)
+ MOVB AL, -1(CX)(BX*1)
+ JMP memmove_end_copy_emit_remainder_encodeBetterBlockAsm4MB
+
+emit_lit_memmove_emit_remainder_encodeBetterBlockAsm4MB_memmove_move_3:
+ MOVW (AX), SI
+ MOVB 2(AX), AL
+ MOVW SI, (CX)
+ MOVB AL, 2(CX)
+ JMP memmove_end_copy_emit_remainder_encodeBetterBlockAsm4MB
+
+emit_lit_memmove_emit_remainder_encodeBetterBlockAsm4MB_memmove_move_4through7:
+ MOVL (AX), SI
+ MOVL -4(AX)(BX*1), AX
+ MOVL SI, (CX)
+ MOVL AX, -4(CX)(BX*1)
+ JMP memmove_end_copy_emit_remainder_encodeBetterBlockAsm4MB
+
+emit_lit_memmove_emit_remainder_encodeBetterBlockAsm4MB_memmove_move_8through16:
+ MOVQ (AX), SI
+ MOVQ -8(AX)(BX*1), AX
+ MOVQ SI, (CX)
+ MOVQ AX, -8(CX)(BX*1)
+ JMP memmove_end_copy_emit_remainder_encodeBetterBlockAsm4MB
+
+emit_lit_memmove_emit_remainder_encodeBetterBlockAsm4MB_memmove_move_17through32:
+ MOVOU (AX), X0
+ MOVOU -16(AX)(BX*1), X1
+ MOVOU X0, (CX)
+ MOVOU X1, -16(CX)(BX*1)
+ JMP memmove_end_copy_emit_remainder_encodeBetterBlockAsm4MB
+
+emit_lit_memmove_emit_remainder_encodeBetterBlockAsm4MB_memmove_move_33through64:
+ MOVOU (AX), X0
+ MOVOU 16(AX), X1
+ MOVOU -32(AX)(BX*1), X2
+ MOVOU -16(AX)(BX*1), X3
+ MOVOU X0, (CX)
+ MOVOU X1, 16(CX)
+ MOVOU X2, -32(CX)(BX*1)
+ MOVOU X3, -16(CX)(BX*1)
+
+memmove_end_copy_emit_remainder_encodeBetterBlockAsm4MB:
+ MOVQ DX, CX
+ JMP emit_literal_done_emit_remainder_encodeBetterBlockAsm4MB
+
+memmove_long_emit_remainder_encodeBetterBlockAsm4MB:
+ LEAQ (CX)(SI*1), DX
+ MOVL SI, BX
+
+ // genMemMoveLong
+ MOVOU (AX), X0
+ MOVOU 16(AX), X1
+ MOVOU -32(AX)(BX*1), X2
+ MOVOU -16(AX)(BX*1), X3
+ MOVQ BX, DI
+ SHRQ $0x05, DI
+ MOVQ CX, SI
+ ANDL $0x0000001f, SI
+ MOVQ $0x00000040, R8
+ SUBQ SI, R8
+ DECQ DI
+ JA emit_lit_memmove_long_emit_remainder_encodeBetterBlockAsm4MBlarge_forward_sse_loop_32
+ LEAQ -32(AX)(R8*1), SI
+ LEAQ -32(CX)(R8*1), R9
+
+emit_lit_memmove_long_emit_remainder_encodeBetterBlockAsm4MBlarge_big_loop_back:
+ MOVOU (SI), X4
+ MOVOU 16(SI), X5
+ MOVOA X4, (R9)
+ MOVOA X5, 16(R9)
+ ADDQ $0x20, R9
+ ADDQ $0x20, SI
+ ADDQ $0x20, R8
+ DECQ DI
+ JNA emit_lit_memmove_long_emit_remainder_encodeBetterBlockAsm4MBlarge_big_loop_back
+
+emit_lit_memmove_long_emit_remainder_encodeBetterBlockAsm4MBlarge_forward_sse_loop_32:
+ MOVOU -32(AX)(R8*1), X4
+ MOVOU -16(AX)(R8*1), X5
+ MOVOA X4, -32(CX)(R8*1)
+ MOVOA X5, -16(CX)(R8*1)
+ ADDQ $0x20, R8
+ CMPQ BX, R8
+ JAE emit_lit_memmove_long_emit_remainder_encodeBetterBlockAsm4MBlarge_forward_sse_loop_32
+ MOVOU X0, (CX)
+ MOVOU X1, 16(CX)
+ MOVOU X2, -32(CX)(BX*1)
+ MOVOU X3, -16(CX)(BX*1)
+ MOVQ DX, CX
+
+emit_literal_done_emit_remainder_encodeBetterBlockAsm4MB:
+ MOVQ dst_base+0(FP), AX
+ SUBQ AX, CX
+ MOVQ CX, ret+56(FP)
+ RET
+
+// func encodeBetterBlockAsm12B(dst []byte, src []byte, tmp *[81920]byte) int
+// Requires: BMI, SSE2
+TEXT ·encodeBetterBlockAsm12B(SB), $24-64
+ MOVQ tmp+48(FP), AX
+ MOVQ dst_base+0(FP), CX
+ MOVQ $0x00000280, DX
+ MOVQ AX, BX
+ PXOR X0, X0
+
+zero_loop_encodeBetterBlockAsm12B:
+ MOVOU X0, (BX)
+ MOVOU X0, 16(BX)
+ MOVOU X0, 32(BX)
+ MOVOU X0, 48(BX)
+ MOVOU X0, 64(BX)
+ MOVOU X0, 80(BX)
+ MOVOU X0, 96(BX)
+ MOVOU X0, 112(BX)
+ ADDQ $0x80, BX
+ DECQ DX
+ JNZ zero_loop_encodeBetterBlockAsm12B
+ MOVL $0x00000000, 12(SP)
+ MOVQ src_len+32(FP), DX
+ LEAQ -6(DX), BX
+ LEAQ -8(DX), SI
+ MOVL SI, 8(SP)
+ SHRQ $0x05, DX
+ SUBL DX, BX
+ LEAQ (CX)(BX*1), BX
+ MOVQ BX, (SP)
+ MOVL $0x00000001, DX
+ MOVL $0x00000000, 16(SP)
+ MOVQ src_base+24(FP), BX
+
+search_loop_encodeBetterBlockAsm12B:
+ MOVL DX, SI
+ SUBL 12(SP), SI
+ SHRL $0x06, SI
+ LEAL 1(DX)(SI*1), SI
+ CMPL SI, 8(SP)
+ JAE emit_remainder_encodeBetterBlockAsm12B
+ MOVQ (BX)(DX*1), DI
+ MOVL SI, 20(SP)
+ MOVQ $0x0000cf1bbcdcbf9b, R9
+ MOVQ $0x9e3779b1, SI
+ MOVQ DI, R10
+ MOVQ DI, R11
+ SHLQ $0x10, R10
+ IMULQ R9, R10
+ SHRQ $0x32, R10
+ SHLQ $0x20, R11
+ IMULQ SI, R11
+ SHRQ $0x34, R11
+ MOVL (AX)(R10*4), SI
+ MOVL 65536(AX)(R11*4), R8
+ MOVL DX, (AX)(R10*4)
+ MOVL DX, 65536(AX)(R11*4)
+ MOVQ (BX)(SI*1), R10
+ MOVQ (BX)(R8*1), R11
+ CMPQ R10, DI
+ JEQ candidate_match_encodeBetterBlockAsm12B
+ CMPQ R11, DI
+ JNE no_short_found_encodeBetterBlockAsm12B
+ MOVL R8, SI
+ JMP candidate_match_encodeBetterBlockAsm12B
+
+no_short_found_encodeBetterBlockAsm12B:
+ CMPL R10, DI
+ JEQ candidate_match_encodeBetterBlockAsm12B
+ CMPL R11, DI
+ JEQ candidateS_match_encodeBetterBlockAsm12B
+ MOVL 20(SP), DX
+ JMP search_loop_encodeBetterBlockAsm12B
+
+candidateS_match_encodeBetterBlockAsm12B:
+ SHRQ $0x08, DI
+ MOVQ DI, R10
+ SHLQ $0x10, R10
+ IMULQ R9, R10
+ SHRQ $0x32, R10
+ MOVL (AX)(R10*4), SI
+ INCL DX
+ MOVL DX, (AX)(R10*4)
+ CMPL (BX)(SI*1), DI
+ JEQ candidate_match_encodeBetterBlockAsm12B
+ DECL DX
+ MOVL R8, SI
+
+candidate_match_encodeBetterBlockAsm12B:
+ MOVL 12(SP), DI
+ TESTL SI, SI
+ JZ match_extend_back_end_encodeBetterBlockAsm12B
+
+match_extend_back_loop_encodeBetterBlockAsm12B:
+ CMPL DX, DI
+ JBE match_extend_back_end_encodeBetterBlockAsm12B
+ MOVB -1(BX)(SI*1), R8
+ MOVB -1(BX)(DX*1), R9
+ CMPB R8, R9
+ JNE match_extend_back_end_encodeBetterBlockAsm12B
+ LEAL -1(DX), DX
+ DECL SI
+ JZ match_extend_back_end_encodeBetterBlockAsm12B
+ JMP match_extend_back_loop_encodeBetterBlockAsm12B
+
+match_extend_back_end_encodeBetterBlockAsm12B:
+ MOVL DX, DI
+ SUBL 12(SP), DI
+ LEAQ 3(CX)(DI*1), DI
+ CMPQ DI, (SP)
+ JB match_dst_size_check_encodeBetterBlockAsm12B
+ MOVQ $0x00000000, ret+56(FP)
+ RET
+
+match_dst_size_check_encodeBetterBlockAsm12B:
+ MOVL DX, DI
+ ADDL $0x04, DX
+ ADDL $0x04, SI
+ MOVQ src_len+32(FP), R8
+ SUBL DX, R8
+ LEAQ (BX)(DX*1), R9
+ LEAQ (BX)(SI*1), R10
+
+ // matchLen
+ XORL R12, R12
+
+matchlen_loopback_16_match_nolit_encodeBetterBlockAsm12B:
+ CMPL R8, $0x10
+ JB matchlen_match8_match_nolit_encodeBetterBlockAsm12B
+ MOVQ (R9)(R12*1), R11
+ MOVQ 8(R9)(R12*1), R13
+ XORQ (R10)(R12*1), R11
+ JNZ matchlen_bsf_8_match_nolit_encodeBetterBlockAsm12B
+ XORQ 8(R10)(R12*1), R13
+ JNZ matchlen_bsf_16match_nolit_encodeBetterBlockAsm12B
+ LEAL -16(R8), R8
+ LEAL 16(R12), R12
+ JMP matchlen_loopback_16_match_nolit_encodeBetterBlockAsm12B
+
+matchlen_bsf_16match_nolit_encodeBetterBlockAsm12B:
+#ifdef GOAMD64_v3
+ TZCNTQ R13, R13
+
+#else
+ BSFQ R13, R13
+
+#endif
+ SARQ $0x03, R13
+ LEAL 8(R12)(R13*1), R12
+ JMP
match_nolit_end_encodeBetterBlockAsm12B + +matchlen_match8_match_nolit_encodeBetterBlockAsm12B: + CMPL R8, $0x08 + JB matchlen_match4_match_nolit_encodeBetterBlockAsm12B + MOVQ (R9)(R12*1), R11 + XORQ (R10)(R12*1), R11 + JNZ matchlen_bsf_8_match_nolit_encodeBetterBlockAsm12B + LEAL -8(R8), R8 + LEAL 8(R12), R12 + JMP matchlen_match4_match_nolit_encodeBetterBlockAsm12B + +matchlen_bsf_8_match_nolit_encodeBetterBlockAsm12B: +#ifdef GOAMD64_v3 + TZCNTQ R11, R11 + +#else + BSFQ R11, R11 + +#endif + SARQ $0x03, R11 + LEAL (R12)(R11*1), R12 + JMP match_nolit_end_encodeBetterBlockAsm12B + +matchlen_match4_match_nolit_encodeBetterBlockAsm12B: + CMPL R8, $0x04 + JB matchlen_match2_match_nolit_encodeBetterBlockAsm12B + MOVL (R9)(R12*1), R11 + CMPL (R10)(R12*1), R11 + JNE matchlen_match2_match_nolit_encodeBetterBlockAsm12B + LEAL -4(R8), R8 + LEAL 4(R12), R12 + +matchlen_match2_match_nolit_encodeBetterBlockAsm12B: + CMPL R8, $0x01 + JE matchlen_match1_match_nolit_encodeBetterBlockAsm12B + JB match_nolit_end_encodeBetterBlockAsm12B + MOVW (R9)(R12*1), R11 + CMPW (R10)(R12*1), R11 + JNE matchlen_match1_match_nolit_encodeBetterBlockAsm12B + LEAL 2(R12), R12 + SUBL $0x02, R8 + JZ match_nolit_end_encodeBetterBlockAsm12B + +matchlen_match1_match_nolit_encodeBetterBlockAsm12B: + MOVB (R9)(R12*1), R11 + CMPB (R10)(R12*1), R11 + JNE match_nolit_end_encodeBetterBlockAsm12B + LEAL 1(R12), R12 + +match_nolit_end_encodeBetterBlockAsm12B: + MOVL DX, R8 + SUBL SI, R8 + + // Check if repeat + CMPL 16(SP), R8 + JEQ match_is_repeat_encodeBetterBlockAsm12B + MOVL R8, 16(SP) + MOVL 12(SP), SI + CMPL SI, DI + JEQ emit_literal_done_match_emit_encodeBetterBlockAsm12B + MOVL DI, R9 + MOVL DI, 12(SP) + LEAQ (BX)(SI*1), R10 + SUBL SI, R9 + LEAL -1(R9), SI + CMPL SI, $0x3c + JB one_byte_match_emit_encodeBetterBlockAsm12B + CMPL SI, $0x00000100 + JB two_bytes_match_emit_encodeBetterBlockAsm12B + JB three_bytes_match_emit_encodeBetterBlockAsm12B + +three_bytes_match_emit_encodeBetterBlockAsm12B: + MOVB $0xf4, (CX) + MOVW SI, 1(CX) + ADDQ $0x03, CX + JMP memmove_long_match_emit_encodeBetterBlockAsm12B + +two_bytes_match_emit_encodeBetterBlockAsm12B: + MOVB $0xf0, (CX) + MOVB SI, 1(CX) + ADDQ $0x02, CX + CMPL SI, $0x40 + JB memmove_match_emit_encodeBetterBlockAsm12B + JMP memmove_long_match_emit_encodeBetterBlockAsm12B + +one_byte_match_emit_encodeBetterBlockAsm12B: + SHLB $0x02, SI + MOVB SI, (CX) + ADDQ $0x01, CX + +memmove_match_emit_encodeBetterBlockAsm12B: + LEAQ (CX)(R9*1), SI + + // genMemMoveShort + CMPQ R9, $0x04 + JBE emit_lit_memmove_match_emit_encodeBetterBlockAsm12B_memmove_move_4 + CMPQ R9, $0x08 + JB emit_lit_memmove_match_emit_encodeBetterBlockAsm12B_memmove_move_4through7 + CMPQ R9, $0x10 + JBE emit_lit_memmove_match_emit_encodeBetterBlockAsm12B_memmove_move_8through16 + CMPQ R9, $0x20 + JBE emit_lit_memmove_match_emit_encodeBetterBlockAsm12B_memmove_move_17through32 + JMP emit_lit_memmove_match_emit_encodeBetterBlockAsm12B_memmove_move_33through64 + +emit_lit_memmove_match_emit_encodeBetterBlockAsm12B_memmove_move_4: + MOVL (R10), R11 + MOVL R11, (CX) + JMP memmove_end_copy_match_emit_encodeBetterBlockAsm12B + +emit_lit_memmove_match_emit_encodeBetterBlockAsm12B_memmove_move_4through7: + MOVL (R10), R11 + MOVL -4(R10)(R9*1), R10 + MOVL R11, (CX) + MOVL R10, -4(CX)(R9*1) + JMP memmove_end_copy_match_emit_encodeBetterBlockAsm12B + +emit_lit_memmove_match_emit_encodeBetterBlockAsm12B_memmove_move_8through16: + MOVQ (R10), R11 + MOVQ -8(R10)(R9*1), R10 + MOVQ R11, (CX) + MOVQ R10, -8(CX)(R9*1) + JMP 
memmove_end_copy_match_emit_encodeBetterBlockAsm12B + +emit_lit_memmove_match_emit_encodeBetterBlockAsm12B_memmove_move_17through32: + MOVOU (R10), X0 + MOVOU -16(R10)(R9*1), X1 + MOVOU X0, (CX) + MOVOU X1, -16(CX)(R9*1) + JMP memmove_end_copy_match_emit_encodeBetterBlockAsm12B + +emit_lit_memmove_match_emit_encodeBetterBlockAsm12B_memmove_move_33through64: + MOVOU (R10), X0 + MOVOU 16(R10), X1 + MOVOU -32(R10)(R9*1), X2 + MOVOU -16(R10)(R9*1), X3 + MOVOU X0, (CX) + MOVOU X1, 16(CX) + MOVOU X2, -32(CX)(R9*1) + MOVOU X3, -16(CX)(R9*1) + +memmove_end_copy_match_emit_encodeBetterBlockAsm12B: + MOVQ SI, CX + JMP emit_literal_done_match_emit_encodeBetterBlockAsm12B + +memmove_long_match_emit_encodeBetterBlockAsm12B: + LEAQ (CX)(R9*1), SI + + // genMemMoveLong + MOVOU (R10), X0 + MOVOU 16(R10), X1 + MOVOU -32(R10)(R9*1), X2 + MOVOU -16(R10)(R9*1), X3 + MOVQ R9, R13 + SHRQ $0x05, R13 + MOVQ CX, R11 + ANDL $0x0000001f, R11 + MOVQ $0x00000040, R14 + SUBQ R11, R14 + DECQ R13 + JA emit_lit_memmove_long_match_emit_encodeBetterBlockAsm12Blarge_forward_sse_loop_32 + LEAQ -32(R10)(R14*1), R11 + LEAQ -32(CX)(R14*1), R15 + +emit_lit_memmove_long_match_emit_encodeBetterBlockAsm12Blarge_big_loop_back: + MOVOU (R11), X4 + MOVOU 16(R11), X5 + MOVOA X4, (R15) + MOVOA X5, 16(R15) + ADDQ $0x20, R15 + ADDQ $0x20, R11 + ADDQ $0x20, R14 + DECQ R13 + JNA emit_lit_memmove_long_match_emit_encodeBetterBlockAsm12Blarge_big_loop_back + +emit_lit_memmove_long_match_emit_encodeBetterBlockAsm12Blarge_forward_sse_loop_32: + MOVOU -32(R10)(R14*1), X4 + MOVOU -16(R10)(R14*1), X5 + MOVOA X4, -32(CX)(R14*1) + MOVOA X5, -16(CX)(R14*1) + ADDQ $0x20, R14 + CMPQ R9, R14 + JAE emit_lit_memmove_long_match_emit_encodeBetterBlockAsm12Blarge_forward_sse_loop_32 + MOVOU X0, (CX) + MOVOU X1, 16(CX) + MOVOU X2, -32(CX)(R9*1) + MOVOU X3, -16(CX)(R9*1) + MOVQ SI, CX + +emit_literal_done_match_emit_encodeBetterBlockAsm12B: + ADDL R12, DX + ADDL $0x04, R12 + MOVL DX, 12(SP) + + // emitCopy + CMPL R12, $0x40 + JBE two_byte_offset_short_match_nolit_encodeBetterBlockAsm12B + CMPL R8, $0x00000800 + JAE long_offset_short_match_nolit_encodeBetterBlockAsm12B + MOVL $0x00000001, SI + LEAL 16(SI), SI + MOVB R8, 1(CX) + SHRL $0x08, R8 + SHLL $0x05, R8 + ORL R8, SI + MOVB SI, (CX) + ADDQ $0x02, CX + SUBL $0x08, R12 + + // emitRepeat + LEAL -4(R12), R12 + JMP cant_repeat_two_offset_match_nolit_encodeBetterBlockAsm12B_emit_copy_short_2b + MOVL R12, SI + LEAL -4(R12), R12 + CMPL SI, $0x08 + JBE repeat_two_match_nolit_encodeBetterBlockAsm12B_emit_copy_short_2b + CMPL SI, $0x0c + JAE cant_repeat_two_offset_match_nolit_encodeBetterBlockAsm12B_emit_copy_short_2b + CMPL R8, $0x00000800 + JB repeat_two_offset_match_nolit_encodeBetterBlockAsm12B_emit_copy_short_2b + +cant_repeat_two_offset_match_nolit_encodeBetterBlockAsm12B_emit_copy_short_2b: + CMPL R12, $0x00000104 + JB repeat_three_match_nolit_encodeBetterBlockAsm12B_emit_copy_short_2b + LEAL -256(R12), R12 + MOVW $0x0019, (CX) + MOVW R12, 2(CX) + ADDQ $0x04, CX + JMP match_nolit_emitcopy_end_encodeBetterBlockAsm12B + +repeat_three_match_nolit_encodeBetterBlockAsm12B_emit_copy_short_2b: + LEAL -4(R12), R12 + MOVW $0x0015, (CX) + MOVB R12, 2(CX) + ADDQ $0x03, CX + JMP match_nolit_emitcopy_end_encodeBetterBlockAsm12B + +repeat_two_match_nolit_encodeBetterBlockAsm12B_emit_copy_short_2b: + SHLL $0x02, R12 + ORL $0x01, R12 + MOVW R12, (CX) + ADDQ $0x02, CX + JMP match_nolit_emitcopy_end_encodeBetterBlockAsm12B + +repeat_two_offset_match_nolit_encodeBetterBlockAsm12B_emit_copy_short_2b: + XORQ SI, SI + LEAL 
1(SI)(R12*4), R12 + MOVB R8, 1(CX) + SARL $0x08, R8 + SHLL $0x05, R8 + ORL R8, R12 + MOVB R12, (CX) + ADDQ $0x02, CX + JMP match_nolit_emitcopy_end_encodeBetterBlockAsm12B + +long_offset_short_match_nolit_encodeBetterBlockAsm12B: + MOVB $0xee, (CX) + MOVW R8, 1(CX) + LEAL -60(R12), R12 + ADDQ $0x03, CX + + // emitRepeat + MOVL R12, SI + LEAL -4(R12), R12 + CMPL SI, $0x08 + JBE repeat_two_match_nolit_encodeBetterBlockAsm12B_emit_copy_short + CMPL SI, $0x0c + JAE cant_repeat_two_offset_match_nolit_encodeBetterBlockAsm12B_emit_copy_short + CMPL R8, $0x00000800 + JB repeat_two_offset_match_nolit_encodeBetterBlockAsm12B_emit_copy_short + +cant_repeat_two_offset_match_nolit_encodeBetterBlockAsm12B_emit_copy_short: + CMPL R12, $0x00000104 + JB repeat_three_match_nolit_encodeBetterBlockAsm12B_emit_copy_short + LEAL -256(R12), R12 + MOVW $0x0019, (CX) + MOVW R12, 2(CX) + ADDQ $0x04, CX + JMP match_nolit_emitcopy_end_encodeBetterBlockAsm12B + +repeat_three_match_nolit_encodeBetterBlockAsm12B_emit_copy_short: + LEAL -4(R12), R12 + MOVW $0x0015, (CX) + MOVB R12, 2(CX) + ADDQ $0x03, CX + JMP match_nolit_emitcopy_end_encodeBetterBlockAsm12B + +repeat_two_match_nolit_encodeBetterBlockAsm12B_emit_copy_short: + SHLL $0x02, R12 + ORL $0x01, R12 + MOVW R12, (CX) + ADDQ $0x02, CX + JMP match_nolit_emitcopy_end_encodeBetterBlockAsm12B + +repeat_two_offset_match_nolit_encodeBetterBlockAsm12B_emit_copy_short: + XORQ SI, SI + LEAL 1(SI)(R12*4), R12 + MOVB R8, 1(CX) + SARL $0x08, R8 + SHLL $0x05, R8 + ORL R8, R12 + MOVB R12, (CX) + ADDQ $0x02, CX + JMP match_nolit_emitcopy_end_encodeBetterBlockAsm12B + +two_byte_offset_short_match_nolit_encodeBetterBlockAsm12B: + MOVL R12, SI + SHLL $0x02, SI + CMPL R12, $0x0c + JAE emit_copy_three_match_nolit_encodeBetterBlockAsm12B + CMPL R8, $0x00000800 + JAE emit_copy_three_match_nolit_encodeBetterBlockAsm12B + LEAL -15(SI), SI + MOVB R8, 1(CX) + SHRL $0x08, R8 + SHLL $0x05, R8 + ORL R8, SI + MOVB SI, (CX) + ADDQ $0x02, CX + JMP match_nolit_emitcopy_end_encodeBetterBlockAsm12B + +emit_copy_three_match_nolit_encodeBetterBlockAsm12B: + LEAL -2(SI), SI + MOVB SI, (CX) + MOVW R8, 1(CX) + ADDQ $0x03, CX + JMP match_nolit_emitcopy_end_encodeBetterBlockAsm12B + +match_is_repeat_encodeBetterBlockAsm12B: + MOVL 12(SP), SI + CMPL SI, DI + JEQ emit_literal_done_match_emit_repeat_encodeBetterBlockAsm12B + MOVL DI, R9 + MOVL DI, 12(SP) + LEAQ (BX)(SI*1), R10 + SUBL SI, R9 + LEAL -1(R9), SI + CMPL SI, $0x3c + JB one_byte_match_emit_repeat_encodeBetterBlockAsm12B + CMPL SI, $0x00000100 + JB two_bytes_match_emit_repeat_encodeBetterBlockAsm12B + JB three_bytes_match_emit_repeat_encodeBetterBlockAsm12B + +three_bytes_match_emit_repeat_encodeBetterBlockAsm12B: + MOVB $0xf4, (CX) + MOVW SI, 1(CX) + ADDQ $0x03, CX + JMP memmove_long_match_emit_repeat_encodeBetterBlockAsm12B + +two_bytes_match_emit_repeat_encodeBetterBlockAsm12B: + MOVB $0xf0, (CX) + MOVB SI, 1(CX) + ADDQ $0x02, CX + CMPL SI, $0x40 + JB memmove_match_emit_repeat_encodeBetterBlockAsm12B + JMP memmove_long_match_emit_repeat_encodeBetterBlockAsm12B + +one_byte_match_emit_repeat_encodeBetterBlockAsm12B: + SHLB $0x02, SI + MOVB SI, (CX) + ADDQ $0x01, CX + +memmove_match_emit_repeat_encodeBetterBlockAsm12B: + LEAQ (CX)(R9*1), SI + + // genMemMoveShort + CMPQ R9, $0x04 + JBE emit_lit_memmove_match_emit_repeat_encodeBetterBlockAsm12B_memmove_move_4 + CMPQ R9, $0x08 + JB emit_lit_memmove_match_emit_repeat_encodeBetterBlockAsm12B_memmove_move_4through7 + CMPQ R9, $0x10 + JBE 
emit_lit_memmove_match_emit_repeat_encodeBetterBlockAsm12B_memmove_move_8through16 + CMPQ R9, $0x20 + JBE emit_lit_memmove_match_emit_repeat_encodeBetterBlockAsm12B_memmove_move_17through32 + JMP emit_lit_memmove_match_emit_repeat_encodeBetterBlockAsm12B_memmove_move_33through64 + +emit_lit_memmove_match_emit_repeat_encodeBetterBlockAsm12B_memmove_move_4: + MOVL (R10), R11 + MOVL R11, (CX) + JMP memmove_end_copy_match_emit_repeat_encodeBetterBlockAsm12B + +emit_lit_memmove_match_emit_repeat_encodeBetterBlockAsm12B_memmove_move_4through7: + MOVL (R10), R11 + MOVL -4(R10)(R9*1), R10 + MOVL R11, (CX) + MOVL R10, -4(CX)(R9*1) + JMP memmove_end_copy_match_emit_repeat_encodeBetterBlockAsm12B + +emit_lit_memmove_match_emit_repeat_encodeBetterBlockAsm12B_memmove_move_8through16: + MOVQ (R10), R11 + MOVQ -8(R10)(R9*1), R10 + MOVQ R11, (CX) + MOVQ R10, -8(CX)(R9*1) + JMP memmove_end_copy_match_emit_repeat_encodeBetterBlockAsm12B + +emit_lit_memmove_match_emit_repeat_encodeBetterBlockAsm12B_memmove_move_17through32: + MOVOU (R10), X0 + MOVOU -16(R10)(R9*1), X1 + MOVOU X0, (CX) + MOVOU X1, -16(CX)(R9*1) + JMP memmove_end_copy_match_emit_repeat_encodeBetterBlockAsm12B + +emit_lit_memmove_match_emit_repeat_encodeBetterBlockAsm12B_memmove_move_33through64: + MOVOU (R10), X0 + MOVOU 16(R10), X1 + MOVOU -32(R10)(R9*1), X2 + MOVOU -16(R10)(R9*1), X3 + MOVOU X0, (CX) + MOVOU X1, 16(CX) + MOVOU X2, -32(CX)(R9*1) + MOVOU X3, -16(CX)(R9*1) + +memmove_end_copy_match_emit_repeat_encodeBetterBlockAsm12B: + MOVQ SI, CX + JMP emit_literal_done_match_emit_repeat_encodeBetterBlockAsm12B + +memmove_long_match_emit_repeat_encodeBetterBlockAsm12B: + LEAQ (CX)(R9*1), SI + + // genMemMoveLong + MOVOU (R10), X0 + MOVOU 16(R10), X1 + MOVOU -32(R10)(R9*1), X2 + MOVOU -16(R10)(R9*1), X3 + MOVQ R9, R13 + SHRQ $0x05, R13 + MOVQ CX, R11 + ANDL $0x0000001f, R11 + MOVQ $0x00000040, R14 + SUBQ R11, R14 + DECQ R13 + JA emit_lit_memmove_long_match_emit_repeat_encodeBetterBlockAsm12Blarge_forward_sse_loop_32 + LEAQ -32(R10)(R14*1), R11 + LEAQ -32(CX)(R14*1), R15 + +emit_lit_memmove_long_match_emit_repeat_encodeBetterBlockAsm12Blarge_big_loop_back: + MOVOU (R11), X4 + MOVOU 16(R11), X5 + MOVOA X4, (R15) + MOVOA X5, 16(R15) + ADDQ $0x20, R15 + ADDQ $0x20, R11 + ADDQ $0x20, R14 + DECQ R13 + JNA emit_lit_memmove_long_match_emit_repeat_encodeBetterBlockAsm12Blarge_big_loop_back + +emit_lit_memmove_long_match_emit_repeat_encodeBetterBlockAsm12Blarge_forward_sse_loop_32: + MOVOU -32(R10)(R14*1), X4 + MOVOU -16(R10)(R14*1), X5 + MOVOA X4, -32(CX)(R14*1) + MOVOA X5, -16(CX)(R14*1) + ADDQ $0x20, R14 + CMPQ R9, R14 + JAE emit_lit_memmove_long_match_emit_repeat_encodeBetterBlockAsm12Blarge_forward_sse_loop_32 + MOVOU X0, (CX) + MOVOU X1, 16(CX) + MOVOU X2, -32(CX)(R9*1) + MOVOU X3, -16(CX)(R9*1) + MOVQ SI, CX + +emit_literal_done_match_emit_repeat_encodeBetterBlockAsm12B: + ADDL R12, DX + ADDL $0x04, R12 + MOVL DX, 12(SP) + + // emitRepeat + MOVL R12, SI + LEAL -4(R12), R12 + CMPL SI, $0x08 + JBE repeat_two_match_nolit_repeat_encodeBetterBlockAsm12B + CMPL SI, $0x0c + JAE cant_repeat_two_offset_match_nolit_repeat_encodeBetterBlockAsm12B + CMPL R8, $0x00000800 + JB repeat_two_offset_match_nolit_repeat_encodeBetterBlockAsm12B + +cant_repeat_two_offset_match_nolit_repeat_encodeBetterBlockAsm12B: + CMPL R12, $0x00000104 + JB repeat_three_match_nolit_repeat_encodeBetterBlockAsm12B + LEAL -256(R12), R12 + MOVW $0x0019, (CX) + MOVW R12, 2(CX) + ADDQ $0x04, CX + JMP match_nolit_emitcopy_end_encodeBetterBlockAsm12B + 
+repeat_three_match_nolit_repeat_encodeBetterBlockAsm12B: + LEAL -4(R12), R12 + MOVW $0x0015, (CX) + MOVB R12, 2(CX) + ADDQ $0x03, CX + JMP match_nolit_emitcopy_end_encodeBetterBlockAsm12B + +repeat_two_match_nolit_repeat_encodeBetterBlockAsm12B: + SHLL $0x02, R12 + ORL $0x01, R12 + MOVW R12, (CX) + ADDQ $0x02, CX + JMP match_nolit_emitcopy_end_encodeBetterBlockAsm12B + +repeat_two_offset_match_nolit_repeat_encodeBetterBlockAsm12B: + XORQ SI, SI + LEAL 1(SI)(R12*4), R12 + MOVB R8, 1(CX) + SARL $0x08, R8 + SHLL $0x05, R8 + ORL R8, R12 + MOVB R12, (CX) + ADDQ $0x02, CX + +match_nolit_emitcopy_end_encodeBetterBlockAsm12B: + CMPL DX, 8(SP) + JAE emit_remainder_encodeBetterBlockAsm12B + CMPQ CX, (SP) + JB match_nolit_dst_ok_encodeBetterBlockAsm12B + MOVQ $0x00000000, ret+56(FP) + RET + +match_nolit_dst_ok_encodeBetterBlockAsm12B: + MOVQ $0x0000cf1bbcdcbf9b, SI + MOVQ $0x9e3779b1, R8 + LEAQ 1(DI), DI + LEAQ -2(DX), R9 + MOVQ (BX)(DI*1), R10 + MOVQ 1(BX)(DI*1), R11 + MOVQ (BX)(R9*1), R12 + MOVQ 1(BX)(R9*1), R13 + SHLQ $0x10, R10 + IMULQ SI, R10 + SHRQ $0x32, R10 + SHLQ $0x20, R11 + IMULQ R8, R11 + SHRQ $0x34, R11 + SHLQ $0x10, R12 + IMULQ SI, R12 + SHRQ $0x32, R12 + SHLQ $0x20, R13 + IMULQ R8, R13 + SHRQ $0x34, R13 + LEAQ 1(DI), R8 + LEAQ 1(R9), R14 + MOVL DI, (AX)(R10*4) + MOVL R9, (AX)(R12*4) + MOVL R8, 65536(AX)(R11*4) + MOVL R14, 65536(AX)(R13*4) + LEAQ 1(R9)(DI*1), R8 + SHRQ $0x01, R8 + ADDQ $0x01, DI + SUBQ $0x01, R9 + +index_loop_encodeBetterBlockAsm12B: + CMPQ R8, R9 + JAE search_loop_encodeBetterBlockAsm12B + MOVQ (BX)(DI*1), R10 + MOVQ (BX)(R8*1), R11 + SHLQ $0x10, R10 + IMULQ SI, R10 + SHRQ $0x32, R10 + SHLQ $0x10, R11 + IMULQ SI, R11 + SHRQ $0x32, R11 + MOVL DI, (AX)(R10*4) + MOVL R8, (AX)(R11*4) + ADDQ $0x02, DI + ADDQ $0x02, R8 + JMP index_loop_encodeBetterBlockAsm12B + +emit_remainder_encodeBetterBlockAsm12B: + MOVQ src_len+32(FP), AX + SUBL 12(SP), AX + LEAQ 3(CX)(AX*1), AX + CMPQ AX, (SP) + JB emit_remainder_ok_encodeBetterBlockAsm12B + MOVQ $0x00000000, ret+56(FP) + RET + +emit_remainder_ok_encodeBetterBlockAsm12B: + MOVQ src_len+32(FP), AX + MOVL 12(SP), DX + CMPL DX, AX + JEQ emit_literal_done_emit_remainder_encodeBetterBlockAsm12B + MOVL AX, SI + MOVL AX, 12(SP) + LEAQ (BX)(DX*1), AX + SUBL DX, SI + LEAL -1(SI), DX + CMPL DX, $0x3c + JB one_byte_emit_remainder_encodeBetterBlockAsm12B + CMPL DX, $0x00000100 + JB two_bytes_emit_remainder_encodeBetterBlockAsm12B + JB three_bytes_emit_remainder_encodeBetterBlockAsm12B + +three_bytes_emit_remainder_encodeBetterBlockAsm12B: + MOVB $0xf4, (CX) + MOVW DX, 1(CX) + ADDQ $0x03, CX + JMP memmove_long_emit_remainder_encodeBetterBlockAsm12B + +two_bytes_emit_remainder_encodeBetterBlockAsm12B: + MOVB $0xf0, (CX) + MOVB DL, 1(CX) + ADDQ $0x02, CX + CMPL DX, $0x40 + JB memmove_emit_remainder_encodeBetterBlockAsm12B + JMP memmove_long_emit_remainder_encodeBetterBlockAsm12B + +one_byte_emit_remainder_encodeBetterBlockAsm12B: + SHLB $0x02, DL + MOVB DL, (CX) + ADDQ $0x01, CX + +memmove_emit_remainder_encodeBetterBlockAsm12B: + LEAQ (CX)(SI*1), DX + MOVL SI, BX + + // genMemMoveShort + CMPQ BX, $0x03 + JB emit_lit_memmove_emit_remainder_encodeBetterBlockAsm12B_memmove_move_1or2 + JE emit_lit_memmove_emit_remainder_encodeBetterBlockAsm12B_memmove_move_3 + CMPQ BX, $0x08 + JB emit_lit_memmove_emit_remainder_encodeBetterBlockAsm12B_memmove_move_4through7 + CMPQ BX, $0x10 + JBE emit_lit_memmove_emit_remainder_encodeBetterBlockAsm12B_memmove_move_8through16 + CMPQ BX, $0x20 + JBE 
emit_lit_memmove_emit_remainder_encodeBetterBlockAsm12B_memmove_move_17through32 + JMP emit_lit_memmove_emit_remainder_encodeBetterBlockAsm12B_memmove_move_33through64 + +emit_lit_memmove_emit_remainder_encodeBetterBlockAsm12B_memmove_move_1or2: + MOVB (AX), SI + MOVB -1(AX)(BX*1), AL + MOVB SI, (CX) + MOVB AL, -1(CX)(BX*1) + JMP memmove_end_copy_emit_remainder_encodeBetterBlockAsm12B + +emit_lit_memmove_emit_remainder_encodeBetterBlockAsm12B_memmove_move_3: + MOVW (AX), SI + MOVB 2(AX), AL + MOVW SI, (CX) + MOVB AL, 2(CX) + JMP memmove_end_copy_emit_remainder_encodeBetterBlockAsm12B + +emit_lit_memmove_emit_remainder_encodeBetterBlockAsm12B_memmove_move_4through7: + MOVL (AX), SI + MOVL -4(AX)(BX*1), AX + MOVL SI, (CX) + MOVL AX, -4(CX)(BX*1) + JMP memmove_end_copy_emit_remainder_encodeBetterBlockAsm12B + +emit_lit_memmove_emit_remainder_encodeBetterBlockAsm12B_memmove_move_8through16: + MOVQ (AX), SI + MOVQ -8(AX)(BX*1), AX + MOVQ SI, (CX) + MOVQ AX, -8(CX)(BX*1) + JMP memmove_end_copy_emit_remainder_encodeBetterBlockAsm12B + +emit_lit_memmove_emit_remainder_encodeBetterBlockAsm12B_memmove_move_17through32: + MOVOU (AX), X0 + MOVOU -16(AX)(BX*1), X1 + MOVOU X0, (CX) + MOVOU X1, -16(CX)(BX*1) + JMP memmove_end_copy_emit_remainder_encodeBetterBlockAsm12B + +emit_lit_memmove_emit_remainder_encodeBetterBlockAsm12B_memmove_move_33through64: + MOVOU (AX), X0 + MOVOU 16(AX), X1 + MOVOU -32(AX)(BX*1), X2 + MOVOU -16(AX)(BX*1), X3 + MOVOU X0, (CX) + MOVOU X1, 16(CX) + MOVOU X2, -32(CX)(BX*1) + MOVOU X3, -16(CX)(BX*1) + +memmove_end_copy_emit_remainder_encodeBetterBlockAsm12B: + MOVQ DX, CX + JMP emit_literal_done_emit_remainder_encodeBetterBlockAsm12B + +memmove_long_emit_remainder_encodeBetterBlockAsm12B: + LEAQ (CX)(SI*1), DX + MOVL SI, BX + + // genMemMoveLong + MOVOU (AX), X0 + MOVOU 16(AX), X1 + MOVOU -32(AX)(BX*1), X2 + MOVOU -16(AX)(BX*1), X3 + MOVQ BX, DI + SHRQ $0x05, DI + MOVQ CX, SI + ANDL $0x0000001f, SI + MOVQ $0x00000040, R8 + SUBQ SI, R8 + DECQ DI + JA emit_lit_memmove_long_emit_remainder_encodeBetterBlockAsm12Blarge_forward_sse_loop_32 + LEAQ -32(AX)(R8*1), SI + LEAQ -32(CX)(R8*1), R9 + +emit_lit_memmove_long_emit_remainder_encodeBetterBlockAsm12Blarge_big_loop_back: + MOVOU (SI), X4 + MOVOU 16(SI), X5 + MOVOA X4, (R9) + MOVOA X5, 16(R9) + ADDQ $0x20, R9 + ADDQ $0x20, SI + ADDQ $0x20, R8 + DECQ DI + JNA emit_lit_memmove_long_emit_remainder_encodeBetterBlockAsm12Blarge_big_loop_back + +emit_lit_memmove_long_emit_remainder_encodeBetterBlockAsm12Blarge_forward_sse_loop_32: + MOVOU -32(AX)(R8*1), X4 + MOVOU -16(AX)(R8*1), X5 + MOVOA X4, -32(CX)(R8*1) + MOVOA X5, -16(CX)(R8*1) + ADDQ $0x20, R8 + CMPQ BX, R8 + JAE emit_lit_memmove_long_emit_remainder_encodeBetterBlockAsm12Blarge_forward_sse_loop_32 + MOVOU X0, (CX) + MOVOU X1, 16(CX) + MOVOU X2, -32(CX)(BX*1) + MOVOU X3, -16(CX)(BX*1) + MOVQ DX, CX + +emit_literal_done_emit_remainder_encodeBetterBlockAsm12B: + MOVQ dst_base+0(FP), AX + SUBQ AX, CX + MOVQ CX, ret+56(FP) + RET + +// func encodeBetterBlockAsm10B(dst []byte, src []byte, tmp *[20480]byte) int +// Requires: BMI, SSE2 +TEXT ·encodeBetterBlockAsm10B(SB), $24-64 + MOVQ tmp+48(FP), AX + MOVQ dst_base+0(FP), CX + MOVQ $0x000000a0, DX + MOVQ AX, BX + PXOR X0, X0 + +zero_loop_encodeBetterBlockAsm10B: + MOVOU X0, (BX) + MOVOU X0, 16(BX) + MOVOU X0, 32(BX) + MOVOU X0, 48(BX) + MOVOU X0, 64(BX) + MOVOU X0, 80(BX) + MOVOU X0, 96(BX) + MOVOU X0, 112(BX) + ADDQ $0x80, BX + DECQ DX + JNZ zero_loop_encodeBetterBlockAsm10B + MOVL $0x00000000, 12(SP) + MOVQ src_len+32(FP), DX + LEAQ -6(DX), 
BX + LEAQ -8(DX), SI + MOVL SI, 8(SP) + SHRQ $0x05, DX + SUBL DX, BX + LEAQ (CX)(BX*1), BX + MOVQ BX, (SP) + MOVL $0x00000001, DX + MOVL $0x00000000, 16(SP) + MOVQ src_base+24(FP), BX + +search_loop_encodeBetterBlockAsm10B: + MOVL DX, SI + SUBL 12(SP), SI + SHRL $0x05, SI + LEAL 1(DX)(SI*1), SI + CMPL SI, 8(SP) + JAE emit_remainder_encodeBetterBlockAsm10B + MOVQ (BX)(DX*1), DI + MOVL SI, 20(SP) + MOVQ $0x0000cf1bbcdcbf9b, R9 + MOVQ $0x9e3779b1, SI + MOVQ DI, R10 + MOVQ DI, R11 + SHLQ $0x10, R10 + IMULQ R9, R10 + SHRQ $0x34, R10 + SHLQ $0x20, R11 + IMULQ SI, R11 + SHRQ $0x36, R11 + MOVL (AX)(R10*4), SI + MOVL 16384(AX)(R11*4), R8 + MOVL DX, (AX)(R10*4) + MOVL DX, 16384(AX)(R11*4) + MOVQ (BX)(SI*1), R10 + MOVQ (BX)(R8*1), R11 + CMPQ R10, DI + JEQ candidate_match_encodeBetterBlockAsm10B + CMPQ R11, DI + JNE no_short_found_encodeBetterBlockAsm10B + MOVL R8, SI + JMP candidate_match_encodeBetterBlockAsm10B + +no_short_found_encodeBetterBlockAsm10B: + CMPL R10, DI + JEQ candidate_match_encodeBetterBlockAsm10B + CMPL R11, DI + JEQ candidateS_match_encodeBetterBlockAsm10B + MOVL 20(SP), DX + JMP search_loop_encodeBetterBlockAsm10B + +candidateS_match_encodeBetterBlockAsm10B: + SHRQ $0x08, DI + MOVQ DI, R10 + SHLQ $0x10, R10 + IMULQ R9, R10 + SHRQ $0x34, R10 + MOVL (AX)(R10*4), SI + INCL DX + MOVL DX, (AX)(R10*4) + CMPL (BX)(SI*1), DI + JEQ candidate_match_encodeBetterBlockAsm10B + DECL DX + MOVL R8, SI + +candidate_match_encodeBetterBlockAsm10B: + MOVL 12(SP), DI + TESTL SI, SI + JZ match_extend_back_end_encodeBetterBlockAsm10B + +match_extend_back_loop_encodeBetterBlockAsm10B: + CMPL DX, DI + JBE match_extend_back_end_encodeBetterBlockAsm10B + MOVB -1(BX)(SI*1), R8 + MOVB -1(BX)(DX*1), R9 + CMPB R8, R9 + JNE match_extend_back_end_encodeBetterBlockAsm10B + LEAL -1(DX), DX + DECL SI + JZ match_extend_back_end_encodeBetterBlockAsm10B + JMP match_extend_back_loop_encodeBetterBlockAsm10B + +match_extend_back_end_encodeBetterBlockAsm10B: + MOVL DX, DI + SUBL 12(SP), DI + LEAQ 3(CX)(DI*1), DI + CMPQ DI, (SP) + JB match_dst_size_check_encodeBetterBlockAsm10B + MOVQ $0x00000000, ret+56(FP) + RET + +match_dst_size_check_encodeBetterBlockAsm10B: + MOVL DX, DI + ADDL $0x04, DX + ADDL $0x04, SI + MOVQ src_len+32(FP), R8 + SUBL DX, R8 + LEAQ (BX)(DX*1), R9 + LEAQ (BX)(SI*1), R10 + + // matchLen + XORL R12, R12 + +matchlen_loopback_16_match_nolit_encodeBetterBlockAsm10B: + CMPL R8, $0x10 + JB matchlen_match8_match_nolit_encodeBetterBlockAsm10B + MOVQ (R9)(R12*1), R11 + MOVQ 8(R9)(R12*1), R13 + XORQ (R10)(R12*1), R11 + JNZ matchlen_bsf_8_match_nolit_encodeBetterBlockAsm10B + XORQ 8(R10)(R12*1), R13 + JNZ matchlen_bsf_16match_nolit_encodeBetterBlockAsm10B + LEAL -16(R8), R8 + LEAL 16(R12), R12 + JMP matchlen_loopback_16_match_nolit_encodeBetterBlockAsm10B + +matchlen_bsf_16match_nolit_encodeBetterBlockAsm10B: +#ifdef GOAMD64_v3 + TZCNTQ R13, R13 + +#else + BSFQ R13, R13 + +#endif + SARQ $0x03, R13 + LEAL 8(R12)(R13*1), R12 + JMP match_nolit_end_encodeBetterBlockAsm10B + +matchlen_match8_match_nolit_encodeBetterBlockAsm10B: + CMPL R8, $0x08 + JB matchlen_match4_match_nolit_encodeBetterBlockAsm10B + MOVQ (R9)(R12*1), R11 + XORQ (R10)(R12*1), R11 + JNZ matchlen_bsf_8_match_nolit_encodeBetterBlockAsm10B + LEAL -8(R8), R8 + LEAL 8(R12), R12 + JMP matchlen_match4_match_nolit_encodeBetterBlockAsm10B + +matchlen_bsf_8_match_nolit_encodeBetterBlockAsm10B: +#ifdef GOAMD64_v3 + TZCNTQ R11, R11 + +#else + BSFQ R11, R11 + +#endif + SARQ $0x03, R11 + LEAL (R12)(R11*1), R12 + JMP match_nolit_end_encodeBetterBlockAsm10B + 
+matchlen_match4_match_nolit_encodeBetterBlockAsm10B: + CMPL R8, $0x04 + JB matchlen_match2_match_nolit_encodeBetterBlockAsm10B + MOVL (R9)(R12*1), R11 + CMPL (R10)(R12*1), R11 + JNE matchlen_match2_match_nolit_encodeBetterBlockAsm10B + LEAL -4(R8), R8 + LEAL 4(R12), R12 + +matchlen_match2_match_nolit_encodeBetterBlockAsm10B: + CMPL R8, $0x01 + JE matchlen_match1_match_nolit_encodeBetterBlockAsm10B + JB match_nolit_end_encodeBetterBlockAsm10B + MOVW (R9)(R12*1), R11 + CMPW (R10)(R12*1), R11 + JNE matchlen_match1_match_nolit_encodeBetterBlockAsm10B + LEAL 2(R12), R12 + SUBL $0x02, R8 + JZ match_nolit_end_encodeBetterBlockAsm10B + +matchlen_match1_match_nolit_encodeBetterBlockAsm10B: + MOVB (R9)(R12*1), R11 + CMPB (R10)(R12*1), R11 + JNE match_nolit_end_encodeBetterBlockAsm10B + LEAL 1(R12), R12 + +match_nolit_end_encodeBetterBlockAsm10B: + MOVL DX, R8 + SUBL SI, R8 + + // Check if repeat + CMPL 16(SP), R8 + JEQ match_is_repeat_encodeBetterBlockAsm10B + MOVL R8, 16(SP) + MOVL 12(SP), SI + CMPL SI, DI + JEQ emit_literal_done_match_emit_encodeBetterBlockAsm10B + MOVL DI, R9 + MOVL DI, 12(SP) + LEAQ (BX)(SI*1), R10 + SUBL SI, R9 + LEAL -1(R9), SI + CMPL SI, $0x3c + JB one_byte_match_emit_encodeBetterBlockAsm10B + CMPL SI, $0x00000100 + JB two_bytes_match_emit_encodeBetterBlockAsm10B + JB three_bytes_match_emit_encodeBetterBlockAsm10B + +three_bytes_match_emit_encodeBetterBlockAsm10B: + MOVB $0xf4, (CX) + MOVW SI, 1(CX) + ADDQ $0x03, CX + JMP memmove_long_match_emit_encodeBetterBlockAsm10B + +two_bytes_match_emit_encodeBetterBlockAsm10B: + MOVB $0xf0, (CX) + MOVB SI, 1(CX) + ADDQ $0x02, CX + CMPL SI, $0x40 + JB memmove_match_emit_encodeBetterBlockAsm10B + JMP memmove_long_match_emit_encodeBetterBlockAsm10B + +one_byte_match_emit_encodeBetterBlockAsm10B: + SHLB $0x02, SI + MOVB SI, (CX) + ADDQ $0x01, CX + +memmove_match_emit_encodeBetterBlockAsm10B: + LEAQ (CX)(R9*1), SI + + // genMemMoveShort + CMPQ R9, $0x04 + JBE emit_lit_memmove_match_emit_encodeBetterBlockAsm10B_memmove_move_4 + CMPQ R9, $0x08 + JB emit_lit_memmove_match_emit_encodeBetterBlockAsm10B_memmove_move_4through7 + CMPQ R9, $0x10 + JBE emit_lit_memmove_match_emit_encodeBetterBlockAsm10B_memmove_move_8through16 + CMPQ R9, $0x20 + JBE emit_lit_memmove_match_emit_encodeBetterBlockAsm10B_memmove_move_17through32 + JMP emit_lit_memmove_match_emit_encodeBetterBlockAsm10B_memmove_move_33through64 + +emit_lit_memmove_match_emit_encodeBetterBlockAsm10B_memmove_move_4: + MOVL (R10), R11 + MOVL R11, (CX) + JMP memmove_end_copy_match_emit_encodeBetterBlockAsm10B + +emit_lit_memmove_match_emit_encodeBetterBlockAsm10B_memmove_move_4through7: + MOVL (R10), R11 + MOVL -4(R10)(R9*1), R10 + MOVL R11, (CX) + MOVL R10, -4(CX)(R9*1) + JMP memmove_end_copy_match_emit_encodeBetterBlockAsm10B + +emit_lit_memmove_match_emit_encodeBetterBlockAsm10B_memmove_move_8through16: + MOVQ (R10), R11 + MOVQ -8(R10)(R9*1), R10 + MOVQ R11, (CX) + MOVQ R10, -8(CX)(R9*1) + JMP memmove_end_copy_match_emit_encodeBetterBlockAsm10B + +emit_lit_memmove_match_emit_encodeBetterBlockAsm10B_memmove_move_17through32: + MOVOU (R10), X0 + MOVOU -16(R10)(R9*1), X1 + MOVOU X0, (CX) + MOVOU X1, -16(CX)(R9*1) + JMP memmove_end_copy_match_emit_encodeBetterBlockAsm10B + +emit_lit_memmove_match_emit_encodeBetterBlockAsm10B_memmove_move_33through64: + MOVOU (R10), X0 + MOVOU 16(R10), X1 + MOVOU -32(R10)(R9*1), X2 + MOVOU -16(R10)(R9*1), X3 + MOVOU X0, (CX) + MOVOU X1, 16(CX) + MOVOU X2, -32(CX)(R9*1) + MOVOU X3, -16(CX)(R9*1) + +memmove_end_copy_match_emit_encodeBetterBlockAsm10B: + MOVQ SI, 
CX + JMP emit_literal_done_match_emit_encodeBetterBlockAsm10B + +memmove_long_match_emit_encodeBetterBlockAsm10B: + LEAQ (CX)(R9*1), SI + + // genMemMoveLong + MOVOU (R10), X0 + MOVOU 16(R10), X1 + MOVOU -32(R10)(R9*1), X2 + MOVOU -16(R10)(R9*1), X3 + MOVQ R9, R13 + SHRQ $0x05, R13 + MOVQ CX, R11 + ANDL $0x0000001f, R11 + MOVQ $0x00000040, R14 + SUBQ R11, R14 + DECQ R13 + JA emit_lit_memmove_long_match_emit_encodeBetterBlockAsm10Blarge_forward_sse_loop_32 + LEAQ -32(R10)(R14*1), R11 + LEAQ -32(CX)(R14*1), R15 + +emit_lit_memmove_long_match_emit_encodeBetterBlockAsm10Blarge_big_loop_back: + MOVOU (R11), X4 + MOVOU 16(R11), X5 + MOVOA X4, (R15) + MOVOA X5, 16(R15) + ADDQ $0x20, R15 + ADDQ $0x20, R11 + ADDQ $0x20, R14 + DECQ R13 + JNA emit_lit_memmove_long_match_emit_encodeBetterBlockAsm10Blarge_big_loop_back + +emit_lit_memmove_long_match_emit_encodeBetterBlockAsm10Blarge_forward_sse_loop_32: + MOVOU -32(R10)(R14*1), X4 + MOVOU -16(R10)(R14*1), X5 + MOVOA X4, -32(CX)(R14*1) + MOVOA X5, -16(CX)(R14*1) + ADDQ $0x20, R14 + CMPQ R9, R14 + JAE emit_lit_memmove_long_match_emit_encodeBetterBlockAsm10Blarge_forward_sse_loop_32 + MOVOU X0, (CX) + MOVOU X1, 16(CX) + MOVOU X2, -32(CX)(R9*1) + MOVOU X3, -16(CX)(R9*1) + MOVQ SI, CX + +emit_literal_done_match_emit_encodeBetterBlockAsm10B: + ADDL R12, DX + ADDL $0x04, R12 + MOVL DX, 12(SP) + + // emitCopy + CMPL R12, $0x40 + JBE two_byte_offset_short_match_nolit_encodeBetterBlockAsm10B + CMPL R8, $0x00000800 + JAE long_offset_short_match_nolit_encodeBetterBlockAsm10B + MOVL $0x00000001, SI + LEAL 16(SI), SI + MOVB R8, 1(CX) + SHRL $0x08, R8 + SHLL $0x05, R8 + ORL R8, SI + MOVB SI, (CX) + ADDQ $0x02, CX + SUBL $0x08, R12 + + // emitRepeat + LEAL -4(R12), R12 + JMP cant_repeat_two_offset_match_nolit_encodeBetterBlockAsm10B_emit_copy_short_2b + MOVL R12, SI + LEAL -4(R12), R12 + CMPL SI, $0x08 + JBE repeat_two_match_nolit_encodeBetterBlockAsm10B_emit_copy_short_2b + CMPL SI, $0x0c + JAE cant_repeat_two_offset_match_nolit_encodeBetterBlockAsm10B_emit_copy_short_2b + CMPL R8, $0x00000800 + JB repeat_two_offset_match_nolit_encodeBetterBlockAsm10B_emit_copy_short_2b + +cant_repeat_two_offset_match_nolit_encodeBetterBlockAsm10B_emit_copy_short_2b: + CMPL R12, $0x00000104 + JB repeat_three_match_nolit_encodeBetterBlockAsm10B_emit_copy_short_2b + LEAL -256(R12), R12 + MOVW $0x0019, (CX) + MOVW R12, 2(CX) + ADDQ $0x04, CX + JMP match_nolit_emitcopy_end_encodeBetterBlockAsm10B + +repeat_three_match_nolit_encodeBetterBlockAsm10B_emit_copy_short_2b: + LEAL -4(R12), R12 + MOVW $0x0015, (CX) + MOVB R12, 2(CX) + ADDQ $0x03, CX + JMP match_nolit_emitcopy_end_encodeBetterBlockAsm10B + +repeat_two_match_nolit_encodeBetterBlockAsm10B_emit_copy_short_2b: + SHLL $0x02, R12 + ORL $0x01, R12 + MOVW R12, (CX) + ADDQ $0x02, CX + JMP match_nolit_emitcopy_end_encodeBetterBlockAsm10B + +repeat_two_offset_match_nolit_encodeBetterBlockAsm10B_emit_copy_short_2b: + XORQ SI, SI + LEAL 1(SI)(R12*4), R12 + MOVB R8, 1(CX) + SARL $0x08, R8 + SHLL $0x05, R8 + ORL R8, R12 + MOVB R12, (CX) + ADDQ $0x02, CX + JMP match_nolit_emitcopy_end_encodeBetterBlockAsm10B + +long_offset_short_match_nolit_encodeBetterBlockAsm10B: + MOVB $0xee, (CX) + MOVW R8, 1(CX) + LEAL -60(R12), R12 + ADDQ $0x03, CX + + // emitRepeat + MOVL R12, SI + LEAL -4(R12), R12 + CMPL SI, $0x08 + JBE repeat_two_match_nolit_encodeBetterBlockAsm10B_emit_copy_short + CMPL SI, $0x0c + JAE cant_repeat_two_offset_match_nolit_encodeBetterBlockAsm10B_emit_copy_short + CMPL R8, $0x00000800 + JB 
repeat_two_offset_match_nolit_encodeBetterBlockAsm10B_emit_copy_short + +cant_repeat_two_offset_match_nolit_encodeBetterBlockAsm10B_emit_copy_short: + CMPL R12, $0x00000104 + JB repeat_three_match_nolit_encodeBetterBlockAsm10B_emit_copy_short + LEAL -256(R12), R12 + MOVW $0x0019, (CX) + MOVW R12, 2(CX) + ADDQ $0x04, CX + JMP match_nolit_emitcopy_end_encodeBetterBlockAsm10B + +repeat_three_match_nolit_encodeBetterBlockAsm10B_emit_copy_short: + LEAL -4(R12), R12 + MOVW $0x0015, (CX) + MOVB R12, 2(CX) + ADDQ $0x03, CX + JMP match_nolit_emitcopy_end_encodeBetterBlockAsm10B + +repeat_two_match_nolit_encodeBetterBlockAsm10B_emit_copy_short: + SHLL $0x02, R12 + ORL $0x01, R12 + MOVW R12, (CX) + ADDQ $0x02, CX + JMP match_nolit_emitcopy_end_encodeBetterBlockAsm10B + +repeat_two_offset_match_nolit_encodeBetterBlockAsm10B_emit_copy_short: + XORQ SI, SI + LEAL 1(SI)(R12*4), R12 + MOVB R8, 1(CX) + SARL $0x08, R8 + SHLL $0x05, R8 + ORL R8, R12 + MOVB R12, (CX) + ADDQ $0x02, CX + JMP match_nolit_emitcopy_end_encodeBetterBlockAsm10B + +two_byte_offset_short_match_nolit_encodeBetterBlockAsm10B: + MOVL R12, SI + SHLL $0x02, SI + CMPL R12, $0x0c + JAE emit_copy_three_match_nolit_encodeBetterBlockAsm10B + CMPL R8, $0x00000800 + JAE emit_copy_three_match_nolit_encodeBetterBlockAsm10B + LEAL -15(SI), SI + MOVB R8, 1(CX) + SHRL $0x08, R8 + SHLL $0x05, R8 + ORL R8, SI + MOVB SI, (CX) + ADDQ $0x02, CX + JMP match_nolit_emitcopy_end_encodeBetterBlockAsm10B + +emit_copy_three_match_nolit_encodeBetterBlockAsm10B: + LEAL -2(SI), SI + MOVB SI, (CX) + MOVW R8, 1(CX) + ADDQ $0x03, CX + JMP match_nolit_emitcopy_end_encodeBetterBlockAsm10B + +match_is_repeat_encodeBetterBlockAsm10B: + MOVL 12(SP), SI + CMPL SI, DI + JEQ emit_literal_done_match_emit_repeat_encodeBetterBlockAsm10B + MOVL DI, R9 + MOVL DI, 12(SP) + LEAQ (BX)(SI*1), R10 + SUBL SI, R9 + LEAL -1(R9), SI + CMPL SI, $0x3c + JB one_byte_match_emit_repeat_encodeBetterBlockAsm10B + CMPL SI, $0x00000100 + JB two_bytes_match_emit_repeat_encodeBetterBlockAsm10B + JB three_bytes_match_emit_repeat_encodeBetterBlockAsm10B + +three_bytes_match_emit_repeat_encodeBetterBlockAsm10B: + MOVB $0xf4, (CX) + MOVW SI, 1(CX) + ADDQ $0x03, CX + JMP memmove_long_match_emit_repeat_encodeBetterBlockAsm10B + +two_bytes_match_emit_repeat_encodeBetterBlockAsm10B: + MOVB $0xf0, (CX) + MOVB SI, 1(CX) + ADDQ $0x02, CX + CMPL SI, $0x40 + JB memmove_match_emit_repeat_encodeBetterBlockAsm10B + JMP memmove_long_match_emit_repeat_encodeBetterBlockAsm10B + +one_byte_match_emit_repeat_encodeBetterBlockAsm10B: + SHLB $0x02, SI + MOVB SI, (CX) + ADDQ $0x01, CX + +memmove_match_emit_repeat_encodeBetterBlockAsm10B: + LEAQ (CX)(R9*1), SI + + // genMemMoveShort + CMPQ R9, $0x04 + JBE emit_lit_memmove_match_emit_repeat_encodeBetterBlockAsm10B_memmove_move_4 + CMPQ R9, $0x08 + JB emit_lit_memmove_match_emit_repeat_encodeBetterBlockAsm10B_memmove_move_4through7 + CMPQ R9, $0x10 + JBE emit_lit_memmove_match_emit_repeat_encodeBetterBlockAsm10B_memmove_move_8through16 + CMPQ R9, $0x20 + JBE emit_lit_memmove_match_emit_repeat_encodeBetterBlockAsm10B_memmove_move_17through32 + JMP emit_lit_memmove_match_emit_repeat_encodeBetterBlockAsm10B_memmove_move_33through64 + +emit_lit_memmove_match_emit_repeat_encodeBetterBlockAsm10B_memmove_move_4: + MOVL (R10), R11 + MOVL R11, (CX) + JMP memmove_end_copy_match_emit_repeat_encodeBetterBlockAsm10B + +emit_lit_memmove_match_emit_repeat_encodeBetterBlockAsm10B_memmove_move_4through7: + MOVL (R10), R11 + MOVL -4(R10)(R9*1), R10 + MOVL R11, (CX) + MOVL R10, -4(CX)(R9*1) + JMP 
memmove_end_copy_match_emit_repeat_encodeBetterBlockAsm10B + +emit_lit_memmove_match_emit_repeat_encodeBetterBlockAsm10B_memmove_move_8through16: + MOVQ (R10), R11 + MOVQ -8(R10)(R9*1), R10 + MOVQ R11, (CX) + MOVQ R10, -8(CX)(R9*1) + JMP memmove_end_copy_match_emit_repeat_encodeBetterBlockAsm10B + +emit_lit_memmove_match_emit_repeat_encodeBetterBlockAsm10B_memmove_move_17through32: + MOVOU (R10), X0 + MOVOU -16(R10)(R9*1), X1 + MOVOU X0, (CX) + MOVOU X1, -16(CX)(R9*1) + JMP memmove_end_copy_match_emit_repeat_encodeBetterBlockAsm10B + +emit_lit_memmove_match_emit_repeat_encodeBetterBlockAsm10B_memmove_move_33through64: + MOVOU (R10), X0 + MOVOU 16(R10), X1 + MOVOU -32(R10)(R9*1), X2 + MOVOU -16(R10)(R9*1), X3 + MOVOU X0, (CX) + MOVOU X1, 16(CX) + MOVOU X2, -32(CX)(R9*1) + MOVOU X3, -16(CX)(R9*1) + +memmove_end_copy_match_emit_repeat_encodeBetterBlockAsm10B: + MOVQ SI, CX + JMP emit_literal_done_match_emit_repeat_encodeBetterBlockAsm10B + +memmove_long_match_emit_repeat_encodeBetterBlockAsm10B: + LEAQ (CX)(R9*1), SI + + // genMemMoveLong + MOVOU (R10), X0 + MOVOU 16(R10), X1 + MOVOU -32(R10)(R9*1), X2 + MOVOU -16(R10)(R9*1), X3 + MOVQ R9, R13 + SHRQ $0x05, R13 + MOVQ CX, R11 + ANDL $0x0000001f, R11 + MOVQ $0x00000040, R14 + SUBQ R11, R14 + DECQ R13 + JA emit_lit_memmove_long_match_emit_repeat_encodeBetterBlockAsm10Blarge_forward_sse_loop_32 + LEAQ -32(R10)(R14*1), R11 + LEAQ -32(CX)(R14*1), R15 + +emit_lit_memmove_long_match_emit_repeat_encodeBetterBlockAsm10Blarge_big_loop_back: + MOVOU (R11), X4 + MOVOU 16(R11), X5 + MOVOA X4, (R15) + MOVOA X5, 16(R15) + ADDQ $0x20, R15 + ADDQ $0x20, R11 + ADDQ $0x20, R14 + DECQ R13 + JNA emit_lit_memmove_long_match_emit_repeat_encodeBetterBlockAsm10Blarge_big_loop_back + +emit_lit_memmove_long_match_emit_repeat_encodeBetterBlockAsm10Blarge_forward_sse_loop_32: + MOVOU -32(R10)(R14*1), X4 + MOVOU -16(R10)(R14*1), X5 + MOVOA X4, -32(CX)(R14*1) + MOVOA X5, -16(CX)(R14*1) + ADDQ $0x20, R14 + CMPQ R9, R14 + JAE emit_lit_memmove_long_match_emit_repeat_encodeBetterBlockAsm10Blarge_forward_sse_loop_32 + MOVOU X0, (CX) + MOVOU X1, 16(CX) + MOVOU X2, -32(CX)(R9*1) + MOVOU X3, -16(CX)(R9*1) + MOVQ SI, CX + +emit_literal_done_match_emit_repeat_encodeBetterBlockAsm10B: + ADDL R12, DX + ADDL $0x04, R12 + MOVL DX, 12(SP) + + // emitRepeat + MOVL R12, SI + LEAL -4(R12), R12 + CMPL SI, $0x08 + JBE repeat_two_match_nolit_repeat_encodeBetterBlockAsm10B + CMPL SI, $0x0c + JAE cant_repeat_two_offset_match_nolit_repeat_encodeBetterBlockAsm10B + CMPL R8, $0x00000800 + JB repeat_two_offset_match_nolit_repeat_encodeBetterBlockAsm10B + +cant_repeat_two_offset_match_nolit_repeat_encodeBetterBlockAsm10B: + CMPL R12, $0x00000104 + JB repeat_three_match_nolit_repeat_encodeBetterBlockAsm10B + LEAL -256(R12), R12 + MOVW $0x0019, (CX) + MOVW R12, 2(CX) + ADDQ $0x04, CX + JMP match_nolit_emitcopy_end_encodeBetterBlockAsm10B + +repeat_three_match_nolit_repeat_encodeBetterBlockAsm10B: + LEAL -4(R12), R12 + MOVW $0x0015, (CX) + MOVB R12, 2(CX) + ADDQ $0x03, CX + JMP match_nolit_emitcopy_end_encodeBetterBlockAsm10B + +repeat_two_match_nolit_repeat_encodeBetterBlockAsm10B: + SHLL $0x02, R12 + ORL $0x01, R12 + MOVW R12, (CX) + ADDQ $0x02, CX + JMP match_nolit_emitcopy_end_encodeBetterBlockAsm10B + +repeat_two_offset_match_nolit_repeat_encodeBetterBlockAsm10B: + XORQ SI, SI + LEAL 1(SI)(R12*4), R12 + MOVB R8, 1(CX) + SARL $0x08, R8 + SHLL $0x05, R8 + ORL R8, R12 + MOVB R12, (CX) + ADDQ $0x02, CX + +match_nolit_emitcopy_end_encodeBetterBlockAsm10B: + CMPL DX, 8(SP) + JAE 
emit_remainder_encodeBetterBlockAsm10B + CMPQ CX, (SP) + JB match_nolit_dst_ok_encodeBetterBlockAsm10B + MOVQ $0x00000000, ret+56(FP) + RET + +match_nolit_dst_ok_encodeBetterBlockAsm10B: + MOVQ $0x0000cf1bbcdcbf9b, SI + MOVQ $0x9e3779b1, R8 + LEAQ 1(DI), DI + LEAQ -2(DX), R9 + MOVQ (BX)(DI*1), R10 + MOVQ 1(BX)(DI*1), R11 + MOVQ (BX)(R9*1), R12 + MOVQ 1(BX)(R9*1), R13 + SHLQ $0x10, R10 + IMULQ SI, R10 + SHRQ $0x34, R10 + SHLQ $0x20, R11 + IMULQ R8, R11 + SHRQ $0x36, R11 + SHLQ $0x10, R12 + IMULQ SI, R12 + SHRQ $0x34, R12 + SHLQ $0x20, R13 + IMULQ R8, R13 + SHRQ $0x36, R13 + LEAQ 1(DI), R8 + LEAQ 1(R9), R14 + MOVL DI, (AX)(R10*4) + MOVL R9, (AX)(R12*4) + MOVL R8, 16384(AX)(R11*4) + MOVL R14, 16384(AX)(R13*4) + LEAQ 1(R9)(DI*1), R8 + SHRQ $0x01, R8 + ADDQ $0x01, DI + SUBQ $0x01, R9 + +index_loop_encodeBetterBlockAsm10B: + CMPQ R8, R9 + JAE search_loop_encodeBetterBlockAsm10B + MOVQ (BX)(DI*1), R10 + MOVQ (BX)(R8*1), R11 + SHLQ $0x10, R10 + IMULQ SI, R10 + SHRQ $0x34, R10 + SHLQ $0x10, R11 + IMULQ SI, R11 + SHRQ $0x34, R11 + MOVL DI, (AX)(R10*4) + MOVL R8, (AX)(R11*4) + ADDQ $0x02, DI + ADDQ $0x02, R8 + JMP index_loop_encodeBetterBlockAsm10B + +emit_remainder_encodeBetterBlockAsm10B: + MOVQ src_len+32(FP), AX + SUBL 12(SP), AX + LEAQ 3(CX)(AX*1), AX + CMPQ AX, (SP) + JB emit_remainder_ok_encodeBetterBlockAsm10B + MOVQ $0x00000000, ret+56(FP) + RET + +emit_remainder_ok_encodeBetterBlockAsm10B: + MOVQ src_len+32(FP), AX + MOVL 12(SP), DX + CMPL DX, AX + JEQ emit_literal_done_emit_remainder_encodeBetterBlockAsm10B + MOVL AX, SI + MOVL AX, 12(SP) + LEAQ (BX)(DX*1), AX + SUBL DX, SI + LEAL -1(SI), DX + CMPL DX, $0x3c + JB one_byte_emit_remainder_encodeBetterBlockAsm10B + CMPL DX, $0x00000100 + JB two_bytes_emit_remainder_encodeBetterBlockAsm10B + JB three_bytes_emit_remainder_encodeBetterBlockAsm10B + +three_bytes_emit_remainder_encodeBetterBlockAsm10B: + MOVB $0xf4, (CX) + MOVW DX, 1(CX) + ADDQ $0x03, CX + JMP memmove_long_emit_remainder_encodeBetterBlockAsm10B + +two_bytes_emit_remainder_encodeBetterBlockAsm10B: + MOVB $0xf0, (CX) + MOVB DL, 1(CX) + ADDQ $0x02, CX + CMPL DX, $0x40 + JB memmove_emit_remainder_encodeBetterBlockAsm10B + JMP memmove_long_emit_remainder_encodeBetterBlockAsm10B + +one_byte_emit_remainder_encodeBetterBlockAsm10B: + SHLB $0x02, DL + MOVB DL, (CX) + ADDQ $0x01, CX + +memmove_emit_remainder_encodeBetterBlockAsm10B: + LEAQ (CX)(SI*1), DX + MOVL SI, BX + + // genMemMoveShort + CMPQ BX, $0x03 + JB emit_lit_memmove_emit_remainder_encodeBetterBlockAsm10B_memmove_move_1or2 + JE emit_lit_memmove_emit_remainder_encodeBetterBlockAsm10B_memmove_move_3 + CMPQ BX, $0x08 + JB emit_lit_memmove_emit_remainder_encodeBetterBlockAsm10B_memmove_move_4through7 + CMPQ BX, $0x10 + JBE emit_lit_memmove_emit_remainder_encodeBetterBlockAsm10B_memmove_move_8through16 + CMPQ BX, $0x20 + JBE emit_lit_memmove_emit_remainder_encodeBetterBlockAsm10B_memmove_move_17through32 + JMP emit_lit_memmove_emit_remainder_encodeBetterBlockAsm10B_memmove_move_33through64 + +emit_lit_memmove_emit_remainder_encodeBetterBlockAsm10B_memmove_move_1or2: + MOVB (AX), SI + MOVB -1(AX)(BX*1), AL + MOVB SI, (CX) + MOVB AL, -1(CX)(BX*1) + JMP memmove_end_copy_emit_remainder_encodeBetterBlockAsm10B + +emit_lit_memmove_emit_remainder_encodeBetterBlockAsm10B_memmove_move_3: + MOVW (AX), SI + MOVB 2(AX), AL + MOVW SI, (CX) + MOVB AL, 2(CX) + JMP memmove_end_copy_emit_remainder_encodeBetterBlockAsm10B + +emit_lit_memmove_emit_remainder_encodeBetterBlockAsm10B_memmove_move_4through7: + MOVL (AX), SI + MOVL -4(AX)(BX*1), AX + MOVL 
SI, (CX) + MOVL AX, -4(CX)(BX*1) + JMP memmove_end_copy_emit_remainder_encodeBetterBlockAsm10B + +emit_lit_memmove_emit_remainder_encodeBetterBlockAsm10B_memmove_move_8through16: + MOVQ (AX), SI + MOVQ -8(AX)(BX*1), AX + MOVQ SI, (CX) + MOVQ AX, -8(CX)(BX*1) + JMP memmove_end_copy_emit_remainder_encodeBetterBlockAsm10B + +emit_lit_memmove_emit_remainder_encodeBetterBlockAsm10B_memmove_move_17through32: + MOVOU (AX), X0 + MOVOU -16(AX)(BX*1), X1 + MOVOU X0, (CX) + MOVOU X1, -16(CX)(BX*1) + JMP memmove_end_copy_emit_remainder_encodeBetterBlockAsm10B + +emit_lit_memmove_emit_remainder_encodeBetterBlockAsm10B_memmove_move_33through64: + MOVOU (AX), X0 + MOVOU 16(AX), X1 + MOVOU -32(AX)(BX*1), X2 + MOVOU -16(AX)(BX*1), X3 + MOVOU X0, (CX) + MOVOU X1, 16(CX) + MOVOU X2, -32(CX)(BX*1) + MOVOU X3, -16(CX)(BX*1) + +memmove_end_copy_emit_remainder_encodeBetterBlockAsm10B: + MOVQ DX, CX + JMP emit_literal_done_emit_remainder_encodeBetterBlockAsm10B + +memmove_long_emit_remainder_encodeBetterBlockAsm10B: + LEAQ (CX)(SI*1), DX + MOVL SI, BX + + // genMemMoveLong + MOVOU (AX), X0 + MOVOU 16(AX), X1 + MOVOU -32(AX)(BX*1), X2 + MOVOU -16(AX)(BX*1), X3 + MOVQ BX, DI + SHRQ $0x05, DI + MOVQ CX, SI + ANDL $0x0000001f, SI + MOVQ $0x00000040, R8 + SUBQ SI, R8 + DECQ DI + JA emit_lit_memmove_long_emit_remainder_encodeBetterBlockAsm10Blarge_forward_sse_loop_32 + LEAQ -32(AX)(R8*1), SI + LEAQ -32(CX)(R8*1), R9 + +emit_lit_memmove_long_emit_remainder_encodeBetterBlockAsm10Blarge_big_loop_back: + MOVOU (SI), X4 + MOVOU 16(SI), X5 + MOVOA X4, (R9) + MOVOA X5, 16(R9) + ADDQ $0x20, R9 + ADDQ $0x20, SI + ADDQ $0x20, R8 + DECQ DI + JNA emit_lit_memmove_long_emit_remainder_encodeBetterBlockAsm10Blarge_big_loop_back + +emit_lit_memmove_long_emit_remainder_encodeBetterBlockAsm10Blarge_forward_sse_loop_32: + MOVOU -32(AX)(R8*1), X4 + MOVOU -16(AX)(R8*1), X5 + MOVOA X4, -32(CX)(R8*1) + MOVOA X5, -16(CX)(R8*1) + ADDQ $0x20, R8 + CMPQ BX, R8 + JAE emit_lit_memmove_long_emit_remainder_encodeBetterBlockAsm10Blarge_forward_sse_loop_32 + MOVOU X0, (CX) + MOVOU X1, 16(CX) + MOVOU X2, -32(CX)(BX*1) + MOVOU X3, -16(CX)(BX*1) + MOVQ DX, CX + +emit_literal_done_emit_remainder_encodeBetterBlockAsm10B: + MOVQ dst_base+0(FP), AX + SUBQ AX, CX + MOVQ CX, ret+56(FP) + RET + +// func encodeBetterBlockAsm8B(dst []byte, src []byte, tmp *[5120]byte) int +// Requires: BMI, SSE2 +TEXT ·encodeBetterBlockAsm8B(SB), $24-64 + MOVQ tmp+48(FP), AX + MOVQ dst_base+0(FP), CX + MOVQ $0x00000028, DX + MOVQ AX, BX + PXOR X0, X0 + +zero_loop_encodeBetterBlockAsm8B: + MOVOU X0, (BX) + MOVOU X0, 16(BX) + MOVOU X0, 32(BX) + MOVOU X0, 48(BX) + MOVOU X0, 64(BX) + MOVOU X0, 80(BX) + MOVOU X0, 96(BX) + MOVOU X0, 112(BX) + ADDQ $0x80, BX + DECQ DX + JNZ zero_loop_encodeBetterBlockAsm8B + MOVL $0x00000000, 12(SP) + MOVQ src_len+32(FP), DX + LEAQ -6(DX), BX + LEAQ -8(DX), SI + MOVL SI, 8(SP) + SHRQ $0x05, DX + SUBL DX, BX + LEAQ (CX)(BX*1), BX + MOVQ BX, (SP) + MOVL $0x00000001, DX + MOVL $0x00000000, 16(SP) + MOVQ src_base+24(FP), BX + +search_loop_encodeBetterBlockAsm8B: + MOVL DX, SI + SUBL 12(SP), SI + SHRL $0x04, SI + LEAL 1(DX)(SI*1), SI + CMPL SI, 8(SP) + JAE emit_remainder_encodeBetterBlockAsm8B + MOVQ (BX)(DX*1), DI + MOVL SI, 20(SP) + MOVQ $0x0000cf1bbcdcbf9b, R9 + MOVQ $0x9e3779b1, SI + MOVQ DI, R10 + MOVQ DI, R11 + SHLQ $0x10, R10 + IMULQ R9, R10 + SHRQ $0x36, R10 + SHLQ $0x20, R11 + IMULQ SI, R11 + SHRQ $0x38, R11 + MOVL (AX)(R10*4), SI + MOVL 4096(AX)(R11*4), R8 + MOVL DX, (AX)(R10*4) + MOVL DX, 4096(AX)(R11*4) + MOVQ (BX)(SI*1), R10 + MOVQ (BX)(R8*1), 
R11 + CMPQ R10, DI + JEQ candidate_match_encodeBetterBlockAsm8B + CMPQ R11, DI + JNE no_short_found_encodeBetterBlockAsm8B + MOVL R8, SI + JMP candidate_match_encodeBetterBlockAsm8B + +no_short_found_encodeBetterBlockAsm8B: + CMPL R10, DI + JEQ candidate_match_encodeBetterBlockAsm8B + CMPL R11, DI + JEQ candidateS_match_encodeBetterBlockAsm8B + MOVL 20(SP), DX + JMP search_loop_encodeBetterBlockAsm8B + +candidateS_match_encodeBetterBlockAsm8B: + SHRQ $0x08, DI + MOVQ DI, R10 + SHLQ $0x10, R10 + IMULQ R9, R10 + SHRQ $0x36, R10 + MOVL (AX)(R10*4), SI + INCL DX + MOVL DX, (AX)(R10*4) + CMPL (BX)(SI*1), DI + JEQ candidate_match_encodeBetterBlockAsm8B + DECL DX + MOVL R8, SI + +candidate_match_encodeBetterBlockAsm8B: + MOVL 12(SP), DI + TESTL SI, SI + JZ match_extend_back_end_encodeBetterBlockAsm8B + +match_extend_back_loop_encodeBetterBlockAsm8B: + CMPL DX, DI + JBE match_extend_back_end_encodeBetterBlockAsm8B + MOVB -1(BX)(SI*1), R8 + MOVB -1(BX)(DX*1), R9 + CMPB R8, R9 + JNE match_extend_back_end_encodeBetterBlockAsm8B + LEAL -1(DX), DX + DECL SI + JZ match_extend_back_end_encodeBetterBlockAsm8B + JMP match_extend_back_loop_encodeBetterBlockAsm8B + +match_extend_back_end_encodeBetterBlockAsm8B: + MOVL DX, DI + SUBL 12(SP), DI + LEAQ 3(CX)(DI*1), DI + CMPQ DI, (SP) + JB match_dst_size_check_encodeBetterBlockAsm8B + MOVQ $0x00000000, ret+56(FP) + RET + +match_dst_size_check_encodeBetterBlockAsm8B: + MOVL DX, DI + ADDL $0x04, DX + ADDL $0x04, SI + MOVQ src_len+32(FP), R8 + SUBL DX, R8 + LEAQ (BX)(DX*1), R9 + LEAQ (BX)(SI*1), R10 + + // matchLen + XORL R12, R12 + +matchlen_loopback_16_match_nolit_encodeBetterBlockAsm8B: + CMPL R8, $0x10 + JB matchlen_match8_match_nolit_encodeBetterBlockAsm8B + MOVQ (R9)(R12*1), R11 + MOVQ 8(R9)(R12*1), R13 + XORQ (R10)(R12*1), R11 + JNZ matchlen_bsf_8_match_nolit_encodeBetterBlockAsm8B + XORQ 8(R10)(R12*1), R13 + JNZ matchlen_bsf_16match_nolit_encodeBetterBlockAsm8B + LEAL -16(R8), R8 + LEAL 16(R12), R12 + JMP matchlen_loopback_16_match_nolit_encodeBetterBlockAsm8B + +matchlen_bsf_16match_nolit_encodeBetterBlockAsm8B: +#ifdef GOAMD64_v3 + TZCNTQ R13, R13 + +#else + BSFQ R13, R13 + +#endif + SARQ $0x03, R13 + LEAL 8(R12)(R13*1), R12 + JMP match_nolit_end_encodeBetterBlockAsm8B + +matchlen_match8_match_nolit_encodeBetterBlockAsm8B: + CMPL R8, $0x08 + JB matchlen_match4_match_nolit_encodeBetterBlockAsm8B + MOVQ (R9)(R12*1), R11 + XORQ (R10)(R12*1), R11 + JNZ matchlen_bsf_8_match_nolit_encodeBetterBlockAsm8B + LEAL -8(R8), R8 + LEAL 8(R12), R12 + JMP matchlen_match4_match_nolit_encodeBetterBlockAsm8B + +matchlen_bsf_8_match_nolit_encodeBetterBlockAsm8B: +#ifdef GOAMD64_v3 + TZCNTQ R11, R11 + +#else + BSFQ R11, R11 + +#endif + SARQ $0x03, R11 + LEAL (R12)(R11*1), R12 + JMP match_nolit_end_encodeBetterBlockAsm8B + +matchlen_match4_match_nolit_encodeBetterBlockAsm8B: + CMPL R8, $0x04 + JB matchlen_match2_match_nolit_encodeBetterBlockAsm8B + MOVL (R9)(R12*1), R11 + CMPL (R10)(R12*1), R11 + JNE matchlen_match2_match_nolit_encodeBetterBlockAsm8B + LEAL -4(R8), R8 + LEAL 4(R12), R12 + +matchlen_match2_match_nolit_encodeBetterBlockAsm8B: + CMPL R8, $0x01 + JE matchlen_match1_match_nolit_encodeBetterBlockAsm8B + JB match_nolit_end_encodeBetterBlockAsm8B + MOVW (R9)(R12*1), R11 + CMPW (R10)(R12*1), R11 + JNE matchlen_match1_match_nolit_encodeBetterBlockAsm8B + LEAL 2(R12), R12 + SUBL $0x02, R8 + JZ match_nolit_end_encodeBetterBlockAsm8B + +matchlen_match1_match_nolit_encodeBetterBlockAsm8B: + MOVB (R9)(R12*1), R11 + CMPB (R10)(R12*1), R11 + JNE 
match_nolit_end_encodeBetterBlockAsm8B + LEAL 1(R12), R12 + +match_nolit_end_encodeBetterBlockAsm8B: + MOVL DX, R8 + SUBL SI, R8 + + // Check if repeat + CMPL 16(SP), R8 + JEQ match_is_repeat_encodeBetterBlockAsm8B + MOVL R8, 16(SP) + MOVL 12(SP), SI + CMPL SI, DI + JEQ emit_literal_done_match_emit_encodeBetterBlockAsm8B + MOVL DI, R9 + MOVL DI, 12(SP) + LEAQ (BX)(SI*1), R10 + SUBL SI, R9 + LEAL -1(R9), SI + CMPL SI, $0x3c + JB one_byte_match_emit_encodeBetterBlockAsm8B + CMPL SI, $0x00000100 + JB two_bytes_match_emit_encodeBetterBlockAsm8B + JB three_bytes_match_emit_encodeBetterBlockAsm8B + +three_bytes_match_emit_encodeBetterBlockAsm8B: + MOVB $0xf4, (CX) + MOVW SI, 1(CX) + ADDQ $0x03, CX + JMP memmove_long_match_emit_encodeBetterBlockAsm8B + +two_bytes_match_emit_encodeBetterBlockAsm8B: + MOVB $0xf0, (CX) + MOVB SI, 1(CX) + ADDQ $0x02, CX + CMPL SI, $0x40 + JB memmove_match_emit_encodeBetterBlockAsm8B + JMP memmove_long_match_emit_encodeBetterBlockAsm8B + +one_byte_match_emit_encodeBetterBlockAsm8B: + SHLB $0x02, SI + MOVB SI, (CX) + ADDQ $0x01, CX + +memmove_match_emit_encodeBetterBlockAsm8B: + LEAQ (CX)(R9*1), SI + + // genMemMoveShort + CMPQ R9, $0x04 + JBE emit_lit_memmove_match_emit_encodeBetterBlockAsm8B_memmove_move_4 + CMPQ R9, $0x08 + JB emit_lit_memmove_match_emit_encodeBetterBlockAsm8B_memmove_move_4through7 + CMPQ R9, $0x10 + JBE emit_lit_memmove_match_emit_encodeBetterBlockAsm8B_memmove_move_8through16 + CMPQ R9, $0x20 + JBE emit_lit_memmove_match_emit_encodeBetterBlockAsm8B_memmove_move_17through32 + JMP emit_lit_memmove_match_emit_encodeBetterBlockAsm8B_memmove_move_33through64 + +emit_lit_memmove_match_emit_encodeBetterBlockAsm8B_memmove_move_4: + MOVL (R10), R11 + MOVL R11, (CX) + JMP memmove_end_copy_match_emit_encodeBetterBlockAsm8B + +emit_lit_memmove_match_emit_encodeBetterBlockAsm8B_memmove_move_4through7: + MOVL (R10), R11 + MOVL -4(R10)(R9*1), R10 + MOVL R11, (CX) + MOVL R10, -4(CX)(R9*1) + JMP memmove_end_copy_match_emit_encodeBetterBlockAsm8B + +emit_lit_memmove_match_emit_encodeBetterBlockAsm8B_memmove_move_8through16: + MOVQ (R10), R11 + MOVQ -8(R10)(R9*1), R10 + MOVQ R11, (CX) + MOVQ R10, -8(CX)(R9*1) + JMP memmove_end_copy_match_emit_encodeBetterBlockAsm8B + +emit_lit_memmove_match_emit_encodeBetterBlockAsm8B_memmove_move_17through32: + MOVOU (R10), X0 + MOVOU -16(R10)(R9*1), X1 + MOVOU X0, (CX) + MOVOU X1, -16(CX)(R9*1) + JMP memmove_end_copy_match_emit_encodeBetterBlockAsm8B + +emit_lit_memmove_match_emit_encodeBetterBlockAsm8B_memmove_move_33through64: + MOVOU (R10), X0 + MOVOU 16(R10), X1 + MOVOU -32(R10)(R9*1), X2 + MOVOU -16(R10)(R9*1), X3 + MOVOU X0, (CX) + MOVOU X1, 16(CX) + MOVOU X2, -32(CX)(R9*1) + MOVOU X3, -16(CX)(R9*1) + +memmove_end_copy_match_emit_encodeBetterBlockAsm8B: + MOVQ SI, CX + JMP emit_literal_done_match_emit_encodeBetterBlockAsm8B + +memmove_long_match_emit_encodeBetterBlockAsm8B: + LEAQ (CX)(R9*1), SI + + // genMemMoveLong + MOVOU (R10), X0 + MOVOU 16(R10), X1 + MOVOU -32(R10)(R9*1), X2 + MOVOU -16(R10)(R9*1), X3 + MOVQ R9, R13 + SHRQ $0x05, R13 + MOVQ CX, R11 + ANDL $0x0000001f, R11 + MOVQ $0x00000040, R14 + SUBQ R11, R14 + DECQ R13 + JA emit_lit_memmove_long_match_emit_encodeBetterBlockAsm8Blarge_forward_sse_loop_32 + LEAQ -32(R10)(R14*1), R11 + LEAQ -32(CX)(R14*1), R15 + +emit_lit_memmove_long_match_emit_encodeBetterBlockAsm8Blarge_big_loop_back: + MOVOU (R11), X4 + MOVOU 16(R11), X5 + MOVOA X4, (R15) + MOVOA X5, 16(R15) + ADDQ $0x20, R15 + ADDQ $0x20, R11 + ADDQ $0x20, R14 + DECQ R13 + JNA 
emit_lit_memmove_long_match_emit_encodeBetterBlockAsm8Blarge_big_loop_back + +emit_lit_memmove_long_match_emit_encodeBetterBlockAsm8Blarge_forward_sse_loop_32: + MOVOU -32(R10)(R14*1), X4 + MOVOU -16(R10)(R14*1), X5 + MOVOA X4, -32(CX)(R14*1) + MOVOA X5, -16(CX)(R14*1) + ADDQ $0x20, R14 + CMPQ R9, R14 + JAE emit_lit_memmove_long_match_emit_encodeBetterBlockAsm8Blarge_forward_sse_loop_32 + MOVOU X0, (CX) + MOVOU X1, 16(CX) + MOVOU X2, -32(CX)(R9*1) + MOVOU X3, -16(CX)(R9*1) + MOVQ SI, CX + +emit_literal_done_match_emit_encodeBetterBlockAsm8B: + ADDL R12, DX + ADDL $0x04, R12 + MOVL DX, 12(SP) + + // emitCopy + CMPL R12, $0x40 + JBE two_byte_offset_short_match_nolit_encodeBetterBlockAsm8B + CMPL R8, $0x00000800 + JAE long_offset_short_match_nolit_encodeBetterBlockAsm8B + MOVL $0x00000001, SI + LEAL 16(SI), SI + MOVB R8, 1(CX) + SHRL $0x08, R8 + SHLL $0x05, R8 + ORL R8, SI + MOVB SI, (CX) + ADDQ $0x02, CX + SUBL $0x08, R12 + + // emitRepeat + LEAL -4(R12), R12 + JMP cant_repeat_two_offset_match_nolit_encodeBetterBlockAsm8B_emit_copy_short_2b + MOVL R12, SI + LEAL -4(R12), R12 + CMPL SI, $0x08 + JBE repeat_two_match_nolit_encodeBetterBlockAsm8B_emit_copy_short_2b + CMPL SI, $0x0c + JAE cant_repeat_two_offset_match_nolit_encodeBetterBlockAsm8B_emit_copy_short_2b + +cant_repeat_two_offset_match_nolit_encodeBetterBlockAsm8B_emit_copy_short_2b: + CMPL R12, $0x00000104 + JB repeat_three_match_nolit_encodeBetterBlockAsm8B_emit_copy_short_2b + LEAL -256(R12), R12 + MOVW $0x0019, (CX) + MOVW R12, 2(CX) + ADDQ $0x04, CX + JMP match_nolit_emitcopy_end_encodeBetterBlockAsm8B + +repeat_three_match_nolit_encodeBetterBlockAsm8B_emit_copy_short_2b: + LEAL -4(R12), R12 + MOVW $0x0015, (CX) + MOVB R12, 2(CX) + ADDQ $0x03, CX + JMP match_nolit_emitcopy_end_encodeBetterBlockAsm8B + +repeat_two_match_nolit_encodeBetterBlockAsm8B_emit_copy_short_2b: + SHLL $0x02, R12 + ORL $0x01, R12 + MOVW R12, (CX) + ADDQ $0x02, CX + JMP match_nolit_emitcopy_end_encodeBetterBlockAsm8B + XORQ SI, SI + LEAL 1(SI)(R12*4), R12 + MOVB R8, 1(CX) + SARL $0x08, R8 + SHLL $0x05, R8 + ORL R8, R12 + MOVB R12, (CX) + ADDQ $0x02, CX + JMP match_nolit_emitcopy_end_encodeBetterBlockAsm8B + +long_offset_short_match_nolit_encodeBetterBlockAsm8B: + MOVB $0xee, (CX) + MOVW R8, 1(CX) + LEAL -60(R12), R12 + ADDQ $0x03, CX + + // emitRepeat + MOVL R12, SI + LEAL -4(R12), R12 + CMPL SI, $0x08 + JBE repeat_two_match_nolit_encodeBetterBlockAsm8B_emit_copy_short + CMPL SI, $0x0c + JAE cant_repeat_two_offset_match_nolit_encodeBetterBlockAsm8B_emit_copy_short + +cant_repeat_two_offset_match_nolit_encodeBetterBlockAsm8B_emit_copy_short: + CMPL R12, $0x00000104 + JB repeat_three_match_nolit_encodeBetterBlockAsm8B_emit_copy_short + LEAL -256(R12), R12 + MOVW $0x0019, (CX) + MOVW R12, 2(CX) + ADDQ $0x04, CX + JMP match_nolit_emitcopy_end_encodeBetterBlockAsm8B + +repeat_three_match_nolit_encodeBetterBlockAsm8B_emit_copy_short: + LEAL -4(R12), R12 + MOVW $0x0015, (CX) + MOVB R12, 2(CX) + ADDQ $0x03, CX + JMP match_nolit_emitcopy_end_encodeBetterBlockAsm8B + +repeat_two_match_nolit_encodeBetterBlockAsm8B_emit_copy_short: + SHLL $0x02, R12 + ORL $0x01, R12 + MOVW R12, (CX) + ADDQ $0x02, CX + JMP match_nolit_emitcopy_end_encodeBetterBlockAsm8B + XORQ SI, SI + LEAL 1(SI)(R12*4), R12 + MOVB R8, 1(CX) + SARL $0x08, R8 + SHLL $0x05, R8 + ORL R8, R12 + MOVB R12, (CX) + ADDQ $0x02, CX + JMP match_nolit_emitcopy_end_encodeBetterBlockAsm8B + +two_byte_offset_short_match_nolit_encodeBetterBlockAsm8B: + MOVL R12, SI + SHLL $0x02, SI + CMPL R12, $0x0c + JAE 
emit_copy_three_match_nolit_encodeBetterBlockAsm8B + LEAL -15(SI), SI + MOVB R8, 1(CX) + SHRL $0x08, R8 + SHLL $0x05, R8 + ORL R8, SI + MOVB SI, (CX) + ADDQ $0x02, CX + JMP match_nolit_emitcopy_end_encodeBetterBlockAsm8B + +emit_copy_three_match_nolit_encodeBetterBlockAsm8B: + LEAL -2(SI), SI + MOVB SI, (CX) + MOVW R8, 1(CX) + ADDQ $0x03, CX + JMP match_nolit_emitcopy_end_encodeBetterBlockAsm8B + +match_is_repeat_encodeBetterBlockAsm8B: + MOVL 12(SP), SI + CMPL SI, DI + JEQ emit_literal_done_match_emit_repeat_encodeBetterBlockAsm8B + MOVL DI, R8 + MOVL DI, 12(SP) + LEAQ (BX)(SI*1), R9 + SUBL SI, R8 + LEAL -1(R8), SI + CMPL SI, $0x3c + JB one_byte_match_emit_repeat_encodeBetterBlockAsm8B + CMPL SI, $0x00000100 + JB two_bytes_match_emit_repeat_encodeBetterBlockAsm8B + JB three_bytes_match_emit_repeat_encodeBetterBlockAsm8B + +three_bytes_match_emit_repeat_encodeBetterBlockAsm8B: + MOVB $0xf4, (CX) + MOVW SI, 1(CX) + ADDQ $0x03, CX + JMP memmove_long_match_emit_repeat_encodeBetterBlockAsm8B + +two_bytes_match_emit_repeat_encodeBetterBlockAsm8B: + MOVB $0xf0, (CX) + MOVB SI, 1(CX) + ADDQ $0x02, CX + CMPL SI, $0x40 + JB memmove_match_emit_repeat_encodeBetterBlockAsm8B + JMP memmove_long_match_emit_repeat_encodeBetterBlockAsm8B + +one_byte_match_emit_repeat_encodeBetterBlockAsm8B: + SHLB $0x02, SI + MOVB SI, (CX) + ADDQ $0x01, CX + +memmove_match_emit_repeat_encodeBetterBlockAsm8B: + LEAQ (CX)(R8*1), SI + + // genMemMoveShort + CMPQ R8, $0x04 + JBE emit_lit_memmove_match_emit_repeat_encodeBetterBlockAsm8B_memmove_move_4 + CMPQ R8, $0x08 + JB emit_lit_memmove_match_emit_repeat_encodeBetterBlockAsm8B_memmove_move_4through7 + CMPQ R8, $0x10 + JBE emit_lit_memmove_match_emit_repeat_encodeBetterBlockAsm8B_memmove_move_8through16 + CMPQ R8, $0x20 + JBE emit_lit_memmove_match_emit_repeat_encodeBetterBlockAsm8B_memmove_move_17through32 + JMP emit_lit_memmove_match_emit_repeat_encodeBetterBlockAsm8B_memmove_move_33through64 + +emit_lit_memmove_match_emit_repeat_encodeBetterBlockAsm8B_memmove_move_4: + MOVL (R9), R10 + MOVL R10, (CX) + JMP memmove_end_copy_match_emit_repeat_encodeBetterBlockAsm8B + +emit_lit_memmove_match_emit_repeat_encodeBetterBlockAsm8B_memmove_move_4through7: + MOVL (R9), R10 + MOVL -4(R9)(R8*1), R9 + MOVL R10, (CX) + MOVL R9, -4(CX)(R8*1) + JMP memmove_end_copy_match_emit_repeat_encodeBetterBlockAsm8B + +emit_lit_memmove_match_emit_repeat_encodeBetterBlockAsm8B_memmove_move_8through16: + MOVQ (R9), R10 + MOVQ -8(R9)(R8*1), R9 + MOVQ R10, (CX) + MOVQ R9, -8(CX)(R8*1) + JMP memmove_end_copy_match_emit_repeat_encodeBetterBlockAsm8B + +emit_lit_memmove_match_emit_repeat_encodeBetterBlockAsm8B_memmove_move_17through32: + MOVOU (R9), X0 + MOVOU -16(R9)(R8*1), X1 + MOVOU X0, (CX) + MOVOU X1, -16(CX)(R8*1) + JMP memmove_end_copy_match_emit_repeat_encodeBetterBlockAsm8B + +emit_lit_memmove_match_emit_repeat_encodeBetterBlockAsm8B_memmove_move_33through64: + MOVOU (R9), X0 + MOVOU 16(R9), X1 + MOVOU -32(R9)(R8*1), X2 + MOVOU -16(R9)(R8*1), X3 + MOVOU X0, (CX) + MOVOU X1, 16(CX) + MOVOU X2, -32(CX)(R8*1) + MOVOU X3, -16(CX)(R8*1) + +memmove_end_copy_match_emit_repeat_encodeBetterBlockAsm8B: + MOVQ SI, CX + JMP emit_literal_done_match_emit_repeat_encodeBetterBlockAsm8B + +memmove_long_match_emit_repeat_encodeBetterBlockAsm8B: + LEAQ (CX)(R8*1), SI + + // genMemMoveLong + MOVOU (R9), X0 + MOVOU 16(R9), X1 + MOVOU -32(R9)(R8*1), X2 + MOVOU -16(R9)(R8*1), X3 + MOVQ R8, R11 + SHRQ $0x05, R11 + MOVQ CX, R10 + ANDL $0x0000001f, R10 + MOVQ $0x00000040, R13 + SUBQ R10, R13 + DECQ R11 + JA 
emit_lit_memmove_long_match_emit_repeat_encodeBetterBlockAsm8Blarge_forward_sse_loop_32 + LEAQ -32(R9)(R13*1), R10 + LEAQ -32(CX)(R13*1), R14 + +emit_lit_memmove_long_match_emit_repeat_encodeBetterBlockAsm8Blarge_big_loop_back: + MOVOU (R10), X4 + MOVOU 16(R10), X5 + MOVOA X4, (R14) + MOVOA X5, 16(R14) + ADDQ $0x20, R14 + ADDQ $0x20, R10 + ADDQ $0x20, R13 + DECQ R11 + JNA emit_lit_memmove_long_match_emit_repeat_encodeBetterBlockAsm8Blarge_big_loop_back + +emit_lit_memmove_long_match_emit_repeat_encodeBetterBlockAsm8Blarge_forward_sse_loop_32: + MOVOU -32(R9)(R13*1), X4 + MOVOU -16(R9)(R13*1), X5 + MOVOA X4, -32(CX)(R13*1) + MOVOA X5, -16(CX)(R13*1) + ADDQ $0x20, R13 + CMPQ R8, R13 + JAE emit_lit_memmove_long_match_emit_repeat_encodeBetterBlockAsm8Blarge_forward_sse_loop_32 + MOVOU X0, (CX) + MOVOU X1, 16(CX) + MOVOU X2, -32(CX)(R8*1) + MOVOU X3, -16(CX)(R8*1) + MOVQ SI, CX + +emit_literal_done_match_emit_repeat_encodeBetterBlockAsm8B: + ADDL R12, DX + ADDL $0x04, R12 + MOVL DX, 12(SP) + + // emitRepeat + MOVL R12, SI + LEAL -4(R12), R12 + CMPL SI, $0x08 + JBE repeat_two_match_nolit_repeat_encodeBetterBlockAsm8B + CMPL SI, $0x0c + JAE cant_repeat_two_offset_match_nolit_repeat_encodeBetterBlockAsm8B + +cant_repeat_two_offset_match_nolit_repeat_encodeBetterBlockAsm8B: + CMPL R12, $0x00000104 + JB repeat_three_match_nolit_repeat_encodeBetterBlockAsm8B + LEAL -256(R12), R12 + MOVW $0x0019, (CX) + MOVW R12, 2(CX) + ADDQ $0x04, CX + JMP match_nolit_emitcopy_end_encodeBetterBlockAsm8B + +repeat_three_match_nolit_repeat_encodeBetterBlockAsm8B: + LEAL -4(R12), R12 + MOVW $0x0015, (CX) + MOVB R12, 2(CX) + ADDQ $0x03, CX + JMP match_nolit_emitcopy_end_encodeBetterBlockAsm8B + +repeat_two_match_nolit_repeat_encodeBetterBlockAsm8B: + SHLL $0x02, R12 + ORL $0x01, R12 + MOVW R12, (CX) + ADDQ $0x02, CX + JMP match_nolit_emitcopy_end_encodeBetterBlockAsm8B + XORQ SI, SI + LEAL 1(SI)(R12*4), R12 + MOVB R8, 1(CX) + SARL $0x08, R8 + SHLL $0x05, R8 + ORL R8, R12 + MOVB R12, (CX) + ADDQ $0x02, CX + +match_nolit_emitcopy_end_encodeBetterBlockAsm8B: + CMPL DX, 8(SP) + JAE emit_remainder_encodeBetterBlockAsm8B + CMPQ CX, (SP) + JB match_nolit_dst_ok_encodeBetterBlockAsm8B + MOVQ $0x00000000, ret+56(FP) + RET + +match_nolit_dst_ok_encodeBetterBlockAsm8B: + MOVQ $0x0000cf1bbcdcbf9b, SI + MOVQ $0x9e3779b1, R8 + LEAQ 1(DI), DI + LEAQ -2(DX), R9 + MOVQ (BX)(DI*1), R10 + MOVQ 1(BX)(DI*1), R11 + MOVQ (BX)(R9*1), R12 + MOVQ 1(BX)(R9*1), R13 + SHLQ $0x10, R10 + IMULQ SI, R10 + SHRQ $0x36, R10 + SHLQ $0x20, R11 + IMULQ R8, R11 + SHRQ $0x38, R11 + SHLQ $0x10, R12 + IMULQ SI, R12 + SHRQ $0x36, R12 + SHLQ $0x20, R13 + IMULQ R8, R13 + SHRQ $0x38, R13 + LEAQ 1(DI), R8 + LEAQ 1(R9), R14 + MOVL DI, (AX)(R10*4) + MOVL R9, (AX)(R12*4) + MOVL R8, 4096(AX)(R11*4) + MOVL R14, 4096(AX)(R13*4) + LEAQ 1(R9)(DI*1), R8 + SHRQ $0x01, R8 + ADDQ $0x01, DI + SUBQ $0x01, R9 + +index_loop_encodeBetterBlockAsm8B: + CMPQ R8, R9 + JAE search_loop_encodeBetterBlockAsm8B + MOVQ (BX)(DI*1), R10 + MOVQ (BX)(R8*1), R11 + SHLQ $0x10, R10 + IMULQ SI, R10 + SHRQ $0x36, R10 + SHLQ $0x10, R11 + IMULQ SI, R11 + SHRQ $0x36, R11 + MOVL DI, (AX)(R10*4) + MOVL R8, (AX)(R11*4) + ADDQ $0x02, DI + ADDQ $0x02, R8 + JMP index_loop_encodeBetterBlockAsm8B + +emit_remainder_encodeBetterBlockAsm8B: + MOVQ src_len+32(FP), AX + SUBL 12(SP), AX + LEAQ 3(CX)(AX*1), AX + CMPQ AX, (SP) + JB emit_remainder_ok_encodeBetterBlockAsm8B + MOVQ $0x00000000, ret+56(FP) + RET + +emit_remainder_ok_encodeBetterBlockAsm8B: + MOVQ src_len+32(FP), AX + MOVL 12(SP), DX + CMPL DX, AX + JEQ 
emit_literal_done_emit_remainder_encodeBetterBlockAsm8B + MOVL AX, SI + MOVL AX, 12(SP) + LEAQ (BX)(DX*1), AX + SUBL DX, SI + LEAL -1(SI), DX + CMPL DX, $0x3c + JB one_byte_emit_remainder_encodeBetterBlockAsm8B + CMPL DX, $0x00000100 + JB two_bytes_emit_remainder_encodeBetterBlockAsm8B + JB three_bytes_emit_remainder_encodeBetterBlockAsm8B + +three_bytes_emit_remainder_encodeBetterBlockAsm8B: + MOVB $0xf4, (CX) + MOVW DX, 1(CX) + ADDQ $0x03, CX + JMP memmove_long_emit_remainder_encodeBetterBlockAsm8B + +two_bytes_emit_remainder_encodeBetterBlockAsm8B: + MOVB $0xf0, (CX) + MOVB DL, 1(CX) + ADDQ $0x02, CX + CMPL DX, $0x40 + JB memmove_emit_remainder_encodeBetterBlockAsm8B + JMP memmove_long_emit_remainder_encodeBetterBlockAsm8B + +one_byte_emit_remainder_encodeBetterBlockAsm8B: + SHLB $0x02, DL + MOVB DL, (CX) + ADDQ $0x01, CX + +memmove_emit_remainder_encodeBetterBlockAsm8B: + LEAQ (CX)(SI*1), DX + MOVL SI, BX + + // genMemMoveShort + CMPQ BX, $0x03 + JB emit_lit_memmove_emit_remainder_encodeBetterBlockAsm8B_memmove_move_1or2 + JE emit_lit_memmove_emit_remainder_encodeBetterBlockAsm8B_memmove_move_3 + CMPQ BX, $0x08 + JB emit_lit_memmove_emit_remainder_encodeBetterBlockAsm8B_memmove_move_4through7 + CMPQ BX, $0x10 + JBE emit_lit_memmove_emit_remainder_encodeBetterBlockAsm8B_memmove_move_8through16 + CMPQ BX, $0x20 + JBE emit_lit_memmove_emit_remainder_encodeBetterBlockAsm8B_memmove_move_17through32 + JMP emit_lit_memmove_emit_remainder_encodeBetterBlockAsm8B_memmove_move_33through64 + +emit_lit_memmove_emit_remainder_encodeBetterBlockAsm8B_memmove_move_1or2: + MOVB (AX), SI + MOVB -1(AX)(BX*1), AL + MOVB SI, (CX) + MOVB AL, -1(CX)(BX*1) + JMP memmove_end_copy_emit_remainder_encodeBetterBlockAsm8B + +emit_lit_memmove_emit_remainder_encodeBetterBlockAsm8B_memmove_move_3: + MOVW (AX), SI + MOVB 2(AX), AL + MOVW SI, (CX) + MOVB AL, 2(CX) + JMP memmove_end_copy_emit_remainder_encodeBetterBlockAsm8B + +emit_lit_memmove_emit_remainder_encodeBetterBlockAsm8B_memmove_move_4through7: + MOVL (AX), SI + MOVL -4(AX)(BX*1), AX + MOVL SI, (CX) + MOVL AX, -4(CX)(BX*1) + JMP memmove_end_copy_emit_remainder_encodeBetterBlockAsm8B + +emit_lit_memmove_emit_remainder_encodeBetterBlockAsm8B_memmove_move_8through16: + MOVQ (AX), SI + MOVQ -8(AX)(BX*1), AX + MOVQ SI, (CX) + MOVQ AX, -8(CX)(BX*1) + JMP memmove_end_copy_emit_remainder_encodeBetterBlockAsm8B + +emit_lit_memmove_emit_remainder_encodeBetterBlockAsm8B_memmove_move_17through32: + MOVOU (AX), X0 + MOVOU -16(AX)(BX*1), X1 + MOVOU X0, (CX) + MOVOU X1, -16(CX)(BX*1) + JMP memmove_end_copy_emit_remainder_encodeBetterBlockAsm8B + +emit_lit_memmove_emit_remainder_encodeBetterBlockAsm8B_memmove_move_33through64: + MOVOU (AX), X0 + MOVOU 16(AX), X1 + MOVOU -32(AX)(BX*1), X2 + MOVOU -16(AX)(BX*1), X3 + MOVOU X0, (CX) + MOVOU X1, 16(CX) + MOVOU X2, -32(CX)(BX*1) + MOVOU X3, -16(CX)(BX*1) + +memmove_end_copy_emit_remainder_encodeBetterBlockAsm8B: + MOVQ DX, CX + JMP emit_literal_done_emit_remainder_encodeBetterBlockAsm8B + +memmove_long_emit_remainder_encodeBetterBlockAsm8B: + LEAQ (CX)(SI*1), DX + MOVL SI, BX + + // genMemMoveLong + MOVOU (AX), X0 + MOVOU 16(AX), X1 + MOVOU -32(AX)(BX*1), X2 + MOVOU -16(AX)(BX*1), X3 + MOVQ BX, DI + SHRQ $0x05, DI + MOVQ CX, SI + ANDL $0x0000001f, SI + MOVQ $0x00000040, R8 + SUBQ SI, R8 + DECQ DI + JA emit_lit_memmove_long_emit_remainder_encodeBetterBlockAsm8Blarge_forward_sse_loop_32 + LEAQ -32(AX)(R8*1), SI + LEAQ -32(CX)(R8*1), R9 + +emit_lit_memmove_long_emit_remainder_encodeBetterBlockAsm8Blarge_big_loop_back: + MOVOU (SI), X4 
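+	// (32 bytes per iteration: unaligned loads with MOVOU, aligned MOVOA
+	// stores to the destination that the setup above rounded to a 32-byte
+	// boundary)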
+ MOVOU 16(SI), X5 + MOVOA X4, (R9) + MOVOA X5, 16(R9) + ADDQ $0x20, R9 + ADDQ $0x20, SI + ADDQ $0x20, R8 + DECQ DI + JNA emit_lit_memmove_long_emit_remainder_encodeBetterBlockAsm8Blarge_big_loop_back + +emit_lit_memmove_long_emit_remainder_encodeBetterBlockAsm8Blarge_forward_sse_loop_32: + MOVOU -32(AX)(R8*1), X4 + MOVOU -16(AX)(R8*1), X5 + MOVOA X4, -32(CX)(R8*1) + MOVOA X5, -16(CX)(R8*1) + ADDQ $0x20, R8 + CMPQ BX, R8 + JAE emit_lit_memmove_long_emit_remainder_encodeBetterBlockAsm8Blarge_forward_sse_loop_32 + MOVOU X0, (CX) + MOVOU X1, 16(CX) + MOVOU X2, -32(CX)(BX*1) + MOVOU X3, -16(CX)(BX*1) + MOVQ DX, CX + +emit_literal_done_emit_remainder_encodeBetterBlockAsm8B: + MOVQ dst_base+0(FP), AX + SUBQ AX, CX + MOVQ CX, ret+56(FP) + RET + +// func encodeSnappyBlockAsm(dst []byte, src []byte, tmp *[65536]byte) int +// Requires: BMI, SSE2 +TEXT ·encodeSnappyBlockAsm(SB), $24-64 + MOVQ tmp+48(FP), AX + MOVQ dst_base+0(FP), CX + MOVQ $0x00000200, DX + MOVQ AX, BX + PXOR X0, X0 + +zero_loop_encodeSnappyBlockAsm: + MOVOU X0, (BX) + MOVOU X0, 16(BX) + MOVOU X0, 32(BX) + MOVOU X0, 48(BX) + MOVOU X0, 64(BX) + MOVOU X0, 80(BX) + MOVOU X0, 96(BX) + MOVOU X0, 112(BX) + ADDQ $0x80, BX + DECQ DX + JNZ zero_loop_encodeSnappyBlockAsm + MOVL $0x00000000, 12(SP) + MOVQ src_len+32(FP), DX + LEAQ -9(DX), BX + LEAQ -8(DX), SI + MOVL SI, 8(SP) + SHRQ $0x05, DX + SUBL DX, BX + LEAQ (CX)(BX*1), BX + MOVQ BX, (SP) + MOVL $0x00000001, DX + MOVL DX, 16(SP) + MOVQ src_base+24(FP), BX + +search_loop_encodeSnappyBlockAsm: + MOVL DX, SI + SUBL 12(SP), SI + SHRL $0x06, SI + LEAL 4(DX)(SI*1), SI + CMPL SI, 8(SP) + JAE emit_remainder_encodeSnappyBlockAsm + MOVQ (BX)(DX*1), DI + MOVL SI, 20(SP) + MOVQ $0x0000cf1bbcdcbf9b, R9 + MOVQ DI, R10 + MOVQ DI, R11 + SHRQ $0x08, R11 + SHLQ $0x10, R10 + IMULQ R9, R10 + SHRQ $0x32, R10 + SHLQ $0x10, R11 + IMULQ R9, R11 + SHRQ $0x32, R11 + MOVL (AX)(R10*4), SI + MOVL (AX)(R11*4), R8 + MOVL DX, (AX)(R10*4) + LEAL 1(DX), R10 + MOVL R10, (AX)(R11*4) + MOVQ DI, R10 + SHRQ $0x10, R10 + SHLQ $0x10, R10 + IMULQ R9, R10 + SHRQ $0x32, R10 + MOVL DX, R9 + SUBL 16(SP), R9 + MOVL 1(BX)(R9*1), R11 + MOVQ DI, R9 + SHRQ $0x08, R9 + CMPL R9, R11 + JNE no_repeat_found_encodeSnappyBlockAsm + LEAL 1(DX), DI + MOVL 12(SP), SI + MOVL DI, R8 + SUBL 16(SP), R8 + JZ repeat_extend_back_end_encodeSnappyBlockAsm + +repeat_extend_back_loop_encodeSnappyBlockAsm: + CMPL DI, SI + JBE repeat_extend_back_end_encodeSnappyBlockAsm + MOVB -1(BX)(R8*1), R9 + MOVB -1(BX)(DI*1), R10 + CMPB R9, R10 + JNE repeat_extend_back_end_encodeSnappyBlockAsm + LEAL -1(DI), DI + DECL R8 + JNZ repeat_extend_back_loop_encodeSnappyBlockAsm + +repeat_extend_back_end_encodeSnappyBlockAsm: + MOVL DI, SI + SUBL 12(SP), SI + LEAQ 5(CX)(SI*1), SI + CMPQ SI, (SP) + JB repeat_dst_size_check_encodeSnappyBlockAsm + MOVQ $0x00000000, ret+56(FP) + RET + +repeat_dst_size_check_encodeSnappyBlockAsm: + MOVL 12(SP), SI + CMPL SI, DI + JEQ emit_literal_done_repeat_emit_encodeSnappyBlockAsm + MOVL DI, R8 + MOVL DI, 12(SP) + LEAQ (BX)(SI*1), R9 + SUBL SI, R8 + LEAL -1(R8), SI + CMPL SI, $0x3c + JB one_byte_repeat_emit_encodeSnappyBlockAsm + CMPL SI, $0x00000100 + JB two_bytes_repeat_emit_encodeSnappyBlockAsm + CMPL SI, $0x00010000 + JB three_bytes_repeat_emit_encodeSnappyBlockAsm + CMPL SI, $0x01000000 + JB four_bytes_repeat_emit_encodeSnappyBlockAsm + MOVB $0xfc, (CX) + MOVL SI, 1(CX) + ADDQ $0x05, CX + JMP memmove_long_repeat_emit_encodeSnappyBlockAsm + +four_bytes_repeat_emit_encodeSnappyBlockAsm: + MOVL SI, R10 + SHRL $0x10, R10 + MOVB $0xf8, (CX) + MOVW 
SI, 1(CX) + MOVB R10, 3(CX) + ADDQ $0x04, CX + JMP memmove_long_repeat_emit_encodeSnappyBlockAsm + +three_bytes_repeat_emit_encodeSnappyBlockAsm: + MOVB $0xf4, (CX) + MOVW SI, 1(CX) + ADDQ $0x03, CX + JMP memmove_long_repeat_emit_encodeSnappyBlockAsm + +two_bytes_repeat_emit_encodeSnappyBlockAsm: + MOVB $0xf0, (CX) + MOVB SI, 1(CX) + ADDQ $0x02, CX + CMPL SI, $0x40 + JB memmove_repeat_emit_encodeSnappyBlockAsm + JMP memmove_long_repeat_emit_encodeSnappyBlockAsm + +one_byte_repeat_emit_encodeSnappyBlockAsm: + SHLB $0x02, SI + MOVB SI, (CX) + ADDQ $0x01, CX + +memmove_repeat_emit_encodeSnappyBlockAsm: + LEAQ (CX)(R8*1), SI + + // genMemMoveShort + CMPQ R8, $0x08 + JBE emit_lit_memmove_repeat_emit_encodeSnappyBlockAsm_memmove_move_8 + CMPQ R8, $0x10 + JBE emit_lit_memmove_repeat_emit_encodeSnappyBlockAsm_memmove_move_8through16 + CMPQ R8, $0x20 + JBE emit_lit_memmove_repeat_emit_encodeSnappyBlockAsm_memmove_move_17through32 + JMP emit_lit_memmove_repeat_emit_encodeSnappyBlockAsm_memmove_move_33through64 + +emit_lit_memmove_repeat_emit_encodeSnappyBlockAsm_memmove_move_8: + MOVQ (R9), R10 + MOVQ R10, (CX) + JMP memmove_end_copy_repeat_emit_encodeSnappyBlockAsm + +emit_lit_memmove_repeat_emit_encodeSnappyBlockAsm_memmove_move_8through16: + MOVQ (R9), R10 + MOVQ -8(R9)(R8*1), R9 + MOVQ R10, (CX) + MOVQ R9, -8(CX)(R8*1) + JMP memmove_end_copy_repeat_emit_encodeSnappyBlockAsm + +emit_lit_memmove_repeat_emit_encodeSnappyBlockAsm_memmove_move_17through32: + MOVOU (R9), X0 + MOVOU -16(R9)(R8*1), X1 + MOVOU X0, (CX) + MOVOU X1, -16(CX)(R8*1) + JMP memmove_end_copy_repeat_emit_encodeSnappyBlockAsm + +emit_lit_memmove_repeat_emit_encodeSnappyBlockAsm_memmove_move_33through64: + MOVOU (R9), X0 + MOVOU 16(R9), X1 + MOVOU -32(R9)(R8*1), X2 + MOVOU -16(R9)(R8*1), X3 + MOVOU X0, (CX) + MOVOU X1, 16(CX) + MOVOU X2, -32(CX)(R8*1) + MOVOU X3, -16(CX)(R8*1) + +memmove_end_copy_repeat_emit_encodeSnappyBlockAsm: + MOVQ SI, CX + JMP emit_literal_done_repeat_emit_encodeSnappyBlockAsm + +memmove_long_repeat_emit_encodeSnappyBlockAsm: + LEAQ (CX)(R8*1), SI + + // genMemMoveLong + MOVOU (R9), X0 + MOVOU 16(R9), X1 + MOVOU -32(R9)(R8*1), X2 + MOVOU -16(R9)(R8*1), X3 + MOVQ R8, R11 + SHRQ $0x05, R11 + MOVQ CX, R10 + ANDL $0x0000001f, R10 + MOVQ $0x00000040, R12 + SUBQ R10, R12 + DECQ R11 + JA emit_lit_memmove_long_repeat_emit_encodeSnappyBlockAsmlarge_forward_sse_loop_32 + LEAQ -32(R9)(R12*1), R10 + LEAQ -32(CX)(R12*1), R13 + +emit_lit_memmove_long_repeat_emit_encodeSnappyBlockAsmlarge_big_loop_back: + MOVOU (R10), X4 + MOVOU 16(R10), X5 + MOVOA X4, (R13) + MOVOA X5, 16(R13) + ADDQ $0x20, R13 + ADDQ $0x20, R10 + ADDQ $0x20, R12 + DECQ R11 + JNA emit_lit_memmove_long_repeat_emit_encodeSnappyBlockAsmlarge_big_loop_back + +emit_lit_memmove_long_repeat_emit_encodeSnappyBlockAsmlarge_forward_sse_loop_32: + MOVOU -32(R9)(R12*1), X4 + MOVOU -16(R9)(R12*1), X5 + MOVOA X4, -32(CX)(R12*1) + MOVOA X5, -16(CX)(R12*1) + ADDQ $0x20, R12 + CMPQ R8, R12 + JAE emit_lit_memmove_long_repeat_emit_encodeSnappyBlockAsmlarge_forward_sse_loop_32 + MOVOU X0, (CX) + MOVOU X1, 16(CX) + MOVOU X2, -32(CX)(R8*1) + MOVOU X3, -16(CX)(R8*1) + MOVQ SI, CX + +emit_literal_done_repeat_emit_encodeSnappyBlockAsm: + ADDL $0x05, DX + MOVL DX, SI + SUBL 16(SP), SI + MOVQ src_len+32(FP), R8 + SUBL DX, R8 + LEAQ (BX)(DX*1), R9 + LEAQ (BX)(SI*1), SI + + // matchLen + XORL R11, R11 + +matchlen_loopback_16_repeat_extend_encodeSnappyBlockAsm: + CMPL R8, $0x10 + JB matchlen_match8_repeat_extend_encodeSnappyBlockAsm + MOVQ (R9)(R11*1), R10 + MOVQ 8(R9)(R11*1), R12 + 
XORQ (SI)(R11*1), R10 + JNZ matchlen_bsf_8_repeat_extend_encodeSnappyBlockAsm + XORQ 8(SI)(R11*1), R12 + JNZ matchlen_bsf_16repeat_extend_encodeSnappyBlockAsm + LEAL -16(R8), R8 + LEAL 16(R11), R11 + JMP matchlen_loopback_16_repeat_extend_encodeSnappyBlockAsm + +matchlen_bsf_16repeat_extend_encodeSnappyBlockAsm: +#ifdef GOAMD64_v3 + TZCNTQ R12, R12 + +#else + BSFQ R12, R12 + +#endif + SARQ $0x03, R12 + LEAL 8(R11)(R12*1), R11 + JMP repeat_extend_forward_end_encodeSnappyBlockAsm + +matchlen_match8_repeat_extend_encodeSnappyBlockAsm: + CMPL R8, $0x08 + JB matchlen_match4_repeat_extend_encodeSnappyBlockAsm + MOVQ (R9)(R11*1), R10 + XORQ (SI)(R11*1), R10 + JNZ matchlen_bsf_8_repeat_extend_encodeSnappyBlockAsm + LEAL -8(R8), R8 + LEAL 8(R11), R11 + JMP matchlen_match4_repeat_extend_encodeSnappyBlockAsm + +matchlen_bsf_8_repeat_extend_encodeSnappyBlockAsm: +#ifdef GOAMD64_v3 + TZCNTQ R10, R10 + +#else + BSFQ R10, R10 + +#endif + SARQ $0x03, R10 + LEAL (R11)(R10*1), R11 + JMP repeat_extend_forward_end_encodeSnappyBlockAsm + +matchlen_match4_repeat_extend_encodeSnappyBlockAsm: + CMPL R8, $0x04 + JB matchlen_match2_repeat_extend_encodeSnappyBlockAsm + MOVL (R9)(R11*1), R10 + CMPL (SI)(R11*1), R10 + JNE matchlen_match2_repeat_extend_encodeSnappyBlockAsm + LEAL -4(R8), R8 + LEAL 4(R11), R11 + +matchlen_match2_repeat_extend_encodeSnappyBlockAsm: + CMPL R8, $0x01 + JE matchlen_match1_repeat_extend_encodeSnappyBlockAsm + JB repeat_extend_forward_end_encodeSnappyBlockAsm + MOVW (R9)(R11*1), R10 + CMPW (SI)(R11*1), R10 + JNE matchlen_match1_repeat_extend_encodeSnappyBlockAsm + LEAL 2(R11), R11 + SUBL $0x02, R8 + JZ repeat_extend_forward_end_encodeSnappyBlockAsm + +matchlen_match1_repeat_extend_encodeSnappyBlockAsm: + MOVB (R9)(R11*1), R10 + CMPB (SI)(R11*1), R10 + JNE repeat_extend_forward_end_encodeSnappyBlockAsm + LEAL 1(R11), R11 + +repeat_extend_forward_end_encodeSnappyBlockAsm: + ADDL R11, DX + MOVL DX, SI + SUBL DI, SI + MOVL 16(SP), DI + + // emitCopy + CMPL DI, $0x00010000 + JB two_byte_offset_repeat_as_copy_encodeSnappyBlockAsm + +four_bytes_loop_back_repeat_as_copy_encodeSnappyBlockAsm: + CMPL SI, $0x40 + JBE four_bytes_remain_repeat_as_copy_encodeSnappyBlockAsm + MOVB $0xff, (CX) + MOVL DI, 1(CX) + LEAL -64(SI), SI + ADDQ $0x05, CX + CMPL SI, $0x04 + JB four_bytes_remain_repeat_as_copy_encodeSnappyBlockAsm + JMP four_bytes_loop_back_repeat_as_copy_encodeSnappyBlockAsm + +four_bytes_remain_repeat_as_copy_encodeSnappyBlockAsm: + TESTL SI, SI + JZ repeat_end_emit_encodeSnappyBlockAsm + XORL R8, R8 + LEAL -1(R8)(SI*4), SI + MOVB SI, (CX) + MOVL DI, 1(CX) + ADDQ $0x05, CX + JMP repeat_end_emit_encodeSnappyBlockAsm + +two_byte_offset_repeat_as_copy_encodeSnappyBlockAsm: + CMPL SI, $0x40 + JBE two_byte_offset_short_repeat_as_copy_encodeSnappyBlockAsm + MOVB $0xee, (CX) + MOVW DI, 1(CX) + LEAL -60(SI), SI + ADDQ $0x03, CX + JMP two_byte_offset_repeat_as_copy_encodeSnappyBlockAsm + +two_byte_offset_short_repeat_as_copy_encodeSnappyBlockAsm: + MOVL SI, R8 + SHLL $0x02, R8 + CMPL SI, $0x0c + JAE emit_copy_three_repeat_as_copy_encodeSnappyBlockAsm + CMPL DI, $0x00000800 + JAE emit_copy_three_repeat_as_copy_encodeSnappyBlockAsm + LEAL -15(R8), R8 + MOVB DI, 1(CX) + SHRL $0x08, DI + SHLL $0x05, DI + ORL DI, R8 + MOVB R8, (CX) + ADDQ $0x02, CX + JMP repeat_end_emit_encodeSnappyBlockAsm + +emit_copy_three_repeat_as_copy_encodeSnappyBlockAsm: + LEAL -2(R8), R8 + MOVB R8, (CX) + MOVW DI, 1(CX) + ADDQ $0x03, CX + +repeat_end_emit_encodeSnappyBlockAsm: + MOVL DX, 12(SP) + JMP search_loop_encodeSnappyBlockAsm + 
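+	// No repeat offset matched at the predicted position. The block below
+	// probes the hash-table candidates loaded in the search loop: the
+	// primary candidate at the current position, a second candidate one
+	// byte ahead, and a third (from the shifted hash) two bytes ahead,
+	// before advancing the search position.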
+no_repeat_found_encodeSnappyBlockAsm: + CMPL (BX)(SI*1), DI + JEQ candidate_match_encodeSnappyBlockAsm + SHRQ $0x08, DI + MOVL (AX)(R10*4), SI + LEAL 2(DX), R9 + CMPL (BX)(R8*1), DI + JEQ candidate2_match_encodeSnappyBlockAsm + MOVL R9, (AX)(R10*4) + SHRQ $0x08, DI + CMPL (BX)(SI*1), DI + JEQ candidate3_match_encodeSnappyBlockAsm + MOVL 20(SP), DX + JMP search_loop_encodeSnappyBlockAsm + +candidate3_match_encodeSnappyBlockAsm: + ADDL $0x02, DX + JMP candidate_match_encodeSnappyBlockAsm + +candidate2_match_encodeSnappyBlockAsm: + MOVL R9, (AX)(R10*4) + INCL DX + MOVL R8, SI + +candidate_match_encodeSnappyBlockAsm: + MOVL 12(SP), DI + TESTL SI, SI + JZ match_extend_back_end_encodeSnappyBlockAsm + +match_extend_back_loop_encodeSnappyBlockAsm: + CMPL DX, DI + JBE match_extend_back_end_encodeSnappyBlockAsm + MOVB -1(BX)(SI*1), R8 + MOVB -1(BX)(DX*1), R9 + CMPB R8, R9 + JNE match_extend_back_end_encodeSnappyBlockAsm + LEAL -1(DX), DX + DECL SI + JZ match_extend_back_end_encodeSnappyBlockAsm + JMP match_extend_back_loop_encodeSnappyBlockAsm + +match_extend_back_end_encodeSnappyBlockAsm: + MOVL DX, DI + SUBL 12(SP), DI + LEAQ 5(CX)(DI*1), DI + CMPQ DI, (SP) + JB match_dst_size_check_encodeSnappyBlockAsm + MOVQ $0x00000000, ret+56(FP) + RET + +match_dst_size_check_encodeSnappyBlockAsm: + MOVL DX, DI + MOVL 12(SP), R8 + CMPL R8, DI + JEQ emit_literal_done_match_emit_encodeSnappyBlockAsm + MOVL DI, R9 + MOVL DI, 12(SP) + LEAQ (BX)(R8*1), DI + SUBL R8, R9 + LEAL -1(R9), R8 + CMPL R8, $0x3c + JB one_byte_match_emit_encodeSnappyBlockAsm + CMPL R8, $0x00000100 + JB two_bytes_match_emit_encodeSnappyBlockAsm + CMPL R8, $0x00010000 + JB three_bytes_match_emit_encodeSnappyBlockAsm + CMPL R8, $0x01000000 + JB four_bytes_match_emit_encodeSnappyBlockAsm + MOVB $0xfc, (CX) + MOVL R8, 1(CX) + ADDQ $0x05, CX + JMP memmove_long_match_emit_encodeSnappyBlockAsm + +four_bytes_match_emit_encodeSnappyBlockAsm: + MOVL R8, R10 + SHRL $0x10, R10 + MOVB $0xf8, (CX) + MOVW R8, 1(CX) + MOVB R10, 3(CX) + ADDQ $0x04, CX + JMP memmove_long_match_emit_encodeSnappyBlockAsm + +three_bytes_match_emit_encodeSnappyBlockAsm: + MOVB $0xf4, (CX) + MOVW R8, 1(CX) + ADDQ $0x03, CX + JMP memmove_long_match_emit_encodeSnappyBlockAsm + +two_bytes_match_emit_encodeSnappyBlockAsm: + MOVB $0xf0, (CX) + MOVB R8, 1(CX) + ADDQ $0x02, CX + CMPL R8, $0x40 + JB memmove_match_emit_encodeSnappyBlockAsm + JMP memmove_long_match_emit_encodeSnappyBlockAsm + +one_byte_match_emit_encodeSnappyBlockAsm: + SHLB $0x02, R8 + MOVB R8, (CX) + ADDQ $0x01, CX + +memmove_match_emit_encodeSnappyBlockAsm: + LEAQ (CX)(R9*1), R8 + + // genMemMoveShort + CMPQ R9, $0x08 + JBE emit_lit_memmove_match_emit_encodeSnappyBlockAsm_memmove_move_8 + CMPQ R9, $0x10 + JBE emit_lit_memmove_match_emit_encodeSnappyBlockAsm_memmove_move_8through16 + CMPQ R9, $0x20 + JBE emit_lit_memmove_match_emit_encodeSnappyBlockAsm_memmove_move_17through32 + JMP emit_lit_memmove_match_emit_encodeSnappyBlockAsm_memmove_move_33through64 + +emit_lit_memmove_match_emit_encodeSnappyBlockAsm_memmove_move_8: + MOVQ (DI), R10 + MOVQ R10, (CX) + JMP memmove_end_copy_match_emit_encodeSnappyBlockAsm + +emit_lit_memmove_match_emit_encodeSnappyBlockAsm_memmove_move_8through16: + MOVQ (DI), R10 + MOVQ -8(DI)(R9*1), DI + MOVQ R10, (CX) + MOVQ DI, -8(CX)(R9*1) + JMP memmove_end_copy_match_emit_encodeSnappyBlockAsm + +emit_lit_memmove_match_emit_encodeSnappyBlockAsm_memmove_move_17through32: + MOVOU (DI), X0 + MOVOU -16(DI)(R9*1), X1 + MOVOU X0, (CX) + MOVOU X1, -16(CX)(R9*1) + JMP 
memmove_end_copy_match_emit_encodeSnappyBlockAsm + +emit_lit_memmove_match_emit_encodeSnappyBlockAsm_memmove_move_33through64: + MOVOU (DI), X0 + MOVOU 16(DI), X1 + MOVOU -32(DI)(R9*1), X2 + MOVOU -16(DI)(R9*1), X3 + MOVOU X0, (CX) + MOVOU X1, 16(CX) + MOVOU X2, -32(CX)(R9*1) + MOVOU X3, -16(CX)(R9*1) + +memmove_end_copy_match_emit_encodeSnappyBlockAsm: + MOVQ R8, CX + JMP emit_literal_done_match_emit_encodeSnappyBlockAsm + +memmove_long_match_emit_encodeSnappyBlockAsm: + LEAQ (CX)(R9*1), R8 + + // genMemMoveLong + MOVOU (DI), X0 + MOVOU 16(DI), X1 + MOVOU -32(DI)(R9*1), X2 + MOVOU -16(DI)(R9*1), X3 + MOVQ R9, R11 + SHRQ $0x05, R11 + MOVQ CX, R10 + ANDL $0x0000001f, R10 + MOVQ $0x00000040, R12 + SUBQ R10, R12 + DECQ R11 + JA emit_lit_memmove_long_match_emit_encodeSnappyBlockAsmlarge_forward_sse_loop_32 + LEAQ -32(DI)(R12*1), R10 + LEAQ -32(CX)(R12*1), R13 + +emit_lit_memmove_long_match_emit_encodeSnappyBlockAsmlarge_big_loop_back: + MOVOU (R10), X4 + MOVOU 16(R10), X5 + MOVOA X4, (R13) + MOVOA X5, 16(R13) + ADDQ $0x20, R13 + ADDQ $0x20, R10 + ADDQ $0x20, R12 + DECQ R11 + JNA emit_lit_memmove_long_match_emit_encodeSnappyBlockAsmlarge_big_loop_back + +emit_lit_memmove_long_match_emit_encodeSnappyBlockAsmlarge_forward_sse_loop_32: + MOVOU -32(DI)(R12*1), X4 + MOVOU -16(DI)(R12*1), X5 + MOVOA X4, -32(CX)(R12*1) + MOVOA X5, -16(CX)(R12*1) + ADDQ $0x20, R12 + CMPQ R9, R12 + JAE emit_lit_memmove_long_match_emit_encodeSnappyBlockAsmlarge_forward_sse_loop_32 + MOVOU X0, (CX) + MOVOU X1, 16(CX) + MOVOU X2, -32(CX)(R9*1) + MOVOU X3, -16(CX)(R9*1) + MOVQ R8, CX + +emit_literal_done_match_emit_encodeSnappyBlockAsm: +match_nolit_loop_encodeSnappyBlockAsm: + MOVL DX, DI + SUBL SI, DI + MOVL DI, 16(SP) + ADDL $0x04, DX + ADDL $0x04, SI + MOVQ src_len+32(FP), DI + SUBL DX, DI + LEAQ (BX)(DX*1), R8 + LEAQ (BX)(SI*1), SI + + // matchLen + XORL R10, R10 + +matchlen_loopback_16_match_nolit_encodeSnappyBlockAsm: + CMPL DI, $0x10 + JB matchlen_match8_match_nolit_encodeSnappyBlockAsm + MOVQ (R8)(R10*1), R9 + MOVQ 8(R8)(R10*1), R11 + XORQ (SI)(R10*1), R9 + JNZ matchlen_bsf_8_match_nolit_encodeSnappyBlockAsm + XORQ 8(SI)(R10*1), R11 + JNZ matchlen_bsf_16match_nolit_encodeSnappyBlockAsm + LEAL -16(DI), DI + LEAL 16(R10), R10 + JMP matchlen_loopback_16_match_nolit_encodeSnappyBlockAsm + +matchlen_bsf_16match_nolit_encodeSnappyBlockAsm: +#ifdef GOAMD64_v3 + TZCNTQ R11, R11 + +#else + BSFQ R11, R11 + +#endif + SARQ $0x03, R11 + LEAL 8(R10)(R11*1), R10 + JMP match_nolit_end_encodeSnappyBlockAsm + +matchlen_match8_match_nolit_encodeSnappyBlockAsm: + CMPL DI, $0x08 + JB matchlen_match4_match_nolit_encodeSnappyBlockAsm + MOVQ (R8)(R10*1), R9 + XORQ (SI)(R10*1), R9 + JNZ matchlen_bsf_8_match_nolit_encodeSnappyBlockAsm + LEAL -8(DI), DI + LEAL 8(R10), R10 + JMP matchlen_match4_match_nolit_encodeSnappyBlockAsm + +matchlen_bsf_8_match_nolit_encodeSnappyBlockAsm: +#ifdef GOAMD64_v3 + TZCNTQ R9, R9 + +#else + BSFQ R9, R9 + +#endif + SARQ $0x03, R9 + LEAL (R10)(R9*1), R10 + JMP match_nolit_end_encodeSnappyBlockAsm + +matchlen_match4_match_nolit_encodeSnappyBlockAsm: + CMPL DI, $0x04 + JB matchlen_match2_match_nolit_encodeSnappyBlockAsm + MOVL (R8)(R10*1), R9 + CMPL (SI)(R10*1), R9 + JNE matchlen_match2_match_nolit_encodeSnappyBlockAsm + LEAL -4(DI), DI + LEAL 4(R10), R10 + +matchlen_match2_match_nolit_encodeSnappyBlockAsm: + CMPL DI, $0x01 + JE matchlen_match1_match_nolit_encodeSnappyBlockAsm + JB match_nolit_end_encodeSnappyBlockAsm + MOVW (R8)(R10*1), R9 + CMPW (SI)(R10*1), R9 + JNE 
matchlen_match1_match_nolit_encodeSnappyBlockAsm + LEAL 2(R10), R10 + SUBL $0x02, DI + JZ match_nolit_end_encodeSnappyBlockAsm + +matchlen_match1_match_nolit_encodeSnappyBlockAsm: + MOVB (R8)(R10*1), R9 + CMPB (SI)(R10*1), R9 + JNE match_nolit_end_encodeSnappyBlockAsm + LEAL 1(R10), R10 + +match_nolit_end_encodeSnappyBlockAsm: + ADDL R10, DX + MOVL 16(SP), SI + ADDL $0x04, R10 + MOVL DX, 12(SP) + + // emitCopy + CMPL SI, $0x00010000 + JB two_byte_offset_match_nolit_encodeSnappyBlockAsm + +four_bytes_loop_back_match_nolit_encodeSnappyBlockAsm: + CMPL R10, $0x40 + JBE four_bytes_remain_match_nolit_encodeSnappyBlockAsm + MOVB $0xff, (CX) + MOVL SI, 1(CX) + LEAL -64(R10), R10 + ADDQ $0x05, CX + CMPL R10, $0x04 + JB four_bytes_remain_match_nolit_encodeSnappyBlockAsm + JMP four_bytes_loop_back_match_nolit_encodeSnappyBlockAsm + +four_bytes_remain_match_nolit_encodeSnappyBlockAsm: + TESTL R10, R10 + JZ match_nolit_emitcopy_end_encodeSnappyBlockAsm + XORL DI, DI + LEAL -1(DI)(R10*4), R10 + MOVB R10, (CX) + MOVL SI, 1(CX) + ADDQ $0x05, CX + JMP match_nolit_emitcopy_end_encodeSnappyBlockAsm + +two_byte_offset_match_nolit_encodeSnappyBlockAsm: + CMPL R10, $0x40 + JBE two_byte_offset_short_match_nolit_encodeSnappyBlockAsm + MOVB $0xee, (CX) + MOVW SI, 1(CX) + LEAL -60(R10), R10 + ADDQ $0x03, CX + JMP two_byte_offset_match_nolit_encodeSnappyBlockAsm + +two_byte_offset_short_match_nolit_encodeSnappyBlockAsm: + MOVL R10, DI + SHLL $0x02, DI + CMPL R10, $0x0c + JAE emit_copy_three_match_nolit_encodeSnappyBlockAsm + CMPL SI, $0x00000800 + JAE emit_copy_three_match_nolit_encodeSnappyBlockAsm + LEAL -15(DI), DI + MOVB SI, 1(CX) + SHRL $0x08, SI + SHLL $0x05, SI + ORL SI, DI + MOVB DI, (CX) + ADDQ $0x02, CX + JMP match_nolit_emitcopy_end_encodeSnappyBlockAsm + +emit_copy_three_match_nolit_encodeSnappyBlockAsm: + LEAL -2(DI), DI + MOVB DI, (CX) + MOVW SI, 1(CX) + ADDQ $0x03, CX + +match_nolit_emitcopy_end_encodeSnappyBlockAsm: + CMPL DX, 8(SP) + JAE emit_remainder_encodeSnappyBlockAsm + MOVQ -2(BX)(DX*1), DI + CMPQ CX, (SP) + JB match_nolit_dst_ok_encodeSnappyBlockAsm + MOVQ $0x00000000, ret+56(FP) + RET + +match_nolit_dst_ok_encodeSnappyBlockAsm: + MOVQ $0x0000cf1bbcdcbf9b, R9 + MOVQ DI, R8 + SHRQ $0x10, DI + MOVQ DI, SI + SHLQ $0x10, R8 + IMULQ R9, R8 + SHRQ $0x32, R8 + SHLQ $0x10, SI + IMULQ R9, SI + SHRQ $0x32, SI + LEAL -2(DX), R9 + LEAQ (AX)(SI*4), R10 + MOVL (R10), SI + MOVL R9, (AX)(R8*4) + MOVL DX, (R10) + CMPL (BX)(SI*1), DI + JEQ match_nolit_loop_encodeSnappyBlockAsm + INCL DX + JMP search_loop_encodeSnappyBlockAsm + +emit_remainder_encodeSnappyBlockAsm: + MOVQ src_len+32(FP), AX + SUBL 12(SP), AX + LEAQ 5(CX)(AX*1), AX + CMPQ AX, (SP) + JB emit_remainder_ok_encodeSnappyBlockAsm + MOVQ $0x00000000, ret+56(FP) + RET + +emit_remainder_ok_encodeSnappyBlockAsm: + MOVQ src_len+32(FP), AX + MOVL 12(SP), DX + CMPL DX, AX + JEQ emit_literal_done_emit_remainder_encodeSnappyBlockAsm + MOVL AX, SI + MOVL AX, 12(SP) + LEAQ (BX)(DX*1), AX + SUBL DX, SI + LEAL -1(SI), DX + CMPL DX, $0x3c + JB one_byte_emit_remainder_encodeSnappyBlockAsm + CMPL DX, $0x00000100 + JB two_bytes_emit_remainder_encodeSnappyBlockAsm + CMPL DX, $0x00010000 + JB three_bytes_emit_remainder_encodeSnappyBlockAsm + CMPL DX, $0x01000000 + JB four_bytes_emit_remainder_encodeSnappyBlockAsm + MOVB $0xfc, (CX) + MOVL DX, 1(CX) + ADDQ $0x05, CX + JMP memmove_long_emit_remainder_encodeSnappyBlockAsm + +four_bytes_emit_remainder_encodeSnappyBlockAsm: + MOVL DX, BX + SHRL $0x10, BX + MOVB $0xf8, (CX) + MOVW DX, 1(CX) + MOVB BL, 3(CX) + ADDQ $0x04, CX 
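+	// (tag 0xf8: upper six bits = 62, so the literal length-1 was emitted
+	// in the three bytes following the tag)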
+ JMP memmove_long_emit_remainder_encodeSnappyBlockAsm + +three_bytes_emit_remainder_encodeSnappyBlockAsm: + MOVB $0xf4, (CX) + MOVW DX, 1(CX) + ADDQ $0x03, CX + JMP memmove_long_emit_remainder_encodeSnappyBlockAsm + +two_bytes_emit_remainder_encodeSnappyBlockAsm: + MOVB $0xf0, (CX) + MOVB DL, 1(CX) + ADDQ $0x02, CX + CMPL DX, $0x40 + JB memmove_emit_remainder_encodeSnappyBlockAsm + JMP memmove_long_emit_remainder_encodeSnappyBlockAsm + +one_byte_emit_remainder_encodeSnappyBlockAsm: + SHLB $0x02, DL + MOVB DL, (CX) + ADDQ $0x01, CX + +memmove_emit_remainder_encodeSnappyBlockAsm: + LEAQ (CX)(SI*1), DX + MOVL SI, BX + + // genMemMoveShort + CMPQ BX, $0x03 + JB emit_lit_memmove_emit_remainder_encodeSnappyBlockAsm_memmove_move_1or2 + JE emit_lit_memmove_emit_remainder_encodeSnappyBlockAsm_memmove_move_3 + CMPQ BX, $0x08 + JB emit_lit_memmove_emit_remainder_encodeSnappyBlockAsm_memmove_move_4through7 + CMPQ BX, $0x10 + JBE emit_lit_memmove_emit_remainder_encodeSnappyBlockAsm_memmove_move_8through16 + CMPQ BX, $0x20 + JBE emit_lit_memmove_emit_remainder_encodeSnappyBlockAsm_memmove_move_17through32 + JMP emit_lit_memmove_emit_remainder_encodeSnappyBlockAsm_memmove_move_33through64 + +emit_lit_memmove_emit_remainder_encodeSnappyBlockAsm_memmove_move_1or2: + MOVB (AX), SI + MOVB -1(AX)(BX*1), AL + MOVB SI, (CX) + MOVB AL, -1(CX)(BX*1) + JMP memmove_end_copy_emit_remainder_encodeSnappyBlockAsm + +emit_lit_memmove_emit_remainder_encodeSnappyBlockAsm_memmove_move_3: + MOVW (AX), SI + MOVB 2(AX), AL + MOVW SI, (CX) + MOVB AL, 2(CX) + JMP memmove_end_copy_emit_remainder_encodeSnappyBlockAsm + +emit_lit_memmove_emit_remainder_encodeSnappyBlockAsm_memmove_move_4through7: + MOVL (AX), SI + MOVL -4(AX)(BX*1), AX + MOVL SI, (CX) + MOVL AX, -4(CX)(BX*1) + JMP memmove_end_copy_emit_remainder_encodeSnappyBlockAsm + +emit_lit_memmove_emit_remainder_encodeSnappyBlockAsm_memmove_move_8through16: + MOVQ (AX), SI + MOVQ -8(AX)(BX*1), AX + MOVQ SI, (CX) + MOVQ AX, -8(CX)(BX*1) + JMP memmove_end_copy_emit_remainder_encodeSnappyBlockAsm + +emit_lit_memmove_emit_remainder_encodeSnappyBlockAsm_memmove_move_17through32: + MOVOU (AX), X0 + MOVOU -16(AX)(BX*1), X1 + MOVOU X0, (CX) + MOVOU X1, -16(CX)(BX*1) + JMP memmove_end_copy_emit_remainder_encodeSnappyBlockAsm + +emit_lit_memmove_emit_remainder_encodeSnappyBlockAsm_memmove_move_33through64: + MOVOU (AX), X0 + MOVOU 16(AX), X1 + MOVOU -32(AX)(BX*1), X2 + MOVOU -16(AX)(BX*1), X3 + MOVOU X0, (CX) + MOVOU X1, 16(CX) + MOVOU X2, -32(CX)(BX*1) + MOVOU X3, -16(CX)(BX*1) + +memmove_end_copy_emit_remainder_encodeSnappyBlockAsm: + MOVQ DX, CX + JMP emit_literal_done_emit_remainder_encodeSnappyBlockAsm + +memmove_long_emit_remainder_encodeSnappyBlockAsm: + LEAQ (CX)(SI*1), DX + MOVL SI, BX + + // genMemMoveLong + MOVOU (AX), X0 + MOVOU 16(AX), X1 + MOVOU -32(AX)(BX*1), X2 + MOVOU -16(AX)(BX*1), X3 + MOVQ BX, DI + SHRQ $0x05, DI + MOVQ CX, SI + ANDL $0x0000001f, SI + MOVQ $0x00000040, R8 + SUBQ SI, R8 + DECQ DI + JA emit_lit_memmove_long_emit_remainder_encodeSnappyBlockAsmlarge_forward_sse_loop_32 + LEAQ -32(AX)(R8*1), SI + LEAQ -32(CX)(R8*1), R9 + +emit_lit_memmove_long_emit_remainder_encodeSnappyBlockAsmlarge_big_loop_back: + MOVOU (SI), X4 + MOVOU 16(SI), X5 + MOVOA X4, (R9) + MOVOA X5, 16(R9) + ADDQ $0x20, R9 + ADDQ $0x20, SI + ADDQ $0x20, R8 + DECQ DI + JNA emit_lit_memmove_long_emit_remainder_encodeSnappyBlockAsmlarge_big_loop_back + +emit_lit_memmove_long_emit_remainder_encodeSnappyBlockAsmlarge_forward_sse_loop_32: + MOVOU -32(AX)(R8*1), X4 + MOVOU -16(AX)(R8*1), X5 + 
MOVOA X4, -32(CX)(R8*1) + MOVOA X5, -16(CX)(R8*1) + ADDQ $0x20, R8 + CMPQ BX, R8 + JAE emit_lit_memmove_long_emit_remainder_encodeSnappyBlockAsmlarge_forward_sse_loop_32 + MOVOU X0, (CX) + MOVOU X1, 16(CX) + MOVOU X2, -32(CX)(BX*1) + MOVOU X3, -16(CX)(BX*1) + MOVQ DX, CX + +emit_literal_done_emit_remainder_encodeSnappyBlockAsm: + MOVQ dst_base+0(FP), AX + SUBQ AX, CX + MOVQ CX, ret+56(FP) + RET + +// func encodeSnappyBlockAsm64K(dst []byte, src []byte, tmp *[65536]byte) int +// Requires: BMI, SSE2 +TEXT ·encodeSnappyBlockAsm64K(SB), $24-64 + MOVQ tmp+48(FP), AX + MOVQ dst_base+0(FP), CX + MOVQ $0x00000200, DX + MOVQ AX, BX + PXOR X0, X0 + +zero_loop_encodeSnappyBlockAsm64K: + MOVOU X0, (BX) + MOVOU X0, 16(BX) + MOVOU X0, 32(BX) + MOVOU X0, 48(BX) + MOVOU X0, 64(BX) + MOVOU X0, 80(BX) + MOVOU X0, 96(BX) + MOVOU X0, 112(BX) + ADDQ $0x80, BX + DECQ DX + JNZ zero_loop_encodeSnappyBlockAsm64K + MOVL $0x00000000, 12(SP) + MOVQ src_len+32(FP), DX + LEAQ -9(DX), BX + LEAQ -8(DX), SI + MOVL SI, 8(SP) + SHRQ $0x05, DX + SUBL DX, BX + LEAQ (CX)(BX*1), BX + MOVQ BX, (SP) + MOVL $0x00000001, DX + MOVL DX, 16(SP) + MOVQ src_base+24(FP), BX + +search_loop_encodeSnappyBlockAsm64K: + MOVL DX, SI + SUBL 12(SP), SI + SHRL $0x06, SI + LEAL 4(DX)(SI*1), SI + CMPL SI, 8(SP) + JAE emit_remainder_encodeSnappyBlockAsm64K + MOVQ (BX)(DX*1), DI + MOVL SI, 20(SP) + MOVQ $0x0000cf1bbcdcbf9b, R9 + MOVQ DI, R10 + MOVQ DI, R11 + SHRQ $0x08, R11 + SHLQ $0x10, R10 + IMULQ R9, R10 + SHRQ $0x32, R10 + SHLQ $0x10, R11 + IMULQ R9, R11 + SHRQ $0x32, R11 + MOVL (AX)(R10*4), SI + MOVL (AX)(R11*4), R8 + MOVL DX, (AX)(R10*4) + LEAL 1(DX), R10 + MOVL R10, (AX)(R11*4) + MOVQ DI, R10 + SHRQ $0x10, R10 + SHLQ $0x10, R10 + IMULQ R9, R10 + SHRQ $0x32, R10 + MOVL DX, R9 + SUBL 16(SP), R9 + MOVL 1(BX)(R9*1), R11 + MOVQ DI, R9 + SHRQ $0x08, R9 + CMPL R9, R11 + JNE no_repeat_found_encodeSnappyBlockAsm64K + LEAL 1(DX), DI + MOVL 12(SP), SI + MOVL DI, R8 + SUBL 16(SP), R8 + JZ repeat_extend_back_end_encodeSnappyBlockAsm64K + +repeat_extend_back_loop_encodeSnappyBlockAsm64K: + CMPL DI, SI + JBE repeat_extend_back_end_encodeSnappyBlockAsm64K + MOVB -1(BX)(R8*1), R9 + MOVB -1(BX)(DI*1), R10 + CMPB R9, R10 + JNE repeat_extend_back_end_encodeSnappyBlockAsm64K + LEAL -1(DI), DI + DECL R8 + JNZ repeat_extend_back_loop_encodeSnappyBlockAsm64K + +repeat_extend_back_end_encodeSnappyBlockAsm64K: + MOVL DI, SI + SUBL 12(SP), SI + LEAQ 3(CX)(SI*1), SI + CMPQ SI, (SP) + JB repeat_dst_size_check_encodeSnappyBlockAsm64K + MOVQ $0x00000000, ret+56(FP) + RET + +repeat_dst_size_check_encodeSnappyBlockAsm64K: + MOVL 12(SP), SI + CMPL SI, DI + JEQ emit_literal_done_repeat_emit_encodeSnappyBlockAsm64K + MOVL DI, R8 + MOVL DI, 12(SP) + LEAQ (BX)(SI*1), R9 + SUBL SI, R8 + LEAL -1(R8), SI + CMPL SI, $0x3c + JB one_byte_repeat_emit_encodeSnappyBlockAsm64K + CMPL SI, $0x00000100 + JB two_bytes_repeat_emit_encodeSnappyBlockAsm64K + JB three_bytes_repeat_emit_encodeSnappyBlockAsm64K + +three_bytes_repeat_emit_encodeSnappyBlockAsm64K: + MOVB $0xf4, (CX) + MOVW SI, 1(CX) + ADDQ $0x03, CX + JMP memmove_long_repeat_emit_encodeSnappyBlockAsm64K + +two_bytes_repeat_emit_encodeSnappyBlockAsm64K: + MOVB $0xf0, (CX) + MOVB SI, 1(CX) + ADDQ $0x02, CX + CMPL SI, $0x40 + JB memmove_repeat_emit_encodeSnappyBlockAsm64K + JMP memmove_long_repeat_emit_encodeSnappyBlockAsm64K + +one_byte_repeat_emit_encodeSnappyBlockAsm64K: + SHLB $0x02, SI + MOVB SI, (CX) + ADDQ $0x01, CX + +memmove_repeat_emit_encodeSnappyBlockAsm64K: + LEAQ (CX)(R8*1), SI + + // genMemMoveShort + CMPQ R8, $0x08 + JBE 
emit_lit_memmove_repeat_emit_encodeSnappyBlockAsm64K_memmove_move_8 + CMPQ R8, $0x10 + JBE emit_lit_memmove_repeat_emit_encodeSnappyBlockAsm64K_memmove_move_8through16 + CMPQ R8, $0x20 + JBE emit_lit_memmove_repeat_emit_encodeSnappyBlockAsm64K_memmove_move_17through32 + JMP emit_lit_memmove_repeat_emit_encodeSnappyBlockAsm64K_memmove_move_33through64 + +emit_lit_memmove_repeat_emit_encodeSnappyBlockAsm64K_memmove_move_8: + MOVQ (R9), R10 + MOVQ R10, (CX) + JMP memmove_end_copy_repeat_emit_encodeSnappyBlockAsm64K + +emit_lit_memmove_repeat_emit_encodeSnappyBlockAsm64K_memmove_move_8through16: + MOVQ (R9), R10 + MOVQ -8(R9)(R8*1), R9 + MOVQ R10, (CX) + MOVQ R9, -8(CX)(R8*1) + JMP memmove_end_copy_repeat_emit_encodeSnappyBlockAsm64K + +emit_lit_memmove_repeat_emit_encodeSnappyBlockAsm64K_memmove_move_17through32: + MOVOU (R9), X0 + MOVOU -16(R9)(R8*1), X1 + MOVOU X0, (CX) + MOVOU X1, -16(CX)(R8*1) + JMP memmove_end_copy_repeat_emit_encodeSnappyBlockAsm64K + +emit_lit_memmove_repeat_emit_encodeSnappyBlockAsm64K_memmove_move_33through64: + MOVOU (R9), X0 + MOVOU 16(R9), X1 + MOVOU -32(R9)(R8*1), X2 + MOVOU -16(R9)(R8*1), X3 + MOVOU X0, (CX) + MOVOU X1, 16(CX) + MOVOU X2, -32(CX)(R8*1) + MOVOU X3, -16(CX)(R8*1) + +memmove_end_copy_repeat_emit_encodeSnappyBlockAsm64K: + MOVQ SI, CX + JMP emit_literal_done_repeat_emit_encodeSnappyBlockAsm64K + +memmove_long_repeat_emit_encodeSnappyBlockAsm64K: + LEAQ (CX)(R8*1), SI + + // genMemMoveLong + MOVOU (R9), X0 + MOVOU 16(R9), X1 + MOVOU -32(R9)(R8*1), X2 + MOVOU -16(R9)(R8*1), X3 + MOVQ R8, R11 + SHRQ $0x05, R11 + MOVQ CX, R10 + ANDL $0x0000001f, R10 + MOVQ $0x00000040, R12 + SUBQ R10, R12 + DECQ R11 + JA emit_lit_memmove_long_repeat_emit_encodeSnappyBlockAsm64Klarge_forward_sse_loop_32 + LEAQ -32(R9)(R12*1), R10 + LEAQ -32(CX)(R12*1), R13 + +emit_lit_memmove_long_repeat_emit_encodeSnappyBlockAsm64Klarge_big_loop_back: + MOVOU (R10), X4 + MOVOU 16(R10), X5 + MOVOA X4, (R13) + MOVOA X5, 16(R13) + ADDQ $0x20, R13 + ADDQ $0x20, R10 + ADDQ $0x20, R12 + DECQ R11 + JNA emit_lit_memmove_long_repeat_emit_encodeSnappyBlockAsm64Klarge_big_loop_back + +emit_lit_memmove_long_repeat_emit_encodeSnappyBlockAsm64Klarge_forward_sse_loop_32: + MOVOU -32(R9)(R12*1), X4 + MOVOU -16(R9)(R12*1), X5 + MOVOA X4, -32(CX)(R12*1) + MOVOA X5, -16(CX)(R12*1) + ADDQ $0x20, R12 + CMPQ R8, R12 + JAE emit_lit_memmove_long_repeat_emit_encodeSnappyBlockAsm64Klarge_forward_sse_loop_32 + MOVOU X0, (CX) + MOVOU X1, 16(CX) + MOVOU X2, -32(CX)(R8*1) + MOVOU X3, -16(CX)(R8*1) + MOVQ SI, CX + +emit_literal_done_repeat_emit_encodeSnappyBlockAsm64K: + ADDL $0x05, DX + MOVL DX, SI + SUBL 16(SP), SI + MOVQ src_len+32(FP), R8 + SUBL DX, R8 + LEAQ (BX)(DX*1), R9 + LEAQ (BX)(SI*1), SI + + // matchLen + XORL R11, R11 + +matchlen_loopback_16_repeat_extend_encodeSnappyBlockAsm64K: + CMPL R8, $0x10 + JB matchlen_match8_repeat_extend_encodeSnappyBlockAsm64K + MOVQ (R9)(R11*1), R10 + MOVQ 8(R9)(R11*1), R12 + XORQ (SI)(R11*1), R10 + JNZ matchlen_bsf_8_repeat_extend_encodeSnappyBlockAsm64K + XORQ 8(SI)(R11*1), R12 + JNZ matchlen_bsf_16repeat_extend_encodeSnappyBlockAsm64K + LEAL -16(R8), R8 + LEAL 16(R11), R11 + JMP matchlen_loopback_16_repeat_extend_encodeSnappyBlockAsm64K + +matchlen_bsf_16repeat_extend_encodeSnappyBlockAsm64K: +#ifdef GOAMD64_v3 + TZCNTQ R12, R12 + +#else + BSFQ R12, R12 + +#endif + SARQ $0x03, R12 + LEAL 8(R11)(R12*1), R11 + JMP repeat_extend_forward_end_encodeSnappyBlockAsm64K + +matchlen_match8_repeat_extend_encodeSnappyBlockAsm64K: + CMPL R8, $0x08 + JB 
matchlen_match4_repeat_extend_encodeSnappyBlockAsm64K + MOVQ (R9)(R11*1), R10 + XORQ (SI)(R11*1), R10 + JNZ matchlen_bsf_8_repeat_extend_encodeSnappyBlockAsm64K + LEAL -8(R8), R8 + LEAL 8(R11), R11 + JMP matchlen_match4_repeat_extend_encodeSnappyBlockAsm64K + +matchlen_bsf_8_repeat_extend_encodeSnappyBlockAsm64K: +#ifdef GOAMD64_v3 + TZCNTQ R10, R10 + +#else + BSFQ R10, R10 + +#endif + SARQ $0x03, R10 + LEAL (R11)(R10*1), R11 + JMP repeat_extend_forward_end_encodeSnappyBlockAsm64K + +matchlen_match4_repeat_extend_encodeSnappyBlockAsm64K: + CMPL R8, $0x04 + JB matchlen_match2_repeat_extend_encodeSnappyBlockAsm64K + MOVL (R9)(R11*1), R10 + CMPL (SI)(R11*1), R10 + JNE matchlen_match2_repeat_extend_encodeSnappyBlockAsm64K + LEAL -4(R8), R8 + LEAL 4(R11), R11 + +matchlen_match2_repeat_extend_encodeSnappyBlockAsm64K: + CMPL R8, $0x01 + JE matchlen_match1_repeat_extend_encodeSnappyBlockAsm64K + JB repeat_extend_forward_end_encodeSnappyBlockAsm64K + MOVW (R9)(R11*1), R10 + CMPW (SI)(R11*1), R10 + JNE matchlen_match1_repeat_extend_encodeSnappyBlockAsm64K + LEAL 2(R11), R11 + SUBL $0x02, R8 + JZ repeat_extend_forward_end_encodeSnappyBlockAsm64K + +matchlen_match1_repeat_extend_encodeSnappyBlockAsm64K: + MOVB (R9)(R11*1), R10 + CMPB (SI)(R11*1), R10 + JNE repeat_extend_forward_end_encodeSnappyBlockAsm64K + LEAL 1(R11), R11 + +repeat_extend_forward_end_encodeSnappyBlockAsm64K: + ADDL R11, DX + MOVL DX, SI + SUBL DI, SI + MOVL 16(SP), DI + + // emitCopy +two_byte_offset_repeat_as_copy_encodeSnappyBlockAsm64K: + CMPL SI, $0x40 + JBE two_byte_offset_short_repeat_as_copy_encodeSnappyBlockAsm64K + MOVB $0xee, (CX) + MOVW DI, 1(CX) + LEAL -60(SI), SI + ADDQ $0x03, CX + JMP two_byte_offset_repeat_as_copy_encodeSnappyBlockAsm64K + +two_byte_offset_short_repeat_as_copy_encodeSnappyBlockAsm64K: + MOVL SI, R8 + SHLL $0x02, R8 + CMPL SI, $0x0c + JAE emit_copy_three_repeat_as_copy_encodeSnappyBlockAsm64K + CMPL DI, $0x00000800 + JAE emit_copy_three_repeat_as_copy_encodeSnappyBlockAsm64K + LEAL -15(R8), R8 + MOVB DI, 1(CX) + SHRL $0x08, DI + SHLL $0x05, DI + ORL DI, R8 + MOVB R8, (CX) + ADDQ $0x02, CX + JMP repeat_end_emit_encodeSnappyBlockAsm64K + +emit_copy_three_repeat_as_copy_encodeSnappyBlockAsm64K: + LEAL -2(R8), R8 + MOVB R8, (CX) + MOVW DI, 1(CX) + ADDQ $0x03, CX + +repeat_end_emit_encodeSnappyBlockAsm64K: + MOVL DX, 12(SP) + JMP search_loop_encodeSnappyBlockAsm64K + +no_repeat_found_encodeSnappyBlockAsm64K: + CMPL (BX)(SI*1), DI + JEQ candidate_match_encodeSnappyBlockAsm64K + SHRQ $0x08, DI + MOVL (AX)(R10*4), SI + LEAL 2(DX), R9 + CMPL (BX)(R8*1), DI + JEQ candidate2_match_encodeSnappyBlockAsm64K + MOVL R9, (AX)(R10*4) + SHRQ $0x08, DI + CMPL (BX)(SI*1), DI + JEQ candidate3_match_encodeSnappyBlockAsm64K + MOVL 20(SP), DX + JMP search_loop_encodeSnappyBlockAsm64K + +candidate3_match_encodeSnappyBlockAsm64K: + ADDL $0x02, DX + JMP candidate_match_encodeSnappyBlockAsm64K + +candidate2_match_encodeSnappyBlockAsm64K: + MOVL R9, (AX)(R10*4) + INCL DX + MOVL R8, SI + +candidate_match_encodeSnappyBlockAsm64K: + MOVL 12(SP), DI + TESTL SI, SI + JZ match_extend_back_end_encodeSnappyBlockAsm64K + +match_extend_back_loop_encodeSnappyBlockAsm64K: + CMPL DX, DI + JBE match_extend_back_end_encodeSnappyBlockAsm64K + MOVB -1(BX)(SI*1), R8 + MOVB -1(BX)(DX*1), R9 + CMPB R8, R9 + JNE match_extend_back_end_encodeSnappyBlockAsm64K + LEAL -1(DX), DX + DECL SI + JZ match_extend_back_end_encodeSnappyBlockAsm64K + JMP match_extend_back_loop_encodeSnappyBlockAsm64K + +match_extend_back_end_encodeSnappyBlockAsm64K: + MOVL DX, DI + 
SUBL 12(SP), DI + LEAQ 3(CX)(DI*1), DI + CMPQ DI, (SP) + JB match_dst_size_check_encodeSnappyBlockAsm64K + MOVQ $0x00000000, ret+56(FP) + RET + +match_dst_size_check_encodeSnappyBlockAsm64K: + MOVL DX, DI + MOVL 12(SP), R8 + CMPL R8, DI + JEQ emit_literal_done_match_emit_encodeSnappyBlockAsm64K + MOVL DI, R9 + MOVL DI, 12(SP) + LEAQ (BX)(R8*1), DI + SUBL R8, R9 + LEAL -1(R9), R8 + CMPL R8, $0x3c + JB one_byte_match_emit_encodeSnappyBlockAsm64K + CMPL R8, $0x00000100 + JB two_bytes_match_emit_encodeSnappyBlockAsm64K + JB three_bytes_match_emit_encodeSnappyBlockAsm64K + +three_bytes_match_emit_encodeSnappyBlockAsm64K: + MOVB $0xf4, (CX) + MOVW R8, 1(CX) + ADDQ $0x03, CX + JMP memmove_long_match_emit_encodeSnappyBlockAsm64K + +two_bytes_match_emit_encodeSnappyBlockAsm64K: + MOVB $0xf0, (CX) + MOVB R8, 1(CX) + ADDQ $0x02, CX + CMPL R8, $0x40 + JB memmove_match_emit_encodeSnappyBlockAsm64K + JMP memmove_long_match_emit_encodeSnappyBlockAsm64K + +one_byte_match_emit_encodeSnappyBlockAsm64K: + SHLB $0x02, R8 + MOVB R8, (CX) + ADDQ $0x01, CX + +memmove_match_emit_encodeSnappyBlockAsm64K: + LEAQ (CX)(R9*1), R8 + + // genMemMoveShort + CMPQ R9, $0x08 + JBE emit_lit_memmove_match_emit_encodeSnappyBlockAsm64K_memmove_move_8 + CMPQ R9, $0x10 + JBE emit_lit_memmove_match_emit_encodeSnappyBlockAsm64K_memmove_move_8through16 + CMPQ R9, $0x20 + JBE emit_lit_memmove_match_emit_encodeSnappyBlockAsm64K_memmove_move_17through32 + JMP emit_lit_memmove_match_emit_encodeSnappyBlockAsm64K_memmove_move_33through64 + +emit_lit_memmove_match_emit_encodeSnappyBlockAsm64K_memmove_move_8: + MOVQ (DI), R10 + MOVQ R10, (CX) + JMP memmove_end_copy_match_emit_encodeSnappyBlockAsm64K + +emit_lit_memmove_match_emit_encodeSnappyBlockAsm64K_memmove_move_8through16: + MOVQ (DI), R10 + MOVQ -8(DI)(R9*1), DI + MOVQ R10, (CX) + MOVQ DI, -8(CX)(R9*1) + JMP memmove_end_copy_match_emit_encodeSnappyBlockAsm64K + +emit_lit_memmove_match_emit_encodeSnappyBlockAsm64K_memmove_move_17through32: + MOVOU (DI), X0 + MOVOU -16(DI)(R9*1), X1 + MOVOU X0, (CX) + MOVOU X1, -16(CX)(R9*1) + JMP memmove_end_copy_match_emit_encodeSnappyBlockAsm64K + +emit_lit_memmove_match_emit_encodeSnappyBlockAsm64K_memmove_move_33through64: + MOVOU (DI), X0 + MOVOU 16(DI), X1 + MOVOU -32(DI)(R9*1), X2 + MOVOU -16(DI)(R9*1), X3 + MOVOU X0, (CX) + MOVOU X1, 16(CX) + MOVOU X2, -32(CX)(R9*1) + MOVOU X3, -16(CX)(R9*1) + +memmove_end_copy_match_emit_encodeSnappyBlockAsm64K: + MOVQ R8, CX + JMP emit_literal_done_match_emit_encodeSnappyBlockAsm64K + +memmove_long_match_emit_encodeSnappyBlockAsm64K: + LEAQ (CX)(R9*1), R8 + + // genMemMoveLong + MOVOU (DI), X0 + MOVOU 16(DI), X1 + MOVOU -32(DI)(R9*1), X2 + MOVOU -16(DI)(R9*1), X3 + MOVQ R9, R11 + SHRQ $0x05, R11 + MOVQ CX, R10 + ANDL $0x0000001f, R10 + MOVQ $0x00000040, R12 + SUBQ R10, R12 + DECQ R11 + JA emit_lit_memmove_long_match_emit_encodeSnappyBlockAsm64Klarge_forward_sse_loop_32 + LEAQ -32(DI)(R12*1), R10 + LEAQ -32(CX)(R12*1), R13 + +emit_lit_memmove_long_match_emit_encodeSnappyBlockAsm64Klarge_big_loop_back: + MOVOU (R10), X4 + MOVOU 16(R10), X5 + MOVOA X4, (R13) + MOVOA X5, 16(R13) + ADDQ $0x20, R13 + ADDQ $0x20, R10 + ADDQ $0x20, R12 + DECQ R11 + JNA emit_lit_memmove_long_match_emit_encodeSnappyBlockAsm64Klarge_big_loop_back + +emit_lit_memmove_long_match_emit_encodeSnappyBlockAsm64Klarge_forward_sse_loop_32: + MOVOU -32(DI)(R12*1), X4 + MOVOU -16(DI)(R12*1), X5 + MOVOA X4, -32(CX)(R12*1) + MOVOA X5, -16(CX)(R12*1) + ADDQ $0x20, R12 + CMPQ R9, R12 + JAE 
emit_lit_memmove_long_match_emit_encodeSnappyBlockAsm64Klarge_forward_sse_loop_32 + MOVOU X0, (CX) + MOVOU X1, 16(CX) + MOVOU X2, -32(CX)(R9*1) + MOVOU X3, -16(CX)(R9*1) + MOVQ R8, CX + +emit_literal_done_match_emit_encodeSnappyBlockAsm64K: +match_nolit_loop_encodeSnappyBlockAsm64K: + MOVL DX, DI + SUBL SI, DI + MOVL DI, 16(SP) + ADDL $0x04, DX + ADDL $0x04, SI + MOVQ src_len+32(FP), DI + SUBL DX, DI + LEAQ (BX)(DX*1), R8 + LEAQ (BX)(SI*1), SI + + // matchLen + XORL R10, R10 + +matchlen_loopback_16_match_nolit_encodeSnappyBlockAsm64K: + CMPL DI, $0x10 + JB matchlen_match8_match_nolit_encodeSnappyBlockAsm64K + MOVQ (R8)(R10*1), R9 + MOVQ 8(R8)(R10*1), R11 + XORQ (SI)(R10*1), R9 + JNZ matchlen_bsf_8_match_nolit_encodeSnappyBlockAsm64K + XORQ 8(SI)(R10*1), R11 + JNZ matchlen_bsf_16match_nolit_encodeSnappyBlockAsm64K + LEAL -16(DI), DI + LEAL 16(R10), R10 + JMP matchlen_loopback_16_match_nolit_encodeSnappyBlockAsm64K + +matchlen_bsf_16match_nolit_encodeSnappyBlockAsm64K: +#ifdef GOAMD64_v3 + TZCNTQ R11, R11 + +#else + BSFQ R11, R11 + +#endif + SARQ $0x03, R11 + LEAL 8(R10)(R11*1), R10 + JMP match_nolit_end_encodeSnappyBlockAsm64K + +matchlen_match8_match_nolit_encodeSnappyBlockAsm64K: + CMPL DI, $0x08 + JB matchlen_match4_match_nolit_encodeSnappyBlockAsm64K + MOVQ (R8)(R10*1), R9 + XORQ (SI)(R10*1), R9 + JNZ matchlen_bsf_8_match_nolit_encodeSnappyBlockAsm64K + LEAL -8(DI), DI + LEAL 8(R10), R10 + JMP matchlen_match4_match_nolit_encodeSnappyBlockAsm64K + +matchlen_bsf_8_match_nolit_encodeSnappyBlockAsm64K: +#ifdef GOAMD64_v3 + TZCNTQ R9, R9 + +#else + BSFQ R9, R9 + +#endif + SARQ $0x03, R9 + LEAL (R10)(R9*1), R10 + JMP match_nolit_end_encodeSnappyBlockAsm64K + +matchlen_match4_match_nolit_encodeSnappyBlockAsm64K: + CMPL DI, $0x04 + JB matchlen_match2_match_nolit_encodeSnappyBlockAsm64K + MOVL (R8)(R10*1), R9 + CMPL (SI)(R10*1), R9 + JNE matchlen_match2_match_nolit_encodeSnappyBlockAsm64K + LEAL -4(DI), DI + LEAL 4(R10), R10 + +matchlen_match2_match_nolit_encodeSnappyBlockAsm64K: + CMPL DI, $0x01 + JE matchlen_match1_match_nolit_encodeSnappyBlockAsm64K + JB match_nolit_end_encodeSnappyBlockAsm64K + MOVW (R8)(R10*1), R9 + CMPW (SI)(R10*1), R9 + JNE matchlen_match1_match_nolit_encodeSnappyBlockAsm64K + LEAL 2(R10), R10 + SUBL $0x02, DI + JZ match_nolit_end_encodeSnappyBlockAsm64K + +matchlen_match1_match_nolit_encodeSnappyBlockAsm64K: + MOVB (R8)(R10*1), R9 + CMPB (SI)(R10*1), R9 + JNE match_nolit_end_encodeSnappyBlockAsm64K + LEAL 1(R10), R10 + +match_nolit_end_encodeSnappyBlockAsm64K: + ADDL R10, DX + MOVL 16(SP), SI + ADDL $0x04, R10 + MOVL DX, 12(SP) + + // emitCopy +two_byte_offset_match_nolit_encodeSnappyBlockAsm64K: + CMPL R10, $0x40 + JBE two_byte_offset_short_match_nolit_encodeSnappyBlockAsm64K + MOVB $0xee, (CX) + MOVW SI, 1(CX) + LEAL -60(R10), R10 + ADDQ $0x03, CX + JMP two_byte_offset_match_nolit_encodeSnappyBlockAsm64K + +two_byte_offset_short_match_nolit_encodeSnappyBlockAsm64K: + MOVL R10, DI + SHLL $0x02, DI + CMPL R10, $0x0c + JAE emit_copy_three_match_nolit_encodeSnappyBlockAsm64K + CMPL SI, $0x00000800 + JAE emit_copy_three_match_nolit_encodeSnappyBlockAsm64K + LEAL -15(DI), DI + MOVB SI, 1(CX) + SHRL $0x08, SI + SHLL $0x05, SI + ORL SI, DI + MOVB DI, (CX) + ADDQ $0x02, CX + JMP match_nolit_emitcopy_end_encodeSnappyBlockAsm64K + +emit_copy_three_match_nolit_encodeSnappyBlockAsm64K: + LEAL -2(DI), DI + MOVB DI, (CX) + MOVW SI, 1(CX) + ADDQ $0x03, CX + +match_nolit_emitcopy_end_encodeSnappyBlockAsm64K: + CMPL DX, 8(SP) + JAE emit_remainder_encodeSnappyBlockAsm64K + MOVQ 
-2(BX)(DX*1), DI + CMPQ CX, (SP) + JB match_nolit_dst_ok_encodeSnappyBlockAsm64K + MOVQ $0x00000000, ret+56(FP) + RET + +match_nolit_dst_ok_encodeSnappyBlockAsm64K: + MOVQ $0x0000cf1bbcdcbf9b, R9 + MOVQ DI, R8 + SHRQ $0x10, DI + MOVQ DI, SI + SHLQ $0x10, R8 + IMULQ R9, R8 + SHRQ $0x32, R8 + SHLQ $0x10, SI + IMULQ R9, SI + SHRQ $0x32, SI + LEAL -2(DX), R9 + LEAQ (AX)(SI*4), R10 + MOVL (R10), SI + MOVL R9, (AX)(R8*4) + MOVL DX, (R10) + CMPL (BX)(SI*1), DI + JEQ match_nolit_loop_encodeSnappyBlockAsm64K + INCL DX + JMP search_loop_encodeSnappyBlockAsm64K + +emit_remainder_encodeSnappyBlockAsm64K: + MOVQ src_len+32(FP), AX + SUBL 12(SP), AX + LEAQ 3(CX)(AX*1), AX + CMPQ AX, (SP) + JB emit_remainder_ok_encodeSnappyBlockAsm64K + MOVQ $0x00000000, ret+56(FP) + RET + +emit_remainder_ok_encodeSnappyBlockAsm64K: + MOVQ src_len+32(FP), AX + MOVL 12(SP), DX + CMPL DX, AX + JEQ emit_literal_done_emit_remainder_encodeSnappyBlockAsm64K + MOVL AX, SI + MOVL AX, 12(SP) + LEAQ (BX)(DX*1), AX + SUBL DX, SI + LEAL -1(SI), DX + CMPL DX, $0x3c + JB one_byte_emit_remainder_encodeSnappyBlockAsm64K + CMPL DX, $0x00000100 + JB two_bytes_emit_remainder_encodeSnappyBlockAsm64K + JB three_bytes_emit_remainder_encodeSnappyBlockAsm64K + +three_bytes_emit_remainder_encodeSnappyBlockAsm64K: + MOVB $0xf4, (CX) + MOVW DX, 1(CX) + ADDQ $0x03, CX + JMP memmove_long_emit_remainder_encodeSnappyBlockAsm64K + +two_bytes_emit_remainder_encodeSnappyBlockAsm64K: + MOVB $0xf0, (CX) + MOVB DL, 1(CX) + ADDQ $0x02, CX + CMPL DX, $0x40 + JB memmove_emit_remainder_encodeSnappyBlockAsm64K + JMP memmove_long_emit_remainder_encodeSnappyBlockAsm64K + +one_byte_emit_remainder_encodeSnappyBlockAsm64K: + SHLB $0x02, DL + MOVB DL, (CX) + ADDQ $0x01, CX + +memmove_emit_remainder_encodeSnappyBlockAsm64K: + LEAQ (CX)(SI*1), DX + MOVL SI, BX + + // genMemMoveShort + CMPQ BX, $0x03 + JB emit_lit_memmove_emit_remainder_encodeSnappyBlockAsm64K_memmove_move_1or2 + JE emit_lit_memmove_emit_remainder_encodeSnappyBlockAsm64K_memmove_move_3 + CMPQ BX, $0x08 + JB emit_lit_memmove_emit_remainder_encodeSnappyBlockAsm64K_memmove_move_4through7 + CMPQ BX, $0x10 + JBE emit_lit_memmove_emit_remainder_encodeSnappyBlockAsm64K_memmove_move_8through16 + CMPQ BX, $0x20 + JBE emit_lit_memmove_emit_remainder_encodeSnappyBlockAsm64K_memmove_move_17through32 + JMP emit_lit_memmove_emit_remainder_encodeSnappyBlockAsm64K_memmove_move_33through64 + +emit_lit_memmove_emit_remainder_encodeSnappyBlockAsm64K_memmove_move_1or2: + MOVB (AX), SI + MOVB -1(AX)(BX*1), AL + MOVB SI, (CX) + MOVB AL, -1(CX)(BX*1) + JMP memmove_end_copy_emit_remainder_encodeSnappyBlockAsm64K + +emit_lit_memmove_emit_remainder_encodeSnappyBlockAsm64K_memmove_move_3: + MOVW (AX), SI + MOVB 2(AX), AL + MOVW SI, (CX) + MOVB AL, 2(CX) + JMP memmove_end_copy_emit_remainder_encodeSnappyBlockAsm64K + +emit_lit_memmove_emit_remainder_encodeSnappyBlockAsm64K_memmove_move_4through7: + MOVL (AX), SI + MOVL -4(AX)(BX*1), AX + MOVL SI, (CX) + MOVL AX, -4(CX)(BX*1) + JMP memmove_end_copy_emit_remainder_encodeSnappyBlockAsm64K + +emit_lit_memmove_emit_remainder_encodeSnappyBlockAsm64K_memmove_move_8through16: + MOVQ (AX), SI + MOVQ -8(AX)(BX*1), AX + MOVQ SI, (CX) + MOVQ AX, -8(CX)(BX*1) + JMP memmove_end_copy_emit_remainder_encodeSnappyBlockAsm64K + +emit_lit_memmove_emit_remainder_encodeSnappyBlockAsm64K_memmove_move_17through32: + MOVOU (AX), X0 + MOVOU -16(AX)(BX*1), X1 + MOVOU X0, (CX) + MOVOU X1, -16(CX)(BX*1) + JMP memmove_end_copy_emit_remainder_encodeSnappyBlockAsm64K + 
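+	// Same overlapping four-MOVOU copy as in encodeSnappyBlockAsm above:
+	// the last two vectors are addressed from the end of the buffer and
+	// overlap the first two when fewer than 64 bytes remain. The generator
+	// emits each block-size specialization as a standalone function, so
+	// these copy helpers repeat per variant.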
+emit_lit_memmove_emit_remainder_encodeSnappyBlockAsm64K_memmove_move_33through64:
+	MOVOU (AX), X0
+	MOVOU 16(AX), X1
+	MOVOU -32(AX)(BX*1), X2
+	MOVOU -16(AX)(BX*1), X3
+	MOVOU X0, (CX)
+	MOVOU X1, 16(CX)
+	MOVOU X2, -32(CX)(BX*1)
+	MOVOU X3, -16(CX)(BX*1)
+
+memmove_end_copy_emit_remainder_encodeSnappyBlockAsm64K:
+	MOVQ DX, CX
+	JMP emit_literal_done_emit_remainder_encodeSnappyBlockAsm64K
+
+memmove_long_emit_remainder_encodeSnappyBlockAsm64K:
+	LEAQ (CX)(SI*1), DX
+	MOVL SI, BX
+
+	// genMemMoveLong
+	MOVOU (AX), X0
+	MOVOU 16(AX), X1
+	MOVOU -32(AX)(BX*1), X2
+	MOVOU -16(AX)(BX*1), X3
+	MOVQ BX, DI
+	SHRQ $0x05, DI
+	MOVQ CX, SI
+	ANDL $0x0000001f, SI
+	MOVQ $0x00000040, R8
+	SUBQ SI, R8
+	DECQ DI
+	JA emit_lit_memmove_long_emit_remainder_encodeSnappyBlockAsm64Klarge_forward_sse_loop_32
+	LEAQ -32(AX)(R8*1), SI
+	LEAQ -32(CX)(R8*1), R9
+
+emit_lit_memmove_long_emit_remainder_encodeSnappyBlockAsm64Klarge_big_loop_back:
+	MOVOU (SI), X4
+	MOVOU 16(SI), X5
+	MOVOA X4, (R9)
+	MOVOA X5, 16(R9)
+	ADDQ $0x20, R9
+	ADDQ $0x20, SI
+	ADDQ $0x20, R8
+	DECQ DI
+	JNA emit_lit_memmove_long_emit_remainder_encodeSnappyBlockAsm64Klarge_big_loop_back
+
+emit_lit_memmove_long_emit_remainder_encodeSnappyBlockAsm64Klarge_forward_sse_loop_32:
+	MOVOU -32(AX)(R8*1), X4
+	MOVOU -16(AX)(R8*1), X5
+	MOVOA X4, -32(CX)(R8*1)
+	MOVOA X5, -16(CX)(R8*1)
+	ADDQ $0x20, R8
+	CMPQ BX, R8
+	JAE emit_lit_memmove_long_emit_remainder_encodeSnappyBlockAsm64Klarge_forward_sse_loop_32
+	MOVOU X0, (CX)
+	MOVOU X1, 16(CX)
+	MOVOU X2, -32(CX)(BX*1)
+	MOVOU X3, -16(CX)(BX*1)
+	MOVQ DX, CX
+
+emit_literal_done_emit_remainder_encodeSnappyBlockAsm64K:
+	MOVQ dst_base+0(FP), AX
+	SUBQ AX, CX
+	MOVQ CX, ret+56(FP)
+	RET
+
+// func encodeSnappyBlockAsm12B(dst []byte, src []byte, tmp *[16384]byte) int
+// Requires: BMI, SSE2
+TEXT ·encodeSnappyBlockAsm12B(SB), $24-64
+	MOVQ tmp+48(FP), AX
+	MOVQ dst_base+0(FP), CX
+	MOVQ $0x00000080, DX
+	MOVQ AX, BX
+	PXOR X0, X0
+
+zero_loop_encodeSnappyBlockAsm12B:
+	MOVOU X0, (BX)
+	MOVOU X0, 16(BX)
+	MOVOU X0, 32(BX)
+	MOVOU X0, 48(BX)
+	MOVOU X0, 64(BX)
+	MOVOU X0, 80(BX)
+	MOVOU X0, 96(BX)
+	MOVOU X0, 112(BX)
+	ADDQ $0x80, BX
+	DECQ DX
+	JNZ zero_loop_encodeSnappyBlockAsm12B
+	MOVL $0x00000000, 12(SP)
+	MOVQ src_len+32(FP), DX
+	LEAQ -9(DX), BX
+	LEAQ -8(DX), SI
+	MOVL SI, 8(SP)
+	SHRQ $0x05, DX
+	SUBL DX, BX
+	LEAQ (CX)(BX*1), BX
+	MOVQ BX, (SP)
+	MOVL $0x00000001, DX
+	MOVL DX, 16(SP)
+	MOVQ src_base+24(FP), BX
+
+search_loop_encodeSnappyBlockAsm12B:
+	MOVL DX, SI
+	SUBL 12(SP), SI
+	SHRL $0x05, SI
+	LEAL 4(DX)(SI*1), SI
+	CMPL SI, 8(SP)
+	JAE emit_remainder_encodeSnappyBlockAsm12B
+	MOVQ (BX)(DX*1), DI
+	MOVL SI, 20(SP)
+	MOVQ $0x000000cf1bbcdcbb, R9
+	MOVQ DI, R10
+	MOVQ DI, R11
+	SHRQ $0x08, R11
+	SHLQ $0x18, R10
+	IMULQ R9, R10
+	SHRQ $0x34, R10
+	SHLQ $0x18, R11
+	IMULQ R9, R11
+	SHRQ $0x34, R11
+	MOVL (AX)(R10*4), SI
+	MOVL (AX)(R11*4), R8
+	MOVL DX, (AX)(R10*4)
+	LEAL 1(DX), R10
+	MOVL R10, (AX)(R11*4)
+	MOVQ DI, R10
+	SHRQ $0x10, R10
+	SHLQ $0x18, R10
+	IMULQ R9, R10
+	SHRQ $0x34, R10
+	MOVL DX, R9
+	SUBL 16(SP), R9
+	MOVL 1(BX)(R9*1), R11
+	MOVQ DI, R9
+	SHRQ $0x08, R9
+	CMPL R9, R11
+	JNE no_repeat_found_encodeSnappyBlockAsm12B
+	LEAL 1(DX), DI
+	MOVL 12(SP), SI
+	MOVL DI, R8
+	SUBL 16(SP), R8
+	JZ repeat_extend_back_end_encodeSnappyBlockAsm12B
+
+repeat_extend_back_loop_encodeSnappyBlockAsm12B:
+	CMPL DI, SI
+	JBE repeat_extend_back_end_encodeSnappyBlockAsm12B
+	MOVB -1(BX)(R8*1), R9
+	MOVB -1(BX)(DI*1), R10
+	CMPB R9, R10
+	JNE repeat_extend_back_end_encodeSnappyBlockAsm12B
+	LEAL -1(DI), DI
+	DECL R8
+	JNZ repeat_extend_back_loop_encodeSnappyBlockAsm12B
+
+repeat_extend_back_end_encodeSnappyBlockAsm12B:
+	MOVL DI, SI
+	SUBL 12(SP), SI
+	LEAQ 3(CX)(SI*1), SI
+	CMPQ SI, (SP)
+	JB repeat_dst_size_check_encodeSnappyBlockAsm12B
+	MOVQ $0x00000000, ret+56(FP)
+	RET
+
+repeat_dst_size_check_encodeSnappyBlockAsm12B:
+	MOVL 12(SP), SI
+	CMPL SI, DI
+	JEQ emit_literal_done_repeat_emit_encodeSnappyBlockAsm12B
+	MOVL DI, R8
+	MOVL DI, 12(SP)
+	LEAQ (BX)(SI*1), R9
+	SUBL SI, R8
+	LEAL -1(R8), SI
+	CMPL SI, $0x3c
+	JB one_byte_repeat_emit_encodeSnappyBlockAsm12B
+	CMPL SI, $0x00000100
+	JB two_bytes_repeat_emit_encodeSnappyBlockAsm12B
+	JB three_bytes_repeat_emit_encodeSnappyBlockAsm12B
+
+three_bytes_repeat_emit_encodeSnappyBlockAsm12B:
+	MOVB $0xf4, (CX)
+	MOVW SI, 1(CX)
+	ADDQ $0x03, CX
+	JMP memmove_long_repeat_emit_encodeSnappyBlockAsm12B
+
+two_bytes_repeat_emit_encodeSnappyBlockAsm12B:
+	MOVB $0xf0, (CX)
+	MOVB SI, 1(CX)
+	ADDQ $0x02, CX
+	CMPL SI, $0x40
+	JB memmove_repeat_emit_encodeSnappyBlockAsm12B
+	JMP memmove_long_repeat_emit_encodeSnappyBlockAsm12B
+
+one_byte_repeat_emit_encodeSnappyBlockAsm12B:
+	SHLB $0x02, SI
+	MOVB SI, (CX)
+	ADDQ $0x01, CX
+
+memmove_repeat_emit_encodeSnappyBlockAsm12B:
+	LEAQ (CX)(R8*1), SI
+
+	// genMemMoveShort
+	CMPQ R8, $0x08
+	JBE emit_lit_memmove_repeat_emit_encodeSnappyBlockAsm12B_memmove_move_8
+	CMPQ R8, $0x10
+	JBE emit_lit_memmove_repeat_emit_encodeSnappyBlockAsm12B_memmove_move_8through16
+	CMPQ R8, $0x20
+	JBE emit_lit_memmove_repeat_emit_encodeSnappyBlockAsm12B_memmove_move_17through32
+	JMP emit_lit_memmove_repeat_emit_encodeSnappyBlockAsm12B_memmove_move_33through64
+
+emit_lit_memmove_repeat_emit_encodeSnappyBlockAsm12B_memmove_move_8:
+	MOVQ (R9), R10
+	MOVQ R10, (CX)
+	JMP memmove_end_copy_repeat_emit_encodeSnappyBlockAsm12B
+
+emit_lit_memmove_repeat_emit_encodeSnappyBlockAsm12B_memmove_move_8through16:
+	MOVQ (R9), R10
+	MOVQ -8(R9)(R8*1), R9
+	MOVQ R10, (CX)
+	MOVQ R9, -8(CX)(R8*1)
+	JMP memmove_end_copy_repeat_emit_encodeSnappyBlockAsm12B
+
+emit_lit_memmove_repeat_emit_encodeSnappyBlockAsm12B_memmove_move_17through32:
+	MOVOU (R9), X0
+	MOVOU -16(R9)(R8*1), X1
+	MOVOU X0, (CX)
+	MOVOU X1, -16(CX)(R8*1)
+	JMP memmove_end_copy_repeat_emit_encodeSnappyBlockAsm12B
+
+emit_lit_memmove_repeat_emit_encodeSnappyBlockAsm12B_memmove_move_33through64:
+	MOVOU (R9), X0
+	MOVOU 16(R9), X1
+	MOVOU -32(R9)(R8*1), X2
+	MOVOU -16(R9)(R8*1), X3
+	MOVOU X0, (CX)
+	MOVOU X1, 16(CX)
+	MOVOU X2, -32(CX)(R8*1)
+	MOVOU X3, -16(CX)(R8*1)
+
+memmove_end_copy_repeat_emit_encodeSnappyBlockAsm12B:
+	MOVQ SI, CX
+	JMP emit_literal_done_repeat_emit_encodeSnappyBlockAsm12B
+
+memmove_long_repeat_emit_encodeSnappyBlockAsm12B:
+	LEAQ (CX)(R8*1), SI
+
+	// genMemMoveLong
+	MOVOU (R9), X0
+	MOVOU 16(R9), X1
+	MOVOU -32(R9)(R8*1), X2
+	MOVOU -16(R9)(R8*1), X3
+	MOVQ R8, R11
+	SHRQ $0x05, R11
+	MOVQ CX, R10
+	ANDL $0x0000001f, R10
+	MOVQ $0x00000040, R12
+	SUBQ R10, R12
+	DECQ R11
+	JA emit_lit_memmove_long_repeat_emit_encodeSnappyBlockAsm12Blarge_forward_sse_loop_32
+	LEAQ -32(R9)(R12*1), R10
+	LEAQ -32(CX)(R12*1), R13
+
+emit_lit_memmove_long_repeat_emit_encodeSnappyBlockAsm12Blarge_big_loop_back:
+	MOVOU (R10), X4
+	MOVOU 16(R10), X5
+	MOVOA X4, (R13)
+	MOVOA X5, 16(R13)
+	ADDQ $0x20, R13
+	ADDQ $0x20, R10
+	ADDQ $0x20, R12
+	DECQ R11
+	JNA emit_lit_memmove_long_repeat_emit_encodeSnappyBlockAsm12Blarge_big_loop_back
+
+emit_lit_memmove_long_repeat_emit_encodeSnappyBlockAsm12Blarge_forward_sse_loop_32:
+	MOVOU -32(R9)(R12*1), X4
+	MOVOU -16(R9)(R12*1), X5
+	MOVOA X4, -32(CX)(R12*1)
+	MOVOA X5, -16(CX)(R12*1)
+	ADDQ $0x20, R12
+	CMPQ R8, R12
+	JAE emit_lit_memmove_long_repeat_emit_encodeSnappyBlockAsm12Blarge_forward_sse_loop_32
+	MOVOU X0, (CX)
+	MOVOU X1, 16(CX)
+	MOVOU X2, -32(CX)(R8*1)
+	MOVOU X3, -16(CX)(R8*1)
+	MOVQ SI, CX
+
+emit_literal_done_repeat_emit_encodeSnappyBlockAsm12B:
+	ADDL $0x05, DX
+	MOVL DX, SI
+	SUBL 16(SP), SI
+	MOVQ src_len+32(FP), R8
+	SUBL DX, R8
+	LEAQ (BX)(DX*1), R9
+	LEAQ (BX)(SI*1), SI
+
+	// matchLen
+	XORL R11, R11
+
+matchlen_loopback_16_repeat_extend_encodeSnappyBlockAsm12B:
+	CMPL R8, $0x10
+	JB matchlen_match8_repeat_extend_encodeSnappyBlockAsm12B
+	MOVQ (R9)(R11*1), R10
+	MOVQ 8(R9)(R11*1), R12
+	XORQ (SI)(R11*1), R10
+	JNZ matchlen_bsf_8_repeat_extend_encodeSnappyBlockAsm12B
+	XORQ 8(SI)(R11*1), R12
+	JNZ matchlen_bsf_16repeat_extend_encodeSnappyBlockAsm12B
+	LEAL -16(R8), R8
+	LEAL 16(R11), R11
+	JMP matchlen_loopback_16_repeat_extend_encodeSnappyBlockAsm12B
+
+matchlen_bsf_16repeat_extend_encodeSnappyBlockAsm12B:
+#ifdef GOAMD64_v3
+	TZCNTQ R12, R12
+
+#else
+	BSFQ R12, R12
+
+#endif
+	SARQ $0x03, R12
+	LEAL 8(R11)(R12*1), R11
+	JMP repeat_extend_forward_end_encodeSnappyBlockAsm12B
+
+matchlen_match8_repeat_extend_encodeSnappyBlockAsm12B:
+	CMPL R8, $0x08
+	JB matchlen_match4_repeat_extend_encodeSnappyBlockAsm12B
+	MOVQ (R9)(R11*1), R10
+	XORQ (SI)(R11*1), R10
+	JNZ matchlen_bsf_8_repeat_extend_encodeSnappyBlockAsm12B
+	LEAL -8(R8), R8
+	LEAL 8(R11), R11
+	JMP matchlen_match4_repeat_extend_encodeSnappyBlockAsm12B
+
+matchlen_bsf_8_repeat_extend_encodeSnappyBlockAsm12B:
+#ifdef GOAMD64_v3
+	TZCNTQ R10, R10
+
+#else
+	BSFQ R10, R10
+
+#endif
+	SARQ $0x03, R10
+	LEAL (R11)(R10*1), R11
+	JMP repeat_extend_forward_end_encodeSnappyBlockAsm12B
+
+matchlen_match4_repeat_extend_encodeSnappyBlockAsm12B:
+	CMPL R8, $0x04
+	JB matchlen_match2_repeat_extend_encodeSnappyBlockAsm12B
+	MOVL (R9)(R11*1), R10
+	CMPL (SI)(R11*1), R10
+	JNE matchlen_match2_repeat_extend_encodeSnappyBlockAsm12B
+	LEAL -4(R8), R8
+	LEAL 4(R11), R11
+
+matchlen_match2_repeat_extend_encodeSnappyBlockAsm12B:
+	CMPL R8, $0x01
+	JE matchlen_match1_repeat_extend_encodeSnappyBlockAsm12B
+	JB repeat_extend_forward_end_encodeSnappyBlockAsm12B
+	MOVW (R9)(R11*1), R10
+	CMPW (SI)(R11*1), R10
+	JNE matchlen_match1_repeat_extend_encodeSnappyBlockAsm12B
+	LEAL 2(R11), R11
+	SUBL $0x02, R8
+	JZ repeat_extend_forward_end_encodeSnappyBlockAsm12B
+
+matchlen_match1_repeat_extend_encodeSnappyBlockAsm12B:
+	MOVB (R9)(R11*1), R10
+	CMPB (SI)(R11*1), R10
+	JNE repeat_extend_forward_end_encodeSnappyBlockAsm12B
+	LEAL 1(R11), R11
+
+repeat_extend_forward_end_encodeSnappyBlockAsm12B:
+	ADDL R11, DX
+	MOVL DX, SI
+	SUBL DI, SI
+	MOVL 16(SP), DI
+
+	// emitCopy
+two_byte_offset_repeat_as_copy_encodeSnappyBlockAsm12B:
+	CMPL SI, $0x40
+	JBE two_byte_offset_short_repeat_as_copy_encodeSnappyBlockAsm12B
+	MOVB $0xee, (CX)
+	MOVW DI, 1(CX)
+	LEAL -60(SI), SI
+	ADDQ $0x03, CX
+	JMP two_byte_offset_repeat_as_copy_encodeSnappyBlockAsm12B
+
+two_byte_offset_short_repeat_as_copy_encodeSnappyBlockAsm12B:
+	MOVL SI, R8
+	SHLL $0x02, R8
+	CMPL SI, $0x0c
+	JAE emit_copy_three_repeat_as_copy_encodeSnappyBlockAsm12B
+	CMPL DI, $0x00000800
+	JAE emit_copy_three_repeat_as_copy_encodeSnappyBlockAsm12B
+	LEAL -15(R8), R8
+	MOVB DI, 1(CX)
+	SHRL $0x08, DI
+	SHLL $0x05, DI
+	ORL DI, R8
+	MOVB R8, (CX)
+	ADDQ $0x02, CX
+	JMP repeat_end_emit_encodeSnappyBlockAsm12B
+
+emit_copy_three_repeat_as_copy_encodeSnappyBlockAsm12B:
+	LEAL -2(R8), R8
+	MOVB R8, (CX)
+	MOVW DI, 1(CX)
+	ADDQ $0x03, CX
+
+repeat_end_emit_encodeSnappyBlockAsm12B:
+	MOVL DX, 12(SP)
+	JMP search_loop_encodeSnappyBlockAsm12B
+
+no_repeat_found_encodeSnappyBlockAsm12B:
+	CMPL (BX)(SI*1), DI
+	JEQ candidate_match_encodeSnappyBlockAsm12B
+	SHRQ $0x08, DI
+	MOVL (AX)(R10*4), SI
+	LEAL 2(DX), R9
+	CMPL (BX)(R8*1), DI
+	JEQ candidate2_match_encodeSnappyBlockAsm12B
+	MOVL R9, (AX)(R10*4)
+	SHRQ $0x08, DI
+	CMPL (BX)(SI*1), DI
+	JEQ candidate3_match_encodeSnappyBlockAsm12B
+	MOVL 20(SP), DX
+	JMP search_loop_encodeSnappyBlockAsm12B
+
+candidate3_match_encodeSnappyBlockAsm12B:
+	ADDL $0x02, DX
+	JMP candidate_match_encodeSnappyBlockAsm12B
+
+candidate2_match_encodeSnappyBlockAsm12B:
+	MOVL R9, (AX)(R10*4)
+	INCL DX
+	MOVL R8, SI
+
+candidate_match_encodeSnappyBlockAsm12B:
+	MOVL 12(SP), DI
+	TESTL SI, SI
+	JZ match_extend_back_end_encodeSnappyBlockAsm12B
+
+match_extend_back_loop_encodeSnappyBlockAsm12B:
+	CMPL DX, DI
+	JBE match_extend_back_end_encodeSnappyBlockAsm12B
+	MOVB -1(BX)(SI*1), R8
+	MOVB -1(BX)(DX*1), R9
+	CMPB R8, R9
+	JNE match_extend_back_end_encodeSnappyBlockAsm12B
+	LEAL -1(DX), DX
+	DECL SI
+	JZ match_extend_back_end_encodeSnappyBlockAsm12B
+	JMP match_extend_back_loop_encodeSnappyBlockAsm12B
+
+match_extend_back_end_encodeSnappyBlockAsm12B:
+	MOVL DX, DI
+	SUBL 12(SP), DI
+	LEAQ 3(CX)(DI*1), DI
+	CMPQ DI, (SP)
+	JB match_dst_size_check_encodeSnappyBlockAsm12B
+	MOVQ $0x00000000, ret+56(FP)
+	RET
+
+match_dst_size_check_encodeSnappyBlockAsm12B:
+	MOVL DX, DI
+	MOVL 12(SP), R8
+	CMPL R8, DI
+	JEQ emit_literal_done_match_emit_encodeSnappyBlockAsm12B
+	MOVL DI, R9
+	MOVL DI, 12(SP)
+	LEAQ (BX)(R8*1), DI
+	SUBL R8, R9
+	LEAL -1(R9), R8
+	CMPL R8, $0x3c
+	JB one_byte_match_emit_encodeSnappyBlockAsm12B
+	CMPL R8, $0x00000100
+	JB two_bytes_match_emit_encodeSnappyBlockAsm12B
+	JB three_bytes_match_emit_encodeSnappyBlockAsm12B
+
+three_bytes_match_emit_encodeSnappyBlockAsm12B:
+	MOVB $0xf4, (CX)
+	MOVW R8, 1(CX)
+	ADDQ $0x03, CX
+	JMP memmove_long_match_emit_encodeSnappyBlockAsm12B
+
+two_bytes_match_emit_encodeSnappyBlockAsm12B:
+	MOVB $0xf0, (CX)
+	MOVB R8, 1(CX)
+	ADDQ $0x02, CX
+	CMPL R8, $0x40
+	JB memmove_match_emit_encodeSnappyBlockAsm12B
+	JMP memmove_long_match_emit_encodeSnappyBlockAsm12B
+
+one_byte_match_emit_encodeSnappyBlockAsm12B:
+	SHLB $0x02, R8
+	MOVB R8, (CX)
+	ADDQ $0x01, CX
+
+memmove_match_emit_encodeSnappyBlockAsm12B:
+	LEAQ (CX)(R9*1), R8
+
+	// genMemMoveShort
+	CMPQ R9, $0x08
+	JBE emit_lit_memmove_match_emit_encodeSnappyBlockAsm12B_memmove_move_8
+	CMPQ R9, $0x10
+	JBE emit_lit_memmove_match_emit_encodeSnappyBlockAsm12B_memmove_move_8through16
+	CMPQ R9, $0x20
+	JBE emit_lit_memmove_match_emit_encodeSnappyBlockAsm12B_memmove_move_17through32
+	JMP emit_lit_memmove_match_emit_encodeSnappyBlockAsm12B_memmove_move_33through64
+
+emit_lit_memmove_match_emit_encodeSnappyBlockAsm12B_memmove_move_8:
+	MOVQ (DI), R10
+	MOVQ R10, (CX)
+	JMP memmove_end_copy_match_emit_encodeSnappyBlockAsm12B
+
+emit_lit_memmove_match_emit_encodeSnappyBlockAsm12B_memmove_move_8through16:
+	MOVQ (DI), R10
+	MOVQ -8(DI)(R9*1), DI
+	MOVQ R10, (CX)
+	MOVQ DI, -8(CX)(R9*1)
+	JMP memmove_end_copy_match_emit_encodeSnappyBlockAsm12B
+
+emit_lit_memmove_match_emit_encodeSnappyBlockAsm12B_memmove_move_17through32:
+	MOVOU (DI), X0
+	MOVOU -16(DI)(R9*1), X1
+	MOVOU X0, (CX)
+	MOVOU X1, -16(CX)(R9*1)
+	JMP memmove_end_copy_match_emit_encodeSnappyBlockAsm12B
+
+emit_lit_memmove_match_emit_encodeSnappyBlockAsm12B_memmove_move_33through64:
+	MOVOU (DI), X0
+	MOVOU 16(DI), X1
+	MOVOU -32(DI)(R9*1), X2
+	MOVOU -16(DI)(R9*1), X3
+	MOVOU X0, (CX)
+	MOVOU X1, 16(CX)
+	MOVOU X2, -32(CX)(R9*1)
+	MOVOU X3, -16(CX)(R9*1)
+
+memmove_end_copy_match_emit_encodeSnappyBlockAsm12B:
+	MOVQ R8, CX
+	JMP emit_literal_done_match_emit_encodeSnappyBlockAsm12B
+
+memmove_long_match_emit_encodeSnappyBlockAsm12B:
+	LEAQ (CX)(R9*1), R8
+
+	// genMemMoveLong
+	MOVOU (DI), X0
+	MOVOU 16(DI), X1
+	MOVOU -32(DI)(R9*1), X2
+	MOVOU -16(DI)(R9*1), X3
+	MOVQ R9, R11
+	SHRQ $0x05, R11
+	MOVQ CX, R10
+	ANDL $0x0000001f, R10
+	MOVQ $0x00000040, R12
+	SUBQ R10, R12
+	DECQ R11
+	JA emit_lit_memmove_long_match_emit_encodeSnappyBlockAsm12Blarge_forward_sse_loop_32
+	LEAQ -32(DI)(R12*1), R10
+	LEAQ -32(CX)(R12*1), R13
+
+emit_lit_memmove_long_match_emit_encodeSnappyBlockAsm12Blarge_big_loop_back:
+	MOVOU (R10), X4
+	MOVOU 16(R10), X5
+	MOVOA X4, (R13)
+	MOVOA X5, 16(R13)
+	ADDQ $0x20, R13
+	ADDQ $0x20, R10
+	ADDQ $0x20, R12
+	DECQ R11
+	JNA emit_lit_memmove_long_match_emit_encodeSnappyBlockAsm12Blarge_big_loop_back
+
+emit_lit_memmove_long_match_emit_encodeSnappyBlockAsm12Blarge_forward_sse_loop_32:
+	MOVOU -32(DI)(R12*1), X4
+	MOVOU -16(DI)(R12*1), X5
+	MOVOA X4, -32(CX)(R12*1)
+	MOVOA X5, -16(CX)(R12*1)
+	ADDQ $0x20, R12
+	CMPQ R9, R12
+	JAE emit_lit_memmove_long_match_emit_encodeSnappyBlockAsm12Blarge_forward_sse_loop_32
+	MOVOU X0, (CX)
+	MOVOU X1, 16(CX)
+	MOVOU X2, -32(CX)(R9*1)
+	MOVOU X3, -16(CX)(R9*1)
+	MOVQ R8, CX
+
+emit_literal_done_match_emit_encodeSnappyBlockAsm12B:
+match_nolit_loop_encodeSnappyBlockAsm12B:
+	MOVL DX, DI
+	SUBL SI, DI
+	MOVL DI, 16(SP)
+	ADDL $0x04, DX
+	ADDL $0x04, SI
+	MOVQ src_len+32(FP), DI
+	SUBL DX, DI
+	LEAQ (BX)(DX*1), R8
+	LEAQ (BX)(SI*1), SI
+
+	// matchLen
+	XORL R10, R10
+
+matchlen_loopback_16_match_nolit_encodeSnappyBlockAsm12B:
+	CMPL DI, $0x10
+	JB matchlen_match8_match_nolit_encodeSnappyBlockAsm12B
+	MOVQ (R8)(R10*1), R9
+	MOVQ 8(R8)(R10*1), R11
+	XORQ (SI)(R10*1), R9
+	JNZ matchlen_bsf_8_match_nolit_encodeSnappyBlockAsm12B
+	XORQ 8(SI)(R10*1), R11
+	JNZ matchlen_bsf_16match_nolit_encodeSnappyBlockAsm12B
+	LEAL -16(DI), DI
+	LEAL 16(R10), R10
+	JMP matchlen_loopback_16_match_nolit_encodeSnappyBlockAsm12B
+
+matchlen_bsf_16match_nolit_encodeSnappyBlockAsm12B:
+#ifdef GOAMD64_v3
+	TZCNTQ R11, R11
+
+#else
+	BSFQ R11, R11
+
+#endif
+	SARQ $0x03, R11
+	LEAL 8(R10)(R11*1), R10
+	JMP match_nolit_end_encodeSnappyBlockAsm12B
+
+matchlen_match8_match_nolit_encodeSnappyBlockAsm12B:
+	CMPL DI, $0x08
+	JB matchlen_match4_match_nolit_encodeSnappyBlockAsm12B
+	MOVQ (R8)(R10*1), R9
+	XORQ (SI)(R10*1), R9
+	JNZ matchlen_bsf_8_match_nolit_encodeSnappyBlockAsm12B
+	LEAL -8(DI), DI
+	LEAL 8(R10), R10
+	JMP matchlen_match4_match_nolit_encodeSnappyBlockAsm12B
+
+matchlen_bsf_8_match_nolit_encodeSnappyBlockAsm12B:
+#ifdef GOAMD64_v3
+	TZCNTQ R9, R9
+
+#else
+	BSFQ R9, R9
+
+#endif
+	SARQ $0x03, R9
+	LEAL (R10)(R9*1), R10
+	JMP match_nolit_end_encodeSnappyBlockAsm12B
+
+matchlen_match4_match_nolit_encodeSnappyBlockAsm12B:
+	CMPL DI, $0x04
+	JB matchlen_match2_match_nolit_encodeSnappyBlockAsm12B
+	MOVL (R8)(R10*1), R9
+	CMPL (SI)(R10*1), R9
+	JNE matchlen_match2_match_nolit_encodeSnappyBlockAsm12B
+	LEAL -4(DI), DI
+	LEAL 4(R10), R10
+
+matchlen_match2_match_nolit_encodeSnappyBlockAsm12B:
+	CMPL DI, $0x01
+	JE matchlen_match1_match_nolit_encodeSnappyBlockAsm12B
+	JB match_nolit_end_encodeSnappyBlockAsm12B
+	MOVW (R8)(R10*1), R9
+	CMPW (SI)(R10*1), R9
+	JNE matchlen_match1_match_nolit_encodeSnappyBlockAsm12B
+	LEAL 2(R10), R10
+	SUBL $0x02, DI
+	JZ match_nolit_end_encodeSnappyBlockAsm12B
+
+matchlen_match1_match_nolit_encodeSnappyBlockAsm12B:
+	MOVB (R8)(R10*1), R9
+	CMPB (SI)(R10*1), R9
+	JNE match_nolit_end_encodeSnappyBlockAsm12B
+	LEAL 1(R10), R10
+
+match_nolit_end_encodeSnappyBlockAsm12B:
+	ADDL R10, DX
+	MOVL 16(SP), SI
+	ADDL $0x04, R10
+	MOVL DX, 12(SP)
+
+	// emitCopy
+two_byte_offset_match_nolit_encodeSnappyBlockAsm12B:
+	CMPL R10, $0x40
+	JBE two_byte_offset_short_match_nolit_encodeSnappyBlockAsm12B
+	MOVB $0xee, (CX)
+	MOVW SI, 1(CX)
+	LEAL -60(R10), R10
+	ADDQ $0x03, CX
+	JMP two_byte_offset_match_nolit_encodeSnappyBlockAsm12B
+
+two_byte_offset_short_match_nolit_encodeSnappyBlockAsm12B:
+	MOVL R10, DI
+	SHLL $0x02, DI
+	CMPL R10, $0x0c
+	JAE emit_copy_three_match_nolit_encodeSnappyBlockAsm12B
+	CMPL SI, $0x00000800
+	JAE emit_copy_three_match_nolit_encodeSnappyBlockAsm12B
+	LEAL -15(DI), DI
+	MOVB SI, 1(CX)
+	SHRL $0x08, SI
+	SHLL $0x05, SI
+	ORL SI, DI
+	MOVB DI, (CX)
+	ADDQ $0x02, CX
+	JMP match_nolit_emitcopy_end_encodeSnappyBlockAsm12B
+
+emit_copy_three_match_nolit_encodeSnappyBlockAsm12B:
+	LEAL -2(DI), DI
+	MOVB DI, (CX)
+	MOVW SI, 1(CX)
+	ADDQ $0x03, CX
+
+match_nolit_emitcopy_end_encodeSnappyBlockAsm12B:
+	CMPL DX, 8(SP)
+	JAE emit_remainder_encodeSnappyBlockAsm12B
+	MOVQ -2(BX)(DX*1), DI
+	CMPQ CX, (SP)
+	JB match_nolit_dst_ok_encodeSnappyBlockAsm12B
+	MOVQ $0x00000000, ret+56(FP)
+	RET
+
+match_nolit_dst_ok_encodeSnappyBlockAsm12B:
+	MOVQ $0x000000cf1bbcdcbb, R9
+	MOVQ DI, R8
+	SHRQ $0x10, DI
+	MOVQ DI, SI
+	SHLQ $0x18, R8
+	IMULQ R9, R8
+	SHRQ $0x34, R8
+	SHLQ $0x18, SI
+	IMULQ R9, SI
+	SHRQ $0x34, SI
+	LEAL -2(DX), R9
+	LEAQ (AX)(SI*4), R10
+	MOVL (R10), SI
+	MOVL R9, (AX)(R8*4)
+	MOVL DX, (R10)
+	CMPL (BX)(SI*1), DI
+	JEQ match_nolit_loop_encodeSnappyBlockAsm12B
+	INCL DX
+	JMP search_loop_encodeSnappyBlockAsm12B
+
+emit_remainder_encodeSnappyBlockAsm12B:
+	MOVQ src_len+32(FP), AX
+	SUBL 12(SP), AX
+	LEAQ 3(CX)(AX*1), AX
+	CMPQ AX, (SP)
+	JB emit_remainder_ok_encodeSnappyBlockAsm12B
+	MOVQ $0x00000000, ret+56(FP)
+	RET
+
+emit_remainder_ok_encodeSnappyBlockAsm12B:
+	MOVQ src_len+32(FP), AX
+	MOVL 12(SP), DX
+	CMPL DX, AX
+	JEQ emit_literal_done_emit_remainder_encodeSnappyBlockAsm12B
+	MOVL AX, SI
+	MOVL AX, 12(SP)
+	LEAQ (BX)(DX*1), AX
+	SUBL DX, SI
+	LEAL -1(SI), DX
+	CMPL DX, $0x3c
+	JB one_byte_emit_remainder_encodeSnappyBlockAsm12B
+	CMPL DX, $0x00000100
+	JB two_bytes_emit_remainder_encodeSnappyBlockAsm12B
+	JB three_bytes_emit_remainder_encodeSnappyBlockAsm12B
+
+three_bytes_emit_remainder_encodeSnappyBlockAsm12B:
+	MOVB $0xf4, (CX)
+	MOVW DX, 1(CX)
+	ADDQ $0x03, CX
+	JMP memmove_long_emit_remainder_encodeSnappyBlockAsm12B
+
+two_bytes_emit_remainder_encodeSnappyBlockAsm12B:
+	MOVB $0xf0, (CX)
+	MOVB DL, 1(CX)
+	ADDQ $0x02, CX
+	CMPL DX, $0x40
+	JB memmove_emit_remainder_encodeSnappyBlockAsm12B
+	JMP memmove_long_emit_remainder_encodeSnappyBlockAsm12B
+
+one_byte_emit_remainder_encodeSnappyBlockAsm12B:
+	SHLB $0x02, DL
+	MOVB DL, (CX)
+	ADDQ $0x01, CX
+
+memmove_emit_remainder_encodeSnappyBlockAsm12B:
+	LEAQ (CX)(SI*1), DX
+	MOVL SI, BX
+
+	// genMemMoveShort
+	CMPQ BX, $0x03
+	JB emit_lit_memmove_emit_remainder_encodeSnappyBlockAsm12B_memmove_move_1or2
+	JE emit_lit_memmove_emit_remainder_encodeSnappyBlockAsm12B_memmove_move_3
+	CMPQ BX, $0x08
+	JB emit_lit_memmove_emit_remainder_encodeSnappyBlockAsm12B_memmove_move_4through7
+	CMPQ BX, $0x10
+	JBE emit_lit_memmove_emit_remainder_encodeSnappyBlockAsm12B_memmove_move_8through16
+	CMPQ BX, $0x20
+	JBE emit_lit_memmove_emit_remainder_encodeSnappyBlockAsm12B_memmove_move_17through32
+	JMP emit_lit_memmove_emit_remainder_encodeSnappyBlockAsm12B_memmove_move_33through64
+
+emit_lit_memmove_emit_remainder_encodeSnappyBlockAsm12B_memmove_move_1or2:
+	MOVB (AX), SI
+	MOVB -1(AX)(BX*1), AL
+	MOVB SI, (CX)
+	MOVB AL, -1(CX)(BX*1)
+	JMP memmove_end_copy_emit_remainder_encodeSnappyBlockAsm12B
+
+emit_lit_memmove_emit_remainder_encodeSnappyBlockAsm12B_memmove_move_3:
+	MOVW (AX), SI
+	MOVB 2(AX), AL
+	MOVW SI, (CX)
+	MOVB AL, 2(CX)
+	JMP memmove_end_copy_emit_remainder_encodeSnappyBlockAsm12B
+
+emit_lit_memmove_emit_remainder_encodeSnappyBlockAsm12B_memmove_move_4through7:
+	MOVL (AX), SI
+	MOVL -4(AX)(BX*1), AX
+	MOVL SI, (CX)
+	MOVL AX, -4(CX)(BX*1)
+	JMP memmove_end_copy_emit_remainder_encodeSnappyBlockAsm12B
+
+emit_lit_memmove_emit_remainder_encodeSnappyBlockAsm12B_memmove_move_8through16:
+	MOVQ (AX), SI
+	MOVQ -8(AX)(BX*1), AX
+	MOVQ SI, (CX)
+	MOVQ AX, -8(CX)(BX*1)
+	JMP memmove_end_copy_emit_remainder_encodeSnappyBlockAsm12B
+
+emit_lit_memmove_emit_remainder_encodeSnappyBlockAsm12B_memmove_move_17through32:
+	MOVOU (AX), X0
+	MOVOU -16(AX)(BX*1), X1
+	MOVOU X0, (CX)
+	MOVOU X1, -16(CX)(BX*1)
+	JMP memmove_end_copy_emit_remainder_encodeSnappyBlockAsm12B
+
+emit_lit_memmove_emit_remainder_encodeSnappyBlockAsm12B_memmove_move_33through64:
+	MOVOU (AX), X0
+	MOVOU 16(AX), X1
+	MOVOU -32(AX)(BX*1), X2
+	MOVOU -16(AX)(BX*1), X3
+	MOVOU X0, (CX)
+	MOVOU X1, 16(CX)
+	MOVOU X2, -32(CX)(BX*1)
+	MOVOU X3, -16(CX)(BX*1)
+
+memmove_end_copy_emit_remainder_encodeSnappyBlockAsm12B:
+	MOVQ DX, CX
+	JMP emit_literal_done_emit_remainder_encodeSnappyBlockAsm12B
+
+memmove_long_emit_remainder_encodeSnappyBlockAsm12B:
+	LEAQ (CX)(SI*1), DX
+	MOVL SI, BX
+
+	// genMemMoveLong
+	MOVOU (AX), X0
+	MOVOU 16(AX), X1
+	MOVOU -32(AX)(BX*1), X2
+	MOVOU -16(AX)(BX*1), X3
+	MOVQ BX, DI
+	SHRQ $0x05, DI
+	MOVQ CX, SI
+	ANDL $0x0000001f, SI
+	MOVQ $0x00000040, R8
+	SUBQ SI, R8
+	DECQ DI
+	JA emit_lit_memmove_long_emit_remainder_encodeSnappyBlockAsm12Blarge_forward_sse_loop_32
+	LEAQ -32(AX)(R8*1), SI
+	LEAQ -32(CX)(R8*1), R9
+
+emit_lit_memmove_long_emit_remainder_encodeSnappyBlockAsm12Blarge_big_loop_back:
+	MOVOU (SI), X4
+	MOVOU 16(SI), X5
+	MOVOA X4, (R9)
+	MOVOA X5, 16(R9)
+	ADDQ $0x20, R9
+	ADDQ $0x20, SI
+	ADDQ $0x20, R8
+	DECQ DI
+	JNA emit_lit_memmove_long_emit_remainder_encodeSnappyBlockAsm12Blarge_big_loop_back
+
+emit_lit_memmove_long_emit_remainder_encodeSnappyBlockAsm12Blarge_forward_sse_loop_32:
+	MOVOU -32(AX)(R8*1), X4
+	MOVOU -16(AX)(R8*1), X5
+	MOVOA X4, -32(CX)(R8*1)
+	MOVOA X5, -16(CX)(R8*1)
+	ADDQ $0x20, R8
+	CMPQ BX, R8
+	JAE emit_lit_memmove_long_emit_remainder_encodeSnappyBlockAsm12Blarge_forward_sse_loop_32
+	MOVOU X0, (CX)
+	MOVOU X1, 16(CX)
+	MOVOU X2, -32(CX)(BX*1)
+	MOVOU X3, -16(CX)(BX*1)
+	MOVQ DX, CX
+
+emit_literal_done_emit_remainder_encodeSnappyBlockAsm12B:
+	MOVQ dst_base+0(FP), AX
+	SUBQ AX, CX
+	MOVQ CX, ret+56(FP)
+	RET
+
+// func encodeSnappyBlockAsm10B(dst []byte, src []byte, tmp *[4096]byte) int
+// Requires: BMI, SSE2
+TEXT ·encodeSnappyBlockAsm10B(SB), $24-64
+	MOVQ tmp+48(FP), AX
+	MOVQ dst_base+0(FP), CX
+	MOVQ $0x00000020, DX
+	MOVQ AX, BX
+	PXOR X0, X0
+
+zero_loop_encodeSnappyBlockAsm10B:
+	MOVOU X0, (BX)
+	MOVOU X0, 16(BX)
+	MOVOU X0, 32(BX)
+	MOVOU X0, 48(BX)
+	MOVOU X0, 64(BX)
+	MOVOU X0, 80(BX)
+	MOVOU X0, 96(BX)
+	MOVOU X0, 112(BX)
+	ADDQ $0x80, BX
+	DECQ DX
+	JNZ zero_loop_encodeSnappyBlockAsm10B
+	MOVL $0x00000000, 12(SP)
+	MOVQ src_len+32(FP), DX
+	LEAQ -9(DX), BX
+	LEAQ -8(DX), SI
+	MOVL SI, 8(SP)
+	SHRQ $0x05, DX
+	SUBL DX, BX
+	LEAQ (CX)(BX*1), BX
+	MOVQ BX, (SP)
+	MOVL $0x00000001, DX
+	MOVL DX, 16(SP)
+	MOVQ src_base+24(FP), BX
+
+search_loop_encodeSnappyBlockAsm10B:
+	MOVL DX, SI
+	SUBL 12(SP), SI
+	SHRL $0x05, SI
+	LEAL 4(DX)(SI*1), SI
+	CMPL SI, 8(SP)
+	JAE emit_remainder_encodeSnappyBlockAsm10B
+	MOVQ (BX)(DX*1), DI
+	MOVL SI, 20(SP)
+	MOVQ $0x9e3779b1, R9
+	MOVQ DI, R10
+	MOVQ DI, R11
+	SHRQ $0x08, R11
+	SHLQ $0x20, R10
+	IMULQ R9, R10
+	SHRQ $0x36, R10
+	SHLQ $0x20, R11
+	IMULQ R9, R11
+	SHRQ $0x36, R11
+	MOVL (AX)(R10*4), SI
+	MOVL (AX)(R11*4), R8
+	MOVL DX, (AX)(R10*4)
+	LEAL 1(DX), R10
+	MOVL R10, (AX)(R11*4)
+	MOVQ DI, R10
+	SHRQ $0x10, R10
+	SHLQ $0x20, R10
+	IMULQ R9, R10
+	SHRQ $0x36, R10
+	MOVL DX, R9
+	SUBL 16(SP), R9
+	MOVL 1(BX)(R9*1), R11
+	MOVQ DI, R9
+	SHRQ $0x08, R9
+	CMPL R9, R11
+	JNE no_repeat_found_encodeSnappyBlockAsm10B
+	LEAL 1(DX), DI
+	MOVL 12(SP), SI
+	MOVL DI, R8
+	SUBL 16(SP), R8
+	JZ repeat_extend_back_end_encodeSnappyBlockAsm10B
+
+repeat_extend_back_loop_encodeSnappyBlockAsm10B:
+	CMPL DI, SI
+	JBE repeat_extend_back_end_encodeSnappyBlockAsm10B
+	MOVB -1(BX)(R8*1), R9
+	MOVB -1(BX)(DI*1), R10
+	CMPB R9, R10
+	JNE repeat_extend_back_end_encodeSnappyBlockAsm10B
+	LEAL -1(DI), DI
+	DECL R8
+	JNZ repeat_extend_back_loop_encodeSnappyBlockAsm10B
+
+repeat_extend_back_end_encodeSnappyBlockAsm10B:
+	MOVL DI, SI
+	SUBL 12(SP), SI
+	LEAQ 3(CX)(SI*1), SI
+	CMPQ SI, (SP)
+	JB repeat_dst_size_check_encodeSnappyBlockAsm10B
+	MOVQ $0x00000000, ret+56(FP)
+	RET
+
+repeat_dst_size_check_encodeSnappyBlockAsm10B:
+	MOVL 12(SP), SI
+	CMPL SI, DI
+	JEQ emit_literal_done_repeat_emit_encodeSnappyBlockAsm10B
+	MOVL DI, R8
+	MOVL DI, 12(SP)
+	LEAQ (BX)(SI*1), R9
+	SUBL SI, R8
+	LEAL -1(R8), SI
+	CMPL SI, $0x3c
+	JB one_byte_repeat_emit_encodeSnappyBlockAsm10B
+	CMPL SI, $0x00000100
+	JB two_bytes_repeat_emit_encodeSnappyBlockAsm10B
+	JB three_bytes_repeat_emit_encodeSnappyBlockAsm10B
+
+three_bytes_repeat_emit_encodeSnappyBlockAsm10B:
+	MOVB $0xf4, (CX)
+	MOVW SI, 1(CX)
+	ADDQ $0x03, CX
+	JMP memmove_long_repeat_emit_encodeSnappyBlockAsm10B
+
+two_bytes_repeat_emit_encodeSnappyBlockAsm10B:
+	MOVB $0xf0, (CX)
+	MOVB SI, 1(CX)
+	ADDQ $0x02, CX
+	CMPL SI, $0x40
+	JB memmove_repeat_emit_encodeSnappyBlockAsm10B
+	JMP memmove_long_repeat_emit_encodeSnappyBlockAsm10B
+
+one_byte_repeat_emit_encodeSnappyBlockAsm10B:
+	SHLB $0x02, SI
+	MOVB SI, (CX)
+	ADDQ $0x01, CX
+
+memmove_repeat_emit_encodeSnappyBlockAsm10B:
+	LEAQ (CX)(R8*1), SI
+
+	// genMemMoveShort
+	CMPQ R8, $0x08
+	JBE emit_lit_memmove_repeat_emit_encodeSnappyBlockAsm10B_memmove_move_8
+	CMPQ R8, $0x10
+	JBE emit_lit_memmove_repeat_emit_encodeSnappyBlockAsm10B_memmove_move_8through16
+	CMPQ R8, $0x20
+	JBE emit_lit_memmove_repeat_emit_encodeSnappyBlockAsm10B_memmove_move_17through32
+	JMP emit_lit_memmove_repeat_emit_encodeSnappyBlockAsm10B_memmove_move_33through64
+
+emit_lit_memmove_repeat_emit_encodeSnappyBlockAsm10B_memmove_move_8:
+	MOVQ (R9), R10
+	MOVQ R10, (CX)
+	JMP memmove_end_copy_repeat_emit_encodeSnappyBlockAsm10B
+
+emit_lit_memmove_repeat_emit_encodeSnappyBlockAsm10B_memmove_move_8through16:
+	MOVQ (R9), R10
+	MOVQ -8(R9)(R8*1), R9
+	MOVQ R10, (CX)
+	MOVQ R9, -8(CX)(R8*1)
+	JMP memmove_end_copy_repeat_emit_encodeSnappyBlockAsm10B
+
+emit_lit_memmove_repeat_emit_encodeSnappyBlockAsm10B_memmove_move_17through32:
+	MOVOU (R9), X0
+	MOVOU -16(R9)(R8*1), X1
+	MOVOU X0, (CX)
+	MOVOU X1, -16(CX)(R8*1)
+	JMP memmove_end_copy_repeat_emit_encodeSnappyBlockAsm10B
+
+emit_lit_memmove_repeat_emit_encodeSnappyBlockAsm10B_memmove_move_33through64:
+	MOVOU (R9), X0
+	MOVOU 16(R9), X1
+	MOVOU -32(R9)(R8*1), X2
+	MOVOU -16(R9)(R8*1), X3
+	MOVOU X0, (CX)
+	MOVOU X1, 16(CX)
+	MOVOU X2, -32(CX)(R8*1)
+	MOVOU X3, -16(CX)(R8*1)
+
+memmove_end_copy_repeat_emit_encodeSnappyBlockAsm10B:
+	MOVQ SI, CX
+	JMP emit_literal_done_repeat_emit_encodeSnappyBlockAsm10B
+
+memmove_long_repeat_emit_encodeSnappyBlockAsm10B:
+	LEAQ (CX)(R8*1), SI
+
+	// genMemMoveLong
+	MOVOU (R9), X0
+	MOVOU 16(R9), X1
+	MOVOU -32(R9)(R8*1), X2
+	MOVOU -16(R9)(R8*1), X3
+	MOVQ R8, R11
+	SHRQ $0x05, R11
+	MOVQ CX, R10
+	ANDL $0x0000001f, R10
+	MOVQ $0x00000040, R12
+	SUBQ R10, R12
+	DECQ R11
+	JA emit_lit_memmove_long_repeat_emit_encodeSnappyBlockAsm10Blarge_forward_sse_loop_32
+	LEAQ -32(R9)(R12*1), R10
+	LEAQ -32(CX)(R12*1), R13
+
+emit_lit_memmove_long_repeat_emit_encodeSnappyBlockAsm10Blarge_big_loop_back:
+	MOVOU (R10), X4
+	MOVOU 16(R10), X5
+	MOVOA X4, (R13)
+	MOVOA X5, 16(R13)
+	ADDQ $0x20, R13
+	ADDQ $0x20, R10
+	ADDQ $0x20, R12
+	DECQ R11
+	JNA emit_lit_memmove_long_repeat_emit_encodeSnappyBlockAsm10Blarge_big_loop_back
+
+emit_lit_memmove_long_repeat_emit_encodeSnappyBlockAsm10Blarge_forward_sse_loop_32:
+	MOVOU -32(R9)(R12*1), X4
+	MOVOU -16(R9)(R12*1), X5
+	MOVOA X4, -32(CX)(R12*1)
+	MOVOA X5, -16(CX)(R12*1)
+	ADDQ $0x20, R12
+	CMPQ R8, R12
+	JAE emit_lit_memmove_long_repeat_emit_encodeSnappyBlockAsm10Blarge_forward_sse_loop_32
+	MOVOU X0, (CX)
+	MOVOU X1, 16(CX)
+	MOVOU X2, -32(CX)(R8*1)
+	MOVOU X3, -16(CX)(R8*1)
+	MOVQ SI, CX
+
+emit_literal_done_repeat_emit_encodeSnappyBlockAsm10B:
+	ADDL $0x05, DX
+	MOVL DX, SI
+	SUBL 16(SP), SI
+	MOVQ src_len+32(FP), R8
+	SUBL DX, R8
+	LEAQ (BX)(DX*1), R9
+	LEAQ (BX)(SI*1), SI
+
+	// matchLen
+	XORL R11, R11
+
+matchlen_loopback_16_repeat_extend_encodeSnappyBlockAsm10B:
+	CMPL R8, $0x10
+	JB matchlen_match8_repeat_extend_encodeSnappyBlockAsm10B
+	MOVQ (R9)(R11*1), R10
+	MOVQ 8(R9)(R11*1), R12
+	XORQ (SI)(R11*1), R10
+	JNZ matchlen_bsf_8_repeat_extend_encodeSnappyBlockAsm10B
+	XORQ 8(SI)(R11*1), R12
+	JNZ matchlen_bsf_16repeat_extend_encodeSnappyBlockAsm10B
+	LEAL -16(R8), R8
+	LEAL 16(R11), R11
+	JMP matchlen_loopback_16_repeat_extend_encodeSnappyBlockAsm10B
+
+matchlen_bsf_16repeat_extend_encodeSnappyBlockAsm10B:
+#ifdef GOAMD64_v3
+	TZCNTQ R12, R12
+
+#else
+	BSFQ R12, R12
+
+#endif
+	SARQ $0x03, R12
+	LEAL 8(R11)(R12*1), R11
+	JMP repeat_extend_forward_end_encodeSnappyBlockAsm10B
+
+matchlen_match8_repeat_extend_encodeSnappyBlockAsm10B:
+	CMPL R8, $0x08
+	JB matchlen_match4_repeat_extend_encodeSnappyBlockAsm10B
+	MOVQ (R9)(R11*1), R10
+	XORQ (SI)(R11*1), R10
+	JNZ matchlen_bsf_8_repeat_extend_encodeSnappyBlockAsm10B
+	LEAL -8(R8), R8
+	LEAL 8(R11), R11
+	JMP matchlen_match4_repeat_extend_encodeSnappyBlockAsm10B
+
+matchlen_bsf_8_repeat_extend_encodeSnappyBlockAsm10B:
+#ifdef GOAMD64_v3
+	TZCNTQ R10, R10
+
+#else
+	BSFQ R10, R10
+
+#endif
+	SARQ $0x03, R10
+	LEAL (R11)(R10*1), R11
+	JMP repeat_extend_forward_end_encodeSnappyBlockAsm10B
+
+matchlen_match4_repeat_extend_encodeSnappyBlockAsm10B:
+	CMPL R8, $0x04
+	JB matchlen_match2_repeat_extend_encodeSnappyBlockAsm10B
+	MOVL (R9)(R11*1), R10
+	CMPL (SI)(R11*1), R10
+	JNE matchlen_match2_repeat_extend_encodeSnappyBlockAsm10B
+	LEAL -4(R8), R8
+	LEAL 4(R11), R11
+
+matchlen_match2_repeat_extend_encodeSnappyBlockAsm10B:
+	CMPL R8, $0x01
+	JE matchlen_match1_repeat_extend_encodeSnappyBlockAsm10B
+	JB repeat_extend_forward_end_encodeSnappyBlockAsm10B
+	MOVW (R9)(R11*1), R10
+	CMPW (SI)(R11*1), R10
+	JNE matchlen_match1_repeat_extend_encodeSnappyBlockAsm10B
+	LEAL 2(R11), R11
+	SUBL $0x02, R8
+	JZ repeat_extend_forward_end_encodeSnappyBlockAsm10B
+
+matchlen_match1_repeat_extend_encodeSnappyBlockAsm10B:
+	MOVB (R9)(R11*1), R10
+	CMPB (SI)(R11*1), R10
+	JNE repeat_extend_forward_end_encodeSnappyBlockAsm10B
+	LEAL 1(R11), R11
+
+repeat_extend_forward_end_encodeSnappyBlockAsm10B:
+	ADDL R11, DX
+	MOVL DX, SI
+	SUBL DI, SI
+	MOVL 16(SP), DI
+
+	// emitCopy
+two_byte_offset_repeat_as_copy_encodeSnappyBlockAsm10B:
+	CMPL SI, $0x40
+	JBE two_byte_offset_short_repeat_as_copy_encodeSnappyBlockAsm10B
+	MOVB $0xee, (CX)
+	MOVW DI, 1(CX)
+	LEAL -60(SI), SI
+	ADDQ $0x03, CX
+	JMP two_byte_offset_repeat_as_copy_encodeSnappyBlockAsm10B
+
+two_byte_offset_short_repeat_as_copy_encodeSnappyBlockAsm10B:
+	MOVL SI, R8
+	SHLL $0x02, R8
+	CMPL SI, $0x0c
+	JAE emit_copy_three_repeat_as_copy_encodeSnappyBlockAsm10B
+	CMPL DI, $0x00000800
+	JAE emit_copy_three_repeat_as_copy_encodeSnappyBlockAsm10B
+	LEAL -15(R8), R8
+	MOVB DI, 1(CX)
+	SHRL $0x08, DI
+	SHLL $0x05, DI
+	ORL DI, R8
+	MOVB R8, (CX)
+	ADDQ $0x02, CX
+	JMP repeat_end_emit_encodeSnappyBlockAsm10B
+
+emit_copy_three_repeat_as_copy_encodeSnappyBlockAsm10B:
+	LEAL -2(R8), R8
+	MOVB R8, (CX)
+	MOVW DI, 1(CX)
+	ADDQ $0x03, CX
+
+repeat_end_emit_encodeSnappyBlockAsm10B:
+	MOVL DX, 12(SP)
+	JMP search_loop_encodeSnappyBlockAsm10B
+
+no_repeat_found_encodeSnappyBlockAsm10B:
+	CMPL (BX)(SI*1), DI
+	JEQ candidate_match_encodeSnappyBlockAsm10B
+	SHRQ $0x08, DI
+	MOVL (AX)(R10*4), SI
+	LEAL 2(DX), R9
+	CMPL (BX)(R8*1), DI
+	JEQ candidate2_match_encodeSnappyBlockAsm10B
+	MOVL R9, (AX)(R10*4)
+	SHRQ $0x08, DI
+	CMPL (BX)(SI*1), DI
+	JEQ candidate3_match_encodeSnappyBlockAsm10B
+	MOVL 20(SP), DX
+	JMP search_loop_encodeSnappyBlockAsm10B
+
+candidate3_match_encodeSnappyBlockAsm10B:
+	ADDL $0x02, DX
+	JMP candidate_match_encodeSnappyBlockAsm10B
+
+candidate2_match_encodeSnappyBlockAsm10B:
+	MOVL R9, (AX)(R10*4)
+	INCL DX
+	MOVL R8, SI
+
+candidate_match_encodeSnappyBlockAsm10B:
+	MOVL 12(SP), DI
+	TESTL SI, SI
+	JZ match_extend_back_end_encodeSnappyBlockAsm10B
+
+match_extend_back_loop_encodeSnappyBlockAsm10B:
+	CMPL DX, DI
+	JBE match_extend_back_end_encodeSnappyBlockAsm10B
+	MOVB -1(BX)(SI*1), R8
+	MOVB -1(BX)(DX*1), R9
+	CMPB R8, R9
+	JNE match_extend_back_end_encodeSnappyBlockAsm10B
+	LEAL -1(DX), DX
+	DECL SI
+	JZ match_extend_back_end_encodeSnappyBlockAsm10B
+	JMP match_extend_back_loop_encodeSnappyBlockAsm10B
+
+match_extend_back_end_encodeSnappyBlockAsm10B:
+	MOVL DX, DI
+	SUBL 12(SP), DI
+	LEAQ 3(CX)(DI*1), DI
+	CMPQ DI, (SP)
+	JB match_dst_size_check_encodeSnappyBlockAsm10B
+	MOVQ $0x00000000, ret+56(FP)
+	RET
+
+match_dst_size_check_encodeSnappyBlockAsm10B:
+	MOVL DX, DI
+	MOVL 12(SP), R8
+	CMPL R8, DI
+	JEQ emit_literal_done_match_emit_encodeSnappyBlockAsm10B
+	MOVL DI, R9
+	MOVL DI, 12(SP)
+	LEAQ (BX)(R8*1), DI
+	SUBL R8, R9
+	LEAL -1(R9), R8
+	CMPL R8, $0x3c
+	JB one_byte_match_emit_encodeSnappyBlockAsm10B
+	CMPL R8, $0x00000100
+	JB two_bytes_match_emit_encodeSnappyBlockAsm10B
+	JB three_bytes_match_emit_encodeSnappyBlockAsm10B
+
+three_bytes_match_emit_encodeSnappyBlockAsm10B:
+	MOVB $0xf4, (CX)
+	MOVW R8, 1(CX)
+	ADDQ $0x03, CX
+	JMP memmove_long_match_emit_encodeSnappyBlockAsm10B
+
+two_bytes_match_emit_encodeSnappyBlockAsm10B:
+	MOVB $0xf0, (CX)
+	MOVB R8, 1(CX)
+	ADDQ $0x02, CX
+	CMPL R8, $0x40
+	JB memmove_match_emit_encodeSnappyBlockAsm10B
+	JMP memmove_long_match_emit_encodeSnappyBlockAsm10B
+
+one_byte_match_emit_encodeSnappyBlockAsm10B:
+	SHLB $0x02, R8
+	MOVB R8, (CX)
+	ADDQ $0x01, CX
+
+memmove_match_emit_encodeSnappyBlockAsm10B:
+	LEAQ (CX)(R9*1), R8
+
+	// genMemMoveShort
+	CMPQ R9, $0x08
+	JBE emit_lit_memmove_match_emit_encodeSnappyBlockAsm10B_memmove_move_8
+	CMPQ R9, $0x10
+	JBE emit_lit_memmove_match_emit_encodeSnappyBlockAsm10B_memmove_move_8through16
+	CMPQ R9, $0x20
+	JBE emit_lit_memmove_match_emit_encodeSnappyBlockAsm10B_memmove_move_17through32
+	JMP emit_lit_memmove_match_emit_encodeSnappyBlockAsm10B_memmove_move_33through64
+
+emit_lit_memmove_match_emit_encodeSnappyBlockAsm10B_memmove_move_8:
+	MOVQ (DI), R10
+	MOVQ R10, (CX)
+	JMP memmove_end_copy_match_emit_encodeSnappyBlockAsm10B
+
+emit_lit_memmove_match_emit_encodeSnappyBlockAsm10B_memmove_move_8through16:
+	MOVQ (DI), R10
+	MOVQ -8(DI)(R9*1), DI
+	MOVQ R10, (CX)
+	MOVQ DI, -8(CX)(R9*1)
+	JMP memmove_end_copy_match_emit_encodeSnappyBlockAsm10B
+
+emit_lit_memmove_match_emit_encodeSnappyBlockAsm10B_memmove_move_17through32:
+	MOVOU (DI), X0
+	MOVOU -16(DI)(R9*1), X1
+	MOVOU X0, (CX)
+	MOVOU X1, -16(CX)(R9*1)
+	JMP memmove_end_copy_match_emit_encodeSnappyBlockAsm10B
+
+emit_lit_memmove_match_emit_encodeSnappyBlockAsm10B_memmove_move_33through64:
+	MOVOU (DI), X0
+	MOVOU 16(DI), X1
+	MOVOU -32(DI)(R9*1), X2
+	MOVOU -16(DI)(R9*1), X3
+	MOVOU X0, (CX)
+	MOVOU X1, 16(CX)
+	MOVOU X2, -32(CX)(R9*1)
+	MOVOU X3, -16(CX)(R9*1)
+
+memmove_end_copy_match_emit_encodeSnappyBlockAsm10B:
+	MOVQ R8, CX
+	JMP emit_literal_done_match_emit_encodeSnappyBlockAsm10B
+
+memmove_long_match_emit_encodeSnappyBlockAsm10B:
+	LEAQ (CX)(R9*1), R8
+
+	// genMemMoveLong
+	MOVOU (DI), X0
+	MOVOU 16(DI), X1
+	MOVOU -32(DI)(R9*1), X2
+	MOVOU -16(DI)(R9*1), X3
+	MOVQ R9, R11
+	SHRQ $0x05, R11
+	MOVQ CX, R10
+	ANDL $0x0000001f, R10
+	MOVQ $0x00000040, R12
+	SUBQ R10, R12
+	DECQ R11
+	JA emit_lit_memmove_long_match_emit_encodeSnappyBlockAsm10Blarge_forward_sse_loop_32
+	LEAQ -32(DI)(R12*1), R10
+	LEAQ -32(CX)(R12*1), R13
+
+emit_lit_memmove_long_match_emit_encodeSnappyBlockAsm10Blarge_big_loop_back:
+	MOVOU (R10), X4
+	MOVOU 16(R10), X5
+	MOVOA X4, (R13)
+	MOVOA X5, 16(R13)
+	ADDQ $0x20, R13
+	ADDQ $0x20, R10
+	ADDQ $0x20, R12
+	DECQ R11
+	JNA emit_lit_memmove_long_match_emit_encodeSnappyBlockAsm10Blarge_big_loop_back
+
+emit_lit_memmove_long_match_emit_encodeSnappyBlockAsm10Blarge_forward_sse_loop_32:
+	MOVOU -32(DI)(R12*1), X4
+	MOVOU -16(DI)(R12*1), X5
+	MOVOA X4, -32(CX)(R12*1)
+	MOVOA X5, -16(CX)(R12*1)
+	ADDQ $0x20, R12
+	CMPQ R9, R12
+	JAE emit_lit_memmove_long_match_emit_encodeSnappyBlockAsm10Blarge_forward_sse_loop_32
+	MOVOU X0, (CX)
+	MOVOU X1, 16(CX)
+	MOVOU X2, -32(CX)(R9*1)
+	MOVOU X3, -16(CX)(R9*1)
+	MOVQ R8, CX
+
+emit_literal_done_match_emit_encodeSnappyBlockAsm10B:
+match_nolit_loop_encodeSnappyBlockAsm10B:
+	MOVL DX, DI
+	SUBL SI, DI
+	MOVL DI, 16(SP)
+	ADDL $0x04, DX
+	ADDL $0x04, SI
+	MOVQ src_len+32(FP), DI
+	SUBL DX, DI
+	LEAQ (BX)(DX*1), R8
+	LEAQ (BX)(SI*1), SI
+
+	// matchLen
+	XORL R10, R10
+
+matchlen_loopback_16_match_nolit_encodeSnappyBlockAsm10B:
+	CMPL DI, $0x10
+	JB matchlen_match8_match_nolit_encodeSnappyBlockAsm10B
+	MOVQ (R8)(R10*1), R9
+	MOVQ 8(R8)(R10*1), R11
+	XORQ (SI)(R10*1), R9
+	JNZ matchlen_bsf_8_match_nolit_encodeSnappyBlockAsm10B
+	XORQ 8(SI)(R10*1), R11
+	JNZ matchlen_bsf_16match_nolit_encodeSnappyBlockAsm10B
+	LEAL -16(DI), DI
+	LEAL 16(R10), R10
+	JMP matchlen_loopback_16_match_nolit_encodeSnappyBlockAsm10B
+
+matchlen_bsf_16match_nolit_encodeSnappyBlockAsm10B:
+#ifdef GOAMD64_v3
+	TZCNTQ R11, R11
+
+#else
+	BSFQ R11, R11
+
+#endif
+	SARQ $0x03, R11
+	LEAL 8(R10)(R11*1), R10
+	JMP match_nolit_end_encodeSnappyBlockAsm10B
+
+matchlen_match8_match_nolit_encodeSnappyBlockAsm10B:
+	CMPL DI, $0x08
+	JB matchlen_match4_match_nolit_encodeSnappyBlockAsm10B
+	MOVQ (R8)(R10*1), R9
+	XORQ (SI)(R10*1), R9
+	JNZ matchlen_bsf_8_match_nolit_encodeSnappyBlockAsm10B
+	LEAL -8(DI), DI
+	LEAL 8(R10), R10
+	JMP matchlen_match4_match_nolit_encodeSnappyBlockAsm10B
+
+matchlen_bsf_8_match_nolit_encodeSnappyBlockAsm10B:
+#ifdef GOAMD64_v3
+	TZCNTQ R9, R9
+
+#else
+	BSFQ R9, R9
+
+#endif
+	SARQ $0x03, R9
+	LEAL (R10)(R9*1), R10
+	JMP match_nolit_end_encodeSnappyBlockAsm10B
+
+matchlen_match4_match_nolit_encodeSnappyBlockAsm10B:
+	CMPL DI, $0x04
+	JB matchlen_match2_match_nolit_encodeSnappyBlockAsm10B
+	MOVL (R8)(R10*1), R9
+	CMPL (SI)(R10*1), R9
+	JNE matchlen_match2_match_nolit_encodeSnappyBlockAsm10B
+	LEAL -4(DI), DI
+	LEAL 4(R10), R10
+
+matchlen_match2_match_nolit_encodeSnappyBlockAsm10B:
+	CMPL DI, $0x01
+	JE matchlen_match1_match_nolit_encodeSnappyBlockAsm10B
+	JB match_nolit_end_encodeSnappyBlockAsm10B
+	MOVW (R8)(R10*1), R9
+	CMPW (SI)(R10*1), R9
+	JNE matchlen_match1_match_nolit_encodeSnappyBlockAsm10B
+	LEAL 2(R10), R10
+	SUBL $0x02, DI
+	JZ match_nolit_end_encodeSnappyBlockAsm10B
+
+matchlen_match1_match_nolit_encodeSnappyBlockAsm10B:
+	MOVB (R8)(R10*1), R9
+	CMPB (SI)(R10*1), R9
+	JNE match_nolit_end_encodeSnappyBlockAsm10B
+	LEAL 1(R10), R10
+
+match_nolit_end_encodeSnappyBlockAsm10B:
+	ADDL R10, DX
+	MOVL 16(SP), SI
+	ADDL $0x04, R10
+	MOVL DX, 12(SP)
+
+	// emitCopy
+two_byte_offset_match_nolit_encodeSnappyBlockAsm10B:
+	CMPL R10, $0x40
+	JBE two_byte_offset_short_match_nolit_encodeSnappyBlockAsm10B
+	MOVB $0xee, (CX)
+	MOVW SI, 1(CX)
+	LEAL -60(R10), R10
+	ADDQ $0x03, CX
+	JMP two_byte_offset_match_nolit_encodeSnappyBlockAsm10B
+
+two_byte_offset_short_match_nolit_encodeSnappyBlockAsm10B:
+	MOVL R10, DI
+	SHLL $0x02, DI
+	CMPL R10, $0x0c
+	JAE emit_copy_three_match_nolit_encodeSnappyBlockAsm10B
+	CMPL SI, $0x00000800
+	JAE emit_copy_three_match_nolit_encodeSnappyBlockAsm10B
+	LEAL -15(DI), DI
+	MOVB SI, 1(CX)
+	SHRL $0x08, SI
+	SHLL $0x05, SI
+	ORL SI, DI
+	MOVB DI, (CX)
+	ADDQ $0x02, CX
+	JMP match_nolit_emitcopy_end_encodeSnappyBlockAsm10B
+
+emit_copy_three_match_nolit_encodeSnappyBlockAsm10B:
+	LEAL -2(DI), DI
+	MOVB DI, (CX)
+	MOVW SI, 1(CX)
+	ADDQ $0x03, CX
+
+match_nolit_emitcopy_end_encodeSnappyBlockAsm10B:
+	CMPL DX, 8(SP)
+	JAE emit_remainder_encodeSnappyBlockAsm10B
+	MOVQ -2(BX)(DX*1), DI
+	CMPQ CX, (SP)
+	JB match_nolit_dst_ok_encodeSnappyBlockAsm10B
+	MOVQ $0x00000000, ret+56(FP)
+	RET
+
+match_nolit_dst_ok_encodeSnappyBlockAsm10B:
+	MOVQ $0x9e3779b1, R9
+	MOVQ DI, R8
+	SHRQ $0x10, DI
+	MOVQ DI, SI
+	SHLQ $0x20, R8
+	IMULQ R9, R8
+	SHRQ $0x36, R8
+	SHLQ $0x20, SI
+	IMULQ R9, SI
+	SHRQ $0x36, SI
+	LEAL -2(DX), R9
+	LEAQ (AX)(SI*4), R10
+	MOVL (R10), SI
+	MOVL R9, (AX)(R8*4)
+	MOVL DX, (R10)
+	CMPL (BX)(SI*1), DI
+	JEQ match_nolit_loop_encodeSnappyBlockAsm10B
+	INCL DX
+	JMP search_loop_encodeSnappyBlockAsm10B
+
+emit_remainder_encodeSnappyBlockAsm10B:
+	MOVQ src_len+32(FP), AX
+	SUBL 12(SP), AX
+	LEAQ 3(CX)(AX*1), AX
+	CMPQ AX, (SP)
+	JB emit_remainder_ok_encodeSnappyBlockAsm10B
+	MOVQ $0x00000000, ret+56(FP)
+	RET
+
+emit_remainder_ok_encodeSnappyBlockAsm10B:
+	MOVQ src_len+32(FP), AX
+	MOVL 12(SP), DX
+	CMPL DX, AX
+	JEQ emit_literal_done_emit_remainder_encodeSnappyBlockAsm10B
+	MOVL AX, SI
+	MOVL AX, 12(SP)
+	LEAQ (BX)(DX*1), AX
+	SUBL DX, SI
+	LEAL -1(SI), DX
+	CMPL DX, $0x3c
+	JB one_byte_emit_remainder_encodeSnappyBlockAsm10B
+	CMPL DX, $0x00000100
+	JB two_bytes_emit_remainder_encodeSnappyBlockAsm10B
+	JB three_bytes_emit_remainder_encodeSnappyBlockAsm10B
+
+three_bytes_emit_remainder_encodeSnappyBlockAsm10B:
+	MOVB $0xf4, (CX)
+	MOVW DX, 1(CX)
+	ADDQ $0x03, CX
+	JMP memmove_long_emit_remainder_encodeSnappyBlockAsm10B
+
+two_bytes_emit_remainder_encodeSnappyBlockAsm10B:
+	MOVB $0xf0, (CX)
+	MOVB DL, 1(CX)
+	ADDQ $0x02, CX
+	CMPL DX, $0x40
+	JB memmove_emit_remainder_encodeSnappyBlockAsm10B
+	JMP memmove_long_emit_remainder_encodeSnappyBlockAsm10B
+
+one_byte_emit_remainder_encodeSnappyBlockAsm10B:
+	SHLB $0x02, DL
+	MOVB DL, (CX)
+	ADDQ $0x01, CX
+
+memmove_emit_remainder_encodeSnappyBlockAsm10B:
+	LEAQ (CX)(SI*1), DX
+	MOVL SI, BX
+
+	// genMemMoveShort
+	CMPQ BX, $0x03
+	JB emit_lit_memmove_emit_remainder_encodeSnappyBlockAsm10B_memmove_move_1or2
+	JE emit_lit_memmove_emit_remainder_encodeSnappyBlockAsm10B_memmove_move_3
+	CMPQ BX, $0x08
+	JB emit_lit_memmove_emit_remainder_encodeSnappyBlockAsm10B_memmove_move_4through7
+	CMPQ BX, $0x10
+	JBE emit_lit_memmove_emit_remainder_encodeSnappyBlockAsm10B_memmove_move_8through16
+	CMPQ BX, $0x20
+	JBE emit_lit_memmove_emit_remainder_encodeSnappyBlockAsm10B_memmove_move_17through32
+	JMP emit_lit_memmove_emit_remainder_encodeSnappyBlockAsm10B_memmove_move_33through64
+
+emit_lit_memmove_emit_remainder_encodeSnappyBlockAsm10B_memmove_move_1or2:
+	MOVB (AX), SI
+	MOVB -1(AX)(BX*1), AL
+	MOVB SI, (CX)
+	MOVB AL, -1(CX)(BX*1)
+	JMP memmove_end_copy_emit_remainder_encodeSnappyBlockAsm10B
+
+emit_lit_memmove_emit_remainder_encodeSnappyBlockAsm10B_memmove_move_3:
+	MOVW (AX), SI
+	MOVB 2(AX), AL
+	MOVW SI, (CX)
+	MOVB AL, 2(CX)
+	JMP memmove_end_copy_emit_remainder_encodeSnappyBlockAsm10B
+
+emit_lit_memmove_emit_remainder_encodeSnappyBlockAsm10B_memmove_move_4through7:
+	MOVL (AX), SI
+	MOVL -4(AX)(BX*1), AX
+	MOVL SI, (CX)
+	MOVL AX, -4(CX)(BX*1)
+	JMP memmove_end_copy_emit_remainder_encodeSnappyBlockAsm10B
+
+emit_lit_memmove_emit_remainder_encodeSnappyBlockAsm10B_memmove_move_8through16:
+	MOVQ (AX), SI
+	MOVQ -8(AX)(BX*1), AX
+	MOVQ SI, (CX)
+	MOVQ AX, -8(CX)(BX*1)
+	JMP memmove_end_copy_emit_remainder_encodeSnappyBlockAsm10B
+
+emit_lit_memmove_emit_remainder_encodeSnappyBlockAsm10B_memmove_move_17through32:
+	MOVOU (AX), X0
+	MOVOU -16(AX)(BX*1), X1
+	MOVOU X0, (CX)
+	MOVOU X1, -16(CX)(BX*1)
+	JMP memmove_end_copy_emit_remainder_encodeSnappyBlockAsm10B
+
+emit_lit_memmove_emit_remainder_encodeSnappyBlockAsm10B_memmove_move_33through64:
+	MOVOU (AX), X0
+	MOVOU 16(AX), X1
+	MOVOU -32(AX)(BX*1), X2
+	MOVOU -16(AX)(BX*1), X3
+	MOVOU X0, (CX)
+	MOVOU X1, 16(CX)
+	MOVOU X2, -32(CX)(BX*1)
+	MOVOU X3, -16(CX)(BX*1)
+
+memmove_end_copy_emit_remainder_encodeSnappyBlockAsm10B:
+	MOVQ DX, CX
+	JMP emit_literal_done_emit_remainder_encodeSnappyBlockAsm10B
+
+memmove_long_emit_remainder_encodeSnappyBlockAsm10B:
+	LEAQ (CX)(SI*1), DX
+	MOVL SI, BX
+
+	// genMemMoveLong
+	MOVOU (AX), X0
+	MOVOU 16(AX), X1
+	MOVOU -32(AX)(BX*1), X2
+	MOVOU -16(AX)(BX*1), X3
+	MOVQ BX, DI
+	SHRQ $0x05, DI
+	MOVQ CX, SI
+	ANDL $0x0000001f, SI
+	MOVQ $0x00000040, R8
+	SUBQ SI, R8
+	DECQ DI
+	JA emit_lit_memmove_long_emit_remainder_encodeSnappyBlockAsm10Blarge_forward_sse_loop_32
+	LEAQ -32(AX)(R8*1), SI
+	LEAQ -32(CX)(R8*1), R9
+
+emit_lit_memmove_long_emit_remainder_encodeSnappyBlockAsm10Blarge_big_loop_back:
+	MOVOU (SI), X4
+	MOVOU 16(SI), X5
+	MOVOA X4, (R9)
+	MOVOA X5, 16(R9)
+	ADDQ $0x20, R9
+	ADDQ $0x20, SI
+	ADDQ $0x20, R8
+	DECQ DI
+	JNA emit_lit_memmove_long_emit_remainder_encodeSnappyBlockAsm10Blarge_big_loop_back
+
+emit_lit_memmove_long_emit_remainder_encodeSnappyBlockAsm10Blarge_forward_sse_loop_32:
+	MOVOU -32(AX)(R8*1), X4
+	MOVOU -16(AX)(R8*1), X5
+	MOVOA X4, -32(CX)(R8*1)
+	MOVOA X5, -16(CX)(R8*1)
+	ADDQ $0x20, R8
+	CMPQ BX, R8
+	JAE emit_lit_memmove_long_emit_remainder_encodeSnappyBlockAsm10Blarge_forward_sse_loop_32
+	MOVOU X0, (CX)
+	MOVOU X1, 16(CX)
+	MOVOU X2, -32(CX)(BX*1)
+	MOVOU X3, -16(CX)(BX*1)
+	MOVQ DX, CX
+
+emit_literal_done_emit_remainder_encodeSnappyBlockAsm10B:
+	MOVQ dst_base+0(FP), AX
+	SUBQ AX, CX
+	MOVQ CX, ret+56(FP)
+	RET
+
+// func encodeSnappyBlockAsm8B(dst []byte, src []byte, tmp *[1024]byte) int
+// Requires: BMI, SSE2
+TEXT ·encodeSnappyBlockAsm8B(SB), $24-64
+	MOVQ tmp+48(FP), AX
+	MOVQ dst_base+0(FP), CX
+	MOVQ $0x00000008, DX
+	MOVQ AX, BX
+	PXOR X0, X0
+
+zero_loop_encodeSnappyBlockAsm8B:
+	MOVOU X0, (BX)
+	MOVOU X0, 16(BX)
+	MOVOU X0, 32(BX)
+	MOVOU X0, 48(BX)
+	MOVOU X0, 64(BX)
+	MOVOU X0, 80(BX)
+	MOVOU X0, 96(BX)
+	MOVOU X0, 112(BX)
+	ADDQ $0x80, BX
+	DECQ DX
+	JNZ zero_loop_encodeSnappyBlockAsm8B
+	MOVL $0x00000000, 12(SP)
+	MOVQ src_len+32(FP), DX
+	LEAQ -9(DX), BX
+	LEAQ -8(DX), SI
+	MOVL SI, 8(SP)
+	SHRQ $0x05, DX
+	SUBL DX, BX
+	LEAQ (CX)(BX*1), BX
+	MOVQ BX, (SP)
+	MOVL $0x00000001, DX
+	MOVL DX, 16(SP)
+	MOVQ src_base+24(FP), BX
+
+search_loop_encodeSnappyBlockAsm8B:
+	MOVL DX, SI
+	SUBL 12(SP), SI
+	SHRL $0x04, SI
+	LEAL 4(DX)(SI*1), SI
+	CMPL SI, 8(SP)
+	JAE emit_remainder_encodeSnappyBlockAsm8B
+	MOVQ (BX)(DX*1), DI
+	MOVL SI, 20(SP)
+	MOVQ $0x9e3779b1, R9
+	MOVQ DI, R10
+	MOVQ DI, R11
+	SHRQ $0x08, R11
+	SHLQ $0x20, R10
+	IMULQ R9, R10
+	SHRQ $0x38, R10
+	SHLQ $0x20, R11
+	IMULQ R9, R11
+	SHRQ $0x38, R11
+	MOVL (AX)(R10*4), SI
+	MOVL (AX)(R11*4), R8
+	MOVL DX, (AX)(R10*4)
+	LEAL 1(DX), R10
+	MOVL R10, (AX)(R11*4)
+	MOVQ DI, R10
+	SHRQ $0x10, R10
+	SHLQ $0x20, R10
+	IMULQ R9, R10
+	SHRQ $0x38, R10
+	MOVL DX, R9
+	SUBL 16(SP), R9
+	MOVL 1(BX)(R9*1), R11
+	MOVQ DI, R9
+	SHRQ $0x08, R9
+	CMPL R9, R11
+	JNE no_repeat_found_encodeSnappyBlockAsm8B
+	LEAL 1(DX), DI
+	MOVL 12(SP), SI
+	MOVL DI, R8
+	SUBL 16(SP), R8
+	JZ repeat_extend_back_end_encodeSnappyBlockAsm8B
+
+repeat_extend_back_loop_encodeSnappyBlockAsm8B:
+	CMPL DI, SI
+	JBE repeat_extend_back_end_encodeSnappyBlockAsm8B
+	MOVB -1(BX)(R8*1), R9
+	MOVB -1(BX)(DI*1), R10
+	CMPB R9, R10
+	JNE repeat_extend_back_end_encodeSnappyBlockAsm8B
+	LEAL -1(DI), DI
+	DECL R8
+	JNZ repeat_extend_back_loop_encodeSnappyBlockAsm8B
+
+repeat_extend_back_end_encodeSnappyBlockAsm8B:
+	MOVL DI, SI
+	SUBL 12(SP), SI
+	LEAQ 3(CX)(SI*1), SI
+	CMPQ SI, (SP)
+	JB repeat_dst_size_check_encodeSnappyBlockAsm8B
+	MOVQ $0x00000000, ret+56(FP)
+	RET
+
+repeat_dst_size_check_encodeSnappyBlockAsm8B:
+	MOVL 12(SP), SI
+	CMPL SI, DI
+	JEQ emit_literal_done_repeat_emit_encodeSnappyBlockAsm8B
+	MOVL DI, R8
+	MOVL DI, 12(SP)
+	LEAQ (BX)(SI*1), R9
+	SUBL SI, R8
+	LEAL -1(R8), SI
+	CMPL SI, $0x3c
+	JB one_byte_repeat_emit_encodeSnappyBlockAsm8B
+	CMPL SI, $0x00000100
+	JB two_bytes_repeat_emit_encodeSnappyBlockAsm8B
+	JB three_bytes_repeat_emit_encodeSnappyBlockAsm8B
+
+three_bytes_repeat_emit_encodeSnappyBlockAsm8B:
+	MOVB $0xf4, (CX)
+	MOVW SI, 1(CX)
+	ADDQ $0x03, CX
+	JMP memmove_long_repeat_emit_encodeSnappyBlockAsm8B
+
+two_bytes_repeat_emit_encodeSnappyBlockAsm8B:
+	MOVB $0xf0, (CX)
+	MOVB SI, 1(CX)
+	ADDQ $0x02, CX
+	CMPL SI, $0x40
+	JB memmove_repeat_emit_encodeSnappyBlockAsm8B
+	JMP memmove_long_repeat_emit_encodeSnappyBlockAsm8B
+
+one_byte_repeat_emit_encodeSnappyBlockAsm8B:
+	SHLB $0x02, SI
+	MOVB SI, (CX)
+	ADDQ $0x01, CX
+
+memmove_repeat_emit_encodeSnappyBlockAsm8B:
+	LEAQ (CX)(R8*1), SI
+
+	// genMemMoveShort
+	CMPQ R8, $0x08
+	JBE emit_lit_memmove_repeat_emit_encodeSnappyBlockAsm8B_memmove_move_8
+	CMPQ R8, $0x10
+	JBE emit_lit_memmove_repeat_emit_encodeSnappyBlockAsm8B_memmove_move_8through16
+	CMPQ R8, $0x20
+	JBE emit_lit_memmove_repeat_emit_encodeSnappyBlockAsm8B_memmove_move_17through32
+	JMP emit_lit_memmove_repeat_emit_encodeSnappyBlockAsm8B_memmove_move_33through64
+
+emit_lit_memmove_repeat_emit_encodeSnappyBlockAsm8B_memmove_move_8:
+	MOVQ (R9), R10
+	MOVQ R10, (CX)
+	JMP memmove_end_copy_repeat_emit_encodeSnappyBlockAsm8B
+
+emit_lit_memmove_repeat_emit_encodeSnappyBlockAsm8B_memmove_move_8through16:
+	MOVQ (R9), R10
+	MOVQ -8(R9)(R8*1), R9
+	MOVQ R10, (CX)
+	MOVQ R9, -8(CX)(R8*1)
+	JMP memmove_end_copy_repeat_emit_encodeSnappyBlockAsm8B
+
+emit_lit_memmove_repeat_emit_encodeSnappyBlockAsm8B_memmove_move_17through32:
+	MOVOU (R9), X0
+	MOVOU -16(R9)(R8*1), X1
+	MOVOU X0, (CX)
+	MOVOU X1, -16(CX)(R8*1)
+	JMP memmove_end_copy_repeat_emit_encodeSnappyBlockAsm8B
+
+emit_lit_memmove_repeat_emit_encodeSnappyBlockAsm8B_memmove_move_33through64:
+	MOVOU (R9), X0
+	MOVOU 16(R9), X1
+	MOVOU -32(R9)(R8*1), X2
+	MOVOU -16(R9)(R8*1), X3
+	MOVOU X0, (CX)
+	MOVOU X1, 16(CX)
+	MOVOU X2, -32(CX)(R8*1)
+	MOVOU X3, -16(CX)(R8*1)
+
+memmove_end_copy_repeat_emit_encodeSnappyBlockAsm8B:
+	MOVQ SI, CX
+	JMP emit_literal_done_repeat_emit_encodeSnappyBlockAsm8B
+
+memmove_long_repeat_emit_encodeSnappyBlockAsm8B:
+	LEAQ (CX)(R8*1), SI
+
+	// genMemMoveLong
+	MOVOU (R9), X0
+	MOVOU 16(R9), X1
+	MOVOU -32(R9)(R8*1), X2
+	MOVOU -16(R9)(R8*1), X3
+	MOVQ R8, R11
+	SHRQ $0x05, R11
+	MOVQ CX, R10
+	ANDL $0x0000001f, R10
+	MOVQ $0x00000040, R12
+	SUBQ R10, R12
+	DECQ R11
+	JA emit_lit_memmove_long_repeat_emit_encodeSnappyBlockAsm8Blarge_forward_sse_loop_32
+	LEAQ -32(R9)(R12*1), R10
+	LEAQ -32(CX)(R12*1), R13
+
+emit_lit_memmove_long_repeat_emit_encodeSnappyBlockAsm8Blarge_big_loop_back:
+	MOVOU (R10), X4
+	MOVOU 16(R10), X5
+	MOVOA X4, (R13)
+	MOVOA X5, 16(R13)
+	ADDQ $0x20, R13
+	ADDQ $0x20, R10
+	ADDQ $0x20, R12
+	DECQ R11
+	JNA emit_lit_memmove_long_repeat_emit_encodeSnappyBlockAsm8Blarge_big_loop_back
+
+emit_lit_memmove_long_repeat_emit_encodeSnappyBlockAsm8Blarge_forward_sse_loop_32:
+	MOVOU -32(R9)(R12*1), X4
+	MOVOU -16(R9)(R12*1), X5
+	MOVOA X4, -32(CX)(R12*1)
+	MOVOA X5, -16(CX)(R12*1)
+	ADDQ $0x20, R12
+	CMPQ R8, R12
+	JAE emit_lit_memmove_long_repeat_emit_encodeSnappyBlockAsm8Blarge_forward_sse_loop_32
+	MOVOU X0, (CX)
+	MOVOU X1, 16(CX)
+	MOVOU X2, -32(CX)(R8*1)
+	MOVOU X3, -16(CX)(R8*1)
+	MOVQ SI, CX
+
+emit_literal_done_repeat_emit_encodeSnappyBlockAsm8B:
+	ADDL $0x05, DX
+	MOVL DX, SI
+	SUBL 16(SP), SI
+	MOVQ src_len+32(FP), R8
+	SUBL DX, R8
+	LEAQ (BX)(DX*1), R9
+	LEAQ (BX)(SI*1), SI
+
+	// matchLen
+	XORL R11, R11
+
+matchlen_loopback_16_repeat_extend_encodeSnappyBlockAsm8B:
+	CMPL R8, $0x10
+	JB matchlen_match8_repeat_extend_encodeSnappyBlockAsm8B
+	MOVQ (R9)(R11*1), R10
+	MOVQ 8(R9)(R11*1), R12
+	XORQ (SI)(R11*1), R10
+	JNZ matchlen_bsf_8_repeat_extend_encodeSnappyBlockAsm8B
+	XORQ 8(SI)(R11*1), R12
+	JNZ matchlen_bsf_16repeat_extend_encodeSnappyBlockAsm8B
+	LEAL -16(R8), R8
+	LEAL 16(R11), R11
+	JMP matchlen_loopback_16_repeat_extend_encodeSnappyBlockAsm8B
+
+matchlen_bsf_16repeat_extend_encodeSnappyBlockAsm8B:
+#ifdef GOAMD64_v3
+	TZCNTQ R12, R12
+
+#else
+	BSFQ R12, R12
+
+#endif
+	SARQ $0x03, R12
+	LEAL 8(R11)(R12*1), R11
+	JMP repeat_extend_forward_end_encodeSnappyBlockAsm8B
+
+matchlen_match8_repeat_extend_encodeSnappyBlockAsm8B:
+	CMPL R8, $0x08
+	JB matchlen_match4_repeat_extend_encodeSnappyBlockAsm8B
+	MOVQ (R9)(R11*1), R10
+	XORQ (SI)(R11*1), R10
+	JNZ matchlen_bsf_8_repeat_extend_encodeSnappyBlockAsm8B
+	LEAL -8(R8), R8
+	LEAL 8(R11), R11
+	JMP matchlen_match4_repeat_extend_encodeSnappyBlockAsm8B
+
+matchlen_bsf_8_repeat_extend_encodeSnappyBlockAsm8B:
+#ifdef GOAMD64_v3
+	TZCNTQ R10, R10
+
+#else
+	BSFQ R10, R10
+
+#endif
+	SARQ $0x03, R10
+	LEAL (R11)(R10*1), R11
+	JMP repeat_extend_forward_end_encodeSnappyBlockAsm8B
+
+matchlen_match4_repeat_extend_encodeSnappyBlockAsm8B:
+	CMPL R8, $0x04
+	JB matchlen_match2_repeat_extend_encodeSnappyBlockAsm8B
+	MOVL (R9)(R11*1), R10
+	CMPL (SI)(R11*1), R10
+	JNE matchlen_match2_repeat_extend_encodeSnappyBlockAsm8B
+	LEAL -4(R8), R8
+	LEAL 4(R11), R11
+
+matchlen_match2_repeat_extend_encodeSnappyBlockAsm8B:
+	CMPL R8, $0x01
+	JE matchlen_match1_repeat_extend_encodeSnappyBlockAsm8B
+	JB repeat_extend_forward_end_encodeSnappyBlockAsm8B
+	MOVW (R9)(R11*1), R10
+	CMPW (SI)(R11*1), R10
+	JNE matchlen_match1_repeat_extend_encodeSnappyBlockAsm8B
+	LEAL 2(R11), R11
+	SUBL $0x02, R8
+	JZ repeat_extend_forward_end_encodeSnappyBlockAsm8B
+
+matchlen_match1_repeat_extend_encodeSnappyBlockAsm8B:
+	MOVB (R9)(R11*1), R10
+	CMPB (SI)(R11*1), R10
+	JNE repeat_extend_forward_end_encodeSnappyBlockAsm8B
+	LEAL 1(R11), R11
+
+repeat_extend_forward_end_encodeSnappyBlockAsm8B:
+	ADDL R11, DX
+	MOVL DX, SI
+	SUBL DI, SI
+	MOVL 16(SP), DI
+
+	// emitCopy
+two_byte_offset_repeat_as_copy_encodeSnappyBlockAsm8B:
+	CMPL SI, $0x40
+	JBE two_byte_offset_short_repeat_as_copy_encodeSnappyBlockAsm8B
+	MOVB $0xee, (CX)
+	MOVW DI, 1(CX)
+	LEAL -60(SI), SI
+	ADDQ $0x03, CX
+	JMP two_byte_offset_repeat_as_copy_encodeSnappyBlockAsm8B
+
+two_byte_offset_short_repeat_as_copy_encodeSnappyBlockAsm8B:
+	MOVL SI, R8
+	SHLL $0x02, R8
+	CMPL SI, $0x0c
+	JAE emit_copy_three_repeat_as_copy_encodeSnappyBlockAsm8B
+	LEAL -15(R8), R8
+	MOVB DI, 1(CX)
+	SHRL $0x08, DI
+	SHLL $0x05, DI
+	ORL DI, R8
+	MOVB R8, (CX)
+	ADDQ $0x02, CX
+	JMP repeat_end_emit_encodeSnappyBlockAsm8B
+
+emit_copy_three_repeat_as_copy_encodeSnappyBlockAsm8B:
+	LEAL -2(R8), R8
+	MOVB R8, (CX)
+	MOVW DI, 1(CX)
+	ADDQ $0x03, CX
+
+repeat_end_emit_encodeSnappyBlockAsm8B:
+	MOVL DX, 12(SP)
+	JMP search_loop_encodeSnappyBlockAsm8B
+
+no_repeat_found_encodeSnappyBlockAsm8B:
+	CMPL (BX)(SI*1), DI
+	JEQ candidate_match_encodeSnappyBlockAsm8B
+	SHRQ $0x08, DI
+	MOVL (AX)(R10*4), SI
+	LEAL 2(DX), R9
+	CMPL (BX)(R8*1), DI
+	JEQ candidate2_match_encodeSnappyBlockAsm8B
+	MOVL R9, (AX)(R10*4)
+	SHRQ $0x08, DI
+	CMPL (BX)(SI*1), DI
+	JEQ candidate3_match_encodeSnappyBlockAsm8B
+	MOVL 20(SP), DX
+	JMP search_loop_encodeSnappyBlockAsm8B
+
+candidate3_match_encodeSnappyBlockAsm8B:
+	ADDL $0x02, DX
+	JMP candidate_match_encodeSnappyBlockAsm8B
+
+candidate2_match_encodeSnappyBlockAsm8B:
+	MOVL R9, (AX)(R10*4)
+	INCL DX
+	MOVL R8, SI
+
+candidate_match_encodeSnappyBlockAsm8B:
+	MOVL 12(SP), DI
+	TESTL SI, SI
+	JZ match_extend_back_end_encodeSnappyBlockAsm8B
+
+match_extend_back_loop_encodeSnappyBlockAsm8B:
+	CMPL DX, DI
+	JBE match_extend_back_end_encodeSnappyBlockAsm8B
+	MOVB -1(BX)(SI*1), R8
+	MOVB -1(BX)(DX*1), R9
+	CMPB R8, R9
+	JNE match_extend_back_end_encodeSnappyBlockAsm8B
+	LEAL -1(DX), DX
+	DECL SI
+	JZ match_extend_back_end_encodeSnappyBlockAsm8B
+	JMP match_extend_back_loop_encodeSnappyBlockAsm8B
+
+match_extend_back_end_encodeSnappyBlockAsm8B:
+	MOVL DX, DI
+	SUBL 12(SP), DI
+	LEAQ 3(CX)(DI*1), DI
+	CMPQ DI, (SP)
+	JB match_dst_size_check_encodeSnappyBlockAsm8B
+	MOVQ $0x00000000, ret+56(FP)
+	RET
+
+match_dst_size_check_encodeSnappyBlockAsm8B:
+	MOVL DX, DI
+	MOVL 12(SP), R8
+	CMPL R8, DI
+	JEQ emit_literal_done_match_emit_encodeSnappyBlockAsm8B
+	MOVL DI, R9
+	MOVL DI, 12(SP)
+	LEAQ (BX)(R8*1), DI
+	SUBL R8, R9
+	LEAL -1(R9), R8
+	CMPL R8, $0x3c
+	JB one_byte_match_emit_encodeSnappyBlockAsm8B
+	CMPL R8, $0x00000100
+	JB two_bytes_match_emit_encodeSnappyBlockAsm8B
+	JB three_bytes_match_emit_encodeSnappyBlockAsm8B
+
+three_bytes_match_emit_encodeSnappyBlockAsm8B:
+	MOVB $0xf4, (CX)
+	MOVW R8, 1(CX)
+	ADDQ $0x03, CX
+	JMP memmove_long_match_emit_encodeSnappyBlockAsm8B
+
+two_bytes_match_emit_encodeSnappyBlockAsm8B:
+	MOVB $0xf0, (CX)
+	MOVB R8, 1(CX)
+	ADDQ $0x02, CX
+	CMPL R8, $0x40
+	JB memmove_match_emit_encodeSnappyBlockAsm8B
+	JMP memmove_long_match_emit_encodeSnappyBlockAsm8B
+
+one_byte_match_emit_encodeSnappyBlockAsm8B:
+	SHLB $0x02, R8
+	MOVB R8, (CX)
+	ADDQ $0x01, CX
+
+memmove_match_emit_encodeSnappyBlockAsm8B:
+	LEAQ (CX)(R9*1), R8
+
+	// genMemMoveShort
+	CMPQ R9, $0x08
+	JBE emit_lit_memmove_match_emit_encodeSnappyBlockAsm8B_memmove_move_8
+	CMPQ R9, $0x10
+	JBE emit_lit_memmove_match_emit_encodeSnappyBlockAsm8B_memmove_move_8through16
+	CMPQ R9, $0x20
+	JBE emit_lit_memmove_match_emit_encodeSnappyBlockAsm8B_memmove_move_17through32
+	JMP emit_lit_memmove_match_emit_encodeSnappyBlockAsm8B_memmove_move_33through64
+
+emit_lit_memmove_match_emit_encodeSnappyBlockAsm8B_memmove_move_8:
+	MOVQ (DI), R10
+	MOVQ R10, (CX)
+	JMP memmove_end_copy_match_emit_encodeSnappyBlockAsm8B
+
+emit_lit_memmove_match_emit_encodeSnappyBlockAsm8B_memmove_move_8through16:
+	MOVQ (DI), R10
+	MOVQ -8(DI)(R9*1), DI
+	MOVQ R10, (CX)
+	MOVQ DI, -8(CX)(R9*1)
+	JMP memmove_end_copy_match_emit_encodeSnappyBlockAsm8B
+
+emit_lit_memmove_match_emit_encodeSnappyBlockAsm8B_memmove_move_17through32:
+	MOVOU (DI), X0
+	MOVOU -16(DI)(R9*1), X1
+	MOVOU X0, (CX)
+	MOVOU X1, -16(CX)(R9*1)
+	JMP memmove_end_copy_match_emit_encodeSnappyBlockAsm8B
+
+emit_lit_memmove_match_emit_encodeSnappyBlockAsm8B_memmove_move_33through64:
+	MOVOU (DI), X0
+	MOVOU 16(DI), X1
+	MOVOU -32(DI)(R9*1), X2
+	MOVOU -16(DI)(R9*1), X3
+	MOVOU X0, (CX)
+	MOVOU X1, 16(CX)
+	MOVOU X2, -32(CX)(R9*1)
+	MOVOU X3, -16(CX)(R9*1)
+
+memmove_end_copy_match_emit_encodeSnappyBlockAsm8B:
+	MOVQ R8, CX
+	JMP emit_literal_done_match_emit_encodeSnappyBlockAsm8B
+
+memmove_long_match_emit_encodeSnappyBlockAsm8B:
+	LEAQ (CX)(R9*1), R8
+
+	// genMemMoveLong
+	MOVOU (DI), X0
+	MOVOU 16(DI), X1
+	MOVOU -32(DI)(R9*1), X2
+	MOVOU -16(DI)(R9*1), X3
+	MOVQ R9, R11
+	SHRQ $0x05, R11
+	MOVQ CX, R10
+	ANDL $0x0000001f, R10
+	MOVQ $0x00000040, R12
+	SUBQ R10, R12
+	DECQ R11
+	JA emit_lit_memmove_long_match_emit_encodeSnappyBlockAsm8Blarge_forward_sse_loop_32
+	LEAQ -32(DI)(R12*1), R10
+	LEAQ -32(CX)(R12*1), R13
+
+emit_lit_memmove_long_match_emit_encodeSnappyBlockAsm8Blarge_big_loop_back:
+	MOVOU (R10), X4
+	MOVOU 16(R10), X5
+	MOVOA X4, (R13)
+	MOVOA X5, 16(R13)
+	ADDQ $0x20, R13
+	ADDQ $0x20, R10
+	ADDQ $0x20, R12
+	DECQ R11
+	JNA emit_lit_memmove_long_match_emit_encodeSnappyBlockAsm8Blarge_big_loop_back
+
+emit_lit_memmove_long_match_emit_encodeSnappyBlockAsm8Blarge_forward_sse_loop_32:
+	MOVOU -32(DI)(R12*1), X4
+	MOVOU -16(DI)(R12*1), X5
+	MOVOA X4, -32(CX)(R12*1)
+	MOVOA X5, -16(CX)(R12*1)
+	ADDQ $0x20, R12
+	CMPQ R9, R12
+	JAE emit_lit_memmove_long_match_emit_encodeSnappyBlockAsm8Blarge_forward_sse_loop_32
+	MOVOU X0, (CX)
+	MOVOU X1, 16(CX)
+	MOVOU X2, -32(CX)(R9*1)
-32(CX)(R9*1) + MOVOU X3, -16(CX)(R9*1) + MOVQ R8, CX + +emit_literal_done_match_emit_encodeSnappyBlockAsm8B: +match_nolit_loop_encodeSnappyBlockAsm8B: + MOVL DX, DI + SUBL SI, DI + MOVL DI, 16(SP) + ADDL $0x04, DX + ADDL $0x04, SI + MOVQ src_len+32(FP), DI + SUBL DX, DI + LEAQ (BX)(DX*1), R8 + LEAQ (BX)(SI*1), SI + + // matchLen + XORL R10, R10 + +matchlen_loopback_16_match_nolit_encodeSnappyBlockAsm8B: + CMPL DI, $0x10 + JB matchlen_match8_match_nolit_encodeSnappyBlockAsm8B + MOVQ (R8)(R10*1), R9 + MOVQ 8(R8)(R10*1), R11 + XORQ (SI)(R10*1), R9 + JNZ matchlen_bsf_8_match_nolit_encodeSnappyBlockAsm8B + XORQ 8(SI)(R10*1), R11 + JNZ matchlen_bsf_16match_nolit_encodeSnappyBlockAsm8B + LEAL -16(DI), DI + LEAL 16(R10), R10 + JMP matchlen_loopback_16_match_nolit_encodeSnappyBlockAsm8B + +matchlen_bsf_16match_nolit_encodeSnappyBlockAsm8B: +#ifdef GOAMD64_v3 + TZCNTQ R11, R11 + +#else + BSFQ R11, R11 + +#endif + SARQ $0x03, R11 + LEAL 8(R10)(R11*1), R10 + JMP match_nolit_end_encodeSnappyBlockAsm8B + +matchlen_match8_match_nolit_encodeSnappyBlockAsm8B: + CMPL DI, $0x08 + JB matchlen_match4_match_nolit_encodeSnappyBlockAsm8B + MOVQ (R8)(R10*1), R9 + XORQ (SI)(R10*1), R9 + JNZ matchlen_bsf_8_match_nolit_encodeSnappyBlockAsm8B + LEAL -8(DI), DI + LEAL 8(R10), R10 + JMP matchlen_match4_match_nolit_encodeSnappyBlockAsm8B + +matchlen_bsf_8_match_nolit_encodeSnappyBlockAsm8B: +#ifdef GOAMD64_v3 + TZCNTQ R9, R9 + +#else + BSFQ R9, R9 + +#endif + SARQ $0x03, R9 + LEAL (R10)(R9*1), R10 + JMP match_nolit_end_encodeSnappyBlockAsm8B + +matchlen_match4_match_nolit_encodeSnappyBlockAsm8B: + CMPL DI, $0x04 + JB matchlen_match2_match_nolit_encodeSnappyBlockAsm8B + MOVL (R8)(R10*1), R9 + CMPL (SI)(R10*1), R9 + JNE matchlen_match2_match_nolit_encodeSnappyBlockAsm8B + LEAL -4(DI), DI + LEAL 4(R10), R10 + +matchlen_match2_match_nolit_encodeSnappyBlockAsm8B: + CMPL DI, $0x01 + JE matchlen_match1_match_nolit_encodeSnappyBlockAsm8B + JB match_nolit_end_encodeSnappyBlockAsm8B + MOVW (R8)(R10*1), R9 + CMPW (SI)(R10*1), R9 + JNE matchlen_match1_match_nolit_encodeSnappyBlockAsm8B + LEAL 2(R10), R10 + SUBL $0x02, DI + JZ match_nolit_end_encodeSnappyBlockAsm8B + +matchlen_match1_match_nolit_encodeSnappyBlockAsm8B: + MOVB (R8)(R10*1), R9 + CMPB (SI)(R10*1), R9 + JNE match_nolit_end_encodeSnappyBlockAsm8B + LEAL 1(R10), R10 + +match_nolit_end_encodeSnappyBlockAsm8B: + ADDL R10, DX + MOVL 16(SP), SI + ADDL $0x04, R10 + MOVL DX, 12(SP) + + // emitCopy +two_byte_offset_match_nolit_encodeSnappyBlockAsm8B: + CMPL R10, $0x40 + JBE two_byte_offset_short_match_nolit_encodeSnappyBlockAsm8B + MOVB $0xee, (CX) + MOVW SI, 1(CX) + LEAL -60(R10), R10 + ADDQ $0x03, CX + JMP two_byte_offset_match_nolit_encodeSnappyBlockAsm8B + +two_byte_offset_short_match_nolit_encodeSnappyBlockAsm8B: + MOVL R10, DI + SHLL $0x02, DI + CMPL R10, $0x0c + JAE emit_copy_three_match_nolit_encodeSnappyBlockAsm8B + LEAL -15(DI), DI + MOVB SI, 1(CX) + SHRL $0x08, SI + SHLL $0x05, SI + ORL SI, DI + MOVB DI, (CX) + ADDQ $0x02, CX + JMP match_nolit_emitcopy_end_encodeSnappyBlockAsm8B + +emit_copy_three_match_nolit_encodeSnappyBlockAsm8B: + LEAL -2(DI), DI + MOVB DI, (CX) + MOVW SI, 1(CX) + ADDQ $0x03, CX + +match_nolit_emitcopy_end_encodeSnappyBlockAsm8B: + CMPL DX, 8(SP) + JAE emit_remainder_encodeSnappyBlockAsm8B + MOVQ -2(BX)(DX*1), DI + CMPQ CX, (SP) + JB match_nolit_dst_ok_encodeSnappyBlockAsm8B + MOVQ $0x00000000, ret+56(FP) + RET + +match_nolit_dst_ok_encodeSnappyBlockAsm8B: + MOVQ $0x9e3779b1, R9 + MOVQ DI, R8 + SHRQ $0x10, DI + MOVQ DI, SI + SHLQ $0x20, R8 + IMULQ 
R9, R8 + SHRQ $0x38, R8 + SHLQ $0x20, SI + IMULQ R9, SI + SHRQ $0x38, SI + LEAL -2(DX), R9 + LEAQ (AX)(SI*4), R10 + MOVL (R10), SI + MOVL R9, (AX)(R8*4) + MOVL DX, (R10) + CMPL (BX)(SI*1), DI + JEQ match_nolit_loop_encodeSnappyBlockAsm8B + INCL DX + JMP search_loop_encodeSnappyBlockAsm8B + +emit_remainder_encodeSnappyBlockAsm8B: + MOVQ src_len+32(FP), AX + SUBL 12(SP), AX + LEAQ 3(CX)(AX*1), AX + CMPQ AX, (SP) + JB emit_remainder_ok_encodeSnappyBlockAsm8B + MOVQ $0x00000000, ret+56(FP) + RET + +emit_remainder_ok_encodeSnappyBlockAsm8B: + MOVQ src_len+32(FP), AX + MOVL 12(SP), DX + CMPL DX, AX + JEQ emit_literal_done_emit_remainder_encodeSnappyBlockAsm8B + MOVL AX, SI + MOVL AX, 12(SP) + LEAQ (BX)(DX*1), AX + SUBL DX, SI + LEAL -1(SI), DX + CMPL DX, $0x3c + JB one_byte_emit_remainder_encodeSnappyBlockAsm8B + CMPL DX, $0x00000100 + JB two_bytes_emit_remainder_encodeSnappyBlockAsm8B + JB three_bytes_emit_remainder_encodeSnappyBlockAsm8B + +three_bytes_emit_remainder_encodeSnappyBlockAsm8B: + MOVB $0xf4, (CX) + MOVW DX, 1(CX) + ADDQ $0x03, CX + JMP memmove_long_emit_remainder_encodeSnappyBlockAsm8B + +two_bytes_emit_remainder_encodeSnappyBlockAsm8B: + MOVB $0xf0, (CX) + MOVB DL, 1(CX) + ADDQ $0x02, CX + CMPL DX, $0x40 + JB memmove_emit_remainder_encodeSnappyBlockAsm8B + JMP memmove_long_emit_remainder_encodeSnappyBlockAsm8B + +one_byte_emit_remainder_encodeSnappyBlockAsm8B: + SHLB $0x02, DL + MOVB DL, (CX) + ADDQ $0x01, CX + +memmove_emit_remainder_encodeSnappyBlockAsm8B: + LEAQ (CX)(SI*1), DX + MOVL SI, BX + + // genMemMoveShort + CMPQ BX, $0x03 + JB emit_lit_memmove_emit_remainder_encodeSnappyBlockAsm8B_memmove_move_1or2 + JE emit_lit_memmove_emit_remainder_encodeSnappyBlockAsm8B_memmove_move_3 + CMPQ BX, $0x08 + JB emit_lit_memmove_emit_remainder_encodeSnappyBlockAsm8B_memmove_move_4through7 + CMPQ BX, $0x10 + JBE emit_lit_memmove_emit_remainder_encodeSnappyBlockAsm8B_memmove_move_8through16 + CMPQ BX, $0x20 + JBE emit_lit_memmove_emit_remainder_encodeSnappyBlockAsm8B_memmove_move_17through32 + JMP emit_lit_memmove_emit_remainder_encodeSnappyBlockAsm8B_memmove_move_33through64 + +emit_lit_memmove_emit_remainder_encodeSnappyBlockAsm8B_memmove_move_1or2: + MOVB (AX), SI + MOVB -1(AX)(BX*1), AL + MOVB SI, (CX) + MOVB AL, -1(CX)(BX*1) + JMP memmove_end_copy_emit_remainder_encodeSnappyBlockAsm8B + +emit_lit_memmove_emit_remainder_encodeSnappyBlockAsm8B_memmove_move_3: + MOVW (AX), SI + MOVB 2(AX), AL + MOVW SI, (CX) + MOVB AL, 2(CX) + JMP memmove_end_copy_emit_remainder_encodeSnappyBlockAsm8B + +emit_lit_memmove_emit_remainder_encodeSnappyBlockAsm8B_memmove_move_4through7: + MOVL (AX), SI + MOVL -4(AX)(BX*1), AX + MOVL SI, (CX) + MOVL AX, -4(CX)(BX*1) + JMP memmove_end_copy_emit_remainder_encodeSnappyBlockAsm8B + +emit_lit_memmove_emit_remainder_encodeSnappyBlockAsm8B_memmove_move_8through16: + MOVQ (AX), SI + MOVQ -8(AX)(BX*1), AX + MOVQ SI, (CX) + MOVQ AX, -8(CX)(BX*1) + JMP memmove_end_copy_emit_remainder_encodeSnappyBlockAsm8B + +emit_lit_memmove_emit_remainder_encodeSnappyBlockAsm8B_memmove_move_17through32: + MOVOU (AX), X0 + MOVOU -16(AX)(BX*1), X1 + MOVOU X0, (CX) + MOVOU X1, -16(CX)(BX*1) + JMP memmove_end_copy_emit_remainder_encodeSnappyBlockAsm8B + +emit_lit_memmove_emit_remainder_encodeSnappyBlockAsm8B_memmove_move_33through64: + MOVOU (AX), X0 + MOVOU 16(AX), X1 + MOVOU -32(AX)(BX*1), X2 + MOVOU -16(AX)(BX*1), X3 + MOVOU X0, (CX) + MOVOU X1, 16(CX) + MOVOU X2, -32(CX)(BX*1) + MOVOU X3, -16(CX)(BX*1) + +memmove_end_copy_emit_remainder_encodeSnappyBlockAsm8B: + MOVQ DX, CX + JMP 
emit_literal_done_emit_remainder_encodeSnappyBlockAsm8B + +memmove_long_emit_remainder_encodeSnappyBlockAsm8B: + LEAQ (CX)(SI*1), DX + MOVL SI, BX + + // genMemMoveLong + MOVOU (AX), X0 + MOVOU 16(AX), X1 + MOVOU -32(AX)(BX*1), X2 + MOVOU -16(AX)(BX*1), X3 + MOVQ BX, DI + SHRQ $0x05, DI + MOVQ CX, SI + ANDL $0x0000001f, SI + MOVQ $0x00000040, R8 + SUBQ SI, R8 + DECQ DI + JA emit_lit_memmove_long_emit_remainder_encodeSnappyBlockAsm8Blarge_forward_sse_loop_32 + LEAQ -32(AX)(R8*1), SI + LEAQ -32(CX)(R8*1), R9 + +emit_lit_memmove_long_emit_remainder_encodeSnappyBlockAsm8Blarge_big_loop_back: + MOVOU (SI), X4 + MOVOU 16(SI), X5 + MOVOA X4, (R9) + MOVOA X5, 16(R9) + ADDQ $0x20, R9 + ADDQ $0x20, SI + ADDQ $0x20, R8 + DECQ DI + JNA emit_lit_memmove_long_emit_remainder_encodeSnappyBlockAsm8Blarge_big_loop_back + +emit_lit_memmove_long_emit_remainder_encodeSnappyBlockAsm8Blarge_forward_sse_loop_32: + MOVOU -32(AX)(R8*1), X4 + MOVOU -16(AX)(R8*1), X5 + MOVOA X4, -32(CX)(R8*1) + MOVOA X5, -16(CX)(R8*1) + ADDQ $0x20, R8 + CMPQ BX, R8 + JAE emit_lit_memmove_long_emit_remainder_encodeSnappyBlockAsm8Blarge_forward_sse_loop_32 + MOVOU X0, (CX) + MOVOU X1, 16(CX) + MOVOU X2, -32(CX)(BX*1) + MOVOU X3, -16(CX)(BX*1) + MOVQ DX, CX + +emit_literal_done_emit_remainder_encodeSnappyBlockAsm8B: + MOVQ dst_base+0(FP), AX + SUBQ AX, CX + MOVQ CX, ret+56(FP) + RET + +// func encodeSnappyBetterBlockAsm(dst []byte, src []byte, tmp *[589824]byte) int +// Requires: BMI, SSE2 +TEXT ·encodeSnappyBetterBlockAsm(SB), $24-64 + MOVQ tmp+48(FP), AX + MOVQ dst_base+0(FP), CX + MOVQ $0x00001200, DX + MOVQ AX, BX + PXOR X0, X0 + +zero_loop_encodeSnappyBetterBlockAsm: + MOVOU X0, (BX) + MOVOU X0, 16(BX) + MOVOU X0, 32(BX) + MOVOU X0, 48(BX) + MOVOU X0, 64(BX) + MOVOU X0, 80(BX) + MOVOU X0, 96(BX) + MOVOU X0, 112(BX) + ADDQ $0x80, BX + DECQ DX + JNZ zero_loop_encodeSnappyBetterBlockAsm + MOVL $0x00000000, 12(SP) + MOVQ src_len+32(FP), DX + LEAQ -9(DX), BX + LEAQ -8(DX), SI + MOVL SI, 8(SP) + SHRQ $0x05, DX + SUBL DX, BX + LEAQ (CX)(BX*1), BX + MOVQ BX, (SP) + MOVL $0x00000001, DX + MOVL $0x00000000, 16(SP) + MOVQ src_base+24(FP), BX + +search_loop_encodeSnappyBetterBlockAsm: + MOVL DX, SI + SUBL 12(SP), SI + SHRL $0x07, SI + CMPL SI, $0x63 + JBE check_maxskip_ok_encodeSnappyBetterBlockAsm + LEAL 100(DX), SI + JMP check_maxskip_cont_encodeSnappyBetterBlockAsm + +check_maxskip_ok_encodeSnappyBetterBlockAsm: + LEAL 1(DX)(SI*1), SI + +check_maxskip_cont_encodeSnappyBetterBlockAsm: + CMPL SI, 8(SP) + JAE emit_remainder_encodeSnappyBetterBlockAsm + MOVQ (BX)(DX*1), DI + MOVL SI, 20(SP) + MOVQ $0x00cf1bbcdcbfa563, R9 + MOVQ $0x9e3779b1, SI + MOVQ DI, R10 + MOVQ DI, R11 + SHLQ $0x08, R10 + IMULQ R9, R10 + SHRQ $0x2f, R10 + SHLQ $0x20, R11 + IMULQ SI, R11 + SHRQ $0x32, R11 + MOVL (AX)(R10*4), SI + MOVL 524288(AX)(R11*4), R8 + MOVL DX, (AX)(R10*4) + MOVL DX, 524288(AX)(R11*4) + MOVQ (BX)(SI*1), R10 + MOVQ (BX)(R8*1), R11 + CMPQ R10, DI + JEQ candidate_match_encodeSnappyBetterBlockAsm + CMPQ R11, DI + JNE no_short_found_encodeSnappyBetterBlockAsm + MOVL R8, SI + JMP candidate_match_encodeSnappyBetterBlockAsm + +no_short_found_encodeSnappyBetterBlockAsm: + CMPL R10, DI + JEQ candidate_match_encodeSnappyBetterBlockAsm + CMPL R11, DI + JEQ candidateS_match_encodeSnappyBetterBlockAsm + MOVL 20(SP), DX + JMP search_loop_encodeSnappyBetterBlockAsm + +candidateS_match_encodeSnappyBetterBlockAsm: + SHRQ $0x08, DI + MOVQ DI, R10 + SHLQ $0x08, R10 + IMULQ R9, R10 + SHRQ $0x2f, R10 + MOVL (AX)(R10*4), SI + INCL DX + MOVL DX, (AX)(R10*4) + CMPL (BX)(SI*1), 
DI + JEQ candidate_match_encodeSnappyBetterBlockAsm + DECL DX + MOVL R8, SI + +candidate_match_encodeSnappyBetterBlockAsm: + MOVL 12(SP), DI + TESTL SI, SI + JZ match_extend_back_end_encodeSnappyBetterBlockAsm + +match_extend_back_loop_encodeSnappyBetterBlockAsm: + CMPL DX, DI + JBE match_extend_back_end_encodeSnappyBetterBlockAsm + MOVB -1(BX)(SI*1), R8 + MOVB -1(BX)(DX*1), R9 + CMPB R8, R9 + JNE match_extend_back_end_encodeSnappyBetterBlockAsm + LEAL -1(DX), DX + DECL SI + JZ match_extend_back_end_encodeSnappyBetterBlockAsm + JMP match_extend_back_loop_encodeSnappyBetterBlockAsm + +match_extend_back_end_encodeSnappyBetterBlockAsm: + MOVL DX, DI + SUBL 12(SP), DI + LEAQ 5(CX)(DI*1), DI + CMPQ DI, (SP) + JB match_dst_size_check_encodeSnappyBetterBlockAsm + MOVQ $0x00000000, ret+56(FP) + RET + +match_dst_size_check_encodeSnappyBetterBlockAsm: + MOVL DX, DI + ADDL $0x04, DX + ADDL $0x04, SI + MOVQ src_len+32(FP), R8 + SUBL DX, R8 + LEAQ (BX)(DX*1), R9 + LEAQ (BX)(SI*1), R10 + + // matchLen + XORL R12, R12 + +matchlen_loopback_16_match_nolit_encodeSnappyBetterBlockAsm: + CMPL R8, $0x10 + JB matchlen_match8_match_nolit_encodeSnappyBetterBlockAsm + MOVQ (R9)(R12*1), R11 + MOVQ 8(R9)(R12*1), R13 + XORQ (R10)(R12*1), R11 + JNZ matchlen_bsf_8_match_nolit_encodeSnappyBetterBlockAsm + XORQ 8(R10)(R12*1), R13 + JNZ matchlen_bsf_16match_nolit_encodeSnappyBetterBlockAsm + LEAL -16(R8), R8 + LEAL 16(R12), R12 + JMP matchlen_loopback_16_match_nolit_encodeSnappyBetterBlockAsm + +matchlen_bsf_16match_nolit_encodeSnappyBetterBlockAsm: +#ifdef GOAMD64_v3 + TZCNTQ R13, R13 + +#else + BSFQ R13, R13 + +#endif + SARQ $0x03, R13 + LEAL 8(R12)(R13*1), R12 + JMP match_nolit_end_encodeSnappyBetterBlockAsm + +matchlen_match8_match_nolit_encodeSnappyBetterBlockAsm: + CMPL R8, $0x08 + JB matchlen_match4_match_nolit_encodeSnappyBetterBlockAsm + MOVQ (R9)(R12*1), R11 + XORQ (R10)(R12*1), R11 + JNZ matchlen_bsf_8_match_nolit_encodeSnappyBetterBlockAsm + LEAL -8(R8), R8 + LEAL 8(R12), R12 + JMP matchlen_match4_match_nolit_encodeSnappyBetterBlockAsm + +matchlen_bsf_8_match_nolit_encodeSnappyBetterBlockAsm: +#ifdef GOAMD64_v3 + TZCNTQ R11, R11 + +#else + BSFQ R11, R11 + +#endif + SARQ $0x03, R11 + LEAL (R12)(R11*1), R12 + JMP match_nolit_end_encodeSnappyBetterBlockAsm + +matchlen_match4_match_nolit_encodeSnappyBetterBlockAsm: + CMPL R8, $0x04 + JB matchlen_match2_match_nolit_encodeSnappyBetterBlockAsm + MOVL (R9)(R12*1), R11 + CMPL (R10)(R12*1), R11 + JNE matchlen_match2_match_nolit_encodeSnappyBetterBlockAsm + LEAL -4(R8), R8 + LEAL 4(R12), R12 + +matchlen_match2_match_nolit_encodeSnappyBetterBlockAsm: + CMPL R8, $0x01 + JE matchlen_match1_match_nolit_encodeSnappyBetterBlockAsm + JB match_nolit_end_encodeSnappyBetterBlockAsm + MOVW (R9)(R12*1), R11 + CMPW (R10)(R12*1), R11 + JNE matchlen_match1_match_nolit_encodeSnappyBetterBlockAsm + LEAL 2(R12), R12 + SUBL $0x02, R8 + JZ match_nolit_end_encodeSnappyBetterBlockAsm + +matchlen_match1_match_nolit_encodeSnappyBetterBlockAsm: + MOVB (R9)(R12*1), R11 + CMPB (R10)(R12*1), R11 + JNE match_nolit_end_encodeSnappyBetterBlockAsm + LEAL 1(R12), R12 + +match_nolit_end_encodeSnappyBetterBlockAsm: + MOVL DX, R8 + SUBL SI, R8 + + // Check if repeat + CMPL R12, $0x01 + JA match_length_ok_encodeSnappyBetterBlockAsm + CMPL R8, $0x0000ffff + JBE match_length_ok_encodeSnappyBetterBlockAsm + MOVL 20(SP), DX + INCL DX + JMP search_loop_encodeSnappyBetterBlockAsm + +match_length_ok_encodeSnappyBetterBlockAsm: + MOVL R8, 16(SP) + MOVL 12(SP), SI + CMPL SI, DI + JEQ 
emit_literal_done_match_emit_encodeSnappyBetterBlockAsm + MOVL DI, R9 + MOVL DI, 12(SP) + LEAQ (BX)(SI*1), R10 + SUBL SI, R9 + LEAL -1(R9), SI + CMPL SI, $0x3c + JB one_byte_match_emit_encodeSnappyBetterBlockAsm + CMPL SI, $0x00000100 + JB two_bytes_match_emit_encodeSnappyBetterBlockAsm + CMPL SI, $0x00010000 + JB three_bytes_match_emit_encodeSnappyBetterBlockAsm + CMPL SI, $0x01000000 + JB four_bytes_match_emit_encodeSnappyBetterBlockAsm + MOVB $0xfc, (CX) + MOVL SI, 1(CX) + ADDQ $0x05, CX + JMP memmove_long_match_emit_encodeSnappyBetterBlockAsm + +four_bytes_match_emit_encodeSnappyBetterBlockAsm: + MOVL SI, R11 + SHRL $0x10, R11 + MOVB $0xf8, (CX) + MOVW SI, 1(CX) + MOVB R11, 3(CX) + ADDQ $0x04, CX + JMP memmove_long_match_emit_encodeSnappyBetterBlockAsm + +three_bytes_match_emit_encodeSnappyBetterBlockAsm: + MOVB $0xf4, (CX) + MOVW SI, 1(CX) + ADDQ $0x03, CX + JMP memmove_long_match_emit_encodeSnappyBetterBlockAsm + +two_bytes_match_emit_encodeSnappyBetterBlockAsm: + MOVB $0xf0, (CX) + MOVB SI, 1(CX) + ADDQ $0x02, CX + CMPL SI, $0x40 + JB memmove_match_emit_encodeSnappyBetterBlockAsm + JMP memmove_long_match_emit_encodeSnappyBetterBlockAsm + +one_byte_match_emit_encodeSnappyBetterBlockAsm: + SHLB $0x02, SI + MOVB SI, (CX) + ADDQ $0x01, CX + +memmove_match_emit_encodeSnappyBetterBlockAsm: + LEAQ (CX)(R9*1), SI + + // genMemMoveShort + CMPQ R9, $0x08 + JBE emit_lit_memmove_match_emit_encodeSnappyBetterBlockAsm_memmove_move_8 + CMPQ R9, $0x10 + JBE emit_lit_memmove_match_emit_encodeSnappyBetterBlockAsm_memmove_move_8through16 + CMPQ R9, $0x20 + JBE emit_lit_memmove_match_emit_encodeSnappyBetterBlockAsm_memmove_move_17through32 + JMP emit_lit_memmove_match_emit_encodeSnappyBetterBlockAsm_memmove_move_33through64 + +emit_lit_memmove_match_emit_encodeSnappyBetterBlockAsm_memmove_move_8: + MOVQ (R10), R11 + MOVQ R11, (CX) + JMP memmove_end_copy_match_emit_encodeSnappyBetterBlockAsm + +emit_lit_memmove_match_emit_encodeSnappyBetterBlockAsm_memmove_move_8through16: + MOVQ (R10), R11 + MOVQ -8(R10)(R9*1), R10 + MOVQ R11, (CX) + MOVQ R10, -8(CX)(R9*1) + JMP memmove_end_copy_match_emit_encodeSnappyBetterBlockAsm + +emit_lit_memmove_match_emit_encodeSnappyBetterBlockAsm_memmove_move_17through32: + MOVOU (R10), X0 + MOVOU -16(R10)(R9*1), X1 + MOVOU X0, (CX) + MOVOU X1, -16(CX)(R9*1) + JMP memmove_end_copy_match_emit_encodeSnappyBetterBlockAsm + +emit_lit_memmove_match_emit_encodeSnappyBetterBlockAsm_memmove_move_33through64: + MOVOU (R10), X0 + MOVOU 16(R10), X1 + MOVOU -32(R10)(R9*1), X2 + MOVOU -16(R10)(R9*1), X3 + MOVOU X0, (CX) + MOVOU X1, 16(CX) + MOVOU X2, -32(CX)(R9*1) + MOVOU X3, -16(CX)(R9*1) + +memmove_end_copy_match_emit_encodeSnappyBetterBlockAsm: + MOVQ SI, CX + JMP emit_literal_done_match_emit_encodeSnappyBetterBlockAsm + +memmove_long_match_emit_encodeSnappyBetterBlockAsm: + LEAQ (CX)(R9*1), SI + + // genMemMoveLong + MOVOU (R10), X0 + MOVOU 16(R10), X1 + MOVOU -32(R10)(R9*1), X2 + MOVOU -16(R10)(R9*1), X3 + MOVQ R9, R13 + SHRQ $0x05, R13 + MOVQ CX, R11 + ANDL $0x0000001f, R11 + MOVQ $0x00000040, R14 + SUBQ R11, R14 + DECQ R13 + JA emit_lit_memmove_long_match_emit_encodeSnappyBetterBlockAsmlarge_forward_sse_loop_32 + LEAQ -32(R10)(R14*1), R11 + LEAQ -32(CX)(R14*1), R15 + +emit_lit_memmove_long_match_emit_encodeSnappyBetterBlockAsmlarge_big_loop_back: + MOVOU (R11), X4 + MOVOU 16(R11), X5 + MOVOA X4, (R15) + MOVOA X5, 16(R15) + ADDQ $0x20, R15 + ADDQ $0x20, R11 + ADDQ $0x20, R14 + DECQ R13 + JNA emit_lit_memmove_long_match_emit_encodeSnappyBetterBlockAsmlarge_big_loop_back + 
+emit_lit_memmove_long_match_emit_encodeSnappyBetterBlockAsmlarge_forward_sse_loop_32: + MOVOU -32(R10)(R14*1), X4 + MOVOU -16(R10)(R14*1), X5 + MOVOA X4, -32(CX)(R14*1) + MOVOA X5, -16(CX)(R14*1) + ADDQ $0x20, R14 + CMPQ R9, R14 + JAE emit_lit_memmove_long_match_emit_encodeSnappyBetterBlockAsmlarge_forward_sse_loop_32 + MOVOU X0, (CX) + MOVOU X1, 16(CX) + MOVOU X2, -32(CX)(R9*1) + MOVOU X3, -16(CX)(R9*1) + MOVQ SI, CX + +emit_literal_done_match_emit_encodeSnappyBetterBlockAsm: + ADDL R12, DX + ADDL $0x04, R12 + MOVL DX, 12(SP) + + // emitCopy + CMPL R8, $0x00010000 + JB two_byte_offset_match_nolit_encodeSnappyBetterBlockAsm + +four_bytes_loop_back_match_nolit_encodeSnappyBetterBlockAsm: + CMPL R12, $0x40 + JBE four_bytes_remain_match_nolit_encodeSnappyBetterBlockAsm + MOVB $0xff, (CX) + MOVL R8, 1(CX) + LEAL -64(R12), R12 + ADDQ $0x05, CX + CMPL R12, $0x04 + JB four_bytes_remain_match_nolit_encodeSnappyBetterBlockAsm + JMP four_bytes_loop_back_match_nolit_encodeSnappyBetterBlockAsm + +four_bytes_remain_match_nolit_encodeSnappyBetterBlockAsm: + TESTL R12, R12 + JZ match_nolit_emitcopy_end_encodeSnappyBetterBlockAsm + XORL SI, SI + LEAL -1(SI)(R12*4), R12 + MOVB R12, (CX) + MOVL R8, 1(CX) + ADDQ $0x05, CX + JMP match_nolit_emitcopy_end_encodeSnappyBetterBlockAsm + +two_byte_offset_match_nolit_encodeSnappyBetterBlockAsm: + CMPL R12, $0x40 + JBE two_byte_offset_short_match_nolit_encodeSnappyBetterBlockAsm + MOVB $0xee, (CX) + MOVW R8, 1(CX) + LEAL -60(R12), R12 + ADDQ $0x03, CX + JMP two_byte_offset_match_nolit_encodeSnappyBetterBlockAsm + +two_byte_offset_short_match_nolit_encodeSnappyBetterBlockAsm: + MOVL R12, SI + SHLL $0x02, SI + CMPL R12, $0x0c + JAE emit_copy_three_match_nolit_encodeSnappyBetterBlockAsm + CMPL R8, $0x00000800 + JAE emit_copy_three_match_nolit_encodeSnappyBetterBlockAsm + LEAL -15(SI), SI + MOVB R8, 1(CX) + SHRL $0x08, R8 + SHLL $0x05, R8 + ORL R8, SI + MOVB SI, (CX) + ADDQ $0x02, CX + JMP match_nolit_emitcopy_end_encodeSnappyBetterBlockAsm + +emit_copy_three_match_nolit_encodeSnappyBetterBlockAsm: + LEAL -2(SI), SI + MOVB SI, (CX) + MOVW R8, 1(CX) + ADDQ $0x03, CX + +match_nolit_emitcopy_end_encodeSnappyBetterBlockAsm: + CMPL DX, 8(SP) + JAE emit_remainder_encodeSnappyBetterBlockAsm + CMPQ CX, (SP) + JB match_nolit_dst_ok_encodeSnappyBetterBlockAsm + MOVQ $0x00000000, ret+56(FP) + RET + +match_nolit_dst_ok_encodeSnappyBetterBlockAsm: + MOVQ $0x00cf1bbcdcbfa563, SI + MOVQ $0x9e3779b1, R8 + LEAQ 1(DI), DI + LEAQ -2(DX), R9 + MOVQ (BX)(DI*1), R10 + MOVQ 1(BX)(DI*1), R11 + MOVQ (BX)(R9*1), R12 + MOVQ 1(BX)(R9*1), R13 + SHLQ $0x08, R10 + IMULQ SI, R10 + SHRQ $0x2f, R10 + SHLQ $0x20, R11 + IMULQ R8, R11 + SHRQ $0x32, R11 + SHLQ $0x08, R12 + IMULQ SI, R12 + SHRQ $0x2f, R12 + SHLQ $0x20, R13 + IMULQ R8, R13 + SHRQ $0x32, R13 + LEAQ 1(DI), R8 + LEAQ 1(R9), R14 + MOVL DI, (AX)(R10*4) + MOVL R9, (AX)(R12*4) + MOVL R8, 524288(AX)(R11*4) + MOVL R14, 524288(AX)(R13*4) + LEAQ 1(R9)(DI*1), R8 + SHRQ $0x01, R8 + ADDQ $0x01, DI + SUBQ $0x01, R9 + +index_loop_encodeSnappyBetterBlockAsm: + CMPQ R8, R9 + JAE search_loop_encodeSnappyBetterBlockAsm + MOVQ (BX)(DI*1), R10 + MOVQ (BX)(R8*1), R11 + SHLQ $0x08, R10 + IMULQ SI, R10 + SHRQ $0x2f, R10 + SHLQ $0x08, R11 + IMULQ SI, R11 + SHRQ $0x2f, R11 + MOVL DI, (AX)(R10*4) + MOVL R8, (AX)(R11*4) + ADDQ $0x02, DI + ADDQ $0x02, R8 + JMP index_loop_encodeSnappyBetterBlockAsm + +emit_remainder_encodeSnappyBetterBlockAsm: + MOVQ src_len+32(FP), AX + SUBL 12(SP), AX + LEAQ 5(CX)(AX*1), AX + CMPQ AX, (SP) + JB 
emit_remainder_ok_encodeSnappyBetterBlockAsm + MOVQ $0x00000000, ret+56(FP) + RET + +emit_remainder_ok_encodeSnappyBetterBlockAsm: + MOVQ src_len+32(FP), AX + MOVL 12(SP), DX + CMPL DX, AX + JEQ emit_literal_done_emit_remainder_encodeSnappyBetterBlockAsm + MOVL AX, SI + MOVL AX, 12(SP) + LEAQ (BX)(DX*1), AX + SUBL DX, SI + LEAL -1(SI), DX + CMPL DX, $0x3c + JB one_byte_emit_remainder_encodeSnappyBetterBlockAsm + CMPL DX, $0x00000100 + JB two_bytes_emit_remainder_encodeSnappyBetterBlockAsm + CMPL DX, $0x00010000 + JB three_bytes_emit_remainder_encodeSnappyBetterBlockAsm + CMPL DX, $0x01000000 + JB four_bytes_emit_remainder_encodeSnappyBetterBlockAsm + MOVB $0xfc, (CX) + MOVL DX, 1(CX) + ADDQ $0x05, CX + JMP memmove_long_emit_remainder_encodeSnappyBetterBlockAsm + +four_bytes_emit_remainder_encodeSnappyBetterBlockAsm: + MOVL DX, BX + SHRL $0x10, BX + MOVB $0xf8, (CX) + MOVW DX, 1(CX) + MOVB BL, 3(CX) + ADDQ $0x04, CX + JMP memmove_long_emit_remainder_encodeSnappyBetterBlockAsm + +three_bytes_emit_remainder_encodeSnappyBetterBlockAsm: + MOVB $0xf4, (CX) + MOVW DX, 1(CX) + ADDQ $0x03, CX + JMP memmove_long_emit_remainder_encodeSnappyBetterBlockAsm + +two_bytes_emit_remainder_encodeSnappyBetterBlockAsm: + MOVB $0xf0, (CX) + MOVB DL, 1(CX) + ADDQ $0x02, CX + CMPL DX, $0x40 + JB memmove_emit_remainder_encodeSnappyBetterBlockAsm + JMP memmove_long_emit_remainder_encodeSnappyBetterBlockAsm + +one_byte_emit_remainder_encodeSnappyBetterBlockAsm: + SHLB $0x02, DL + MOVB DL, (CX) + ADDQ $0x01, CX + +memmove_emit_remainder_encodeSnappyBetterBlockAsm: + LEAQ (CX)(SI*1), DX + MOVL SI, BX + + // genMemMoveShort + CMPQ BX, $0x03 + JB emit_lit_memmove_emit_remainder_encodeSnappyBetterBlockAsm_memmove_move_1or2 + JE emit_lit_memmove_emit_remainder_encodeSnappyBetterBlockAsm_memmove_move_3 + CMPQ BX, $0x08 + JB emit_lit_memmove_emit_remainder_encodeSnappyBetterBlockAsm_memmove_move_4through7 + CMPQ BX, $0x10 + JBE emit_lit_memmove_emit_remainder_encodeSnappyBetterBlockAsm_memmove_move_8through16 + CMPQ BX, $0x20 + JBE emit_lit_memmove_emit_remainder_encodeSnappyBetterBlockAsm_memmove_move_17through32 + JMP emit_lit_memmove_emit_remainder_encodeSnappyBetterBlockAsm_memmove_move_33through64 + +emit_lit_memmove_emit_remainder_encodeSnappyBetterBlockAsm_memmove_move_1or2: + MOVB (AX), SI + MOVB -1(AX)(BX*1), AL + MOVB SI, (CX) + MOVB AL, -1(CX)(BX*1) + JMP memmove_end_copy_emit_remainder_encodeSnappyBetterBlockAsm + +emit_lit_memmove_emit_remainder_encodeSnappyBetterBlockAsm_memmove_move_3: + MOVW (AX), SI + MOVB 2(AX), AL + MOVW SI, (CX) + MOVB AL, 2(CX) + JMP memmove_end_copy_emit_remainder_encodeSnappyBetterBlockAsm + +emit_lit_memmove_emit_remainder_encodeSnappyBetterBlockAsm_memmove_move_4through7: + MOVL (AX), SI + MOVL -4(AX)(BX*1), AX + MOVL SI, (CX) + MOVL AX, -4(CX)(BX*1) + JMP memmove_end_copy_emit_remainder_encodeSnappyBetterBlockAsm + +emit_lit_memmove_emit_remainder_encodeSnappyBetterBlockAsm_memmove_move_8through16: + MOVQ (AX), SI + MOVQ -8(AX)(BX*1), AX + MOVQ SI, (CX) + MOVQ AX, -8(CX)(BX*1) + JMP memmove_end_copy_emit_remainder_encodeSnappyBetterBlockAsm + +emit_lit_memmove_emit_remainder_encodeSnappyBetterBlockAsm_memmove_move_17through32: + MOVOU (AX), X0 + MOVOU -16(AX)(BX*1), X1 + MOVOU X0, (CX) + MOVOU X1, -16(CX)(BX*1) + JMP memmove_end_copy_emit_remainder_encodeSnappyBetterBlockAsm + +emit_lit_memmove_emit_remainder_encodeSnappyBetterBlockAsm_memmove_move_33through64: + MOVOU (AX), X0 + MOVOU 16(AX), X1 + MOVOU -32(AX)(BX*1), X2 + MOVOU -16(AX)(BX*1), X3 + MOVOU X0, (CX) + MOVOU X1, 
16(CX) + MOVOU X2, -32(CX)(BX*1) + MOVOU X3, -16(CX)(BX*1) + +memmove_end_copy_emit_remainder_encodeSnappyBetterBlockAsm: + MOVQ DX, CX + JMP emit_literal_done_emit_remainder_encodeSnappyBetterBlockAsm + +memmove_long_emit_remainder_encodeSnappyBetterBlockAsm: + LEAQ (CX)(SI*1), DX + MOVL SI, BX + + // genMemMoveLong + MOVOU (AX), X0 + MOVOU 16(AX), X1 + MOVOU -32(AX)(BX*1), X2 + MOVOU -16(AX)(BX*1), X3 + MOVQ BX, DI + SHRQ $0x05, DI + MOVQ CX, SI + ANDL $0x0000001f, SI + MOVQ $0x00000040, R8 + SUBQ SI, R8 + DECQ DI + JA emit_lit_memmove_long_emit_remainder_encodeSnappyBetterBlockAsmlarge_forward_sse_loop_32 + LEAQ -32(AX)(R8*1), SI + LEAQ -32(CX)(R8*1), R9 + +emit_lit_memmove_long_emit_remainder_encodeSnappyBetterBlockAsmlarge_big_loop_back: + MOVOU (SI), X4 + MOVOU 16(SI), X5 + MOVOA X4, (R9) + MOVOA X5, 16(R9) + ADDQ $0x20, R9 + ADDQ $0x20, SI + ADDQ $0x20, R8 + DECQ DI + JNA emit_lit_memmove_long_emit_remainder_encodeSnappyBetterBlockAsmlarge_big_loop_back + +emit_lit_memmove_long_emit_remainder_encodeSnappyBetterBlockAsmlarge_forward_sse_loop_32: + MOVOU -32(AX)(R8*1), X4 + MOVOU -16(AX)(R8*1), X5 + MOVOA X4, -32(CX)(R8*1) + MOVOA X5, -16(CX)(R8*1) + ADDQ $0x20, R8 + CMPQ BX, R8 + JAE emit_lit_memmove_long_emit_remainder_encodeSnappyBetterBlockAsmlarge_forward_sse_loop_32 + MOVOU X0, (CX) + MOVOU X1, 16(CX) + MOVOU X2, -32(CX)(BX*1) + MOVOU X3, -16(CX)(BX*1) + MOVQ DX, CX + +emit_literal_done_emit_remainder_encodeSnappyBetterBlockAsm: + MOVQ dst_base+0(FP), AX + SUBQ AX, CX + MOVQ CX, ret+56(FP) + RET + +// func encodeSnappyBetterBlockAsm64K(dst []byte, src []byte, tmp *[294912]byte) int +// Requires: BMI, SSE2 +TEXT ·encodeSnappyBetterBlockAsm64K(SB), $24-64 + MOVQ tmp+48(FP), AX + MOVQ dst_base+0(FP), CX + MOVQ $0x00000900, DX + MOVQ AX, BX + PXOR X0, X0 + +zero_loop_encodeSnappyBetterBlockAsm64K: + MOVOU X0, (BX) + MOVOU X0, 16(BX) + MOVOU X0, 32(BX) + MOVOU X0, 48(BX) + MOVOU X0, 64(BX) + MOVOU X0, 80(BX) + MOVOU X0, 96(BX) + MOVOU X0, 112(BX) + ADDQ $0x80, BX + DECQ DX + JNZ zero_loop_encodeSnappyBetterBlockAsm64K + MOVL $0x00000000, 12(SP) + MOVQ src_len+32(FP), DX + LEAQ -9(DX), BX + LEAQ -8(DX), SI + MOVL SI, 8(SP) + SHRQ $0x05, DX + SUBL DX, BX + LEAQ (CX)(BX*1), BX + MOVQ BX, (SP) + MOVL $0x00000001, DX + MOVL $0x00000000, 16(SP) + MOVQ src_base+24(FP), BX + +search_loop_encodeSnappyBetterBlockAsm64K: + MOVL DX, SI + SUBL 12(SP), SI + SHRL $0x07, SI + LEAL 1(DX)(SI*1), SI + CMPL SI, 8(SP) + JAE emit_remainder_encodeSnappyBetterBlockAsm64K + MOVQ (BX)(DX*1), DI + MOVL SI, 20(SP) + MOVQ $0x00cf1bbcdcbfa563, R9 + MOVQ $0x9e3779b1, SI + MOVQ DI, R10 + MOVQ DI, R11 + SHLQ $0x08, R10 + IMULQ R9, R10 + SHRQ $0x30, R10 + SHLQ $0x20, R11 + IMULQ SI, R11 + SHRQ $0x33, R11 + MOVL (AX)(R10*4), SI + MOVL 262144(AX)(R11*4), R8 + MOVL DX, (AX)(R10*4) + MOVL DX, 262144(AX)(R11*4) + MOVQ (BX)(SI*1), R10 + MOVQ (BX)(R8*1), R11 + CMPQ R10, DI + JEQ candidate_match_encodeSnappyBetterBlockAsm64K + CMPQ R11, DI + JNE no_short_found_encodeSnappyBetterBlockAsm64K + MOVL R8, SI + JMP candidate_match_encodeSnappyBetterBlockAsm64K + +no_short_found_encodeSnappyBetterBlockAsm64K: + CMPL R10, DI + JEQ candidate_match_encodeSnappyBetterBlockAsm64K + CMPL R11, DI + JEQ candidateS_match_encodeSnappyBetterBlockAsm64K + MOVL 20(SP), DX + JMP search_loop_encodeSnappyBetterBlockAsm64K + +candidateS_match_encodeSnappyBetterBlockAsm64K: + SHRQ $0x08, DI + MOVQ DI, R10 + SHLQ $0x08, R10 + IMULQ R9, R10 + SHRQ $0x30, R10 + MOVL (AX)(R10*4), SI + INCL DX + MOVL DX, (AX)(R10*4) + CMPL (BX)(SI*1), DI + JEQ 
candidate_match_encodeSnappyBetterBlockAsm64K + DECL DX + MOVL R8, SI + +candidate_match_encodeSnappyBetterBlockAsm64K: + MOVL 12(SP), DI + TESTL SI, SI + JZ match_extend_back_end_encodeSnappyBetterBlockAsm64K + +match_extend_back_loop_encodeSnappyBetterBlockAsm64K: + CMPL DX, DI + JBE match_extend_back_end_encodeSnappyBetterBlockAsm64K + MOVB -1(BX)(SI*1), R8 + MOVB -1(BX)(DX*1), R9 + CMPB R8, R9 + JNE match_extend_back_end_encodeSnappyBetterBlockAsm64K + LEAL -1(DX), DX + DECL SI + JZ match_extend_back_end_encodeSnappyBetterBlockAsm64K + JMP match_extend_back_loop_encodeSnappyBetterBlockAsm64K + +match_extend_back_end_encodeSnappyBetterBlockAsm64K: + MOVL DX, DI + SUBL 12(SP), DI + LEAQ 3(CX)(DI*1), DI + CMPQ DI, (SP) + JB match_dst_size_check_encodeSnappyBetterBlockAsm64K + MOVQ $0x00000000, ret+56(FP) + RET + +match_dst_size_check_encodeSnappyBetterBlockAsm64K: + MOVL DX, DI + ADDL $0x04, DX + ADDL $0x04, SI + MOVQ src_len+32(FP), R8 + SUBL DX, R8 + LEAQ (BX)(DX*1), R9 + LEAQ (BX)(SI*1), R10 + + // matchLen + XORL R12, R12 + +matchlen_loopback_16_match_nolit_encodeSnappyBetterBlockAsm64K: + CMPL R8, $0x10 + JB matchlen_match8_match_nolit_encodeSnappyBetterBlockAsm64K + MOVQ (R9)(R12*1), R11 + MOVQ 8(R9)(R12*1), R13 + XORQ (R10)(R12*1), R11 + JNZ matchlen_bsf_8_match_nolit_encodeSnappyBetterBlockAsm64K + XORQ 8(R10)(R12*1), R13 + JNZ matchlen_bsf_16match_nolit_encodeSnappyBetterBlockAsm64K + LEAL -16(R8), R8 + LEAL 16(R12), R12 + JMP matchlen_loopback_16_match_nolit_encodeSnappyBetterBlockAsm64K + +matchlen_bsf_16match_nolit_encodeSnappyBetterBlockAsm64K: +#ifdef GOAMD64_v3 + TZCNTQ R13, R13 + +#else + BSFQ R13, R13 + +#endif + SARQ $0x03, R13 + LEAL 8(R12)(R13*1), R12 + JMP match_nolit_end_encodeSnappyBetterBlockAsm64K + +matchlen_match8_match_nolit_encodeSnappyBetterBlockAsm64K: + CMPL R8, $0x08 + JB matchlen_match4_match_nolit_encodeSnappyBetterBlockAsm64K + MOVQ (R9)(R12*1), R11 + XORQ (R10)(R12*1), R11 + JNZ matchlen_bsf_8_match_nolit_encodeSnappyBetterBlockAsm64K + LEAL -8(R8), R8 + LEAL 8(R12), R12 + JMP matchlen_match4_match_nolit_encodeSnappyBetterBlockAsm64K + +matchlen_bsf_8_match_nolit_encodeSnappyBetterBlockAsm64K: +#ifdef GOAMD64_v3 + TZCNTQ R11, R11 + +#else + BSFQ R11, R11 + +#endif + SARQ $0x03, R11 + LEAL (R12)(R11*1), R12 + JMP match_nolit_end_encodeSnappyBetterBlockAsm64K + +matchlen_match4_match_nolit_encodeSnappyBetterBlockAsm64K: + CMPL R8, $0x04 + JB matchlen_match2_match_nolit_encodeSnappyBetterBlockAsm64K + MOVL (R9)(R12*1), R11 + CMPL (R10)(R12*1), R11 + JNE matchlen_match2_match_nolit_encodeSnappyBetterBlockAsm64K + LEAL -4(R8), R8 + LEAL 4(R12), R12 + +matchlen_match2_match_nolit_encodeSnappyBetterBlockAsm64K: + CMPL R8, $0x01 + JE matchlen_match1_match_nolit_encodeSnappyBetterBlockAsm64K + JB match_nolit_end_encodeSnappyBetterBlockAsm64K + MOVW (R9)(R12*1), R11 + CMPW (R10)(R12*1), R11 + JNE matchlen_match1_match_nolit_encodeSnappyBetterBlockAsm64K + LEAL 2(R12), R12 + SUBL $0x02, R8 + JZ match_nolit_end_encodeSnappyBetterBlockAsm64K + +matchlen_match1_match_nolit_encodeSnappyBetterBlockAsm64K: + MOVB (R9)(R12*1), R11 + CMPB (R10)(R12*1), R11 + JNE match_nolit_end_encodeSnappyBetterBlockAsm64K + LEAL 1(R12), R12 + +match_nolit_end_encodeSnappyBetterBlockAsm64K: + MOVL DX, R8 + SUBL SI, R8 + + // Check if repeat + MOVL R8, 16(SP) + MOVL 12(SP), SI + CMPL SI, DI + JEQ emit_literal_done_match_emit_encodeSnappyBetterBlockAsm64K + MOVL DI, R9 + MOVL DI, 12(SP) + LEAQ (BX)(SI*1), R10 + SUBL SI, R9 + LEAL -1(R9), SI + CMPL SI, $0x3c + JB 
one_byte_match_emit_encodeSnappyBetterBlockAsm64K + CMPL SI, $0x00000100 + JB two_bytes_match_emit_encodeSnappyBetterBlockAsm64K + JB three_bytes_match_emit_encodeSnappyBetterBlockAsm64K + +three_bytes_match_emit_encodeSnappyBetterBlockAsm64K: + MOVB $0xf4, (CX) + MOVW SI, 1(CX) + ADDQ $0x03, CX + JMP memmove_long_match_emit_encodeSnappyBetterBlockAsm64K + +two_bytes_match_emit_encodeSnappyBetterBlockAsm64K: + MOVB $0xf0, (CX) + MOVB SI, 1(CX) + ADDQ $0x02, CX + CMPL SI, $0x40 + JB memmove_match_emit_encodeSnappyBetterBlockAsm64K + JMP memmove_long_match_emit_encodeSnappyBetterBlockAsm64K + +one_byte_match_emit_encodeSnappyBetterBlockAsm64K: + SHLB $0x02, SI + MOVB SI, (CX) + ADDQ $0x01, CX + +memmove_match_emit_encodeSnappyBetterBlockAsm64K: + LEAQ (CX)(R9*1), SI + + // genMemMoveShort + CMPQ R9, $0x08 + JBE emit_lit_memmove_match_emit_encodeSnappyBetterBlockAsm64K_memmove_move_8 + CMPQ R9, $0x10 + JBE emit_lit_memmove_match_emit_encodeSnappyBetterBlockAsm64K_memmove_move_8through16 + CMPQ R9, $0x20 + JBE emit_lit_memmove_match_emit_encodeSnappyBetterBlockAsm64K_memmove_move_17through32 + JMP emit_lit_memmove_match_emit_encodeSnappyBetterBlockAsm64K_memmove_move_33through64 + +emit_lit_memmove_match_emit_encodeSnappyBetterBlockAsm64K_memmove_move_8: + MOVQ (R10), R11 + MOVQ R11, (CX) + JMP memmove_end_copy_match_emit_encodeSnappyBetterBlockAsm64K + +emit_lit_memmove_match_emit_encodeSnappyBetterBlockAsm64K_memmove_move_8through16: + MOVQ (R10), R11 + MOVQ -8(R10)(R9*1), R10 + MOVQ R11, (CX) + MOVQ R10, -8(CX)(R9*1) + JMP memmove_end_copy_match_emit_encodeSnappyBetterBlockAsm64K + +emit_lit_memmove_match_emit_encodeSnappyBetterBlockAsm64K_memmove_move_17through32: + MOVOU (R10), X0 + MOVOU -16(R10)(R9*1), X1 + MOVOU X0, (CX) + MOVOU X1, -16(CX)(R9*1) + JMP memmove_end_copy_match_emit_encodeSnappyBetterBlockAsm64K + +emit_lit_memmove_match_emit_encodeSnappyBetterBlockAsm64K_memmove_move_33through64: + MOVOU (R10), X0 + MOVOU 16(R10), X1 + MOVOU -32(R10)(R9*1), X2 + MOVOU -16(R10)(R9*1), X3 + MOVOU X0, (CX) + MOVOU X1, 16(CX) + MOVOU X2, -32(CX)(R9*1) + MOVOU X3, -16(CX)(R9*1) + +memmove_end_copy_match_emit_encodeSnappyBetterBlockAsm64K: + MOVQ SI, CX + JMP emit_literal_done_match_emit_encodeSnappyBetterBlockAsm64K + +memmove_long_match_emit_encodeSnappyBetterBlockAsm64K: + LEAQ (CX)(R9*1), SI + + // genMemMoveLong + MOVOU (R10), X0 + MOVOU 16(R10), X1 + MOVOU -32(R10)(R9*1), X2 + MOVOU -16(R10)(R9*1), X3 + MOVQ R9, R13 + SHRQ $0x05, R13 + MOVQ CX, R11 + ANDL $0x0000001f, R11 + MOVQ $0x00000040, R14 + SUBQ R11, R14 + DECQ R13 + JA emit_lit_memmove_long_match_emit_encodeSnappyBetterBlockAsm64Klarge_forward_sse_loop_32 + LEAQ -32(R10)(R14*1), R11 + LEAQ -32(CX)(R14*1), R15 + +emit_lit_memmove_long_match_emit_encodeSnappyBetterBlockAsm64Klarge_big_loop_back: + MOVOU (R11), X4 + MOVOU 16(R11), X5 + MOVOA X4, (R15) + MOVOA X5, 16(R15) + ADDQ $0x20, R15 + ADDQ $0x20, R11 + ADDQ $0x20, R14 + DECQ R13 + JNA emit_lit_memmove_long_match_emit_encodeSnappyBetterBlockAsm64Klarge_big_loop_back + +emit_lit_memmove_long_match_emit_encodeSnappyBetterBlockAsm64Klarge_forward_sse_loop_32: + MOVOU -32(R10)(R14*1), X4 + MOVOU -16(R10)(R14*1), X5 + MOVOA X4, -32(CX)(R14*1) + MOVOA X5, -16(CX)(R14*1) + ADDQ $0x20, R14 + CMPQ R9, R14 + JAE emit_lit_memmove_long_match_emit_encodeSnappyBetterBlockAsm64Klarge_forward_sse_loop_32 + MOVOU X0, (CX) + MOVOU X1, 16(CX) + MOVOU X2, -32(CX)(R9*1) + MOVOU X3, -16(CX)(R9*1) + MOVQ SI, CX + +emit_literal_done_match_emit_encodeSnappyBetterBlockAsm64K: + ADDL R12, DX + ADDL 
$0x04, R12 + MOVL DX, 12(SP) + + // emitCopy +two_byte_offset_match_nolit_encodeSnappyBetterBlockAsm64K: + CMPL R12, $0x40 + JBE two_byte_offset_short_match_nolit_encodeSnappyBetterBlockAsm64K + MOVB $0xee, (CX) + MOVW R8, 1(CX) + LEAL -60(R12), R12 + ADDQ $0x03, CX + JMP two_byte_offset_match_nolit_encodeSnappyBetterBlockAsm64K + +two_byte_offset_short_match_nolit_encodeSnappyBetterBlockAsm64K: + MOVL R12, SI + SHLL $0x02, SI + CMPL R12, $0x0c + JAE emit_copy_three_match_nolit_encodeSnappyBetterBlockAsm64K + CMPL R8, $0x00000800 + JAE emit_copy_three_match_nolit_encodeSnappyBetterBlockAsm64K + LEAL -15(SI), SI + MOVB R8, 1(CX) + SHRL $0x08, R8 + SHLL $0x05, R8 + ORL R8, SI + MOVB SI, (CX) + ADDQ $0x02, CX + JMP match_nolit_emitcopy_end_encodeSnappyBetterBlockAsm64K + +emit_copy_three_match_nolit_encodeSnappyBetterBlockAsm64K: + LEAL -2(SI), SI + MOVB SI, (CX) + MOVW R8, 1(CX) + ADDQ $0x03, CX + +match_nolit_emitcopy_end_encodeSnappyBetterBlockAsm64K: + CMPL DX, 8(SP) + JAE emit_remainder_encodeSnappyBetterBlockAsm64K + CMPQ CX, (SP) + JB match_nolit_dst_ok_encodeSnappyBetterBlockAsm64K + MOVQ $0x00000000, ret+56(FP) + RET + +match_nolit_dst_ok_encodeSnappyBetterBlockAsm64K: + MOVQ $0x00cf1bbcdcbfa563, SI + MOVQ $0x9e3779b1, R8 + LEAQ 1(DI), DI + LEAQ -2(DX), R9 + MOVQ (BX)(DI*1), R10 + MOVQ 1(BX)(DI*1), R11 + MOVQ (BX)(R9*1), R12 + MOVQ 1(BX)(R9*1), R13 + SHLQ $0x08, R10 + IMULQ SI, R10 + SHRQ $0x30, R10 + SHLQ $0x20, R11 + IMULQ R8, R11 + SHRQ $0x33, R11 + SHLQ $0x08, R12 + IMULQ SI, R12 + SHRQ $0x30, R12 + SHLQ $0x20, R13 + IMULQ R8, R13 + SHRQ $0x33, R13 + LEAQ 1(DI), R8 + LEAQ 1(R9), R14 + MOVL DI, (AX)(R10*4) + MOVL R9, (AX)(R12*4) + MOVL R8, 262144(AX)(R11*4) + MOVL R14, 262144(AX)(R13*4) + LEAQ 1(R9)(DI*1), R8 + SHRQ $0x01, R8 + ADDQ $0x01, DI + SUBQ $0x01, R9 + +index_loop_encodeSnappyBetterBlockAsm64K: + CMPQ R8, R9 + JAE search_loop_encodeSnappyBetterBlockAsm64K + MOVQ (BX)(DI*1), R10 + MOVQ (BX)(R8*1), R11 + SHLQ $0x08, R10 + IMULQ SI, R10 + SHRQ $0x30, R10 + SHLQ $0x08, R11 + IMULQ SI, R11 + SHRQ $0x30, R11 + MOVL DI, (AX)(R10*4) + MOVL R8, (AX)(R11*4) + ADDQ $0x02, DI + ADDQ $0x02, R8 + JMP index_loop_encodeSnappyBetterBlockAsm64K + +emit_remainder_encodeSnappyBetterBlockAsm64K: + MOVQ src_len+32(FP), AX + SUBL 12(SP), AX + LEAQ 3(CX)(AX*1), AX + CMPQ AX, (SP) + JB emit_remainder_ok_encodeSnappyBetterBlockAsm64K + MOVQ $0x00000000, ret+56(FP) + RET + +emit_remainder_ok_encodeSnappyBetterBlockAsm64K: + MOVQ src_len+32(FP), AX + MOVL 12(SP), DX + CMPL DX, AX + JEQ emit_literal_done_emit_remainder_encodeSnappyBetterBlockAsm64K + MOVL AX, SI + MOVL AX, 12(SP) + LEAQ (BX)(DX*1), AX + SUBL DX, SI + LEAL -1(SI), DX + CMPL DX, $0x3c + JB one_byte_emit_remainder_encodeSnappyBetterBlockAsm64K + CMPL DX, $0x00000100 + JB two_bytes_emit_remainder_encodeSnappyBetterBlockAsm64K + JB three_bytes_emit_remainder_encodeSnappyBetterBlockAsm64K + +three_bytes_emit_remainder_encodeSnappyBetterBlockAsm64K: + MOVB $0xf4, (CX) + MOVW DX, 1(CX) + ADDQ $0x03, CX + JMP memmove_long_emit_remainder_encodeSnappyBetterBlockAsm64K + +two_bytes_emit_remainder_encodeSnappyBetterBlockAsm64K: + MOVB $0xf0, (CX) + MOVB DL, 1(CX) + ADDQ $0x02, CX + CMPL DX, $0x40 + JB memmove_emit_remainder_encodeSnappyBetterBlockAsm64K + JMP memmove_long_emit_remainder_encodeSnappyBetterBlockAsm64K + +one_byte_emit_remainder_encodeSnappyBetterBlockAsm64K: + SHLB $0x02, DL + MOVB DL, (CX) + ADDQ $0x01, CX + +memmove_emit_remainder_encodeSnappyBetterBlockAsm64K: + LEAQ (CX)(SI*1), DX + MOVL SI, BX + + // genMemMoveShort + CMPQ BX, 
$0x03 + JB emit_lit_memmove_emit_remainder_encodeSnappyBetterBlockAsm64K_memmove_move_1or2 + JE emit_lit_memmove_emit_remainder_encodeSnappyBetterBlockAsm64K_memmove_move_3 + CMPQ BX, $0x08 + JB emit_lit_memmove_emit_remainder_encodeSnappyBetterBlockAsm64K_memmove_move_4through7 + CMPQ BX, $0x10 + JBE emit_lit_memmove_emit_remainder_encodeSnappyBetterBlockAsm64K_memmove_move_8through16 + CMPQ BX, $0x20 + JBE emit_lit_memmove_emit_remainder_encodeSnappyBetterBlockAsm64K_memmove_move_17through32 + JMP emit_lit_memmove_emit_remainder_encodeSnappyBetterBlockAsm64K_memmove_move_33through64 + +emit_lit_memmove_emit_remainder_encodeSnappyBetterBlockAsm64K_memmove_move_1or2: + MOVB (AX), SI + MOVB -1(AX)(BX*1), AL + MOVB SI, (CX) + MOVB AL, -1(CX)(BX*1) + JMP memmove_end_copy_emit_remainder_encodeSnappyBetterBlockAsm64K + +emit_lit_memmove_emit_remainder_encodeSnappyBetterBlockAsm64K_memmove_move_3: + MOVW (AX), SI + MOVB 2(AX), AL + MOVW SI, (CX) + MOVB AL, 2(CX) + JMP memmove_end_copy_emit_remainder_encodeSnappyBetterBlockAsm64K + +emit_lit_memmove_emit_remainder_encodeSnappyBetterBlockAsm64K_memmove_move_4through7: + MOVL (AX), SI + MOVL -4(AX)(BX*1), AX + MOVL SI, (CX) + MOVL AX, -4(CX)(BX*1) + JMP memmove_end_copy_emit_remainder_encodeSnappyBetterBlockAsm64K + +emit_lit_memmove_emit_remainder_encodeSnappyBetterBlockAsm64K_memmove_move_8through16: + MOVQ (AX), SI + MOVQ -8(AX)(BX*1), AX + MOVQ SI, (CX) + MOVQ AX, -8(CX)(BX*1) + JMP memmove_end_copy_emit_remainder_encodeSnappyBetterBlockAsm64K + +emit_lit_memmove_emit_remainder_encodeSnappyBetterBlockAsm64K_memmove_move_17through32: + MOVOU (AX), X0 + MOVOU -16(AX)(BX*1), X1 + MOVOU X0, (CX) + MOVOU X1, -16(CX)(BX*1) + JMP memmove_end_copy_emit_remainder_encodeSnappyBetterBlockAsm64K + +emit_lit_memmove_emit_remainder_encodeSnappyBetterBlockAsm64K_memmove_move_33through64: + MOVOU (AX), X0 + MOVOU 16(AX), X1 + MOVOU -32(AX)(BX*1), X2 + MOVOU -16(AX)(BX*1), X3 + MOVOU X0, (CX) + MOVOU X1, 16(CX) + MOVOU X2, -32(CX)(BX*1) + MOVOU X3, -16(CX)(BX*1) + +memmove_end_copy_emit_remainder_encodeSnappyBetterBlockAsm64K: + MOVQ DX, CX + JMP emit_literal_done_emit_remainder_encodeSnappyBetterBlockAsm64K + +memmove_long_emit_remainder_encodeSnappyBetterBlockAsm64K: + LEAQ (CX)(SI*1), DX + MOVL SI, BX + + // genMemMoveLong + MOVOU (AX), X0 + MOVOU 16(AX), X1 + MOVOU -32(AX)(BX*1), X2 + MOVOU -16(AX)(BX*1), X3 + MOVQ BX, DI + SHRQ $0x05, DI + MOVQ CX, SI + ANDL $0x0000001f, SI + MOVQ $0x00000040, R8 + SUBQ SI, R8 + DECQ DI + JA emit_lit_memmove_long_emit_remainder_encodeSnappyBetterBlockAsm64Klarge_forward_sse_loop_32 + LEAQ -32(AX)(R8*1), SI + LEAQ -32(CX)(R8*1), R9 + +emit_lit_memmove_long_emit_remainder_encodeSnappyBetterBlockAsm64Klarge_big_loop_back: + MOVOU (SI), X4 + MOVOU 16(SI), X5 + MOVOA X4, (R9) + MOVOA X5, 16(R9) + ADDQ $0x20, R9 + ADDQ $0x20, SI + ADDQ $0x20, R8 + DECQ DI + JNA emit_lit_memmove_long_emit_remainder_encodeSnappyBetterBlockAsm64Klarge_big_loop_back + +emit_lit_memmove_long_emit_remainder_encodeSnappyBetterBlockAsm64Klarge_forward_sse_loop_32: + MOVOU -32(AX)(R8*1), X4 + MOVOU -16(AX)(R8*1), X5 + MOVOA X4, -32(CX)(R8*1) + MOVOA X5, -16(CX)(R8*1) + ADDQ $0x20, R8 + CMPQ BX, R8 + JAE emit_lit_memmove_long_emit_remainder_encodeSnappyBetterBlockAsm64Klarge_forward_sse_loop_32 + MOVOU X0, (CX) + MOVOU X1, 16(CX) + MOVOU X2, -32(CX)(BX*1) + MOVOU X3, -16(CX)(BX*1) + MOVQ DX, CX + +emit_literal_done_emit_remainder_encodeSnappyBetterBlockAsm64K: + MOVQ dst_base+0(FP), AX + SUBQ AX, CX + MOVQ CX, ret+56(FP) + RET + +// func 
encodeSnappyBetterBlockAsm12B(dst []byte, src []byte, tmp *[81920]byte) int +// Requires: BMI, SSE2 +TEXT ·encodeSnappyBetterBlockAsm12B(SB), $24-64 + MOVQ tmp+48(FP), AX + MOVQ dst_base+0(FP), CX + MOVQ $0x00000280, DX + MOVQ AX, BX + PXOR X0, X0 + +zero_loop_encodeSnappyBetterBlockAsm12B: + MOVOU X0, (BX) + MOVOU X0, 16(BX) + MOVOU X0, 32(BX) + MOVOU X0, 48(BX) + MOVOU X0, 64(BX) + MOVOU X0, 80(BX) + MOVOU X0, 96(BX) + MOVOU X0, 112(BX) + ADDQ $0x80, BX + DECQ DX + JNZ zero_loop_encodeSnappyBetterBlockAsm12B + MOVL $0x00000000, 12(SP) + MOVQ src_len+32(FP), DX + LEAQ -9(DX), BX + LEAQ -8(DX), SI + MOVL SI, 8(SP) + SHRQ $0x05, DX + SUBL DX, BX + LEAQ (CX)(BX*1), BX + MOVQ BX, (SP) + MOVL $0x00000001, DX + MOVL $0x00000000, 16(SP) + MOVQ src_base+24(FP), BX + +search_loop_encodeSnappyBetterBlockAsm12B: + MOVL DX, SI + SUBL 12(SP), SI + SHRL $0x06, SI + LEAL 1(DX)(SI*1), SI + CMPL SI, 8(SP) + JAE emit_remainder_encodeSnappyBetterBlockAsm12B + MOVQ (BX)(DX*1), DI + MOVL SI, 20(SP) + MOVQ $0x0000cf1bbcdcbf9b, R9 + MOVQ $0x9e3779b1, SI + MOVQ DI, R10 + MOVQ DI, R11 + SHLQ $0x10, R10 + IMULQ R9, R10 + SHRQ $0x32, R10 + SHLQ $0x20, R11 + IMULQ SI, R11 + SHRQ $0x34, R11 + MOVL (AX)(R10*4), SI + MOVL 65536(AX)(R11*4), R8 + MOVL DX, (AX)(R10*4) + MOVL DX, 65536(AX)(R11*4) + MOVQ (BX)(SI*1), R10 + MOVQ (BX)(R8*1), R11 + CMPQ R10, DI + JEQ candidate_match_encodeSnappyBetterBlockAsm12B + CMPQ R11, DI + JNE no_short_found_encodeSnappyBetterBlockAsm12B + MOVL R8, SI + JMP candidate_match_encodeSnappyBetterBlockAsm12B + +no_short_found_encodeSnappyBetterBlockAsm12B: + CMPL R10, DI + JEQ candidate_match_encodeSnappyBetterBlockAsm12B + CMPL R11, DI + JEQ candidateS_match_encodeSnappyBetterBlockAsm12B + MOVL 20(SP), DX + JMP search_loop_encodeSnappyBetterBlockAsm12B + +candidateS_match_encodeSnappyBetterBlockAsm12B: + SHRQ $0x08, DI + MOVQ DI, R10 + SHLQ $0x10, R10 + IMULQ R9, R10 + SHRQ $0x32, R10 + MOVL (AX)(R10*4), SI + INCL DX + MOVL DX, (AX)(R10*4) + CMPL (BX)(SI*1), DI + JEQ candidate_match_encodeSnappyBetterBlockAsm12B + DECL DX + MOVL R8, SI + +candidate_match_encodeSnappyBetterBlockAsm12B: + MOVL 12(SP), DI + TESTL SI, SI + JZ match_extend_back_end_encodeSnappyBetterBlockAsm12B + +match_extend_back_loop_encodeSnappyBetterBlockAsm12B: + CMPL DX, DI + JBE match_extend_back_end_encodeSnappyBetterBlockAsm12B + MOVB -1(BX)(SI*1), R8 + MOVB -1(BX)(DX*1), R9 + CMPB R8, R9 + JNE match_extend_back_end_encodeSnappyBetterBlockAsm12B + LEAL -1(DX), DX + DECL SI + JZ match_extend_back_end_encodeSnappyBetterBlockAsm12B + JMP match_extend_back_loop_encodeSnappyBetterBlockAsm12B + +match_extend_back_end_encodeSnappyBetterBlockAsm12B: + MOVL DX, DI + SUBL 12(SP), DI + LEAQ 3(CX)(DI*1), DI + CMPQ DI, (SP) + JB match_dst_size_check_encodeSnappyBetterBlockAsm12B + MOVQ $0x00000000, ret+56(FP) + RET + +match_dst_size_check_encodeSnappyBetterBlockAsm12B: + MOVL DX, DI + ADDL $0x04, DX + ADDL $0x04, SI + MOVQ src_len+32(FP), R8 + SUBL DX, R8 + LEAQ (BX)(DX*1), R9 + LEAQ (BX)(SI*1), R10 + + // matchLen + XORL R12, R12 + +matchlen_loopback_16_match_nolit_encodeSnappyBetterBlockAsm12B: + CMPL R8, $0x10 + JB matchlen_match8_match_nolit_encodeSnappyBetterBlockAsm12B + MOVQ (R9)(R12*1), R11 + MOVQ 8(R9)(R12*1), R13 + XORQ (R10)(R12*1), R11 + JNZ matchlen_bsf_8_match_nolit_encodeSnappyBetterBlockAsm12B + XORQ 8(R10)(R12*1), R13 + JNZ matchlen_bsf_16match_nolit_encodeSnappyBetterBlockAsm12B + LEAL -16(R8), R8 + LEAL 16(R12), R12 + JMP matchlen_loopback_16_match_nolit_encodeSnappyBetterBlockAsm12B + 
+matchlen_bsf_16match_nolit_encodeSnappyBetterBlockAsm12B: +#ifdef GOAMD64_v3 + TZCNTQ R13, R13 + +#else + BSFQ R13, R13 + +#endif + SARQ $0x03, R13 + LEAL 8(R12)(R13*1), R12 + JMP match_nolit_end_encodeSnappyBetterBlockAsm12B + +matchlen_match8_match_nolit_encodeSnappyBetterBlockAsm12B: + CMPL R8, $0x08 + JB matchlen_match4_match_nolit_encodeSnappyBetterBlockAsm12B + MOVQ (R9)(R12*1), R11 + XORQ (R10)(R12*1), R11 + JNZ matchlen_bsf_8_match_nolit_encodeSnappyBetterBlockAsm12B + LEAL -8(R8), R8 + LEAL 8(R12), R12 + JMP matchlen_match4_match_nolit_encodeSnappyBetterBlockAsm12B + +matchlen_bsf_8_match_nolit_encodeSnappyBetterBlockAsm12B: +#ifdef GOAMD64_v3 + TZCNTQ R11, R11 + +#else + BSFQ R11, R11 + +#endif + SARQ $0x03, R11 + LEAL (R12)(R11*1), R12 + JMP match_nolit_end_encodeSnappyBetterBlockAsm12B + +matchlen_match4_match_nolit_encodeSnappyBetterBlockAsm12B: + CMPL R8, $0x04 + JB matchlen_match2_match_nolit_encodeSnappyBetterBlockAsm12B + MOVL (R9)(R12*1), R11 + CMPL (R10)(R12*1), R11 + JNE matchlen_match2_match_nolit_encodeSnappyBetterBlockAsm12B + LEAL -4(R8), R8 + LEAL 4(R12), R12 + +matchlen_match2_match_nolit_encodeSnappyBetterBlockAsm12B: + CMPL R8, $0x01 + JE matchlen_match1_match_nolit_encodeSnappyBetterBlockAsm12B + JB match_nolit_end_encodeSnappyBetterBlockAsm12B + MOVW (R9)(R12*1), R11 + CMPW (R10)(R12*1), R11 + JNE matchlen_match1_match_nolit_encodeSnappyBetterBlockAsm12B + LEAL 2(R12), R12 + SUBL $0x02, R8 + JZ match_nolit_end_encodeSnappyBetterBlockAsm12B + +matchlen_match1_match_nolit_encodeSnappyBetterBlockAsm12B: + MOVB (R9)(R12*1), R11 + CMPB (R10)(R12*1), R11 + JNE match_nolit_end_encodeSnappyBetterBlockAsm12B + LEAL 1(R12), R12 + +match_nolit_end_encodeSnappyBetterBlockAsm12B: + MOVL DX, R8 + SUBL SI, R8 + + // Check if repeat + MOVL R8, 16(SP) + MOVL 12(SP), SI + CMPL SI, DI + JEQ emit_literal_done_match_emit_encodeSnappyBetterBlockAsm12B + MOVL DI, R9 + MOVL DI, 12(SP) + LEAQ (BX)(SI*1), R10 + SUBL SI, R9 + LEAL -1(R9), SI + CMPL SI, $0x3c + JB one_byte_match_emit_encodeSnappyBetterBlockAsm12B + CMPL SI, $0x00000100 + JB two_bytes_match_emit_encodeSnappyBetterBlockAsm12B + JB three_bytes_match_emit_encodeSnappyBetterBlockAsm12B + +three_bytes_match_emit_encodeSnappyBetterBlockAsm12B: + MOVB $0xf4, (CX) + MOVW SI, 1(CX) + ADDQ $0x03, CX + JMP memmove_long_match_emit_encodeSnappyBetterBlockAsm12B + +two_bytes_match_emit_encodeSnappyBetterBlockAsm12B: + MOVB $0xf0, (CX) + MOVB SI, 1(CX) + ADDQ $0x02, CX + CMPL SI, $0x40 + JB memmove_match_emit_encodeSnappyBetterBlockAsm12B + JMP memmove_long_match_emit_encodeSnappyBetterBlockAsm12B + +one_byte_match_emit_encodeSnappyBetterBlockAsm12B: + SHLB $0x02, SI + MOVB SI, (CX) + ADDQ $0x01, CX + +memmove_match_emit_encodeSnappyBetterBlockAsm12B: + LEAQ (CX)(R9*1), SI + + // genMemMoveShort + CMPQ R9, $0x08 + JBE emit_lit_memmove_match_emit_encodeSnappyBetterBlockAsm12B_memmove_move_8 + CMPQ R9, $0x10 + JBE emit_lit_memmove_match_emit_encodeSnappyBetterBlockAsm12B_memmove_move_8through16 + CMPQ R9, $0x20 + JBE emit_lit_memmove_match_emit_encodeSnappyBetterBlockAsm12B_memmove_move_17through32 + JMP emit_lit_memmove_match_emit_encodeSnappyBetterBlockAsm12B_memmove_move_33through64 + +emit_lit_memmove_match_emit_encodeSnappyBetterBlockAsm12B_memmove_move_8: + MOVQ (R10), R11 + MOVQ R11, (CX) + JMP memmove_end_copy_match_emit_encodeSnappyBetterBlockAsm12B + +emit_lit_memmove_match_emit_encodeSnappyBetterBlockAsm12B_memmove_move_8through16: + MOVQ (R10), R11 + MOVQ -8(R10)(R9*1), R10 + MOVQ R11, (CX) + MOVQ R10, -8(CX)(R9*1) + JMP 
memmove_end_copy_match_emit_encodeSnappyBetterBlockAsm12B + +emit_lit_memmove_match_emit_encodeSnappyBetterBlockAsm12B_memmove_move_17through32: + MOVOU (R10), X0 + MOVOU -16(R10)(R9*1), X1 + MOVOU X0, (CX) + MOVOU X1, -16(CX)(R9*1) + JMP memmove_end_copy_match_emit_encodeSnappyBetterBlockAsm12B + +emit_lit_memmove_match_emit_encodeSnappyBetterBlockAsm12B_memmove_move_33through64: + MOVOU (R10), X0 + MOVOU 16(R10), X1 + MOVOU -32(R10)(R9*1), X2 + MOVOU -16(R10)(R9*1), X3 + MOVOU X0, (CX) + MOVOU X1, 16(CX) + MOVOU X2, -32(CX)(R9*1) + MOVOU X3, -16(CX)(R9*1) + +memmove_end_copy_match_emit_encodeSnappyBetterBlockAsm12B: + MOVQ SI, CX + JMP emit_literal_done_match_emit_encodeSnappyBetterBlockAsm12B + +memmove_long_match_emit_encodeSnappyBetterBlockAsm12B: + LEAQ (CX)(R9*1), SI + + // genMemMoveLong + MOVOU (R10), X0 + MOVOU 16(R10), X1 + MOVOU -32(R10)(R9*1), X2 + MOVOU -16(R10)(R9*1), X3 + MOVQ R9, R13 + SHRQ $0x05, R13 + MOVQ CX, R11 + ANDL $0x0000001f, R11 + MOVQ $0x00000040, R14 + SUBQ R11, R14 + DECQ R13 + JA emit_lit_memmove_long_match_emit_encodeSnappyBetterBlockAsm12Blarge_forward_sse_loop_32 + LEAQ -32(R10)(R14*1), R11 + LEAQ -32(CX)(R14*1), R15 + +emit_lit_memmove_long_match_emit_encodeSnappyBetterBlockAsm12Blarge_big_loop_back: + MOVOU (R11), X4 + MOVOU 16(R11), X5 + MOVOA X4, (R15) + MOVOA X5, 16(R15) + ADDQ $0x20, R15 + ADDQ $0x20, R11 + ADDQ $0x20, R14 + DECQ R13 + JNA emit_lit_memmove_long_match_emit_encodeSnappyBetterBlockAsm12Blarge_big_loop_back + +emit_lit_memmove_long_match_emit_encodeSnappyBetterBlockAsm12Blarge_forward_sse_loop_32: + MOVOU -32(R10)(R14*1), X4 + MOVOU -16(R10)(R14*1), X5 + MOVOA X4, -32(CX)(R14*1) + MOVOA X5, -16(CX)(R14*1) + ADDQ $0x20, R14 + CMPQ R9, R14 + JAE emit_lit_memmove_long_match_emit_encodeSnappyBetterBlockAsm12Blarge_forward_sse_loop_32 + MOVOU X0, (CX) + MOVOU X1, 16(CX) + MOVOU X2, -32(CX)(R9*1) + MOVOU X3, -16(CX)(R9*1) + MOVQ SI, CX + +emit_literal_done_match_emit_encodeSnappyBetterBlockAsm12B: + ADDL R12, DX + ADDL $0x04, R12 + MOVL DX, 12(SP) + + // emitCopy +two_byte_offset_match_nolit_encodeSnappyBetterBlockAsm12B: + CMPL R12, $0x40 + JBE two_byte_offset_short_match_nolit_encodeSnappyBetterBlockAsm12B + MOVB $0xee, (CX) + MOVW R8, 1(CX) + LEAL -60(R12), R12 + ADDQ $0x03, CX + JMP two_byte_offset_match_nolit_encodeSnappyBetterBlockAsm12B + +two_byte_offset_short_match_nolit_encodeSnappyBetterBlockAsm12B: + MOVL R12, SI + SHLL $0x02, SI + CMPL R12, $0x0c + JAE emit_copy_three_match_nolit_encodeSnappyBetterBlockAsm12B + CMPL R8, $0x00000800 + JAE emit_copy_three_match_nolit_encodeSnappyBetterBlockAsm12B + LEAL -15(SI), SI + MOVB R8, 1(CX) + SHRL $0x08, R8 + SHLL $0x05, R8 + ORL R8, SI + MOVB SI, (CX) + ADDQ $0x02, CX + JMP match_nolit_emitcopy_end_encodeSnappyBetterBlockAsm12B + +emit_copy_three_match_nolit_encodeSnappyBetterBlockAsm12B: + LEAL -2(SI), SI + MOVB SI, (CX) + MOVW R8, 1(CX) + ADDQ $0x03, CX + +match_nolit_emitcopy_end_encodeSnappyBetterBlockAsm12B: + CMPL DX, 8(SP) + JAE emit_remainder_encodeSnappyBetterBlockAsm12B + CMPQ CX, (SP) + JB match_nolit_dst_ok_encodeSnappyBetterBlockAsm12B + MOVQ $0x00000000, ret+56(FP) + RET + +match_nolit_dst_ok_encodeSnappyBetterBlockAsm12B: + MOVQ $0x0000cf1bbcdcbf9b, SI + MOVQ $0x9e3779b1, R8 + LEAQ 1(DI), DI + LEAQ -2(DX), R9 + MOVQ (BX)(DI*1), R10 + MOVQ 1(BX)(DI*1), R11 + MOVQ (BX)(R9*1), R12 + MOVQ 1(BX)(R9*1), R13 + SHLQ $0x10, R10 + IMULQ SI, R10 + SHRQ $0x32, R10 + SHLQ $0x20, R11 + IMULQ R8, R11 + SHRQ $0x34, R11 + SHLQ $0x10, R12 + IMULQ SI, R12 + SHRQ $0x32, R12 + SHLQ $0x20, R13 + 
IMULQ R8, R13 + SHRQ $0x34, R13 + LEAQ 1(DI), R8 + LEAQ 1(R9), R14 + MOVL DI, (AX)(R10*4) + MOVL R9, (AX)(R12*4) + MOVL R8, 65536(AX)(R11*4) + MOVL R14, 65536(AX)(R13*4) + LEAQ 1(R9)(DI*1), R8 + SHRQ $0x01, R8 + ADDQ $0x01, DI + SUBQ $0x01, R9 + +index_loop_encodeSnappyBetterBlockAsm12B: + CMPQ R8, R9 + JAE search_loop_encodeSnappyBetterBlockAsm12B + MOVQ (BX)(DI*1), R10 + MOVQ (BX)(R8*1), R11 + SHLQ $0x10, R10 + IMULQ SI, R10 + SHRQ $0x32, R10 + SHLQ $0x10, R11 + IMULQ SI, R11 + SHRQ $0x32, R11 + MOVL DI, (AX)(R10*4) + MOVL R8, (AX)(R11*4) + ADDQ $0x02, DI + ADDQ $0x02, R8 + JMP index_loop_encodeSnappyBetterBlockAsm12B + +emit_remainder_encodeSnappyBetterBlockAsm12B: + MOVQ src_len+32(FP), AX + SUBL 12(SP), AX + LEAQ 3(CX)(AX*1), AX + CMPQ AX, (SP) + JB emit_remainder_ok_encodeSnappyBetterBlockAsm12B + MOVQ $0x00000000, ret+56(FP) + RET + +emit_remainder_ok_encodeSnappyBetterBlockAsm12B: + MOVQ src_len+32(FP), AX + MOVL 12(SP), DX + CMPL DX, AX + JEQ emit_literal_done_emit_remainder_encodeSnappyBetterBlockAsm12B + MOVL AX, SI + MOVL AX, 12(SP) + LEAQ (BX)(DX*1), AX + SUBL DX, SI + LEAL -1(SI), DX + CMPL DX, $0x3c + JB one_byte_emit_remainder_encodeSnappyBetterBlockAsm12B + CMPL DX, $0x00000100 + JB two_bytes_emit_remainder_encodeSnappyBetterBlockAsm12B + JB three_bytes_emit_remainder_encodeSnappyBetterBlockAsm12B + +three_bytes_emit_remainder_encodeSnappyBetterBlockAsm12B: + MOVB $0xf4, (CX) + MOVW DX, 1(CX) + ADDQ $0x03, CX + JMP memmove_long_emit_remainder_encodeSnappyBetterBlockAsm12B + +two_bytes_emit_remainder_encodeSnappyBetterBlockAsm12B: + MOVB $0xf0, (CX) + MOVB DL, 1(CX) + ADDQ $0x02, CX + CMPL DX, $0x40 + JB memmove_emit_remainder_encodeSnappyBetterBlockAsm12B + JMP memmove_long_emit_remainder_encodeSnappyBetterBlockAsm12B + +one_byte_emit_remainder_encodeSnappyBetterBlockAsm12B: + SHLB $0x02, DL + MOVB DL, (CX) + ADDQ $0x01, CX + +memmove_emit_remainder_encodeSnappyBetterBlockAsm12B: + LEAQ (CX)(SI*1), DX + MOVL SI, BX + + // genMemMoveShort + CMPQ BX, $0x03 + JB emit_lit_memmove_emit_remainder_encodeSnappyBetterBlockAsm12B_memmove_move_1or2 + JE emit_lit_memmove_emit_remainder_encodeSnappyBetterBlockAsm12B_memmove_move_3 + CMPQ BX, $0x08 + JB emit_lit_memmove_emit_remainder_encodeSnappyBetterBlockAsm12B_memmove_move_4through7 + CMPQ BX, $0x10 + JBE emit_lit_memmove_emit_remainder_encodeSnappyBetterBlockAsm12B_memmove_move_8through16 + CMPQ BX, $0x20 + JBE emit_lit_memmove_emit_remainder_encodeSnappyBetterBlockAsm12B_memmove_move_17through32 + JMP emit_lit_memmove_emit_remainder_encodeSnappyBetterBlockAsm12B_memmove_move_33through64 + +emit_lit_memmove_emit_remainder_encodeSnappyBetterBlockAsm12B_memmove_move_1or2: + MOVB (AX), SI + MOVB -1(AX)(BX*1), AL + MOVB SI, (CX) + MOVB AL, -1(CX)(BX*1) + JMP memmove_end_copy_emit_remainder_encodeSnappyBetterBlockAsm12B + +emit_lit_memmove_emit_remainder_encodeSnappyBetterBlockAsm12B_memmove_move_3: + MOVW (AX), SI + MOVB 2(AX), AL + MOVW SI, (CX) + MOVB AL, 2(CX) + JMP memmove_end_copy_emit_remainder_encodeSnappyBetterBlockAsm12B + +emit_lit_memmove_emit_remainder_encodeSnappyBetterBlockAsm12B_memmove_move_4through7: + MOVL (AX), SI + MOVL -4(AX)(BX*1), AX + MOVL SI, (CX) + MOVL AX, -4(CX)(BX*1) + JMP memmove_end_copy_emit_remainder_encodeSnappyBetterBlockAsm12B + +emit_lit_memmove_emit_remainder_encodeSnappyBetterBlockAsm12B_memmove_move_8through16: + MOVQ (AX), SI + MOVQ -8(AX)(BX*1), AX + MOVQ SI, (CX) + MOVQ AX, -8(CX)(BX*1) + JMP memmove_end_copy_emit_remainder_encodeSnappyBetterBlockAsm12B + 
+emit_lit_memmove_emit_remainder_encodeSnappyBetterBlockAsm12B_memmove_move_17through32: + MOVOU (AX), X0 + MOVOU -16(AX)(BX*1), X1 + MOVOU X0, (CX) + MOVOU X1, -16(CX)(BX*1) + JMP memmove_end_copy_emit_remainder_encodeSnappyBetterBlockAsm12B + +emit_lit_memmove_emit_remainder_encodeSnappyBetterBlockAsm12B_memmove_move_33through64: + MOVOU (AX), X0 + MOVOU 16(AX), X1 + MOVOU -32(AX)(BX*1), X2 + MOVOU -16(AX)(BX*1), X3 + MOVOU X0, (CX) + MOVOU X1, 16(CX) + MOVOU X2, -32(CX)(BX*1) + MOVOU X3, -16(CX)(BX*1) + +memmove_end_copy_emit_remainder_encodeSnappyBetterBlockAsm12B: + MOVQ DX, CX + JMP emit_literal_done_emit_remainder_encodeSnappyBetterBlockAsm12B + +memmove_long_emit_remainder_encodeSnappyBetterBlockAsm12B: + LEAQ (CX)(SI*1), DX + MOVL SI, BX + + // genMemMoveLong + MOVOU (AX), X0 + MOVOU 16(AX), X1 + MOVOU -32(AX)(BX*1), X2 + MOVOU -16(AX)(BX*1), X3 + MOVQ BX, DI + SHRQ $0x05, DI + MOVQ CX, SI + ANDL $0x0000001f, SI + MOVQ $0x00000040, R8 + SUBQ SI, R8 + DECQ DI + JA emit_lit_memmove_long_emit_remainder_encodeSnappyBetterBlockAsm12Blarge_forward_sse_loop_32 + LEAQ -32(AX)(R8*1), SI + LEAQ -32(CX)(R8*1), R9 + +emit_lit_memmove_long_emit_remainder_encodeSnappyBetterBlockAsm12Blarge_big_loop_back: + MOVOU (SI), X4 + MOVOU 16(SI), X5 + MOVOA X4, (R9) + MOVOA X5, 16(R9) + ADDQ $0x20, R9 + ADDQ $0x20, SI + ADDQ $0x20, R8 + DECQ DI + JNA emit_lit_memmove_long_emit_remainder_encodeSnappyBetterBlockAsm12Blarge_big_loop_back + +emit_lit_memmove_long_emit_remainder_encodeSnappyBetterBlockAsm12Blarge_forward_sse_loop_32: + MOVOU -32(AX)(R8*1), X4 + MOVOU -16(AX)(R8*1), X5 + MOVOA X4, -32(CX)(R8*1) + MOVOA X5, -16(CX)(R8*1) + ADDQ $0x20, R8 + CMPQ BX, R8 + JAE emit_lit_memmove_long_emit_remainder_encodeSnappyBetterBlockAsm12Blarge_forward_sse_loop_32 + MOVOU X0, (CX) + MOVOU X1, 16(CX) + MOVOU X2, -32(CX)(BX*1) + MOVOU X3, -16(CX)(BX*1) + MOVQ DX, CX + +emit_literal_done_emit_remainder_encodeSnappyBetterBlockAsm12B: + MOVQ dst_base+0(FP), AX + SUBQ AX, CX + MOVQ CX, ret+56(FP) + RET + +// func encodeSnappyBetterBlockAsm10B(dst []byte, src []byte, tmp *[20480]byte) int +// Requires: BMI, SSE2 +TEXT ·encodeSnappyBetterBlockAsm10B(SB), $24-64 + MOVQ tmp+48(FP), AX + MOVQ dst_base+0(FP), CX + MOVQ $0x000000a0, DX + MOVQ AX, BX + PXOR X0, X0 + +zero_loop_encodeSnappyBetterBlockAsm10B: + MOVOU X0, (BX) + MOVOU X0, 16(BX) + MOVOU X0, 32(BX) + MOVOU X0, 48(BX) + MOVOU X0, 64(BX) + MOVOU X0, 80(BX) + MOVOU X0, 96(BX) + MOVOU X0, 112(BX) + ADDQ $0x80, BX + DECQ DX + JNZ zero_loop_encodeSnappyBetterBlockAsm10B + MOVL $0x00000000, 12(SP) + MOVQ src_len+32(FP), DX + LEAQ -9(DX), BX + LEAQ -8(DX), SI + MOVL SI, 8(SP) + SHRQ $0x05, DX + SUBL DX, BX + LEAQ (CX)(BX*1), BX + MOVQ BX, (SP) + MOVL $0x00000001, DX + MOVL $0x00000000, 16(SP) + MOVQ src_base+24(FP), BX + +search_loop_encodeSnappyBetterBlockAsm10B: + MOVL DX, SI + SUBL 12(SP), SI + SHRL $0x05, SI + LEAL 1(DX)(SI*1), SI + CMPL SI, 8(SP) + JAE emit_remainder_encodeSnappyBetterBlockAsm10B + MOVQ (BX)(DX*1), DI + MOVL SI, 20(SP) + MOVQ $0x0000cf1bbcdcbf9b, R9 + MOVQ $0x9e3779b1, SI + MOVQ DI, R10 + MOVQ DI, R11 + SHLQ $0x10, R10 + IMULQ R9, R10 + SHRQ $0x34, R10 + SHLQ $0x20, R11 + IMULQ SI, R11 + SHRQ $0x36, R11 + MOVL (AX)(R10*4), SI + MOVL 16384(AX)(R11*4), R8 + MOVL DX, (AX)(R10*4) + MOVL DX, 16384(AX)(R11*4) + MOVQ (BX)(SI*1), R10 + MOVQ (BX)(R8*1), R11 + CMPQ R10, DI + JEQ candidate_match_encodeSnappyBetterBlockAsm10B + CMPQ R11, DI + JNE no_short_found_encodeSnappyBetterBlockAsm10B + MOVL R8, SI + JMP candidate_match_encodeSnappyBetterBlockAsm10B + 
+no_short_found_encodeSnappyBetterBlockAsm10B: + CMPL R10, DI + JEQ candidate_match_encodeSnappyBetterBlockAsm10B + CMPL R11, DI + JEQ candidateS_match_encodeSnappyBetterBlockAsm10B + MOVL 20(SP), DX + JMP search_loop_encodeSnappyBetterBlockAsm10B + +candidateS_match_encodeSnappyBetterBlockAsm10B: + SHRQ $0x08, DI + MOVQ DI, R10 + SHLQ $0x10, R10 + IMULQ R9, R10 + SHRQ $0x34, R10 + MOVL (AX)(R10*4), SI + INCL DX + MOVL DX, (AX)(R10*4) + CMPL (BX)(SI*1), DI + JEQ candidate_match_encodeSnappyBetterBlockAsm10B + DECL DX + MOVL R8, SI + +candidate_match_encodeSnappyBetterBlockAsm10B: + MOVL 12(SP), DI + TESTL SI, SI + JZ match_extend_back_end_encodeSnappyBetterBlockAsm10B + +match_extend_back_loop_encodeSnappyBetterBlockAsm10B: + CMPL DX, DI + JBE match_extend_back_end_encodeSnappyBetterBlockAsm10B + MOVB -1(BX)(SI*1), R8 + MOVB -1(BX)(DX*1), R9 + CMPB R8, R9 + JNE match_extend_back_end_encodeSnappyBetterBlockAsm10B + LEAL -1(DX), DX + DECL SI + JZ match_extend_back_end_encodeSnappyBetterBlockAsm10B + JMP match_extend_back_loop_encodeSnappyBetterBlockAsm10B + +match_extend_back_end_encodeSnappyBetterBlockAsm10B: + MOVL DX, DI + SUBL 12(SP), DI + LEAQ 3(CX)(DI*1), DI + CMPQ DI, (SP) + JB match_dst_size_check_encodeSnappyBetterBlockAsm10B + MOVQ $0x00000000, ret+56(FP) + RET + +match_dst_size_check_encodeSnappyBetterBlockAsm10B: + MOVL DX, DI + ADDL $0x04, DX + ADDL $0x04, SI + MOVQ src_len+32(FP), R8 + SUBL DX, R8 + LEAQ (BX)(DX*1), R9 + LEAQ (BX)(SI*1), R10 + + // matchLen + XORL R12, R12 + +matchlen_loopback_16_match_nolit_encodeSnappyBetterBlockAsm10B: + CMPL R8, $0x10 + JB matchlen_match8_match_nolit_encodeSnappyBetterBlockAsm10B + MOVQ (R9)(R12*1), R11 + MOVQ 8(R9)(R12*1), R13 + XORQ (R10)(R12*1), R11 + JNZ matchlen_bsf_8_match_nolit_encodeSnappyBetterBlockAsm10B + XORQ 8(R10)(R12*1), R13 + JNZ matchlen_bsf_16match_nolit_encodeSnappyBetterBlockAsm10B + LEAL -16(R8), R8 + LEAL 16(R12), R12 + JMP matchlen_loopback_16_match_nolit_encodeSnappyBetterBlockAsm10B + +matchlen_bsf_16match_nolit_encodeSnappyBetterBlockAsm10B: +#ifdef GOAMD64_v3 + TZCNTQ R13, R13 + +#else + BSFQ R13, R13 + +#endif + SARQ $0x03, R13 + LEAL 8(R12)(R13*1), R12 + JMP match_nolit_end_encodeSnappyBetterBlockAsm10B + +matchlen_match8_match_nolit_encodeSnappyBetterBlockAsm10B: + CMPL R8, $0x08 + JB matchlen_match4_match_nolit_encodeSnappyBetterBlockAsm10B + MOVQ (R9)(R12*1), R11 + XORQ (R10)(R12*1), R11 + JNZ matchlen_bsf_8_match_nolit_encodeSnappyBetterBlockAsm10B + LEAL -8(R8), R8 + LEAL 8(R12), R12 + JMP matchlen_match4_match_nolit_encodeSnappyBetterBlockAsm10B + +matchlen_bsf_8_match_nolit_encodeSnappyBetterBlockAsm10B: +#ifdef GOAMD64_v3 + TZCNTQ R11, R11 + +#else + BSFQ R11, R11 + +#endif + SARQ $0x03, R11 + LEAL (R12)(R11*1), R12 + JMP match_nolit_end_encodeSnappyBetterBlockAsm10B + +matchlen_match4_match_nolit_encodeSnappyBetterBlockAsm10B: + CMPL R8, $0x04 + JB matchlen_match2_match_nolit_encodeSnappyBetterBlockAsm10B + MOVL (R9)(R12*1), R11 + CMPL (R10)(R12*1), R11 + JNE matchlen_match2_match_nolit_encodeSnappyBetterBlockAsm10B + LEAL -4(R8), R8 + LEAL 4(R12), R12 + +matchlen_match2_match_nolit_encodeSnappyBetterBlockAsm10B: + CMPL R8, $0x01 + JE matchlen_match1_match_nolit_encodeSnappyBetterBlockAsm10B + JB match_nolit_end_encodeSnappyBetterBlockAsm10B + MOVW (R9)(R12*1), R11 + CMPW (R10)(R12*1), R11 + JNE matchlen_match1_match_nolit_encodeSnappyBetterBlockAsm10B + LEAL 2(R12), R12 + SUBL $0x02, R8 + JZ match_nolit_end_encodeSnappyBetterBlockAsm10B + +matchlen_match1_match_nolit_encodeSnappyBetterBlockAsm10B: + 
MOVB (R9)(R12*1), R11 + CMPB (R10)(R12*1), R11 + JNE match_nolit_end_encodeSnappyBetterBlockAsm10B + LEAL 1(R12), R12 + +match_nolit_end_encodeSnappyBetterBlockAsm10B: + MOVL DX, R8 + SUBL SI, R8 + + // Check if repeat + MOVL R8, 16(SP) + MOVL 12(SP), SI + CMPL SI, DI + JEQ emit_literal_done_match_emit_encodeSnappyBetterBlockAsm10B + MOVL DI, R9 + MOVL DI, 12(SP) + LEAQ (BX)(SI*1), R10 + SUBL SI, R9 + LEAL -1(R9), SI + CMPL SI, $0x3c + JB one_byte_match_emit_encodeSnappyBetterBlockAsm10B + CMPL SI, $0x00000100 + JB two_bytes_match_emit_encodeSnappyBetterBlockAsm10B + JB three_bytes_match_emit_encodeSnappyBetterBlockAsm10B + +three_bytes_match_emit_encodeSnappyBetterBlockAsm10B: + MOVB $0xf4, (CX) + MOVW SI, 1(CX) + ADDQ $0x03, CX + JMP memmove_long_match_emit_encodeSnappyBetterBlockAsm10B + +two_bytes_match_emit_encodeSnappyBetterBlockAsm10B: + MOVB $0xf0, (CX) + MOVB SI, 1(CX) + ADDQ $0x02, CX + CMPL SI, $0x40 + JB memmove_match_emit_encodeSnappyBetterBlockAsm10B + JMP memmove_long_match_emit_encodeSnappyBetterBlockAsm10B + +one_byte_match_emit_encodeSnappyBetterBlockAsm10B: + SHLB $0x02, SI + MOVB SI, (CX) + ADDQ $0x01, CX + +memmove_match_emit_encodeSnappyBetterBlockAsm10B: + LEAQ (CX)(R9*1), SI + + // genMemMoveShort + CMPQ R9, $0x08 + JBE emit_lit_memmove_match_emit_encodeSnappyBetterBlockAsm10B_memmove_move_8 + CMPQ R9, $0x10 + JBE emit_lit_memmove_match_emit_encodeSnappyBetterBlockAsm10B_memmove_move_8through16 + CMPQ R9, $0x20 + JBE emit_lit_memmove_match_emit_encodeSnappyBetterBlockAsm10B_memmove_move_17through32 + JMP emit_lit_memmove_match_emit_encodeSnappyBetterBlockAsm10B_memmove_move_33through64 + +emit_lit_memmove_match_emit_encodeSnappyBetterBlockAsm10B_memmove_move_8: + MOVQ (R10), R11 + MOVQ R11, (CX) + JMP memmove_end_copy_match_emit_encodeSnappyBetterBlockAsm10B + +emit_lit_memmove_match_emit_encodeSnappyBetterBlockAsm10B_memmove_move_8through16: + MOVQ (R10), R11 + MOVQ -8(R10)(R9*1), R10 + MOVQ R11, (CX) + MOVQ R10, -8(CX)(R9*1) + JMP memmove_end_copy_match_emit_encodeSnappyBetterBlockAsm10B + +emit_lit_memmove_match_emit_encodeSnappyBetterBlockAsm10B_memmove_move_17through32: + MOVOU (R10), X0 + MOVOU -16(R10)(R9*1), X1 + MOVOU X0, (CX) + MOVOU X1, -16(CX)(R9*1) + JMP memmove_end_copy_match_emit_encodeSnappyBetterBlockAsm10B + +emit_lit_memmove_match_emit_encodeSnappyBetterBlockAsm10B_memmove_move_33through64: + MOVOU (R10), X0 + MOVOU 16(R10), X1 + MOVOU -32(R10)(R9*1), X2 + MOVOU -16(R10)(R9*1), X3 + MOVOU X0, (CX) + MOVOU X1, 16(CX) + MOVOU X2, -32(CX)(R9*1) + MOVOU X3, -16(CX)(R9*1) + +memmove_end_copy_match_emit_encodeSnappyBetterBlockAsm10B: + MOVQ SI, CX + JMP emit_literal_done_match_emit_encodeSnappyBetterBlockAsm10B + +memmove_long_match_emit_encodeSnappyBetterBlockAsm10B: + LEAQ (CX)(R9*1), SI + + // genMemMoveLong + MOVOU (R10), X0 + MOVOU 16(R10), X1 + MOVOU -32(R10)(R9*1), X2 + MOVOU -16(R10)(R9*1), X3 + MOVQ R9, R13 + SHRQ $0x05, R13 + MOVQ CX, R11 + ANDL $0x0000001f, R11 + MOVQ $0x00000040, R14 + SUBQ R11, R14 + DECQ R13 + JA emit_lit_memmove_long_match_emit_encodeSnappyBetterBlockAsm10Blarge_forward_sse_loop_32 + LEAQ -32(R10)(R14*1), R11 + LEAQ -32(CX)(R14*1), R15 + +emit_lit_memmove_long_match_emit_encodeSnappyBetterBlockAsm10Blarge_big_loop_back: + MOVOU (R11), X4 + MOVOU 16(R11), X5 + MOVOA X4, (R15) + MOVOA X5, 16(R15) + ADDQ $0x20, R15 + ADDQ $0x20, R11 + ADDQ $0x20, R14 + DECQ R13 + JNA emit_lit_memmove_long_match_emit_encodeSnappyBetterBlockAsm10Blarge_big_loop_back + 
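+// genMemMoveLong (above and below) moves literal runs longer than 64 bytes:
+// the first and last 32 source bytes are parked in X0-X3 up front, the bulk
+// is copied 32 bytes per iteration with aligned MOVOA stores, and the parked
+// head and tail are stored last to cover the unaligned edges. Roughly, in Go:
+//
+//	head, tail := src[:32], src[n-32:] // kept in registers (X0-X3)
+//	// ... aligned 32-byte block copies over the middle ...
+//	copy(dst[:32], head)
+//	copy(dst[n-32:], tail) // rewrites the edges, whatever the alignment
+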
+emit_lit_memmove_long_match_emit_encodeSnappyBetterBlockAsm10Blarge_forward_sse_loop_32: + MOVOU -32(R10)(R14*1), X4 + MOVOU -16(R10)(R14*1), X5 + MOVOA X4, -32(CX)(R14*1) + MOVOA X5, -16(CX)(R14*1) + ADDQ $0x20, R14 + CMPQ R9, R14 + JAE emit_lit_memmove_long_match_emit_encodeSnappyBetterBlockAsm10Blarge_forward_sse_loop_32 + MOVOU X0, (CX) + MOVOU X1, 16(CX) + MOVOU X2, -32(CX)(R9*1) + MOVOU X3, -16(CX)(R9*1) + MOVQ SI, CX + +emit_literal_done_match_emit_encodeSnappyBetterBlockAsm10B: + ADDL R12, DX + ADDL $0x04, R12 + MOVL DX, 12(SP) + + // emitCopy +two_byte_offset_match_nolit_encodeSnappyBetterBlockAsm10B: + CMPL R12, $0x40 + JBE two_byte_offset_short_match_nolit_encodeSnappyBetterBlockAsm10B + MOVB $0xee, (CX) + MOVW R8, 1(CX) + LEAL -60(R12), R12 + ADDQ $0x03, CX + JMP two_byte_offset_match_nolit_encodeSnappyBetterBlockAsm10B + +two_byte_offset_short_match_nolit_encodeSnappyBetterBlockAsm10B: + MOVL R12, SI + SHLL $0x02, SI + CMPL R12, $0x0c + JAE emit_copy_three_match_nolit_encodeSnappyBetterBlockAsm10B + CMPL R8, $0x00000800 + JAE emit_copy_three_match_nolit_encodeSnappyBetterBlockAsm10B + LEAL -15(SI), SI + MOVB R8, 1(CX) + SHRL $0x08, R8 + SHLL $0x05, R8 + ORL R8, SI + MOVB SI, (CX) + ADDQ $0x02, CX + JMP match_nolit_emitcopy_end_encodeSnappyBetterBlockAsm10B + +emit_copy_three_match_nolit_encodeSnappyBetterBlockAsm10B: + LEAL -2(SI), SI + MOVB SI, (CX) + MOVW R8, 1(CX) + ADDQ $0x03, CX + +match_nolit_emitcopy_end_encodeSnappyBetterBlockAsm10B: + CMPL DX, 8(SP) + JAE emit_remainder_encodeSnappyBetterBlockAsm10B + CMPQ CX, (SP) + JB match_nolit_dst_ok_encodeSnappyBetterBlockAsm10B + MOVQ $0x00000000, ret+56(FP) + RET + +match_nolit_dst_ok_encodeSnappyBetterBlockAsm10B: + MOVQ $0x0000cf1bbcdcbf9b, SI + MOVQ $0x9e3779b1, R8 + LEAQ 1(DI), DI + LEAQ -2(DX), R9 + MOVQ (BX)(DI*1), R10 + MOVQ 1(BX)(DI*1), R11 + MOVQ (BX)(R9*1), R12 + MOVQ 1(BX)(R9*1), R13 + SHLQ $0x10, R10 + IMULQ SI, R10 + SHRQ $0x34, R10 + SHLQ $0x20, R11 + IMULQ R8, R11 + SHRQ $0x36, R11 + SHLQ $0x10, R12 + IMULQ SI, R12 + SHRQ $0x34, R12 + SHLQ $0x20, R13 + IMULQ R8, R13 + SHRQ $0x36, R13 + LEAQ 1(DI), R8 + LEAQ 1(R9), R14 + MOVL DI, (AX)(R10*4) + MOVL R9, (AX)(R12*4) + MOVL R8, 16384(AX)(R11*4) + MOVL R14, 16384(AX)(R13*4) + LEAQ 1(R9)(DI*1), R8 + SHRQ $0x01, R8 + ADDQ $0x01, DI + SUBQ $0x01, R9 + +index_loop_encodeSnappyBetterBlockAsm10B: + CMPQ R8, R9 + JAE search_loop_encodeSnappyBetterBlockAsm10B + MOVQ (BX)(DI*1), R10 + MOVQ (BX)(R8*1), R11 + SHLQ $0x10, R10 + IMULQ SI, R10 + SHRQ $0x34, R10 + SHLQ $0x10, R11 + IMULQ SI, R11 + SHRQ $0x34, R11 + MOVL DI, (AX)(R10*4) + MOVL R8, (AX)(R11*4) + ADDQ $0x02, DI + ADDQ $0x02, R8 + JMP index_loop_encodeSnappyBetterBlockAsm10B + +emit_remainder_encodeSnappyBetterBlockAsm10B: + MOVQ src_len+32(FP), AX + SUBL 12(SP), AX + LEAQ 3(CX)(AX*1), AX + CMPQ AX, (SP) + JB emit_remainder_ok_encodeSnappyBetterBlockAsm10B + MOVQ $0x00000000, ret+56(FP) + RET + +emit_remainder_ok_encodeSnappyBetterBlockAsm10B: + MOVQ src_len+32(FP), AX + MOVL 12(SP), DX + CMPL DX, AX + JEQ emit_literal_done_emit_remainder_encodeSnappyBetterBlockAsm10B + MOVL AX, SI + MOVL AX, 12(SP) + LEAQ (BX)(DX*1), AX + SUBL DX, SI + LEAL -1(SI), DX + CMPL DX, $0x3c + JB one_byte_emit_remainder_encodeSnappyBetterBlockAsm10B + CMPL DX, $0x00000100 + JB two_bytes_emit_remainder_encodeSnappyBetterBlockAsm10B + JB three_bytes_emit_remainder_encodeSnappyBetterBlockAsm10B + +three_bytes_emit_remainder_encodeSnappyBetterBlockAsm10B: + MOVB $0xf4, (CX) + MOVW DX, 1(CX) + ADDQ $0x03, CX + JMP 
memmove_long_emit_remainder_encodeSnappyBetterBlockAsm10B + +two_bytes_emit_remainder_encodeSnappyBetterBlockAsm10B: + MOVB $0xf0, (CX) + MOVB DL, 1(CX) + ADDQ $0x02, CX + CMPL DX, $0x40 + JB memmove_emit_remainder_encodeSnappyBetterBlockAsm10B + JMP memmove_long_emit_remainder_encodeSnappyBetterBlockAsm10B + +one_byte_emit_remainder_encodeSnappyBetterBlockAsm10B: + SHLB $0x02, DL + MOVB DL, (CX) + ADDQ $0x01, CX + +memmove_emit_remainder_encodeSnappyBetterBlockAsm10B: + LEAQ (CX)(SI*1), DX + MOVL SI, BX + + // genMemMoveShort + CMPQ BX, $0x03 + JB emit_lit_memmove_emit_remainder_encodeSnappyBetterBlockAsm10B_memmove_move_1or2 + JE emit_lit_memmove_emit_remainder_encodeSnappyBetterBlockAsm10B_memmove_move_3 + CMPQ BX, $0x08 + JB emit_lit_memmove_emit_remainder_encodeSnappyBetterBlockAsm10B_memmove_move_4through7 + CMPQ BX, $0x10 + JBE emit_lit_memmove_emit_remainder_encodeSnappyBetterBlockAsm10B_memmove_move_8through16 + CMPQ BX, $0x20 + JBE emit_lit_memmove_emit_remainder_encodeSnappyBetterBlockAsm10B_memmove_move_17through32 + JMP emit_lit_memmove_emit_remainder_encodeSnappyBetterBlockAsm10B_memmove_move_33through64 + +emit_lit_memmove_emit_remainder_encodeSnappyBetterBlockAsm10B_memmove_move_1or2: + MOVB (AX), SI + MOVB -1(AX)(BX*1), AL + MOVB SI, (CX) + MOVB AL, -1(CX)(BX*1) + JMP memmove_end_copy_emit_remainder_encodeSnappyBetterBlockAsm10B + +emit_lit_memmove_emit_remainder_encodeSnappyBetterBlockAsm10B_memmove_move_3: + MOVW (AX), SI + MOVB 2(AX), AL + MOVW SI, (CX) + MOVB AL, 2(CX) + JMP memmove_end_copy_emit_remainder_encodeSnappyBetterBlockAsm10B + +emit_lit_memmove_emit_remainder_encodeSnappyBetterBlockAsm10B_memmove_move_4through7: + MOVL (AX), SI + MOVL -4(AX)(BX*1), AX + MOVL SI, (CX) + MOVL AX, -4(CX)(BX*1) + JMP memmove_end_copy_emit_remainder_encodeSnappyBetterBlockAsm10B + +emit_lit_memmove_emit_remainder_encodeSnappyBetterBlockAsm10B_memmove_move_8through16: + MOVQ (AX), SI + MOVQ -8(AX)(BX*1), AX + MOVQ SI, (CX) + MOVQ AX, -8(CX)(BX*1) + JMP memmove_end_copy_emit_remainder_encodeSnappyBetterBlockAsm10B + +emit_lit_memmove_emit_remainder_encodeSnappyBetterBlockAsm10B_memmove_move_17through32: + MOVOU (AX), X0 + MOVOU -16(AX)(BX*1), X1 + MOVOU X0, (CX) + MOVOU X1, -16(CX)(BX*1) + JMP memmove_end_copy_emit_remainder_encodeSnappyBetterBlockAsm10B + +emit_lit_memmove_emit_remainder_encodeSnappyBetterBlockAsm10B_memmove_move_33through64: + MOVOU (AX), X0 + MOVOU 16(AX), X1 + MOVOU -32(AX)(BX*1), X2 + MOVOU -16(AX)(BX*1), X3 + MOVOU X0, (CX) + MOVOU X1, 16(CX) + MOVOU X2, -32(CX)(BX*1) + MOVOU X3, -16(CX)(BX*1) + +memmove_end_copy_emit_remainder_encodeSnappyBetterBlockAsm10B: + MOVQ DX, CX + JMP emit_literal_done_emit_remainder_encodeSnappyBetterBlockAsm10B + +memmove_long_emit_remainder_encodeSnappyBetterBlockAsm10B: + LEAQ (CX)(SI*1), DX + MOVL SI, BX + + // genMemMoveLong + MOVOU (AX), X0 + MOVOU 16(AX), X1 + MOVOU -32(AX)(BX*1), X2 + MOVOU -16(AX)(BX*1), X3 + MOVQ BX, DI + SHRQ $0x05, DI + MOVQ CX, SI + ANDL $0x0000001f, SI + MOVQ $0x00000040, R8 + SUBQ SI, R8 + DECQ DI + JA emit_lit_memmove_long_emit_remainder_encodeSnappyBetterBlockAsm10Blarge_forward_sse_loop_32 + LEAQ -32(AX)(R8*1), SI + LEAQ -32(CX)(R8*1), R9 + +emit_lit_memmove_long_emit_remainder_encodeSnappyBetterBlockAsm10Blarge_big_loop_back: + MOVOU (SI), X4 + MOVOU 16(SI), X5 + MOVOA X4, (R9) + MOVOA X5, 16(R9) + ADDQ $0x20, R9 + ADDQ $0x20, SI + ADDQ $0x20, R8 + DECQ DI + JNA emit_lit_memmove_long_emit_remainder_encodeSnappyBetterBlockAsm10Blarge_big_loop_back + 
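+// emit_remainder flushes everything after the last match as a single literal
+// run. Note the recurring bailout pattern in these functions: whenever the
+// output cursor would pass the limit stashed at (SP), 0 is stored to ret and
+// the function returns. Roughly, on the Go side (shape assumed from that
+// zero-return convention, not the vendored caller verbatim):
+//
+//	if n := encodeSnappyBetterBlockAsm10B(dst, src, &tmp); n > 0 {
+//		dst = dst[:n] // compressed block
+//	} else {
+//		// dst too small / input not compressible enough: store uncompressed
+//	}
+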
+emit_lit_memmove_long_emit_remainder_encodeSnappyBetterBlockAsm10Blarge_forward_sse_loop_32: + MOVOU -32(AX)(R8*1), X4 + MOVOU -16(AX)(R8*1), X5 + MOVOA X4, -32(CX)(R8*1) + MOVOA X5, -16(CX)(R8*1) + ADDQ $0x20, R8 + CMPQ BX, R8 + JAE emit_lit_memmove_long_emit_remainder_encodeSnappyBetterBlockAsm10Blarge_forward_sse_loop_32 + MOVOU X0, (CX) + MOVOU X1, 16(CX) + MOVOU X2, -32(CX)(BX*1) + MOVOU X3, -16(CX)(BX*1) + MOVQ DX, CX + +emit_literal_done_emit_remainder_encodeSnappyBetterBlockAsm10B: + MOVQ dst_base+0(FP), AX + SUBQ AX, CX + MOVQ CX, ret+56(FP) + RET + +// func encodeSnappyBetterBlockAsm8B(dst []byte, src []byte, tmp *[5120]byte) int +// Requires: BMI, SSE2 +TEXT ·encodeSnappyBetterBlockAsm8B(SB), $24-64 + MOVQ tmp+48(FP), AX + MOVQ dst_base+0(FP), CX + MOVQ $0x00000028, DX + MOVQ AX, BX + PXOR X0, X0 + +zero_loop_encodeSnappyBetterBlockAsm8B: + MOVOU X0, (BX) + MOVOU X0, 16(BX) + MOVOU X0, 32(BX) + MOVOU X0, 48(BX) + MOVOU X0, 64(BX) + MOVOU X0, 80(BX) + MOVOU X0, 96(BX) + MOVOU X0, 112(BX) + ADDQ $0x80, BX + DECQ DX + JNZ zero_loop_encodeSnappyBetterBlockAsm8B + MOVL $0x00000000, 12(SP) + MOVQ src_len+32(FP), DX + LEAQ -9(DX), BX + LEAQ -8(DX), SI + MOVL SI, 8(SP) + SHRQ $0x05, DX + SUBL DX, BX + LEAQ (CX)(BX*1), BX + MOVQ BX, (SP) + MOVL $0x00000001, DX + MOVL $0x00000000, 16(SP) + MOVQ src_base+24(FP), BX + +search_loop_encodeSnappyBetterBlockAsm8B: + MOVL DX, SI + SUBL 12(SP), SI + SHRL $0x04, SI + LEAL 1(DX)(SI*1), SI + CMPL SI, 8(SP) + JAE emit_remainder_encodeSnappyBetterBlockAsm8B + MOVQ (BX)(DX*1), DI + MOVL SI, 20(SP) + MOVQ $0x0000cf1bbcdcbf9b, R9 + MOVQ $0x9e3779b1, SI + MOVQ DI, R10 + MOVQ DI, R11 + SHLQ $0x10, R10 + IMULQ R9, R10 + SHRQ $0x36, R10 + SHLQ $0x20, R11 + IMULQ SI, R11 + SHRQ $0x38, R11 + MOVL (AX)(R10*4), SI + MOVL 4096(AX)(R11*4), R8 + MOVL DX, (AX)(R10*4) + MOVL DX, 4096(AX)(R11*4) + MOVQ (BX)(SI*1), R10 + MOVQ (BX)(R8*1), R11 + CMPQ R10, DI + JEQ candidate_match_encodeSnappyBetterBlockAsm8B + CMPQ R11, DI + JNE no_short_found_encodeSnappyBetterBlockAsm8B + MOVL R8, SI + JMP candidate_match_encodeSnappyBetterBlockAsm8B + +no_short_found_encodeSnappyBetterBlockAsm8B: + CMPL R10, DI + JEQ candidate_match_encodeSnappyBetterBlockAsm8B + CMPL R11, DI + JEQ candidateS_match_encodeSnappyBetterBlockAsm8B + MOVL 20(SP), DX + JMP search_loop_encodeSnappyBetterBlockAsm8B + +candidateS_match_encodeSnappyBetterBlockAsm8B: + SHRQ $0x08, DI + MOVQ DI, R10 + SHLQ $0x10, R10 + IMULQ R9, R10 + SHRQ $0x36, R10 + MOVL (AX)(R10*4), SI + INCL DX + MOVL DX, (AX)(R10*4) + CMPL (BX)(SI*1), DI + JEQ candidate_match_encodeSnappyBetterBlockAsm8B + DECL DX + MOVL R8, SI + +candidate_match_encodeSnappyBetterBlockAsm8B: + MOVL 12(SP), DI + TESTL SI, SI + JZ match_extend_back_end_encodeSnappyBetterBlockAsm8B + +match_extend_back_loop_encodeSnappyBetterBlockAsm8B: + CMPL DX, DI + JBE match_extend_back_end_encodeSnappyBetterBlockAsm8B + MOVB -1(BX)(SI*1), R8 + MOVB -1(BX)(DX*1), R9 + CMPB R8, R9 + JNE match_extend_back_end_encodeSnappyBetterBlockAsm8B + LEAL -1(DX), DX + DECL SI + JZ match_extend_back_end_encodeSnappyBetterBlockAsm8B + JMP match_extend_back_loop_encodeSnappyBetterBlockAsm8B + +match_extend_back_end_encodeSnappyBetterBlockAsm8B: + MOVL DX, DI + SUBL 12(SP), DI + LEAQ 3(CX)(DI*1), DI + CMPQ DI, (SP) + JB match_dst_size_check_encodeSnappyBetterBlockAsm8B + MOVQ $0x00000000, ret+56(FP) + RET + +match_dst_size_check_encodeSnappyBetterBlockAsm8B: + MOVL DX, DI + ADDL $0x04, DX + ADDL $0x04, SI + MOVQ src_len+32(FP), R8 + SUBL DX, R8 + LEAQ (BX)(DX*1), R9 + LEAQ (BX)(SI*1), 
R10 + + // matchLen + XORL R12, R12 + +matchlen_loopback_16_match_nolit_encodeSnappyBetterBlockAsm8B: + CMPL R8, $0x10 + JB matchlen_match8_match_nolit_encodeSnappyBetterBlockAsm8B + MOVQ (R9)(R12*1), R11 + MOVQ 8(R9)(R12*1), R13 + XORQ (R10)(R12*1), R11 + JNZ matchlen_bsf_8_match_nolit_encodeSnappyBetterBlockAsm8B + XORQ 8(R10)(R12*1), R13 + JNZ matchlen_bsf_16match_nolit_encodeSnappyBetterBlockAsm8B + LEAL -16(R8), R8 + LEAL 16(R12), R12 + JMP matchlen_loopback_16_match_nolit_encodeSnappyBetterBlockAsm8B + +matchlen_bsf_16match_nolit_encodeSnappyBetterBlockAsm8B: +#ifdef GOAMD64_v3 + TZCNTQ R13, R13 + +#else + BSFQ R13, R13 + +#endif + SARQ $0x03, R13 + LEAL 8(R12)(R13*1), R12 + JMP match_nolit_end_encodeSnappyBetterBlockAsm8B + +matchlen_match8_match_nolit_encodeSnappyBetterBlockAsm8B: + CMPL R8, $0x08 + JB matchlen_match4_match_nolit_encodeSnappyBetterBlockAsm8B + MOVQ (R9)(R12*1), R11 + XORQ (R10)(R12*1), R11 + JNZ matchlen_bsf_8_match_nolit_encodeSnappyBetterBlockAsm8B + LEAL -8(R8), R8 + LEAL 8(R12), R12 + JMP matchlen_match4_match_nolit_encodeSnappyBetterBlockAsm8B + +matchlen_bsf_8_match_nolit_encodeSnappyBetterBlockAsm8B: +#ifdef GOAMD64_v3 + TZCNTQ R11, R11 + +#else + BSFQ R11, R11 + +#endif + SARQ $0x03, R11 + LEAL (R12)(R11*1), R12 + JMP match_nolit_end_encodeSnappyBetterBlockAsm8B + +matchlen_match4_match_nolit_encodeSnappyBetterBlockAsm8B: + CMPL R8, $0x04 + JB matchlen_match2_match_nolit_encodeSnappyBetterBlockAsm8B + MOVL (R9)(R12*1), R11 + CMPL (R10)(R12*1), R11 + JNE matchlen_match2_match_nolit_encodeSnappyBetterBlockAsm8B + LEAL -4(R8), R8 + LEAL 4(R12), R12 + +matchlen_match2_match_nolit_encodeSnappyBetterBlockAsm8B: + CMPL R8, $0x01 + JE matchlen_match1_match_nolit_encodeSnappyBetterBlockAsm8B + JB match_nolit_end_encodeSnappyBetterBlockAsm8B + MOVW (R9)(R12*1), R11 + CMPW (R10)(R12*1), R11 + JNE matchlen_match1_match_nolit_encodeSnappyBetterBlockAsm8B + LEAL 2(R12), R12 + SUBL $0x02, R8 + JZ match_nolit_end_encodeSnappyBetterBlockAsm8B + +matchlen_match1_match_nolit_encodeSnappyBetterBlockAsm8B: + MOVB (R9)(R12*1), R11 + CMPB (R10)(R12*1), R11 + JNE match_nolit_end_encodeSnappyBetterBlockAsm8B + LEAL 1(R12), R12 + +match_nolit_end_encodeSnappyBetterBlockAsm8B: + MOVL DX, R8 + SUBL SI, R8 + + // Check if repeat + MOVL R8, 16(SP) + MOVL 12(SP), SI + CMPL SI, DI + JEQ emit_literal_done_match_emit_encodeSnappyBetterBlockAsm8B + MOVL DI, R9 + MOVL DI, 12(SP) + LEAQ (BX)(SI*1), R10 + SUBL SI, R9 + LEAL -1(R9), SI + CMPL SI, $0x3c + JB one_byte_match_emit_encodeSnappyBetterBlockAsm8B + CMPL SI, $0x00000100 + JB two_bytes_match_emit_encodeSnappyBetterBlockAsm8B + JB three_bytes_match_emit_encodeSnappyBetterBlockAsm8B + +three_bytes_match_emit_encodeSnappyBetterBlockAsm8B: + MOVB $0xf4, (CX) + MOVW SI, 1(CX) + ADDQ $0x03, CX + JMP memmove_long_match_emit_encodeSnappyBetterBlockAsm8B + +two_bytes_match_emit_encodeSnappyBetterBlockAsm8B: + MOVB $0xf0, (CX) + MOVB SI, 1(CX) + ADDQ $0x02, CX + CMPL SI, $0x40 + JB memmove_match_emit_encodeSnappyBetterBlockAsm8B + JMP memmove_long_match_emit_encodeSnappyBetterBlockAsm8B + +one_byte_match_emit_encodeSnappyBetterBlockAsm8B: + SHLB $0x02, SI + MOVB SI, (CX) + ADDQ $0x01, CX + +memmove_match_emit_encodeSnappyBetterBlockAsm8B: + LEAQ (CX)(R9*1), SI + + // genMemMoveShort + CMPQ R9, $0x08 + JBE emit_lit_memmove_match_emit_encodeSnappyBetterBlockAsm8B_memmove_move_8 + CMPQ R9, $0x10 + JBE emit_lit_memmove_match_emit_encodeSnappyBetterBlockAsm8B_memmove_move_8through16 + CMPQ R9, $0x20 + JBE 
emit_lit_memmove_match_emit_encodeSnappyBetterBlockAsm8B_memmove_move_17through32 + JMP emit_lit_memmove_match_emit_encodeSnappyBetterBlockAsm8B_memmove_move_33through64 + +emit_lit_memmove_match_emit_encodeSnappyBetterBlockAsm8B_memmove_move_8: + MOVQ (R10), R11 + MOVQ R11, (CX) + JMP memmove_end_copy_match_emit_encodeSnappyBetterBlockAsm8B + +emit_lit_memmove_match_emit_encodeSnappyBetterBlockAsm8B_memmove_move_8through16: + MOVQ (R10), R11 + MOVQ -8(R10)(R9*1), R10 + MOVQ R11, (CX) + MOVQ R10, -8(CX)(R9*1) + JMP memmove_end_copy_match_emit_encodeSnappyBetterBlockAsm8B + +emit_lit_memmove_match_emit_encodeSnappyBetterBlockAsm8B_memmove_move_17through32: + MOVOU (R10), X0 + MOVOU -16(R10)(R9*1), X1 + MOVOU X0, (CX) + MOVOU X1, -16(CX)(R9*1) + JMP memmove_end_copy_match_emit_encodeSnappyBetterBlockAsm8B + +emit_lit_memmove_match_emit_encodeSnappyBetterBlockAsm8B_memmove_move_33through64: + MOVOU (R10), X0 + MOVOU 16(R10), X1 + MOVOU -32(R10)(R9*1), X2 + MOVOU -16(R10)(R9*1), X3 + MOVOU X0, (CX) + MOVOU X1, 16(CX) + MOVOU X2, -32(CX)(R9*1) + MOVOU X3, -16(CX)(R9*1) + +memmove_end_copy_match_emit_encodeSnappyBetterBlockAsm8B: + MOVQ SI, CX + JMP emit_literal_done_match_emit_encodeSnappyBetterBlockAsm8B + +memmove_long_match_emit_encodeSnappyBetterBlockAsm8B: + LEAQ (CX)(R9*1), SI + + // genMemMoveLong + MOVOU (R10), X0 + MOVOU 16(R10), X1 + MOVOU -32(R10)(R9*1), X2 + MOVOU -16(R10)(R9*1), X3 + MOVQ R9, R13 + SHRQ $0x05, R13 + MOVQ CX, R11 + ANDL $0x0000001f, R11 + MOVQ $0x00000040, R14 + SUBQ R11, R14 + DECQ R13 + JA emit_lit_memmove_long_match_emit_encodeSnappyBetterBlockAsm8Blarge_forward_sse_loop_32 + LEAQ -32(R10)(R14*1), R11 + LEAQ -32(CX)(R14*1), R15 + +emit_lit_memmove_long_match_emit_encodeSnappyBetterBlockAsm8Blarge_big_loop_back: + MOVOU (R11), X4 + MOVOU 16(R11), X5 + MOVOA X4, (R15) + MOVOA X5, 16(R15) + ADDQ $0x20, R15 + ADDQ $0x20, R11 + ADDQ $0x20, R14 + DECQ R13 + JNA emit_lit_memmove_long_match_emit_encodeSnappyBetterBlockAsm8Blarge_big_loop_back + +emit_lit_memmove_long_match_emit_encodeSnappyBetterBlockAsm8Blarge_forward_sse_loop_32: + MOVOU -32(R10)(R14*1), X4 + MOVOU -16(R10)(R14*1), X5 + MOVOA X4, -32(CX)(R14*1) + MOVOA X5, -16(CX)(R14*1) + ADDQ $0x20, R14 + CMPQ R9, R14 + JAE emit_lit_memmove_long_match_emit_encodeSnappyBetterBlockAsm8Blarge_forward_sse_loop_32 + MOVOU X0, (CX) + MOVOU X1, 16(CX) + MOVOU X2, -32(CX)(R9*1) + MOVOU X3, -16(CX)(R9*1) + MOVQ SI, CX + +emit_literal_done_match_emit_encodeSnappyBetterBlockAsm8B: + ADDL R12, DX + ADDL $0x04, R12 + MOVL DX, 12(SP) + + // emitCopy +two_byte_offset_match_nolit_encodeSnappyBetterBlockAsm8B: + CMPL R12, $0x40 + JBE two_byte_offset_short_match_nolit_encodeSnappyBetterBlockAsm8B + MOVB $0xee, (CX) + MOVW R8, 1(CX) + LEAL -60(R12), R12 + ADDQ $0x03, CX + JMP two_byte_offset_match_nolit_encodeSnappyBetterBlockAsm8B + +two_byte_offset_short_match_nolit_encodeSnappyBetterBlockAsm8B: + MOVL R12, SI + SHLL $0x02, SI + CMPL R12, $0x0c + JAE emit_copy_three_match_nolit_encodeSnappyBetterBlockAsm8B + LEAL -15(SI), SI + MOVB R8, 1(CX) + SHRL $0x08, R8 + SHLL $0x05, R8 + ORL R8, SI + MOVB SI, (CX) + ADDQ $0x02, CX + JMP match_nolit_emitcopy_end_encodeSnappyBetterBlockAsm8B + +emit_copy_three_match_nolit_encodeSnappyBetterBlockAsm8B: + LEAL -2(SI), SI + MOVB SI, (CX) + MOVW R8, 1(CX) + ADDQ $0x03, CX + +match_nolit_emitcopy_end_encodeSnappyBetterBlockAsm8B: + CMPL DX, 8(SP) + JAE emit_remainder_encodeSnappyBetterBlockAsm8B + CMPQ CX, (SP) + JB match_nolit_dst_ok_encodeSnappyBetterBlockAsm8B + MOVQ $0x00000000, ret+56(FP) + RET + 
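+// The emitCopy block above writes Snappy copy elements: matches over 64
+// bytes are first chopped into 60-byte copy-2 chunks (tag 0xee = 59<<2|2),
+// and the remainder becomes a 2-byte copy-1 or a 3-byte copy-2. In this 8B
+// variant offsets always fit below 2048, so the offset check used by the
+// larger variants is elided. A hedged Go sketch of the two short forms
+// (assumes encoding/binary):
+//
+//	// copy-1: length 4-11, offset < 2048
+//	dst[0] = byte(offset>>8)<<5 | byte(length-4)<<2 | 0x01
+//	dst[1] = byte(offset)
+//
+//	// copy-2: length 1-64, 16-bit little-endian offset
+//	dst[0] = byte(length-1)<<2 | 0x02
+//	binary.LittleEndian.PutUint16(dst[1:], uint16(offset))
+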
+match_nolit_dst_ok_encodeSnappyBetterBlockAsm8B: + MOVQ $0x0000cf1bbcdcbf9b, SI + MOVQ $0x9e3779b1, R8 + LEAQ 1(DI), DI + LEAQ -2(DX), R9 + MOVQ (BX)(DI*1), R10 + MOVQ 1(BX)(DI*1), R11 + MOVQ (BX)(R9*1), R12 + MOVQ 1(BX)(R9*1), R13 + SHLQ $0x10, R10 + IMULQ SI, R10 + SHRQ $0x36, R10 + SHLQ $0x20, R11 + IMULQ R8, R11 + SHRQ $0x38, R11 + SHLQ $0x10, R12 + IMULQ SI, R12 + SHRQ $0x36, R12 + SHLQ $0x20, R13 + IMULQ R8, R13 + SHRQ $0x38, R13 + LEAQ 1(DI), R8 + LEAQ 1(R9), R14 + MOVL DI, (AX)(R10*4) + MOVL R9, (AX)(R12*4) + MOVL R8, 4096(AX)(R11*4) + MOVL R14, 4096(AX)(R13*4) + LEAQ 1(R9)(DI*1), R8 + SHRQ $0x01, R8 + ADDQ $0x01, DI + SUBQ $0x01, R9 + +index_loop_encodeSnappyBetterBlockAsm8B: + CMPQ R8, R9 + JAE search_loop_encodeSnappyBetterBlockAsm8B + MOVQ (BX)(DI*1), R10 + MOVQ (BX)(R8*1), R11 + SHLQ $0x10, R10 + IMULQ SI, R10 + SHRQ $0x36, R10 + SHLQ $0x10, R11 + IMULQ SI, R11 + SHRQ $0x36, R11 + MOVL DI, (AX)(R10*4) + MOVL R8, (AX)(R11*4) + ADDQ $0x02, DI + ADDQ $0x02, R8 + JMP index_loop_encodeSnappyBetterBlockAsm8B + +emit_remainder_encodeSnappyBetterBlockAsm8B: + MOVQ src_len+32(FP), AX + SUBL 12(SP), AX + LEAQ 3(CX)(AX*1), AX + CMPQ AX, (SP) + JB emit_remainder_ok_encodeSnappyBetterBlockAsm8B + MOVQ $0x00000000, ret+56(FP) + RET + +emit_remainder_ok_encodeSnappyBetterBlockAsm8B: + MOVQ src_len+32(FP), AX + MOVL 12(SP), DX + CMPL DX, AX + JEQ emit_literal_done_emit_remainder_encodeSnappyBetterBlockAsm8B + MOVL AX, SI + MOVL AX, 12(SP) + LEAQ (BX)(DX*1), AX + SUBL DX, SI + LEAL -1(SI), DX + CMPL DX, $0x3c + JB one_byte_emit_remainder_encodeSnappyBetterBlockAsm8B + CMPL DX, $0x00000100 + JB two_bytes_emit_remainder_encodeSnappyBetterBlockAsm8B + JB three_bytes_emit_remainder_encodeSnappyBetterBlockAsm8B + +three_bytes_emit_remainder_encodeSnappyBetterBlockAsm8B: + MOVB $0xf4, (CX) + MOVW DX, 1(CX) + ADDQ $0x03, CX + JMP memmove_long_emit_remainder_encodeSnappyBetterBlockAsm8B + +two_bytes_emit_remainder_encodeSnappyBetterBlockAsm8B: + MOVB $0xf0, (CX) + MOVB DL, 1(CX) + ADDQ $0x02, CX + CMPL DX, $0x40 + JB memmove_emit_remainder_encodeSnappyBetterBlockAsm8B + JMP memmove_long_emit_remainder_encodeSnappyBetterBlockAsm8B + +one_byte_emit_remainder_encodeSnappyBetterBlockAsm8B: + SHLB $0x02, DL + MOVB DL, (CX) + ADDQ $0x01, CX + +memmove_emit_remainder_encodeSnappyBetterBlockAsm8B: + LEAQ (CX)(SI*1), DX + MOVL SI, BX + + // genMemMoveShort + CMPQ BX, $0x03 + JB emit_lit_memmove_emit_remainder_encodeSnappyBetterBlockAsm8B_memmove_move_1or2 + JE emit_lit_memmove_emit_remainder_encodeSnappyBetterBlockAsm8B_memmove_move_3 + CMPQ BX, $0x08 + JB emit_lit_memmove_emit_remainder_encodeSnappyBetterBlockAsm8B_memmove_move_4through7 + CMPQ BX, $0x10 + JBE emit_lit_memmove_emit_remainder_encodeSnappyBetterBlockAsm8B_memmove_move_8through16 + CMPQ BX, $0x20 + JBE emit_lit_memmove_emit_remainder_encodeSnappyBetterBlockAsm8B_memmove_move_17through32 + JMP emit_lit_memmove_emit_remainder_encodeSnappyBetterBlockAsm8B_memmove_move_33through64 + +emit_lit_memmove_emit_remainder_encodeSnappyBetterBlockAsm8B_memmove_move_1or2: + MOVB (AX), SI + MOVB -1(AX)(BX*1), AL + MOVB SI, (CX) + MOVB AL, -1(CX)(BX*1) + JMP memmove_end_copy_emit_remainder_encodeSnappyBetterBlockAsm8B + +emit_lit_memmove_emit_remainder_encodeSnappyBetterBlockAsm8B_memmove_move_3: + MOVW (AX), SI + MOVB 2(AX), AL + MOVW SI, (CX) + MOVB AL, 2(CX) + JMP memmove_end_copy_emit_remainder_encodeSnappyBetterBlockAsm8B + +emit_lit_memmove_emit_remainder_encodeSnappyBetterBlockAsm8B_memmove_move_4through7: + MOVL (AX), SI + MOVL -4(AX)(BX*1), AX + MOVL 
SI, (CX) + MOVL AX, -4(CX)(BX*1) + JMP memmove_end_copy_emit_remainder_encodeSnappyBetterBlockAsm8B + +emit_lit_memmove_emit_remainder_encodeSnappyBetterBlockAsm8B_memmove_move_8through16: + MOVQ (AX), SI + MOVQ -8(AX)(BX*1), AX + MOVQ SI, (CX) + MOVQ AX, -8(CX)(BX*1) + JMP memmove_end_copy_emit_remainder_encodeSnappyBetterBlockAsm8B + +emit_lit_memmove_emit_remainder_encodeSnappyBetterBlockAsm8B_memmove_move_17through32: + MOVOU (AX), X0 + MOVOU -16(AX)(BX*1), X1 + MOVOU X0, (CX) + MOVOU X1, -16(CX)(BX*1) + JMP memmove_end_copy_emit_remainder_encodeSnappyBetterBlockAsm8B + +emit_lit_memmove_emit_remainder_encodeSnappyBetterBlockAsm8B_memmove_move_33through64: + MOVOU (AX), X0 + MOVOU 16(AX), X1 + MOVOU -32(AX)(BX*1), X2 + MOVOU -16(AX)(BX*1), X3 + MOVOU X0, (CX) + MOVOU X1, 16(CX) + MOVOU X2, -32(CX)(BX*1) + MOVOU X3, -16(CX)(BX*1) + +memmove_end_copy_emit_remainder_encodeSnappyBetterBlockAsm8B: + MOVQ DX, CX + JMP emit_literal_done_emit_remainder_encodeSnappyBetterBlockAsm8B + +memmove_long_emit_remainder_encodeSnappyBetterBlockAsm8B: + LEAQ (CX)(SI*1), DX + MOVL SI, BX + + // genMemMoveLong + MOVOU (AX), X0 + MOVOU 16(AX), X1 + MOVOU -32(AX)(BX*1), X2 + MOVOU -16(AX)(BX*1), X3 + MOVQ BX, DI + SHRQ $0x05, DI + MOVQ CX, SI + ANDL $0x0000001f, SI + MOVQ $0x00000040, R8 + SUBQ SI, R8 + DECQ DI + JA emit_lit_memmove_long_emit_remainder_encodeSnappyBetterBlockAsm8Blarge_forward_sse_loop_32 + LEAQ -32(AX)(R8*1), SI + LEAQ -32(CX)(R8*1), R9 + +emit_lit_memmove_long_emit_remainder_encodeSnappyBetterBlockAsm8Blarge_big_loop_back: + MOVOU (SI), X4 + MOVOU 16(SI), X5 + MOVOA X4, (R9) + MOVOA X5, 16(R9) + ADDQ $0x20, R9 + ADDQ $0x20, SI + ADDQ $0x20, R8 + DECQ DI + JNA emit_lit_memmove_long_emit_remainder_encodeSnappyBetterBlockAsm8Blarge_big_loop_back + +emit_lit_memmove_long_emit_remainder_encodeSnappyBetterBlockAsm8Blarge_forward_sse_loop_32: + MOVOU -32(AX)(R8*1), X4 + MOVOU -16(AX)(R8*1), X5 + MOVOA X4, -32(CX)(R8*1) + MOVOA X5, -16(CX)(R8*1) + ADDQ $0x20, R8 + CMPQ BX, R8 + JAE emit_lit_memmove_long_emit_remainder_encodeSnappyBetterBlockAsm8Blarge_forward_sse_loop_32 + MOVOU X0, (CX) + MOVOU X1, 16(CX) + MOVOU X2, -32(CX)(BX*1) + MOVOU X3, -16(CX)(BX*1) + MOVQ DX, CX + +emit_literal_done_emit_remainder_encodeSnappyBetterBlockAsm8B: + MOVQ dst_base+0(FP), AX + SUBQ AX, CX + MOVQ CX, ret+56(FP) + RET + +// func calcBlockSize(src []byte, tmp *[32768]byte) int +// Requires: BMI, SSE2 +TEXT ·calcBlockSize(SB), $24-40 + MOVQ tmp+24(FP), AX + XORQ CX, CX + MOVQ $0x00000100, DX + MOVQ AX, BX + PXOR X0, X0 + +zero_loop_calcBlockSize: + MOVOU X0, (BX) + MOVOU X0, 16(BX) + MOVOU X0, 32(BX) + MOVOU X0, 48(BX) + MOVOU X0, 64(BX) + MOVOU X0, 80(BX) + MOVOU X0, 96(BX) + MOVOU X0, 112(BX) + ADDQ $0x80, BX + DECQ DX + JNZ zero_loop_calcBlockSize + MOVL $0x00000000, 12(SP) + MOVQ src_len+8(FP), DX + LEAQ -9(DX), BX + LEAQ -8(DX), SI + MOVL SI, 8(SP) + SHRQ $0x05, DX + SUBL DX, BX + LEAQ (CX)(BX*1), BX + MOVQ BX, (SP) + MOVL $0x00000001, DX + MOVL DX, 16(SP) + MOVQ src_base+0(FP), BX + +search_loop_calcBlockSize: + MOVL DX, SI + SUBL 12(SP), SI + SHRL $0x05, SI + LEAL 4(DX)(SI*1), SI + CMPL SI, 8(SP) + JAE emit_remainder_calcBlockSize + MOVQ (BX)(DX*1), DI + MOVL SI, 20(SP) + MOVQ $0x0000cf1bbcdcbf9b, R9 + MOVQ DI, R10 + MOVQ DI, R11 + SHRQ $0x08, R11 + SHLQ $0x10, R10 + IMULQ R9, R10 + SHRQ $0x33, R10 + SHLQ $0x10, R11 + IMULQ R9, R11 + SHRQ $0x33, R11 + MOVL (AX)(R10*4), SI + MOVL (AX)(R11*4), R8 + MOVL DX, (AX)(R10*4) + LEAL 1(DX), R10 + MOVL R10, (AX)(R11*4) + MOVQ DI, R10 + SHRQ $0x10, R10 + SHLQ $0x10, R10 
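+	// calcBlockSize mirrors the encoder's match search but writes no
+	// output: the emit paths below only advance the size counter in CX,
+	// and 0 is returned as soon as the tally exceeds the bound at (SP).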
+ IMULQ R9, R10 + SHRQ $0x33, R10 + MOVL DX, R9 + SUBL 16(SP), R9 + MOVL 1(BX)(R9*1), R11 + MOVQ DI, R9 + SHRQ $0x08, R9 + CMPL R9, R11 + JNE no_repeat_found_calcBlockSize + LEAL 1(DX), DI + MOVL 12(SP), SI + MOVL DI, R8 + SUBL 16(SP), R8 + JZ repeat_extend_back_end_calcBlockSize + +repeat_extend_back_loop_calcBlockSize: + CMPL DI, SI + JBE repeat_extend_back_end_calcBlockSize + MOVB -1(BX)(R8*1), R9 + MOVB -1(BX)(DI*1), R10 + CMPB R9, R10 + JNE repeat_extend_back_end_calcBlockSize + LEAL -1(DI), DI + DECL R8 + JNZ repeat_extend_back_loop_calcBlockSize + +repeat_extend_back_end_calcBlockSize: + MOVL DI, SI + SUBL 12(SP), SI + LEAQ 5(CX)(SI*1), SI + CMPQ SI, (SP) + JB repeat_dst_size_check_calcBlockSize + MOVQ $0x00000000, ret+32(FP) + RET + +repeat_dst_size_check_calcBlockSize: + MOVL 12(SP), SI + CMPL SI, DI + JEQ emit_literal_done_repeat_emit_calcBlockSize + MOVL DI, R8 + MOVL DI, 12(SP) + LEAQ (BX)(SI*1), R9 + SUBL SI, R8 + LEAL -1(R8), SI + CMPL SI, $0x3c + JB one_byte_repeat_emit_calcBlockSize + CMPL SI, $0x00000100 + JB two_bytes_repeat_emit_calcBlockSize + CMPL SI, $0x00010000 + JB three_bytes_repeat_emit_calcBlockSize + CMPL SI, $0x01000000 + JB four_bytes_repeat_emit_calcBlockSize + ADDQ $0x05, CX + JMP memmove_long_repeat_emit_calcBlockSize + +four_bytes_repeat_emit_calcBlockSize: + ADDQ $0x04, CX + JMP memmove_long_repeat_emit_calcBlockSize + +three_bytes_repeat_emit_calcBlockSize: + ADDQ $0x03, CX + JMP memmove_long_repeat_emit_calcBlockSize + +two_bytes_repeat_emit_calcBlockSize: + ADDQ $0x02, CX + CMPL SI, $0x40 + JB memmove_repeat_emit_calcBlockSize + JMP memmove_long_repeat_emit_calcBlockSize + +one_byte_repeat_emit_calcBlockSize: + ADDQ $0x01, CX + +memmove_repeat_emit_calcBlockSize: + LEAQ (CX)(R8*1), CX + JMP emit_literal_done_repeat_emit_calcBlockSize + +memmove_long_repeat_emit_calcBlockSize: + LEAQ (CX)(R8*1), CX + +emit_literal_done_repeat_emit_calcBlockSize: + ADDL $0x05, DX + MOVL DX, SI + SUBL 16(SP), SI + MOVQ src_len+8(FP), R8 + SUBL DX, R8 + LEAQ (BX)(DX*1), R9 + LEAQ (BX)(SI*1), SI + + // matchLen + XORL R11, R11 + +matchlen_loopback_16_repeat_extend_calcBlockSize: + CMPL R8, $0x10 + JB matchlen_match8_repeat_extend_calcBlockSize + MOVQ (R9)(R11*1), R10 + MOVQ 8(R9)(R11*1), R12 + XORQ (SI)(R11*1), R10 + JNZ matchlen_bsf_8_repeat_extend_calcBlockSize + XORQ 8(SI)(R11*1), R12 + JNZ matchlen_bsf_16repeat_extend_calcBlockSize + LEAL -16(R8), R8 + LEAL 16(R11), R11 + JMP matchlen_loopback_16_repeat_extend_calcBlockSize + +matchlen_bsf_16repeat_extend_calcBlockSize: +#ifdef GOAMD64_v3 + TZCNTQ R12, R12 + +#else + BSFQ R12, R12 + +#endif + SARQ $0x03, R12 + LEAL 8(R11)(R12*1), R11 + JMP repeat_extend_forward_end_calcBlockSize + +matchlen_match8_repeat_extend_calcBlockSize: + CMPL R8, $0x08 + JB matchlen_match4_repeat_extend_calcBlockSize + MOVQ (R9)(R11*1), R10 + XORQ (SI)(R11*1), R10 + JNZ matchlen_bsf_8_repeat_extend_calcBlockSize + LEAL -8(R8), R8 + LEAL 8(R11), R11 + JMP matchlen_match4_repeat_extend_calcBlockSize + +matchlen_bsf_8_repeat_extend_calcBlockSize: +#ifdef GOAMD64_v3 + TZCNTQ R10, R10 + +#else + BSFQ R10, R10 + +#endif + SARQ $0x03, R10 + LEAL (R11)(R10*1), R11 + JMP repeat_extend_forward_end_calcBlockSize + +matchlen_match4_repeat_extend_calcBlockSize: + CMPL R8, $0x04 + JB matchlen_match2_repeat_extend_calcBlockSize + MOVL (R9)(R11*1), R10 + CMPL (SI)(R11*1), R10 + JNE matchlen_match2_repeat_extend_calcBlockSize + LEAL -4(R8), R8 + LEAL 4(R11), R11 + +matchlen_match2_repeat_extend_calcBlockSize: + CMPL R8, $0x01 + JE 
matchlen_match1_repeat_extend_calcBlockSize + JB repeat_extend_forward_end_calcBlockSize + MOVW (R9)(R11*1), R10 + CMPW (SI)(R11*1), R10 + JNE matchlen_match1_repeat_extend_calcBlockSize + LEAL 2(R11), R11 + SUBL $0x02, R8 + JZ repeat_extend_forward_end_calcBlockSize + +matchlen_match1_repeat_extend_calcBlockSize: + MOVB (R9)(R11*1), R10 + CMPB (SI)(R11*1), R10 + JNE repeat_extend_forward_end_calcBlockSize + LEAL 1(R11), R11 + +repeat_extend_forward_end_calcBlockSize: + ADDL R11, DX + MOVL DX, SI + SUBL DI, SI + MOVL 16(SP), DI + + // emitCopy + CMPL DI, $0x00010000 + JB two_byte_offset_repeat_as_copy_calcBlockSize + +four_bytes_loop_back_repeat_as_copy_calcBlockSize: + CMPL SI, $0x40 + JBE four_bytes_remain_repeat_as_copy_calcBlockSize + LEAL -64(SI), SI + ADDQ $0x05, CX + CMPL SI, $0x04 + JB four_bytes_remain_repeat_as_copy_calcBlockSize + JMP four_bytes_loop_back_repeat_as_copy_calcBlockSize + +four_bytes_remain_repeat_as_copy_calcBlockSize: + TESTL SI, SI + JZ repeat_end_emit_calcBlockSize + XORL SI, SI + ADDQ $0x05, CX + JMP repeat_end_emit_calcBlockSize + +two_byte_offset_repeat_as_copy_calcBlockSize: + CMPL SI, $0x40 + JBE two_byte_offset_short_repeat_as_copy_calcBlockSize + LEAL -60(SI), SI + ADDQ $0x03, CX + JMP two_byte_offset_repeat_as_copy_calcBlockSize + +two_byte_offset_short_repeat_as_copy_calcBlockSize: + MOVL SI, R8 + SHLL $0x02, R8 + CMPL SI, $0x0c + JAE emit_copy_three_repeat_as_copy_calcBlockSize + CMPL DI, $0x00000800 + JAE emit_copy_three_repeat_as_copy_calcBlockSize + ADDQ $0x02, CX + JMP repeat_end_emit_calcBlockSize + +emit_copy_three_repeat_as_copy_calcBlockSize: + ADDQ $0x03, CX + +repeat_end_emit_calcBlockSize: + MOVL DX, 12(SP) + JMP search_loop_calcBlockSize + +no_repeat_found_calcBlockSize: + CMPL (BX)(SI*1), DI + JEQ candidate_match_calcBlockSize + SHRQ $0x08, DI + MOVL (AX)(R10*4), SI + LEAL 2(DX), R9 + CMPL (BX)(R8*1), DI + JEQ candidate2_match_calcBlockSize + MOVL R9, (AX)(R10*4) + SHRQ $0x08, DI + CMPL (BX)(SI*1), DI + JEQ candidate3_match_calcBlockSize + MOVL 20(SP), DX + JMP search_loop_calcBlockSize + +candidate3_match_calcBlockSize: + ADDL $0x02, DX + JMP candidate_match_calcBlockSize + +candidate2_match_calcBlockSize: + MOVL R9, (AX)(R10*4) + INCL DX + MOVL R8, SI + +candidate_match_calcBlockSize: + MOVL 12(SP), DI + TESTL SI, SI + JZ match_extend_back_end_calcBlockSize + +match_extend_back_loop_calcBlockSize: + CMPL DX, DI + JBE match_extend_back_end_calcBlockSize + MOVB -1(BX)(SI*1), R8 + MOVB -1(BX)(DX*1), R9 + CMPB R8, R9 + JNE match_extend_back_end_calcBlockSize + LEAL -1(DX), DX + DECL SI + JZ match_extend_back_end_calcBlockSize + JMP match_extend_back_loop_calcBlockSize + +match_extend_back_end_calcBlockSize: + MOVL DX, DI + SUBL 12(SP), DI + LEAQ 5(CX)(DI*1), DI + CMPQ DI, (SP) + JB match_dst_size_check_calcBlockSize + MOVQ $0x00000000, ret+32(FP) + RET + +match_dst_size_check_calcBlockSize: + MOVL DX, DI + MOVL 12(SP), R8 + CMPL R8, DI + JEQ emit_literal_done_match_emit_calcBlockSize + MOVL DI, R9 + MOVL DI, 12(SP) + LEAQ (BX)(R8*1), DI + SUBL R8, R9 + LEAL -1(R9), DI + CMPL DI, $0x3c + JB one_byte_match_emit_calcBlockSize + CMPL DI, $0x00000100 + JB two_bytes_match_emit_calcBlockSize + CMPL DI, $0x00010000 + JB three_bytes_match_emit_calcBlockSize + CMPL DI, $0x01000000 + JB four_bytes_match_emit_calcBlockSize + ADDQ $0x05, CX + JMP memmove_long_match_emit_calcBlockSize + +four_bytes_match_emit_calcBlockSize: + ADDQ $0x04, CX + JMP memmove_long_match_emit_calcBlockSize + +three_bytes_match_emit_calcBlockSize: + ADDQ $0x03, CX + JMP 
memmove_long_match_emit_calcBlockSize + +two_bytes_match_emit_calcBlockSize: + ADDQ $0x02, CX + CMPL DI, $0x40 + JB memmove_match_emit_calcBlockSize + JMP memmove_long_match_emit_calcBlockSize + +one_byte_match_emit_calcBlockSize: + ADDQ $0x01, CX + +memmove_match_emit_calcBlockSize: + LEAQ (CX)(R9*1), CX + JMP emit_literal_done_match_emit_calcBlockSize + +memmove_long_match_emit_calcBlockSize: + LEAQ (CX)(R9*1), CX + +emit_literal_done_match_emit_calcBlockSize: +match_nolit_loop_calcBlockSize: + MOVL DX, DI + SUBL SI, DI + MOVL DI, 16(SP) + ADDL $0x04, DX + ADDL $0x04, SI + MOVQ src_len+8(FP), DI + SUBL DX, DI + LEAQ (BX)(DX*1), R8 + LEAQ (BX)(SI*1), SI + + // matchLen + XORL R10, R10 + +matchlen_loopback_16_match_nolit_calcBlockSize: + CMPL DI, $0x10 + JB matchlen_match8_match_nolit_calcBlockSize + MOVQ (R8)(R10*1), R9 + MOVQ 8(R8)(R10*1), R11 + XORQ (SI)(R10*1), R9 + JNZ matchlen_bsf_8_match_nolit_calcBlockSize + XORQ 8(SI)(R10*1), R11 + JNZ matchlen_bsf_16match_nolit_calcBlockSize + LEAL -16(DI), DI + LEAL 16(R10), R10 + JMP matchlen_loopback_16_match_nolit_calcBlockSize + +matchlen_bsf_16match_nolit_calcBlockSize: +#ifdef GOAMD64_v3 + TZCNTQ R11, R11 + +#else + BSFQ R11, R11 + +#endif + SARQ $0x03, R11 + LEAL 8(R10)(R11*1), R10 + JMP match_nolit_end_calcBlockSize + +matchlen_match8_match_nolit_calcBlockSize: + CMPL DI, $0x08 + JB matchlen_match4_match_nolit_calcBlockSize + MOVQ (R8)(R10*1), R9 + XORQ (SI)(R10*1), R9 + JNZ matchlen_bsf_8_match_nolit_calcBlockSize + LEAL -8(DI), DI + LEAL 8(R10), R10 + JMP matchlen_match4_match_nolit_calcBlockSize + +matchlen_bsf_8_match_nolit_calcBlockSize: +#ifdef GOAMD64_v3 + TZCNTQ R9, R9 + +#else + BSFQ R9, R9 + +#endif + SARQ $0x03, R9 + LEAL (R10)(R9*1), R10 + JMP match_nolit_end_calcBlockSize + +matchlen_match4_match_nolit_calcBlockSize: + CMPL DI, $0x04 + JB matchlen_match2_match_nolit_calcBlockSize + MOVL (R8)(R10*1), R9 + CMPL (SI)(R10*1), R9 + JNE matchlen_match2_match_nolit_calcBlockSize + LEAL -4(DI), DI + LEAL 4(R10), R10 + +matchlen_match2_match_nolit_calcBlockSize: + CMPL DI, $0x01 + JE matchlen_match1_match_nolit_calcBlockSize + JB match_nolit_end_calcBlockSize + MOVW (R8)(R10*1), R9 + CMPW (SI)(R10*1), R9 + JNE matchlen_match1_match_nolit_calcBlockSize + LEAL 2(R10), R10 + SUBL $0x02, DI + JZ match_nolit_end_calcBlockSize + +matchlen_match1_match_nolit_calcBlockSize: + MOVB (R8)(R10*1), R9 + CMPB (SI)(R10*1), R9 + JNE match_nolit_end_calcBlockSize + LEAL 1(R10), R10 + +match_nolit_end_calcBlockSize: + ADDL R10, DX + MOVL 16(SP), SI + ADDL $0x04, R10 + MOVL DX, 12(SP) + + // emitCopy + CMPL SI, $0x00010000 + JB two_byte_offset_match_nolit_calcBlockSize + +four_bytes_loop_back_match_nolit_calcBlockSize: + CMPL R10, $0x40 + JBE four_bytes_remain_match_nolit_calcBlockSize + LEAL -64(R10), R10 + ADDQ $0x05, CX + CMPL R10, $0x04 + JB four_bytes_remain_match_nolit_calcBlockSize + JMP four_bytes_loop_back_match_nolit_calcBlockSize + +four_bytes_remain_match_nolit_calcBlockSize: + TESTL R10, R10 + JZ match_nolit_emitcopy_end_calcBlockSize + XORL SI, SI + ADDQ $0x05, CX + JMP match_nolit_emitcopy_end_calcBlockSize + +two_byte_offset_match_nolit_calcBlockSize: + CMPL R10, $0x40 + JBE two_byte_offset_short_match_nolit_calcBlockSize + LEAL -60(R10), R10 + ADDQ $0x03, CX + JMP two_byte_offset_match_nolit_calcBlockSize + +two_byte_offset_short_match_nolit_calcBlockSize: + MOVL R10, DI + SHLL $0x02, DI + CMPL R10, $0x0c + JAE emit_copy_three_match_nolit_calcBlockSize + CMPL SI, $0x00000800 + JAE emit_copy_three_match_nolit_calcBlockSize + ADDQ $0x02, 
CX + JMP match_nolit_emitcopy_end_calcBlockSize + +emit_copy_three_match_nolit_calcBlockSize: + ADDQ $0x03, CX + +match_nolit_emitcopy_end_calcBlockSize: + CMPL DX, 8(SP) + JAE emit_remainder_calcBlockSize + MOVQ -2(BX)(DX*1), DI + CMPQ CX, (SP) + JB match_nolit_dst_ok_calcBlockSize + MOVQ $0x00000000, ret+32(FP) + RET + +match_nolit_dst_ok_calcBlockSize: + MOVQ $0x0000cf1bbcdcbf9b, R9 + MOVQ DI, R8 + SHRQ $0x10, DI + MOVQ DI, SI + SHLQ $0x10, R8 + IMULQ R9, R8 + SHRQ $0x33, R8 + SHLQ $0x10, SI + IMULQ R9, SI + SHRQ $0x33, SI + LEAL -2(DX), R9 + LEAQ (AX)(SI*4), R10 + MOVL (R10), SI + MOVL R9, (AX)(R8*4) + MOVL DX, (R10) + CMPL (BX)(SI*1), DI + JEQ match_nolit_loop_calcBlockSize + INCL DX + JMP search_loop_calcBlockSize + +emit_remainder_calcBlockSize: + MOVQ src_len+8(FP), AX + SUBL 12(SP), AX + LEAQ 5(CX)(AX*1), AX + CMPQ AX, (SP) + JB emit_remainder_ok_calcBlockSize + MOVQ $0x00000000, ret+32(FP) + RET + +emit_remainder_ok_calcBlockSize: + MOVQ src_len+8(FP), AX + MOVL 12(SP), DX + CMPL DX, AX + JEQ emit_literal_done_emit_remainder_calcBlockSize + MOVL AX, SI + MOVL AX, 12(SP) + LEAQ (BX)(DX*1), AX + SUBL DX, SI + LEAL -1(SI), AX + CMPL AX, $0x3c + JB one_byte_emit_remainder_calcBlockSize + CMPL AX, $0x00000100 + JB two_bytes_emit_remainder_calcBlockSize + CMPL AX, $0x00010000 + JB three_bytes_emit_remainder_calcBlockSize + CMPL AX, $0x01000000 + JB four_bytes_emit_remainder_calcBlockSize + ADDQ $0x05, CX + JMP memmove_long_emit_remainder_calcBlockSize + +four_bytes_emit_remainder_calcBlockSize: + ADDQ $0x04, CX + JMP memmove_long_emit_remainder_calcBlockSize + +three_bytes_emit_remainder_calcBlockSize: + ADDQ $0x03, CX + JMP memmove_long_emit_remainder_calcBlockSize + +two_bytes_emit_remainder_calcBlockSize: + ADDQ $0x02, CX + CMPL AX, $0x40 + JB memmove_emit_remainder_calcBlockSize + JMP memmove_long_emit_remainder_calcBlockSize + +one_byte_emit_remainder_calcBlockSize: + ADDQ $0x01, CX + +memmove_emit_remainder_calcBlockSize: + LEAQ (CX)(SI*1), AX + MOVQ AX, CX + JMP emit_literal_done_emit_remainder_calcBlockSize + +memmove_long_emit_remainder_calcBlockSize: + LEAQ (CX)(SI*1), AX + MOVQ AX, CX + +emit_literal_done_emit_remainder_calcBlockSize: + MOVQ CX, ret+32(FP) + RET + +// func calcBlockSizeSmall(src []byte, tmp *[2048]byte) int +// Requires: BMI, SSE2 +TEXT ·calcBlockSizeSmall(SB), $24-40 + MOVQ tmp+24(FP), AX + XORQ CX, CX + MOVQ $0x00000010, DX + MOVQ AX, BX + PXOR X0, X0 + +zero_loop_calcBlockSizeSmall: + MOVOU X0, (BX) + MOVOU X0, 16(BX) + MOVOU X0, 32(BX) + MOVOU X0, 48(BX) + MOVOU X0, 64(BX) + MOVOU X0, 80(BX) + MOVOU X0, 96(BX) + MOVOU X0, 112(BX) + ADDQ $0x80, BX + DECQ DX + JNZ zero_loop_calcBlockSizeSmall + MOVL $0x00000000, 12(SP) + MOVQ src_len+8(FP), DX + LEAQ -9(DX), BX + LEAQ -8(DX), SI + MOVL SI, 8(SP) + SHRQ $0x05, DX + SUBL DX, BX + LEAQ (CX)(BX*1), BX + MOVQ BX, (SP) + MOVL $0x00000001, DX + MOVL DX, 16(SP) + MOVQ src_base+0(FP), BX + +search_loop_calcBlockSizeSmall: + MOVL DX, SI + SUBL 12(SP), SI + SHRL $0x04, SI + LEAL 4(DX)(SI*1), SI + CMPL SI, 8(SP) + JAE emit_remainder_calcBlockSizeSmall + MOVQ (BX)(DX*1), DI + MOVL SI, 20(SP) + MOVQ $0x9e3779b1, R9 + MOVQ DI, R10 + MOVQ DI, R11 + SHRQ $0x08, R11 + SHLQ $0x20, R10 + IMULQ R9, R10 + SHRQ $0x37, R10 + SHLQ $0x20, R11 + IMULQ R9, R11 + SHRQ $0x37, R11 + MOVL (AX)(R10*4), SI + MOVL (AX)(R11*4), R8 + MOVL DX, (AX)(R10*4) + LEAL 1(DX), R10 + MOVL R10, (AX)(R11*4) + MOVQ DI, R10 + SHRQ $0x10, R10 + SHLQ $0x20, R10 + IMULQ R9, R10 + SHRQ $0x37, R10 + MOVL DX, R9 + SUBL 16(SP), R9 + MOVL 1(BX)(R9*1), R11 + MOVQ 
DI, R9 + SHRQ $0x08, R9 + CMPL R9, R11 + JNE no_repeat_found_calcBlockSizeSmall + LEAL 1(DX), DI + MOVL 12(SP), SI + MOVL DI, R8 + SUBL 16(SP), R8 + JZ repeat_extend_back_end_calcBlockSizeSmall + +repeat_extend_back_loop_calcBlockSizeSmall: + CMPL DI, SI + JBE repeat_extend_back_end_calcBlockSizeSmall + MOVB -1(BX)(R8*1), R9 + MOVB -1(BX)(DI*1), R10 + CMPB R9, R10 + JNE repeat_extend_back_end_calcBlockSizeSmall + LEAL -1(DI), DI + DECL R8 + JNZ repeat_extend_back_loop_calcBlockSizeSmall + +repeat_extend_back_end_calcBlockSizeSmall: + MOVL DI, SI + SUBL 12(SP), SI + LEAQ 3(CX)(SI*1), SI + CMPQ SI, (SP) + JB repeat_dst_size_check_calcBlockSizeSmall + MOVQ $0x00000000, ret+32(FP) + RET + +repeat_dst_size_check_calcBlockSizeSmall: + MOVL 12(SP), SI + CMPL SI, DI + JEQ emit_literal_done_repeat_emit_calcBlockSizeSmall + MOVL DI, R8 + MOVL DI, 12(SP) + LEAQ (BX)(SI*1), R9 + SUBL SI, R8 + LEAL -1(R8), SI + CMPL SI, $0x3c + JB one_byte_repeat_emit_calcBlockSizeSmall + CMPL SI, $0x00000100 + JB two_bytes_repeat_emit_calcBlockSizeSmall + JB three_bytes_repeat_emit_calcBlockSizeSmall + +three_bytes_repeat_emit_calcBlockSizeSmall: + ADDQ $0x03, CX + JMP memmove_long_repeat_emit_calcBlockSizeSmall + +two_bytes_repeat_emit_calcBlockSizeSmall: + ADDQ $0x02, CX + CMPL SI, $0x40 + JB memmove_repeat_emit_calcBlockSizeSmall + JMP memmove_long_repeat_emit_calcBlockSizeSmall + +one_byte_repeat_emit_calcBlockSizeSmall: + ADDQ $0x01, CX + +memmove_repeat_emit_calcBlockSizeSmall: + LEAQ (CX)(R8*1), CX + JMP emit_literal_done_repeat_emit_calcBlockSizeSmall + +memmove_long_repeat_emit_calcBlockSizeSmall: + LEAQ (CX)(R8*1), CX + +emit_literal_done_repeat_emit_calcBlockSizeSmall: + ADDL $0x05, DX + MOVL DX, SI + SUBL 16(SP), SI + MOVQ src_len+8(FP), R8 + SUBL DX, R8 + LEAQ (BX)(DX*1), R9 + LEAQ (BX)(SI*1), SI + + // matchLen + XORL R11, R11 + +matchlen_loopback_16_repeat_extend_calcBlockSizeSmall: + CMPL R8, $0x10 + JB matchlen_match8_repeat_extend_calcBlockSizeSmall + MOVQ (R9)(R11*1), R10 + MOVQ 8(R9)(R11*1), R12 + XORQ (SI)(R11*1), R10 + JNZ matchlen_bsf_8_repeat_extend_calcBlockSizeSmall + XORQ 8(SI)(R11*1), R12 + JNZ matchlen_bsf_16repeat_extend_calcBlockSizeSmall + LEAL -16(R8), R8 + LEAL 16(R11), R11 + JMP matchlen_loopback_16_repeat_extend_calcBlockSizeSmall + +matchlen_bsf_16repeat_extend_calcBlockSizeSmall: +#ifdef GOAMD64_v3 + TZCNTQ R12, R12 + +#else + BSFQ R12, R12 + +#endif + SARQ $0x03, R12 + LEAL 8(R11)(R12*1), R11 + JMP repeat_extend_forward_end_calcBlockSizeSmall + +matchlen_match8_repeat_extend_calcBlockSizeSmall: + CMPL R8, $0x08 + JB matchlen_match4_repeat_extend_calcBlockSizeSmall + MOVQ (R9)(R11*1), R10 + XORQ (SI)(R11*1), R10 + JNZ matchlen_bsf_8_repeat_extend_calcBlockSizeSmall + LEAL -8(R8), R8 + LEAL 8(R11), R11 + JMP matchlen_match4_repeat_extend_calcBlockSizeSmall + +matchlen_bsf_8_repeat_extend_calcBlockSizeSmall: +#ifdef GOAMD64_v3 + TZCNTQ R10, R10 + +#else + BSFQ R10, R10 + +#endif + SARQ $0x03, R10 + LEAL (R11)(R10*1), R11 + JMP repeat_extend_forward_end_calcBlockSizeSmall + +matchlen_match4_repeat_extend_calcBlockSizeSmall: + CMPL R8, $0x04 + JB matchlen_match2_repeat_extend_calcBlockSizeSmall + MOVL (R9)(R11*1), R10 + CMPL (SI)(R11*1), R10 + JNE matchlen_match2_repeat_extend_calcBlockSizeSmall + LEAL -4(R8), R8 + LEAL 4(R11), R11 + +matchlen_match2_repeat_extend_calcBlockSizeSmall: + CMPL R8, $0x01 + JE matchlen_match1_repeat_extend_calcBlockSizeSmall + JB repeat_extend_forward_end_calcBlockSizeSmall + MOVW (R9)(R11*1), R10 + CMPW (SI)(R11*1), R10 + JNE 
matchlen_match1_repeat_extend_calcBlockSizeSmall + LEAL 2(R11), R11 + SUBL $0x02, R8 + JZ repeat_extend_forward_end_calcBlockSizeSmall + +matchlen_match1_repeat_extend_calcBlockSizeSmall: + MOVB (R9)(R11*1), R10 + CMPB (SI)(R11*1), R10 + JNE repeat_extend_forward_end_calcBlockSizeSmall + LEAL 1(R11), R11 + +repeat_extend_forward_end_calcBlockSizeSmall: + ADDL R11, DX + MOVL DX, SI + SUBL DI, SI + MOVL 16(SP), DI + + // emitCopy +two_byte_offset_repeat_as_copy_calcBlockSizeSmall: + CMPL SI, $0x40 + JBE two_byte_offset_short_repeat_as_copy_calcBlockSizeSmall + LEAL -60(SI), SI + ADDQ $0x03, CX + JMP two_byte_offset_repeat_as_copy_calcBlockSizeSmall + +two_byte_offset_short_repeat_as_copy_calcBlockSizeSmall: + MOVL SI, DI + SHLL $0x02, DI + CMPL SI, $0x0c + JAE emit_copy_three_repeat_as_copy_calcBlockSizeSmall + ADDQ $0x02, CX + JMP repeat_end_emit_calcBlockSizeSmall + +emit_copy_three_repeat_as_copy_calcBlockSizeSmall: + ADDQ $0x03, CX + +repeat_end_emit_calcBlockSizeSmall: + MOVL DX, 12(SP) + JMP search_loop_calcBlockSizeSmall + +no_repeat_found_calcBlockSizeSmall: + CMPL (BX)(SI*1), DI + JEQ candidate_match_calcBlockSizeSmall + SHRQ $0x08, DI + MOVL (AX)(R10*4), SI + LEAL 2(DX), R9 + CMPL (BX)(R8*1), DI + JEQ candidate2_match_calcBlockSizeSmall + MOVL R9, (AX)(R10*4) + SHRQ $0x08, DI + CMPL (BX)(SI*1), DI + JEQ candidate3_match_calcBlockSizeSmall + MOVL 20(SP), DX + JMP search_loop_calcBlockSizeSmall + +candidate3_match_calcBlockSizeSmall: + ADDL $0x02, DX + JMP candidate_match_calcBlockSizeSmall + +candidate2_match_calcBlockSizeSmall: + MOVL R9, (AX)(R10*4) + INCL DX + MOVL R8, SI + +candidate_match_calcBlockSizeSmall: + MOVL 12(SP), DI + TESTL SI, SI + JZ match_extend_back_end_calcBlockSizeSmall + +match_extend_back_loop_calcBlockSizeSmall: + CMPL DX, DI + JBE match_extend_back_end_calcBlockSizeSmall + MOVB -1(BX)(SI*1), R8 + MOVB -1(BX)(DX*1), R9 + CMPB R8, R9 + JNE match_extend_back_end_calcBlockSizeSmall + LEAL -1(DX), DX + DECL SI + JZ match_extend_back_end_calcBlockSizeSmall + JMP match_extend_back_loop_calcBlockSizeSmall + +match_extend_back_end_calcBlockSizeSmall: + MOVL DX, DI + SUBL 12(SP), DI + LEAQ 3(CX)(DI*1), DI + CMPQ DI, (SP) + JB match_dst_size_check_calcBlockSizeSmall + MOVQ $0x00000000, ret+32(FP) + RET + +match_dst_size_check_calcBlockSizeSmall: + MOVL DX, DI + MOVL 12(SP), R8 + CMPL R8, DI + JEQ emit_literal_done_match_emit_calcBlockSizeSmall + MOVL DI, R9 + MOVL DI, 12(SP) + LEAQ (BX)(R8*1), DI + SUBL R8, R9 + LEAL -1(R9), DI + CMPL DI, $0x3c + JB one_byte_match_emit_calcBlockSizeSmall + CMPL DI, $0x00000100 + JB two_bytes_match_emit_calcBlockSizeSmall + JB three_bytes_match_emit_calcBlockSizeSmall + +three_bytes_match_emit_calcBlockSizeSmall: + ADDQ $0x03, CX + JMP memmove_long_match_emit_calcBlockSizeSmall + +two_bytes_match_emit_calcBlockSizeSmall: + ADDQ $0x02, CX + CMPL DI, $0x40 + JB memmove_match_emit_calcBlockSizeSmall + JMP memmove_long_match_emit_calcBlockSizeSmall + +one_byte_match_emit_calcBlockSizeSmall: + ADDQ $0x01, CX + +memmove_match_emit_calcBlockSizeSmall: + LEAQ (CX)(R9*1), CX + JMP emit_literal_done_match_emit_calcBlockSizeSmall + +memmove_long_match_emit_calcBlockSizeSmall: + LEAQ (CX)(R9*1), CX + +emit_literal_done_match_emit_calcBlockSizeSmall: +match_nolit_loop_calcBlockSizeSmall: + MOVL DX, DI + SUBL SI, DI + MOVL DI, 16(SP) + ADDL $0x04, DX + ADDL $0x04, SI + MOVQ src_len+8(FP), DI + SUBL DX, DI + LEAQ (BX)(DX*1), R8 + LEAQ (BX)(SI*1), SI + + // matchLen + XORL R10, R10 + +matchlen_loopback_16_match_nolit_calcBlockSizeSmall: + CMPL DI, 
$0x10 + JB matchlen_match8_match_nolit_calcBlockSizeSmall + MOVQ (R8)(R10*1), R9 + MOVQ 8(R8)(R10*1), R11 + XORQ (SI)(R10*1), R9 + JNZ matchlen_bsf_8_match_nolit_calcBlockSizeSmall + XORQ 8(SI)(R10*1), R11 + JNZ matchlen_bsf_16match_nolit_calcBlockSizeSmall + LEAL -16(DI), DI + LEAL 16(R10), R10 + JMP matchlen_loopback_16_match_nolit_calcBlockSizeSmall + +matchlen_bsf_16match_nolit_calcBlockSizeSmall: +#ifdef GOAMD64_v3 + TZCNTQ R11, R11 + +#else + BSFQ R11, R11 + +#endif + SARQ $0x03, R11 + LEAL 8(R10)(R11*1), R10 + JMP match_nolit_end_calcBlockSizeSmall + +matchlen_match8_match_nolit_calcBlockSizeSmall: + CMPL DI, $0x08 + JB matchlen_match4_match_nolit_calcBlockSizeSmall + MOVQ (R8)(R10*1), R9 + XORQ (SI)(R10*1), R9 + JNZ matchlen_bsf_8_match_nolit_calcBlockSizeSmall + LEAL -8(DI), DI + LEAL 8(R10), R10 + JMP matchlen_match4_match_nolit_calcBlockSizeSmall + +matchlen_bsf_8_match_nolit_calcBlockSizeSmall: +#ifdef GOAMD64_v3 + TZCNTQ R9, R9 + +#else + BSFQ R9, R9 + +#endif + SARQ $0x03, R9 + LEAL (R10)(R9*1), R10 + JMP match_nolit_end_calcBlockSizeSmall + +matchlen_match4_match_nolit_calcBlockSizeSmall: + CMPL DI, $0x04 + JB matchlen_match2_match_nolit_calcBlockSizeSmall + MOVL (R8)(R10*1), R9 + CMPL (SI)(R10*1), R9 + JNE matchlen_match2_match_nolit_calcBlockSizeSmall + LEAL -4(DI), DI + LEAL 4(R10), R10 + +matchlen_match2_match_nolit_calcBlockSizeSmall: + CMPL DI, $0x01 + JE matchlen_match1_match_nolit_calcBlockSizeSmall + JB match_nolit_end_calcBlockSizeSmall + MOVW (R8)(R10*1), R9 + CMPW (SI)(R10*1), R9 + JNE matchlen_match1_match_nolit_calcBlockSizeSmall + LEAL 2(R10), R10 + SUBL $0x02, DI + JZ match_nolit_end_calcBlockSizeSmall + +matchlen_match1_match_nolit_calcBlockSizeSmall: + MOVB (R8)(R10*1), R9 + CMPB (SI)(R10*1), R9 + JNE match_nolit_end_calcBlockSizeSmall + LEAL 1(R10), R10 + +match_nolit_end_calcBlockSizeSmall: + ADDL R10, DX + MOVL 16(SP), SI + ADDL $0x04, R10 + MOVL DX, 12(SP) + + // emitCopy +two_byte_offset_match_nolit_calcBlockSizeSmall: + CMPL R10, $0x40 + JBE two_byte_offset_short_match_nolit_calcBlockSizeSmall + LEAL -60(R10), R10 + ADDQ $0x03, CX + JMP two_byte_offset_match_nolit_calcBlockSizeSmall + +two_byte_offset_short_match_nolit_calcBlockSizeSmall: + MOVL R10, SI + SHLL $0x02, SI + CMPL R10, $0x0c + JAE emit_copy_three_match_nolit_calcBlockSizeSmall + ADDQ $0x02, CX + JMP match_nolit_emitcopy_end_calcBlockSizeSmall + +emit_copy_three_match_nolit_calcBlockSizeSmall: + ADDQ $0x03, CX + +match_nolit_emitcopy_end_calcBlockSizeSmall: + CMPL DX, 8(SP) + JAE emit_remainder_calcBlockSizeSmall + MOVQ -2(BX)(DX*1), DI + CMPQ CX, (SP) + JB match_nolit_dst_ok_calcBlockSizeSmall + MOVQ $0x00000000, ret+32(FP) + RET + +match_nolit_dst_ok_calcBlockSizeSmall: + MOVQ $0x9e3779b1, R9 + MOVQ DI, R8 + SHRQ $0x10, DI + MOVQ DI, SI + SHLQ $0x20, R8 + IMULQ R9, R8 + SHRQ $0x37, R8 + SHLQ $0x20, SI + IMULQ R9, SI + SHRQ $0x37, SI + LEAL -2(DX), R9 + LEAQ (AX)(SI*4), R10 + MOVL (R10), SI + MOVL R9, (AX)(R8*4) + MOVL DX, (R10) + CMPL (BX)(SI*1), DI + JEQ match_nolit_loop_calcBlockSizeSmall + INCL DX + JMP search_loop_calcBlockSizeSmall + +emit_remainder_calcBlockSizeSmall: + MOVQ src_len+8(FP), AX + SUBL 12(SP), AX + LEAQ 3(CX)(AX*1), AX + CMPQ AX, (SP) + JB emit_remainder_ok_calcBlockSizeSmall + MOVQ $0x00000000, ret+32(FP) + RET + +emit_remainder_ok_calcBlockSizeSmall: + MOVQ src_len+8(FP), AX + MOVL 12(SP), DX + CMPL DX, AX + JEQ emit_literal_done_emit_remainder_calcBlockSizeSmall + MOVL AX, SI + MOVL AX, 12(SP) + LEAQ (BX)(DX*1), AX + SUBL DX, SI + LEAL -1(SI), AX + CMPL AX, $0x3c + 
JB one_byte_emit_remainder_calcBlockSizeSmall + CMPL AX, $0x00000100 + JB two_bytes_emit_remainder_calcBlockSizeSmall + JB three_bytes_emit_remainder_calcBlockSizeSmall + +three_bytes_emit_remainder_calcBlockSizeSmall: + ADDQ $0x03, CX + JMP memmove_long_emit_remainder_calcBlockSizeSmall + +two_bytes_emit_remainder_calcBlockSizeSmall: + ADDQ $0x02, CX + CMPL AX, $0x40 + JB memmove_emit_remainder_calcBlockSizeSmall + JMP memmove_long_emit_remainder_calcBlockSizeSmall + +one_byte_emit_remainder_calcBlockSizeSmall: + ADDQ $0x01, CX + +memmove_emit_remainder_calcBlockSizeSmall: + LEAQ (CX)(SI*1), AX + MOVQ AX, CX + JMP emit_literal_done_emit_remainder_calcBlockSizeSmall + +memmove_long_emit_remainder_calcBlockSizeSmall: + LEAQ (CX)(SI*1), AX + MOVQ AX, CX + +emit_literal_done_emit_remainder_calcBlockSizeSmall: + MOVQ CX, ret+32(FP) + RET + +// func emitLiteral(dst []byte, lit []byte) int +// Requires: SSE2 +TEXT ·emitLiteral(SB), NOSPLIT, $0-56 + MOVQ lit_len+32(FP), DX + MOVQ dst_base+0(FP), AX + MOVQ lit_base+24(FP), CX + TESTQ DX, DX + JZ emit_literal_end_standalone_skip + MOVL DX, BX + LEAL -1(DX), SI + CMPL SI, $0x3c + JB one_byte_standalone + CMPL SI, $0x00000100 + JB two_bytes_standalone + CMPL SI, $0x00010000 + JB three_bytes_standalone + CMPL SI, $0x01000000 + JB four_bytes_standalone + MOVB $0xfc, (AX) + MOVL SI, 1(AX) + ADDQ $0x05, BX + ADDQ $0x05, AX + JMP memmove_long_standalone + +four_bytes_standalone: + MOVL SI, DI + SHRL $0x10, DI + MOVB $0xf8, (AX) + MOVW SI, 1(AX) + MOVB DI, 3(AX) + ADDQ $0x04, BX + ADDQ $0x04, AX + JMP memmove_long_standalone + +three_bytes_standalone: + MOVB $0xf4, (AX) + MOVW SI, 1(AX) + ADDQ $0x03, BX + ADDQ $0x03, AX + JMP memmove_long_standalone + +two_bytes_standalone: + MOVB $0xf0, (AX) + MOVB SI, 1(AX) + ADDQ $0x02, BX + ADDQ $0x02, AX + CMPL SI, $0x40 + JB memmove_standalone + JMP memmove_long_standalone + +one_byte_standalone: + SHLB $0x02, SI + MOVB SI, (AX) + ADDQ $0x01, BX + ADDQ $0x01, AX + +memmove_standalone: + // genMemMoveShort + CMPQ DX, $0x03 + JB emit_lit_memmove_standalone_memmove_move_1or2 + JE emit_lit_memmove_standalone_memmove_move_3 + CMPQ DX, $0x08 + JB emit_lit_memmove_standalone_memmove_move_4through7 + CMPQ DX, $0x10 + JBE emit_lit_memmove_standalone_memmove_move_8through16 + CMPQ DX, $0x20 + JBE emit_lit_memmove_standalone_memmove_move_17through32 + JMP emit_lit_memmove_standalone_memmove_move_33through64 + +emit_lit_memmove_standalone_memmove_move_1or2: + MOVB (CX), SI + MOVB -1(CX)(DX*1), CL + MOVB SI, (AX) + MOVB CL, -1(AX)(DX*1) + JMP emit_literal_end_standalone + +emit_lit_memmove_standalone_memmove_move_3: + MOVW (CX), SI + MOVB 2(CX), CL + MOVW SI, (AX) + MOVB CL, 2(AX) + JMP emit_literal_end_standalone + +emit_lit_memmove_standalone_memmove_move_4through7: + MOVL (CX), SI + MOVL -4(CX)(DX*1), CX + MOVL SI, (AX) + MOVL CX, -4(AX)(DX*1) + JMP emit_literal_end_standalone + +emit_lit_memmove_standalone_memmove_move_8through16: + MOVQ (CX), SI + MOVQ -8(CX)(DX*1), CX + MOVQ SI, (AX) + MOVQ CX, -8(AX)(DX*1) + JMP emit_literal_end_standalone + +emit_lit_memmove_standalone_memmove_move_17through32: + MOVOU (CX), X0 + MOVOU -16(CX)(DX*1), X1 + MOVOU X0, (AX) + MOVOU X1, -16(AX)(DX*1) + JMP emit_literal_end_standalone + +emit_lit_memmove_standalone_memmove_move_33through64: + MOVOU (CX), X0 + MOVOU 16(CX), X1 + MOVOU -32(CX)(DX*1), X2 + MOVOU -16(CX)(DX*1), X3 + MOVOU X0, (AX) + MOVOU X1, 16(AX) + MOVOU X2, -32(AX)(DX*1) + MOVOU X3, -16(AX)(DX*1) + JMP emit_literal_end_standalone + JMP emit_literal_end_standalone + 
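+// The branch ladder above encodes the literal-run header: lengths up to 60
+// pack into the tag byte itself, longer runs use tag values 60-63 followed
+// by a 1-4 byte little-endian length. In Go terms (sketch only; tagLiteral
+// is 0b00 in the Snappy format):
+//
+//	switch n := len(lit) - 1; {
+//	case n < 60:
+//		dst[0] = byte(n)<<2 | tagLiteral
+//	case n < 1<<8:
+//		dst[0], dst[1] = 60<<2|tagLiteral, byte(n)
+//	case n < 1<<16:
+//		dst[0] = 61<<2 | tagLiteral // then 2 length bytes
+//	case n < 1<<24:
+//		dst[0] = 62<<2 | tagLiteral // then 3 length bytes
+//	default:
+//		dst[0] = 63<<2 | tagLiteral // then 4 length bytes
+//	}
+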
+memmove_long_standalone: + // genMemMoveLong + MOVOU (CX), X0 + MOVOU 16(CX), X1 + MOVOU -32(CX)(DX*1), X2 + MOVOU -16(CX)(DX*1), X3 + MOVQ DX, DI + SHRQ $0x05, DI + MOVQ AX, SI + ANDL $0x0000001f, SI + MOVQ $0x00000040, R8 + SUBQ SI, R8 + DECQ DI + JA emit_lit_memmove_long_standalonelarge_forward_sse_loop_32 + LEAQ -32(CX)(R8*1), SI + LEAQ -32(AX)(R8*1), R9 + +emit_lit_memmove_long_standalonelarge_big_loop_back: + MOVOU (SI), X4 + MOVOU 16(SI), X5 + MOVOA X4, (R9) + MOVOA X5, 16(R9) + ADDQ $0x20, R9 + ADDQ $0x20, SI + ADDQ $0x20, R8 + DECQ DI + JNA emit_lit_memmove_long_standalonelarge_big_loop_back + +emit_lit_memmove_long_standalonelarge_forward_sse_loop_32: + MOVOU -32(CX)(R8*1), X4 + MOVOU -16(CX)(R8*1), X5 + MOVOA X4, -32(AX)(R8*1) + MOVOA X5, -16(AX)(R8*1) + ADDQ $0x20, R8 + CMPQ DX, R8 + JAE emit_lit_memmove_long_standalonelarge_forward_sse_loop_32 + MOVOU X0, (AX) + MOVOU X1, 16(AX) + MOVOU X2, -32(AX)(DX*1) + MOVOU X3, -16(AX)(DX*1) + JMP emit_literal_end_standalone + JMP emit_literal_end_standalone + +emit_literal_end_standalone_skip: + XORQ BX, BX + +emit_literal_end_standalone: + MOVQ BX, ret+48(FP) + RET + +// func emitRepeat(dst []byte, offset int, length int) int +TEXT ·emitRepeat(SB), NOSPLIT, $0-48 + XORQ BX, BX + MOVQ dst_base+0(FP), AX + MOVQ offset+24(FP), CX + MOVQ length+32(FP), DX + + // emitRepeat +emit_repeat_again_standalone: + MOVL DX, SI + LEAL -4(DX), DX + CMPL SI, $0x08 + JBE repeat_two_standalone + CMPL SI, $0x0c + JAE cant_repeat_two_offset_standalone + CMPL CX, $0x00000800 + JB repeat_two_offset_standalone + +cant_repeat_two_offset_standalone: + CMPL DX, $0x00000104 + JB repeat_three_standalone + CMPL DX, $0x00010100 + JB repeat_four_standalone + CMPL DX, $0x0100ffff + JB repeat_five_standalone + LEAL -16842747(DX), DX + MOVL $0xfffb001d, (AX) + MOVB $0xff, 4(AX) + ADDQ $0x05, AX + ADDQ $0x05, BX + JMP emit_repeat_again_standalone + +repeat_five_standalone: + LEAL -65536(DX), DX + MOVL DX, CX + MOVW $0x001d, (AX) + MOVW DX, 2(AX) + SARL $0x10, CX + MOVB CL, 4(AX) + ADDQ $0x05, BX + ADDQ $0x05, AX + JMP gen_emit_repeat_end + +repeat_four_standalone: + LEAL -256(DX), DX + MOVW $0x0019, (AX) + MOVW DX, 2(AX) + ADDQ $0x04, BX + ADDQ $0x04, AX + JMP gen_emit_repeat_end + +repeat_three_standalone: + LEAL -4(DX), DX + MOVW $0x0015, (AX) + MOVB DL, 2(AX) + ADDQ $0x03, BX + ADDQ $0x03, AX + JMP gen_emit_repeat_end + +repeat_two_standalone: + SHLL $0x02, DX + ORL $0x01, DX + MOVW DX, (AX) + ADDQ $0x02, BX + ADDQ $0x02, AX + JMP gen_emit_repeat_end + +repeat_two_offset_standalone: + XORQ SI, SI + LEAL 1(SI)(DX*4), DX + MOVB CL, 1(AX) + SARL $0x08, CX + SHLL $0x05, CX + ORL CX, DX + MOVB DL, (AX) + ADDQ $0x02, BX + ADDQ $0x02, AX + +gen_emit_repeat_end: + MOVQ BX, ret+40(FP) + RET + +// func emitCopy(dst []byte, offset int, length int) int +TEXT ·emitCopy(SB), NOSPLIT, $0-48 + XORQ BX, BX + MOVQ dst_base+0(FP), AX + MOVQ offset+24(FP), CX + MOVQ length+32(FP), DX + + // emitCopy + CMPL CX, $0x00010000 + JB two_byte_offset_standalone + CMPL DX, $0x40 + JBE four_bytes_remain_standalone + MOVB $0xff, (AX) + MOVL CX, 1(AX) + LEAL -64(DX), DX + ADDQ $0x05, BX + ADDQ $0x05, AX + CMPL DX, $0x04 + JB four_bytes_remain_standalone + + // emitRepeat +emit_repeat_again_standalone_emit_copy: + MOVL DX, SI + LEAL -4(DX), DX + CMPL SI, $0x08 + JBE repeat_two_standalone_emit_copy + CMPL SI, $0x0c + JAE cant_repeat_two_offset_standalone_emit_copy + CMPL CX, $0x00000800 + JB repeat_two_offset_standalone_emit_copy + +cant_repeat_two_offset_standalone_emit_copy: + CMPL DX, $0x00000104 + 
JB repeat_three_standalone_emit_copy + CMPL DX, $0x00010100 + JB repeat_four_standalone_emit_copy + CMPL DX, $0x0100ffff + JB repeat_five_standalone_emit_copy + LEAL -16842747(DX), DX + MOVL $0xfffb001d, (AX) + MOVB $0xff, 4(AX) + ADDQ $0x05, AX + ADDQ $0x05, BX + JMP emit_repeat_again_standalone_emit_copy + +repeat_five_standalone_emit_copy: + LEAL -65536(DX), DX + MOVL DX, CX + MOVW $0x001d, (AX) + MOVW DX, 2(AX) + SARL $0x10, CX + MOVB CL, 4(AX) + ADDQ $0x05, BX + ADDQ $0x05, AX + JMP gen_emit_copy_end + +repeat_four_standalone_emit_copy: + LEAL -256(DX), DX + MOVW $0x0019, (AX) + MOVW DX, 2(AX) + ADDQ $0x04, BX + ADDQ $0x04, AX + JMP gen_emit_copy_end + +repeat_three_standalone_emit_copy: + LEAL -4(DX), DX + MOVW $0x0015, (AX) + MOVB DL, 2(AX) + ADDQ $0x03, BX + ADDQ $0x03, AX + JMP gen_emit_copy_end + +repeat_two_standalone_emit_copy: + SHLL $0x02, DX + ORL $0x01, DX + MOVW DX, (AX) + ADDQ $0x02, BX + ADDQ $0x02, AX + JMP gen_emit_copy_end + +repeat_two_offset_standalone_emit_copy: + XORQ SI, SI + LEAL 1(SI)(DX*4), DX + MOVB CL, 1(AX) + SARL $0x08, CX + SHLL $0x05, CX + ORL CX, DX + MOVB DL, (AX) + ADDQ $0x02, BX + ADDQ $0x02, AX + JMP gen_emit_copy_end + +four_bytes_remain_standalone: + TESTL DX, DX + JZ gen_emit_copy_end + XORL SI, SI + LEAL -1(SI)(DX*4), DX + MOVB DL, (AX) + MOVL CX, 1(AX) + ADDQ $0x05, BX + ADDQ $0x05, AX + JMP gen_emit_copy_end + +two_byte_offset_standalone: + CMPL DX, $0x40 + JBE two_byte_offset_short_standalone + CMPL CX, $0x00000800 + JAE long_offset_short_standalone + MOVL $0x00000001, SI + LEAL 16(SI), SI + MOVB CL, 1(AX) + MOVL CX, DI + SHRL $0x08, DI + SHLL $0x05, DI + ORL DI, SI + MOVB SI, (AX) + ADDQ $0x02, BX + ADDQ $0x02, AX + SUBL $0x08, DX + + // emitRepeat + LEAL -4(DX), DX + JMP cant_repeat_two_offset_standalone_emit_copy_short_2b + +emit_repeat_again_standalone_emit_copy_short_2b: + MOVL DX, SI + LEAL -4(DX), DX + CMPL SI, $0x08 + JBE repeat_two_standalone_emit_copy_short_2b + CMPL SI, $0x0c + JAE cant_repeat_two_offset_standalone_emit_copy_short_2b + CMPL CX, $0x00000800 + JB repeat_two_offset_standalone_emit_copy_short_2b + +cant_repeat_two_offset_standalone_emit_copy_short_2b: + CMPL DX, $0x00000104 + JB repeat_three_standalone_emit_copy_short_2b + CMPL DX, $0x00010100 + JB repeat_four_standalone_emit_copy_short_2b + CMPL DX, $0x0100ffff + JB repeat_five_standalone_emit_copy_short_2b + LEAL -16842747(DX), DX + MOVL $0xfffb001d, (AX) + MOVB $0xff, 4(AX) + ADDQ $0x05, AX + ADDQ $0x05, BX + JMP emit_repeat_again_standalone_emit_copy_short_2b + +repeat_five_standalone_emit_copy_short_2b: + LEAL -65536(DX), DX + MOVL DX, CX + MOVW $0x001d, (AX) + MOVW DX, 2(AX) + SARL $0x10, CX + MOVB CL, 4(AX) + ADDQ $0x05, BX + ADDQ $0x05, AX + JMP gen_emit_copy_end + +repeat_four_standalone_emit_copy_short_2b: + LEAL -256(DX), DX + MOVW $0x0019, (AX) + MOVW DX, 2(AX) + ADDQ $0x04, BX + ADDQ $0x04, AX + JMP gen_emit_copy_end + +repeat_three_standalone_emit_copy_short_2b: + LEAL -4(DX), DX + MOVW $0x0015, (AX) + MOVB DL, 2(AX) + ADDQ $0x03, BX + ADDQ $0x03, AX + JMP gen_emit_copy_end + +repeat_two_standalone_emit_copy_short_2b: + SHLL $0x02, DX + ORL $0x01, DX + MOVW DX, (AX) + ADDQ $0x02, BX + ADDQ $0x02, AX + JMP gen_emit_copy_end + +repeat_two_offset_standalone_emit_copy_short_2b: + XORQ SI, SI + LEAL 1(SI)(DX*4), DX + MOVB CL, 1(AX) + SARL $0x08, CX + SHLL $0x05, CX + ORL CX, DX + MOVB DL, (AX) + ADDQ $0x02, BX + ADDQ $0x02, AX + JMP gen_emit_copy_end + +long_offset_short_standalone: + MOVB $0xee, (AX) + MOVW CX, 1(AX) + LEAL -60(DX), DX + ADDQ $0x03, AX + ADDQ 
$0x03, BX + + // emitRepeat +emit_repeat_again_standalone_emit_copy_short: + MOVL DX, SI + LEAL -4(DX), DX + CMPL SI, $0x08 + JBE repeat_two_standalone_emit_copy_short + CMPL SI, $0x0c + JAE cant_repeat_two_offset_standalone_emit_copy_short + CMPL CX, $0x00000800 + JB repeat_two_offset_standalone_emit_copy_short + +cant_repeat_two_offset_standalone_emit_copy_short: + CMPL DX, $0x00000104 + JB repeat_three_standalone_emit_copy_short + CMPL DX, $0x00010100 + JB repeat_four_standalone_emit_copy_short + CMPL DX, $0x0100ffff + JB repeat_five_standalone_emit_copy_short + LEAL -16842747(DX), DX + MOVL $0xfffb001d, (AX) + MOVB $0xff, 4(AX) + ADDQ $0x05, AX + ADDQ $0x05, BX + JMP emit_repeat_again_standalone_emit_copy_short + +repeat_five_standalone_emit_copy_short: + LEAL -65536(DX), DX + MOVL DX, CX + MOVW $0x001d, (AX) + MOVW DX, 2(AX) + SARL $0x10, CX + MOVB CL, 4(AX) + ADDQ $0x05, BX + ADDQ $0x05, AX + JMP gen_emit_copy_end + +repeat_four_standalone_emit_copy_short: + LEAL -256(DX), DX + MOVW $0x0019, (AX) + MOVW DX, 2(AX) + ADDQ $0x04, BX + ADDQ $0x04, AX + JMP gen_emit_copy_end + +repeat_three_standalone_emit_copy_short: + LEAL -4(DX), DX + MOVW $0x0015, (AX) + MOVB DL, 2(AX) + ADDQ $0x03, BX + ADDQ $0x03, AX + JMP gen_emit_copy_end + +repeat_two_standalone_emit_copy_short: + SHLL $0x02, DX + ORL $0x01, DX + MOVW DX, (AX) + ADDQ $0x02, BX + ADDQ $0x02, AX + JMP gen_emit_copy_end + +repeat_two_offset_standalone_emit_copy_short: + XORQ SI, SI + LEAL 1(SI)(DX*4), DX + MOVB CL, 1(AX) + SARL $0x08, CX + SHLL $0x05, CX + ORL CX, DX + MOVB DL, (AX) + ADDQ $0x02, BX + ADDQ $0x02, AX + JMP gen_emit_copy_end + +two_byte_offset_short_standalone: + MOVL DX, SI + SHLL $0x02, SI + CMPL DX, $0x0c + JAE emit_copy_three_standalone + CMPL CX, $0x00000800 + JAE emit_copy_three_standalone + LEAL -15(SI), SI + MOVB CL, 1(AX) + SHRL $0x08, CX + SHLL $0x05, CX + ORL CX, SI + MOVB SI, (AX) + ADDQ $0x02, BX + ADDQ $0x02, AX + JMP gen_emit_copy_end + +emit_copy_three_standalone: + LEAL -2(SI), SI + MOVB SI, (AX) + MOVW CX, 1(AX) + ADDQ $0x03, BX + ADDQ $0x03, AX + +gen_emit_copy_end: + MOVQ BX, ret+40(FP) + RET + +// func emitCopyNoRepeat(dst []byte, offset int, length int) int +TEXT ·emitCopyNoRepeat(SB), NOSPLIT, $0-48 + XORQ BX, BX + MOVQ dst_base+0(FP), AX + MOVQ offset+24(FP), CX + MOVQ length+32(FP), DX + + // emitCopy + CMPL CX, $0x00010000 + JB two_byte_offset_standalone_snappy + +four_bytes_loop_back_standalone_snappy: + CMPL DX, $0x40 + JBE four_bytes_remain_standalone_snappy + MOVB $0xff, (AX) + MOVL CX, 1(AX) + LEAL -64(DX), DX + ADDQ $0x05, BX + ADDQ $0x05, AX + CMPL DX, $0x04 + JB four_bytes_remain_standalone_snappy + JMP four_bytes_loop_back_standalone_snappy + +four_bytes_remain_standalone_snappy: + TESTL DX, DX + JZ gen_emit_copy_end_snappy + XORL SI, SI + LEAL -1(SI)(DX*4), DX + MOVB DL, (AX) + MOVL CX, 1(AX) + ADDQ $0x05, BX + ADDQ $0x05, AX + JMP gen_emit_copy_end_snappy + +two_byte_offset_standalone_snappy: + CMPL DX, $0x40 + JBE two_byte_offset_short_standalone_snappy + MOVB $0xee, (AX) + MOVW CX, 1(AX) + LEAL -60(DX), DX + ADDQ $0x03, AX + ADDQ $0x03, BX + JMP two_byte_offset_standalone_snappy + +two_byte_offset_short_standalone_snappy: + MOVL DX, SI + SHLL $0x02, SI + CMPL DX, $0x0c + JAE emit_copy_three_standalone_snappy + CMPL CX, $0x00000800 + JAE emit_copy_three_standalone_snappy + LEAL -15(SI), SI + MOVB CL, 1(AX) + SHRL $0x08, CX + SHLL $0x05, CX + ORL CX, SI + MOVB SI, (AX) + ADDQ $0x02, BX + ADDQ $0x02, AX + JMP gen_emit_copy_end_snappy + +emit_copy_three_standalone_snappy: + LEAL 
-2(SI), SI + MOVB SI, (AX) + MOVW CX, 1(AX) + ADDQ $0x03, BX + ADDQ $0x03, AX + +gen_emit_copy_end_snappy: + MOVQ BX, ret+40(FP) + RET + +// func matchLen(a []byte, b []byte) int +// Requires: BMI +TEXT ·matchLen(SB), NOSPLIT, $0-56 + MOVQ a_base+0(FP), AX + MOVQ b_base+24(FP), CX + MOVQ a_len+8(FP), DX + + // matchLen + XORL SI, SI + +matchlen_loopback_16_standalone: + CMPL DX, $0x10 + JB matchlen_match8_standalone + MOVQ (AX)(SI*1), BX + MOVQ 8(AX)(SI*1), DI + XORQ (CX)(SI*1), BX + JNZ matchlen_bsf_8_standalone + XORQ 8(CX)(SI*1), DI + JNZ matchlen_bsf_16standalone + LEAL -16(DX), DX + LEAL 16(SI), SI + JMP matchlen_loopback_16_standalone + +matchlen_bsf_16standalone: +#ifdef GOAMD64_v3 + TZCNTQ DI, DI + +#else + BSFQ DI, DI + +#endif + SARQ $0x03, DI + LEAL 8(SI)(DI*1), SI + JMP gen_match_len_end + +matchlen_match8_standalone: + CMPL DX, $0x08 + JB matchlen_match4_standalone + MOVQ (AX)(SI*1), BX + XORQ (CX)(SI*1), BX + JNZ matchlen_bsf_8_standalone + LEAL -8(DX), DX + LEAL 8(SI), SI + JMP matchlen_match4_standalone + +matchlen_bsf_8_standalone: +#ifdef GOAMD64_v3 + TZCNTQ BX, BX + +#else + BSFQ BX, BX + +#endif + SARQ $0x03, BX + LEAL (SI)(BX*1), SI + JMP gen_match_len_end + +matchlen_match4_standalone: + CMPL DX, $0x04 + JB matchlen_match2_standalone + MOVL (AX)(SI*1), BX + CMPL (CX)(SI*1), BX + JNE matchlen_match2_standalone + LEAL -4(DX), DX + LEAL 4(SI), SI + +matchlen_match2_standalone: + CMPL DX, $0x01 + JE matchlen_match1_standalone + JB gen_match_len_end + MOVW (AX)(SI*1), BX + CMPW (CX)(SI*1), BX + JNE matchlen_match1_standalone + LEAL 2(SI), SI + SUBL $0x02, DX + JZ gen_match_len_end + +matchlen_match1_standalone: + MOVB (AX)(SI*1), BL + CMPB (CX)(SI*1), BL + JNE gen_match_len_end + LEAL 1(SI), SI + +gen_match_len_end: + MOVQ SI, ret+48(FP) + RET + +// func cvtLZ4BlockAsm(dst []byte, src []byte) (uncompressed int, dstUsed int) +// Requires: SSE2 +TEXT ·cvtLZ4BlockAsm(SB), NOSPLIT, $0-64 + XORQ SI, SI + MOVQ dst_base+0(FP), AX + MOVQ dst_len+8(FP), CX + MOVQ src_base+24(FP), DX + MOVQ src_len+32(FP), BX + LEAQ (DX)(BX*1), BX + LEAQ -8(AX)(CX*1), CX + XORQ DI, DI + +lz4_s2_loop: + CMPQ DX, BX + JAE lz4_s2_corrupt + CMPQ AX, CX + JAE lz4_s2_dstfull + MOVBQZX (DX), R8 + MOVQ R8, R9 + MOVQ R8, R10 + SHRQ $0x04, R9 + ANDQ $0x0f, R10 + CMPQ R8, $0xf0 + JB lz4_s2_ll_end + +lz4_s2_ll_loop: + INCQ DX + CMPQ DX, BX + JAE lz4_s2_corrupt + MOVBQZX (DX), R8 + ADDQ R8, R9 + CMPQ R8, $0xff + JEQ lz4_s2_ll_loop + +lz4_s2_ll_end: + LEAQ (DX)(R9*1), R8 + ADDQ $0x04, R10 + CMPQ R8, BX + JAE lz4_s2_corrupt + INCQ DX + INCQ R8 + TESTQ R9, R9 + JZ lz4_s2_lits_done + LEAQ (AX)(R9*1), R11 + CMPQ R11, CX + JAE lz4_s2_dstfull + ADDQ R9, SI + LEAL -1(R9), R11 + CMPL R11, $0x3c + JB one_byte_lz4_s2 + CMPL R11, $0x00000100 + JB two_bytes_lz4_s2 + CMPL R11, $0x00010000 + JB three_bytes_lz4_s2 + CMPL R11, $0x01000000 + JB four_bytes_lz4_s2 + MOVB $0xfc, (AX) + MOVL R11, 1(AX) + ADDQ $0x05, AX + JMP memmove_long_lz4_s2 + +four_bytes_lz4_s2: + MOVL R11, R12 + SHRL $0x10, R12 + MOVB $0xf8, (AX) + MOVW R11, 1(AX) + MOVB R12, 3(AX) + ADDQ $0x04, AX + JMP memmove_long_lz4_s2 + +three_bytes_lz4_s2: + MOVB $0xf4, (AX) + MOVW R11, 1(AX) + ADDQ $0x03, AX + JMP memmove_long_lz4_s2 + +two_bytes_lz4_s2: + MOVB $0xf0, (AX) + MOVB R11, 1(AX) + ADDQ $0x02, AX + CMPL R11, $0x40 + JB memmove_lz4_s2 + JMP memmove_long_lz4_s2 + +one_byte_lz4_s2: + SHLB $0x02, R11 + MOVB R11, (AX) + ADDQ $0x01, AX + +memmove_lz4_s2: + LEAQ (AX)(R9*1), R11 + + // genMemMoveShort + CMPQ R9, $0x08 + JBE emit_lit_memmove_lz4_s2_memmove_move_8 + 
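The genMemMoveShort dispatch starting above picks a fixed-width copy by size class; the 8..16-byte class uses the overlapping-ends trick (load the first and the last 8 bytes, then store both), which covers every length in the range with exactly two loads and two stores. A Go sketch of that case, assuming encoding/binary is imported (hypothetical helper):

    // copy8through16 copies n bytes (8 <= n <= 16) with two possibly
    // overlapping 8-byte moves, mirroring the *_memmove_move_8through16 blocks.
    func copy8through16(dst, src []byte, n int) {
            lo := binary.LittleEndian.Uint64(src)
            hi := binary.LittleEndian.Uint64(src[n-8:])
            binary.LittleEndian.PutUint64(dst, lo)
            binary.LittleEndian.PutUint64(dst[n-8:], hi)
    }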
CMPQ R9, $0x10 + JBE emit_lit_memmove_lz4_s2_memmove_move_8through16 + CMPQ R9, $0x20 + JBE emit_lit_memmove_lz4_s2_memmove_move_17through32 + JMP emit_lit_memmove_lz4_s2_memmove_move_33through64 + +emit_lit_memmove_lz4_s2_memmove_move_8: + MOVQ (DX), R12 + MOVQ R12, (AX) + JMP memmove_end_copy_lz4_s2 + +emit_lit_memmove_lz4_s2_memmove_move_8through16: + MOVQ (DX), R12 + MOVQ -8(DX)(R9*1), DX + MOVQ R12, (AX) + MOVQ DX, -8(AX)(R9*1) + JMP memmove_end_copy_lz4_s2 + +emit_lit_memmove_lz4_s2_memmove_move_17through32: + MOVOU (DX), X0 + MOVOU -16(DX)(R9*1), X1 + MOVOU X0, (AX) + MOVOU X1, -16(AX)(R9*1) + JMP memmove_end_copy_lz4_s2 + +emit_lit_memmove_lz4_s2_memmove_move_33through64: + MOVOU (DX), X0 + MOVOU 16(DX), X1 + MOVOU -32(DX)(R9*1), X2 + MOVOU -16(DX)(R9*1), X3 + MOVOU X0, (AX) + MOVOU X1, 16(AX) + MOVOU X2, -32(AX)(R9*1) + MOVOU X3, -16(AX)(R9*1) + +memmove_end_copy_lz4_s2: + MOVQ R11, AX + JMP lz4_s2_lits_emit_done + +memmove_long_lz4_s2: + LEAQ (AX)(R9*1), R11 + + // genMemMoveLong + MOVOU (DX), X0 + MOVOU 16(DX), X1 + MOVOU -32(DX)(R9*1), X2 + MOVOU -16(DX)(R9*1), X3 + MOVQ R9, R13 + SHRQ $0x05, R13 + MOVQ AX, R12 + ANDL $0x0000001f, R12 + MOVQ $0x00000040, R14 + SUBQ R12, R14 + DECQ R13 + JA emit_lit_memmove_long_lz4_s2large_forward_sse_loop_32 + LEAQ -32(DX)(R14*1), R12 + LEAQ -32(AX)(R14*1), R15 + +emit_lit_memmove_long_lz4_s2large_big_loop_back: + MOVOU (R12), X4 + MOVOU 16(R12), X5 + MOVOA X4, (R15) + MOVOA X5, 16(R15) + ADDQ $0x20, R15 + ADDQ $0x20, R12 + ADDQ $0x20, R14 + DECQ R13 + JNA emit_lit_memmove_long_lz4_s2large_big_loop_back + +emit_lit_memmove_long_lz4_s2large_forward_sse_loop_32: + MOVOU -32(DX)(R14*1), X4 + MOVOU -16(DX)(R14*1), X5 + MOVOA X4, -32(AX)(R14*1) + MOVOA X5, -16(AX)(R14*1) + ADDQ $0x20, R14 + CMPQ R9, R14 + JAE emit_lit_memmove_long_lz4_s2large_forward_sse_loop_32 + MOVOU X0, (AX) + MOVOU X1, 16(AX) + MOVOU X2, -32(AX)(R9*1) + MOVOU X3, -16(AX)(R9*1) + MOVQ R11, AX + +lz4_s2_lits_emit_done: + MOVQ R8, DX + +lz4_s2_lits_done: + CMPQ DX, BX + JNE lz4_s2_match + CMPQ R10, $0x04 + JEQ lz4_s2_done + JMP lz4_s2_corrupt + +lz4_s2_match: + LEAQ 2(DX), R8 + CMPQ R8, BX + JAE lz4_s2_corrupt + MOVWQZX (DX), R9 + MOVQ R8, DX + TESTQ R9, R9 + JZ lz4_s2_corrupt + CMPQ R9, SI + JA lz4_s2_corrupt + CMPQ R10, $0x13 + JNE lz4_s2_ml_done + +lz4_s2_ml_loop: + MOVBQZX (DX), R8 + INCQ DX + ADDQ R8, R10 + CMPQ DX, BX + JAE lz4_s2_corrupt + CMPQ R8, $0xff + JEQ lz4_s2_ml_loop + +lz4_s2_ml_done: + ADDQ R10, SI + CMPQ R9, DI + JNE lz4_s2_docopy + + // emitRepeat +emit_repeat_again_lz4_s2: + MOVL R10, R8 + LEAL -4(R10), R10 + CMPL R8, $0x08 + JBE repeat_two_lz4_s2 + CMPL R8, $0x0c + JAE cant_repeat_two_offset_lz4_s2 + CMPL R9, $0x00000800 + JB repeat_two_offset_lz4_s2 + +cant_repeat_two_offset_lz4_s2: + CMPL R10, $0x00000104 + JB repeat_three_lz4_s2 + CMPL R10, $0x00010100 + JB repeat_four_lz4_s2 + CMPL R10, $0x0100ffff + JB repeat_five_lz4_s2 + LEAL -16842747(R10), R10 + MOVL $0xfffb001d, (AX) + MOVB $0xff, 4(AX) + ADDQ $0x05, AX + JMP emit_repeat_again_lz4_s2 + +repeat_five_lz4_s2: + LEAL -65536(R10), R10 + MOVL R10, R9 + MOVW $0x001d, (AX) + MOVW R10, 2(AX) + SARL $0x10, R9 + MOVB R9, 4(AX) + ADDQ $0x05, AX + JMP lz4_s2_loop + +repeat_four_lz4_s2: + LEAL -256(R10), R10 + MOVW $0x0019, (AX) + MOVW R10, 2(AX) + ADDQ $0x04, AX + JMP lz4_s2_loop + +repeat_three_lz4_s2: + LEAL -4(R10), R10 + MOVW $0x0015, (AX) + MOVB R10, 2(AX) + ADDQ $0x03, AX + JMP lz4_s2_loop + +repeat_two_lz4_s2: + SHLL $0x02, R10 + ORL $0x01, R10 + MOVW R10, (AX) + ADDQ $0x02, AX + JMP lz4_s2_loop + 
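For reference, the token parsing performed at the top of lz4_s2_loop above follows the LZ4 block format: the high nibble of the token is the literal length, the low nibble plus 4 is the match length, and a nibble of 15 is extended by 0xff continuation bytes (the match-length extension happens only after the 2-byte offset is read, at lz4_s2_ml_loop). A hedged Go sketch, bounds checks omitted:

    // readLZ4Token decodes one LZ4 sequence token starting at src[s] and returns
    // the literal length, the (unextended) match length, and the next position.
    func readLZ4Token(src []byte, s int) (ll, ml, next int) {
            t := src[s]
            s++
            ll, ml = int(t>>4), int(t&0xf)+4
            if t >= 0xf0 { // literal length nibble is 15: read continuation bytes
                    for src[s] == 0xff {
                            ll += 255
                            s++
                    }
                    ll += int(src[s])
                    s++
            }
            return ll, ml, s
    }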
+repeat_two_offset_lz4_s2: + XORQ R8, R8 + LEAL 1(R8)(R10*4), R10 + MOVB R9, 1(AX) + SARL $0x08, R9 + SHLL $0x05, R9 + ORL R9, R10 + MOVB R10, (AX) + ADDQ $0x02, AX + JMP lz4_s2_loop + +lz4_s2_docopy: + MOVQ R9, DI + + // emitCopy + CMPL R10, $0x40 + JBE two_byte_offset_short_lz4_s2 + CMPL R9, $0x00000800 + JAE long_offset_short_lz4_s2 + MOVL $0x00000001, R8 + LEAL 16(R8), R8 + MOVB R9, 1(AX) + MOVL R9, R11 + SHRL $0x08, R11 + SHLL $0x05, R11 + ORL R11, R8 + MOVB R8, (AX) + ADDQ $0x02, AX + SUBL $0x08, R10 + + // emitRepeat + LEAL -4(R10), R10 + JMP cant_repeat_two_offset_lz4_s2_emit_copy_short_2b + +emit_repeat_again_lz4_s2_emit_copy_short_2b: + MOVL R10, R8 + LEAL -4(R10), R10 + CMPL R8, $0x08 + JBE repeat_two_lz4_s2_emit_copy_short_2b + CMPL R8, $0x0c + JAE cant_repeat_two_offset_lz4_s2_emit_copy_short_2b + CMPL R9, $0x00000800 + JB repeat_two_offset_lz4_s2_emit_copy_short_2b + +cant_repeat_two_offset_lz4_s2_emit_copy_short_2b: + CMPL R10, $0x00000104 + JB repeat_three_lz4_s2_emit_copy_short_2b + CMPL R10, $0x00010100 + JB repeat_four_lz4_s2_emit_copy_short_2b + CMPL R10, $0x0100ffff + JB repeat_five_lz4_s2_emit_copy_short_2b + LEAL -16842747(R10), R10 + MOVL $0xfffb001d, (AX) + MOVB $0xff, 4(AX) + ADDQ $0x05, AX + JMP emit_repeat_again_lz4_s2_emit_copy_short_2b + +repeat_five_lz4_s2_emit_copy_short_2b: + LEAL -65536(R10), R10 + MOVL R10, R9 + MOVW $0x001d, (AX) + MOVW R10, 2(AX) + SARL $0x10, R9 + MOVB R9, 4(AX) + ADDQ $0x05, AX + JMP lz4_s2_loop + +repeat_four_lz4_s2_emit_copy_short_2b: + LEAL -256(R10), R10 + MOVW $0x0019, (AX) + MOVW R10, 2(AX) + ADDQ $0x04, AX + JMP lz4_s2_loop + +repeat_three_lz4_s2_emit_copy_short_2b: + LEAL -4(R10), R10 + MOVW $0x0015, (AX) + MOVB R10, 2(AX) + ADDQ $0x03, AX + JMP lz4_s2_loop + +repeat_two_lz4_s2_emit_copy_short_2b: + SHLL $0x02, R10 + ORL $0x01, R10 + MOVW R10, (AX) + ADDQ $0x02, AX + JMP lz4_s2_loop + +repeat_two_offset_lz4_s2_emit_copy_short_2b: + XORQ R8, R8 + LEAL 1(R8)(R10*4), R10 + MOVB R9, 1(AX) + SARL $0x08, R9 + SHLL $0x05, R9 + ORL R9, R10 + MOVB R10, (AX) + ADDQ $0x02, AX + JMP lz4_s2_loop + +long_offset_short_lz4_s2: + MOVB $0xee, (AX) + MOVW R9, 1(AX) + LEAL -60(R10), R10 + ADDQ $0x03, AX + + // emitRepeat +emit_repeat_again_lz4_s2_emit_copy_short: + MOVL R10, R8 + LEAL -4(R10), R10 + CMPL R8, $0x08 + JBE repeat_two_lz4_s2_emit_copy_short + CMPL R8, $0x0c + JAE cant_repeat_two_offset_lz4_s2_emit_copy_short + CMPL R9, $0x00000800 + JB repeat_two_offset_lz4_s2_emit_copy_short + +cant_repeat_two_offset_lz4_s2_emit_copy_short: + CMPL R10, $0x00000104 + JB repeat_three_lz4_s2_emit_copy_short + CMPL R10, $0x00010100 + JB repeat_four_lz4_s2_emit_copy_short + CMPL R10, $0x0100ffff + JB repeat_five_lz4_s2_emit_copy_short + LEAL -16842747(R10), R10 + MOVL $0xfffb001d, (AX) + MOVB $0xff, 4(AX) + ADDQ $0x05, AX + JMP emit_repeat_again_lz4_s2_emit_copy_short + +repeat_five_lz4_s2_emit_copy_short: + LEAL -65536(R10), R10 + MOVL R10, R9 + MOVW $0x001d, (AX) + MOVW R10, 2(AX) + SARL $0x10, R9 + MOVB R9, 4(AX) + ADDQ $0x05, AX + JMP lz4_s2_loop + +repeat_four_lz4_s2_emit_copy_short: + LEAL -256(R10), R10 + MOVW $0x0019, (AX) + MOVW R10, 2(AX) + ADDQ $0x04, AX + JMP lz4_s2_loop + +repeat_three_lz4_s2_emit_copy_short: + LEAL -4(R10), R10 + MOVW $0x0015, (AX) + MOVB R10, 2(AX) + ADDQ $0x03, AX + JMP lz4_s2_loop + +repeat_two_lz4_s2_emit_copy_short: + SHLL $0x02, R10 + ORL $0x01, R10 + MOVW R10, (AX) + ADDQ $0x02, AX + JMP lz4_s2_loop + +repeat_two_offset_lz4_s2_emit_copy_short: + XORQ R8, R8 + LEAL 1(R8)(R10*4), R10 + MOVB R9, 1(AX) + SARL $0x08, R9 + 
SHLL $0x05, R9 + ORL R9, R10 + MOVB R10, (AX) + ADDQ $0x02, AX + JMP lz4_s2_loop + +two_byte_offset_short_lz4_s2: + MOVL R10, R8 + SHLL $0x02, R8 + CMPL R10, $0x0c + JAE emit_copy_three_lz4_s2 + CMPL R9, $0x00000800 + JAE emit_copy_three_lz4_s2 + LEAL -15(R8), R8 + MOVB R9, 1(AX) + SHRL $0x08, R9 + SHLL $0x05, R9 + ORL R9, R8 + MOVB R8, (AX) + ADDQ $0x02, AX + JMP lz4_s2_loop + +emit_copy_three_lz4_s2: + LEAL -2(R8), R8 + MOVB R8, (AX) + MOVW R9, 1(AX) + ADDQ $0x03, AX + JMP lz4_s2_loop + +lz4_s2_done: + MOVQ dst_base+0(FP), CX + SUBQ CX, AX + MOVQ SI, uncompressed+48(FP) + MOVQ AX, dstUsed+56(FP) + RET + +lz4_s2_corrupt: + XORQ AX, AX + LEAQ -1(AX), SI + MOVQ SI, uncompressed+48(FP) + RET + +lz4_s2_dstfull: + XORQ AX, AX + LEAQ -2(AX), SI + MOVQ SI, uncompressed+48(FP) + RET + +// func cvtLZ4sBlockAsm(dst []byte, src []byte) (uncompressed int, dstUsed int) +// Requires: SSE2 +TEXT ·cvtLZ4sBlockAsm(SB), NOSPLIT, $0-64 + XORQ SI, SI + MOVQ dst_base+0(FP), AX + MOVQ dst_len+8(FP), CX + MOVQ src_base+24(FP), DX + MOVQ src_len+32(FP), BX + LEAQ (DX)(BX*1), BX + LEAQ -8(AX)(CX*1), CX + XORQ DI, DI + +lz4s_s2_loop: + CMPQ DX, BX + JAE lz4s_s2_corrupt + CMPQ AX, CX + JAE lz4s_s2_dstfull + MOVBQZX (DX), R8 + MOVQ R8, R9 + MOVQ R8, R10 + SHRQ $0x04, R9 + ANDQ $0x0f, R10 + CMPQ R8, $0xf0 + JB lz4s_s2_ll_end + +lz4s_s2_ll_loop: + INCQ DX + CMPQ DX, BX + JAE lz4s_s2_corrupt + MOVBQZX (DX), R8 + ADDQ R8, R9 + CMPQ R8, $0xff + JEQ lz4s_s2_ll_loop + +lz4s_s2_ll_end: + LEAQ (DX)(R9*1), R8 + ADDQ $0x03, R10 + CMPQ R8, BX + JAE lz4s_s2_corrupt + INCQ DX + INCQ R8 + TESTQ R9, R9 + JZ lz4s_s2_lits_done + LEAQ (AX)(R9*1), R11 + CMPQ R11, CX + JAE lz4s_s2_dstfull + ADDQ R9, SI + LEAL -1(R9), R11 + CMPL R11, $0x3c + JB one_byte_lz4s_s2 + CMPL R11, $0x00000100 + JB two_bytes_lz4s_s2 + CMPL R11, $0x00010000 + JB three_bytes_lz4s_s2 + CMPL R11, $0x01000000 + JB four_bytes_lz4s_s2 + MOVB $0xfc, (AX) + MOVL R11, 1(AX) + ADDQ $0x05, AX + JMP memmove_long_lz4s_s2 + +four_bytes_lz4s_s2: + MOVL R11, R12 + SHRL $0x10, R12 + MOVB $0xf8, (AX) + MOVW R11, 1(AX) + MOVB R12, 3(AX) + ADDQ $0x04, AX + JMP memmove_long_lz4s_s2 + +three_bytes_lz4s_s2: + MOVB $0xf4, (AX) + MOVW R11, 1(AX) + ADDQ $0x03, AX + JMP memmove_long_lz4s_s2 + +two_bytes_lz4s_s2: + MOVB $0xf0, (AX) + MOVB R11, 1(AX) + ADDQ $0x02, AX + CMPL R11, $0x40 + JB memmove_lz4s_s2 + JMP memmove_long_lz4s_s2 + +one_byte_lz4s_s2: + SHLB $0x02, R11 + MOVB R11, (AX) + ADDQ $0x01, AX + +memmove_lz4s_s2: + LEAQ (AX)(R9*1), R11 + + // genMemMoveShort + CMPQ R9, $0x08 + JBE emit_lit_memmove_lz4s_s2_memmove_move_8 + CMPQ R9, $0x10 + JBE emit_lit_memmove_lz4s_s2_memmove_move_8through16 + CMPQ R9, $0x20 + JBE emit_lit_memmove_lz4s_s2_memmove_move_17through32 + JMP emit_lit_memmove_lz4s_s2_memmove_move_33through64 + +emit_lit_memmove_lz4s_s2_memmove_move_8: + MOVQ (DX), R12 + MOVQ R12, (AX) + JMP memmove_end_copy_lz4s_s2 + +emit_lit_memmove_lz4s_s2_memmove_move_8through16: + MOVQ (DX), R12 + MOVQ -8(DX)(R9*1), DX + MOVQ R12, (AX) + MOVQ DX, -8(AX)(R9*1) + JMP memmove_end_copy_lz4s_s2 + +emit_lit_memmove_lz4s_s2_memmove_move_17through32: + MOVOU (DX), X0 + MOVOU -16(DX)(R9*1), X1 + MOVOU X0, (AX) + MOVOU X1, -16(AX)(R9*1) + JMP memmove_end_copy_lz4s_s2 + +emit_lit_memmove_lz4s_s2_memmove_move_33through64: + MOVOU (DX), X0 + MOVOU 16(DX), X1 + MOVOU -32(DX)(R9*1), X2 + MOVOU -16(DX)(R9*1), X3 + MOVOU X0, (AX) + MOVOU X1, 16(AX) + MOVOU X2, -32(AX)(R9*1) + MOVOU X3, -16(AX)(R9*1) + +memmove_end_copy_lz4s_s2: + MOVQ R11, AX + JMP lz4s_s2_lits_emit_done + +memmove_long_lz4s_s2: + LEAQ 
(AX)(R9*1), R11 + + // genMemMoveLong + MOVOU (DX), X0 + MOVOU 16(DX), X1 + MOVOU -32(DX)(R9*1), X2 + MOVOU -16(DX)(R9*1), X3 + MOVQ R9, R13 + SHRQ $0x05, R13 + MOVQ AX, R12 + ANDL $0x0000001f, R12 + MOVQ $0x00000040, R14 + SUBQ R12, R14 + DECQ R13 + JA emit_lit_memmove_long_lz4s_s2large_forward_sse_loop_32 + LEAQ -32(DX)(R14*1), R12 + LEAQ -32(AX)(R14*1), R15 + +emit_lit_memmove_long_lz4s_s2large_big_loop_back: + MOVOU (R12), X4 + MOVOU 16(R12), X5 + MOVOA X4, (R15) + MOVOA X5, 16(R15) + ADDQ $0x20, R15 + ADDQ $0x20, R12 + ADDQ $0x20, R14 + DECQ R13 + JNA emit_lit_memmove_long_lz4s_s2large_big_loop_back + +emit_lit_memmove_long_lz4s_s2large_forward_sse_loop_32: + MOVOU -32(DX)(R14*1), X4 + MOVOU -16(DX)(R14*1), X5 + MOVOA X4, -32(AX)(R14*1) + MOVOA X5, -16(AX)(R14*1) + ADDQ $0x20, R14 + CMPQ R9, R14 + JAE emit_lit_memmove_long_lz4s_s2large_forward_sse_loop_32 + MOVOU X0, (AX) + MOVOU X1, 16(AX) + MOVOU X2, -32(AX)(R9*1) + MOVOU X3, -16(AX)(R9*1) + MOVQ R11, AX + +lz4s_s2_lits_emit_done: + MOVQ R8, DX + +lz4s_s2_lits_done: + CMPQ DX, BX + JNE lz4s_s2_match + CMPQ R10, $0x03 + JEQ lz4s_s2_done + JMP lz4s_s2_corrupt + +lz4s_s2_match: + CMPQ R10, $0x03 + JEQ lz4s_s2_loop + LEAQ 2(DX), R8 + CMPQ R8, BX + JAE lz4s_s2_corrupt + MOVWQZX (DX), R9 + MOVQ R8, DX + TESTQ R9, R9 + JZ lz4s_s2_corrupt + CMPQ R9, SI + JA lz4s_s2_corrupt + CMPQ R10, $0x12 + JNE lz4s_s2_ml_done + +lz4s_s2_ml_loop: + MOVBQZX (DX), R8 + INCQ DX + ADDQ R8, R10 + CMPQ DX, BX + JAE lz4s_s2_corrupt + CMPQ R8, $0xff + JEQ lz4s_s2_ml_loop + +lz4s_s2_ml_done: + ADDQ R10, SI + CMPQ R9, DI + JNE lz4s_s2_docopy + + // emitRepeat +emit_repeat_again_lz4_s2: + MOVL R10, R8 + LEAL -4(R10), R10 + CMPL R8, $0x08 + JBE repeat_two_lz4_s2 + CMPL R8, $0x0c + JAE cant_repeat_two_offset_lz4_s2 + CMPL R9, $0x00000800 + JB repeat_two_offset_lz4_s2 + +cant_repeat_two_offset_lz4_s2: + CMPL R10, $0x00000104 + JB repeat_three_lz4_s2 + CMPL R10, $0x00010100 + JB repeat_four_lz4_s2 + CMPL R10, $0x0100ffff + JB repeat_five_lz4_s2 + LEAL -16842747(R10), R10 + MOVL $0xfffb001d, (AX) + MOVB $0xff, 4(AX) + ADDQ $0x05, AX + JMP emit_repeat_again_lz4_s2 + +repeat_five_lz4_s2: + LEAL -65536(R10), R10 + MOVL R10, R9 + MOVW $0x001d, (AX) + MOVW R10, 2(AX) + SARL $0x10, R9 + MOVB R9, 4(AX) + ADDQ $0x05, AX + JMP lz4s_s2_loop + +repeat_four_lz4_s2: + LEAL -256(R10), R10 + MOVW $0x0019, (AX) + MOVW R10, 2(AX) + ADDQ $0x04, AX + JMP lz4s_s2_loop + +repeat_three_lz4_s2: + LEAL -4(R10), R10 + MOVW $0x0015, (AX) + MOVB R10, 2(AX) + ADDQ $0x03, AX + JMP lz4s_s2_loop + +repeat_two_lz4_s2: + SHLL $0x02, R10 + ORL $0x01, R10 + MOVW R10, (AX) + ADDQ $0x02, AX + JMP lz4s_s2_loop + +repeat_two_offset_lz4_s2: + XORQ R8, R8 + LEAL 1(R8)(R10*4), R10 + MOVB R9, 1(AX) + SARL $0x08, R9 + SHLL $0x05, R9 + ORL R9, R10 + MOVB R10, (AX) + ADDQ $0x02, AX + JMP lz4s_s2_loop + +lz4s_s2_docopy: + MOVQ R9, DI + + // emitCopy + CMPL R10, $0x40 + JBE two_byte_offset_short_lz4_s2 + CMPL R9, $0x00000800 + JAE long_offset_short_lz4_s2 + MOVL $0x00000001, R8 + LEAL 16(R8), R8 + MOVB R9, 1(AX) + MOVL R9, R11 + SHRL $0x08, R11 + SHLL $0x05, R11 + ORL R11, R8 + MOVB R8, (AX) + ADDQ $0x02, AX + SUBL $0x08, R10 + + // emitRepeat + LEAL -4(R10), R10 + JMP cant_repeat_two_offset_lz4_s2_emit_copy_short_2b + +emit_repeat_again_lz4_s2_emit_copy_short_2b: + MOVL R10, R8 + LEAL -4(R10), R10 + CMPL R8, $0x08 + JBE repeat_two_lz4_s2_emit_copy_short_2b + CMPL R8, $0x0c + JAE cant_repeat_two_offset_lz4_s2_emit_copy_short_2b + CMPL R9, $0x00000800 + JB repeat_two_offset_lz4_s2_emit_copy_short_2b + 
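The repeat ladder above (and its twins elsewhere in this file) emits S2 repeat codes: tagCopy1 (0x01) with a zero offset byte means "repeat the previous offset". The immediates decode as follows, with 4 already subtracted from the length at the top of the ladder (this matches the inline Go version in lz4convert.go later in this patch):

    0x0015 = 5<<2|tagCopy1: 3-byte repeat, one payload byte  = length-8      (taken when length-4 < 0x104)
    0x0019 = 6<<2|tagCopy1: 4-byte repeat, two payload bytes = length-260    (taken when length-4 < 0x10100)
    0x001d = 7<<2|tagCopy1: 5-byte repeat, three payload bytes = length-65540 (taken when length-4 < 0x100ffff)
    0xfffb001d + 0xff:      maximal 5-byte repeat; 16842747 (= 0x100ffff - 4) is subtracted and the ladder loops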
+cant_repeat_two_offset_lz4_s2_emit_copy_short_2b: + CMPL R10, $0x00000104 + JB repeat_three_lz4_s2_emit_copy_short_2b + CMPL R10, $0x00010100 + JB repeat_four_lz4_s2_emit_copy_short_2b + CMPL R10, $0x0100ffff + JB repeat_five_lz4_s2_emit_copy_short_2b + LEAL -16842747(R10), R10 + MOVL $0xfffb001d, (AX) + MOVB $0xff, 4(AX) + ADDQ $0x05, AX + JMP emit_repeat_again_lz4_s2_emit_copy_short_2b + +repeat_five_lz4_s2_emit_copy_short_2b: + LEAL -65536(R10), R10 + MOVL R10, R9 + MOVW $0x001d, (AX) + MOVW R10, 2(AX) + SARL $0x10, R9 + MOVB R9, 4(AX) + ADDQ $0x05, AX + JMP lz4s_s2_loop + +repeat_four_lz4_s2_emit_copy_short_2b: + LEAL -256(R10), R10 + MOVW $0x0019, (AX) + MOVW R10, 2(AX) + ADDQ $0x04, AX + JMP lz4s_s2_loop + +repeat_three_lz4_s2_emit_copy_short_2b: + LEAL -4(R10), R10 + MOVW $0x0015, (AX) + MOVB R10, 2(AX) + ADDQ $0x03, AX + JMP lz4s_s2_loop + +repeat_two_lz4_s2_emit_copy_short_2b: + SHLL $0x02, R10 + ORL $0x01, R10 + MOVW R10, (AX) + ADDQ $0x02, AX + JMP lz4s_s2_loop + +repeat_two_offset_lz4_s2_emit_copy_short_2b: + XORQ R8, R8 + LEAL 1(R8)(R10*4), R10 + MOVB R9, 1(AX) + SARL $0x08, R9 + SHLL $0x05, R9 + ORL R9, R10 + MOVB R10, (AX) + ADDQ $0x02, AX + JMP lz4s_s2_loop + +long_offset_short_lz4_s2: + MOVB $0xee, (AX) + MOVW R9, 1(AX) + LEAL -60(R10), R10 + ADDQ $0x03, AX + + // emitRepeat +emit_repeat_again_lz4_s2_emit_copy_short: + MOVL R10, R8 + LEAL -4(R10), R10 + CMPL R8, $0x08 + JBE repeat_two_lz4_s2_emit_copy_short + CMPL R8, $0x0c + JAE cant_repeat_two_offset_lz4_s2_emit_copy_short + CMPL R9, $0x00000800 + JB repeat_two_offset_lz4_s2_emit_copy_short + +cant_repeat_two_offset_lz4_s2_emit_copy_short: + CMPL R10, $0x00000104 + JB repeat_three_lz4_s2_emit_copy_short + CMPL R10, $0x00010100 + JB repeat_four_lz4_s2_emit_copy_short + CMPL R10, $0x0100ffff + JB repeat_five_lz4_s2_emit_copy_short + LEAL -16842747(R10), R10 + MOVL $0xfffb001d, (AX) + MOVB $0xff, 4(AX) + ADDQ $0x05, AX + JMP emit_repeat_again_lz4_s2_emit_copy_short + +repeat_five_lz4_s2_emit_copy_short: + LEAL -65536(R10), R10 + MOVL R10, R9 + MOVW $0x001d, (AX) + MOVW R10, 2(AX) + SARL $0x10, R9 + MOVB R9, 4(AX) + ADDQ $0x05, AX + JMP lz4s_s2_loop + +repeat_four_lz4_s2_emit_copy_short: + LEAL -256(R10), R10 + MOVW $0x0019, (AX) + MOVW R10, 2(AX) + ADDQ $0x04, AX + JMP lz4s_s2_loop + +repeat_three_lz4_s2_emit_copy_short: + LEAL -4(R10), R10 + MOVW $0x0015, (AX) + MOVB R10, 2(AX) + ADDQ $0x03, AX + JMP lz4s_s2_loop + +repeat_two_lz4_s2_emit_copy_short: + SHLL $0x02, R10 + ORL $0x01, R10 + MOVW R10, (AX) + ADDQ $0x02, AX + JMP lz4s_s2_loop + +repeat_two_offset_lz4_s2_emit_copy_short: + XORQ R8, R8 + LEAL 1(R8)(R10*4), R10 + MOVB R9, 1(AX) + SARL $0x08, R9 + SHLL $0x05, R9 + ORL R9, R10 + MOVB R10, (AX) + ADDQ $0x02, AX + JMP lz4s_s2_loop + +two_byte_offset_short_lz4_s2: + MOVL R10, R8 + SHLL $0x02, R8 + CMPL R10, $0x0c + JAE emit_copy_three_lz4_s2 + CMPL R9, $0x00000800 + JAE emit_copy_three_lz4_s2 + LEAL -15(R8), R8 + MOVB R9, 1(AX) + SHRL $0x08, R9 + SHLL $0x05, R9 + ORL R9, R8 + MOVB R8, (AX) + ADDQ $0x02, AX + JMP lz4s_s2_loop + +emit_copy_three_lz4_s2: + LEAL -2(R8), R8 + MOVB R8, (AX) + MOVW R9, 1(AX) + ADDQ $0x03, AX + JMP lz4s_s2_loop + +lz4s_s2_done: + MOVQ dst_base+0(FP), CX + SUBQ CX, AX + MOVQ SI, uncompressed+48(FP) + MOVQ AX, dstUsed+56(FP) + RET + +lz4s_s2_corrupt: + XORQ AX, AX + LEAQ -1(AX), SI + MOVQ SI, uncompressed+48(FP) + RET + +lz4s_s2_dstfull: + XORQ AX, AX + LEAQ -2(AX), SI + MOVQ SI, uncompressed+48(FP) + RET + +// func cvtLZ4BlockSnappyAsm(dst []byte, src []byte) (uncompressed int, dstUsed int) +// 
Requires: SSE2 +TEXT ·cvtLZ4BlockSnappyAsm(SB), NOSPLIT, $0-64 + XORQ SI, SI + MOVQ dst_base+0(FP), AX + MOVQ dst_len+8(FP), CX + MOVQ src_base+24(FP), DX + MOVQ src_len+32(FP), BX + LEAQ (DX)(BX*1), BX + LEAQ -8(AX)(CX*1), CX + +lz4_snappy_loop: + CMPQ DX, BX + JAE lz4_snappy_corrupt + CMPQ AX, CX + JAE lz4_snappy_dstfull + MOVBQZX (DX), DI + MOVQ DI, R8 + MOVQ DI, R9 + SHRQ $0x04, R8 + ANDQ $0x0f, R9 + CMPQ DI, $0xf0 + JB lz4_snappy_ll_end + +lz4_snappy_ll_loop: + INCQ DX + CMPQ DX, BX + JAE lz4_snappy_corrupt + MOVBQZX (DX), DI + ADDQ DI, R8 + CMPQ DI, $0xff + JEQ lz4_snappy_ll_loop + +lz4_snappy_ll_end: + LEAQ (DX)(R8*1), DI + ADDQ $0x04, R9 + CMPQ DI, BX + JAE lz4_snappy_corrupt + INCQ DX + INCQ DI + TESTQ R8, R8 + JZ lz4_snappy_lits_done + LEAQ (AX)(R8*1), R10 + CMPQ R10, CX + JAE lz4_snappy_dstfull + ADDQ R8, SI + LEAL -1(R8), R10 + CMPL R10, $0x3c + JB one_byte_lz4_snappy + CMPL R10, $0x00000100 + JB two_bytes_lz4_snappy + CMPL R10, $0x00010000 + JB three_bytes_lz4_snappy + CMPL R10, $0x01000000 + JB four_bytes_lz4_snappy + MOVB $0xfc, (AX) + MOVL R10, 1(AX) + ADDQ $0x05, AX + JMP memmove_long_lz4_snappy + +four_bytes_lz4_snappy: + MOVL R10, R11 + SHRL $0x10, R11 + MOVB $0xf8, (AX) + MOVW R10, 1(AX) + MOVB R11, 3(AX) + ADDQ $0x04, AX + JMP memmove_long_lz4_snappy + +three_bytes_lz4_snappy: + MOVB $0xf4, (AX) + MOVW R10, 1(AX) + ADDQ $0x03, AX + JMP memmove_long_lz4_snappy + +two_bytes_lz4_snappy: + MOVB $0xf0, (AX) + MOVB R10, 1(AX) + ADDQ $0x02, AX + CMPL R10, $0x40 + JB memmove_lz4_snappy + JMP memmove_long_lz4_snappy + +one_byte_lz4_snappy: + SHLB $0x02, R10 + MOVB R10, (AX) + ADDQ $0x01, AX + +memmove_lz4_snappy: + LEAQ (AX)(R8*1), R10 + + // genMemMoveShort + CMPQ R8, $0x08 + JBE emit_lit_memmove_lz4_snappy_memmove_move_8 + CMPQ R8, $0x10 + JBE emit_lit_memmove_lz4_snappy_memmove_move_8through16 + CMPQ R8, $0x20 + JBE emit_lit_memmove_lz4_snappy_memmove_move_17through32 + JMP emit_lit_memmove_lz4_snappy_memmove_move_33through64 + +emit_lit_memmove_lz4_snappy_memmove_move_8: + MOVQ (DX), R11 + MOVQ R11, (AX) + JMP memmove_end_copy_lz4_snappy + +emit_lit_memmove_lz4_snappy_memmove_move_8through16: + MOVQ (DX), R11 + MOVQ -8(DX)(R8*1), DX + MOVQ R11, (AX) + MOVQ DX, -8(AX)(R8*1) + JMP memmove_end_copy_lz4_snappy + +emit_lit_memmove_lz4_snappy_memmove_move_17through32: + MOVOU (DX), X0 + MOVOU -16(DX)(R8*1), X1 + MOVOU X0, (AX) + MOVOU X1, -16(AX)(R8*1) + JMP memmove_end_copy_lz4_snappy + +emit_lit_memmove_lz4_snappy_memmove_move_33through64: + MOVOU (DX), X0 + MOVOU 16(DX), X1 + MOVOU -32(DX)(R8*1), X2 + MOVOU -16(DX)(R8*1), X3 + MOVOU X0, (AX) + MOVOU X1, 16(AX) + MOVOU X2, -32(AX)(R8*1) + MOVOU X3, -16(AX)(R8*1) + +memmove_end_copy_lz4_snappy: + MOVQ R10, AX + JMP lz4_snappy_lits_emit_done + +memmove_long_lz4_snappy: + LEAQ (AX)(R8*1), R10 + + // genMemMoveLong + MOVOU (DX), X0 + MOVOU 16(DX), X1 + MOVOU -32(DX)(R8*1), X2 + MOVOU -16(DX)(R8*1), X3 + MOVQ R8, R12 + SHRQ $0x05, R12 + MOVQ AX, R11 + ANDL $0x0000001f, R11 + MOVQ $0x00000040, R13 + SUBQ R11, R13 + DECQ R12 + JA emit_lit_memmove_long_lz4_snappylarge_forward_sse_loop_32 + LEAQ -32(DX)(R13*1), R11 + LEAQ -32(AX)(R13*1), R14 + +emit_lit_memmove_long_lz4_snappylarge_big_loop_back: + MOVOU (R11), X4 + MOVOU 16(R11), X5 + MOVOA X4, (R14) + MOVOA X5, 16(R14) + ADDQ $0x20, R14 + ADDQ $0x20, R11 + ADDQ $0x20, R13 + DECQ R12 + JNA emit_lit_memmove_long_lz4_snappylarge_big_loop_back + +emit_lit_memmove_long_lz4_snappylarge_forward_sse_loop_32: + MOVOU -32(DX)(R13*1), X4 + MOVOU -16(DX)(R13*1), X5 + MOVOA X4, -32(AX)(R13*1) + 
MOVOA X5, -16(AX)(R13*1) + ADDQ $0x20, R13 + CMPQ R8, R13 + JAE emit_lit_memmove_long_lz4_snappylarge_forward_sse_loop_32 + MOVOU X0, (AX) + MOVOU X1, 16(AX) + MOVOU X2, -32(AX)(R8*1) + MOVOU X3, -16(AX)(R8*1) + MOVQ R10, AX + +lz4_snappy_lits_emit_done: + MOVQ DI, DX + +lz4_snappy_lits_done: + CMPQ DX, BX + JNE lz4_snappy_match + CMPQ R9, $0x04 + JEQ lz4_snappy_done + JMP lz4_snappy_corrupt + +lz4_snappy_match: + LEAQ 2(DX), DI + CMPQ DI, BX + JAE lz4_snappy_corrupt + MOVWQZX (DX), R8 + MOVQ DI, DX + TESTQ R8, R8 + JZ lz4_snappy_corrupt + CMPQ R8, SI + JA lz4_snappy_corrupt + CMPQ R9, $0x13 + JNE lz4_snappy_ml_done + +lz4_snappy_ml_loop: + MOVBQZX (DX), DI + INCQ DX + ADDQ DI, R9 + CMPQ DX, BX + JAE lz4_snappy_corrupt + CMPQ DI, $0xff + JEQ lz4_snappy_ml_loop + +lz4_snappy_ml_done: + ADDQ R9, SI + + // emitCopy +two_byte_offset_lz4_s2: + CMPL R9, $0x40 + JBE two_byte_offset_short_lz4_s2 + MOVB $0xee, (AX) + MOVW R8, 1(AX) + LEAL -60(R9), R9 + ADDQ $0x03, AX + CMPQ AX, CX + JAE lz4_snappy_loop + JMP two_byte_offset_lz4_s2 + +two_byte_offset_short_lz4_s2: + MOVL R9, DI + SHLL $0x02, DI + CMPL R9, $0x0c + JAE emit_copy_three_lz4_s2 + CMPL R8, $0x00000800 + JAE emit_copy_three_lz4_s2 + LEAL -15(DI), DI + MOVB R8, 1(AX) + SHRL $0x08, R8 + SHLL $0x05, R8 + ORL R8, DI + MOVB DI, (AX) + ADDQ $0x02, AX + JMP lz4_snappy_loop + +emit_copy_three_lz4_s2: + LEAL -2(DI), DI + MOVB DI, (AX) + MOVW R8, 1(AX) + ADDQ $0x03, AX + JMP lz4_snappy_loop + +lz4_snappy_done: + MOVQ dst_base+0(FP), CX + SUBQ CX, AX + MOVQ SI, uncompressed+48(FP) + MOVQ AX, dstUsed+56(FP) + RET + +lz4_snappy_corrupt: + XORQ AX, AX + LEAQ -1(AX), SI + MOVQ SI, uncompressed+48(FP) + RET + +lz4_snappy_dstfull: + XORQ AX, AX + LEAQ -2(AX), SI + MOVQ SI, uncompressed+48(FP) + RET + +// func cvtLZ4sBlockSnappyAsm(dst []byte, src []byte) (uncompressed int, dstUsed int) +// Requires: SSE2 +TEXT ·cvtLZ4sBlockSnappyAsm(SB), NOSPLIT, $0-64 + XORQ SI, SI + MOVQ dst_base+0(FP), AX + MOVQ dst_len+8(FP), CX + MOVQ src_base+24(FP), DX + MOVQ src_len+32(FP), BX + LEAQ (DX)(BX*1), BX + LEAQ -8(AX)(CX*1), CX + +lz4s_snappy_loop: + CMPQ DX, BX + JAE lz4s_snappy_corrupt + CMPQ AX, CX + JAE lz4s_snappy_dstfull + MOVBQZX (DX), DI + MOVQ DI, R8 + MOVQ DI, R9 + SHRQ $0x04, R8 + ANDQ $0x0f, R9 + CMPQ DI, $0xf0 + JB lz4s_snappy_ll_end + +lz4s_snappy_ll_loop: + INCQ DX + CMPQ DX, BX + JAE lz4s_snappy_corrupt + MOVBQZX (DX), DI + ADDQ DI, R8 + CMPQ DI, $0xff + JEQ lz4s_snappy_ll_loop + +lz4s_snappy_ll_end: + LEAQ (DX)(R8*1), DI + ADDQ $0x03, R9 + CMPQ DI, BX + JAE lz4s_snappy_corrupt + INCQ DX + INCQ DI + TESTQ R8, R8 + JZ lz4s_snappy_lits_done + LEAQ (AX)(R8*1), R10 + CMPQ R10, CX + JAE lz4s_snappy_dstfull + ADDQ R8, SI + LEAL -1(R8), R10 + CMPL R10, $0x3c + JB one_byte_lz4s_snappy + CMPL R10, $0x00000100 + JB two_bytes_lz4s_snappy + CMPL R10, $0x00010000 + JB three_bytes_lz4s_snappy + CMPL R10, $0x01000000 + JB four_bytes_lz4s_snappy + MOVB $0xfc, (AX) + MOVL R10, 1(AX) + ADDQ $0x05, AX + JMP memmove_long_lz4s_snappy + +four_bytes_lz4s_snappy: + MOVL R10, R11 + SHRL $0x10, R11 + MOVB $0xf8, (AX) + MOVW R10, 1(AX) + MOVB R11, 3(AX) + ADDQ $0x04, AX + JMP memmove_long_lz4s_snappy + +three_bytes_lz4s_snappy: + MOVB $0xf4, (AX) + MOVW R10, 1(AX) + ADDQ $0x03, AX + JMP memmove_long_lz4s_snappy + +two_bytes_lz4s_snappy: + MOVB $0xf0, (AX) + MOVB R10, 1(AX) + ADDQ $0x02, AX + CMPL R10, $0x40 + JB memmove_lz4s_snappy + JMP memmove_long_lz4s_snappy + +one_byte_lz4s_snappy: + SHLB $0x02, R10 + MOVB R10, (AX) + ADDQ $0x01, AX + +memmove_lz4s_snappy: + LEAQ (AX)(R8*1), 
R10 + + // genMemMoveShort + CMPQ R8, $0x08 + JBE emit_lit_memmove_lz4s_snappy_memmove_move_8 + CMPQ R8, $0x10 + JBE emit_lit_memmove_lz4s_snappy_memmove_move_8through16 + CMPQ R8, $0x20 + JBE emit_lit_memmove_lz4s_snappy_memmove_move_17through32 + JMP emit_lit_memmove_lz4s_snappy_memmove_move_33through64 + +emit_lit_memmove_lz4s_snappy_memmove_move_8: + MOVQ (DX), R11 + MOVQ R11, (AX) + JMP memmove_end_copy_lz4s_snappy + +emit_lit_memmove_lz4s_snappy_memmove_move_8through16: + MOVQ (DX), R11 + MOVQ -8(DX)(R8*1), DX + MOVQ R11, (AX) + MOVQ DX, -8(AX)(R8*1) + JMP memmove_end_copy_lz4s_snappy + +emit_lit_memmove_lz4s_snappy_memmove_move_17through32: + MOVOU (DX), X0 + MOVOU -16(DX)(R8*1), X1 + MOVOU X0, (AX) + MOVOU X1, -16(AX)(R8*1) + JMP memmove_end_copy_lz4s_snappy + +emit_lit_memmove_lz4s_snappy_memmove_move_33through64: + MOVOU (DX), X0 + MOVOU 16(DX), X1 + MOVOU -32(DX)(R8*1), X2 + MOVOU -16(DX)(R8*1), X3 + MOVOU X0, (AX) + MOVOU X1, 16(AX) + MOVOU X2, -32(AX)(R8*1) + MOVOU X3, -16(AX)(R8*1) + +memmove_end_copy_lz4s_snappy: + MOVQ R10, AX + JMP lz4s_snappy_lits_emit_done + +memmove_long_lz4s_snappy: + LEAQ (AX)(R8*1), R10 + + // genMemMoveLong + MOVOU (DX), X0 + MOVOU 16(DX), X1 + MOVOU -32(DX)(R8*1), X2 + MOVOU -16(DX)(R8*1), X3 + MOVQ R8, R12 + SHRQ $0x05, R12 + MOVQ AX, R11 + ANDL $0x0000001f, R11 + MOVQ $0x00000040, R13 + SUBQ R11, R13 + DECQ R12 + JA emit_lit_memmove_long_lz4s_snappylarge_forward_sse_loop_32 + LEAQ -32(DX)(R13*1), R11 + LEAQ -32(AX)(R13*1), R14 + +emit_lit_memmove_long_lz4s_snappylarge_big_loop_back: + MOVOU (R11), X4 + MOVOU 16(R11), X5 + MOVOA X4, (R14) + MOVOA X5, 16(R14) + ADDQ $0x20, R14 + ADDQ $0x20, R11 + ADDQ $0x20, R13 + DECQ R12 + JNA emit_lit_memmove_long_lz4s_snappylarge_big_loop_back + +emit_lit_memmove_long_lz4s_snappylarge_forward_sse_loop_32: + MOVOU -32(DX)(R13*1), X4 + MOVOU -16(DX)(R13*1), X5 + MOVOA X4, -32(AX)(R13*1) + MOVOA X5, -16(AX)(R13*1) + ADDQ $0x20, R13 + CMPQ R8, R13 + JAE emit_lit_memmove_long_lz4s_snappylarge_forward_sse_loop_32 + MOVOU X0, (AX) + MOVOU X1, 16(AX) + MOVOU X2, -32(AX)(R8*1) + MOVOU X3, -16(AX)(R8*1) + MOVQ R10, AX + +lz4s_snappy_lits_emit_done: + MOVQ DI, DX + +lz4s_snappy_lits_done: + CMPQ DX, BX + JNE lz4s_snappy_match + CMPQ R9, $0x03 + JEQ lz4s_snappy_done + JMP lz4s_snappy_corrupt + +lz4s_snappy_match: + CMPQ R9, $0x03 + JEQ lz4s_snappy_loop + LEAQ 2(DX), DI + CMPQ DI, BX + JAE lz4s_snappy_corrupt + MOVWQZX (DX), R8 + MOVQ DI, DX + TESTQ R8, R8 + JZ lz4s_snappy_corrupt + CMPQ R8, SI + JA lz4s_snappy_corrupt + CMPQ R9, $0x12 + JNE lz4s_snappy_ml_done + +lz4s_snappy_ml_loop: + MOVBQZX (DX), DI + INCQ DX + ADDQ DI, R9 + CMPQ DX, BX + JAE lz4s_snappy_corrupt + CMPQ DI, $0xff + JEQ lz4s_snappy_ml_loop + +lz4s_snappy_ml_done: + ADDQ R9, SI + + // emitCopy +two_byte_offset_lz4_s2: + CMPL R9, $0x40 + JBE two_byte_offset_short_lz4_s2 + MOVB $0xee, (AX) + MOVW R8, 1(AX) + LEAL -60(R9), R9 + ADDQ $0x03, AX + CMPQ AX, CX + JAE lz4s_snappy_loop + JMP two_byte_offset_lz4_s2 + +two_byte_offset_short_lz4_s2: + MOVL R9, DI + SHLL $0x02, DI + CMPL R9, $0x0c + JAE emit_copy_three_lz4_s2 + CMPL R8, $0x00000800 + JAE emit_copy_three_lz4_s2 + LEAL -15(DI), DI + MOVB R8, 1(AX) + SHRL $0x08, R8 + SHLL $0x05, R8 + ORL R8, DI + MOVB DI, (AX) + ADDQ $0x02, AX + JMP lz4s_snappy_loop + +emit_copy_three_lz4_s2: + LEAL -2(DI), DI + MOVB DI, (AX) + MOVW R8, 1(AX) + ADDQ $0x03, AX + JMP lz4s_snappy_loop + +lz4s_snappy_done: + MOVQ dst_base+0(FP), CX + SUBQ CX, AX + MOVQ SI, uncompressed+48(FP) + MOVQ AX, dstUsed+56(FP) + RET + 
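The two exits below (like the lz4_snappy_* and lz4_s2_* exits above) report failure through the uncompressed return value: -1 signals corrupt LZ4 input and -2 a destination that is too small; the Go wrapper ConvertBlock, later in this patch, maps these sentinels to ErrCorrupt and ErrDstTooSmall. Note also that the *SnappyAsm converters emit plain Snappy copies only, never S2 repeat codes, which is why their emitCopy ladders are much shorter than the S2 variants above.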
+lz4s_snappy_corrupt: + XORQ AX, AX + LEAQ -1(AX), SI + MOVQ SI, uncompressed+48(FP) + RET + +lz4s_snappy_dstfull: + XORQ AX, AX + LEAQ -2(AX), SI + MOVQ SI, uncompressed+48(FP) + RET diff --git a/vendor/github.com/klauspost/compress/s2/index.go b/vendor/github.com/klauspost/compress/s2/index.go new file mode 100644 index 0000000000..4229957b96 --- /dev/null +++ b/vendor/github.com/klauspost/compress/s2/index.go @@ -0,0 +1,602 @@ +// Copyright (c) 2022+ Klaus Post. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package s2 + +import ( + "bytes" + "encoding/binary" + "encoding/json" + "fmt" + "io" + "sort" +) + +const ( + S2IndexHeader = "s2idx\x00" + S2IndexTrailer = "\x00xdi2s" + maxIndexEntries = 1 << 16 + // If distance is less than this, we do not add the entry. + minIndexDist = 1 << 20 +) + +// Index represents an S2/Snappy index. +type Index struct { + TotalUncompressed int64 // Total Uncompressed size if known. Will be -1 if unknown. + TotalCompressed int64 // Total Compressed size if known. Will be -1 if unknown. + info []struct { + compressedOffset int64 + uncompressedOffset int64 + } + estBlockUncomp int64 +} + +func (i *Index) reset(maxBlock int) { + i.estBlockUncomp = int64(maxBlock) + i.TotalCompressed = -1 + i.TotalUncompressed = -1 + if len(i.info) > 0 { + i.info = i.info[:0] + } +} + +// allocInfos will allocate an empty slice of infos. +func (i *Index) allocInfos(n int) { + if n > maxIndexEntries { + panic("n > maxIndexEntries") + } + i.info = make([]struct { + compressedOffset int64 + uncompressedOffset int64 + }, 0, n) +} + +// add an uncompressed and compressed pair. +// Entries must be sent in order. +func (i *Index) add(compressedOffset, uncompressedOffset int64) error { + if i == nil { + return nil + } + lastIdx := len(i.info) - 1 + if lastIdx >= 0 { + latest := i.info[lastIdx] + if latest.uncompressedOffset == uncompressedOffset { + // Uncompressed didn't change, don't add entry, + // but update start index. + latest.compressedOffset = compressedOffset + i.info[lastIdx] = latest + return nil + } + if latest.uncompressedOffset > uncompressedOffset { + return fmt.Errorf("internal error: Earlier uncompressed received (%d > %d)", latest.uncompressedOffset, uncompressedOffset) + } + if latest.compressedOffset > compressedOffset { + return fmt.Errorf("internal error: Earlier compressed received (%d > %d)", latest.uncompressedOffset, uncompressedOffset) + } + if latest.uncompressedOffset+minIndexDist > uncompressedOffset { + // Only add entry if distance is large enough. + return nil + } + } + i.info = append(i.info, struct { + compressedOffset int64 + uncompressedOffset int64 + }{compressedOffset: compressedOffset, uncompressedOffset: uncompressedOffset}) + return nil +} + +// Find the offset at or before the wanted (uncompressed) offset. +// If offset is 0 or positive it is the offset from the beginning of the file. +// If the uncompressed size is known, the offset must be within the file. +// If an offset outside the file is requested io.ErrUnexpectedEOF is returned. +// If the offset is negative, it is interpreted as the distance from the end of the file, +// where -1 represents the last byte. +// If offset from the end of the file is requested, but size is unknown, +// ErrUnsupported will be returned. 
+func (i *Index) Find(offset int64) (compressedOff, uncompressedOff int64, err error) { + if i.TotalUncompressed < 0 { + return 0, 0, ErrCorrupt + } + if offset < 0 { + offset = i.TotalUncompressed + offset + if offset < 0 { + return 0, 0, io.ErrUnexpectedEOF + } + } + if offset > i.TotalUncompressed { + return 0, 0, io.ErrUnexpectedEOF + } + if len(i.info) > 200 { + n := sort.Search(len(i.info), func(n int) bool { + return i.info[n].uncompressedOffset > offset + }) + if n == 0 { + n = 1 + } + return i.info[n-1].compressedOffset, i.info[n-1].uncompressedOffset, nil + } + for _, info := range i.info { + if info.uncompressedOffset > offset { + break + } + compressedOff = info.compressedOffset + uncompressedOff = info.uncompressedOffset + } + return compressedOff, uncompressedOff, nil +} + +// reduce to stay below maxIndexEntries +func (i *Index) reduce() { + if len(i.info) < maxIndexEntries && i.estBlockUncomp >= minIndexDist { + return + } + + // Algorithm, keep 1, remove removeN entries... + removeN := (len(i.info) + 1) / maxIndexEntries + src := i.info + j := 0 + + // Each block should be at least 1MB, but don't reduce below 1000 entries. + for i.estBlockUncomp*(int64(removeN)+1) < minIndexDist && len(i.info)/(removeN+1) > 1000 { + removeN++ + } + for idx := 0; idx < len(src); idx++ { + i.info[j] = src[idx] + j++ + idx += removeN + } + i.info = i.info[:j] + // Update maxblock estimate. + i.estBlockUncomp += i.estBlockUncomp * int64(removeN) +} + +func (i *Index) appendTo(b []byte, uncompTotal, compTotal int64) []byte { + i.reduce() + var tmp [binary.MaxVarintLen64]byte + + initSize := len(b) + // We make the start a skippable header+size. + b = append(b, ChunkTypeIndex, 0, 0, 0) + b = append(b, []byte(S2IndexHeader)...) + // Total Uncompressed size + n := binary.PutVarint(tmp[:], uncompTotal) + b = append(b, tmp[:n]...) + // Total Compressed size + n = binary.PutVarint(tmp[:], compTotal) + b = append(b, tmp[:n]...) + // Put EstBlockUncomp size + n = binary.PutVarint(tmp[:], i.estBlockUncomp) + b = append(b, tmp[:n]...) + // Put length + n = binary.PutVarint(tmp[:], int64(len(i.info))) + b = append(b, tmp[:n]...) + + // Check if we should add uncompressed offsets + var hasUncompressed byte + for idx, info := range i.info { + if idx == 0 { + if info.uncompressedOffset != 0 { + hasUncompressed = 1 + break + } + continue + } + if info.uncompressedOffset != i.info[idx-1].uncompressedOffset+i.estBlockUncomp { + hasUncompressed = 1 + break + } + } + b = append(b, hasUncompressed) + + // Add each entry + if hasUncompressed == 1 { + for idx, info := range i.info { + uOff := info.uncompressedOffset + if idx > 0 { + prev := i.info[idx-1] + uOff -= prev.uncompressedOffset + (i.estBlockUncomp) + } + n = binary.PutVarint(tmp[:], uOff) + b = append(b, tmp[:n]...) + } + } + + // Initial compressed size estimate. + cPredict := i.estBlockUncomp / 2 + + for idx, info := range i.info { + cOff := info.compressedOffset + if idx > 0 { + prev := i.info[idx-1] + cOff -= prev.compressedOffset + cPredict + // Update compressed size prediction, with half the error. + cPredict += cOff / 2 + } + n = binary.PutVarint(tmp[:], cOff) + b = append(b, tmp[:n]...) + } + + // Add Total Size. + // Stored as fixed size for easier reading. + binary.LittleEndian.PutUint32(tmp[:], uint32(len(b)-initSize+4+len(S2IndexTrailer))) + b = append(b, tmp[:4]...) + // Trailer + b = append(b, []byte(S2IndexTrailer)...) 
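Taken together, appendTo above serializes the index as a single skippable frame: a ChunkTypeIndex byte plus 3-byte chunk length (patched in just below), the "s2idx\x00" header, varints for total uncompressed size, total compressed size, the block-size estimate, and the entry count, a one-byte hasUncompressed flag, delta-coded uncompressed offsets (present only when the flag is set), prediction-delta-coded compressed offsets, a fixed 4-byte little-endian total size, and finally the "\x00xdi2s" trailer that Load and LoadStream key on.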
+ + // Update size + chunkLen := len(b) - initSize - skippableFrameHeader + b[initSize+1] = uint8(chunkLen >> 0) + b[initSize+2] = uint8(chunkLen >> 8) + b[initSize+3] = uint8(chunkLen >> 16) + //fmt.Printf("chunklen: 0x%x Uncomp:%d, Comp:%d\n", chunkLen, uncompTotal, compTotal) + return b +} + +// Load a binary index. +// A zero value Index can be used or a previous one can be reused. +func (i *Index) Load(b []byte) ([]byte, error) { + if len(b) <= 4+len(S2IndexHeader)+len(S2IndexTrailer) { + return b, io.ErrUnexpectedEOF + } + if b[0] != ChunkTypeIndex { + return b, ErrCorrupt + } + chunkLen := int(b[1]) | int(b[2])<<8 | int(b[3])<<16 + b = b[4:] + + // Validate we have enough... + if len(b) < chunkLen { + return b, io.ErrUnexpectedEOF + } + if !bytes.Equal(b[:len(S2IndexHeader)], []byte(S2IndexHeader)) { + return b, ErrUnsupported + } + b = b[len(S2IndexHeader):] + + // Total Uncompressed + if v, n := binary.Varint(b); n <= 0 || v < 0 { + return b, ErrCorrupt + } else { + i.TotalUncompressed = v + b = b[n:] + } + + // Total Compressed + if v, n := binary.Varint(b); n <= 0 { + return b, ErrCorrupt + } else { + i.TotalCompressed = v + b = b[n:] + } + + // Read EstBlockUncomp + if v, n := binary.Varint(b); n <= 0 { + return b, ErrCorrupt + } else { + if v < 0 { + return b, ErrCorrupt + } + i.estBlockUncomp = v + b = b[n:] + } + + var entries int + if v, n := binary.Varint(b); n <= 0 { + return b, ErrCorrupt + } else { + if v < 0 || v > maxIndexEntries { + return b, ErrCorrupt + } + entries = int(v) + b = b[n:] + } + if cap(i.info) < entries { + i.allocInfos(entries) + } + i.info = i.info[:entries] + + if len(b) < 1 { + return b, io.ErrUnexpectedEOF + } + hasUncompressed := b[0] + b = b[1:] + if hasUncompressed&1 != hasUncompressed { + return b, ErrCorrupt + } + + // Add each uncompressed entry + for idx := range i.info { + var uOff int64 + if hasUncompressed != 0 { + // Load delta + if v, n := binary.Varint(b); n <= 0 { + return b, ErrCorrupt + } else { + uOff = v + b = b[n:] + } + } + + if idx > 0 { + prev := i.info[idx-1].uncompressedOffset + uOff += prev + (i.estBlockUncomp) + if uOff <= prev { + return b, ErrCorrupt + } + } + if uOff < 0 { + return b, ErrCorrupt + } + i.info[idx].uncompressedOffset = uOff + } + + // Initial compressed size estimate. + cPredict := i.estBlockUncomp / 2 + + // Add each compressed entry + for idx := range i.info { + var cOff int64 + if v, n := binary.Varint(b); n <= 0 { + return b, ErrCorrupt + } else { + cOff = v + b = b[n:] + } + + if idx > 0 { + // Update compressed size prediction, with half the error. + cPredictNew := cPredict + cOff/2 + + prev := i.info[idx-1].compressedOffset + cOff += prev + cPredict + if cOff <= prev { + return b, ErrCorrupt + } + cPredict = cPredictNew + } + if cOff < 0 { + return b, ErrCorrupt + } + i.info[idx].compressedOffset = cOff + } + if len(b) < 4+len(S2IndexTrailer) { + return b, io.ErrUnexpectedEOF + } + // Skip size... + b = b[4:] + + // Check trailer... + if !bytes.Equal(b[:len(S2IndexTrailer)], []byte(S2IndexTrailer)) { + return b, ErrCorrupt + } + return b[len(S2IndexTrailer):], nil +} + +// LoadStream will load an index from the end of the supplied stream. +// ErrUnsupported will be returned if the signature cannot be found. +// ErrCorrupt will be returned if unexpected values are found. +// io.ErrUnexpectedEOF is returned if there are too few bytes. +// IO errors are returned as-is. +func (i *Index) LoadStream(rs io.ReadSeeker) error { + // Go to end. 
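The 10-byte read below is exactly the fixed-size tail written by appendTo: a 4-byte little-endian total size followed by the 6-byte "\x00xdi2s" trailer, so seeking to -10 from the end is sufficient to locate and validate the index before seeking back to its start.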
+ _, err := rs.Seek(-10, io.SeekEnd) + if err != nil { + return err + } + var tmp [10]byte + _, err = io.ReadFull(rs, tmp[:]) + if err != nil { + return err + } + // Check trailer... + if !bytes.Equal(tmp[4:4+len(S2IndexTrailer)], []byte(S2IndexTrailer)) { + return ErrUnsupported + } + sz := binary.LittleEndian.Uint32(tmp[:4]) + if sz > maxChunkSize+skippableFrameHeader { + return ErrCorrupt + } + _, err = rs.Seek(-int64(sz), io.SeekEnd) + if err != nil { + return err + } + + // Read index. + buf := make([]byte, sz) + _, err = io.ReadFull(rs, buf) + if err != nil { + return err + } + _, err = i.Load(buf) + return err +} + +// IndexStream will return an index for a stream. +// The stream structure will be checked, but +// data within blocks is not verified. +// The returned index can either be appended to the end of the stream +// or stored separately. +func IndexStream(r io.Reader) ([]byte, error) { + var i Index + var buf [maxChunkSize]byte + var readHeader bool + for { + _, err := io.ReadFull(r, buf[:4]) + if err != nil { + if err == io.EOF { + return i.appendTo(nil, i.TotalUncompressed, i.TotalCompressed), nil + } + return nil, err + } + // Start of this chunk. + startChunk := i.TotalCompressed + i.TotalCompressed += 4 + + chunkType := buf[0] + if !readHeader { + if chunkType != chunkTypeStreamIdentifier { + return nil, ErrCorrupt + } + readHeader = true + } + chunkLen := int(buf[1]) | int(buf[2])<<8 | int(buf[3])<<16 + if chunkLen < checksumSize { + return nil, ErrCorrupt + } + + i.TotalCompressed += int64(chunkLen) + _, err = io.ReadFull(r, buf[:chunkLen]) + if err != nil { + return nil, io.ErrUnexpectedEOF + } + // The chunk types are specified at + // https://github.com/google/snappy/blob/master/framing_format.txt + switch chunkType { + case chunkTypeCompressedData: + // Section 4.2. Compressed data (chunk type 0x00). + // Skip checksum. + dLen, err := DecodedLen(buf[checksumSize:]) + if err != nil { + return nil, err + } + if dLen > maxBlockSize { + return nil, ErrCorrupt + } + if i.estBlockUncomp == 0 { + // Use first block for estimate... + i.estBlockUncomp = int64(dLen) + } + err = i.add(startChunk, i.TotalUncompressed) + if err != nil { + return nil, err + } + i.TotalUncompressed += int64(dLen) + continue + case chunkTypeUncompressedData: + n2 := chunkLen - checksumSize + if n2 > maxBlockSize { + return nil, ErrCorrupt + } + if i.estBlockUncomp == 0 { + // Use first block for estimate... + i.estBlockUncomp = int64(n2) + } + err = i.add(startChunk, i.TotalUncompressed) + if err != nil { + return nil, err + } + i.TotalUncompressed += int64(n2) + continue + case chunkTypeStreamIdentifier: + // Section 4.1. Stream identifier (chunk type 0xff). + if chunkLen != len(magicBody) { + return nil, ErrCorrupt + } + + if string(buf[:len(magicBody)]) != magicBody { + if string(buf[:len(magicBody)]) != magicBodySnappy { + return nil, ErrCorrupt + } + } + + continue + } + + if chunkType <= 0x7f { + // Section 4.5. Reserved unskippable chunks (chunk types 0x02-0x7f). + return nil, ErrUnsupported + } + if chunkLen > maxChunkSize { + return nil, ErrUnsupported + } + // Section 4.4 Padding (chunk type 0xfe). + // Section 4.6. Reserved skippable chunks (chunk types 0x80-0xfd). + } +} + +// JSON returns the index as JSON text. +func (i *Index) JSON() []byte { + type offset struct { + CompressedOffset int64 `json:"compressed"` + UncompressedOffset int64 `json:"uncompressed"` + } + x := struct { + TotalUncompressed int64 `json:"total_uncompressed"` // Total Uncompressed size if known. 
Will be -1 if unknown. + TotalCompressed int64 `json:"total_compressed"` // Total Compressed size if known. Will be -1 if unknown. + Offsets []offset `json:"offsets"` + EstBlockUncomp int64 `json:"est_block_uncompressed"` + }{ + TotalUncompressed: i.TotalUncompressed, + TotalCompressed: i.TotalCompressed, + EstBlockUncomp: i.estBlockUncomp, + } + for _, v := range i.info { + x.Offsets = append(x.Offsets, offset{CompressedOffset: v.compressedOffset, UncompressedOffset: v.uncompressedOffset}) + } + b, _ := json.MarshalIndent(x, "", " ") + return b +} + +// RemoveIndexHeaders will trim all headers and trailers from a given index. +// This is expected to save 20 bytes. +// These can be restored using RestoreIndexHeaders. +// This removes a layer of security, but is the most compact representation. +// Returns nil if headers contains errors. +// The returned slice references the provided slice. +func RemoveIndexHeaders(b []byte) []byte { + const save = 4 + len(S2IndexHeader) + len(S2IndexTrailer) + 4 + if len(b) <= save { + return nil + } + if b[0] != ChunkTypeIndex { + return nil + } + chunkLen := int(b[1]) | int(b[2])<<8 | int(b[3])<<16 + b = b[4:] + + // Validate we have enough... + if len(b) < chunkLen { + return nil + } + b = b[:chunkLen] + + if !bytes.Equal(b[:len(S2IndexHeader)], []byte(S2IndexHeader)) { + return nil + } + b = b[len(S2IndexHeader):] + if !bytes.HasSuffix(b, []byte(S2IndexTrailer)) { + return nil + } + b = bytes.TrimSuffix(b, []byte(S2IndexTrailer)) + + if len(b) < 4 { + return nil + } + return b[:len(b)-4] +} + +// RestoreIndexHeaders will index restore headers removed by RemoveIndexHeaders. +// No error checking is performed on the input. +// If a 0 length slice is sent, it is returned without modification. +func RestoreIndexHeaders(in []byte) []byte { + if len(in) == 0 { + return in + } + b := make([]byte, 0, 4+len(S2IndexHeader)+len(in)+len(S2IndexTrailer)+4) + b = append(b, ChunkTypeIndex, 0, 0, 0) + b = append(b, []byte(S2IndexHeader)...) + b = append(b, in...) + + var tmp [4]byte + binary.LittleEndian.PutUint32(tmp[:], uint32(len(b)+4+len(S2IndexTrailer))) + b = append(b, tmp[:4]...) + // Trailer + b = append(b, []byte(S2IndexTrailer)...) + + chunkLen := len(b) - skippableFrameHeader + b[1] = uint8(chunkLen >> 0) + b[2] = uint8(chunkLen >> 8) + b[3] = uint8(chunkLen >> 16) + return b +} diff --git a/vendor/github.com/klauspost/compress/s2/lz4convert.go b/vendor/github.com/klauspost/compress/s2/lz4convert.go new file mode 100644 index 0000000000..46ed908e3c --- /dev/null +++ b/vendor/github.com/klauspost/compress/s2/lz4convert.go @@ -0,0 +1,585 @@ +// Copyright (c) 2022 Klaus Post. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package s2 + +import ( + "encoding/binary" + "errors" + "fmt" +) + +// LZ4Converter provides conversion from LZ4 blocks as defined here: +// https://github.com/lz4/lz4/blob/dev/doc/lz4_Block_format.md +type LZ4Converter struct { +} + +// ErrDstTooSmall is returned when provided destination is too small. +var ErrDstTooSmall = errors.New("s2: destination too small") + +// ConvertBlock will convert an LZ4 block and append it as an S2 +// block without block length to dst. +// The uncompressed size is returned as well. +// dst must have capacity to contain the entire compressed block. 
+func (l *LZ4Converter) ConvertBlock(dst, src []byte) ([]byte, int, error) { + if len(src) == 0 { + return dst, 0, nil + } + const debug = false + const inline = true + const lz4MinMatch = 4 + + s, d := 0, len(dst) + dst = dst[:cap(dst)] + if !debug && hasAmd64Asm { + res, sz := cvtLZ4BlockAsm(dst[d:], src) + if res < 0 { + const ( + errCorrupt = -1 + errDstTooSmall = -2 + ) + switch res { + case errCorrupt: + return nil, 0, ErrCorrupt + case errDstTooSmall: + return nil, 0, ErrDstTooSmall + default: + return nil, 0, fmt.Errorf("unexpected result: %d", res) + } + } + if d+sz > len(dst) { + return nil, 0, ErrDstTooSmall + } + return dst[:d+sz], res, nil + } + + dLimit := len(dst) - 10 + var lastOffset uint16 + var uncompressed int + if debug { + fmt.Printf("convert block start: len(src): %d, len(dst):%d \n", len(src), len(dst)) + } + + for { + if s >= len(src) { + return dst[:d], 0, ErrCorrupt + } + // Read literal info + token := src[s] + ll := int(token >> 4) + ml := int(lz4MinMatch + (token & 0xf)) + + // If upper nibble is 15, literal length is extended + if token >= 0xf0 { + for { + s++ + if s >= len(src) { + if debug { + fmt.Printf("error reading ll: s (%d) >= len(src) (%d)\n", s, len(src)) + } + return dst[:d], 0, ErrCorrupt + } + val := src[s] + ll += int(val) + if val != 255 { + break + } + } + } + // Skip past token + if s+ll >= len(src) { + if debug { + fmt.Printf("error literals: s+ll (%d+%d) >= len(src) (%d)\n", s, ll, len(src)) + } + return nil, 0, ErrCorrupt + } + s++ + if ll > 0 { + if d+ll > dLimit { + return nil, 0, ErrDstTooSmall + } + if debug { + fmt.Printf("emit %d literals\n", ll) + } + d += emitLiteralGo(dst[d:], src[s:s+ll]) + s += ll + uncompressed += ll + } + + // Check if we are done... + if s == len(src) && ml == lz4MinMatch { + break + } + // 2 byte offset + if s >= len(src)-2 { + if debug { + fmt.Printf("s (%d) >= len(src)-2 (%d)", s, len(src)-2) + } + return nil, 0, ErrCorrupt + } + offset := binary.LittleEndian.Uint16(src[s:]) + s += 2 + if offset == 0 { + if debug { + fmt.Printf("error: offset 0, ml: %d, len(src)-s: %d\n", ml, len(src)-s) + } + return nil, 0, ErrCorrupt + } + if int(offset) > uncompressed { + if debug { + fmt.Printf("error: offset (%d)> uncompressed (%d)\n", offset, uncompressed) + } + return nil, 0, ErrCorrupt + } + + if ml == lz4MinMatch+15 { + for { + if s >= len(src) { + if debug { + fmt.Printf("error reading ml: s (%d) >= len(src) (%d)\n", s, len(src)) + } + return nil, 0, ErrCorrupt + } + val := src[s] + s++ + ml += int(val) + if val != 255 { + if s >= len(src) { + if debug { + fmt.Printf("error reading ml: s (%d) >= len(src) (%d)\n", s, len(src)) + } + return nil, 0, ErrCorrupt + } + break + } + } + } + if offset == lastOffset { + if debug { + fmt.Printf("emit repeat, length: %d, offset: %d\n", ml, offset) + } + if !inline { + d += emitRepeat16(dst[d:], offset, ml) + } else { + length := ml + dst := dst[d:] + for len(dst) > 5 { + // Repeat offset, make length cheaper + length -= 4 + if length <= 4 { + dst[0] = uint8(length)<<2 | tagCopy1 + dst[1] = 0 + d += 2 + break + } + if length < 8 && offset < 2048 { + // Encode WITH offset + dst[1] = uint8(offset) + dst[0] = uint8(offset>>8)<<5 | uint8(length)<<2 | tagCopy1 + d += 2 + break + } + if length < (1<<8)+4 { + length -= 4 + dst[2] = uint8(length) + dst[1] = 0 + dst[0] = 5<<2 | tagCopy1 + d += 3 + break + } + if length < (1<<16)+(1<<8) { + length -= 1 << 8 + dst[3] = uint8(length >> 8) + dst[2] = uint8(length >> 0) + dst[1] = 0 + dst[0] = 6<<2 | tagCopy1 + d += 4 + break + } + const 
maxRepeat = (1 << 24) - 1 + length -= 1 << 16 + left := 0 + if length > maxRepeat { + left = length - maxRepeat + 4 + length = maxRepeat - 4 + } + dst[4] = uint8(length >> 16) + dst[3] = uint8(length >> 8) + dst[2] = uint8(length >> 0) + dst[1] = 0 + dst[0] = 7<<2 | tagCopy1 + if left > 0 { + d += 5 + emitRepeat16(dst[5:], offset, left) + break + } + d += 5 + break + } + } + } else { + if debug { + fmt.Printf("emit copy, length: %d, offset: %d\n", ml, offset) + } + if !inline { + d += emitCopy16(dst[d:], offset, ml) + } else { + length := ml + dst := dst[d:] + for len(dst) > 5 { + // Offset no more than 2 bytes. + if length > 64 { + off := 3 + if offset < 2048 { + // emit 8 bytes as tagCopy1, rest as repeats. + dst[1] = uint8(offset) + dst[0] = uint8(offset>>8)<<5 | uint8(8-4)<<2 | tagCopy1 + length -= 8 + off = 2 + } else { + // Emit a length 60 copy, encoded as 3 bytes. + // Emit remaining as repeat value (minimum 4 bytes). + dst[2] = uint8(offset >> 8) + dst[1] = uint8(offset) + dst[0] = 59<<2 | tagCopy2 + length -= 60 + } + // Emit remaining as repeats, at least 4 bytes remain. + d += off + emitRepeat16(dst[off:], offset, length) + break + } + if length >= 12 || offset >= 2048 { + // Emit the remaining copy, encoded as 3 bytes. + dst[2] = uint8(offset >> 8) + dst[1] = uint8(offset) + dst[0] = uint8(length-1)<<2 | tagCopy2 + d += 3 + break + } + // Emit the remaining copy, encoded as 2 bytes. + dst[1] = uint8(offset) + dst[0] = uint8(offset>>8)<<5 | uint8(length-4)<<2 | tagCopy1 + d += 2 + break + } + } + lastOffset = offset + } + uncompressed += ml + if d > dLimit { + return nil, 0, ErrDstTooSmall + } + } + + return dst[:d], uncompressed, nil +} + +// ConvertBlockSnappy will convert an LZ4 block and append it +// as a Snappy block without block length to dst. +// The uncompressed size is returned as well. +// dst must have capacity to contain the entire compressed block. 
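+// Unlike ConvertBlock above, Snappy has no repeat codes, so every match is
+// emitted as explicit copy tags. A hand-worked sketch of the loop below: a
+// match with offset 1000 and length 70 becomes a 3-byte tagCopy2 of length
+// 64 followed by a 2-byte tagCopy1 of length 6 (offset < 2048, 4 <= 6 < 12).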
+func (l *LZ4Converter) ConvertBlockSnappy(dst, src []byte) ([]byte, int, error) { + if len(src) == 0 { + return dst, 0, nil + } + const debug = false + const lz4MinMatch = 4 + + s, d := 0, len(dst) + dst = dst[:cap(dst)] + // Use assembly when possible + if !debug && hasAmd64Asm { + res, sz := cvtLZ4BlockSnappyAsm(dst[d:], src) + if res < 0 { + const ( + errCorrupt = -1 + errDstTooSmall = -2 + ) + switch res { + case errCorrupt: + return nil, 0, ErrCorrupt + case errDstTooSmall: + return nil, 0, ErrDstTooSmall + default: + return nil, 0, fmt.Errorf("unexpected result: %d", res) + } + } + if d+sz > len(dst) { + return nil, 0, ErrDstTooSmall + } + return dst[:d+sz], res, nil + } + + dLimit := len(dst) - 10 + var uncompressed int + if debug { + fmt.Printf("convert block start: len(src): %d, len(dst):%d \n", len(src), len(dst)) + } + + for { + if s >= len(src) { + return nil, 0, ErrCorrupt + } + // Read literal info + token := src[s] + ll := int(token >> 4) + ml := int(lz4MinMatch + (token & 0xf)) + + // If upper nibble is 15, literal length is extended + if token >= 0xf0 { + for { + s++ + if s >= len(src) { + if debug { + fmt.Printf("error reading ll: s (%d) >= len(src) (%d)\n", s, len(src)) + } + return nil, 0, ErrCorrupt + } + val := src[s] + ll += int(val) + if val != 255 { + break + } + } + } + // Skip past token + if s+ll >= len(src) { + if debug { + fmt.Printf("error literals: s+ll (%d+%d) >= len(src) (%d)\n", s, ll, len(src)) + } + return nil, 0, ErrCorrupt + } + s++ + if ll > 0 { + if d+ll > dLimit { + return nil, 0, ErrDstTooSmall + } + if debug { + fmt.Printf("emit %d literals\n", ll) + } + d += emitLiteralGo(dst[d:], src[s:s+ll]) + s += ll + uncompressed += ll + } + + // Check if we are done... + if s == len(src) && ml == lz4MinMatch { + break + } + // 2 byte offset + if s >= len(src)-2 { + if debug { + fmt.Printf("s (%d) >= len(src)-2 (%d)", s, len(src)-2) + } + return nil, 0, ErrCorrupt + } + offset := binary.LittleEndian.Uint16(src[s:]) + s += 2 + if offset == 0 { + if debug { + fmt.Printf("error: offset 0, ml: %d, len(src)-s: %d\n", ml, len(src)-s) + } + return nil, 0, ErrCorrupt + } + if int(offset) > uncompressed { + if debug { + fmt.Printf("error: offset (%d)> uncompressed (%d)\n", offset, uncompressed) + } + return nil, 0, ErrCorrupt + } + + if ml == lz4MinMatch+15 { + for { + if s >= len(src) { + if debug { + fmt.Printf("error reading ml: s (%d) >= len(src) (%d)\n", s, len(src)) + } + return nil, 0, ErrCorrupt + } + val := src[s] + s++ + ml += int(val) + if val != 255 { + if s >= len(src) { + if debug { + fmt.Printf("error reading ml: s (%d) >= len(src) (%d)\n", s, len(src)) + } + return nil, 0, ErrCorrupt + } + break + } + } + } + if debug { + fmt.Printf("emit copy, length: %d, offset: %d\n", ml, offset) + } + length := ml + // d += emitCopyNoRepeat(dst[d:], int(offset), ml) + for length > 0 { + if d >= dLimit { + return nil, 0, ErrDstTooSmall + } + + // Offset no more than 2 bytes. + if length > 64 { + // Emit a length 64 copy, encoded as 3 bytes. + dst[d+2] = uint8(offset >> 8) + dst[d+1] = uint8(offset) + dst[d+0] = 63<<2 | tagCopy2 + length -= 64 + d += 3 + continue + } + if length >= 12 || offset >= 2048 || length < 4 { + // Emit the remaining copy, encoded as 3 bytes. + dst[d+2] = uint8(offset >> 8) + dst[d+1] = uint8(offset) + dst[d+0] = uint8(length-1)<<2 | tagCopy2 + d += 3 + break + } + // Emit the remaining copy, encoded as 2 bytes. 
+ dst[d+1] = uint8(offset) + dst[d+0] = uint8(offset>>8)<<5 | uint8(length-4)<<2 | tagCopy1 + d += 2 + break + } + uncompressed += ml + if d > dLimit { + return nil, 0, ErrDstTooSmall + } + } + + return dst[:d], uncompressed, nil +} + +// emitRepeat writes a repeat chunk and returns the number of bytes written. +// Length must be at least 4 and < 1<<24 +func emitRepeat16(dst []byte, offset uint16, length int) int { + // Repeat offset, make length cheaper + length -= 4 + if length <= 4 { + dst[0] = uint8(length)<<2 | tagCopy1 + dst[1] = 0 + return 2 + } + if length < 8 && offset < 2048 { + // Encode WITH offset + dst[1] = uint8(offset) + dst[0] = uint8(offset>>8)<<5 | uint8(length)<<2 | tagCopy1 + return 2 + } + if length < (1<<8)+4 { + length -= 4 + dst[2] = uint8(length) + dst[1] = 0 + dst[0] = 5<<2 | tagCopy1 + return 3 + } + if length < (1<<16)+(1<<8) { + length -= 1 << 8 + dst[3] = uint8(length >> 8) + dst[2] = uint8(length >> 0) + dst[1] = 0 + dst[0] = 6<<2 | tagCopy1 + return 4 + } + const maxRepeat = (1 << 24) - 1 + length -= 1 << 16 + left := 0 + if length > maxRepeat { + left = length - maxRepeat + 4 + length = maxRepeat - 4 + } + dst[4] = uint8(length >> 16) + dst[3] = uint8(length >> 8) + dst[2] = uint8(length >> 0) + dst[1] = 0 + dst[0] = 7<<2 | tagCopy1 + if left > 0 { + return 5 + emitRepeat16(dst[5:], offset, left) + } + return 5 +} + +// emitCopy writes a copy chunk and returns the number of bytes written. +// +// It assumes that: +// +// dst is long enough to hold the encoded bytes +// 1 <= offset && offset <= math.MaxUint16 +// 4 <= length && length <= math.MaxUint32 +func emitCopy16(dst []byte, offset uint16, length int) int { + // Offset no more than 2 bytes. + if length > 64 { + off := 3 + if offset < 2048 { + // emit 8 bytes as tagCopy1, rest as repeats. + dst[1] = uint8(offset) + dst[0] = uint8(offset>>8)<<5 | uint8(8-4)<<2 | tagCopy1 + length -= 8 + off = 2 + } else { + // Emit a length 60 copy, encoded as 3 bytes. + // Emit remaining as repeat value (minimum 4 bytes). + dst[2] = uint8(offset >> 8) + dst[1] = uint8(offset) + dst[0] = 59<<2 | tagCopy2 + length -= 60 + } + // Emit remaining as repeats, at least 4 bytes remain. + return off + emitRepeat16(dst[off:], offset, length) + } + if length >= 12 || offset >= 2048 { + // Emit the remaining copy, encoded as 3 bytes. + dst[2] = uint8(offset >> 8) + dst[1] = uint8(offset) + dst[0] = uint8(length-1)<<2 | tagCopy2 + return 3 + } + // Emit the remaining copy, encoded as 2 bytes. + dst[1] = uint8(offset) + dst[0] = uint8(offset>>8)<<5 | uint8(length-4)<<2 | tagCopy1 + return 2 +} + +// emitLiteral writes a literal chunk and returns the number of bytes written. 
+// +// It assumes that: +// +// dst is long enough to hold the encoded bytes +// 0 <= len(lit) && len(lit) <= math.MaxUint32 +func emitLiteralGo(dst, lit []byte) int { + if len(lit) == 0 { + return 0 + } + i, n := 0, uint(len(lit)-1) + switch { + case n < 60: + dst[0] = uint8(n)<<2 | tagLiteral + i = 1 + case n < 1<<8: + dst[1] = uint8(n) + dst[0] = 60<<2 | tagLiteral + i = 2 + case n < 1<<16: + dst[2] = uint8(n >> 8) + dst[1] = uint8(n) + dst[0] = 61<<2 | tagLiteral + i = 3 + case n < 1<<24: + dst[3] = uint8(n >> 16) + dst[2] = uint8(n >> 8) + dst[1] = uint8(n) + dst[0] = 62<<2 | tagLiteral + i = 4 + default: + dst[4] = uint8(n >> 24) + dst[3] = uint8(n >> 16) + dst[2] = uint8(n >> 8) + dst[1] = uint8(n) + dst[0] = 63<<2 | tagLiteral + i = 5 + } + return i + copy(dst[i:], lit) +} diff --git a/vendor/github.com/klauspost/compress/s2/lz4sconvert.go b/vendor/github.com/klauspost/compress/s2/lz4sconvert.go new file mode 100644 index 0000000000..000f39719c --- /dev/null +++ b/vendor/github.com/klauspost/compress/s2/lz4sconvert.go @@ -0,0 +1,467 @@ +// Copyright (c) 2022 Klaus Post. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package s2 + +import ( + "encoding/binary" + "fmt" +) + +// LZ4sConverter provides conversion from LZ4s. +// (Intel modified LZ4 Blocks) +// https://cdrdv2-public.intel.com/743912/743912-qat-programmers-guide-v2.0.pdf +// LZ4s is a variant of LZ4 block format. LZ4s should be considered as an intermediate compressed block format. +// The LZ4s format is selected when the application sets the compType to CPA_DC_LZ4S in CpaDcSessionSetupData. +// The LZ4s block returned by the Intel® QAT hardware can be used by an external +// software post-processing to generate other compressed data formats. +// The following table lists the differences between LZ4 and LZ4s block format. LZ4s block format uses +// the same high-level formatting as LZ4 block format with the following encoding changes: +// For Min Match of 4 bytes, Copy length value 1-15 means length 4-18 with 18 bytes adding an extra byte. +// ONLY "Min match of 4 bytes" is supported. +type LZ4sConverter struct { +} + +// ConvertBlock will convert an LZ4s block and append it as an S2 +// block without block length to dst. +// The uncompressed size is returned as well. +// dst must have capacity to contain the entire compressed block. 
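+// A note on the token handling below: with lz4MinMatch == 3, a zero match
+// nibble decodes to ml == 3, which LZ4s uses to mean "literals only, no
+// match". For example, token 0x52 yields 5 literals and a match length of
+// 3+2 == 5, while token 0x50 yields 5 literals and no match at all.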
+func (l *LZ4sConverter) ConvertBlock(dst, src []byte) ([]byte, int, error) { + if len(src) == 0 { + return dst, 0, nil + } + const debug = false + const inline = true + const lz4MinMatch = 3 + + s, d := 0, len(dst) + dst = dst[:cap(dst)] + if !debug && hasAmd64Asm { + res, sz := cvtLZ4sBlockAsm(dst[d:], src) + if res < 0 { + const ( + errCorrupt = -1 + errDstTooSmall = -2 + ) + switch res { + case errCorrupt: + return nil, 0, ErrCorrupt + case errDstTooSmall: + return nil, 0, ErrDstTooSmall + default: + return nil, 0, fmt.Errorf("unexpected result: %d", res) + } + } + if d+sz > len(dst) { + return nil, 0, ErrDstTooSmall + } + return dst[:d+sz], res, nil + } + + dLimit := len(dst) - 10 + var lastOffset uint16 + var uncompressed int + if debug { + fmt.Printf("convert block start: len(src): %d, len(dst):%d \n", len(src), len(dst)) + } + + for { + if s >= len(src) { + return dst[:d], 0, ErrCorrupt + } + // Read literal info + token := src[s] + ll := int(token >> 4) + ml := int(lz4MinMatch + (token & 0xf)) + + // If upper nibble is 15, literal length is extended + if token >= 0xf0 { + for { + s++ + if s >= len(src) { + if debug { + fmt.Printf("error reading ll: s (%d) >= len(src) (%d)\n", s, len(src)) + } + return dst[:d], 0, ErrCorrupt + } + val := src[s] + ll += int(val) + if val != 255 { + break + } + } + } + // Skip past token + if s+ll >= len(src) { + if debug { + fmt.Printf("error literals: s+ll (%d+%d) >= len(src) (%d)\n", s, ll, len(src)) + } + return nil, 0, ErrCorrupt + } + s++ + if ll > 0 { + if d+ll > dLimit { + return nil, 0, ErrDstTooSmall + } + if debug { + fmt.Printf("emit %d literals\n", ll) + } + d += emitLiteralGo(dst[d:], src[s:s+ll]) + s += ll + uncompressed += ll + } + + // Check if we are done... + if ml == lz4MinMatch { + if s == len(src) { + break + } + // 0 bytes. 
+ continue + } + // 2 byte offset + if s >= len(src)-2 { + if debug { + fmt.Printf("s (%d) >= len(src)-2 (%d)", s, len(src)-2) + } + return nil, 0, ErrCorrupt + } + offset := binary.LittleEndian.Uint16(src[s:]) + s += 2 + if offset == 0 { + if debug { + fmt.Printf("error: offset 0, ml: %d, len(src)-s: %d\n", ml, len(src)-s) + } + return nil, 0, ErrCorrupt + } + if int(offset) > uncompressed { + if debug { + fmt.Printf("error: offset (%d)> uncompressed (%d)\n", offset, uncompressed) + } + return nil, 0, ErrCorrupt + } + + if ml == lz4MinMatch+15 { + for { + if s >= len(src) { + if debug { + fmt.Printf("error reading ml: s (%d) >= len(src) (%d)\n", s, len(src)) + } + return nil, 0, ErrCorrupt + } + val := src[s] + s++ + ml += int(val) + if val != 255 { + if s >= len(src) { + if debug { + fmt.Printf("error reading ml: s (%d) >= len(src) (%d)\n", s, len(src)) + } + return nil, 0, ErrCorrupt + } + break + } + } + } + if offset == lastOffset { + if debug { + fmt.Printf("emit repeat, length: %d, offset: %d\n", ml, offset) + } + if !inline { + d += emitRepeat16(dst[d:], offset, ml) + } else { + length := ml + dst := dst[d:] + for len(dst) > 5 { + // Repeat offset, make length cheaper + length -= 4 + if length <= 4 { + dst[0] = uint8(length)<<2 | tagCopy1 + dst[1] = 0 + d += 2 + break + } + if length < 8 && offset < 2048 { + // Encode WITH offset + dst[1] = uint8(offset) + dst[0] = uint8(offset>>8)<<5 | uint8(length)<<2 | tagCopy1 + d += 2 + break + } + if length < (1<<8)+4 { + length -= 4 + dst[2] = uint8(length) + dst[1] = 0 + dst[0] = 5<<2 | tagCopy1 + d += 3 + break + } + if length < (1<<16)+(1<<8) { + length -= 1 << 8 + dst[3] = uint8(length >> 8) + dst[2] = uint8(length >> 0) + dst[1] = 0 + dst[0] = 6<<2 | tagCopy1 + d += 4 + break + } + const maxRepeat = (1 << 24) - 1 + length -= 1 << 16 + left := 0 + if length > maxRepeat { + left = length - maxRepeat + 4 + length = maxRepeat - 4 + } + dst[4] = uint8(length >> 16) + dst[3] = uint8(length >> 8) + dst[2] = uint8(length >> 0) + dst[1] = 0 + dst[0] = 7<<2 | tagCopy1 + if left > 0 { + d += 5 + emitRepeat16(dst[5:], offset, left) + break + } + d += 5 + break + } + } + } else { + if debug { + fmt.Printf("emit copy, length: %d, offset: %d\n", ml, offset) + } + if !inline { + d += emitCopy16(dst[d:], offset, ml) + } else { + length := ml + dst := dst[d:] + for len(dst) > 5 { + // Offset no more than 2 bytes. + if length > 64 { + off := 3 + if offset < 2048 { + // emit 8 bytes as tagCopy1, rest as repeats. + dst[1] = uint8(offset) + dst[0] = uint8(offset>>8)<<5 | uint8(8-4)<<2 | tagCopy1 + length -= 8 + off = 2 + } else { + // Emit a length 60 copy, encoded as 3 bytes. + // Emit remaining as repeat value (minimum 4 bytes). + dst[2] = uint8(offset >> 8) + dst[1] = uint8(offset) + dst[0] = 59<<2 | tagCopy2 + length -= 60 + } + // Emit remaining as repeats, at least 4 bytes remain. + d += off + emitRepeat16(dst[off:], offset, length) + break + } + if length >= 12 || offset >= 2048 { + // Emit the remaining copy, encoded as 3 bytes. + dst[2] = uint8(offset >> 8) + dst[1] = uint8(offset) + dst[0] = uint8(length-1)<<2 | tagCopy2 + d += 3 + break + } + // Emit the remaining copy, encoded as 2 bytes. 
+ dst[1] = uint8(offset) + dst[0] = uint8(offset>>8)<<5 | uint8(length-4)<<2 | tagCopy1 + d += 2 + break + } + } + lastOffset = offset + } + uncompressed += ml + if d > dLimit { + return nil, 0, ErrDstTooSmall + } + } + + return dst[:d], uncompressed, nil +} + +// ConvertBlockSnappy will convert an LZ4s block and append it +// as a Snappy block without block length to dst. +// The uncompressed size is returned as well. +// dst must have capacity to contain the entire compressed block. +func (l *LZ4sConverter) ConvertBlockSnappy(dst, src []byte) ([]byte, int, error) { + if len(src) == 0 { + return dst, 0, nil + } + const debug = false + const lz4MinMatch = 3 + + s, d := 0, len(dst) + dst = dst[:cap(dst)] + // Use assembly when possible + if !debug && hasAmd64Asm { + res, sz := cvtLZ4sBlockSnappyAsm(dst[d:], src) + if res < 0 { + const ( + errCorrupt = -1 + errDstTooSmall = -2 + ) + switch res { + case errCorrupt: + return nil, 0, ErrCorrupt + case errDstTooSmall: + return nil, 0, ErrDstTooSmall + default: + return nil, 0, fmt.Errorf("unexpected result: %d", res) + } + } + if d+sz > len(dst) { + return nil, 0, ErrDstTooSmall + } + return dst[:d+sz], res, nil + } + + dLimit := len(dst) - 10 + var uncompressed int + if debug { + fmt.Printf("convert block start: len(src): %d, len(dst):%d \n", len(src), len(dst)) + } + + for { + if s >= len(src) { + return nil, 0, ErrCorrupt + } + // Read literal info + token := src[s] + ll := int(token >> 4) + ml := int(lz4MinMatch + (token & 0xf)) + + // If upper nibble is 15, literal length is extended + if token >= 0xf0 { + for { + s++ + if s >= len(src) { + if debug { + fmt.Printf("error reading ll: s (%d) >= len(src) (%d)\n", s, len(src)) + } + return nil, 0, ErrCorrupt + } + val := src[s] + ll += int(val) + if val != 255 { + break + } + } + } + // Skip past token + if s+ll >= len(src) { + if debug { + fmt.Printf("error literals: s+ll (%d+%d) >= len(src) (%d)\n", s, ll, len(src)) + } + return nil, 0, ErrCorrupt + } + s++ + if ll > 0 { + if d+ll > dLimit { + return nil, 0, ErrDstTooSmall + } + if debug { + fmt.Printf("emit %d literals\n", ll) + } + d += emitLiteralGo(dst[d:], src[s:s+ll]) + s += ll + uncompressed += ll + } + + // Check if we are done... + if ml == lz4MinMatch { + if s == len(src) { + break + } + // 0 bytes. + continue + } + // 2 byte offset + if s >= len(src)-2 { + if debug { + fmt.Printf("s (%d) >= len(src)-2 (%d)", s, len(src)-2) + } + return nil, 0, ErrCorrupt + } + offset := binary.LittleEndian.Uint16(src[s:]) + s += 2 + if offset == 0 { + if debug { + fmt.Printf("error: offset 0, ml: %d, len(src)-s: %d\n", ml, len(src)-s) + } + return nil, 0, ErrCorrupt + } + if int(offset) > uncompressed { + if debug { + fmt.Printf("error: offset (%d)> uncompressed (%d)\n", offset, uncompressed) + } + return nil, 0, ErrCorrupt + } + + if ml == lz4MinMatch+15 { + for { + if s >= len(src) { + if debug { + fmt.Printf("error reading ml: s (%d) >= len(src) (%d)\n", s, len(src)) + } + return nil, 0, ErrCorrupt + } + val := src[s] + s++ + ml += int(val) + if val != 255 { + if s >= len(src) { + if debug { + fmt.Printf("error reading ml: s (%d) >= len(src) (%d)\n", s, len(src)) + } + return nil, 0, ErrCorrupt + } + break + } + } + } + if debug { + fmt.Printf("emit copy, length: %d, offset: %d\n", ml, offset) + } + length := ml + // d += emitCopyNoRepeat(dst[d:], int(offset), ml) + for length > 0 { + if d >= dLimit { + return nil, 0, ErrDstTooSmall + } + + // Offset no more than 2 bytes. + if length > 64 { + // Emit a length 64 copy, encoded as 3 bytes. 
+ dst[d+2] = uint8(offset >> 8) + dst[d+1] = uint8(offset) + dst[d+0] = 63<<2 | tagCopy2 + length -= 64 + d += 3 + continue + } + if length >= 12 || offset >= 2048 || length < 4 { + // Emit the remaining copy, encoded as 3 bytes. + dst[d+2] = uint8(offset >> 8) + dst[d+1] = uint8(offset) + dst[d+0] = uint8(length-1)<<2 | tagCopy2 + d += 3 + break + } + // Emit the remaining copy, encoded as 2 bytes. + dst[d+1] = uint8(offset) + dst[d+0] = uint8(offset>>8)<<5 | uint8(length-4)<<2 | tagCopy1 + d += 2 + break + } + uncompressed += ml + if d > dLimit { + return nil, 0, ErrDstTooSmall + } + } + + return dst[:d], uncompressed, nil +} diff --git a/vendor/github.com/klauspost/compress/s2/reader.go b/vendor/github.com/klauspost/compress/s2/reader.go new file mode 100644 index 0000000000..8372d752f9 --- /dev/null +++ b/vendor/github.com/klauspost/compress/s2/reader.go @@ -0,0 +1,1075 @@ +// Copyright 2011 The Snappy-Go Authors. All rights reserved. +// Copyright (c) 2019+ Klaus Post. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package s2 + +import ( + "errors" + "fmt" + "io" + "io/ioutil" + "math" + "runtime" + "sync" +) + +// ErrCantSeek is returned if the stream cannot be seeked. +type ErrCantSeek struct { + Reason string +} + +// Error returns the error as string. +func (e ErrCantSeek) Error() string { + return fmt.Sprintf("s2: Can't seek because %s", e.Reason) +} + +// NewReader returns a new Reader that decompresses from r, using the framing +// format described at +// https://github.com/google/snappy/blob/master/framing_format.txt with S2 changes. +func NewReader(r io.Reader, opts ...ReaderOption) *Reader { + nr := Reader{ + r: r, + maxBlock: maxBlockSize, + } + for _, opt := range opts { + if err := opt(&nr); err != nil { + nr.err = err + return &nr + } + } + nr.maxBufSize = MaxEncodedLen(nr.maxBlock) + checksumSize + if nr.lazyBuf > 0 { + nr.buf = make([]byte, MaxEncodedLen(nr.lazyBuf)+checksumSize) + } else { + nr.buf = make([]byte, MaxEncodedLen(defaultBlockSize)+checksumSize) + } + nr.readHeader = nr.ignoreStreamID + nr.paramsOK = true + return &nr +} + +// ReaderOption is an option for creating a decoder. +type ReaderOption func(*Reader) error + +// ReaderMaxBlockSize allows to control allocations if the stream +// has been compressed with a smaller WriterBlockSize, or with the default 1MB. +// Blocks must be this size or smaller to decompress, +// otherwise the decoder will return ErrUnsupported. +// +// For streams compressed with Snappy this can safely be set to 64KB (64 << 10). +// +// Default is the maximum limit of 4MB. +func ReaderMaxBlockSize(blockSize int) ReaderOption { + return func(r *Reader) error { + if blockSize > maxBlockSize || blockSize <= 0 { + return errors.New("s2: block size too large. Must be <= 4MB and > 0") + } + if r.lazyBuf == 0 && blockSize < defaultBlockSize { + r.lazyBuf = blockSize + } + r.maxBlock = blockSize + return nil + } +} + +// ReaderAllocBlock allows to control upfront stream allocations +// and not allocate for frames bigger than this initially. +// If frames bigger than this is seen a bigger buffer will be allocated. +// +// Default is 1MB, which is default output size. +func ReaderAllocBlock(blockSize int) ReaderOption { + return func(r *Reader) error { + if blockSize > maxBlockSize || blockSize < 1024 { + return errors.New("s2: invalid ReaderAllocBlock. 
Must be <= 4MB and >= 1024")
+		}
+		r.lazyBuf = blockSize
+		return nil
+	}
+}
+
+// ReaderIgnoreStreamIdentifier will make the reader skip the expected
+// stream identifier at the beginning of the stream.
+// This can be used when serving a stream that has been forwarded to a specific point.
+func ReaderIgnoreStreamIdentifier() ReaderOption {
+	return func(r *Reader) error {
+		r.ignoreStreamID = true
+		return nil
+	}
+}
+
+// ReaderSkippableCB will register a callback for chunks with the specified ID.
+// ID must be a Reserved skippable chunks ID, 0x80-0xfd (inclusive).
+// For each chunk with the ID, the callback is called with the content.
+// Any returned non-nil error will abort decompression.
+// Only one callback per ID is supported, latest sent will be used.
+// You can peek the stream, triggering the callback, by doing a Read with a 0
+// byte buffer.
+func ReaderSkippableCB(id uint8, fn func(r io.Reader) error) ReaderOption {
+	return func(r *Reader) error {
+		if id < 0x80 || id > 0xfd {
+			return fmt.Errorf("ReaderSkippableCB: Invalid id provided, must be 0x80-0xfd (inclusive)")
+		}
+		r.skippableCB[id-0x80] = fn
+		return nil
+	}
+}
+
+// ReaderIgnoreCRC will make the reader skip CRC calculation and checks.
+func ReaderIgnoreCRC() ReaderOption {
+	return func(r *Reader) error {
+		r.ignoreCRC = true
+		return nil
+	}
+}
+
+// Reader is an io.Reader that can read Snappy-compressed bytes.
+type Reader struct {
+	r           io.Reader
+	err         error
+	decoded     []byte
+	buf         []byte
+	skippableCB [0xff - 0x80]func(r io.Reader) error
+	blockStart  int64 // Uncompressed offset at start of current.
+	index       *Index
+
+	// decoded[i:j] contains decoded bytes that have not yet been passed on.
+	i, j int
+	// maximum block size allowed.
+	maxBlock int
+	// maximum expected buffer size.
+	maxBufSize int
+	// alloc a buffer this size if > 0.
+	lazyBuf        int
+	readHeader     bool
+	paramsOK       bool
+	snappyFrame    bool
+	ignoreStreamID bool
+	ignoreCRC      bool
+}
+
+// GetBufferCapacity returns the capacity of the internal buffer.
+// This might be useful to know when reusing the same reader in combination
+// with the lazy buffer option.
+func (r *Reader) GetBufferCapacity() int {
+	return cap(r.buf)
+}
+
+// ensureBufferSize will ensure that the buffer can take at least n bytes.
+// If false is returned the buffer exceeds maximum allowed size.
+func (r *Reader) ensureBufferSize(n int) bool {
+	if n > r.maxBufSize {
+		r.err = ErrCorrupt
+		return false
+	}
+	if cap(r.buf) >= n {
+		return true
+	}
+	// Realloc buffer.
+	r.buf = make([]byte, n)
+	return true
+}
+
+// Reset discards any buffered data, resets all state, and switches the Snappy
+// reader to read from r. This permits reusing a Reader rather than allocating
+// a new one.
+func (r *Reader) Reset(reader io.Reader) {
+	if !r.paramsOK {
+		return
+	}
+	r.index = nil
+	r.r = reader
+	r.err = nil
+	r.i = 0
+	r.j = 0
+	r.blockStart = 0
+	r.readHeader = r.ignoreStreamID
+}
+
+func (r *Reader) readFull(p []byte, allowEOF bool) (ok bool) {
+	if _, r.err = io.ReadFull(r.r, p); r.err != nil {
+		if r.err == io.ErrUnexpectedEOF || (r.err == io.EOF && !allowEOF) {
+			r.err = ErrCorrupt
+		}
+		return false
+	}
+	return true
+}
+
+// skippable will skip n bytes.
+// If the supplied reader supports seeking, that is used.
+// tmp is used as a temporary buffer for reading.
+// The supplied slice does not need to be the size of the read.
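+// Chunks whose ID has a callback registered via ReaderSkippableCB are handed
+// to that callback as a size-limited reader before the remainder is skipped.
+// For example (an illustrative sketch; the 0x80 ID is chosen arbitrarily):
+//
+//	r := NewReader(in, ReaderSkippableCB(0x80, func(sr io.Reader) error {
+//		meta, err := io.ReadAll(sr) // chunk payload, at most maxChunkSize bytes
+//		_ = meta                    // e.g. decode user metadata
+//		return err
+//	}))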
+func (r *Reader) skippable(tmp []byte, n int, allowEOF bool, id uint8) (ok bool) { + if id < 0x80 { + r.err = fmt.Errorf("internal error: skippable id < 0x80") + return false + } + if fn := r.skippableCB[id-0x80]; fn != nil { + rd := io.LimitReader(r.r, int64(n)) + r.err = fn(rd) + if r.err != nil { + return false + } + _, r.err = io.CopyBuffer(ioutil.Discard, rd, tmp) + return r.err == nil + } + if rs, ok := r.r.(io.ReadSeeker); ok { + _, err := rs.Seek(int64(n), io.SeekCurrent) + if err == nil { + return true + } + if err == io.ErrUnexpectedEOF || (r.err == io.EOF && !allowEOF) { + r.err = ErrCorrupt + return false + } + } + for n > 0 { + if n < len(tmp) { + tmp = tmp[:n] + } + if _, r.err = io.ReadFull(r.r, tmp); r.err != nil { + if r.err == io.ErrUnexpectedEOF || (r.err == io.EOF && !allowEOF) { + r.err = ErrCorrupt + } + return false + } + n -= len(tmp) + } + return true +} + +// Read satisfies the io.Reader interface. +func (r *Reader) Read(p []byte) (int, error) { + if r.err != nil { + return 0, r.err + } + for { + if r.i < r.j { + n := copy(p, r.decoded[r.i:r.j]) + r.i += n + return n, nil + } + if !r.readFull(r.buf[:4], true) { + return 0, r.err + } + chunkType := r.buf[0] + if !r.readHeader { + if chunkType != chunkTypeStreamIdentifier { + r.err = ErrCorrupt + return 0, r.err + } + r.readHeader = true + } + chunkLen := int(r.buf[1]) | int(r.buf[2])<<8 | int(r.buf[3])<<16 + + // The chunk types are specified at + // https://github.com/google/snappy/blob/master/framing_format.txt + switch chunkType { + case chunkTypeCompressedData: + r.blockStart += int64(r.j) + // Section 4.2. Compressed data (chunk type 0x00). + if chunkLen < checksumSize { + r.err = ErrCorrupt + return 0, r.err + } + if !r.ensureBufferSize(chunkLen) { + if r.err == nil { + r.err = ErrUnsupported + } + return 0, r.err + } + buf := r.buf[:chunkLen] + if !r.readFull(buf, false) { + return 0, r.err + } + checksum := uint32(buf[0]) | uint32(buf[1])<<8 | uint32(buf[2])<<16 | uint32(buf[3])<<24 + buf = buf[checksumSize:] + + n, err := DecodedLen(buf) + if err != nil { + r.err = err + return 0, r.err + } + if r.snappyFrame && n > maxSnappyBlockSize { + r.err = ErrCorrupt + return 0, r.err + } + + if n > len(r.decoded) { + if n > r.maxBlock { + r.err = ErrCorrupt + return 0, r.err + } + r.decoded = make([]byte, n) + } + if _, err := Decode(r.decoded, buf); err != nil { + r.err = err + return 0, r.err + } + if !r.ignoreCRC && crc(r.decoded[:n]) != checksum { + r.err = ErrCRC + return 0, r.err + } + r.i, r.j = 0, n + continue + + case chunkTypeUncompressedData: + r.blockStart += int64(r.j) + // Section 4.3. Uncompressed data (chunk type 0x01). + if chunkLen < checksumSize { + r.err = ErrCorrupt + return 0, r.err + } + if !r.ensureBufferSize(chunkLen) { + if r.err == nil { + r.err = ErrUnsupported + } + return 0, r.err + } + buf := r.buf[:checksumSize] + if !r.readFull(buf, false) { + return 0, r.err + } + checksum := uint32(buf[0]) | uint32(buf[1])<<8 | uint32(buf[2])<<16 | uint32(buf[3])<<24 + // Read directly into r.decoded instead of via r.buf. 
+			n := chunkLen - checksumSize
+			if r.snappyFrame && n > maxSnappyBlockSize {
+				r.err = ErrCorrupt
+				return 0, r.err
+			}
+			if n > len(r.decoded) {
+				if n > r.maxBlock {
+					r.err = ErrCorrupt
+					return 0, r.err
+				}
+				r.decoded = make([]byte, n)
+			}
+			if !r.readFull(r.decoded[:n], false) {
+				return 0, r.err
+			}
+			if !r.ignoreCRC && crc(r.decoded[:n]) != checksum {
+				r.err = ErrCRC
+				return 0, r.err
+			}
+			r.i, r.j = 0, n
+			continue
+
+		case chunkTypeStreamIdentifier:
+			// Section 4.1. Stream identifier (chunk type 0xff).
+			if chunkLen != len(magicBody) {
+				r.err = ErrCorrupt
+				return 0, r.err
+			}
+			if !r.readFull(r.buf[:len(magicBody)], false) {
+				return 0, r.err
+			}
+			if string(r.buf[:len(magicBody)]) != magicBody {
+				if string(r.buf[:len(magicBody)]) != magicBodySnappy {
+					r.err = ErrCorrupt
+					return 0, r.err
+				} else {
+					r.snappyFrame = true
+				}
+			} else {
+				r.snappyFrame = false
+			}
+			continue
+		}
+
+		if chunkType <= 0x7f {
+			// Section 4.5. Reserved unskippable chunks (chunk types 0x02-0x7f).
+			// fmt.Printf("ERR chunktype: 0x%x\n", chunkType)
+			r.err = ErrUnsupported
+			return 0, r.err
+		}
+		// Section 4.4 Padding (chunk type 0xfe).
+		// Section 4.6. Reserved skippable chunks (chunk types 0x80-0xfd).
+		if chunkLen > maxChunkSize {
+			// fmt.Printf("ERR chunkLen: 0x%x\n", chunkLen)
+			r.err = ErrUnsupported
+			return 0, r.err
+		}
+
+		// fmt.Printf("skippable: ID: 0x%x, len: 0x%x\n", chunkType, chunkLen)
+		if !r.skippable(r.buf, chunkLen, false, chunkType) {
+			return 0, r.err
+		}
+	}
+}
+
+// DecodeConcurrent will decode the full stream to w.
+// This function should not be combined with reading, seeking, or other operations.
+// Up to 'concurrent' goroutines will be used.
+// If <= 0, runtime.NumCPU will be used.
+// On success, the number of bytes decompressed and a nil error are returned.
+// This is mainly intended for bigger streams.
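+// For example (sketch):
+//
+//	r := NewReader(in)
+//	n, err := r.DecodeConcurrent(out, 0) // <= 0 selects runtime.NumCPU()
+//	fmt.Println("decompressed", n, "bytes, err:", err)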
+func (r *Reader) DecodeConcurrent(w io.Writer, concurrent int) (written int64, err error) { + if r.i > 0 || r.j > 0 || r.blockStart > 0 { + return 0, errors.New("DecodeConcurrent called after ") + } + if concurrent <= 0 { + concurrent = runtime.NumCPU() + } + + // Write to output + var errMu sync.Mutex + var aErr error + setErr := func(e error) (ok bool) { + errMu.Lock() + defer errMu.Unlock() + if e == nil { + return aErr == nil + } + if aErr == nil { + aErr = e + } + return false + } + hasErr := func() (ok bool) { + errMu.Lock() + v := aErr != nil + errMu.Unlock() + return v + } + + var aWritten int64 + toRead := make(chan []byte, concurrent) + writtenBlocks := make(chan []byte, concurrent) + queue := make(chan chan []byte, concurrent) + reUse := make(chan chan []byte, concurrent) + for i := 0; i < concurrent; i++ { + toRead <- make([]byte, 0, r.maxBufSize) + writtenBlocks <- make([]byte, 0, r.maxBufSize) + reUse <- make(chan []byte, 1) + } + // Writer + var wg sync.WaitGroup + wg.Add(1) + go func() { + defer wg.Done() + for toWrite := range queue { + entry := <-toWrite + reUse <- toWrite + if hasErr() || entry == nil { + if entry != nil { + writtenBlocks <- entry + } + continue + } + if hasErr() { + writtenBlocks <- entry + continue + } + n, err := w.Write(entry) + want := len(entry) + writtenBlocks <- entry + if err != nil { + setErr(err) + continue + } + if n != want { + setErr(io.ErrShortWrite) + continue + } + aWritten += int64(n) + } + }() + + defer func() { + if r.err != nil { + setErr(r.err) + } else if err != nil { + setErr(err) + } + close(queue) + wg.Wait() + if err == nil { + err = aErr + } + written = aWritten + }() + + // Reader + for !hasErr() { + if !r.readFull(r.buf[:4], true) { + if r.err == io.EOF { + r.err = nil + } + return 0, r.err + } + chunkType := r.buf[0] + if !r.readHeader { + if chunkType != chunkTypeStreamIdentifier { + r.err = ErrCorrupt + return 0, r.err + } + r.readHeader = true + } + chunkLen := int(r.buf[1]) | int(r.buf[2])<<8 | int(r.buf[3])<<16 + + // The chunk types are specified at + // https://github.com/google/snappy/blob/master/framing_format.txt + switch chunkType { + case chunkTypeCompressedData: + r.blockStart += int64(r.j) + // Section 4.2. Compressed data (chunk type 0x00). + if chunkLen < checksumSize { + r.err = ErrCorrupt + return 0, r.err + } + if chunkLen > r.maxBufSize { + r.err = ErrCorrupt + return 0, r.err + } + orgBuf := <-toRead + buf := orgBuf[:chunkLen] + + if !r.readFull(buf, false) { + return 0, r.err + } + + checksum := uint32(buf[0]) | uint32(buf[1])<<8 | uint32(buf[2])<<16 | uint32(buf[3])<<24 + buf = buf[checksumSize:] + + n, err := DecodedLen(buf) + if err != nil { + r.err = err + return 0, r.err + } + if r.snappyFrame && n > maxSnappyBlockSize { + r.err = ErrCorrupt + return 0, r.err + } + + if n > r.maxBlock { + r.err = ErrCorrupt + return 0, r.err + } + wg.Add(1) + + decoded := <-writtenBlocks + entry := <-reUse + queue <- entry + go func() { + defer wg.Done() + decoded = decoded[:n] + _, err := Decode(decoded, buf) + toRead <- orgBuf + if err != nil { + writtenBlocks <- decoded + setErr(err) + entry <- nil + return + } + if !r.ignoreCRC && crc(decoded) != checksum { + writtenBlocks <- decoded + setErr(ErrCRC) + entry <- nil + return + } + entry <- decoded + }() + continue + + case chunkTypeUncompressedData: + + // Section 4.3. Uncompressed data (chunk type 0x01). 
+ if chunkLen < checksumSize { + r.err = ErrCorrupt + return 0, r.err + } + if chunkLen > r.maxBufSize { + r.err = ErrCorrupt + return 0, r.err + } + // Grab write buffer + orgBuf := <-writtenBlocks + buf := orgBuf[:checksumSize] + if !r.readFull(buf, false) { + return 0, r.err + } + checksum := uint32(buf[0]) | uint32(buf[1])<<8 | uint32(buf[2])<<16 | uint32(buf[3])<<24 + // Read content. + n := chunkLen - checksumSize + + if r.snappyFrame && n > maxSnappyBlockSize { + r.err = ErrCorrupt + return 0, r.err + } + if n > r.maxBlock { + r.err = ErrCorrupt + return 0, r.err + } + // Read uncompressed + buf = orgBuf[:n] + if !r.readFull(buf, false) { + return 0, r.err + } + + if !r.ignoreCRC && crc(buf) != checksum { + r.err = ErrCRC + return 0, r.err + } + entry := <-reUse + queue <- entry + entry <- buf + continue + + case chunkTypeStreamIdentifier: + // Section 4.1. Stream identifier (chunk type 0xff). + if chunkLen != len(magicBody) { + r.err = ErrCorrupt + return 0, r.err + } + if !r.readFull(r.buf[:len(magicBody)], false) { + return 0, r.err + } + if string(r.buf[:len(magicBody)]) != magicBody { + if string(r.buf[:len(magicBody)]) != magicBodySnappy { + r.err = ErrCorrupt + return 0, r.err + } else { + r.snappyFrame = true + } + } else { + r.snappyFrame = false + } + continue + } + + if chunkType <= 0x7f { + // Section 4.5. Reserved unskippable chunks (chunk types 0x02-0x7f). + // fmt.Printf("ERR chunktype: 0x%x\n", chunkType) + r.err = ErrUnsupported + return 0, r.err + } + // Section 4.4 Padding (chunk type 0xfe). + // Section 4.6. Reserved skippable chunks (chunk types 0x80-0xfd). + if chunkLen > maxChunkSize { + // fmt.Printf("ERR chunkLen: 0x%x\n", chunkLen) + r.err = ErrUnsupported + return 0, r.err + } + + // fmt.Printf("skippable: ID: 0x%x, len: 0x%x\n", chunkType, chunkLen) + if !r.skippable(r.buf, chunkLen, false, chunkType) { + return 0, r.err + } + } + return 0, r.err +} + +// Skip will skip n bytes forward in the decompressed output. +// For larger skips this consumes less CPU and is faster than reading output and discarding it. +// CRC is not checked on skipped blocks. +// io.ErrUnexpectedEOF is returned if the stream ends before all bytes have been skipped. +// If a decoding error is encountered subsequent calls to Read will also fail. +func (r *Reader) Skip(n int64) error { + if n < 0 { + return errors.New("attempted negative skip") + } + if r.err != nil { + return r.err + } + + for n > 0 { + if r.i < r.j { + // Skip in buffer. + // decoded[i:j] contains decoded bytes that have not yet been passed on. + left := int64(r.j - r.i) + if left >= n { + tmp := int64(r.i) + n + if tmp > math.MaxInt32 { + return errors.New("s2: internal overflow in skip") + } + r.i = int(tmp) + return nil + } + n -= int64(r.j - r.i) + r.i = r.j + } + + // Buffer empty; read blocks until we have content. + if !r.readFull(r.buf[:4], true) { + if r.err == io.EOF { + r.err = io.ErrUnexpectedEOF + } + return r.err + } + chunkType := r.buf[0] + if !r.readHeader { + if chunkType != chunkTypeStreamIdentifier { + r.err = ErrCorrupt + return r.err + } + r.readHeader = true + } + chunkLen := int(r.buf[1]) | int(r.buf[2])<<8 | int(r.buf[3])<<16 + + // The chunk types are specified at + // https://github.com/google/snappy/blob/master/framing_format.txt + switch chunkType { + case chunkTypeCompressedData: + r.blockStart += int64(r.j) + // Section 4.2. Compressed data (chunk type 0x00). 
+ if chunkLen < checksumSize { + r.err = ErrCorrupt + return r.err + } + if !r.ensureBufferSize(chunkLen) { + if r.err == nil { + r.err = ErrUnsupported + } + return r.err + } + buf := r.buf[:chunkLen] + if !r.readFull(buf, false) { + return r.err + } + checksum := uint32(buf[0]) | uint32(buf[1])<<8 | uint32(buf[2])<<16 | uint32(buf[3])<<24 + buf = buf[checksumSize:] + + dLen, err := DecodedLen(buf) + if err != nil { + r.err = err + return r.err + } + if dLen > r.maxBlock { + r.err = ErrCorrupt + return r.err + } + // Check if destination is within this block + if int64(dLen) > n { + if len(r.decoded) < dLen { + r.decoded = make([]byte, dLen) + } + if _, err := Decode(r.decoded, buf); err != nil { + r.err = err + return r.err + } + if crc(r.decoded[:dLen]) != checksum { + r.err = ErrCorrupt + return r.err + } + } else { + // Skip block completely + n -= int64(dLen) + r.blockStart += int64(dLen) + dLen = 0 + } + r.i, r.j = 0, dLen + continue + case chunkTypeUncompressedData: + r.blockStart += int64(r.j) + // Section 4.3. Uncompressed data (chunk type 0x01). + if chunkLen < checksumSize { + r.err = ErrCorrupt + return r.err + } + if !r.ensureBufferSize(chunkLen) { + if r.err != nil { + r.err = ErrUnsupported + } + return r.err + } + buf := r.buf[:checksumSize] + if !r.readFull(buf, false) { + return r.err + } + checksum := uint32(buf[0]) | uint32(buf[1])<<8 | uint32(buf[2])<<16 | uint32(buf[3])<<24 + // Read directly into r.decoded instead of via r.buf. + n2 := chunkLen - checksumSize + if n2 > len(r.decoded) { + if n2 > r.maxBlock { + r.err = ErrCorrupt + return r.err + } + r.decoded = make([]byte, n2) + } + if !r.readFull(r.decoded[:n2], false) { + return r.err + } + if int64(n2) < n { + if crc(r.decoded[:n2]) != checksum { + r.err = ErrCorrupt + return r.err + } + } + r.i, r.j = 0, n2 + continue + case chunkTypeStreamIdentifier: + // Section 4.1. Stream identifier (chunk type 0xff). + if chunkLen != len(magicBody) { + r.err = ErrCorrupt + return r.err + } + if !r.readFull(r.buf[:len(magicBody)], false) { + return r.err + } + if string(r.buf[:len(magicBody)]) != magicBody { + if string(r.buf[:len(magicBody)]) != magicBodySnappy { + r.err = ErrCorrupt + return r.err + } + } + + continue + } + + if chunkType <= 0x7f { + // Section 4.5. Reserved unskippable chunks (chunk types 0x02-0x7f). + r.err = ErrUnsupported + return r.err + } + if chunkLen > maxChunkSize { + r.err = ErrUnsupported + return r.err + } + // Section 4.4 Padding (chunk type 0xfe). + // Section 4.6. Reserved skippable chunks (chunk types 0x80-0xfd). + if !r.skippable(r.buf, chunkLen, false, chunkType) { + return r.err + } + } + return nil +} + +// ReadSeeker provides random or forward seeking in compressed content. +// See Reader.ReadSeeker +type ReadSeeker struct { + *Reader + readAtMu sync.Mutex +} + +// ReadSeeker will return an io.ReadSeeker and io.ReaderAt +// compatible version of the reader. +// If 'random' is specified the returned io.Seeker can be used for +// random seeking, otherwise only forward seeking is supported. +// Enabling random seeking requires the original input to support +// the io.Seeker interface. +// A custom index can be specified which will be used if supplied. +// When using a custom index, it will not be read from the input stream. +// The ReadAt position will affect regular reads and the current position of Seek. +// So using Read after ReadAt will continue from where the ReadAt stopped. +// No functions should be used concurrently. 
+// The returned ReadSeeker contains a shallow reference to the existing Reader, +// meaning changes performed to one is reflected in the other. +func (r *Reader) ReadSeeker(random bool, index []byte) (*ReadSeeker, error) { + // Read index if provided. + if len(index) != 0 { + if r.index == nil { + r.index = &Index{} + } + if _, err := r.index.Load(index); err != nil { + return nil, ErrCantSeek{Reason: "loading index returned: " + err.Error()} + } + } + + // Check if input is seekable + rs, ok := r.r.(io.ReadSeeker) + if !ok { + if !random { + return &ReadSeeker{Reader: r}, nil + } + return nil, ErrCantSeek{Reason: "input stream isn't seekable"} + } + + if r.index != nil { + // Seekable and index, ok... + return &ReadSeeker{Reader: r}, nil + } + + // Load from stream. + r.index = &Index{} + + // Read current position. + pos, err := rs.Seek(0, io.SeekCurrent) + if err != nil { + return nil, ErrCantSeek{Reason: "seeking input returned: " + err.Error()} + } + err = r.index.LoadStream(rs) + if err != nil { + if err == ErrUnsupported { + // If we don't require random seeking, reset input and return. + if !random { + _, err = rs.Seek(pos, io.SeekStart) + if err != nil { + return nil, ErrCantSeek{Reason: "resetting stream returned: " + err.Error()} + } + r.index = nil + return &ReadSeeker{Reader: r}, nil + } + return nil, ErrCantSeek{Reason: "input stream does not contain an index"} + } + return nil, ErrCantSeek{Reason: "reading index returned: " + err.Error()} + } + + // reset position. + _, err = rs.Seek(pos, io.SeekStart) + if err != nil { + return nil, ErrCantSeek{Reason: "seeking input returned: " + err.Error()} + } + return &ReadSeeker{Reader: r}, nil +} + +// Seek allows seeking in compressed data. +func (r *ReadSeeker) Seek(offset int64, whence int) (int64, error) { + if r.err != nil { + if !errors.Is(r.err, io.EOF) { + return 0, r.err + } + // Reset on EOF + r.err = nil + } + + // Calculate absolute offset. + absOffset := offset + + switch whence { + case io.SeekStart: + case io.SeekCurrent: + absOffset = r.blockStart + int64(r.i) + offset + case io.SeekEnd: + if r.index == nil { + return 0, ErrUnsupported + } + absOffset = r.index.TotalUncompressed + offset + default: + r.err = ErrUnsupported + return 0, r.err + } + + if absOffset < 0 { + return 0, errors.New("seek before start of file") + } + + if !r.readHeader { + // Make sure we read the header. + _, r.err = r.Read([]byte{}) + if r.err != nil { + return 0, r.err + } + } + + // If we are inside current block no need to seek. + // This includes no offset changes. + if absOffset >= r.blockStart && absOffset < r.blockStart+int64(r.j) { + r.i = int(absOffset - r.blockStart) + return r.blockStart + int64(r.i), nil + } + + rs, ok := r.r.(io.ReadSeeker) + if r.index == nil || !ok { + currOffset := r.blockStart + int64(r.i) + if absOffset >= currOffset { + err := r.Skip(absOffset - currOffset) + return r.blockStart + int64(r.i), err + } + return 0, ErrUnsupported + } + + // We can seek and we have an index. + c, u, err := r.index.Find(absOffset) + if err != nil { + return r.blockStart + int64(r.i), err + } + + // Seek to next block + _, err = rs.Seek(c, io.SeekStart) + if err != nil { + return 0, err + } + + r.i = r.j // Remove rest of current block. + r.blockStart = u - int64(r.j) // Adjust current block start for accounting. 
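+	// Backdating blockStart by r.j preserves the invariant
+	// blockStart+int64(r.i) == u even though the block at u has not been
+	// decoded yet. E.g. with r.j == 1024 and u == 4096 this leaves
+	// blockStart == 3072 and r.i == 1024, i.e. position 4096.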
+ if u < absOffset { + // Forward inside block + return absOffset, r.Skip(absOffset - u) + } + if u > absOffset { + return 0, fmt.Errorf("s2 seek: (internal error) u (%d) > absOffset (%d)", u, absOffset) + } + return absOffset, nil +} + +// ReadAt reads len(p) bytes into p starting at offset off in the +// underlying input source. It returns the number of bytes +// read (0 <= n <= len(p)) and any error encountered. +// +// When ReadAt returns n < len(p), it returns a non-nil error +// explaining why more bytes were not returned. In this respect, +// ReadAt is stricter than Read. +// +// Even if ReadAt returns n < len(p), it may use all of p as scratch +// space during the call. If some data is available but not len(p) bytes, +// ReadAt blocks until either all the data is available or an error occurs. +// In this respect ReadAt is different from Read. +// +// If the n = len(p) bytes returned by ReadAt are at the end of the +// input source, ReadAt may return either err == EOF or err == nil. +// +// If ReadAt is reading from an input source with a seek offset, +// ReadAt should not affect nor be affected by the underlying +// seek offset. +// +// Clients of ReadAt can execute parallel ReadAt calls on the +// same input source. This is however not recommended. +func (r *ReadSeeker) ReadAt(p []byte, offset int64) (int, error) { + r.readAtMu.Lock() + defer r.readAtMu.Unlock() + _, err := r.Seek(offset, io.SeekStart) + if err != nil { + return 0, err + } + n := 0 + for n < len(p) { + n2, err := r.Read(p[n:]) + if err != nil { + // This will include io.EOF + return n + n2, err + } + n += n2 + } + return n, nil +} + +// ReadByte satisfies the io.ByteReader interface. +func (r *Reader) ReadByte() (byte, error) { + if r.err != nil { + return 0, r.err + } + if r.i < r.j { + c := r.decoded[r.i] + r.i++ + return c, nil + } + var tmp [1]byte + for i := 0; i < 10; i++ { + n, err := r.Read(tmp[:]) + if err != nil { + return 0, err + } + if n == 1 { + return tmp[0], nil + } + } + return 0, io.ErrNoProgress +} + +// SkippableCB will register a callback for chunks with the specified ID. +// ID must be a Reserved skippable chunks ID, 0x80-0xfd (inclusive). +// For each chunk with the ID, the callback is called with the content. +// Any returned non-nil error will abort decompression. +// Only one callback per ID is supported, latest sent will be used. +// Sending a nil function will disable previous callbacks. +// You can peek the stream, triggering the callback, by doing a Read with a 0 +// byte buffer. +func (r *Reader) SkippableCB(id uint8, fn func(r io.Reader) error) error { + if id < 0x80 || id >= chunkTypePadding { + return fmt.Errorf("ReaderSkippableCB: Invalid id provided, must be 0x80-0xfe (inclusive)") + } + r.skippableCB[id-0x80] = fn + return nil +} diff --git a/vendor/github.com/klauspost/compress/s2/s2.go b/vendor/github.com/klauspost/compress/s2/s2.go new file mode 100644 index 0000000000..cbd1ed64d6 --- /dev/null +++ b/vendor/github.com/klauspost/compress/s2/s2.go @@ -0,0 +1,151 @@ +// Copyright 2011 The Snappy-Go Authors. All rights reserved. +// Copyright (c) 2019 Klaus Post. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package s2 implements the S2 compression format. +// +// S2 is an extension of Snappy. Similar to Snappy S2 is aimed for high throughput, +// which is why it features concurrent compression for bigger payloads. 
+//
+// Decoding is compatible with Snappy compressed content,
+// but content compressed with S2 cannot be decompressed by Snappy.
+//
+// For more information on Snappy/S2 differences see README in: https://github.com/klauspost/compress/tree/master/s2
+//
+// There are actually two S2 formats: block and stream. They are related,
+// but different: trying to decompress block-compressed data as a S2 stream
+// will fail, and vice versa. The block format is the Decode and Encode
+// functions and the stream format is the Reader and Writer types.
+//
+// A "better" compression option is available. This will trade some compression
+// speed for a better compression ratio.
+//
+// The block format, the more common case, is used when the complete size (the
+// number of bytes) of the original data is known upfront, at the time
+// compression starts. The stream format, also known as the framing format, is
+// for when that isn't always true.
+//
+// Blocks do not offer much data protection, so it is up to you to
+// add data validation of decompressed blocks.
+//
+// Streams perform CRC validation of the decompressed data.
+// Stream compression will also be performed on multiple CPU cores concurrently,
+// significantly improving throughput.
+package s2
+
+import (
+	"bytes"
+	"hash/crc32"
+
+	"github.com/klauspost/compress/internal/race"
+)
+
+/*
+Each encoded block begins with the varint-encoded length of the decoded data,
+followed by a sequence of chunks. Chunks begin and end on byte boundaries. The
+first byte of each chunk is broken into its 2 least and 6 most significant bits
+called l and m: l ranges in [0, 4) and m ranges in [0, 64). l is the chunk tag.
+Zero means a literal tag. All other values mean a copy tag.
+
+For literal tags:
+  - If m < 60, the next 1 + m bytes are literal bytes.
+  - Otherwise, let n be the little-endian unsigned integer denoted by the next
+    m - 59 bytes. The next 1 + n bytes after that are literal bytes.
+
+For copy tags, length bytes are copied from offset bytes ago, in the style of
+Lempel-Ziv compression algorithms. In particular:
+  - For l == 1, the offset ranges in [0, 1<<11) and the length in [4, 12).
+    The length is 4 + the low 3 bits of m. The high 3 bits of m form bits 8-10
+    of the offset. The next byte is bits 0-7 of the offset.
+  - For l == 2, the offset ranges in [0, 1<<16) and the length in [1, 65).
+    The length is 1 + m. The offset is the little-endian unsigned integer
+    denoted by the next 2 bytes.
+  - For l == 3, the offset ranges in [0, 1<<32) and the length in
+    [1, 65). The length is 1 + m. The offset is the little-endian unsigned
+    integer denoted by the next 4 bytes.
+*/
+const (
+	tagLiteral = 0x00
+	tagCopy1   = 0x01
+	tagCopy2   = 0x02
+	tagCopy4   = 0x03
+)
+
+const (
+	checksumSize     = 4
+	chunkHeaderSize  = 4
+	magicChunk       = "\xff\x06\x00\x00" + magicBody
+	magicChunkSnappy = "\xff\x06\x00\x00" + magicBodySnappy
+	magicBodySnappy  = "sNaPpY"
+	magicBody        = "S2sTwO"
+
+	// maxBlockSize is the maximum size of the input to encodeBlock.
+	//
+	// For the framing format (Writer type instead of Encode function),
+	// this is the maximum uncompressed size of a block.
+	maxBlockSize = 4 << 20
+
+	// minBlockSize is the minimum size of block setting when creating a writer.
+	minBlockSize = 4 << 10
+
+	skippableFrameHeader = 4
+	maxChunkSize         = 1<<24 - 1 // 16777215
+
+	// Default block size
+	defaultBlockSize = 1 << 20
+
+	// maxSnappyBlockSize is the maximum snappy block size.
+ maxSnappyBlockSize = 1 << 16 + + obufHeaderLen = checksumSize + chunkHeaderSize +) + +const ( + chunkTypeCompressedData = 0x00 + chunkTypeUncompressedData = 0x01 + ChunkTypeIndex = 0x99 + chunkTypePadding = 0xfe + chunkTypeStreamIdentifier = 0xff +) + +var ( + crcTable = crc32.MakeTable(crc32.Castagnoli) + magicChunkSnappyBytes = []byte(magicChunkSnappy) // Can be passed to functions where it escapes. + magicChunkBytes = []byte(magicChunk) // Can be passed to functions where it escapes. +) + +// crc implements the checksum specified in section 3 of +// https://github.com/google/snappy/blob/master/framing_format.txt +func crc(b []byte) uint32 { + race.ReadSlice(b) + + c := crc32.Update(0, crcTable, b) + return c>>15 | c<<17 + 0xa282ead8 +} + +// literalExtraSize returns the extra size of encoding n literals. +// n should be >= 0 and <= math.MaxUint32. +func literalExtraSize(n int64) int64 { + if n == 0 { + return 0 + } + switch { + case n < 60: + return 1 + case n < 1<<8: + return 2 + case n < 1<<16: + return 3 + case n < 1<<24: + return 4 + default: + return 5 + } +} + +type byter interface { + Bytes() []byte +} + +var _ byter = &bytes.Buffer{} diff --git a/vendor/github.com/klauspost/compress/s2/writer.go b/vendor/github.com/klauspost/compress/s2/writer.go new file mode 100644 index 0000000000..fd15078f7d --- /dev/null +++ b/vendor/github.com/klauspost/compress/s2/writer.go @@ -0,0 +1,1064 @@ +// Copyright 2011 The Snappy-Go Authors. All rights reserved. +// Copyright (c) 2019+ Klaus Post. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package s2 + +import ( + "crypto/rand" + "encoding/binary" + "errors" + "fmt" + "io" + "runtime" + "sync" + + "github.com/klauspost/compress/internal/race" +) + +const ( + levelUncompressed = iota + 1 + levelFast + levelBetter + levelBest +) + +// NewWriter returns a new Writer that compresses to w, using the +// framing format described at +// https://github.com/google/snappy/blob/master/framing_format.txt +// +// Users must call Close to guarantee all data has been forwarded to +// the underlying io.Writer and that resources are released. +// They may also call Flush zero or more times before calling Close. +func NewWriter(w io.Writer, opts ...WriterOption) *Writer { + w2 := Writer{ + blockSize: defaultBlockSize, + concurrency: runtime.GOMAXPROCS(0), + randSrc: rand.Reader, + level: levelFast, + } + for _, opt := range opts { + if err := opt(&w2); err != nil { + w2.errState = err + return &w2 + } + } + w2.obufLen = obufHeaderLen + MaxEncodedLen(w2.blockSize) + w2.paramsOK = true + w2.ibuf = make([]byte, 0, w2.blockSize) + w2.buffers.New = func() interface{} { + return make([]byte, w2.obufLen) + } + w2.Reset(w) + return &w2 +} + +// Writer is an io.Writer that can write Snappy-compressed bytes. +type Writer struct { + errMu sync.Mutex + errState error + + // ibuf is a buffer for the incoming (uncompressed) bytes. + ibuf []byte + + blockSize int + obufLen int + concurrency int + written int64 + uncompWritten int64 // Bytes sent to compression + output chan chan result + buffers sync.Pool + pad int + + writer io.Writer + randSrc io.Reader + writerWg sync.WaitGroup + index Index + customEnc func(dst, src []byte) int + + // wroteStreamHeader is whether we have written the stream header. 
+	wroteStreamHeader bool
+	paramsOK          bool
+	snappy            bool
+	flushOnWrite      bool
+	appendIndex       bool
+	bufferCB          func([]byte)
+	level             uint8
+}
+
+type result struct {
+	b []byte
+	// return when writing
+	ret []byte
+	// Uncompressed start offset
+	startOffset int64
+}
+
+// err returns the previously set error.
+// If no error has been set, it is set to the supplied err if that is not nil.
+func (w *Writer) err(err error) error {
+	w.errMu.Lock()
+	errSet := w.errState
+	if errSet == nil && err != nil {
+		w.errState = err
+		errSet = err
+	}
+	w.errMu.Unlock()
+	return errSet
+}
+
+// Reset discards the writer's state and switches the Snappy writer to write to writer.
+// This permits reusing a Writer rather than allocating a new one.
+func (w *Writer) Reset(writer io.Writer) {
+	if !w.paramsOK {
+		return
+	}
+	// Close previous writer, if any.
+	if w.output != nil {
+		close(w.output)
+		w.writerWg.Wait()
+		w.output = nil
+	}
+	w.errState = nil
+	w.ibuf = w.ibuf[:0]
+	w.wroteStreamHeader = false
+	w.written = 0
+	w.writer = writer
+	w.uncompWritten = 0
+	w.index.reset(w.blockSize)
+
+	// If we didn't get a writer, stop here.
+	if writer == nil {
+		return
+	}
+	// If no concurrency requested, don't spin up writer goroutine.
+	if w.concurrency == 1 {
+		return
+	}
+
+	toWrite := make(chan chan result, w.concurrency)
+	w.output = toWrite
+	w.writerWg.Add(1)
+
+	// Start a writer goroutine that will write all output in order.
+	go func() {
+		defer w.writerWg.Done()
+
+		// Get a queued write.
+		for write := range toWrite {
+			// Wait for the data to be available.
+			input := <-write
+			if input.ret != nil && w.bufferCB != nil {
+				w.bufferCB(input.ret)
+				input.ret = nil
+			}
+			in := input.b
+			if len(in) > 0 {
+				if w.err(nil) == nil {
+					// Don't expose data from previous buffers.
+					toWrite := in[:len(in):len(in)]
+					// Write to output.
+					n, err := writer.Write(toWrite)
+					if err == nil && n != len(toWrite) {
+						err = io.ErrShortWrite
+					}
+					_ = w.err(err)
+					w.err(w.index.add(w.written, input.startOffset))
+					w.written += int64(n)
+				}
+			}
+			if cap(in) >= w.obufLen {
+				w.buffers.Put(in)
+			}
+			// close the incoming write request.
+			// This can be used for synchronizing flushes.
+			close(write)
+		}
+	}()
+}
+
+// Write satisfies the io.Writer interface.
+func (w *Writer) Write(p []byte) (nRet int, errRet error) {
+	if err := w.err(nil); err != nil {
+		return 0, err
+	}
+	if w.flushOnWrite {
+		return w.write(p)
+	}
+	// If we exceed the input buffer size, start writing
+	for len(p) > (cap(w.ibuf)-len(w.ibuf)) && w.err(nil) == nil {
+		var n int
+		if len(w.ibuf) == 0 {
+			// Large write, empty buffer.
+			// Write directly from p to avoid copy.
+			n, _ = w.write(p)
+		} else {
+			n = copy(w.ibuf[len(w.ibuf):cap(w.ibuf)], p)
+			w.ibuf = w.ibuf[:len(w.ibuf)+n]
+			w.write(w.ibuf)
+			w.ibuf = w.ibuf[:0]
+		}
+		nRet += n
+		p = p[n:]
+	}
+	if err := w.err(nil); err != nil {
+		return nRet, err
+	}
+	// p should always be able to fit into w.ibuf now.
+	n := copy(w.ibuf[len(w.ibuf):cap(w.ibuf)], p)
+	w.ibuf = w.ibuf[:len(w.ibuf)+n]
+	nRet += n
+	return nRet, nil
+}
+
+// ReadFrom implements the io.ReaderFrom interface.
+// Using this is typically more efficient since it avoids a memory copy.
+// ReadFrom reads data from r until EOF or error.
+// The return value n is the number of bytes read.
+// Any error except io.EOF encountered during the read is also returned.
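+//
+// A hedged usage sketch; f (an opened *os.File) and out (an io.Writer)
+// are illustrative names, not part of this API:
+//
+//	enc := NewWriter(out)
+//	if _, err := enc.ReadFrom(f); err != nil {
+//		// handle error
+//	}
+//	if err := enc.Close(); err != nil {
+//		// handle error
+//	}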
+func (w *Writer) ReadFrom(r io.Reader) (n int64, err error) {
+	if err := w.err(nil); err != nil {
+		return 0, err
+	}
+	if len(w.ibuf) > 0 {
+		err := w.AsyncFlush()
+		if err != nil {
+			return 0, err
+		}
+	}
+	if br, ok := r.(byter); ok {
+		buf := br.Bytes()
+		if err := w.EncodeBuffer(buf); err != nil {
+			return 0, err
+		}
+		return int64(len(buf)), w.AsyncFlush()
+	}
+	for {
+		inbuf := w.buffers.Get().([]byte)[:w.blockSize+obufHeaderLen]
+		n2, err := io.ReadFull(r, inbuf[obufHeaderLen:])
+		if err != nil {
+			if err == io.ErrUnexpectedEOF {
+				err = io.EOF
+			}
+			if err != io.EOF {
+				return n, w.err(err)
+			}
+		}
+		if n2 == 0 {
+			if cap(inbuf) >= w.obufLen {
+				w.buffers.Put(inbuf)
+			}
+			break
+		}
+		n += int64(n2)
+		err2 := w.writeFull(inbuf[:n2+obufHeaderLen])
+		if w.err(err2) != nil {
+			break
+		}
+
+		if err != nil {
+			// We got EOF and wrote everything
+			break
+		}
+	}
+
+	return n, w.err(nil)
+}
+
+// AddSkippableBlock will add a skippable block to the stream.
+// The ID must be 0x80-0xfe (inclusive).
+// Length of the skippable block must be <= 16777215 bytes.
+func (w *Writer) AddSkippableBlock(id uint8, data []byte) (err error) {
+	if err := w.err(nil); err != nil {
+		return err
+	}
+	if len(data) == 0 {
+		return nil
+	}
+	if id < 0x80 || id > chunkTypePadding {
+		return fmt.Errorf("invalid skippable block id %x", id)
+	}
+	if len(data) > maxChunkSize {
+		return fmt.Errorf("skippable block exceeds maximum size")
+	}
+	var header [4]byte
+	chunkLen := len(data)
+	header[0] = id
+	header[1] = uint8(chunkLen >> 0)
+	header[2] = uint8(chunkLen >> 8)
+	header[3] = uint8(chunkLen >> 16)
+	if w.concurrency == 1 {
+		write := func(b []byte) error {
+			n, err := w.writer.Write(b)
+			if err = w.err(err); err != nil {
+				return err
+			}
+			if n != len(b) {
+				return w.err(io.ErrShortWrite)
+			}
+			w.written += int64(n)
+			return w.err(nil)
+		}
+		if !w.wroteStreamHeader {
+			w.wroteStreamHeader = true
+			if w.snappy {
+				if err := write([]byte(magicChunkSnappy)); err != nil {
+					return err
+				}
+			} else {
+				if err := write([]byte(magicChunk)); err != nil {
+					return err
+				}
+			}
+		}
+		if err := write(header[:]); err != nil {
+			return err
+		}
+		return write(data)
+	}
+
+	// Create output...
+	if !w.wroteStreamHeader {
+		w.wroteStreamHeader = true
+		hWriter := make(chan result)
+		w.output <- hWriter
+		if w.snappy {
+			hWriter <- result{startOffset: w.uncompWritten, b: magicChunkSnappyBytes}
+		} else {
+			hWriter <- result{startOffset: w.uncompWritten, b: magicChunkBytes}
+		}
+	}
+
+	// Copy input.
+	inbuf := w.buffers.Get().([]byte)[:4]
+	copy(inbuf, header[:])
+	inbuf = append(inbuf, data...)
+
+	output := make(chan result, 1)
+	// Queue output.
+	w.output <- output
+	output <- result{startOffset: w.uncompWritten, b: inbuf}
+
+	return nil
+}
+
+// EncodeBuffer will add a buffer to the stream.
+// This is the fastest way to encode a stream,
+// but the input buffer cannot be written to by the caller
+// until Flush or Close has been called when concurrency != 1.
+//
+// Use the WriterBufferDone option to receive a callback when the buffer
+// is done processing.
+//
+// Note that input is not buffered.
+// This means that each write will result in discrete blocks being created.
+// For buffered writes, use the regular Write function.
+func (w *Writer) EncodeBuffer(buf []byte) (err error) {
+	if err := w.err(nil); err != nil {
+		return err
+	}
+
+	if w.flushOnWrite {
+		_, err := w.write(buf)
+		return err
+	}
+	// Flush queued data first.
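+	// (EncodeBuffer bypasses w.ibuf, so data already buffered by Write is
+	// compressed now to keep blocks in stream order.)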
+ if len(w.ibuf) > 0 { + err := w.AsyncFlush() + if err != nil { + return err + } + } + if w.concurrency == 1 { + _, err := w.writeSync(buf) + if w.bufferCB != nil { + w.bufferCB(buf) + } + return err + } + + // Spawn goroutine and write block to output channel. + if !w.wroteStreamHeader { + w.wroteStreamHeader = true + hWriter := make(chan result) + w.output <- hWriter + if w.snappy { + hWriter <- result{startOffset: w.uncompWritten, b: magicChunkSnappyBytes} + } else { + hWriter <- result{startOffset: w.uncompWritten, b: magicChunkBytes} + } + } + orgBuf := buf + for len(buf) > 0 { + // Cut input. + uncompressed := buf + if len(uncompressed) > w.blockSize { + uncompressed = uncompressed[:w.blockSize] + } + buf = buf[len(uncompressed):] + // Get an output buffer. + obuf := w.buffers.Get().([]byte)[:len(uncompressed)+obufHeaderLen] + race.WriteSlice(obuf) + + output := make(chan result) + // Queue output now, so we keep order. + w.output <- output + res := result{ + startOffset: w.uncompWritten, + } + w.uncompWritten += int64(len(uncompressed)) + if len(buf) == 0 && w.bufferCB != nil { + res.ret = orgBuf + } + go func() { + race.ReadSlice(uncompressed) + + checksum := crc(uncompressed) + + // Set to uncompressed. + chunkType := uint8(chunkTypeUncompressedData) + chunkLen := 4 + len(uncompressed) + + // Attempt compressing. + n := binary.PutUvarint(obuf[obufHeaderLen:], uint64(len(uncompressed))) + n2 := w.encodeBlock(obuf[obufHeaderLen+n:], uncompressed) + + // Check if we should use this, or store as uncompressed instead. + if n2 > 0 { + chunkType = uint8(chunkTypeCompressedData) + chunkLen = 4 + n + n2 + obuf = obuf[:obufHeaderLen+n+n2] + } else { + // copy uncompressed + copy(obuf[obufHeaderLen:], uncompressed) + } + + // Fill in the per-chunk header that comes before the body. + obuf[0] = chunkType + obuf[1] = uint8(chunkLen >> 0) + obuf[2] = uint8(chunkLen >> 8) + obuf[3] = uint8(chunkLen >> 16) + obuf[4] = uint8(checksum >> 0) + obuf[5] = uint8(checksum >> 8) + obuf[6] = uint8(checksum >> 16) + obuf[7] = uint8(checksum >> 24) + + // Queue final output. + res.b = obuf + output <- res + }() + } + return nil +} + +func (w *Writer) encodeBlock(obuf, uncompressed []byte) int { + if w.customEnc != nil { + if ret := w.customEnc(obuf, uncompressed); ret >= 0 { + return ret + } + } + if w.snappy { + switch w.level { + case levelFast: + return encodeBlockSnappy(obuf, uncompressed) + case levelBetter: + return encodeBlockBetterSnappy(obuf, uncompressed) + case levelBest: + return encodeBlockBestSnappy(obuf, uncompressed) + } + return 0 + } + switch w.level { + case levelFast: + return encodeBlock(obuf, uncompressed) + case levelBetter: + return encodeBlockBetter(obuf, uncompressed) + case levelBest: + return encodeBlockBest(obuf, uncompressed, nil) + } + return 0 +} + +func (w *Writer) write(p []byte) (nRet int, errRet error) { + if err := w.err(nil); err != nil { + return 0, err + } + if w.concurrency == 1 { + return w.writeSync(p) + } + + // Spawn goroutine and write block to output channel. + for len(p) > 0 { + if !w.wroteStreamHeader { + w.wroteStreamHeader = true + hWriter := make(chan result) + w.output <- hWriter + if w.snappy { + hWriter <- result{startOffset: w.uncompWritten, b: magicChunkSnappyBytes} + } else { + hWriter <- result{startOffset: w.uncompWritten, b: magicChunkBytes} + } + } + + var uncompressed []byte + if len(p) > w.blockSize { + uncompressed, p = p[:w.blockSize], p[w.blockSize:] + } else { + uncompressed, p = p, nil + } + + // Copy input. 
+ // If the block is incompressible, this is used for the result. + inbuf := w.buffers.Get().([]byte)[:len(uncompressed)+obufHeaderLen] + obuf := w.buffers.Get().([]byte)[:w.obufLen] + copy(inbuf[obufHeaderLen:], uncompressed) + uncompressed = inbuf[obufHeaderLen:] + + output := make(chan result) + // Queue output now, so we keep order. + w.output <- output + res := result{ + startOffset: w.uncompWritten, + } + w.uncompWritten += int64(len(uncompressed)) + + go func() { + checksum := crc(uncompressed) + + // Set to uncompressed. + chunkType := uint8(chunkTypeUncompressedData) + chunkLen := 4 + len(uncompressed) + + // Attempt compressing. + n := binary.PutUvarint(obuf[obufHeaderLen:], uint64(len(uncompressed))) + n2 := w.encodeBlock(obuf[obufHeaderLen+n:], uncompressed) + + // Check if we should use this, or store as uncompressed instead. + if n2 > 0 { + chunkType = uint8(chunkTypeCompressedData) + chunkLen = 4 + n + n2 + obuf = obuf[:obufHeaderLen+n+n2] + } else { + // Use input as output. + obuf, inbuf = inbuf, obuf + } + + // Fill in the per-chunk header that comes before the body. + obuf[0] = chunkType + obuf[1] = uint8(chunkLen >> 0) + obuf[2] = uint8(chunkLen >> 8) + obuf[3] = uint8(chunkLen >> 16) + obuf[4] = uint8(checksum >> 0) + obuf[5] = uint8(checksum >> 8) + obuf[6] = uint8(checksum >> 16) + obuf[7] = uint8(checksum >> 24) + + // Queue final output. + res.b = obuf + output <- res + + // Put unused buffer back in pool. + w.buffers.Put(inbuf) + }() + nRet += len(uncompressed) + } + return nRet, nil +} + +// writeFull is a special version of write that will always write the full buffer. +// Data to be compressed should start at offset obufHeaderLen and fill the remainder of the buffer. +// The data will be written as a single block. +// The caller is not allowed to use inbuf after this function has been called. +func (w *Writer) writeFull(inbuf []byte) (errRet error) { + if err := w.err(nil); err != nil { + return err + } + + if w.concurrency == 1 { + _, err := w.writeSync(inbuf[obufHeaderLen:]) + if cap(inbuf) >= w.obufLen { + w.buffers.Put(inbuf) + } + return err + } + + // Spawn goroutine and write block to output channel. + if !w.wroteStreamHeader { + w.wroteStreamHeader = true + hWriter := make(chan result) + w.output <- hWriter + if w.snappy { + hWriter <- result{startOffset: w.uncompWritten, b: magicChunkSnappyBytes} + } else { + hWriter <- result{startOffset: w.uncompWritten, b: magicChunkBytes} + } + } + + // Get an output buffer. + obuf := w.buffers.Get().([]byte)[:w.obufLen] + uncompressed := inbuf[obufHeaderLen:] + + output := make(chan result) + // Queue output now, so we keep order. + w.output <- output + res := result{ + startOffset: w.uncompWritten, + } + w.uncompWritten += int64(len(uncompressed)) + + go func() { + checksum := crc(uncompressed) + + // Set to uncompressed. + chunkType := uint8(chunkTypeUncompressedData) + chunkLen := 4 + len(uncompressed) + + // Attempt compressing. + n := binary.PutUvarint(obuf[obufHeaderLen:], uint64(len(uncompressed))) + n2 := w.encodeBlock(obuf[obufHeaderLen+n:], uncompressed) + + // Check if we should use this, or store as uncompressed instead. + if n2 > 0 { + chunkType = uint8(chunkTypeCompressedData) + chunkLen = 4 + n + n2 + obuf = obuf[:obufHeaderLen+n+n2] + } else { + // Use input as output. + obuf, inbuf = inbuf, obuf + } + + // Fill in the per-chunk header that comes before the body. 
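+		// Header layout (little-endian), as filled in below:
+		//   obuf[0]   chunk type (compressed or uncompressed data)
+		//   obuf[1:4] chunk length, lower 24 bits (includes the checksum bytes)
+		//   obuf[4:8] masked CRC-32C of the uncompressed data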
+		obuf[0] = chunkType
+		obuf[1] = uint8(chunkLen >> 0)
+		obuf[2] = uint8(chunkLen >> 8)
+		obuf[3] = uint8(chunkLen >> 16)
+		obuf[4] = uint8(checksum >> 0)
+		obuf[5] = uint8(checksum >> 8)
+		obuf[6] = uint8(checksum >> 16)
+		obuf[7] = uint8(checksum >> 24)
+
+		// Queue final output.
+		res.b = obuf
+		output <- res
+
+		// Put unused buffer back in pool.
+		w.buffers.Put(inbuf)
+	}()
+	return nil
+}
+
+func (w *Writer) writeSync(p []byte) (nRet int, errRet error) {
+	if err := w.err(nil); err != nil {
+		return 0, err
+	}
+	if !w.wroteStreamHeader {
+		w.wroteStreamHeader = true
+		var n int
+		var err error
+		if w.snappy {
+			n, err = w.writer.Write(magicChunkSnappyBytes)
+		} else {
+			n, err = w.writer.Write(magicChunkBytes)
+		}
+		if err != nil {
+			return 0, w.err(err)
+		}
+		if n != len(magicChunk) {
+			return 0, w.err(io.ErrShortWrite)
+		}
+		w.written += int64(n)
+	}
+
+	for len(p) > 0 {
+		var uncompressed []byte
+		if len(p) > w.blockSize {
+			uncompressed, p = p[:w.blockSize], p[w.blockSize:]
+		} else {
+			uncompressed, p = p, nil
+		}
+
+		obuf := w.buffers.Get().([]byte)[:w.obufLen]
+		checksum := crc(uncompressed)
+
+		// Set to uncompressed.
+		chunkType := uint8(chunkTypeUncompressedData)
+		chunkLen := 4 + len(uncompressed)
+
+		// Attempt compressing.
+		n := binary.PutUvarint(obuf[obufHeaderLen:], uint64(len(uncompressed)))
+		n2 := w.encodeBlock(obuf[obufHeaderLen+n:], uncompressed)
+
+		if n2 > 0 {
+			chunkType = uint8(chunkTypeCompressedData)
+			chunkLen = 4 + n + n2
+			obuf = obuf[:obufHeaderLen+n+n2]
+		} else {
+			obuf = obuf[:8]
+		}
+
+		// Fill in the per-chunk header that comes before the body.
+		obuf[0] = chunkType
+		obuf[1] = uint8(chunkLen >> 0)
+		obuf[2] = uint8(chunkLen >> 8)
+		obuf[3] = uint8(chunkLen >> 16)
+		obuf[4] = uint8(checksum >> 0)
+		obuf[5] = uint8(checksum >> 8)
+		obuf[6] = uint8(checksum >> 16)
+		obuf[7] = uint8(checksum >> 24)
+
+		n, err := w.writer.Write(obuf)
+		if err != nil {
+			return 0, w.err(err)
+		}
+		if n != len(obuf) {
+			return 0, w.err(io.ErrShortWrite)
+		}
+		w.err(w.index.add(w.written, w.uncompWritten))
+		w.written += int64(n)
+		w.uncompWritten += int64(len(uncompressed))
+
+		if chunkType == chunkTypeUncompressedData {
+			// Write uncompressed data.
+			n, err := w.writer.Write(uncompressed)
+			if err != nil {
+				return 0, w.err(err)
+			}
+			if n != len(uncompressed) {
+				return 0, w.err(io.ErrShortWrite)
+			}
+			w.written += int64(n)
+		}
+		w.buffers.Put(obuf)
+		// Account for the uncompressed bytes.
+		nRet += len(uncompressed)
+	}
+	return nRet, nil
+}
+
+// AsyncFlush writes any buffered bytes to a block and starts compressing it.
+// Unlike Flush, it does not wait until the output has been written.
+func (w *Writer) AsyncFlush() error {
+	if err := w.err(nil); err != nil {
+		return err
+	}
+
+	// Queue any data still in input buffer.
+	if len(w.ibuf) != 0 {
+		if !w.wroteStreamHeader {
+			_, err := w.writeSync(w.ibuf)
+			w.ibuf = w.ibuf[:0]
+			return w.err(err)
+		} else {
+			_, err := w.write(w.ibuf)
+			w.ibuf = w.ibuf[:0]
+			err = w.err(err)
+			if err != nil {
+				return err
+			}
+		}
+	}
+	return w.err(nil)
+}
+
+// Flush flushes the Writer to its underlying io.Writer.
+// This does not apply padding.
+func (w *Writer) Flush() error {
+	if err := w.AsyncFlush(); err != nil {
+		return err
+	}
+	if w.output == nil {
+		return w.err(nil)
+	}
+
+	// Send empty buffer
+	res := make(chan result)
+	w.output <- res
+	// Block until this has been picked up.
+	res <- result{b: nil, startOffset: w.uncompWritten}
+	// When it is closed, we have flushed.
+	<-res
+	return w.err(nil)
+}
+
+// Close calls Flush and then closes the Writer.
+// Calling Close multiple times is ok,
+// but a subsequent CloseIndex will no longer return the index.
+func (w *Writer) Close() error {
+	_, err := w.closeIndex(w.appendIndex)
+	return err
+}
+
+// CloseIndex calls Close and returns an index on the first call.
+// This is not required if you are only adding an index to a stream.
+func (w *Writer) CloseIndex() ([]byte, error) {
+	return w.closeIndex(true)
+}
+
+func (w *Writer) closeIndex(idx bool) ([]byte, error) {
+	err := w.Flush()
+	if w.output != nil {
+		close(w.output)
+		w.writerWg.Wait()
+		w.output = nil
+	}
+
+	var index []byte
+	if w.err(err) == nil && w.writer != nil {
+		// Create index.
+		if idx {
+			compSize := int64(-1)
+			if w.pad <= 1 {
+				compSize = w.written
+			}
+			index = w.index.appendTo(w.ibuf[:0], w.uncompWritten, compSize)
+			// Count as written for padding.
+			if w.appendIndex {
+				w.written += int64(len(index))
+			}
+		}
+
+		if w.pad > 1 {
+			tmp := w.ibuf[:0]
+			if len(index) > 0 {
+				// Allocate another buffer.
+				tmp = w.buffers.Get().([]byte)[:0]
+				defer w.buffers.Put(tmp)
+			}
+			add := calcSkippableFrame(w.written, int64(w.pad))
+			frame, err := skippableFrame(tmp, add, w.randSrc)
+			if err = w.err(err); err != nil {
+				return nil, err
+			}
+			n, err2 := w.writer.Write(frame)
+			if err2 == nil && n != len(frame) {
+				err2 = io.ErrShortWrite
+			}
+			_ = w.err(err2)
+		}
+		if len(index) > 0 && w.appendIndex {
+			n, err2 := w.writer.Write(index)
+			if err2 == nil && n != len(index) {
+				err2 = io.ErrShortWrite
+			}
+			_ = w.err(err2)
+		}
+	}
+	err = w.err(errClosed)
+	if err == errClosed {
+		return index, nil
+	}
+	return nil, err
+}
+
+// calcSkippableFrame will return a total size to be added for written
+// to be divisible by wantMultiple.
+// The value will either be 0 or at least skippableFrameHeader.
+// The function will panic if written < 0 or wantMultiple <= 0.
+func calcSkippableFrame(written, wantMultiple int64) int {
+	if wantMultiple <= 0 {
+		panic("wantMultiple <= 0")
+	}
+	if written < 0 {
+		panic("written < 0")
+	}
+	leftOver := written % wantMultiple
+	if leftOver == 0 {
+		return 0
+	}
+	toAdd := wantMultiple - leftOver
+	for toAdd < skippableFrameHeader {
+		toAdd += wantMultiple
+	}
+	return int(toAdd)
+}
+
+// skippableFrame will add a skippable frame with a total size of total bytes.
+// total should be >= skippableFrameHeader and < maxBlockSize + skippableFrameHeader
+func skippableFrame(dst []byte, total int, r io.Reader) ([]byte, error) {
+	if total == 0 {
+		return dst, nil
+	}
+	if total < skippableFrameHeader {
+		return dst, fmt.Errorf("s2: requested skippable frame (%d) < 4", total)
+	}
+	if int64(total) >= maxBlockSize+skippableFrameHeader {
+		return dst, fmt.Errorf("s2: requested skippable frame (%d) >= max 1<<24", total)
+	}
+	// Chunk type 0xfe "Section 4.4 Padding (chunk type 0xfe)"
+	dst = append(dst, chunkTypePadding)
+	f := uint32(total - skippableFrameHeader)
+	// Add chunk length.
+	dst = append(dst, uint8(f), uint8(f>>8), uint8(f>>16))
+	// Add data
+	start := len(dst)
+	dst = append(dst, make([]byte, f)...)
+	_, err := io.ReadFull(r, dst[start:])
+	return dst, err
+}
+
+var errClosed = errors.New("s2: Writer is closed")
+
+// WriterOption is an option for creating an encoder.
+type WriterOption func(*Writer) error
+
+// WriterConcurrency will set the concurrency,
+// meaning the maximum number of encoders to run concurrently.
+// The value supplied must be at least 1.
+// By default this will be set to GOMAXPROCS.
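+//
+// A hedged sketch (out is an illustrative io.Writer):
+//
+//	enc := NewWriter(out, WriterConcurrency(2))
+//	defer enc.Close() // Close error ignored for brevity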
+func WriterConcurrency(n int) WriterOption {
+	return func(w *Writer) error {
+		if n <= 0 {
+			return errors.New("concurrency must be at least 1")
+		}
+		w.concurrency = n
+		return nil
+	}
+}
+
+// WriterAddIndex will append an index to the end of a stream
+// when it is closed.
+func WriterAddIndex() WriterOption {
+	return func(w *Writer) error {
+		w.appendIndex = true
+		return nil
+	}
+}
+
+// WriterBetterCompression will enable better compression.
+// EncodeBetter compresses better than Encode but typically with a
+// 10-40% speed decrease on both compression and decompression.
+func WriterBetterCompression() WriterOption {
+	return func(w *Writer) error {
+		w.level = levelBetter
+		return nil
+	}
+}
+
+// WriterBestCompression will enable best compression.
+// EncodeBest compresses better than Encode but typically with a
+// big speed decrease on compression.
+func WriterBestCompression() WriterOption {
+	return func(w *Writer) error {
+		w.level = levelBest
+		return nil
+	}
+}
+
+// WriterUncompressed will bypass compression.
+// The stream will be written as uncompressed blocks only.
+// If concurrency is > 1, CRC and output will still be done asynchronously.
+func WriterUncompressed() WriterOption {
+	return func(w *Writer) error {
+		w.level = levelUncompressed
+		return nil
+	}
+}
+
+// WriterBufferDone will perform a callback when EncodeBuffer has finished
+// writing a buffer to the output and the buffer can safely be reused.
+// If the buffer was split into several blocks, it will be sent after the last block.
+// Callbacks will not be done concurrently.
+func WriterBufferDone(fn func(b []byte)) WriterOption {
+	return func(w *Writer) error {
+		w.bufferCB = fn
+		return nil
+	}
+}
+
+// WriterBlockSize allows overriding the default block size.
+// Blocks will be this size or smaller.
+// Minimum size is 4KB and maximum size is 4MB.
+//
+// Bigger blocks may give bigger throughput on systems with many cores,
+// and will increase compression slightly, but it will limit the possible
+// concurrency for smaller payloads for both encoding and decoding.
+// Default block size is 1MB.
+//
+// When writing Snappy compatible output using WriterSnappyCompat,
+// the maximum block size is 64KB.
+func WriterBlockSize(n int) WriterOption {
+	return func(w *Writer) error {
+		if w.snappy && (n > maxSnappyBlockSize || n < minBlockSize) {
+			return errors.New("s2: invalid block size. Must be <= 64K and >= 4KB for snappy compatible output")
+		}
+		if n > maxBlockSize || n < minBlockSize {
+			return errors.New("s2: invalid block size. Must be <= 4MB and >= 4KB")
+		}
+		w.blockSize = n
+		return nil
+	}
+}
+
+// WriterPadding will add padding to all output so the size will be a multiple of n.
+// This can be used to obfuscate the exact output size or make blocks of a certain size.
+// The contents will be a skippable frame, so it will be invisible to the decoder.
+// n must be > 0 and <= 4MB.
+// The padded area will be filled with data from crypto/rand.Reader.
+// The padding will be applied whenever Close is called on the writer.
+func WriterPadding(n int) WriterOption {
+	return func(w *Writer) error {
+		if n <= 0 {
+			return fmt.Errorf("s2: padding must be at least 1")
+		}
+		// No need to waste our time.
+		if n == 1 {
+			w.pad = 0
+			return nil
+		}
+		if n > maxBlockSize {
+			return fmt.Errorf("s2: padding must be less than 4MB")
+		}
+		w.pad = n
+		return nil
+	}
+}
+
+// WriterPaddingSrc will get random data for padding from the supplied source.
+// By default crypto/rand is used.
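+//
+// A hedged sketch that pads output to 4KB multiples from a caller-supplied
+// source; out is an illustrative io.Writer and src an illustrative io.Reader:
+//
+//	enc := NewWriter(out, WriterPadding(4<<10), WriterPaddingSrc(src))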
+func WriterPaddingSrc(reader io.Reader) WriterOption {
+	return func(w *Writer) error {
+		w.randSrc = reader
+		return nil
+	}
+}
+
+// WriterSnappyCompat will write snappy compatible output.
+// The output can be decompressed using either snappy or s2.
+// If the configured block size is more than 64KB it is reduced to fit within 64KB.
+func WriterSnappyCompat() WriterOption {
+	return func(w *Writer) error {
+		w.snappy = true
+		if w.blockSize > 64<<10 {
+			// We choose 8 bytes less than 64K, since that will make literal emits slightly more effective.
+			// And allows us to skip some size checks.
+			w.blockSize = (64 << 10) - 8
+		}
+		return nil
+	}
+}
+
+// WriterFlushOnWrite will compress blocks on each call to the Write function.
+//
+// This is quite inefficient as block size will depend on the write size.
+//
+// Use WriterConcurrency(1) to also make sure that output is flushed when
+// Write calls return; otherwise output will be written when compression is done.
+func WriterFlushOnWrite() WriterOption {
+	return func(w *Writer) error {
+		w.flushOnWrite = true
+		return nil
+	}
+}
+
+// WriterCustomEncoder allows overriding the encoder for blocks on the stream.
+// The function must compress 'src' into 'dst' and return the bytes used in dst as an integer.
+// Block size (initial varint) should not be added by the encoder.
+// Returning value 0 indicates the block could not be compressed.
+// Returning a negative value indicates that the regular compression should be attempted instead.
+// The function should expect to be called concurrently.
+func WriterCustomEncoder(fn func(dst, src []byte) int) WriterOption {
+	return func(w *Writer) error {
+		w.customEnc = fn
+		return nil
+	}
+}
diff --git a/vendor/github.com/golang/snappy/.gitignore b/vendor/github.com/klauspost/compress/snappy/.gitignore
similarity index 100%
rename from vendor/github.com/golang/snappy/.gitignore
rename to vendor/github.com/klauspost/compress/snappy/.gitignore
diff --git a/vendor/github.com/golang/snappy/AUTHORS b/vendor/github.com/klauspost/compress/snappy/AUTHORS
similarity index 100%
rename from vendor/github.com/golang/snappy/AUTHORS
rename to vendor/github.com/klauspost/compress/snappy/AUTHORS
diff --git a/vendor/github.com/golang/snappy/CONTRIBUTORS b/vendor/github.com/klauspost/compress/snappy/CONTRIBUTORS
similarity index 100%
rename from vendor/github.com/golang/snappy/CONTRIBUTORS
rename to vendor/github.com/klauspost/compress/snappy/CONTRIBUTORS
diff --git a/vendor/github.com/golang/snappy/LICENSE b/vendor/github.com/klauspost/compress/snappy/LICENSE
similarity index 100%
rename from vendor/github.com/golang/snappy/LICENSE
rename to vendor/github.com/klauspost/compress/snappy/LICENSE
diff --git a/vendor/github.com/klauspost/compress/snappy/README.md b/vendor/github.com/klauspost/compress/snappy/README.md
new file mode 100644
index 0000000000..8271bbd090
--- /dev/null
+++ b/vendor/github.com/klauspost/compress/snappy/README.md
@@ -0,0 +1,17 @@
+# snappy
+
+The Snappy compression format in the Go programming language.
+
+This is a drop-in replacement for `github.com/golang/snappy`.
+
+It provides a full, compatible replacement of the Snappy package by simply changing imports.
+
+See [Snappy Compatibility](https://github.com/klauspost/compress/tree/master/s2#snappy-compatibility) in the S2 documentation.
+
+"Better" compression mode is used. For buffered streams concurrent compression is used.
+
+For more options use the [s2 package](https://pkg.go.dev/github.com/klauspost/compress/s2).
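+
+A hedged sketch of the buffered stream writer (`out` and `data` are illustrative names):
+
+```go
+w := snappy.NewBufferedWriter(out)
+if _, err := w.Write(data); err != nil {
+	w.Close()
+	// handle error
+}
+if err := w.Close(); err != nil {
+	// handle error
+}
+```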
+ +# usage + +Replace imports `github.com/golang/snappy` with `github.com/klauspost/compress/snappy`. diff --git a/vendor/github.com/klauspost/compress/snappy/decode.go b/vendor/github.com/klauspost/compress/snappy/decode.go new file mode 100644 index 0000000000..89f1fa2344 --- /dev/null +++ b/vendor/github.com/klauspost/compress/snappy/decode.go @@ -0,0 +1,60 @@ +// Copyright 2011 The Snappy-Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package snappy + +import ( + "io" + + "github.com/klauspost/compress/s2" +) + +var ( + // ErrCorrupt reports that the input is invalid. + ErrCorrupt = s2.ErrCorrupt + // ErrTooLarge reports that the uncompressed length is too large. + ErrTooLarge = s2.ErrTooLarge + // ErrUnsupported reports that the input isn't supported. + ErrUnsupported = s2.ErrUnsupported +) + +const ( + // maxBlockSize is the maximum size of the input to encodeBlock. It is not + // part of the wire format per se, but some parts of the encoder assume + // that an offset fits into a uint16. + // + // Also, for the framing format (Writer type instead of Encode function), + // https://github.com/google/snappy/blob/master/framing_format.txt says + // that "the uncompressed data in a chunk must be no longer than 65536 + // bytes". + maxBlockSize = 65536 +) + +// DecodedLen returns the length of the decoded block. +func DecodedLen(src []byte) (int, error) { + return s2.DecodedLen(src) +} + +// Decode returns the decoded form of src. The returned slice may be a sub- +// slice of dst if dst was large enough to hold the entire decoded block. +// Otherwise, a newly allocated slice will be returned. +// +// The dst and src must not overlap. It is valid to pass a nil dst. +// +// Decode handles the Snappy block format, not the Snappy stream format. +func Decode(dst, src []byte) ([]byte, error) { + return s2.Decode(dst, src) +} + +// NewReader returns a new Reader that decompresses from r, using the framing +// format described at +// https://github.com/google/snappy/blob/master/framing_format.txt +func NewReader(r io.Reader) *Reader { + return s2.NewReader(r, s2.ReaderMaxBlockSize(maxBlockSize)) +} + +// Reader is an io.Reader that can read Snappy-compressed bytes. +// +// Reader handles the Snappy stream format, not the Snappy block format. +type Reader = s2.Reader diff --git a/vendor/github.com/klauspost/compress/snappy/encode.go b/vendor/github.com/klauspost/compress/snappy/encode.go new file mode 100644 index 0000000000..e8bd72c186 --- /dev/null +++ b/vendor/github.com/klauspost/compress/snappy/encode.go @@ -0,0 +1,59 @@ +// Copyright 2011 The Snappy-Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package snappy + +import ( + "io" + + "github.com/klauspost/compress/s2" +) + +// Encode returns the encoded form of src. The returned slice may be a sub- +// slice of dst if dst was large enough to hold the entire encoded block. +// Otherwise, a newly allocated slice will be returned. +// +// The dst and src must not overlap. It is valid to pass a nil dst. +// +// Encode handles the Snappy block format, not the Snappy stream format. +func Encode(dst, src []byte) []byte { + return s2.EncodeSnappyBetter(dst, src) +} + +// MaxEncodedLen returns the maximum length of a snappy block, given its +// uncompressed length. +// +// It will return a negative value if srcLen is too large to encode. 
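+//
+// A hedged sketch of sizing a destination up front (src is an illustrative []byte):
+//
+//	if n := MaxEncodedLen(len(src)); n >= 0 {
+//		dst := Encode(make([]byte, n), src)
+//		_ = dst
+//	}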
+func MaxEncodedLen(srcLen int) int { + return s2.MaxEncodedLen(srcLen) +} + +// NewWriter returns a new Writer that compresses to w. +// +// The Writer returned does not buffer writes. There is no need to Flush or +// Close such a Writer. +// +// Deprecated: the Writer returned is not suitable for many small writes, only +// for few large writes. Use NewBufferedWriter instead, which is efficient +// regardless of the frequency and shape of the writes, and remember to Close +// that Writer when done. +func NewWriter(w io.Writer) *Writer { + return s2.NewWriter(w, s2.WriterSnappyCompat(), s2.WriterBetterCompression(), s2.WriterFlushOnWrite(), s2.WriterConcurrency(1)) +} + +// NewBufferedWriter returns a new Writer that compresses to w, using the +// framing format described at +// https://github.com/google/snappy/blob/master/framing_format.txt +// +// The Writer returned buffers writes. Users must call Close to guarantee all +// data has been forwarded to the underlying io.Writer. They may also call +// Flush zero or more times before calling Close. +func NewBufferedWriter(w io.Writer) *Writer { + return s2.NewWriter(w, s2.WriterSnappyCompat(), s2.WriterBetterCompression()) +} + +// Writer is an io.Writer that can write Snappy-compressed bytes. +// +// Writer handles the Snappy stream format, not the Snappy block format. +type Writer = s2.Writer diff --git a/vendor/github.com/golang/snappy/snappy.go b/vendor/github.com/klauspost/compress/snappy/snappy.go similarity index 60% rename from vendor/github.com/golang/snappy/snappy.go rename to vendor/github.com/klauspost/compress/snappy/snappy.go index ece692ea46..398cdc95a0 100644 --- a/vendor/github.com/golang/snappy/snappy.go +++ b/vendor/github.com/klauspost/compress/snappy/snappy.go @@ -17,11 +17,7 @@ // // The canonical, C++ implementation is at https://github.com/google/snappy and // it only implements the block format. -package snappy // import "github.com/golang/snappy" - -import ( - "hash/crc32" -) +package snappy /* Each encoded block begins with the varint-encoded length of the decoded data, @@ -48,51 +44,3 @@ Lempel-Ziv compression algorithms. In particular: [1, 65). The length is 1 + m. The offset is the little-endian unsigned integer denoted by the next 4 bytes. */ -const ( - tagLiteral = 0x00 - tagCopy1 = 0x01 - tagCopy2 = 0x02 - tagCopy4 = 0x03 -) - -const ( - checksumSize = 4 - chunkHeaderSize = 4 - magicChunk = "\xff\x06\x00\x00" + magicBody - magicBody = "sNaPpY" - - // maxBlockSize is the maximum size of the input to encodeBlock. It is not - // part of the wire format per se, but some parts of the encoder assume - // that an offset fits into a uint16. - // - // Also, for the framing format (Writer type instead of Encode function), - // https://github.com/google/snappy/blob/master/framing_format.txt says - // that "the uncompressed data in a chunk must be no longer than 65536 - // bytes". - maxBlockSize = 65536 - - // maxEncodedLenOfMaxBlockSize equals MaxEncodedLen(maxBlockSize), but is - // hard coded to be a const instead of a variable, so that obufLen can also - // be a const. Their equivalence is confirmed by - // TestMaxEncodedLenOfMaxBlockSize. 
- maxEncodedLenOfMaxBlockSize = 76490 - - obufHeaderLen = len(magicChunk) + checksumSize + chunkHeaderSize - obufLen = obufHeaderLen + maxEncodedLenOfMaxBlockSize -) - -const ( - chunkTypeCompressedData = 0x00 - chunkTypeUncompressedData = 0x01 - chunkTypePadding = 0xfe - chunkTypeStreamIdentifier = 0xff -) - -var crcTable = crc32.MakeTable(crc32.Castagnoli) - -// crc implements the checksum specified in section 3 of -// https://github.com/google/snappy/blob/master/framing_format.txt -func crc(b []byte) uint32 { - c := crc32.Update(0, crcTable, b) - return uint32(c>>15|c<<17) + 0xa282ead8 -} diff --git a/vendor/github.com/klauspost/compress/zstd/blockdec.go b/vendor/github.com/klauspost/compress/zstd/blockdec.go index 03744fbc76..9c28840c3b 100644 --- a/vendor/github.com/klauspost/compress/zstd/blockdec.go +++ b/vendor/github.com/klauspost/compress/zstd/blockdec.go @@ -598,7 +598,9 @@ func (b *blockDec) prepareSequences(in []byte, hist *history) (err error) { printf("RLE set to 0x%x, code: %v", symb, v) } case compModeFSE: - println("Reading table for", tableIndex(i)) + if debugDecoder { + println("Reading table for", tableIndex(i)) + } if seq.fse == nil || seq.fse.preDefined { seq.fse = fseDecoderPool.Get().(*fseDecoder) } diff --git a/vendor/github.com/klauspost/compress/zstd/enc_better.go b/vendor/github.com/klauspost/compress/zstd/enc_better.go index a4f5bf91fc..84a79fde76 100644 --- a/vendor/github.com/klauspost/compress/zstd/enc_better.go +++ b/vendor/github.com/klauspost/compress/zstd/enc_better.go @@ -179,9 +179,9 @@ encodeLoop: if repIndex >= 0 && load3232(src, repIndex) == uint32(cv>>(repOff*8)) { // Consider history as well. var seq seq - lenght := 4 + e.matchlen(s+4+repOff, repIndex+4, src) + length := 4 + e.matchlen(s+4+repOff, repIndex+4, src) - seq.matchLen = uint32(lenght - zstdMinMatch) + seq.matchLen = uint32(length - zstdMinMatch) // We might be able to match backwards. // Extend as long as we can. @@ -210,12 +210,12 @@ encodeLoop: // Index match start+1 (long) -> s - 1 index0 := s + repOff - s += lenght + repOff + s += length + repOff nextEmit = s if s >= sLimit { if debugEncoder { - println("repeat ended", s, lenght) + println("repeat ended", s, length) } break encodeLoop @@ -241,9 +241,9 @@ encodeLoop: if false && repIndex >= 0 && load6432(src, repIndex) == load6432(src, s+repOff) { // Consider history as well. var seq seq - lenght := 8 + e.matchlen(s+8+repOff2, repIndex+8, src) + length := 8 + e.matchlen(s+8+repOff2, repIndex+8, src) - seq.matchLen = uint32(lenght - zstdMinMatch) + seq.matchLen = uint32(length - zstdMinMatch) // We might be able to match backwards. // Extend as long as we can. @@ -270,11 +270,11 @@ encodeLoop: } blk.sequences = append(blk.sequences, seq) - s += lenght + repOff2 + s += length + repOff2 nextEmit = s if s >= sLimit { if debugEncoder { - println("repeat ended", s, lenght) + println("repeat ended", s, length) } break encodeLoop @@ -708,9 +708,9 @@ encodeLoop: if repIndex >= 0 && load3232(src, repIndex) == uint32(cv>>(repOff*8)) { // Consider history as well. var seq seq - lenght := 4 + e.matchlen(s+4+repOff, repIndex+4, src) + length := 4 + e.matchlen(s+4+repOff, repIndex+4, src) - seq.matchLen = uint32(lenght - zstdMinMatch) + seq.matchLen = uint32(length - zstdMinMatch) // We might be able to match backwards. // Extend as long as we can. 
@@ -738,12 +738,12 @@ encodeLoop: blk.sequences = append(blk.sequences, seq) // Index match start+1 (long) -> s - 1 - s += lenght + repOff + s += length + repOff nextEmit = s if s >= sLimit { if debugEncoder { - println("repeat ended", s, lenght) + println("repeat ended", s, length) } break encodeLoop @@ -772,9 +772,9 @@ encodeLoop: if false && repIndex >= 0 && load6432(src, repIndex) == load6432(src, s+repOff) { // Consider history as well. var seq seq - lenght := 8 + e.matchlen(s+8+repOff2, repIndex+8, src) + length := 8 + e.matchlen(s+8+repOff2, repIndex+8, src) - seq.matchLen = uint32(lenght - zstdMinMatch) + seq.matchLen = uint32(length - zstdMinMatch) // We might be able to match backwards. // Extend as long as we can. @@ -801,11 +801,11 @@ encodeLoop: } blk.sequences = append(blk.sequences, seq) - s += lenght + repOff2 + s += length + repOff2 nextEmit = s if s >= sLimit { if debugEncoder { - println("repeat ended", s, lenght) + println("repeat ended", s, length) } break encodeLoop diff --git a/vendor/github.com/klauspost/compress/zstd/enc_dfast.go b/vendor/github.com/klauspost/compress/zstd/enc_dfast.go index a154c18f74..d36be7bd8c 100644 --- a/vendor/github.com/klauspost/compress/zstd/enc_dfast.go +++ b/vendor/github.com/klauspost/compress/zstd/enc_dfast.go @@ -138,9 +138,9 @@ encodeLoop: if repIndex >= 0 && load3232(src, repIndex) == uint32(cv>>(repOff*8)) { // Consider history as well. var seq seq - lenght := 4 + e.matchlen(s+4+repOff, repIndex+4, src) + length := 4 + e.matchlen(s+4+repOff, repIndex+4, src) - seq.matchLen = uint32(lenght - zstdMinMatch) + seq.matchLen = uint32(length - zstdMinMatch) // We might be able to match backwards. // Extend as long as we can. @@ -166,11 +166,11 @@ encodeLoop: println("repeat sequence", seq, "next s:", s) } blk.sequences = append(blk.sequences, seq) - s += lenght + repOff + s += length + repOff nextEmit = s if s >= sLimit { if debugEncoder { - println("repeat ended", s, lenght) + println("repeat ended", s, length) } break encodeLoop @@ -798,9 +798,9 @@ encodeLoop: if repIndex >= 0 && load3232(src, repIndex) == uint32(cv>>(repOff*8)) { // Consider history as well. var seq seq - lenght := 4 + e.matchlen(s+4+repOff, repIndex+4, src) + length := 4 + e.matchlen(s+4+repOff, repIndex+4, src) - seq.matchLen = uint32(lenght - zstdMinMatch) + seq.matchLen = uint32(length - zstdMinMatch) // We might be able to match backwards. // Extend as long as we can. @@ -826,11 +826,11 @@ encodeLoop: println("repeat sequence", seq, "next s:", s) } blk.sequences = append(blk.sequences, seq) - s += lenght + repOff + s += length + repOff nextEmit = s if s >= sLimit { if debugEncoder { - println("repeat ended", s, lenght) + println("repeat ended", s, length) } break encodeLoop diff --git a/vendor/github.com/klauspost/compress/zstd/encoder.go b/vendor/github.com/klauspost/compress/zstd/encoder.go index 72af7ef0fe..8f8223cd3a 100644 --- a/vendor/github.com/klauspost/compress/zstd/encoder.go +++ b/vendor/github.com/klauspost/compress/zstd/encoder.go @@ -6,6 +6,7 @@ package zstd import ( "crypto/rand" + "errors" "fmt" "io" "math" @@ -149,6 +150,9 @@ func (e *Encoder) ResetContentSize(w io.Writer, size int64) { // and write CRC if requested. 
func (e *Encoder) Write(p []byte) (n int, err error) { s := &e.state + if s.eofWritten { + return 0, ErrEncoderClosed + } for len(p) > 0 { if len(p)+len(s.filling) < e.o.blockSize { if e.o.crc { @@ -202,7 +206,7 @@ func (e *Encoder) nextBlock(final bool) error { return nil } if final && len(s.filling) > 0 { - s.current = e.EncodeAll(s.filling, s.current[:0]) + s.current = e.encodeAll(s.encoder, s.filling, s.current[:0]) var n2 int n2, s.err = s.w.Write(s.current) if s.err != nil { @@ -288,6 +292,9 @@ func (e *Encoder) nextBlock(final bool) error { s.filling, s.current, s.previous = s.previous[:0], s.filling, s.current s.nInput += int64(len(s.current)) s.wg.Add(1) + if final { + s.eofWritten = true + } go func(src []byte) { if debugEncoder { println("Adding block,", len(src), "bytes, final:", final) @@ -303,9 +310,6 @@ func (e *Encoder) nextBlock(final bool) error { blk := enc.Block() enc.Encode(blk, src) blk.last = final - if final { - s.eofWritten = true - } // Wait for pending writes. s.wWg.Wait() if s.writeErr != nil { @@ -401,12 +405,20 @@ func (e *Encoder) Flush() error { if len(s.filling) > 0 { err := e.nextBlock(false) if err != nil { + // Ignore Flush after Close. + if errors.Is(s.err, ErrEncoderClosed) { + return nil + } return err } } s.wg.Wait() s.wWg.Wait() if s.err != nil { + // Ignore Flush after Close. + if errors.Is(s.err, ErrEncoderClosed) { + return nil + } return s.err } return s.writeErr @@ -422,6 +434,9 @@ func (e *Encoder) Close() error { } err := e.nextBlock(true) if err != nil { + if errors.Is(s.err, ErrEncoderClosed) { + return nil + } return err } if s.frameContentSize > 0 { @@ -459,6 +474,11 @@ func (e *Encoder) Close() error { } _, s.err = s.w.Write(frame) } + if s.err == nil { + s.err = ErrEncoderClosed + return nil + } + return s.err } @@ -469,6 +489,15 @@ func (e *Encoder) Close() error { // Data compressed with EncodeAll can be decoded with the Decoder, // using either a stream or DecodeAll. func (e *Encoder) EncodeAll(src, dst []byte) []byte { + e.init.Do(e.initialize) + enc := <-e.encoders + defer func() { + e.encoders <- enc + }() + return e.encodeAll(enc, src, dst) +} + +func (e *Encoder) encodeAll(enc encoder, src, dst []byte) []byte { if len(src) == 0 { if e.o.fullZero { // Add frame header. @@ -491,13 +520,7 @@ func (e *Encoder) EncodeAll(src, dst []byte) []byte { } return dst } - e.init.Do(e.initialize) - enc := <-e.encoders - defer func() { - // Release encoder reference to last block. - // If a non-single block is needed the encoder will reset again. - e.encoders <- enc - }() + // Use single segments when above minimum window and below window size. 
single := len(src) <= e.o.windowSize && len(src) > MinWindowSize if e.o.single != nil { diff --git a/vendor/github.com/klauspost/compress/zstd/framedec.go b/vendor/github.com/klauspost/compress/zstd/framedec.go index 53e160f7e5..e47af66e7c 100644 --- a/vendor/github.com/klauspost/compress/zstd/framedec.go +++ b/vendor/github.com/klauspost/compress/zstd/framedec.go @@ -146,7 +146,9 @@ func (d *frameDec) reset(br byteBuffer) error { } return err } - printf("raw: %x, mantissa: %d, exponent: %d\n", wd, wd&7, wd>>3) + if debugDecoder { + printf("raw: %x, mantissa: %d, exponent: %d\n", wd, wd&7, wd>>3) + } windowLog := 10 + (wd >> 3) windowBase := uint64(1) << windowLog windowAdd := (windowBase / 8) * uint64(wd&0x7) diff --git a/vendor/github.com/klauspost/compress/zstd/seqdec_amd64.go b/vendor/github.com/klauspost/compress/zstd/seqdec_amd64.go index 8adabd8287..c59f17e07a 100644 --- a/vendor/github.com/klauspost/compress/zstd/seqdec_amd64.go +++ b/vendor/github.com/klauspost/compress/zstd/seqdec_amd64.go @@ -146,7 +146,7 @@ func (s *sequenceDecs) decodeSyncSimple(hist []byte) (bool, error) { return true, fmt.Errorf("output bigger than max block size (%d)", maxBlockSize) default: - return true, fmt.Errorf("sequenceDecs_decode returned erronous code %d", errCode) + return true, fmt.Errorf("sequenceDecs_decode returned erroneous code %d", errCode) } s.seqSize += ctx.litRemain @@ -292,7 +292,7 @@ func (s *sequenceDecs) decode(seqs []seqVals) error { return io.ErrUnexpectedEOF } - return fmt.Errorf("sequenceDecs_decode_amd64 returned erronous code %d", errCode) + return fmt.Errorf("sequenceDecs_decode_amd64 returned erroneous code %d", errCode) } if ctx.litRemain < 0 { diff --git a/vendor/github.com/klauspost/compress/zstd/seqdec_amd64.s b/vendor/github.com/klauspost/compress/zstd/seqdec_amd64.s index 5b06174b89..f5591fa1e8 100644 --- a/vendor/github.com/klauspost/compress/zstd/seqdec_amd64.s +++ b/vendor/github.com/klauspost/compress/zstd/seqdec_amd64.s @@ -1814,7 +1814,7 @@ TEXT ·sequenceDecs_decodeSync_amd64(SB), $64-32 MOVQ 40(SP), AX ADDQ AX, 48(SP) - // Calculate poiter to s.out[cap(s.out)] (a past-end pointer) + // Calculate pointer to s.out[cap(s.out)] (a past-end pointer) ADDQ R10, 32(SP) // outBase += outPosition @@ -2376,7 +2376,7 @@ TEXT ·sequenceDecs_decodeSync_bmi2(SB), $64-32 MOVQ 40(SP), CX ADDQ CX, 48(SP) - // Calculate poiter to s.out[cap(s.out)] (a past-end pointer) + // Calculate pointer to s.out[cap(s.out)] (a past-end pointer) ADDQ R9, 32(SP) // outBase += outPosition @@ -2896,7 +2896,7 @@ TEXT ·sequenceDecs_decodeSync_safe_amd64(SB), $64-32 MOVQ 40(SP), AX ADDQ AX, 48(SP) - // Calculate poiter to s.out[cap(s.out)] (a past-end pointer) + // Calculate pointer to s.out[cap(s.out)] (a past-end pointer) ADDQ R10, 32(SP) // outBase += outPosition @@ -3560,7 +3560,7 @@ TEXT ·sequenceDecs_decodeSync_safe_bmi2(SB), $64-32 MOVQ 40(SP), CX ADDQ CX, 48(SP) - // Calculate poiter to s.out[cap(s.out)] (a past-end pointer) + // Calculate pointer to s.out[cap(s.out)] (a past-end pointer) ADDQ R9, 32(SP) // outBase += outPosition diff --git a/vendor/github.com/klauspost/compress/zstd/zstd.go b/vendor/github.com/klauspost/compress/zstd/zstd.go index 4be7cc7367..066bef2a4f 100644 --- a/vendor/github.com/klauspost/compress/zstd/zstd.go +++ b/vendor/github.com/klauspost/compress/zstd/zstd.go @@ -88,6 +88,10 @@ var ( // Close has been called. ErrDecoderClosed = errors.New("decoder used after Close") + // ErrEncoderClosed will be returned if the Encoder was used after + // Close has been called. 
+ ErrEncoderClosed = errors.New("encoder used after Close") + // ErrDecoderNilInput is returned when a nil Reader was provided // and an operation other than Reset/DecodeAll/Close was attempted. ErrDecoderNilInput = errors.New("nil input provided as reader") diff --git a/vendor/modules.txt b/vendor/modules.txt index d37f5ca2e3..aa388a379b 100644 --- a/vendor/modules.txt +++ b/vendor/modules.txt @@ -26,9 +26,6 @@ github.com/bytecodealliance/wasmtime-go/v3/build/windows-x86_64 # github.com/cenkalti/backoff/v4 v4.3.0 ## explicit; go 1.18 github.com/cenkalti/backoff/v4 -# github.com/cespare/xxhash v1.1.0 -## explicit -github.com/cespare/xxhash # github.com/cespare/xxhash/v2 v2.3.0 ## explicit; go 1.11 github.com/cespare/xxhash/v2 @@ -62,25 +59,23 @@ github.com/containerd/platforms # github.com/cpuguy83/go-md2man/v2 v2.0.4 ## explicit; go 1.11 github.com/cpuguy83/go-md2man/v2/md2man -# github.com/dgraph-io/badger/v3 v3.2103.5 -## explicit; go 1.12 -github.com/dgraph-io/badger/v3 -github.com/dgraph-io/badger/v3/fb -github.com/dgraph-io/badger/v3/options -github.com/dgraph-io/badger/v3/pb -github.com/dgraph-io/badger/v3/skl -github.com/dgraph-io/badger/v3/table -github.com/dgraph-io/badger/v3/trie -github.com/dgraph-io/badger/v3/y -# github.com/dgraph-io/ristretto v0.1.1 -## explicit; go 1.12 -github.com/dgraph-io/ristretto -github.com/dgraph-io/ristretto/z -github.com/dgraph-io/ristretto/z/simd -# github.com/dgryski/go-farm v0.0.0-20200201041132-a6ae2369ad13 -## explicit -# github.com/dustin/go-humanize v1.0.0 -## explicit +# github.com/dgraph-io/badger/v4 v4.5.0 +## explicit; go 1.21 +github.com/dgraph-io/badger/v4 +github.com/dgraph-io/badger/v4/fb +github.com/dgraph-io/badger/v4/options +github.com/dgraph-io/badger/v4/pb +github.com/dgraph-io/badger/v4/skl +github.com/dgraph-io/badger/v4/table +github.com/dgraph-io/badger/v4/trie +github.com/dgraph-io/badger/v4/y +# github.com/dgraph-io/ristretto/v2 v2.0.0 +## explicit; go 1.21 +github.com/dgraph-io/ristretto/v2 +github.com/dgraph-io/ristretto/v2/z +github.com/dgraph-io/ristretto/v2/z/simd +# github.com/dustin/go-humanize v1.0.1 +## explicit; go 1.16 github.com/dustin/go-humanize # github.com/felixge/httpsnoop v1.0.4 ## explicit; go 1.13 @@ -115,14 +110,6 @@ github.com/gobwas/glob/syntax/ast github.com/gobwas/glob/syntax/lexer github.com/gobwas/glob/util/runes github.com/gobwas/glob/util/strings -# github.com/gogo/protobuf v1.3.2 -## explicit; go 1.15 -github.com/gogo/protobuf/proto -# github.com/golang/glog v1.2.2 -## explicit; go 1.19 -github.com/golang/glog -github.com/golang/glog/internal/logsink -github.com/golang/glog/internal/stackdump # github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da ## explicit github.com/golang/groupcache/lru @@ -130,10 +117,7 @@ github.com/golang/groupcache/lru ## explicit; go 1.17 github.com/golang/protobuf/jsonpb github.com/golang/protobuf/proto -# github.com/golang/snappy v0.0.4 -## explicit -github.com/golang/snappy -# github.com/google/flatbuffers v1.12.1 +# github.com/google/flatbuffers v24.3.25+incompatible ## explicit github.com/google/flatbuffers/go # github.com/google/go-cmp v0.6.0 @@ -170,13 +154,16 @@ github.com/hashicorp/hcl/json/token # github.com/inconshreveable/mousetrap v1.1.0 ## explicit; go 1.18 github.com/inconshreveable/mousetrap -# github.com/klauspost/compress v1.17.9 -## explicit; go 1.20 +# github.com/klauspost/compress v1.17.11 +## explicit; go 1.21 github.com/klauspost/compress github.com/klauspost/compress/fse github.com/klauspost/compress/huff0 
github.com/klauspost/compress/internal/cpuinfo +github.com/klauspost/compress/internal/race github.com/klauspost/compress/internal/snapref +github.com/klauspost/compress/s2 +github.com/klauspost/compress/snappy github.com/klauspost/compress/zstd github.com/klauspost/compress/zstd/internal/xxhash # github.com/kr/pretty v0.3.1