diff --git a/.gitignore b/.gitignore
index f43b1e9423b27..43aa9227e2320 100644
--- a/.gitignore
+++ b/.gitignore
@@ -27,5 +27,4 @@ docs/changed-files
 man/man1
 man/man5
 man/man8
-vendor/
 vendor/pkg/
diff --git a/COMPILE_GUIDANCE.md b/COMPILE_GUIDANCE.md
new file mode 100644
index 0000000000000..a20544f3000de
--- /dev/null
+++ b/COMPILE_GUIDANCE.md
@@ -0,0 +1,71 @@
+## Update vendor
+
+* If you need to add or update a dependent package in vendor, add or update its `clone` entry in the hack/vendor.sh file, like this:
+
+```
+clone git github.com/galaxydi/go-loghub 228ca31911b37dd900a5ab9a3fbdc66c2d88ab16
+clone git github.com/cloudflare/golz4 ef862a3cdc58a6f1fee4e3af3d44fbe279194cde
+clone git github.com/golang/glog 23def4e6c14b4da8ac2ed8007337bc5eb5007998
+```
+
+* If your package uses cgo (`import "C"`) and ships required .c and .h files, append the directory that contains only those .c and .h files to the "findArgs" parameter in hack/.vendor-helpers.sh, like this:
+
+```
+# lz4 uses cgo and has .c and .h files
+findArgs+=( -or -path vendor/src/github.com/cloudflare/golz4/src )
+```
+
+* Run this command to update the vendor directory:
+
+```
+bash hack/vendor.sh
+```
+
+* Run this command to compile the binaries:
+
+```
+make binary
+```
+
+* You will see the results under bundles/:
+
+```
+$ ll -rt bundles/*
+lrwxrwxrwx 1 yushuting staff 6 Jun 13 17:46 bundles/latest -> 1.12.6
+
+bundles/1.12.6:
+total 0
+drwxr-xr-x 6 yushuting staff 204 Jun 13 17:46 binary-client
+drwxr-xr-x 22 yushuting staff 748 Jun 13 17:48 binary-daemon
+
+$ ll -rt *
+binary-client:
+total 30648
+-rw-r--r-- 1 yushuting staff 80 Jun 13 17:46 docker-1.12.6.sha256
+-rw-r--r-- 1 yushuting staff 48 Jun 13 17:46 docker-1.12.6.md5
+-rwxr-xr-x 1 yushuting staff 15675704 Jun 13 17:46 docker-1.12.6
+lrwxrwxrwx 1 yushuting staff 13 Jun 13 17:46 docker -> docker-1.12.6
+
+binary-daemon:
+total 163160
+-rwxr-xr-x 1 yushuting staff 46168368 Jun 13 17:48 dockerd-1.12.6
+-rw-r--r-- 1 yushuting staff 49 Jun 13 17:48 dockerd-1.12.6.md5
+lrwxrwxrwx 1 yushuting staff 14 Jun 13 17:48 dockerd -> dockerd-1.12.6
+-rw-r--r-- 1 yushuting staff 81 Jun 13 17:48 dockerd-1.12.6.sha256
+-rw-r--r-- 1 yushuting staff 86 Jun 13 17:48 docker-proxy-1.12.6.sha256
+-rw-r--r-- 1 yushuting staff 54 Jun 13 17:48 docker-proxy-1.12.6.md5
+-rwxr-xr-x 1 yushuting staff 2879456 Jun 13 17:48 docker-proxy-1.12.6
+lrwxrwxrwx 1 yushuting staff 19 Jun 13 17:48 docker-proxy -> docker-proxy-1.12.6
+-rw-r--r-- 1 yushuting staff 52 Jun 13 17:48 docker-containerd.md5
+-rwxr-xr-x 1 yushuting staff 11291144 Jun 13 17:48 docker-containerd
+-rw-r--r-- 1 yushuting staff 84 Jun 13 17:48 docker-containerd.sha256
+-rw-r--r-- 1 yushuting staff 89 Jun 13 17:48 docker-containerd-shim.sha256
+-rw-r--r-- 1 yushuting staff 57 Jun 13 17:48 docker-containerd-shim.md5
+-rwxr-xr-x 1 yushuting staff 3831592 Jun 13 17:48 docker-containerd-shim
+-rw-r--r-- 1 yushuting staff 56 Jun 13 17:48 docker-containerd-ctr.md5
+-rwxr-xr-x 1 yushuting staff 10537472 Jun 13 17:48 docker-containerd-ctr
+-rw-r--r-- 1 yushuting staff 46 Jun 13 17:48 docker-runc.md5
+-rwxr-xr-x 1 yushuting staff 8764120 Jun 13 17:48 docker-runc
+-rw-r--r-- 1 yushuting staff 88 Jun 13 17:48 docker-containerd-ctr.sha256
+-rw-r--r-- 1 yushuting staff 78 Jun 13 17:48 docker-runc.sha256
+```
diff --git a/hack/.vendor-helpers.sh b/hack/.vendor-helpers.sh
index 82fe7b3b3847b..f87cbf76a0304 100755
--- a/hack/.vendor-helpers.sh
+++ b/hack/.vendor-helpers.sh
@@ -134,6 +134,9 @@ clean() {
 	# The docker proxy command is built from libnetwork
 	findArgs+=( -or -path
vendor/src/github.com/docker/libnetwork/cmd/proxy ) + # lz4 uses cgo and has .c and .h files + findArgs+=( -or -path vendor/src/github.com/cloudflare/golz4/src ) + local IFS=$'\n' local prune=( $($find vendor -depth -type d -not '(' "${findArgs[@]}" ')') ) unset IFS diff --git a/hack/vendor.sh b/hack/vendor.sh index 8ff5972f800a4..dd255ae302488 100755 --- a/hack/vendor.sh +++ b/hack/vendor.sh @@ -122,6 +122,11 @@ clone git github.com/tinylib/msgp 75ee40d2601edf122ef667e2a07d600d4c44490c # fsnotify clone git gopkg.in/fsnotify.v1 v1.2.11 +# alilogs deps +clone git github.com/galaxydi/go-loghub 228ca31911b37dd900a5ab9a3fbdc66c2d88ab16 +clone git github.com/cloudflare/golz4 ef862a3cdc58a6f1fee4e3af3d44fbe279194cde +clone git github.com/golang/glog 23def4e6c14b4da8ac2ed8007337bc5eb5007998 + # awslogs deps clone git github.com/aws/aws-sdk-go v1.1.30 clone git github.com/go-ini/ini 060d7da055ba6ec5ea7a31f116332fe5efa04ce0 diff --git a/vendor.conf b/vendor.conf deleted file mode 100644 index dd1ec84a539d3..0000000000000 --- a/vendor.conf +++ /dev/null @@ -1,145 +0,0 @@ -# the following lines are in sorted order, FYI -github.com/Azure/go-ansiterm 388960b655244e76e24c75f48631564eaefade62 -github.com/Microsoft/hcsshim v0.5.9 -github.com/Microsoft/go-winio v0.3.7 -github.com/Sirupsen/logrus f76d643702a30fbffecdfe50831e11881c96ceb3 https://github.com/aaronlehmann/logrus -github.com/davecgh/go-spew 6d212800a42e8ab5c146b8ace3490ee17e5225f9 -github.com/docker/libtrust 9cbd2a1374f46905c68a4eb3694a130610adc62a -github.com/go-check/check 4ed411733c5785b40214c70bce814c3a3a689609 https://github.com/cpuguy83/check.git -github.com/gorilla/context v1.1 -github.com/gorilla/mux v1.1 -github.com/kr/pty 5cf931ef8f -github.com/mattn/go-shellwords v1.0.0 -github.com/mattn/go-sqlite3 v1.1.0 -github.com/tchap/go-patricia v2.2.6 -github.com/vdemeester/shakers 24d7f1d6a71aa5d9cbe7390e4afb66b7eef9e1b3 -# forked golang.org/x/net package includes a patch for lazy loading trace templates -golang.org/x/net 2beffdc2e92c8a3027590f898fe88f69af48a3f8 https://github.com/tonistiigi/net.git -golang.org/x/sys 8f0908ab3b2457e2e15403d3697c9ef5cb4b57a9 -github.com/docker/go-units 8a7beacffa3009a9ac66bad506b18ffdd110cf97 -github.com/docker/go-connections 4ccf312bf1d35e5dbda654e57a9be4c3f3cd0366 - -github.com/RackSec/srslog 456df3a81436d29ba874f3590eeeee25d666f8a5 -github.com/imdario/mergo 0.2.1 - -#get libnetwork packages -github.com/docker/libnetwork b908488a139e81cb8c4091cd836745aeb4d813a4 -github.com/docker/go-events 18b43f1bc85d9cdd42c05a6cd2d444c7a200a894 -github.com/armon/go-radix e39d623f12e8e41c7b5529e9a9dd67a1e2261f80 -github.com/armon/go-metrics eb0af217e5e9747e41dd5303755356b62d28e3ec -github.com/hashicorp/go-msgpack 71c2886f5a673a35f909803f38ece5810165097b -github.com/hashicorp/memberlist 88ac4de0d1a0ca6def284b571342db3b777a4c37 -github.com/hashicorp/go-multierror fcdddc395df1ddf4247c69bd436e84cfa0733f7e -github.com/hashicorp/serf 598c54895cc5a7b1a24a398d635e8c0ea0959870 -github.com/docker/libkv 1d8431073ae03cdaedb198a89722f3aab6d418ef -github.com/vishvananda/netns 604eaf189ee867d8c147fafc28def2394e878d25 -github.com/vishvananda/netlink 482f7a52b758233521878cb6c5904b6bd63f3457 -github.com/BurntSushi/toml f706d00e3de6abe700c994cdd545a1a4915af060 -github.com/samuel/go-zookeeper d0e0d8e11f318e000a8cc434616d69e329edc374 -github.com/deckarep/golang-set ef32fa3046d9f249d399f98ebaf9be944430fd1d -github.com/coreos/etcd 3a49cbb769ebd8d1dd25abb1e83386e9883a5707 -github.com/ugorji/go f1f1a805ed361a0e078bb537e4ea78cd37dcf065 
-github.com/hashicorp/consul v0.5.2 -github.com/boltdb/bolt fff57c100f4dea1905678da7e90d92429dff2904 -github.com/miekg/dns 75e6e86cc601825c5dbcd4e0c209eab180997cd7 - -# get graph and distribution packages -github.com/docker/distribution 28602af35aceda2f8d571bad7ca37a54cf0250bc -github.com/vbatts/tar-split v0.10.1 - -# get go-zfs packages -github.com/mistifyio/go-zfs 22c9b32c84eb0d0c6f4043b6e90fc94073de92fa -github.com/pborman/uuid v1.0 - -# get desired notary commit, might also need to be updated in Dockerfile -github.com/docker/notary v0.4.2 - -google.golang.org/grpc v1.0.2 -github.com/miekg/pkcs11 df8ae6ca730422dba20c768ff38ef7d79077a59f -github.com/docker/go v1.5.1-1-1-gbaf439e -github.com/agl/ed25519 d2b94fd789ea21d12fac1a4443dd3a3f79cda72c - -github.com/opencontainers/runc 51371867a01c467f08af739783b8beafc15 # libcontainer -github.com/opencontainers/runtime-spec 1c7c27d043c2a5e513a44084d2b10d77d1402b8c # specs -github.com/seccomp/libseccomp-golang 32f571b70023028bd57d9288c20efbcb237f3ce0 -# libcontainer deps (see src/github.com/opencontainers/runc/Godeps/Godeps.json) -github.com/coreos/go-systemd v4 -github.com/godbus/dbus v4.0.0 -github.com/syndtr/gocapability 2c00daeb6c3b45114c80ac44119e7b8801fdd852 -github.com/golang/protobuf 1f49d83d9aa00e6ce4fc8258c71cc7786aec968a - -# gelf logging driver deps -github.com/Graylog2/go-gelf aab2f594e4585d43468ac57287b0dece9d806883 - -github.com/fluent/fluent-logger-golang v1.2.1 -# fluent-logger-golang deps -github.com/philhofer/fwd 899e4efba8eaa1fea74175308f3fae18ff3319fa -github.com/tinylib/msgp 75ee40d2601edf122ef667e2a07d600d4c44490c - -# fsnotify -github.com/fsnotify/fsnotify v1.2.11 - -# alilogs deps -github.com/galaxydi/go-loghub 174412b0a261627ee66637bdfcc9a17dc951b800 -github.com/cloudflare/golz4 ef862a3cdc58a6f1fee4e3af3d44fbe279194cde -github.com/golang/glog 23def4e6c14b4da8ac2ed8007337bc5eb5007998 - -# awslogs deps -github.com/aws/aws-sdk-go v1.4.22 -github.com/go-ini/ini 060d7da055ba6ec5ea7a31f116332fe5efa04ce0 -github.com/jmespath/go-jmespath 0b12d6b521d83fc7f755e7cfc1b1fbdd35a01a74 - -# logentries -github.com/bsphere/le_go d3308aafe090956bc89a65f0769f58251a1b4f03 - -# gcplogs deps -golang.org/x/oauth2 2baa8a1b9338cf13d9eeb27696d761155fa480be -google.golang.org/api dc6d2353af16e2a2b0ff6986af051d473a4ed468 -google.golang.org/cloud dae7e3d993bc3812a2185af60552bb6b847e52a0 - -# native credentials -github.com/docker/docker-credential-helpers f72c04f1d8e71959a6d103f808c50ccbad79b9fd - -# containerd -github.com/docker/containerd 03e5862ec0d8d3b3f750e19fca3ee367e13c090e -github.com/tonistiigi/fifo 1405643975692217d6720f8b54aeee1bf2cd5cf4 - -# cluster -github.com/docker/swarmkit 9e4bd71a1690cd27400714fcd98c329b752b5c4c -github.com/golang/mock bd3c8e81be01eef76d4b503f5e687d2d1354d2d9 -github.com/gogo/protobuf v0.3 -github.com/cloudflare/cfssl 7fb22c8cba7ecaf98e4082d22d65800cf45e042a -github.com/google/certificate-transparency d90e65c3a07988180c5b1ece71791c0b6506826e -golang.org/x/crypto 3fbbcd23f1cb824e69491a5930cfeff09b12f4d2 -golang.org/x/time a4bde12657593d5e90d0533a3e4fd95e635124cb -github.com/mreiferson/go-httpclient 63fe23f7434723dc904c901043af07931f293c47 -github.com/hashicorp/go-memdb 608dda3b1410a73eaf3ac8b517c9ae7ebab6aa87 https://github.com/floridoo/go-memdb -github.com/hashicorp/go-immutable-radix 8e8ed81f8f0bf1bdd829593fdd5c29922c1ea990 -github.com/hashicorp/golang-lru a0d98a5f288019575c6d1f4bb1573fef2d1fcdc4 -github.com/coreos/pkg fa29b1d70f0beaddd4c7021607cc3c3be8ce94b8 -github.com/pivotal-golang/clock 
3fd3c1944c59d9742e1cd333672181cd1a6f9fa0 -github.com/prometheus/client_golang 52437c81da6b127a9925d17eb3a382a2e5fd395e -github.com/beorn7/perks 4c0e84591b9aa9e6dcfdf3e020114cd81f89d5f9 -github.com/prometheus/client_model fa8ad6fec33561be4280a8f0514318c79d7f6cb6 -github.com/prometheus/common ebdfc6da46522d58825777cf1f90490a5b1ef1d8 -github.com/prometheus/procfs abf152e5f3e97f2fafac028d2cc06c1feb87ffa5 -bitbucket.org/ww/goautoneg 75cd24fc2f2c2a2088577d12123ddee5f54e0675 -github.com/matttproud/golang_protobuf_extensions fc2b8d3a73c4867e51861bbdd5ae3c1f0869dd6a -github.com/pkg/errors 839d9e913e063e28dfd0e6c7b7512793e0a48be9 - -# cli -github.com/spf13/cobra v1.5 https://github.com/dnephin/cobra.git -github.com/spf13/pflag dabebe21bf790f782ea4c7bbd2efc430de182afd -github.com/inconshreveable/mousetrap 76626ae9c91c4f2a10f34cad8ce83ea42c93bb75 -github.com/flynn-archive/go-shlex 3f9db97f856818214da2e1057f8ad84803971cff - -# metrics -github.com/docker/go-metrics 86138d05f285fd9737a99bee2d9be30866b59d72 - -# composefile -github.com/aanand/compose-file a3e58764f50597b6217fec07e9bff7225c4a1719 -github.com/mitchellh/mapstructure f3009df150dadf309fdee4a54ed65c124afad715 -github.com/xeipuuv/gojsonpointer e0fe6f68307607d540ed8eac07a342c33fa1b54a -github.com/xeipuuv/gojsonreference e02fc20de94c78484cd5ffb007f8af96be030a45 -github.com/xeipuuv/gojsonschema 93e72a773fade158921402d6a24c819b48aba29d -gopkg.in/yaml.v2 a83829b6f1293c91addabc89d0571c246397bbf4 diff --git a/vendor/src/github.com/cloudflare/golz4/.gitignore b/vendor/src/github.com/cloudflare/golz4/.gitignore new file mode 100644 index 0000000000000..00268614f0456 --- /dev/null +++ b/vendor/src/github.com/cloudflare/golz4/.gitignore @@ -0,0 +1,22 @@ +# Compiled Object files, Static and Dynamic libs (Shared Objects) +*.o +*.a +*.so + +# Folders +_obj +_test + +# Architecture specific extensions/prefixes +*.[568vq] +[568vq].out + +*.cgo1.go +*.cgo2.c +_cgo_defun.c +_cgo_gotypes.go +_cgo_export.* + +_testmain.go + +*.exe diff --git a/vendor/src/github.com/cloudflare/golz4/LICENSE b/vendor/src/github.com/cloudflare/golz4/LICENSE new file mode 100644 index 0000000000000..1579e81a9a18e --- /dev/null +++ b/vendor/src/github.com/cloudflare/golz4/LICENSE @@ -0,0 +1,27 @@ +Copyright (c) 2013 CloudFlare, Inc. +All rights reserved. + +Redistribution and use in source and binary forms, with or without modification, +are permitted provided that the following conditions are met: + +* Redistributions of source code must retain the above copyright notice, this + list of conditions and the following disclaimer. + +* Redistributions in binary form must reproduce the above copyright notice, this + list of conditions and the following disclaimer in the documentation and/or + other materials provided with the distribution. + +* Neither the name of the CloudFlare, Inc. nor the names of its + contributors may be used to endorse or promote products derived from + this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND +ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR
+ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
+ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
diff --git a/vendor/src/github.com/cloudflare/golz4/Makefile b/vendor/src/github.com/cloudflare/golz4/Makefile
new file mode 100644
index 0000000000000..2296d80e066e7
--- /dev/null
+++ b/vendor/src/github.com/cloudflare/golz4/Makefile
@@ -0,0 +1,14 @@
+GCFLAGS :=
+LDFLAGS :=
+
+.PHONY: install
+install:
+	@go install -v .
+
+.PHONY: test
+test:
+	@go test -gcflags='$(GCFLAGS)' -ldflags='$(LDFLAGS)' .
+
+.PHONY: bench
+bench:
+	@go test -gcflags='$(GCFLAGS)' -ldflags='$(LDFLAGS)' -bench .
diff --git a/vendor/src/github.com/cloudflare/golz4/README.md b/vendor/src/github.com/cloudflare/golz4/README.md
new file mode 100644
index 0000000000000..e1bdb26e8296f
--- /dev/null
+++ b/vendor/src/github.com/cloudflare/golz4/README.md
@@ -0,0 +1,4 @@
+golz4
+=====
+
+Golang interface to LZ4 compression
diff --git a/vendor/src/github.com/cloudflare/golz4/doc.go b/vendor/src/github.com/cloudflare/golz4/doc.go
new file mode 100644
index 0000000000000..4876be870f2ff
--- /dev/null
+++ b/vendor/src/github.com/cloudflare/golz4/doc.go
@@ -0,0 +1,4 @@
+// Package lz4 implements compression using lz4.c and lz4hc.c
+//
+// Copyright (c) 2013 CloudFlare, Inc.
+package lz4
diff --git a/vendor/src/github.com/cloudflare/golz4/lz4.go b/vendor/src/github.com/cloudflare/golz4/lz4.go
new file mode 100644
index 0000000000000..f9abcb2dbcada
--- /dev/null
+++ b/vendor/src/github.com/cloudflare/golz4/lz4.go
@@ -0,0 +1,55 @@
+package lz4
+
+// #cgo CFLAGS: -O3
+// #include "src/lz4.h"
+// #include "src/lz4.c"
+import "C"
+
+import (
+	"errors"
+	"fmt"
+	"unsafe"
+)
+
+// p gets a char pointer to the first byte of a []byte slice
+func p(in []byte) *C.char {
+	if len(in) == 0 {
+		return (*C.char)(unsafe.Pointer(nil))
+	}
+	return (*C.char)(unsafe.Pointer(&in[0]))
+}
+
+// clen gets the length of a []byte slice as a char *
+func clen(s []byte) C.int {
+	return C.int(len(s))
+}
+
+// Uncompress with a known output size. len(out) should be equal to
+// the length of the uncompressed out.
+func Uncompress(in, out []byte) (error) {
+	if int(C.LZ4_decompress_safe(p(in), p(out), clen(in), clen(out))) < 0 {
+		return errors.New("Malformed compression stream")
+	}
+
+	return nil
+}
+
+// CompressBound calculates the size of the output buffer needed by
+// Compress. This is based on the following macro:
+//
+// #define LZ4_COMPRESSBOUND(isize)
+//     ((unsigned int)(isize) > (unsigned int)LZ4_MAX_INPUT_SIZE ? 0 : (isize) + ((isize)/255) + 16)
+func CompressBound(in []byte) int {
+	return len(in) + ((len(in) / 255) + 16)
+}
+
+// Compress compresses in and puts the content in out. len(out)
+// should have enough space for the compressed data (use CompressBound
+// to calculate). Returns the number of bytes in the out slice.
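+//
+// An illustrative round trip (a sketch added for this guide, not part of
+// upstream golz4; assumes the package is imported as "lz4"):
+//
+//	src := []byte("hello hello hello hello")
+//	comp := make([]byte, lz4.CompressBound(src))
+//	n, err := lz4.Compress(src, comp)
+//	if err != nil {
+//		// comp was too small for the compressed output
+//	}
+//	comp = comp[:n]
+//
+//	dst := make([]byte, len(src)) // Uncompress needs the exact original size
+//	err = lz4.Uncompress(comp, dst)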
+func Compress(in, out []byte) (outSize int, err error) { + outSize = int(C.LZ4_compress_limitedOutput(p(in), p(out), clen(in), clen(out))) + if outSize == 0 { + err = fmt.Errorf("insufficient space for compression") + } + return +} diff --git a/vendor/src/github.com/cloudflare/golz4/lz4_hc.go b/vendor/src/github.com/cloudflare/golz4/lz4_hc.go new file mode 100644 index 0000000000000..9779352c4608c --- /dev/null +++ b/vendor/src/github.com/cloudflare/golz4/lz4_hc.go @@ -0,0 +1,38 @@ +package lz4 + +// #cgo CFLAGS: -O3 +// #include "src/lz4hc.h" +// #include "src/lz4hc.c" +import "C" + +import ( + "fmt" +) + +// CompressHC compresses in and puts the content in out. len(out) +// should have enough space for the compressed data (use CompressBound +// to calculate). Returns the number of bytes in the out slice. Determines +// the compression level automatically. +func CompressHC(in, out []byte) (int, error) { + // 0 automatically sets the compression level. + return CompressHCLevel(in, out, 0) +} + +// CompressHCLevel compresses in at the given compression level and puts the +// content in out. len(out) should have enough space for the compressed data +// (use CompressBound to calculate). Returns the number of bytes in the out +// slice. To automatically choose the compression level, use 0. Otherwise, use +// any value in the inclusive range 1 (worst) through 16 (best). Most +// applications will prefer CompressHC. +func CompressHCLevel(in, out []byte, level int) (outSize int, err error) { + // LZ4HC does not handle empty buffers. Pass through to Compress. + if len(in) == 0 || len(out) == 0 { + return Compress(in, out) + } + + outSize = int(C.LZ4_compressHC2_limitedOutput(p(in), p(out), clen(in), clen(out), C.int(level))) + if outSize == 0 { + err = fmt.Errorf("insufficient space for compression") + } + return +} diff --git a/vendor/src/github.com/cloudflare/golz4/sample.txt b/vendor/src/github.com/cloudflare/golz4/sample.txt new file mode 100644 index 0000000000000..3bb27fb76d1ea --- /dev/null +++ b/vendor/src/github.com/cloudflare/golz4/sample.txt @@ -0,0 +1,143 @@ +CANTO I + + +IN the midway of this our mortal life, +I found me in a gloomy wood, astray +Gone from the path direct: and e'en to tell +It were no easy task, how savage wild +That forest, how robust and rough its growth, +Which to remember only, my dismay +Renews, in bitterness not far from death. +Yet to discourse of what there good befell, +All else will I relate discover'd there. +How first I enter'd it I scarce can say, +Such sleepy dullness in that instant weigh'd +My senses down, when the true path I left, +But when a mountain's foot I reach'd, where clos'd +The valley, that had pierc'd my heart with dread, +I look'd aloft, and saw his shoulders broad +Already vested with that planet's beam, +Who leads all wanderers safe through every way. + +Then was a little respite to the fear, +That in my heart's recesses deep had lain, +All of that night, so pitifully pass'd: +And as a man, with difficult short breath, +Forespent with toiling, 'scap'd from sea to shore, +Turns to the perilous wide waste, and stands +At gaze; e'en so my spirit, that yet fail'd +Struggling with terror, turn'd to view the straits, +That none hath pass'd and liv'd. My weary frame +After short pause recomforted, again +I journey'd on over that lonely steep, + +The hinder foot still firmer. Scarce the ascent +Began, when, lo! 
a panther, nimble, light, +And cover'd with a speckled skin, appear'd, +Nor, when it saw me, vanish'd, rather strove +To check my onward going; that ofttimes +With purpose to retrace my steps I turn'd. + +The hour was morning's prime, and on his way +Aloft the sun ascended with those stars, +That with him rose, when Love divine first mov'd +Those its fair works: so that with joyous hope +All things conspir'd to fill me, the gay skin +Of that swift animal, the matin dawn +And the sweet season. Soon that joy was chas'd, +And by new dread succeeded, when in view +A lion came, 'gainst me, as it appear'd, + +With his head held aloft and hunger-mad, +That e'en the air was fear-struck. A she-wolf +Was at his heels, who in her leanness seem'd +Full of all wants, and many a land hath made +Disconsolate ere now. She with such fear +O'erwhelmed me, at the sight of her appall'd, +That of the height all hope I lost. As one, +Who with his gain elated, sees the time +When all unwares is gone, he inwardly +Mourns with heart-griping anguish; such was I, +Haunted by that fell beast, never at peace, +Who coming o'er against me, by degrees +Impell'd me where the sun in silence rests. + +While to the lower space with backward step +I fell, my ken discern'd the form one of one, +Whose voice seem'd faint through long disuse of speech. +When him in that great desert I espied, +"Have mercy on me!" cried I out aloud, +"Spirit! or living man! what e'er thou be!" + +He answer'd: "Now not man, man once I was, +And born of Lombard parents, Mantuana both +By country, when the power of Julius yet +Was scarcely firm. At Rome my life was past +Beneath the mild Augustus, in the time +Of fabled deities and false. A bard +Was I, and made Anchises' upright son +The subject of my song, who came from Troy, +When the flames prey'd on Ilium's haughty towers. +But thou, say wherefore to such perils past +Return'st thou? wherefore not this pleasant mount +Ascendest, cause and source of all delight?" +"And art thou then that Virgil, that well-spring, +From which such copious floods of eloquence +Have issued?" I with front abash'd replied. +"Glory and light of all the tuneful train! +May it avail me that I long with zeal +Have sought thy volume, and with love immense +Have conn'd it o'er. My master thou and guide! +Thou he from whom alone I have deriv'd +That style, which for its beauty into fame +Exalts me. See the beast, from whom I fled. +O save me from her, thou illustrious sage!" + +"For every vein and pulse throughout my frame +She hath made tremble." He, soon as he saw +That I was weeping, answer'd, "Thou must needs +Another way pursue, if thou wouldst 'scape +From out that savage wilderness. This beast, +At whom thou criest, her way will suffer none +To pass, and no less hindrance makes than death: +So bad and so accursed in her kind, +That never sated is her ravenous will, +Still after food more craving than before. +To many an animal in wedlock vile +She fastens, and shall yet to many more, +Until that greyhound come, who shall destroy +Her with sharp pain. He will not life support +By earth nor its base metals, but by love, +Wisdom, and virtue, and his land shall be +The land 'twixt either Feltro. In his might +Shall safety to Italia's plains arise, +For whose fair realm, Camilla, virgin pure, +Nisus, Euryalus, and Turnus fell. +He with incessant chase through every town +Shall worry, until he to hell at length +Restore her, thence by envy first let loose. 
+I for thy profit pond'ring now devise, +That thou mayst follow me, and I thy guide +Will lead thee hence through an eternal space, +Where thou shalt hear despairing shrieks, and see +Spirits of old tormented, who invoke +A second death; and those next view, who dwell +Content in fire, for that they hope to come, +Whene'er the time may be, among the blest, +Into whose regions if thou then desire +T' ascend, a spirit worthier then I +Must lead thee, in whose charge, when I depart, +Thou shalt be left: for that Almighty King, +Who reigns above, a rebel to his law, +Adjudges me, and therefore hath decreed, +That to his city none through me should come. +He in all parts hath sway; there rules, there holds +His citadel and throne. O happy those, +Whom there he chooses!" I to him in few: +"Bard! by that God, whom thou didst not adore, +I do beseech thee (that this ill and worse +I may escape) to lead me, where thou saidst, +That I Saint Peter's gate may view, and those +Who as thou tell'st, are in such dismal plight." + +Onward he mov'd, I close his steps pursu'd. diff --git a/vendor/src/github.com/cloudflare/golz4/src/lz4.c b/vendor/src/github.com/cloudflare/golz4/src/lz4.c new file mode 100644 index 0000000000000..ed928ced3f154 --- /dev/null +++ b/vendor/src/github.com/cloudflare/golz4/src/lz4.c @@ -0,0 +1,1367 @@ +/* + LZ4 - Fast LZ compression algorithm + Copyright (C) 2011-2015, Yann Collet. + BSD 2-Clause License (http://www.opensource.org/licenses/bsd-license.php) + + Redistribution and use in source and binary forms, with or without + modification, are permitted provided that the following conditions are + met: + + * Redistributions of source code must retain the above copyright + notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above + copyright notice, this list of conditions and the following disclaimer + in the documentation and/or other materials provided with the + distribution. + + THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + + You can contact the author at : + - LZ4 source repository : http://code.google.com/p/lz4 + - LZ4 source mirror : https://github.com/Cyan4973/lz4 + - LZ4 public forum : https://groups.google.com/forum/#!forum/lz4c +*/ + + +/************************************** + Tuning parameters +**************************************/ +/* + * HEAPMODE : + * Select how default compression functions will allocate memory for their hash table, + * in memory stack (0:default, fastest), or in memory heap (1:requires malloc()). + */ +#define HEAPMODE 0 + +/* + * CPU_HAS_EFFICIENT_UNALIGNED_MEMORY_ACCESS : + * By default, the source code expects the compiler to correctly optimize + * 4-bytes and 8-bytes read on architectures able to handle it efficiently. + * This is not always the case. 
In some circumstances (ARM notably),
+ * the compiler will issue cautious code even when target is able to correctly handle unaligned memory accesses.
+ *
+ * You can force the compiler to use unaligned memory access by uncommenting the line below.
+ * One of the below scenarios will happen :
+ * 1 - Your target CPU correctly handle unaligned access, and was not well optimized by compiler (good case).
+ *     You will witness large performance improvements (+50% and up).
+ *     Keep the line uncommented and send a word to upstream (https://groups.google.com/forum/#!forum/lz4c)
+ *     The goal is to automatically detect such situations by adding your target CPU within an exception list.
+ * 2 - Your target CPU correctly handle unaligned access, and was already already optimized by compiler
+ *     No change will be experienced.
+ * 3 - Your target CPU inefficiently handle unaligned access.
+ *     You will experience a performance loss. Comment back the line.
+ * 4 - Your target CPU does not handle unaligned access.
+ *     Program will crash.
+ * If uncommenting results in better performance (case 1)
+ * please report your configuration to upstream (https://groups.google.com/forum/#!forum/lz4c)
+ * An automatic detection macro will be added to match your case within future versions of the library.
+ */
+/* #define CPU_HAS_EFFICIENT_UNALIGNED_MEMORY_ACCESS 1 */
+
+
+/**************************************
+ CPU Feature Detection
+**************************************/
+/*
+ * Automated efficient unaligned memory access detection
+ * Based on known hardware architectures
+ * This list will be updated thanks to feedbacks
+ */
+#if defined(CPU_HAS_EFFICIENT_UNALIGNED_MEMORY_ACCESS) \
+ || defined(__ARM_FEATURE_UNALIGNED) \
+ || defined(__i386__) || defined(__x86_64__) \
+ || defined(_M_IX86) || defined(_M_X64) \
+ || defined(__ARM_ARCH_7__) || defined(__ARM_ARCH_8__) \
+ || (defined(_M_ARM) && (_M_ARM >= 7))
+# define LZ4_UNALIGNED_ACCESS 1
+#else
+# define LZ4_UNALIGNED_ACCESS 0
+#endif
+
+/*
+ * LZ4_FORCE_SW_BITCOUNT
+ * Define this parameter if your target system or compiler does not support hardware bit count
+ */
+#if defined(_MSC_VER) && defined(_WIN32_WCE) /* Visual Studio for Windows CE does not support Hardware bit count */
+# define LZ4_FORCE_SW_BITCOUNT
+#endif
+
+
+/**************************************
+ Compiler Options
+**************************************/
+#if defined(__STDC_VERSION__) && (__STDC_VERSION__ >= 199901L) /* C99 */
+/* "restrict" is a known keyword */
+#else
+# define restrict /* Disable restrict */
+#endif
+
+#ifdef _MSC_VER /* Visual Studio */
+# define FORCE_INLINE static __forceinline
+# include <intrin.h>
+# pragma warning(disable : 4127) /* disable: C4127: conditional expression is constant */
+# pragma warning(disable : 4293) /* disable: C4293: too large shift (32-bits) */
+#else
+# if defined(__STDC_VERSION__) && (__STDC_VERSION__ >= 199901L) /* C99 */
+# ifdef __GNUC__
+# define FORCE_INLINE static inline __attribute__((always_inline))
+# else
+# define FORCE_INLINE static inline
+# endif
+# else
+# define FORCE_INLINE static
+# endif /* __STDC_VERSION__ */
+#endif /* _MSC_VER */
+
+#define GCC_VERSION (__GNUC__ * 100 + __GNUC_MINOR__)
+
+#if (GCC_VERSION >= 302) || (__INTEL_COMPILER >= 800) || defined(__clang__)
+# define expect(expr,value) (__builtin_expect ((expr),(value)) )
+#else
+# define expect(expr,value) (expr)
+#endif
+
+#define likely(expr) expect((expr) != 0, 1)
+#define unlikely(expr) expect((expr) != 0, 0)
+
+
+/**************************************
+ Memory routines
+**************************************/
+#include <stdlib.h> /* malloc, calloc, free */
+#define ALLOCATOR(n,s) calloc(n,s)
+#define FREEMEM free
+#include <string.h> /* memset, memcpy */
+#define MEM_INIT memset
+
+
+/**************************************
+ Includes
+**************************************/
+#include "lz4.h"
+
+
+/**************************************
+ Basic Types
+**************************************/
+#if defined (__STDC_VERSION__) && (__STDC_VERSION__ >= 199901L) /* C99 */
+# include <stdint.h>
+ typedef uint8_t BYTE;
+ typedef uint16_t U16;
+ typedef uint32_t U32;
+ typedef int32_t S32;
+ typedef uint64_t U64;
+#else
+ typedef unsigned char BYTE;
+ typedef unsigned short U16;
+ typedef unsigned int U32;
+ typedef signed int S32;
+ typedef unsigned long long U64;
+#endif
+
+
+/**************************************
+ Reading and writing into memory
+**************************************/
+#define STEPSIZE sizeof(size_t)
+
+static unsigned LZ4_64bits(void) { return sizeof(void*)==8; }
+
+static unsigned LZ4_isLittleEndian(void)
+{
+    const union { U32 i; BYTE c[4]; } one = { 1 }; /* don't use static : performance detrimental */
+    return one.c[0];
+}
+
+
+static U16 LZ4_readLE16(const void* memPtr)
+{
+    if ((LZ4_UNALIGNED_ACCESS) && (LZ4_isLittleEndian()))
+        return *(U16*)memPtr;
+    else
+    {
+        const BYTE* p = memPtr;
+        return (U16)((U16)p[0] + (p[1]<<8));
+    }
+}
+
+static void LZ4_writeLE16(void* memPtr, U16 value)
+{
+    if ((LZ4_UNALIGNED_ACCESS) && (LZ4_isLittleEndian()))
+    {
+        *(U16*)memPtr = value;
+        return;
+    }
+    else
+    {
+        BYTE* p = memPtr;
+        p[0] = (BYTE) value;
+        p[1] = (BYTE)(value>>8);
+    }
+}
+
+
+static U16 LZ4_read16(const void* memPtr)
+{
+    if (LZ4_UNALIGNED_ACCESS)
+        return *(U16*)memPtr;
+    else
+    {
+        U16 val16;
+        memcpy(&val16, memPtr, 2);
+        return val16;
+    }
+}
+
+static U32 LZ4_read32(const void* memPtr)
+{
+    if (LZ4_UNALIGNED_ACCESS)
+        return *(U32*)memPtr;
+    else
+    {
+        U32 val32;
+        memcpy(&val32, memPtr, 4);
+        return val32;
+    }
+}
+
+static U64 LZ4_read64(const void* memPtr)
+{
+    if (LZ4_UNALIGNED_ACCESS)
+        return *(U64*)memPtr;
+    else
+    {
+        U64 val64;
+        memcpy(&val64, memPtr, 8);
+        return val64;
+    }
+}
+
+static size_t LZ4_read_ARCH(const void* p)
+{
+    if (LZ4_64bits())
+        return (size_t)LZ4_read64(p);
+    else
+        return (size_t)LZ4_read32(p);
+}
+
+
+static void LZ4_copy4(void* dstPtr, const void* srcPtr)
+{
+    if (LZ4_UNALIGNED_ACCESS)
+    {
+        *(U32*)dstPtr = *(U32*)srcPtr;
+        return;
+    }
+    memcpy(dstPtr, srcPtr, 4);
+}
+
+static void LZ4_copy8(void* dstPtr, const void* srcPtr)
+{
+#if GCC_VERSION!=409 /* disabled on GCC 4.9, as it generates invalid opcode (crash) */
+    if (LZ4_UNALIGNED_ACCESS)
+    {
+        if (LZ4_64bits())
+            *(U64*)dstPtr = *(U64*)srcPtr;
+        else
+            ((U32*)dstPtr)[0] = ((U32*)srcPtr)[0],
+            ((U32*)dstPtr)[1] = ((U32*)srcPtr)[1];
+        return;
+    }
+#endif
+    memcpy(dstPtr, srcPtr, 8);
+}
+
+/* customized version of memcpy, which may overwrite up to 7 bytes beyond dstEnd */
+static void LZ4_wildCopy(void* dstPtr, const void* srcPtr, void* dstEnd)
+{
+    BYTE* d = dstPtr;
+    const BYTE* s = srcPtr;
+    BYTE* e = dstEnd;
+    do { LZ4_copy8(d,s); d+=8; s+=8; } while (d<e);
+}
+
+
+/**************************************
+ Common Constants
+**************************************/
+#define MINMATCH 4
+
+#define COPYLENGTH 8
+#define LASTLITERALS 5
+#define MFLIMIT (COPYLENGTH+MINMATCH)
+static const int LZ4_minLength = (MFLIMIT+1);
+
+#define KB *(1 <<10)
+#define MB *(1 <<20)
+#define GB *(1U<<30)
+
+#define MAXD_LOG 16
+#define MAX_DISTANCE ((1 << MAXD_LOG) - 1)
+
+#define ML_BITS 4
+#define ML_MASK ((1U<<ML_BITS)-1)
+#define RUN_BITS (8-ML_BITS)
+#define RUN_MASK ((1U<<RUN_BITS)-1)
+
+
+/**************************************
+ Common Utils
+**************************************/
+#define LZ4_STATIC_ASSERT(c)    { enum { LZ4_static_assert = 1/(int)(!!(c)) }; }   /* use only *after* variable declarations */
+
+
+/**************************************
+ Common functions
+**************************************/
+static unsigned LZ4_NbCommonBytes (register size_t val)
+{
+    if (LZ4_isLittleEndian())
+    {
+        if (LZ4_64bits())
+        {
+# if defined(_MSC_VER) && defined(_WIN64) && !defined(LZ4_FORCE_SW_BITCOUNT)
+            unsigned long r = 0;
+            _BitScanForward64( &r, (U64)val );
+            return (int)(r>>3);
+# elif defined(__GNUC__) && (GCC_VERSION >= 304) && !defined(LZ4_FORCE_SW_BITCOUNT)
+            return (__builtin_ctzll((U64)val) >> 3);
+# else
+            static const int DeBruijnBytePos[64] = { 0, 0, 0, 0, 0, 1, 1, 2, 0, 3, 1, 3, 1, 4, 2, 7, 0, 2, 3, 6, 1, 5, 3, 5, 1, 3, 4, 4, 2, 5, 6, 7, 7, 0, 1, 2, 3, 3, 4, 6, 2, 6, 5, 5, 3, 4, 5, 6, 7, 1, 2, 4, 6, 4, 4, 5, 7, 2, 6, 5, 7, 6, 7, 7 };
+            return DeBruijnBytePos[((U64)((val & -(long long)val)
* 0x0218A392CDABBD3FULL)) >> 58]; +# endif + } + else /* 32 bits */ + { +# if defined(_MSC_VER) && !defined(LZ4_FORCE_SW_BITCOUNT) + unsigned long r; + _BitScanForward( &r, (U32)val ); + return (int)(r>>3); +# elif defined(__GNUC__) && (GCC_VERSION >= 304) && !defined(LZ4_FORCE_SW_BITCOUNT) + return (__builtin_ctz((U32)val) >> 3); +# else + static const int DeBruijnBytePos[32] = { 0, 0, 3, 0, 3, 1, 3, 0, 3, 2, 2, 1, 3, 2, 0, 1, 3, 3, 1, 2, 2, 2, 2, 0, 3, 1, 2, 0, 1, 0, 1, 1 }; + return DeBruijnBytePos[((U32)((val & -(S32)val) * 0x077CB531U)) >> 27]; +# endif + } + } + else /* Big Endian CPU */ + { + if (LZ4_64bits()) + { +# if defined(_MSC_VER) && defined(_WIN64) && !defined(LZ4_FORCE_SW_BITCOUNT) + unsigned long r = 0; + _BitScanReverse64( &r, val ); + return (unsigned)(r>>3); +# elif defined(__GNUC__) && (GCC_VERSION >= 304) && !defined(LZ4_FORCE_SW_BITCOUNT) + return (__builtin_clzll(val) >> 3); +# else + unsigned r; + if (!(val>>32)) { r=4; } else { r=0; val>>=32; } + if (!(val>>16)) { r+=2; val>>=8; } else { val>>=24; } + r += (!val); + return r; +# endif + } + else /* 32 bits */ + { +# if defined(_MSC_VER) && !defined(LZ4_FORCE_SW_BITCOUNT) + unsigned long r = 0; + _BitScanReverse( &r, (unsigned long)val ); + return (unsigned)(r>>3); +# elif defined(__GNUC__) && (GCC_VERSION >= 304) && !defined(LZ4_FORCE_SW_BITCOUNT) + return (__builtin_clz(val) >> 3); +# else + unsigned r; + if (!(val>>16)) { r=2; val>>=8; } else { r=0; val>>=24; } + r += (!val); + return r; +# endif + } + } +} + +static unsigned LZ4_count(const BYTE* pIn, const BYTE* pMatch, const BYTE* pInLimit) +{ + const BYTE* const pStart = pIn; + + while (likely(pIn compression run slower on incompressible data */ + + +/************************************** + Local Utils +**************************************/ +int LZ4_versionNumber (void) { return LZ4_VERSION_NUMBER; } +int LZ4_compressBound(int isize) { return LZ4_COMPRESSBOUND(isize); } + + +/************************************** + Local Structures and types +**************************************/ +typedef struct { + U32 hashTable[HASH_SIZE_U32]; + U32 currentOffset; + U32 initCheck; + const BYTE* dictionary; + const BYTE* bufferStart; + U32 dictSize; +} LZ4_stream_t_internal; + +typedef enum { notLimited = 0, limitedOutput = 1 } limitedOutput_directive; +typedef enum { byPtr, byU32, byU16 } tableType_t; + +typedef enum { noDict = 0, withPrefix64k, usingExtDict } dict_directive; +typedef enum { noDictIssue = 0, dictSmall } dictIssue_directive; + +typedef enum { endOnOutputSize = 0, endOnInputSize = 1 } endCondition_directive; +typedef enum { full = 0, partial = 1 } earlyEnd_directive; + + + +/******************************** + Compression functions +********************************/ + +static U32 LZ4_hashSequence(U32 sequence, tableType_t tableType) +{ + if (tableType == byU16) + return (((sequence) * 2654435761U) >> ((MINMATCH*8)-(LZ4_HASHLOG+1))); + else + return (((sequence) * 2654435761U) >> ((MINMATCH*8)-LZ4_HASHLOG)); +} + +static U32 LZ4_hashPosition(const BYTE* p, tableType_t tableType) { return LZ4_hashSequence(LZ4_read32(p), tableType); } + +static void LZ4_putPositionOnHash(const BYTE* p, U32 h, void* tableBase, tableType_t tableType, const BYTE* srcBase) +{ + switch (tableType) + { + case byPtr: { const BYTE** hashTable = (const BYTE**)tableBase; hashTable[h] = p; return; } + case byU32: { U32* hashTable = (U32*) tableBase; hashTable[h] = (U32)(p-srcBase); return; } + case byU16: { U16* hashTable = (U16*) tableBase; hashTable[h] = (U16)(p-srcBase); return; } 
+ } +} + +static void LZ4_putPosition(const BYTE* p, void* tableBase, tableType_t tableType, const BYTE* srcBase) +{ + U32 h = LZ4_hashPosition(p, tableType); + LZ4_putPositionOnHash(p, h, tableBase, tableType, srcBase); +} + +static const BYTE* LZ4_getPositionOnHash(U32 h, void* tableBase, tableType_t tableType, const BYTE* srcBase) +{ + if (tableType == byPtr) { const BYTE** hashTable = (const BYTE**) tableBase; return hashTable[h]; } + if (tableType == byU32) { U32* hashTable = (U32*) tableBase; return hashTable[h] + srcBase; } + { U16* hashTable = (U16*) tableBase; return hashTable[h] + srcBase; } /* default, to ensure a return */ +} + +static const BYTE* LZ4_getPosition(const BYTE* p, void* tableBase, tableType_t tableType, const BYTE* srcBase) +{ + U32 h = LZ4_hashPosition(p, tableType); + return LZ4_getPositionOnHash(h, tableBase, tableType, srcBase); +} + +static int LZ4_compress_generic( + void* ctx, + const char* source, + char* dest, + int inputSize, + int maxOutputSize, + limitedOutput_directive outputLimited, + tableType_t tableType, + dict_directive dict, + dictIssue_directive dictIssue) +{ + LZ4_stream_t_internal* const dictPtr = (LZ4_stream_t_internal*)ctx; + + const BYTE* ip = (const BYTE*) source; + const BYTE* base; + const BYTE* lowLimit; + const BYTE* const lowRefLimit = ip - dictPtr->dictSize; + const BYTE* const dictionary = dictPtr->dictionary; + const BYTE* const dictEnd = dictionary + dictPtr->dictSize; + const size_t dictDelta = dictEnd - (const BYTE*)source; + const BYTE* anchor = (const BYTE*) source; + const BYTE* const iend = ip + inputSize; + const BYTE* const mflimit = iend - MFLIMIT; + const BYTE* const matchlimit = iend - LASTLITERALS; + + BYTE* op = (BYTE*) dest; + BYTE* const olimit = op + maxOutputSize; + + U32 forwardH; + size_t refDelta=0; + + /* Init conditions */ + if ((U32)inputSize > (U32)LZ4_MAX_INPUT_SIZE) return 0; /* Unsupported input size, too large (or negative) */ + switch(dict) + { + case noDict: + default: + base = (const BYTE*)source; + lowLimit = (const BYTE*)source; + break; + case withPrefix64k: + base = (const BYTE*)source - dictPtr->currentOffset; + lowLimit = (const BYTE*)source - dictPtr->dictSize; + break; + case usingExtDict: + base = (const BYTE*)source - dictPtr->currentOffset; + lowLimit = (const BYTE*)source; + break; + } + if ((tableType == byU16) && (inputSize>=LZ4_64Klimit)) return 0; /* Size too large (not within 64K limit) */ + if (inputSize> LZ4_skipTrigger; + + if (unlikely(forwardIp > mflimit)) goto _last_literals; + + match = LZ4_getPositionOnHash(h, ctx, tableType, base); + if (dict==usingExtDict) + { + if (match<(const BYTE*)source) + { + refDelta = dictDelta; + lowLimit = dictionary; + } + else + { + refDelta = 0; + lowLimit = (const BYTE*)source; + } + } + forwardH = LZ4_hashPosition(forwardIp, tableType); + LZ4_putPositionOnHash(ip, h, ctx, tableType, base); + + } while ( ((dictIssue==dictSmall) ? (match < lowRefLimit) : 0) + || ((tableType==byU16) ? 
0 : (match + MAX_DISTANCE < ip)) + || (LZ4_read32(match+refDelta) != LZ4_read32(ip)) ); + } + + /* Catch up */ + while ((ip>anchor) && (match+refDelta > lowLimit) && (unlikely(ip[-1]==match[refDelta-1]))) { ip--; match--; } + + { + /* Encode Literal length */ + unsigned litLength = (unsigned)(ip - anchor); + token = op++; + if ((outputLimited) && (unlikely(op + litLength + (2 + 1 + LASTLITERALS) + (litLength/255) > olimit))) + return 0; /* Check output limit */ + if (litLength>=RUN_MASK) + { + int len = (int)litLength-RUN_MASK; + *token=(RUN_MASK<= 255 ; len-=255) *op++ = 255; + *op++ = (BYTE)len; + } + else *token = (BYTE)(litLength< matchlimit) limit = matchlimit; + matchLength = LZ4_count(ip+MINMATCH, match+MINMATCH, limit); + ip += MINMATCH + matchLength; + if (ip==limit) + { + unsigned more = LZ4_count(ip, (const BYTE*)source, matchlimit); + matchLength += more; + ip += more; + } + } + else + { + matchLength = LZ4_count(ip+MINMATCH, match+MINMATCH, matchlimit); + ip += MINMATCH + matchLength; + } + + if ((outputLimited) && (unlikely(op + (1 + LASTLITERALS) + (matchLength>>8) > olimit))) + return 0; /* Check output limit */ + if (matchLength>=ML_MASK) + { + *token += ML_MASK; + matchLength -= ML_MASK; + for (; matchLength >= 510 ; matchLength-=510) { *op++ = 255; *op++ = 255; } + if (matchLength >= 255) { matchLength-=255; *op++ = 255; } + *op++ = (BYTE)matchLength; + } + else *token += (BYTE)(matchLength); + } + + anchor = ip; + + /* Test end of chunk */ + if (ip > mflimit) break; + + /* Fill table */ + LZ4_putPosition(ip-2, ctx, tableType, base); + + /* Test next position */ + match = LZ4_getPosition(ip, ctx, tableType, base); + if (dict==usingExtDict) + { + if (match<(const BYTE*)source) + { + refDelta = dictDelta; + lowLimit = dictionary; + } + else + { + refDelta = 0; + lowLimit = (const BYTE*)source; + } + } + LZ4_putPosition(ip, ctx, tableType, base); + if ( ((dictIssue==dictSmall) ? 
(match>=lowRefLimit) : 1) + && (match+MAX_DISTANCE>=ip) + && (LZ4_read32(match+refDelta)==LZ4_read32(ip)) ) + { token=op++; *token=0; goto _next_match; } + + /* Prepare next loop */ + forwardH = LZ4_hashPosition(++ip, tableType); + } + +_last_literals: + /* Encode Last Literals */ + { + int lastRun = (int)(iend - anchor); + if ((outputLimited) && (((char*)op - dest) + lastRun + 1 + ((lastRun+255-RUN_MASK)/255) > (U32)maxOutputSize)) + return 0; /* Check output limit */ + if (lastRun>=(int)RUN_MASK) { *op++=(RUN_MASK<= 255 ; lastRun-=255) *op++ = 255; *op++ = (BYTE) lastRun; } + else *op++ = (BYTE)(lastRun<= sizeof(LZ4_stream_t_internal)); /* A compilation error here means LZ4_STREAMSIZE is not large enough */ + LZ4_resetStream(lz4s); + return lz4s; +} + +int LZ4_freeStream (LZ4_stream_t* LZ4_stream) +{ + FREEMEM(LZ4_stream); + return (0); +} + + +int LZ4_loadDict (LZ4_stream_t* LZ4_dict, const char* dictionary, int dictSize) +{ + LZ4_stream_t_internal* dict = (LZ4_stream_t_internal*) LZ4_dict; + const BYTE* p = (const BYTE*)dictionary; + const BYTE* const dictEnd = p + dictSize; + const BYTE* base; + + if (dict->initCheck) LZ4_resetStream(LZ4_dict); /* Uninitialized structure detected */ + + if (dictSize < MINMATCH) + { + dict->dictionary = NULL; + dict->dictSize = 0; + return 0; + } + + if (p <= dictEnd - 64 KB) p = dictEnd - 64 KB; + base = p - dict->currentOffset; + dict->dictionary = p; + dict->dictSize = (U32)(dictEnd - p); + dict->currentOffset += dict->dictSize; + + while (p <= dictEnd-MINMATCH) + { + LZ4_putPosition(p, dict, byU32, base); + p+=3; + } + + return dict->dictSize; +} + + +static void LZ4_renormDictT(LZ4_stream_t_internal* LZ4_dict, const BYTE* src) +{ + if ((LZ4_dict->currentOffset > 0x80000000) || + ((size_t)LZ4_dict->currentOffset > (size_t)src)) /* address space overflow */ + { + /* rescale hash table */ + U32 delta = LZ4_dict->currentOffset - 64 KB; + const BYTE* dictEnd = LZ4_dict->dictionary + LZ4_dict->dictSize; + int i; + for (i=0; ihashTable[i] < delta) LZ4_dict->hashTable[i]=0; + else LZ4_dict->hashTable[i] -= delta; + } + LZ4_dict->currentOffset = 64 KB; + if (LZ4_dict->dictSize > 64 KB) LZ4_dict->dictSize = 64 KB; + LZ4_dict->dictionary = dictEnd - LZ4_dict->dictSize; + } +} + + +FORCE_INLINE int LZ4_compress_continue_generic (void* LZ4_stream, const char* source, char* dest, int inputSize, + int maxOutputSize, limitedOutput_directive limit) +{ + LZ4_stream_t_internal* streamPtr = (LZ4_stream_t_internal*)LZ4_stream; + const BYTE* const dictEnd = streamPtr->dictionary + streamPtr->dictSize; + + const BYTE* smallest = (const BYTE*) source; + if (streamPtr->initCheck) return 0; /* Uninitialized structure detected */ + if ((streamPtr->dictSize>0) && (smallest>dictEnd)) smallest = dictEnd; + LZ4_renormDictT(streamPtr, smallest); + + /* Check overlapping input/dictionary space */ + { + const BYTE* sourceEnd = (const BYTE*) source + inputSize; + if ((sourceEnd > streamPtr->dictionary) && (sourceEnd < dictEnd)) + { + streamPtr->dictSize = (U32)(dictEnd - sourceEnd); + if (streamPtr->dictSize > 64 KB) streamPtr->dictSize = 64 KB; + if (streamPtr->dictSize < 4) streamPtr->dictSize = 0; + streamPtr->dictionary = dictEnd - streamPtr->dictSize; + } + } + + /* prefix mode : source data follows dictionary */ + if (dictEnd == (const BYTE*)source) + { + int result; + if ((streamPtr->dictSize < 64 KB) && (streamPtr->dictSize < streamPtr->currentOffset)) + result = LZ4_compress_generic(LZ4_stream, source, dest, inputSize, maxOutputSize, limit, byU32, withPrefix64k, 
dictSmall); + else + result = LZ4_compress_generic(LZ4_stream, source, dest, inputSize, maxOutputSize, limit, byU32, withPrefix64k, noDictIssue); + streamPtr->dictSize += (U32)inputSize; + streamPtr->currentOffset += (U32)inputSize; + return result; + } + + /* external dictionary mode */ + { + int result; + if ((streamPtr->dictSize < 64 KB) && (streamPtr->dictSize < streamPtr->currentOffset)) + result = LZ4_compress_generic(LZ4_stream, source, dest, inputSize, maxOutputSize, limit, byU32, usingExtDict, dictSmall); + else + result = LZ4_compress_generic(LZ4_stream, source, dest, inputSize, maxOutputSize, limit, byU32, usingExtDict, noDictIssue); + streamPtr->dictionary = (const BYTE*)source; + streamPtr->dictSize = (U32)inputSize; + streamPtr->currentOffset += (U32)inputSize; + return result; + } +} + +int LZ4_compress_continue (LZ4_stream_t* LZ4_stream, const char* source, char* dest, int inputSize) +{ + return LZ4_compress_continue_generic(LZ4_stream, source, dest, inputSize, 0, notLimited); +} + +int LZ4_compress_limitedOutput_continue (LZ4_stream_t* LZ4_stream, const char* source, char* dest, int inputSize, int maxOutputSize) +{ + return LZ4_compress_continue_generic(LZ4_stream, source, dest, inputSize, maxOutputSize, limitedOutput); +} + + +/* Hidden debug function, to force separate dictionary mode */ +int LZ4_compress_forceExtDict (LZ4_stream_t* LZ4_dict, const char* source, char* dest, int inputSize) +{ + LZ4_stream_t_internal* streamPtr = (LZ4_stream_t_internal*)LZ4_dict; + int result; + const BYTE* const dictEnd = streamPtr->dictionary + streamPtr->dictSize; + + const BYTE* smallest = dictEnd; + if (smallest > (const BYTE*) source) smallest = (const BYTE*) source; + LZ4_renormDictT((LZ4_stream_t_internal*)LZ4_dict, smallest); + + result = LZ4_compress_generic(LZ4_dict, source, dest, inputSize, 0, notLimited, byU32, usingExtDict, noDictIssue); + + streamPtr->dictionary = (const BYTE*)source; + streamPtr->dictSize = (U32)inputSize; + streamPtr->currentOffset += (U32)inputSize; + + return result; +} + + +int LZ4_saveDict (LZ4_stream_t* LZ4_dict, char* safeBuffer, int dictSize) +{ + LZ4_stream_t_internal* dict = (LZ4_stream_t_internal*) LZ4_dict; + const BYTE* previousDictEnd = dict->dictionary + dict->dictSize; + + if ((U32)dictSize > 64 KB) dictSize = 64 KB; /* useless to define a dictionary > 64 KB */ + if ((U32)dictSize > dict->dictSize) dictSize = dict->dictSize; + + memmove(safeBuffer, previousDictEnd - dictSize, dictSize); + + dict->dictionary = (const BYTE*)safeBuffer; + dict->dictSize = (U32)dictSize; + + return dictSize; +} + + + +/**************************** + Decompression functions +****************************/ +/* + * This generic decompression function cover all use cases. + * It shall be instantiated several times, using different sets of directives + * Note that it is essential this generic function is really inlined, + * in order to remove useless branches during compilation optimization. + */ +FORCE_INLINE int LZ4_decompress_generic( + const char* const source, + char* const dest, + int inputSize, + int outputSize, /* If endOnInput==endOnInputSize, this value is the max size of Output Buffer. 
*/ + + int endOnInput, /* endOnOutputSize, endOnInputSize */ + int partialDecoding, /* full, partial */ + int targetOutputSize, /* only used if partialDecoding==partial */ + int dict, /* noDict, withPrefix64k, usingExtDict */ + const BYTE* const lowPrefix, /* == dest if dict == noDict */ + const BYTE* const dictStart, /* only if dict==usingExtDict */ + const size_t dictSize /* note : = 0 if noDict */ + ) +{ + /* Local Variables */ + const BYTE* restrict ip = (const BYTE*) source; + const BYTE* const iend = ip + inputSize; + + BYTE* op = (BYTE*) dest; + BYTE* const oend = op + outputSize; + BYTE* cpy; + BYTE* oexit = op + targetOutputSize; + const BYTE* const lowLimit = lowPrefix - dictSize; + + const BYTE* const dictEnd = (const BYTE*)dictStart + dictSize; + const size_t dec32table[] = {4, 1, 2, 1, 4, 4, 4, 4}; + const size_t dec64table[] = {0, 0, 0, (size_t)-1, 0, 1, 2, 3}; + + const int safeDecode = (endOnInput==endOnInputSize); + const int checkOffset = ((safeDecode) && (dictSize < (int)(64 KB))); + + + /* Special cases */ + if ((partialDecoding) && (oexit> oend-MFLIMIT)) oexit = oend-MFLIMIT; /* targetOutputSize too high => decode everything */ + if ((endOnInput) && (unlikely(outputSize==0))) return ((inputSize==1) && (*ip==0)) ? 0 : -1; /* Empty output buffer */ + if ((!endOnInput) && (unlikely(outputSize==0))) return (*ip==0?1:-1); + + + /* Main Loop */ + while (1) + { + unsigned token; + size_t length; + const BYTE* match; + + /* get literal length */ + token = *ip++; + if ((length=(token>>ML_BITS)) == RUN_MASK) + { + unsigned s; + do + { + s = *ip++; + length += s; + } + while (likely((endOnInput)?ip(partialDecoding?oexit:oend-MFLIMIT)) || (ip+length>iend-(2+1+LASTLITERALS))) ) + || ((!endOnInput) && (cpy>oend-COPYLENGTH))) + { + if (partialDecoding) + { + if (cpy > oend) goto _output_error; /* Error : write attempt beyond end of output buffer */ + if ((endOnInput) && (ip+length > iend)) goto _output_error; /* Error : read attempt beyond end of input buffer */ + } + else + { + if ((!endOnInput) && (cpy != oend)) goto _output_error; /* Error : block decoding must stop exactly there */ + if ((endOnInput) && ((ip+length != iend) || (cpy > oend))) goto _output_error; /* Error : input must be consumed */ + } + memcpy(op, ip, length); + ip += length; + op += length; + break; /* Necessarily EOF, due to parsing restrictions */ + } + LZ4_wildCopy(op, ip, cpy); + ip += length; op = cpy; + + /* get offset */ + match = cpy - LZ4_readLE16(ip); ip+=2; + if ((checkOffset) && (unlikely(match < lowLimit))) goto _output_error; /* Error : offset outside destination buffer */ + + /* get matchlength */ + length = token & ML_MASK; + if (length == ML_MASK) + { + unsigned s; + do + { + if ((endOnInput) && (ip > iend-LASTLITERALS)) goto _output_error; + s = *ip++; + length += s; + } while (s==255); + if ((safeDecode) && unlikely((size_t)(op+length)<(size_t)op)) goto _output_error; /* overflow detection */ + } + length += MINMATCH; + + /* check external dictionary */ + if ((dict==usingExtDict) && (match < lowPrefix)) + { + if (unlikely(op+length > oend-LASTLITERALS)) goto _output_error; /* doesn't respect parsing restriction */ + + if (length <= (size_t)(lowPrefix-match)) + { + /* match can be copied as a single segment from external dictionary */ + match = dictEnd - (lowPrefix-match); + memcpy(op, match, length); + op += length; + } + else + { + /* match encompass external dictionary and current segment */ + size_t copySize = (size_t)(lowPrefix-match); + memcpy(op, dictEnd - copySize, copySize); + op += 
copySize; + copySize = length - copySize; + if (copySize > (size_t)(op-lowPrefix)) /* overlap within current segment */ + { + BYTE* const endOfMatch = op + copySize; + const BYTE* copyFrom = lowPrefix; + while (op < endOfMatch) *op++ = *copyFrom++; + } + else + { + memcpy(op, lowPrefix, copySize); + op += copySize; + } + } + continue; + } + + /* copy repeated sequence */ + cpy = op + length; + if (unlikely((op-match)<8)) + { + const size_t dec64 = dec64table[op-match]; + op[0] = match[0]; + op[1] = match[1]; + op[2] = match[2]; + op[3] = match[3]; + match += dec32table[op-match]; + LZ4_copy4(op+4, match); + op += 8; match -= dec64; + } else { LZ4_copy8(op, match); op+=8; match+=8; } + + if (unlikely(cpy>oend-12)) + { + if (cpy > oend-LASTLITERALS) goto _output_error; /* Error : last LASTLITERALS bytes must be literals */ + if (op < oend-8) + { + LZ4_wildCopy(op, match, oend-8); + match += (oend-8) - op; + op = oend-8; + } + while (opprefixSize = (size_t) dictSize; + lz4sd->prefixEnd = (BYTE*) dictionary + dictSize; + lz4sd->externalDict = NULL; + lz4sd->extDictSize = 0; + return 1; +} + +/* +*_continue() : + These decoding functions allow decompression of multiple blocks in "streaming" mode. + Previously decoded blocks must still be available at the memory position where they were decoded. + If it's not possible, save the relevant part of decoded data into a safe buffer, + and indicate where it stands using LZ4_setStreamDecode() +*/ +int LZ4_decompress_safe_continue (LZ4_streamDecode_t* LZ4_streamDecode, const char* source, char* dest, int compressedSize, int maxOutputSize) +{ + LZ4_streamDecode_t_internal* lz4sd = (LZ4_streamDecode_t_internal*) LZ4_streamDecode; + int result; + + if (lz4sd->prefixEnd == (BYTE*)dest) + { + result = LZ4_decompress_generic(source, dest, compressedSize, maxOutputSize, + endOnInputSize, full, 0, + usingExtDict, lz4sd->prefixEnd - lz4sd->prefixSize, lz4sd->externalDict, lz4sd->extDictSize); + if (result <= 0) return result; + lz4sd->prefixSize += result; + lz4sd->prefixEnd += result; + } + else + { + lz4sd->extDictSize = lz4sd->prefixSize; + lz4sd->externalDict = lz4sd->prefixEnd - lz4sd->extDictSize; + result = LZ4_decompress_generic(source, dest, compressedSize, maxOutputSize, + endOnInputSize, full, 0, + usingExtDict, (BYTE*)dest, lz4sd->externalDict, lz4sd->extDictSize); + if (result <= 0) return result; + lz4sd->prefixSize = result; + lz4sd->prefixEnd = (BYTE*)dest + result; + } + + return result; +} + +int LZ4_decompress_fast_continue (LZ4_streamDecode_t* LZ4_streamDecode, const char* source, char* dest, int originalSize) +{ + LZ4_streamDecode_t_internal* lz4sd = (LZ4_streamDecode_t_internal*) LZ4_streamDecode; + int result; + + if (lz4sd->prefixEnd == (BYTE*)dest) + { + result = LZ4_decompress_generic(source, dest, 0, originalSize, + endOnOutputSize, full, 0, + usingExtDict, lz4sd->prefixEnd - lz4sd->prefixSize, lz4sd->externalDict, lz4sd->extDictSize); + if (result <= 0) return result; + lz4sd->prefixSize += originalSize; + lz4sd->prefixEnd += originalSize; + } + else + { + lz4sd->extDictSize = lz4sd->prefixSize; + lz4sd->externalDict = (BYTE*)dest - lz4sd->extDictSize; + result = LZ4_decompress_generic(source, dest, 0, originalSize, + endOnOutputSize, full, 0, + usingExtDict, (BYTE*)dest, lz4sd->externalDict, lz4sd->extDictSize); + if (result <= 0) return result; + lz4sd->prefixSize = originalSize; + lz4sd->prefixEnd = (BYTE*)dest + originalSize; + } + + return result; +} + + +/* +Advanced decoding functions : +*_usingDict() : + These decoding 
functions work the same as "_continue" ones, + the dictionary must be explicitly provided within parameters +*/ + +FORCE_INLINE int LZ4_decompress_usingDict_generic(const char* source, char* dest, int compressedSize, int maxOutputSize, int safe, const char* dictStart, int dictSize) +{ + if (dictSize==0) + return LZ4_decompress_generic(source, dest, compressedSize, maxOutputSize, safe, full, 0, noDict, (BYTE*)dest, NULL, 0); + if (dictStart+dictSize == dest) + { + if (dictSize >= (int)(64 KB - 1)) + return LZ4_decompress_generic(source, dest, compressedSize, maxOutputSize, safe, full, 0, withPrefix64k, (BYTE*)dest-64 KB, NULL, 0); + return LZ4_decompress_generic(source, dest, compressedSize, maxOutputSize, safe, full, 0, noDict, (BYTE*)dest-dictSize, NULL, 0); + } + return LZ4_decompress_generic(source, dest, compressedSize, maxOutputSize, safe, full, 0, usingExtDict, (BYTE*)dest, (BYTE*)dictStart, dictSize); +} + +int LZ4_decompress_safe_usingDict(const char* source, char* dest, int compressedSize, int maxOutputSize, const char* dictStart, int dictSize) +{ + return LZ4_decompress_usingDict_generic(source, dest, compressedSize, maxOutputSize, 1, dictStart, dictSize); +} + +int LZ4_decompress_fast_usingDict(const char* source, char* dest, int originalSize, const char* dictStart, int dictSize) +{ + return LZ4_decompress_usingDict_generic(source, dest, 0, originalSize, 0, dictStart, dictSize); +} + +/* debug function */ +int LZ4_decompress_safe_forceExtDict(const char* source, char* dest, int compressedSize, int maxOutputSize, const char* dictStart, int dictSize) +{ + return LZ4_decompress_generic(source, dest, compressedSize, maxOutputSize, endOnInputSize, full, 0, usingExtDict, (BYTE*)dest, (BYTE*)dictStart, dictSize); +} + + +/*************************************************** + Obsolete Functions +***************************************************/ +/* +These function names are deprecated and should no longer be used. +They are only provided here for compatibility with older user programs. 
+- LZ4_uncompress is totally equivalent to LZ4_decompress_fast +- LZ4_uncompress_unknownOutputSize is totally equivalent to LZ4_decompress_safe +*/ +int LZ4_uncompress (const char* source, char* dest, int outputSize) { return LZ4_decompress_fast(source, dest, outputSize); } +int LZ4_uncompress_unknownOutputSize (const char* source, char* dest, int isize, int maxOutputSize) { return LZ4_decompress_safe(source, dest, isize, maxOutputSize); } + + +/* Obsolete Streaming functions */ + +int LZ4_sizeofStreamState() { return LZ4_STREAMSIZE; } + +static void LZ4_init(LZ4_stream_t_internal* lz4ds, const BYTE* base) +{ + MEM_INIT(lz4ds, 0, LZ4_STREAMSIZE); + lz4ds->bufferStart = base; +} + +int LZ4_resetStreamState(void* state, const char* inputBuffer) +{ + if ((((size_t)state) & 3) != 0) return 1; /* Error : pointer is not aligned on 4-bytes boundary */ + LZ4_init((LZ4_stream_t_internal*)state, (const BYTE*)inputBuffer); + return 0; +} + +void* LZ4_create (const char* inputBuffer) +{ + void* lz4ds = ALLOCATOR(8, LZ4_STREAMSIZE_U64); + LZ4_init ((LZ4_stream_t_internal*)lz4ds, (const BYTE*)inputBuffer); + return lz4ds; +} + +char* LZ4_slideInputBuffer (void* LZ4_Data) +{ + LZ4_stream_t_internal* ctx = (LZ4_stream_t_internal*)LZ4_Data; + int dictSize = LZ4_saveDict((LZ4_stream_t*)ctx, (char*)ctx->bufferStart, 64 KB); + return (char*)(ctx->bufferStart + dictSize); +} + +/* Obsolete compresson functions using User-allocated state */ + +int LZ4_sizeofState() { return LZ4_STREAMSIZE; } + +int LZ4_compress_withState (void* state, const char* source, char* dest, int inputSize) +{ + if (((size_t)(state)&3) != 0) return 0; /* Error : state is not aligned on 4-bytes boundary */ + MEM_INIT(state, 0, LZ4_STREAMSIZE); + + if (inputSize < LZ4_64Klimit) + return LZ4_compress_generic(state, source, dest, inputSize, 0, notLimited, byU16, noDict, noDictIssue); + else + return LZ4_compress_generic(state, source, dest, inputSize, 0, notLimited, LZ4_64bits() ? byU32 : byPtr, noDict, noDictIssue); +} + +int LZ4_compress_limitedOutput_withState (void* state, const char* source, char* dest, int inputSize, int maxOutputSize) +{ + if (((size_t)(state)&3) != 0) return 0; /* Error : state is not aligned on 4-bytes boundary */ + MEM_INIT(state, 0, LZ4_STREAMSIZE); + + if (inputSize < LZ4_64Klimit) + return LZ4_compress_generic(state, source, dest, inputSize, maxOutputSize, limitedOutput, byU16, noDict, noDictIssue); + else + return LZ4_compress_generic(state, source, dest, inputSize, maxOutputSize, limitedOutput, LZ4_64bits() ? byU32 : byPtr, noDict, noDictIssue); +} + +/* Obsolete streaming decompression functions */ + +int LZ4_decompress_safe_withPrefix64k(const char* source, char* dest, int compressedSize, int maxOutputSize) +{ + return LZ4_decompress_generic(source, dest, compressedSize, maxOutputSize, endOnInputSize, full, 0, withPrefix64k, (BYTE*)dest - 64 KB, NULL, 64 KB); +} + +int LZ4_decompress_fast_withPrefix64k(const char* source, char* dest, int originalSize) +{ + return LZ4_decompress_generic(source, dest, 0, originalSize, endOnOutputSize, full, 0, withPrefix64k, (BYTE*)dest - 64 KB, NULL, 64 KB); +} + +#endif /* LZ4_COMMONDEFS_ONLY */ + diff --git a/vendor/src/github.com/cloudflare/golz4/src/lz4.h b/vendor/src/github.com/cloudflare/golz4/src/lz4.h new file mode 100644 index 0000000000000..7778caad3aeec --- /dev/null +++ b/vendor/src/github.com/cloudflare/golz4/src/lz4.h @@ -0,0 +1,315 @@ +/* + LZ4 - Fast LZ compression algorithm + Header File + Copyright (C) 2011-2014, Yann Collet. 
+ BSD 2-Clause License (http://www.opensource.org/licenses/bsd-license.php) + + Redistribution and use in source and binary forms, with or without + modification, are permitted provided that the following conditions are + met: + + * Redistributions of source code must retain the above copyright + notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above + copyright notice, this list of conditions and the following disclaimer + in the documentation and/or other materials provided with the + distribution. + + THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + + You can contact the author at : + - LZ4 source repository : http://code.google.com/p/lz4/ + - LZ4 public forum : https://groups.google.com/forum/#!forum/lz4c +*/ +#pragma once + +#if defined (__cplusplus) +extern "C" { +#endif + +/* + * lz4.h provides raw compression format functions, for optimal performance and integration into programs. + * If you need to generate data using an inter-operable format (respecting the framing specification), + * please use lz4frame.h instead. +*/ + +/************************************** + Version +**************************************/ +#define LZ4_VERSION_MAJOR 1 /* for breaking interface changes */ +#define LZ4_VERSION_MINOR 5 /* for new (non-breaking) interface capabilities */ +#define LZ4_VERSION_RELEASE 0 /* for tweaks, bug-fixes, or development */ +#define LZ4_VERSION_NUMBER (LZ4_VERSION_MAJOR *100*100 + LZ4_VERSION_MINOR *100 + LZ4_VERSION_RELEASE) +int LZ4_versionNumber (void); + +/************************************** + Tuning parameter +**************************************/ +/* + * LZ4_MEMORY_USAGE : + * Memory usage formula : N->2^N Bytes (examples : 10 -> 1KB; 12 -> 4KB ; 16 -> 64KB; 20 -> 1MB; etc.) + * Increasing memory usage improves compression ratio + * Reduced memory usage can improve speed, due to cache effect + * Default value is 14, for 16KB, which nicely fits into Intel x86 L1 cache + */ +#define LZ4_MEMORY_USAGE 14 + + +/************************************** + Simple Functions +**************************************/ + +int LZ4_compress (const char* source, char* dest, int sourceSize); +int LZ4_decompress_safe (const char* source, char* dest, int compressedSize, int maxDecompressedSize); + +/* +LZ4_compress() : + Compresses 'sourceSize' bytes from 'source' into 'dest'. 
+ Destination buffer must be already allocated, + and must be sized to handle worst cases situations (input data not compressible) + Worst case size evaluation is provided by function LZ4_compressBound() + inputSize : Max supported value is LZ4_MAX_INPUT_SIZE + return : the number of bytes written in buffer dest + or 0 if the compression fails + +LZ4_decompress_safe() : + compressedSize : is obviously the source size + maxDecompressedSize : is the size of the destination buffer, which must be already allocated. + return : the number of bytes decompressed into the destination buffer (necessarily <= maxDecompressedSize) + If the destination buffer is not large enough, decoding will stop and output an error code (<0). + If the source stream is detected malformed, the function will stop decoding and return a negative result. + This function is protected against buffer overflow exploits, + and never writes outside of output buffer, nor reads outside of input buffer. + It is also protected against malicious data packets. +*/ + + +/************************************** + Advanced Functions +**************************************/ +#define LZ4_MAX_INPUT_SIZE 0x7E000000 /* 2 113 929 216 bytes */ +#define LZ4_COMPRESSBOUND(isize) ((unsigned int)(isize) > (unsigned int)LZ4_MAX_INPUT_SIZE ? 0 : (isize) + ((isize)/255) + 16) + +/* +LZ4_compressBound() : + Provides the maximum size that LZ4 compression may output in a "worst case" scenario (input data not compressible) + This function is primarily useful for memory allocation purposes (output buffer size). + Macro LZ4_COMPRESSBOUND() is also provided for compilation-time evaluation (stack memory allocation for example). + + isize : is the input size. Max supported value is LZ4_MAX_INPUT_SIZE + return : maximum output size in a "worst case" scenario + or 0, if input size is too large ( > LZ4_MAX_INPUT_SIZE) +*/ +int LZ4_compressBound(int isize); + + +/* +LZ4_compress_limitedOutput() : + Compress 'sourceSize' bytes from 'source' into an output buffer 'dest' of maximum size 'maxOutputSize'. + If it cannot achieve it, compression will stop, and result of the function will be zero. + This saves time and memory on detecting non-compressible (or barely compressible) data. + This function never writes outside of provided output buffer. + + sourceSize : Max supported value is LZ4_MAX_INPUT_VALUE + maxOutputSize : is the size of the destination buffer (which must be already allocated) + return : the number of bytes written in buffer 'dest' + or 0 if compression fails +*/ +int LZ4_compress_limitedOutput (const char* source, char* dest, int sourceSize, int maxOutputSize); + + +/* +LZ4_compress_withState() : + Same compression functions, but using an externally allocated memory space to store compression state. + Use LZ4_sizeofState() to know how much memory must be allocated, + and then, provide it as 'void* state' to compression functions. +*/ +int LZ4_sizeofState(void); +int LZ4_compress_withState (void* state, const char* source, char* dest, int inputSize); +int LZ4_compress_limitedOutput_withState (void* state, const char* source, char* dest, int inputSize, int maxOutputSize); + + +/* +LZ4_decompress_fast() : + originalSize : is the original and therefore uncompressed size + return : the number of bytes read from the source buffer (in other words, the compressed size) + If the source stream is detected malformed, the function will stop decoding and return a negative result. + Destination buffer must be already allocated. 
Its size must be a minimum of 'originalSize' bytes. + note : This function fully respect memory boundaries for properly formed compressed data. + It is a bit faster than LZ4_decompress_safe(). + However, it does not provide any protection against intentionally modified data stream (malicious input). + Use this function in trusted environment only (data to decode comes from a trusted source). +*/ +int LZ4_decompress_fast (const char* source, char* dest, int originalSize); + + +/* +LZ4_decompress_safe_partial() : + This function decompress a compressed block of size 'compressedSize' at position 'source' + into destination buffer 'dest' of size 'maxDecompressedSize'. + The function tries to stop decompressing operation as soon as 'targetOutputSize' has been reached, + reducing decompression time. + return : the number of bytes decoded in the destination buffer (necessarily <= maxDecompressedSize) + Note : this number can be < 'targetOutputSize' should the compressed block to decode be smaller. + Always control how many bytes were decoded. + If the source stream is detected malformed, the function will stop decoding and return a negative result. + This function never writes outside of output buffer, and never reads outside of input buffer. It is therefore protected against malicious data packets +*/ +int LZ4_decompress_safe_partial (const char* source, char* dest, int compressedSize, int targetOutputSize, int maxDecompressedSize); + + +/*********************************************** + Streaming Compression Functions +***********************************************/ + +#define LZ4_STREAMSIZE_U64 ((1 << (LZ4_MEMORY_USAGE-3)) + 4) +#define LZ4_STREAMSIZE (LZ4_STREAMSIZE_U64 * sizeof(long long)) +/* + * LZ4_stream_t + * information structure to track an LZ4 stream. + * important : init this structure content before first use ! + * note : only allocated directly the structure if you are statically linking LZ4 + * If you are using liblz4 as a DLL, please use below construction methods instead. + */ +typedef struct { long long table[LZ4_STREAMSIZE_U64]; } LZ4_stream_t; + +/* + * LZ4_resetStream + * Use this function to init an allocated LZ4_stream_t structure + */ +void LZ4_resetStream (LZ4_stream_t* LZ4_streamPtr); + +/* + * LZ4_createStream will allocate and initialize an LZ4_stream_t structure + * LZ4_freeStream releases its memory. + * In the context of a DLL (liblz4), please use these methods rather than the static struct. + * They are more future proof, in case of a change of LZ4_stream_t size. + */ +LZ4_stream_t* LZ4_createStream(void); +int LZ4_freeStream (LZ4_stream_t* LZ4_streamPtr); + +/* + * LZ4_loadDict + * Use this function to load a static dictionary into LZ4_stream. + * Any previous data will be forgotten, only 'dictionary' will remain in memory. + * Loading a size of 0 is allowed. + * Return : dictionary size, in bytes (necessarily <= 64 KB) + */ +int LZ4_loadDict (LZ4_stream_t* LZ4_streamPtr, const char* dictionary, int dictSize); + +/* + * LZ4_compress_continue + * Compress data block 'source', using blocks compressed before as dictionary to improve compression ratio + * Previous data blocks are assumed to still be present at their previous location. + */ +int LZ4_compress_continue (LZ4_stream_t* LZ4_streamPtr, const char* source, char* dest, int inputSize); + +/* + * LZ4_compress_limitedOutput_continue + * Same as before, but also specify a maximum target compressed size (maxOutputSize) + * If objective cannot be met, compression exits, and returns a zero. 
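+ *
+ * A brief usage sketch (illustrative only, not part of the original header;
+ * 'stream', 'src' and 'srcSize' are assumed to be prepared by the caller) :
+ *
+ * char dst[LZ4_COMPRESSBOUND(65536)];
+ * int written = LZ4_compress_limitedOutput_continue(&stream, src, dst, srcSize, (int)sizeof(dst));
+ * if (written == 0) { ... objective not met : handle the incompressible block ... }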
+ */ +int LZ4_compress_limitedOutput_continue (LZ4_stream_t* LZ4_streamPtr, const char* source, char* dest, int inputSize, int maxOutputSize); + +/* + * LZ4_saveDict + * If previously compressed data block is not guaranteed to remain available at its memory location + * save it into a safer place (char* safeBuffer) + * Note : you don't need to call LZ4_loadDict() afterwards, + * dictionary is immediately usable, you can therefore call again LZ4_compress_continue() + * Return : dictionary size in bytes, or 0 if error + * Note : any dictSize > 64 KB will be interpreted as 64KB. + */ +int LZ4_saveDict (LZ4_stream_t* LZ4_streamPtr, char* safeBuffer, int dictSize); + + +/************************************************ + Streaming Decompression Functions +************************************************/ + +#define LZ4_STREAMDECODESIZE_U64 4 +#define LZ4_STREAMDECODESIZE (LZ4_STREAMDECODESIZE_U64 * sizeof(unsigned long long)) +typedef struct { unsigned long long table[LZ4_STREAMDECODESIZE_U64]; } LZ4_streamDecode_t; +/* + * LZ4_streamDecode_t + * information structure to track an LZ4 stream. + * init this structure content using LZ4_setStreamDecode or memset() before first use ! + * + * In the context of a DLL (liblz4) please prefer usage of construction methods below. + * They are more future proof, in case of a change of LZ4_streamDecode_t size in the future. + * LZ4_createStreamDecode will allocate and initialize an LZ4_streamDecode_t structure + * LZ4_freeStreamDecode releases its memory. + */ +LZ4_streamDecode_t* LZ4_createStreamDecode(void); +int LZ4_freeStreamDecode (LZ4_streamDecode_t* LZ4_stream); + +/* + * LZ4_setStreamDecode + * Use this function to instruct where to find the dictionary. + * Setting a size of 0 is allowed (same effect as reset). + * Return : 1 if OK, 0 if error + */ +int LZ4_setStreamDecode (LZ4_streamDecode_t* LZ4_streamDecode, const char* dictionary, int dictSize); + +/* +*_continue() : + These decoding functions allow decompression of multiple blocks in "streaming" mode. + Previously decoded blocks *must* remain available at the memory position where they were decoded (up to 64 KB) + If this condition is not possible, save the relevant part of decoded data into a safe buffer, + and indicate where is its new address using LZ4_setStreamDecode() +*/ +int LZ4_decompress_safe_continue (LZ4_streamDecode_t* LZ4_streamDecode, const char* source, char* dest, int compressedSize, int maxDecompressedSize); +int LZ4_decompress_fast_continue (LZ4_streamDecode_t* LZ4_streamDecode, const char* source, char* dest, int originalSize); + + +/* +Advanced decoding functions : +*_usingDict() : + These decoding functions work the same as + a combination of LZ4_setDictDecode() followed by LZ4_decompress_x_continue() + They are stand-alone and don't use nor update an LZ4_streamDecode_t structure. +*/ +int LZ4_decompress_safe_usingDict (const char* source, char* dest, int compressedSize, int maxDecompressedSize, const char* dictStart, int dictSize); +int LZ4_decompress_fast_usingDict (const char* source, char* dest, int originalSize, const char* dictStart, int dictSize); + + + +/************************************** + Obsolete Functions +**************************************/ +/* +Obsolete decompression functions +These function names are deprecated and should no longer be used. +They are only provided here for compatibility with older user programs. 
+- LZ4_uncompress is the same as LZ4_decompress_fast +- LZ4_uncompress_unknownOutputSize is the same as LZ4_decompress_safe +These function prototypes are now disabled; uncomment them if you really need them. +It is highly recommended to stop using these functions and migrate to newer ones */ +/* int LZ4_uncompress (const char* source, char* dest, int outputSize); */ +/* int LZ4_uncompress_unknownOutputSize (const char* source, char* dest, int isize, int maxOutputSize); */ + + +/* Obsolete streaming functions; use new streaming interface whenever possible */ +void* LZ4_create (const char* inputBuffer); +int LZ4_sizeofStreamState(void); +int LZ4_resetStreamState(void* state, const char* inputBuffer); +char* LZ4_slideInputBuffer (void* state); + +/* Obsolete streaming decoding functions */ +int LZ4_decompress_safe_withPrefix64k (const char* source, char* dest, int compressedSize, int maxOutputSize); +int LZ4_decompress_fast_withPrefix64k (const char* source, char* dest, int originalSize); + + +#if defined (__cplusplus) +} +#endif diff --git a/vendor/src/github.com/cloudflare/golz4/src/lz4hc.c b/vendor/src/github.com/cloudflare/golz4/src/lz4hc.c new file mode 100644 index 0000000000000..5549969c0c2f8 --- /dev/null +++ b/vendor/src/github.com/cloudflare/golz4/src/lz4hc.c @@ -0,0 +1,751 @@ +/* +LZ4 HC - High Compression Mode of LZ4 +Copyright (C) 2011-2014, Yann Collet. +BSD 2-Clause License (http://www.opensource.org/licenses/bsd-license.php) + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + +* Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. +* Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with the +distribution. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ +You can contact the author at : +- LZ4 homepage : http://fastcompression.blogspot.com/p/lz4.html +- LZ4 source repository : http://code.google.com/p/lz4/ +*/ + + + +/************************************** + Tuning Parameter +**************************************/ +static const int LZ4HC_compressionLevel_default = 8; + + +/************************************** + Includes +**************************************/ +#include "lz4hc.h" + + +/************************************** + Local Compiler Options +**************************************/ +#if defined(__GNUC__) +# pragma GCC diagnostic ignored "-Wunused-function" +#endif + +#if defined (__clang__) +# pragma clang diagnostic ignored "-Wunused-function" +#endif + + +/************************************** + Common LZ4 definition +**************************************/ +#define LZ4_COMMONDEFS_ONLY +#include "lz4.c" + + +/************************************** + Local Constants +**************************************/ +#define DICTIONARY_LOGSIZE 16 +#define MAXD (1<<DICTIONARY_LOGSIZE) +#define MAXD_MASK ((U32)(MAXD - 1)) + +#define HASH_LOG (DICTIONARY_LOGSIZE-1) +#define HASHTABLESIZE (1 << HASH_LOG) +#define HASH_MASK (HASHTABLESIZE - 1) + +#define OPTIMAL_ML (int)((ML_MASK-1)+MINMATCH) + +static const int g_maxCompressionLevel = 16; + + +/************************************** + Local Types +**************************************/ +typedef struct +{ + U32 hashTable[HASHTABLESIZE]; + U16 chainTable[MAXD]; + const BYTE* end; /* next block here to continue on current prefix */ + const BYTE* base; /* All index relative to this position */ + const BYTE* dictBase; /* alternate base for extDict */ + const BYTE* inputBuffer; /* deprecated */ + U32 dictLimit; /* below that point, need extDict */ + U32 lowLimit; /* below that point, no more dict */ + U32 nextToUpdate; /* index from which to continue dictionary update */ + U32 compressionLevel; +} LZ4HC_Data_Structure; + + +/************************************** + Local Macros +**************************************/ +#define HASH_FUNCTION(i) (((i) * 2654435761U) >> ((MINMATCH*8)-HASH_LOG)) +#define DELTANEXT(p) chainTable[(size_t)(p) & MAXD_MASK] +#define GETNEXT(p) ((p) - (size_t)DELTANEXT(p)) + +static U32 LZ4HC_hashPtr(const void* ptr) { return HASH_FUNCTION(LZ4_read32(ptr)); } + + + +/************************************** + HC Compression +**************************************/ +static void LZ4HC_init (LZ4HC_Data_Structure* hc4, const BYTE* start) +{ + MEM_INIT((void*)hc4->hashTable, 0, sizeof(hc4->hashTable)); + MEM_INIT(hc4->chainTable, 0xFF, sizeof(hc4->chainTable)); + hc4->nextToUpdate = 64 KB; + hc4->base = start - 64 KB; + hc4->inputBuffer = start; + hc4->end = start; + hc4->dictBase = start - 64 KB; + hc4->dictLimit = 64 KB; + hc4->lowLimit = 64 KB; +} + + +/* Update chains up to ip (excluded) */ +FORCE_INLINE void LZ4HC_Insert (LZ4HC_Data_Structure* hc4, const BYTE* ip) +{ + U16* chainTable = hc4->chainTable; + U32* HashTable = hc4->hashTable; + const BYTE* const base = hc4->base; + const U32 target = (U32)(ip - base); + U32 idx = hc4->nextToUpdate; + + while(idx < target) + { + U32 h = LZ4HC_hashPtr(base+idx); + size_t delta = idx - HashTable[h]; + if (delta>MAX_DISTANCE) delta = MAX_DISTANCE; + chainTable[idx & 0xFFFF] = (U16)delta; + HashTable[h] = idx; + idx++; + } + + hc4->nextToUpdate = target; +} + + +FORCE_INLINE int LZ4HC_InsertAndFindBestMatch (LZ4HC_Data_Structure* hc4, /* Index table will be updated */ + const BYTE* ip, const BYTE* const iLimit, + const BYTE** matchpos, + const int maxNbAttempts) +{ + U16* const chainTable = hc4->chainTable; + U32* const HashTable = hc4->hashTable; + const BYTE* const base = hc4->base; + const BYTE* const dictBase = hc4->dictBase; + const U32 dictLimit = hc4->dictLimit; + const U32 lowLimit = (hc4->lowLimit + 64 KB > (U32)(ip-base)) ?
hc4->lowLimit : (U32)(ip - base) - (64 KB - 1); + U32 matchIndex; + const BYTE* match; + int nbAttempts=maxNbAttempts; + size_t ml=0; + + /* HC4 match finder */ + LZ4HC_Insert(hc4, ip); + matchIndex = HashTable[LZ4HC_hashPtr(ip)]; + + while ((matchIndex>=lowLimit) && (nbAttempts)) + { + nbAttempts--; + if (matchIndex >= dictLimit) + { + match = base + matchIndex; + if (*(match+ml) == *(ip+ml) + && (LZ4_read32(match) == LZ4_read32(ip))) + { + size_t mlt = LZ4_count(ip+MINMATCH, match+MINMATCH, iLimit) + MINMATCH; + if (mlt > ml) { ml = mlt; *matchpos = match; } + } + } + else + { + match = dictBase + matchIndex; + if (LZ4_read32(match) == LZ4_read32(ip)) + { + size_t mlt; + const BYTE* vLimit = ip + (dictLimit - matchIndex); + if (vLimit > iLimit) vLimit = iLimit; + mlt = LZ4_count(ip+MINMATCH, match+MINMATCH, vLimit) + MINMATCH; + if ((ip+mlt == vLimit) && (vLimit < iLimit)) + mlt += LZ4_count(ip+mlt, base+dictLimit, iLimit); + if (mlt > ml) { ml = mlt; *matchpos = base + matchIndex; } /* virtual matchpos */ + } + } + matchIndex -= chainTable[matchIndex & 0xFFFF]; + } + + return (int)ml; +} + + +FORCE_INLINE int LZ4HC_InsertAndGetWiderMatch ( + LZ4HC_Data_Structure* hc4, + const BYTE* ip, + const BYTE* iLowLimit, + const BYTE* iHighLimit, + int longest, + const BYTE** matchpos, + const BYTE** startpos, + const int maxNbAttempts) +{ + U16* const chainTable = hc4->chainTable; + U32* const HashTable = hc4->hashTable; + const BYTE* const base = hc4->base; + const U32 dictLimit = hc4->dictLimit; + const U32 lowLimit = (hc4->lowLimit + 64 KB > (U32)(ip-base)) ? hc4->lowLimit : (U32)(ip - base) - (64 KB - 1); + const BYTE* const dictBase = hc4->dictBase; + const BYTE* match; + U32 matchIndex; + int nbAttempts = maxNbAttempts; + int delta = (int)(ip-iLowLimit); + + + /* First Match */ + LZ4HC_Insert(hc4, ip); + matchIndex = HashTable[LZ4HC_hashPtr(ip)]; + + while ((matchIndex>=lowLimit) && (nbAttempts)) + { + nbAttempts--; + if (matchIndex >= dictLimit) + { + match = base + matchIndex; + if (*(iLowLimit + longest) == *(match - delta + longest)) + if (LZ4_read32(match) == LZ4_read32(ip)) + { + const BYTE* startt = ip; + const BYTE* tmpMatch = match; + const BYTE* const matchEnd = ip + MINMATCH + LZ4_count(ip+MINMATCH, match+MINMATCH, iHighLimit); + + while ((startt>iLowLimit) && (tmpMatch > iLowLimit) && (startt[-1] == tmpMatch[-1])) {startt--; tmpMatch--;} + + if ((matchEnd-startt) > longest) + { + longest = (int)(matchEnd-startt); + *matchpos = tmpMatch; + *startpos = startt; + } + } + } + else + { + match = dictBase + matchIndex; + if (LZ4_read32(match) == LZ4_read32(ip)) + { + size_t mlt; + int back=0; + const BYTE* vLimit = ip + (dictLimit - matchIndex); + if (vLimit > iHighLimit) vLimit = iHighLimit; + mlt = LZ4_count(ip+MINMATCH, match+MINMATCH, vLimit) + MINMATCH; + if ((ip+mlt == vLimit) && (vLimit < iHighLimit)) + mlt += LZ4_count(ip+mlt, base+dictLimit, iHighLimit); + while ((ip+back > iLowLimit) && (matchIndex+back > lowLimit) && (ip[back-1] == match[back-1])) back--; + mlt -= back; + if ((int)mlt > longest) { longest = (int)mlt; *matchpos = base + matchIndex + back; *startpos = ip+back; } + } + } + matchIndex -= chainTable[matchIndex & 0xFFFF]; + } + + return longest; +} + + +typedef enum { noLimit = 0, limitedOutput = 1 } limitedOutput_directive; + +#define LZ4HC_DEBUG 0 +#if LZ4HC_DEBUG +static unsigned debug = 0; +#endif + +FORCE_INLINE int LZ4HC_encodeSequence ( + const BYTE** ip, + BYTE** op, + const BYTE** anchor, + int matchLength, + const BYTE* const match, + 
limitedOutput_directive limitedOutputBuffer, + BYTE* oend) +{ + int length; + BYTE* token; + +#if LZ4HC_DEBUG + if (debug) printf("literal : %u -- match : %u -- offset : %u\n", (U32)(*ip - *anchor), (U32)matchLength, (U32)(*ip-match)); +#endif + + /* Encode Literal length */ + length = (int)(*ip - *anchor); + token = (*op)++; + if ((limitedOutputBuffer) && ((*op + (length>>8) + length + (2 + 1 + LASTLITERALS)) > oend)) return 1; /* Check output limit */ + if (length>=(int)RUN_MASK) { int len; *token=(RUN_MASK<<ML_BITS); len = length-RUN_MASK; for(; len > 254 ; len-=255) *(*op)++ = 255; *(*op)++ = (BYTE)len; } + else *token = (BYTE)(length<<ML_BITS); + + /* Copy Literals */ + LZ4_wildCopy(*op, *anchor, (*op) + length); + *op += length; + + /* Encode Offset */ + LZ4_writeLE16(*op, (U16)(*ip-match)); *op += 2; + + /* Encode MatchLength */ + length = (int)(matchLength-MINMATCH); + if ((limitedOutputBuffer) && (*op + (length>>8) + (1 + LASTLITERALS) > oend)) return 1; /* Check output limit */ + if (length>=(int)ML_MASK) { *token+=ML_MASK; length-=ML_MASK; for(; length > 509 ; length-=510) { *(*op)++ = 255; *(*op)++ = 255; } if (length > 254) { length-=255; *(*op)++ = 255; } *(*op)++ = (BYTE)length; } + else *token += (BYTE)(length); + + /* Prepare next loop */ + *ip += matchLength; + *anchor = *ip; + + return 0; +} + + +static int LZ4HC_compress_generic ( + void* ctxvoid, + const char* source, + char* dest, + int inputSize, + int maxOutputSize, + int compressionLevel, + limitedOutput_directive limit + ) +{ + LZ4HC_Data_Structure* ctx = (LZ4HC_Data_Structure*) ctxvoid; + const BYTE* ip = (const BYTE*) source; + const BYTE* anchor = ip; + const BYTE* const iend = ip + inputSize; + const BYTE* const mflimit = iend - MFLIMIT; + const BYTE* const matchlimit = (iend - LASTLITERALS); + + BYTE* op = (BYTE*) dest; + BYTE* const oend = op + maxOutputSize; + + unsigned maxNbAttempts; + int ml, ml2, ml3, ml0; + const BYTE* ref=NULL; + const BYTE* start2=NULL; + const BYTE* ref2=NULL; + const BYTE* start3=NULL; + const BYTE* ref3=NULL; + const BYTE* start0; + const BYTE* ref0; + + + /* init */ + if (compressionLevel > g_maxCompressionLevel) compressionLevel = g_maxCompressionLevel; + if (compressionLevel < 1) compressionLevel = LZ4HC_compressionLevel_default; + maxNbAttempts = 1 << (compressionLevel-1); + ctx->end += inputSize; + + ip++; + + /* Main Loop */ + while (ip < mflimit) + { + ml = LZ4HC_InsertAndFindBestMatch (ctx, ip, matchlimit, (&ref), maxNbAttempts); + if (!ml) { ip++; continue; } + + /* saved, in case we would skip too much */ + start0 = ip; + ref0 = ref; + ml0 = ml; + +_Search2: + if (ip+ml < mflimit) + ml2 = LZ4HC_InsertAndGetWiderMatch(ctx, ip + ml - 2, ip + 1, matchlimit, ml, &ref2, &start2, maxNbAttempts); + else ml2 = ml; + + if (ml2 == ml) /* No better match */ + { + if (LZ4HC_encodeSequence(&ip, &op, &anchor, ml, ref, limit, oend)) return 0; + continue; + } + + if (start0 < ip) + { + if (start2 < ip + ml0) /* empirical */ + { + ip = start0; + ref = ref0; + ml = ml0; + } + } + + /* Here, start0==ip */ + if ((start2 - ip) < 3) /* First Match too small : removed */ + { + ml = ml2; + ip = start2; + ref =ref2; + goto _Search2; + } + +_Search3: + /* + * Currently we have : + * ml2 > ml1, and + * ip1+3 <= ip2 (usually < ip1+ml1) + */ + if ((start2 - ip) < OPTIMAL_ML) + { + int correction; + int new_ml = ml; + if (new_ml > OPTIMAL_ML) new_ml = OPTIMAL_ML; + if (ip+new_ml > start2 + ml2 - MINMATCH) new_ml = (int)(start2 - ip) + ml2 - MINMATCH; + correction = new_ml - (int)(start2 - ip); + if (correction > 0) + { + start2 += correction; + ref2 += correction; + ml2 -= correction; + } + } + /* Now, we have start2 = ip+new_ml, with new_ml = min(ml, OPTIMAL_ML=18) */ + + if (start2 + ml2 < mflimit) + ml3 = LZ4HC_InsertAndGetWiderMatch(ctx, start2 + ml2 - 3, start2, matchlimit, ml2, &ref3, &start3, maxNbAttempts); + else ml3
= ml2; + + if (ml3 == ml2) /* No better match : 2 sequences to encode */ + { + /* ip & ref are known; Now for ml */ + if (start2 < ip+ml) ml = (int)(start2 - ip); + /* Now, encode 2 sequences */ + if (LZ4HC_encodeSequence(&ip, &op, &anchor, ml, ref, limit, oend)) return 0; + ip = start2; + if (LZ4HC_encodeSequence(&ip, &op, &anchor, ml2, ref2, limit, oend)) return 0; + continue; + } + + if (start3 < ip+ml+3) /* Not enough space for match 2 : remove it */ + { + if (start3 >= (ip+ml)) /* can write Seq1 immediately ==> Seq2 is removed, so Seq3 becomes Seq1 */ + { + if (start2 < ip+ml) + { + int correction = (int)(ip+ml - start2); + start2 += correction; + ref2 += correction; + ml2 -= correction; + if (ml2 < MINMATCH) + { + start2 = start3; + ref2 = ref3; + ml2 = ml3; + } + } + + if (LZ4HC_encodeSequence(&ip, &op, &anchor, ml, ref, limit, oend)) return 0; + ip = start3; + ref = ref3; + ml = ml3; + + start0 = start2; + ref0 = ref2; + ml0 = ml2; + goto _Search2; + } + + start2 = start3; + ref2 = ref3; + ml2 = ml3; + goto _Search3; + } + + /* + * OK, now we have 3 ascending matches; let's write at least the first one + * ip & ref are known; Now for ml + */ + if (start2 < ip+ml) + { + if ((start2 - ip) < (int)ML_MASK) + { + int correction; + if (ml > OPTIMAL_ML) ml = OPTIMAL_ML; + if (ip + ml > start2 + ml2 - MINMATCH) ml = (int)(start2 - ip) + ml2 - MINMATCH; + correction = ml - (int)(start2 - ip); + if (correction > 0) + { + start2 += correction; + ref2 += correction; + ml2 -= correction; + } + } + else + { + ml = (int)(start2 - ip); + } + } + if (LZ4HC_encodeSequence(&ip, &op, &anchor, ml, ref, limit, oend)) return 0; + + ip = start2; + ref = ref2; + ml = ml2; + + start2 = start3; + ref2 = ref3; + ml2 = ml3; + + goto _Search3; + } + + /* Encode Last Literals */ + { + int lastRun = (int)(iend - anchor); + if ((limit) && (((char*)op - dest) + lastRun + 1 + ((lastRun+255-RUN_MASK)/255) > (U32)maxOutputSize)) return 0; /* Check output limit */ + if (lastRun>=(int)RUN_MASK) { *op++=(RUN_MASK<<ML_BITS); lastRun-=RUN_MASK; for(; lastRun > 254 ; lastRun-=255) *op++ = 255; *op++ = (BYTE) lastRun; } + else *op++ = (BYTE)(lastRun<<ML_BITS); + memcpy(op, anchor, iend - anchor); + op += iend-anchor; + } + + /* End */ + return (int) (((char*)op)-dest); +} + + +int LZ4_compressHC2 (const char* source, char* dest, int inputSize, int compressionLevel) +{ + LZ4HC_Data_Structure ctx; + LZ4HC_init(&ctx, (const BYTE*)source); + return LZ4HC_compress_generic (&ctx, source, dest, inputSize, 0, compressionLevel, noLimit); +} + +int LZ4_compressHC (const char* source, char* dest, int inputSize) { return LZ4_compressHC2(source, dest, inputSize, 0); } + +int LZ4_compressHC2_limitedOutput (const char* source, char* dest, int inputSize, int maxOutputSize, int compressionLevel) +{ + LZ4HC_Data_Structure ctx; + LZ4HC_init(&ctx, (const BYTE*)source); + return LZ4HC_compress_generic (&ctx, source, dest, inputSize, maxOutputSize, compressionLevel, limitedOutput); +} + +int LZ4_compressHC_limitedOutput (const char* source, char* dest, int inputSize, int maxOutputSize) +{ + return LZ4_compressHC2_limitedOutput(source, dest, inputSize, maxOutputSize, 0); +} + + +/************************************** + Using external allocation +**************************************/ +int LZ4_sizeofStateHC(void) { return sizeof(LZ4HC_Data_Structure); } + +int LZ4_compressHC2_withStateHC (void* state, const char* source, char* dest, int inputSize, int compressionLevel) +{ + if (((size_t)(state)&(sizeof(void*)-1)) != 0) return 0; /* Error : state is not aligned for pointers (32 or 64 bits) */ + LZ4HC_init ((LZ4HC_Data_Structure*)state, (const BYTE*)source); + return LZ4HC_compress_generic (state, source, dest, inputSize, 0, compressionLevel, noLimit); +} + +int LZ4_compressHC_withStateHC (void* state, const char* source, char* dest, int inputSize) +{ return LZ4_compressHC2_withStateHC (state, source, dest, inputSize, 0); } + +int LZ4_compressHC2_limitedOutput_withStateHC (void* state, const char* source, char* dest, int inputSize, int maxOutputSize, int compressionLevel) +{ + if (((size_t)(state)&(sizeof(void*)-1)) != 0) return 0; /* Error : state is not aligned for pointers (32 or 64 bits) */ + LZ4HC_init ((LZ4HC_Data_Structure*)state, (const BYTE*)source); + return LZ4HC_compress_generic (state, source, dest, inputSize, maxOutputSize, compressionLevel, limitedOutput); +} + +int LZ4_compressHC_limitedOutput_withStateHC (void* state, const char* source, char* dest, int inputSize, int maxOutputSize) +{ return LZ4_compressHC2_limitedOutput_withStateHC (state, source, dest, inputSize, maxOutputSize, 0); } + + +/************************************** + Streaming Functions +**************************************/ +/* allocation */ +LZ4_streamHC_t* LZ4_createStreamHC(void) { return (LZ4_streamHC_t*)malloc(sizeof(LZ4_streamHC_t)); } +int LZ4_freeStreamHC (LZ4_streamHC_t* LZ4_streamHCPtr) { free(LZ4_streamHCPtr); return 0; } + +/* initialization */ +void LZ4_resetStreamHC (LZ4_streamHC_t* LZ4_streamHCPtr, int compressionLevel) +{ + ((LZ4HC_Data_Structure*)LZ4_streamHCPtr)->base = NULL; + ((LZ4HC_Data_Structure*)LZ4_streamHCPtr)->compressionLevel = (unsigned)compressionLevel; +} + +int LZ4_loadDictHC (LZ4_streamHC_t* LZ4_streamHCPtr, const char* dictionary, int dictSize) +{ + LZ4HC_Data_Structure* ctxPtr = (LZ4HC_Data_Structure*) LZ4_streamHCPtr; + if (dictSize > 64 KB) + { + dictionary += dictSize - 64 KB; + dictSize = 64 KB; + } + LZ4HC_init (ctxPtr, (const BYTE*)dictionary); + if (dictSize >= 4) LZ4HC_Insert (ctxPtr, (const BYTE*)dictionary +(dictSize-3)); + ctxPtr->end = (const BYTE*)dictionary + dictSize; + return dictSize; +} + + +/* compression */ + +static void LZ4HC_setExternalDict(LZ4HC_Data_Structure* ctxPtr, const BYTE* newBlock) +{ + if (ctxPtr->end >= ctxPtr->base + 4) + LZ4HC_Insert (ctxPtr, ctxPtr->end-3); /* Referencing remaining dictionary content */ + /* Only one memory segment for extDict, so any previous extDict is lost at this stage */ + ctxPtr->lowLimit = ctxPtr->dictLimit; + ctxPtr->dictLimit = (U32)(ctxPtr->end - ctxPtr->base); + ctxPtr->dictBase = ctxPtr->base; + ctxPtr->base = newBlock - ctxPtr->dictLimit; + ctxPtr->end = newBlock; + ctxPtr->nextToUpdate = ctxPtr->dictLimit; /* match referencing will resume from there */ +} + +static int LZ4_compressHC_continue_generic (LZ4HC_Data_Structure* ctxPtr, + const char* source, char* dest, + int inputSize, int maxOutputSize, limitedOutput_directive limit) +{ + /* auto-init if forgotten */ + if (ctxPtr->base == NULL) +
LZ4HC_init (ctxPtr, (const BYTE*) source); + + /* Check overflow */ + if ((size_t)(ctxPtr->end - ctxPtr->base) > 2 GB) + { + size_t dictSize = (size_t)(ctxPtr->end - ctxPtr->base) - ctxPtr->dictLimit; + if (dictSize > 64 KB) dictSize = 64 KB; + + LZ4_loadDictHC((LZ4_streamHC_t*)ctxPtr, (const char*)(ctxPtr->end) - dictSize, (int)dictSize); + } + + /* Check if blocks follow each other */ + if ((const BYTE*)source != ctxPtr->end) LZ4HC_setExternalDict(ctxPtr, (const BYTE*)source); + + /* Check overlapping input/dictionary space */ + { + const BYTE* sourceEnd = (const BYTE*) source + inputSize; + const BYTE* dictBegin = ctxPtr->dictBase + ctxPtr->lowLimit; + const BYTE* dictEnd = ctxPtr->dictBase + ctxPtr->dictLimit; + if ((sourceEnd > dictBegin) && ((BYTE*)source < dictEnd)) + { + if (sourceEnd > dictEnd) sourceEnd = dictEnd; + ctxPtr->lowLimit = (U32)(sourceEnd - ctxPtr->dictBase); + if (ctxPtr->dictLimit - ctxPtr->lowLimit < 4) ctxPtr->lowLimit = ctxPtr->dictLimit; + } + } + + return LZ4HC_compress_generic (ctxPtr, source, dest, inputSize, maxOutputSize, ctxPtr->compressionLevel, limit); +} + +int LZ4_compressHC_continue (LZ4_streamHC_t* LZ4_streamHCPtr, const char* source, char* dest, int inputSize) +{ + return LZ4_compressHC_continue_generic ((LZ4HC_Data_Structure*)LZ4_streamHCPtr, source, dest, inputSize, 0, noLimit); +} + +int LZ4_compressHC_limitedOutput_continue (LZ4_streamHC_t* LZ4_streamHCPtr, const char* source, char* dest, int inputSize, int maxOutputSize) +{ + return LZ4_compressHC_continue_generic ((LZ4HC_Data_Structure*)LZ4_streamHCPtr, source, dest, inputSize, maxOutputSize, limitedOutput); +} + + +/* dictionary saving */ + +int LZ4_saveDictHC (LZ4_streamHC_t* LZ4_streamHCPtr, char* safeBuffer, int dictSize) +{ + LZ4HC_Data_Structure* streamPtr = (LZ4HC_Data_Structure*)LZ4_streamHCPtr; + int prefixSize = (int)(streamPtr->end - (streamPtr->base + streamPtr->dictLimit)); + if (dictSize > 64 KB) dictSize = 64 KB; + if (dictSize < 4) dictSize = 0; + if (dictSize > prefixSize) dictSize = prefixSize; + memcpy(safeBuffer, streamPtr->end - dictSize, dictSize); + { + U32 endIndex = (U32)(streamPtr->end - streamPtr->base); + streamPtr->end = (const BYTE*)safeBuffer + dictSize; + streamPtr->base = streamPtr->end - endIndex; + streamPtr->dictLimit = endIndex - dictSize; + streamPtr->lowLimit = endIndex - dictSize; + if (streamPtr->nextToUpdate < streamPtr->dictLimit) streamPtr->nextToUpdate = streamPtr->dictLimit; + } + return dictSize; +} + + +/*********************************** + * Deprecated Functions + ***********************************/ +int LZ4_sizeofStreamStateHC(void) { return LZ4_STREAMHCSIZE; } + +int LZ4_resetStreamStateHC(void* state, const char* inputBuffer) +{ + if ((((size_t)state) & (sizeof(void*)-1)) != 0) return 1; /* Error : pointer is not aligned for pointer (32 or 64 bits) */ + LZ4HC_init((LZ4HC_Data_Structure*)state, (const BYTE*)inputBuffer); + return 0; +} + +void* LZ4_createHC (const char* inputBuffer) +{ + void* hc4 = ALLOCATOR(1, sizeof(LZ4HC_Data_Structure)); + LZ4HC_init ((LZ4HC_Data_Structure*)hc4, (const BYTE*)inputBuffer); + return hc4; +} + +int LZ4_freeHC (void* LZ4HC_Data) +{ + FREEMEM(LZ4HC_Data); + return (0); +} + +/* +int LZ4_compressHC_continue (void* LZ4HC_Data, const char* source, char* dest, int inputSize) +{ +return LZ4HC_compress_generic (LZ4HC_Data, source, dest, inputSize, 0, 0, noLimit); +} +int LZ4_compressHC_limitedOutput_continue (void* LZ4HC_Data, const char* source, char* dest, int inputSize, int maxOutputSize) +{ +return 
LZ4HC_compress_generic (LZ4HC_Data, source, dest, inputSize, maxOutputSize, 0, limitedOutput); +} +*/ + +int LZ4_compressHC2_continue (void* LZ4HC_Data, const char* source, char* dest, int inputSize, int compressionLevel) +{ + return LZ4HC_compress_generic (LZ4HC_Data, source, dest, inputSize, 0, compressionLevel, noLimit); +} + +int LZ4_compressHC2_limitedOutput_continue (void* LZ4HC_Data, const char* source, char* dest, int inputSize, int maxOutputSize, int compressionLevel) +{ + return LZ4HC_compress_generic (LZ4HC_Data, source, dest, inputSize, maxOutputSize, compressionLevel, limitedOutput); +} + +char* LZ4_slideInputBufferHC(void* LZ4HC_Data) +{ + LZ4HC_Data_Structure* hc4 = (LZ4HC_Data_Structure*)LZ4HC_Data; + int dictSize = LZ4_saveDictHC((LZ4_streamHC_t*)LZ4HC_Data, (char*)(hc4->inputBuffer), 64 KB); + return (char*)(hc4->inputBuffer + dictSize); +} diff --git a/vendor/src/github.com/cloudflare/golz4/src/lz4hc.h b/vendor/src/github.com/cloudflare/golz4/src/lz4hc.h new file mode 100644 index 0000000000000..ce813abaa8999 --- /dev/null +++ b/vendor/src/github.com/cloudflare/golz4/src/lz4hc.h @@ -0,0 +1,174 @@ +/* + LZ4 HC - High Compression Mode of LZ4 + Header File + Copyright (C) 2011-2014, Yann Collet. + BSD 2-Clause License (http://www.opensource.org/licenses/bsd-license.php) + + Redistribution and use in source and binary forms, with or without + modification, are permitted provided that the following conditions are + met: + + * Redistributions of source code must retain the above copyright + notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above + copyright notice, this list of conditions and the following disclaimer + in the documentation and/or other materials provided with the + distribution. + + THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + + You can contact the author at : + - LZ4 homepage : http://fastcompression.blogspot.com/p/lz4.html + - LZ4 source repository : http://code.google.com/p/lz4/ +*/ +#pragma once + + +#if defined (__cplusplus) +extern "C" { +#endif + + +int LZ4_compressHC (const char* source, char* dest, int inputSize); +/* +LZ4_compressHC : + return : the number of bytes in compressed buffer dest + or 0 if compression fails. + note : destination buffer must be already allocated. + To avoid any problem, size it to handle worst cases situations (input data not compressible) + Worst case size evaluation is provided by function LZ4_compressBound() (see "lz4.h") +*/ + +int LZ4_compressHC_limitedOutput (const char* source, char* dest, int inputSize, int maxOutputSize); +/* +LZ4_compress_limitedOutput() : + Compress 'inputSize' bytes from 'source' into an output buffer 'dest' of maximum size 'maxOutputSize'. 
+ If it cannot achieve it, compression will stop, and result of the function will be zero. + This function never writes outside of provided output buffer. + + inputSize : Max supported value is 1 GB + maxOutputSize : is maximum allowed size into the destination buffer (which must be already allocated) + return : the number of output bytes written in buffer 'dest' + or 0 if compression fails. +*/ + + +int LZ4_compressHC2 (const char* source, char* dest, int inputSize, int compressionLevel); +int LZ4_compressHC2_limitedOutput (const char* source, char* dest, int inputSize, int maxOutputSize, int compressionLevel); +/* + Same functions as above, but with programmable 'compressionLevel'. + Recommended values are between 4 and 9, although any value between 0 and 16 will work. + 'compressionLevel'==0 means use default 'compressionLevel' value. + Values above 16 behave the same as 16. + Equivalent variants exist for all other compression functions below. +*/ + +/* Note : + Decompression functions are provided within LZ4 source code (see "lz4.h") (BSD license) +*/ + + +/************************************** + Using an external allocation +**************************************/ +int LZ4_sizeofStateHC(void); +int LZ4_compressHC_withStateHC (void* state, const char* source, char* dest, int inputSize); +int LZ4_compressHC_limitedOutput_withStateHC (void* state, const char* source, char* dest, int inputSize, int maxOutputSize); + +int LZ4_compressHC2_withStateHC (void* state, const char* source, char* dest, int inputSize, int compressionLevel); +int LZ4_compressHC2_limitedOutput_withStateHC(void* state, const char* source, char* dest, int inputSize, int maxOutputSize, int compressionLevel); + +/* +These functions are provided should you prefer to allocate memory for compression tables with your own allocation methods. +To know how much memory must be allocated for the compression tables, use : +int LZ4_sizeofStateHC(); + +Note that tables must be aligned for pointer (32 or 64 bits), otherwise compression will fail (return code 0). + +The allocated memory can be provided to the compression functions using 'void* state' parameter. +LZ4_compress_withStateHC() and LZ4_compress_limitedOutput_withStateHC() are equivalent to previously described functions. +They just use the externally allocated memory for state instead of allocating their own (on stack, or on heap). +*/ + + + +/************************************** + Experimental Streaming Functions +**************************************/ +#define LZ4_STREAMHCSIZE_U64 32774 +#define LZ4_STREAMHCSIZE (LZ4_STREAMHCSIZE_U64 * sizeof(unsigned long long)) +typedef struct { unsigned long long table[LZ4_STREAMHCSIZE_U64]; } LZ4_streamHC_t; +/* +LZ4_streamHC_t +This structure allows static allocation of LZ4 HC streaming state. +State must then be initialized using LZ4_resetStreamHC() before first use. + +Static allocation should only be used with statically linked library. +If you want to use LZ4 as a DLL, please use construction functions below, which are more future-proof. +*/ + + +LZ4_streamHC_t* LZ4_createStreamHC(void); +int LZ4_freeStreamHC (LZ4_streamHC_t* LZ4_streamHCPtr); +/* +These functions create and release memory for LZ4 HC streaming state. +Newly created states are already initialized. +Existing state space can be re-used anytime using LZ4_resetStreamHC(). +If you use LZ4 as a DLL, please use these functions instead of direct struct allocation, +to avoid size mismatch between different versions. 
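+
+A minimal lifecycle sketch (illustrative only; 'src', 'dst' and 'srcSize' are
+assumed to be prepared by the caller, 'dst' being sized with LZ4_compressBound()
+from "lz4.h") :
+
+    LZ4_streamHC_t* s = LZ4_createStreamHC();   (already initialized)
+    int n = LZ4_compressHC_continue(s, src, dst, srcSize);
+    LZ4_freeStreamHC(s);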
+*/ + +void LZ4_resetStreamHC (LZ4_streamHC_t* LZ4_streamHCPtr, int compressionLevel); +int LZ4_loadDictHC (LZ4_streamHC_t* LZ4_streamHCPtr, const char* dictionary, int dictSize); + +int LZ4_compressHC_continue (LZ4_streamHC_t* LZ4_streamHCPtr, const char* source, char* dest, int inputSize); +int LZ4_compressHC_limitedOutput_continue (LZ4_streamHC_t* LZ4_streamHCPtr, const char* source, char* dest, int inputSize, int maxOutputSize); + +int LZ4_saveDictHC (LZ4_streamHC_t* LZ4_streamHCPtr, char* safeBuffer, int maxDictSize); + +/* +These functions compress data in successive blocks of any size, using previous blocks as dictionary. +One key assumption is that each previous block will remain read-accessible while compressing next block. + +Before starting compression, state must be properly initialized, using LZ4_resetStreamHC(). +A first "fictional block" can then be designated as initial dictionary, using LZ4_loadDictHC() (Optional). + +Then, use LZ4_compressHC_continue() or LZ4_compressHC_limitedOutput_continue() to compress each successive block. +They work like usual LZ4_compressHC() or LZ4_compressHC_limitedOutput(), but use previous memory blocks to improve compression. +Previous memory blocks (including initial dictionary when present) must remain accessible and unmodified during compression. + +If, for any reason, previous data block can't be preserved in memory during next compression block, +you must save it to a safer memory space, +using LZ4_saveDictHC(). +*/ + + + +/************************************** + * Deprecated Streaming Functions + * ************************************/ +/* Note : these streaming functions follows the older model, and should no longer be used */ +void* LZ4_createHC (const char* inputBuffer); +char* LZ4_slideInputBufferHC (void* LZ4HC_Data); +int LZ4_freeHC (void* LZ4HC_Data); + +int LZ4_compressHC2_continue (void* LZ4HC_Data, const char* source, char* dest, int inputSize, int compressionLevel); +int LZ4_compressHC2_limitedOutput_continue (void* LZ4HC_Data, const char* source, char* dest, int inputSize, int maxOutputSize, int compressionLevel); + +int LZ4_sizeofStreamStateHC(void); +int LZ4_resetStreamStateHC(void* state, const char* inputBuffer); + + +#if defined (__cplusplus) +} +#endif diff --git a/vendor/src/github.com/galaxydi/go-loghub/.travis.yml b/vendor/src/github.com/galaxydi/go-loghub/.travis.yml new file mode 100644 index 0000000000000..93cf4140647f1 --- /dev/null +++ b/vendor/src/github.com/galaxydi/go-loghub/.travis.yml @@ -0,0 +1,14 @@ +language: go + +go: + - 1.6 + +install: + - go get github.com/mattn/goveralls + - go get github.com/gogo/protobuf/proto + - go get github.com/cloudflare/golz4 + - go get github.com/golang/glog + - go get github.com/stretchr/testify/suite + +script: + - $HOME/gopath/bin/goveralls -service=travis-ci diff --git a/vendor/src/github.com/galaxydi/go-loghub/Makefile b/vendor/src/github.com/galaxydi/go-loghub/Makefile new file mode 100644 index 0000000000000..90049eb000481 --- /dev/null +++ b/vendor/src/github.com/galaxydi/go-loghub/Makefile @@ -0,0 +1,16 @@ +GOPATH=$(shell go env | grep GOPATH | sed 's/GOPATH=//' | sed 's/"//g') +gogoproto="$(GOPATH)/src/github.com/gogo/protobuf/gogoproto" +protobuf="$(GOPATH)/src/github.com/gogo/protobuf/protobuf" + +all: log.pb.go + go build + +log.pb.go: log.proto + go get github.com/gogo/protobuf/proto + go get github.com/gogo/protobuf/protoc-gen-gogo + go get github.com/gogo/protobuf/gogoproto + protoc --proto_path=.:${gogoproto}:${protobuf} --gogo_out=. 
log.proto + +test: + go test + diff --git a/vendor/src/github.com/galaxydi/go-loghub/README.md b/vendor/src/github.com/galaxydi/go-loghub/README.md new file mode 100644 index 0000000000000..bad004cc4ebe9 --- /dev/null +++ b/vendor/src/github.com/galaxydi/go-loghub/README.md @@ -0,0 +1,45 @@ +This is a Golang SDK for Alibaba Cloud [Log Service](https://sls.console.aliyun.com/). + +API Reference : + +* [Chinese](https://help.aliyun.com/document_detail/29007.html) +* [English](https://intl.aliyun.com/help/doc-detail/29007.htm) + +[![Build Status](https://travis-ci.org/galaxydi/go-loghub.svg?branch=master)](https://travis-ci.org/galaxydi/go-loghub) +[![Coverage Status](https://coveralls.io/repos/github/galaxydi/go-loghub/badge.svg?branch=master&foo=bar)](https://coveralls.io/github/galaxydi/go-loghub?branch=master&foo=bar) + + +# Install Instruction + +### Third Dependencies + +``` +go get github.com/cloudflare/golz4 +go get github.com/golang/glog +go get github.com/gogo/protobuf/proto +go get github.com/stretchr/testify/suite +``` + +### LogHub Golang SDK + +``` +go get github.com/galaxydi/go-loghub +``` + +# Example + +### Write and Read LogHub + +[loghub_sample.go](example/loghub/loghub_sample.go) + +### Use Index on LogHub (SLS) + +[index_sample.go](example/index/index_sample.go) + +### Create Config for Logtail + +[log_config_sample.go](example/config/log_config_sample.go) + +### Create Machine Group for Logtail + +[machine_group_sample.go](example/machine_group/machine_group_sample.go) diff --git a/vendor/src/github.com/galaxydi/go-loghub/client.go b/vendor/src/github.com/galaxydi/go-loghub/client.go new file mode 100644 index 0000000000000..8094536a41891 --- /dev/null +++ b/vendor/src/github.com/galaxydi/go-loghub/client.go @@ -0,0 +1,134 @@ +package sls + +import ( + "encoding/json" + "fmt" +) + +// Error defines sls error +type Error struct { + Code string `json:"errorCode"` + Message string `json:"errorMessage"` + RequestID string `json:"requestID"` +} + +// NewClientError new client error +func NewClientError(message string) *Error { + err := new(Error) + err.Code = "ClientError" + err.Message = message + return err +} + +func (e Error) String() string { + b, err := json.MarshalIndent(e, "", " ") + if err != nil { + return "" + } + return string(b) +} + +func (e Error) Error() string { + return e.String() +} + +// Client ... +type Client struct { + Endpoint string // IP or hostname of SLS endpoint + AccessKeyID string + AccessKeySecret string + SecurityToken string +} + +func convert(c *Client, projName string) *LogProject { + return &LogProject{ + Name: projName, + Endpoint: c.Endpoint, + AccessKeyID: c.AccessKeyID, + AccessKeySecret: c.AccessKeySecret, + SecurityToken: c.SecurityToken, + } +} + +// CreateProject create a new loghub project. +func (c *Client) CreateProject(name, description string) (*LogProject, error) { + type Body struct { + ProjectName string `json:"projectName"` + Description string `json:"description"` + } + body, err := json.Marshal(Body{ + ProjectName: name, + Description: description, + }) + if err != nil { + return nil, err + } + + h := map[string]string{ + "x-log-bodyrawsize": fmt.Sprintf("%d", len(body)), + "Content-Type": "application/json", + "Accept-Encoding": "deflate", // TODO: support lz4 + } + + uri := "/" + proj := convert(c, name) + _, err = request(proj, "POST", uri, h, body) + if err != nil { + return nil, err + } + + return proj, nil +} + +// GetProject ... 
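+// A minimal usage sketch (illustrative; the endpoint, keys and project name
+// below are placeholders, not values from this repository):
+//
+//	c := &Client{Endpoint: "cn-hangzhou.log.aliyuncs.com", AccessKeyID: "key-id", AccessKeySecret: "key-secret"}
+//	proj, err := c.GetProject("my-project")
+//	if err != nil {
+//		// on API failure, err carries the SDK's Error with Code, Message and RequestID
+//	}
+//	_ = proj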
+func (c *Client) GetProject(name string) (*LogProject, error) { + h := map[string]string{ + "x-log-bodyrawsize": "0", + } + + uri := "/" + proj := convert(c, name) + _, err := request(proj, "GET", uri, h, nil) + if err != nil { + return nil, err + } + + return proj, nil +} + +// CheckProjectExist check project exist or not +func (c *Client) CheckProjectExist(name string) (bool, error) { + h := map[string]string{ + "x-log-bodyrawsize": "0", + } + uri := "/" + proj := convert(c, name) + _, err := request(proj, "GET", uri, h, nil) + if err != nil { + if _, ok := err.(*Error); ok { + slsErr := err.(*Error) + if slsErr.Code == "ProjectNotExist" { + return false, nil + } + return false, slsErr + } + return false, err + } + return true, nil +} + +// DeleteProject ... +func (c *Client) DeleteProject(name string) error { + h := map[string]string{ + "x-log-bodyrawsize": "0", + } + + proj := convert(c, name) + uri := "/" + _, err := request(proj, "DELETE", uri, h, nil) + if err != nil { + return err + } + + return nil +} diff --git a/vendor/src/github.com/galaxydi/go-loghub/config.go b/vendor/src/github.com/galaxydi/go-loghub/config.go new file mode 100644 index 0000000000000..0152040c9edfa --- /dev/null +++ b/vendor/src/github.com/galaxydi/go-loghub/config.go @@ -0,0 +1,19 @@ +package sls + +const ( + version = "0.6.0" // SDK version + signatureMethod = "hmac-sha1" // Signature method + + // OffsetNewest stands for the log head offset, i.e. the offset that will be + // assigned to the next message that will be produced to the shard. + OffsetNewest = "end" + // OffsetOldest stands for the oldest offset available on the logstore for a + // shard. + OffsetOldest = "begin" + + // ProgressHeader stands for the progress header in GetLogs response + ProgressHeader = "X-Log-Progress" + + // GetLogsCountHeader stands for the count header in GetLogs response + GetLogsCountHeader = "X-Log-Count" +) diff --git a/vendor/src/github.com/galaxydi/go-loghub/log.pb.go b/vendor/src/github.com/galaxydi/go-loghub/log.pb.go new file mode 100644 index 0000000000000..948aeed6fcc08 --- /dev/null +++ b/vendor/src/github.com/galaxydi/go-loghub/log.pb.go @@ -0,0 +1,1034 @@ +package sls + +import proto "github.com/gogo/protobuf/proto" +import fmt "fmt" +import math "math" + +// discarding unused import gogoproto "." + +import github_com_gogo_protobuf_proto "github.com/gogo/protobuf/proto" + +import io "io" + +var ( + // ErrInvalidLengthLog invalid log length + ErrInvalidLengthLog = fmt.Errorf("proto: negative length found during unmarshaling") + + // ErrIntOverflowLog overflow err + ErrIntOverflowLog = fmt.Errorf("proto: integer overflow") +) + +// Reference imports to suppress errors if they are not otherwise used. 
+var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// Log defines SLS Log struct +type Log struct { + Time *uint32 `protobuf:"varint,1,req,name=Time" json:"Time,omitempty"` + Contents []*LogContent `protobuf:"bytes,2,rep,name=Contents" json:"Contents,omitempty"` + XXXUnrecognized []byte `json:"-"` +} + +// Reset the Log +func (m *Log) Reset() { *m = Log{} } +func (m *Log) String() string { return proto.CompactTextString(m) } + +// ProtoMessage empty implement +func (*Log) ProtoMessage() {} + +// GetTime get log timestamp +func (m *Log) GetTime() uint32 { + if m != nil && m.Time != nil { + return *m.Time + } + return 0 +} + +// GetContents get log's contents +func (m *Log) GetContents() []*LogContent { + if m != nil { + return m.Contents + } + return nil +} + +// LogContent defines log content in SLS +type LogContent struct { + Key *string `protobuf:"bytes,1,req,name=Key" json:"Key,omitempty"` + Value *string `protobuf:"bytes,2,req,name=Value" json:"Value,omitempty"` + XXXUnrecognized []byte `json:"-"` +} + +// Reset set empty log content +func (m *LogContent) Reset() { *m = LogContent{} } +func (m *LogContent) String() string { return proto.CompactTextString(m) } + +// ProtoMessage empty implement +func (*LogContent) ProtoMessage() {} + +// GetKey get log content's key +func (m *LogContent) GetKey() string { + if m != nil && m.Key != nil { + return *m.Key + } + return "" +} + +// GetValue get log content's value +func (m *LogContent) GetValue() string { + if m != nil && m.Value != nil { + return *m.Value + } + return "" +} + +// LogGroup defines log group in SLS +type LogGroup struct { + Logs []*Log `protobuf:"bytes,1,rep,name=Logs" json:"Logs,omitempty"` + Reserved *string `protobuf:"bytes,2,opt,name=Reserved" json:"Reserved,omitempty"` + Topic *string `protobuf:"bytes,3,opt,name=Topic" json:"Topic,omitempty"` + Source *string `protobuf:"bytes,4,opt,name=Source" json:"Source,omitempty"` + XXXUnrecognized []byte `json:"-"` +} + +// Reset set empty log group +func (m *LogGroup) Reset() { *m = LogGroup{} } +func (m *LogGroup) String() string { return proto.CompactTextString(m) } + +// ProtoMessage empty implement +func (*LogGroup) ProtoMessage() {} + +// GetLogs get log group's logs +func (m *LogGroup) GetLogs() []*Log { + if m != nil { + return m.Logs + } + return nil +} + +// GetReserved get reserved field +func (m *LogGroup) GetReserved() string { + if m != nil && m.Reserved != nil { + return *m.Reserved + } + return "" +} + +// GetTopic get topic +func (m *LogGroup) GetTopic() string { + if m != nil && m.Topic != nil { + return *m.Topic + } + return "" +} + +// GetSource get source ip +func (m *LogGroup) GetSource() string { + if m != nil && m.Source != nil { + return *m.Source + } + return "" +} + +// LogGroupList defines log group list +type LogGroupList struct { + LogGroups []*LogGroup `protobuf:"bytes,1,rep,name=logGroups" json:"logGroups,omitempty"` + XXXUnrecognized []byte `json:"-"` +} + +// Reset set empty log group list +func (m *LogGroupList) Reset() { *m = LogGroupList{} } +func (m *LogGroupList) String() string { return proto.CompactTextString(m) } + +// ProtoMessage empty implement +func (*LogGroupList) ProtoMessage() {} + +// GetLogGroups from log group list +func (m *LogGroupList) GetLogGroups() []*LogGroup { + if m != nil { + return m.LogGroups + } + return nil +} + +// Marshal the Log struct +func (m *Log) Marshal() (data []byte, err error) { + size := m.Size() + data = make([]byte, size) + n, err := m.MarshalTo(data) + if err != nil { + return nil, err + } 
+ return data[:n], nil +} + +// MarshalTo another Marshal method +func (m *Log) MarshalTo(data []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if m.Time == nil { + return 0, github_com_gogo_protobuf_proto.NewRequiredNotSetError("Time") + } + data[i] = 0x8 + i++ + i = encodeVarintLog(data, i, uint64(*m.Time)) + if len(m.Contents) > 0 { + for _, msg := range m.Contents { + data[i] = 0x12 + i++ + i = encodeVarintLog(data, i, uint64(msg.Size())) + n, err := msg.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n + } + } + if m.XXXUnrecognized != nil { + i += copy(data[i:], m.XXXUnrecognized) + } + return i, nil +} + +// Marshal LogContent +func (m *LogContent) Marshal() (data []byte, err error) { + size := m.Size() + data = make([]byte, size) + n, err := m.MarshalTo(data) + if err != nil { + return nil, err + } + return data[:n], nil +} + +// MarshalTo return a int with Marshal +func (m *LogContent) MarshalTo(data []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if m.Key == nil { + return 0, github_com_gogo_protobuf_proto.NewRequiredNotSetError("Key") + } + + data[i] = 0xa + i++ + i = encodeVarintLog(data, i, uint64(len(*m.Key))) + i += copy(data[i:], *m.Key) + + if m.Value == nil { + return 0, github_com_gogo_protobuf_proto.NewRequiredNotSetError("Value") + } + + data[i] = 0x12 + i++ + i = encodeVarintLog(data, i, uint64(len(*m.Value))) + i += copy(data[i:], *m.Value) + + if m.XXXUnrecognized != nil { + i += copy(data[i:], m.XXXUnrecognized) + } + return i, nil +} + +// Marshal LogGroup +func (m *LogGroup) Marshal() (data []byte, err error) { + size := m.Size() + data = make([]byte, size) + n, err := m.MarshalTo(data) + if err != nil { + return nil, err + } + return data[:n], nil +} + +// MarshalTo LogGroup +func (m *LogGroup) MarshalTo(data []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if len(m.Logs) > 0 { + for _, msg := range m.Logs { + data[i] = 0xa + i++ + i = encodeVarintLog(data, i, uint64(msg.Size())) + n, err := msg.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n + } + } + if m.Reserved != nil { + data[i] = 0x12 + i++ + i = encodeVarintLog(data, i, uint64(len(*m.Reserved))) + i += copy(data[i:], *m.Reserved) + } + if m.Topic != nil { + data[i] = 0x1a + i++ + i = encodeVarintLog(data, i, uint64(len(*m.Topic))) + i += copy(data[i:], *m.Topic) + } + if m.Source != nil { + data[i] = 0x22 + i++ + i = encodeVarintLog(data, i, uint64(len(*m.Source))) + i += copy(data[i:], *m.Source) + } + if m.XXXUnrecognized != nil { + i += copy(data[i:], m.XXXUnrecognized) + } + return i, nil +} + +// Marshal LogGroupList +func (m *LogGroupList) Marshal() (data []byte, err error) { + size := m.Size() + data = make([]byte, size) + n, err := m.MarshalTo(data) + if err != nil { + return nil, err + } + return data[:n], nil +} + +// MarshalTo marshal LogGroupList +func (m *LogGroupList) MarshalTo(data []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if len(m.LogGroups) > 0 { + for _, msg := range m.LogGroups { + data[i] = 0xa + i++ + i = encodeVarintLog(data, i, uint64(msg.Size())) + n, err := msg.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n + } + } + if m.XXXUnrecognized != nil { + i += copy(data[i:], m.XXXUnrecognized) + } + return i, nil +} + +func encodeFixed64Log(data []byte, offset int, v uint64) int { + data[offset] = uint8(v) + data[offset+1] = uint8(v >> 8) + data[offset+2] = uint8(v >> 16) + data[offset+3] = uint8(v >> 24) + data[offset+4] = uint8(v >> 32) + data[offset+5] = 
uint8(v >> 40) + data[offset+6] = uint8(v >> 48) + data[offset+7] = uint8(v >> 56) + return offset + 8 +} +func encodeFixed32Log(data []byte, offset int, v uint32) int { + data[offset] = uint8(v) + data[offset+1] = uint8(v >> 8) + data[offset+2] = uint8(v >> 16) + data[offset+3] = uint8(v >> 24) + return offset + 4 +} +func encodeVarintLog(data []byte, offset int, v uint64) int { + for v >= 1<<7 { + data[offset] = uint8(v&0x7f | 0x80) + v >>= 7 + offset++ + } + data[offset] = uint8(v) + return offset + 1 +} + +// Size get log size +func (m *Log) Size() (n int) { + var l int + _ = l + if m.Time != nil { + n += 1 + sovLog(uint64(*m.Time)) + } + if len(m.Contents) > 0 { + for _, e := range m.Contents { + l = e.Size() + n += 1 + l + sovLog(uint64(l)) + } + } + if m.XXXUnrecognized != nil { + n += len(m.XXXUnrecognized) + } + return n +} + +// Size get LogContent size +func (m *LogContent) Size() (n int) { + var l int + _ = l + if m.Key != nil { + l = len(*m.Key) + n += 1 + l + sovLog(uint64(l)) + } + if m.Value != nil { + l = len(*m.Value) + n += 1 + l + sovLog(uint64(l)) + } + if m.XXXUnrecognized != nil { + n += len(m.XXXUnrecognized) + } + return n +} + +// Size get LogGroup size +func (m *LogGroup) Size() (n int) { + var l int + _ = l + if len(m.Logs) > 0 { + for _, e := range m.Logs { + l = e.Size() + n += 1 + l + sovLog(uint64(l)) + } + } + if m.Reserved != nil { + l = len(*m.Reserved) + n += 1 + l + sovLog(uint64(l)) + } + if m.Topic != nil { + l = len(*m.Topic) + n += 1 + l + sovLog(uint64(l)) + } + if m.Source != nil { + l = len(*m.Source) + n += 1 + l + sovLog(uint64(l)) + } + if m.XXXUnrecognized != nil { + n += len(m.XXXUnrecognized) + } + return n +} + +// Size get LogGroupList size +func (m *LogGroupList) Size() (n int) { + var l int + _ = l + if len(m.LogGroups) > 0 { + for _, e := range m.LogGroups { + l = e.Size() + n += 1 + l + sovLog(uint64(l)) + } + } + if m.XXXUnrecognized != nil { + n += len(m.XXXUnrecognized) + } + return n +} + +func sovLog(x uint64) (n int) { + for { + n++ + x >>= 7 + if x == 0 { + break + } + } + return n +} +func sozLog(x uint64) (n int) { + return sovLog(uint64((x << 1) ^ uint64((int64(x) >> 63)))) +} + +// Unmarshal Log +func (m *Log) Unmarshal(data []byte) error { + var hasFields [1]uint64 + l := len(data) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowLog + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Log: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Log: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Time", wireType) + } + var v uint32 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowLog + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + v |= (uint32(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + m.Time = &v + hasFields[0] |= uint64(0x00000001) + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Contents", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowLog + } + if iNdEx >= l { + return 
io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthLog + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Contents = append(m.Contents, &LogContent{}) + if err := m.Contents[len(m.Contents)-1].Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipLog(data[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthLog + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXXUnrecognized = append(m.XXXUnrecognized, data[iNdEx:iNdEx+skippy]...) + iNdEx += skippy + } + } + if hasFields[0]&uint64(0x00000001) == 0 { + return github_com_gogo_protobuf_proto.NewRequiredNotSetError("Time") + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} + +// Unmarshal LogContent +func (m *LogContent) Unmarshal(data []byte) error { + var hasFields [1]uint64 + l := len(data) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowLog + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Content: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Content: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Key", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowLog + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthLog + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + s := string(data[iNdEx:postIndex]) + m.Key = &s + iNdEx = postIndex + hasFields[0] |= uint64(0x00000001) + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Value", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowLog + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthLog + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + s := string(data[iNdEx:postIndex]) + m.Value = &s + iNdEx = postIndex + hasFields[0] |= uint64(0x00000002) + default: + iNdEx = preIndex + skippy, err := skipLog(data[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthLog + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXXUnrecognized = append(m.XXXUnrecognized, data[iNdEx:iNdEx+skippy]...) 
+ iNdEx += skippy + } + } + if hasFields[0]&uint64(0x00000001) == 0 { + return github_com_gogo_protobuf_proto.NewRequiredNotSetError("Key") + } + if hasFields[0]&uint64(0x00000002) == 0 { + return github_com_gogo_protobuf_proto.NewRequiredNotSetError("Value") + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} + +// Unmarshal LogGroup +func (m *LogGroup) Unmarshal(data []byte) error { + l := len(data) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowLog + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: LogGroup: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: LogGroup: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Logs", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowLog + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthLog + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Logs = append(m.Logs, &Log{}) + if err := m.Logs[len(m.Logs)-1].Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Reserved", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowLog + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthLog + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + s := string(data[iNdEx:postIndex]) + m.Reserved = &s + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Topic", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowLog + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthLog + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + s := string(data[iNdEx:postIndex]) + m.Topic = &s + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Source", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowLog + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthLog + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + s := string(data[iNdEx:postIndex]) + m.Source = &s + 
iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipLog(data[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthLog + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXXUnrecognized = append(m.XXXUnrecognized, data[iNdEx:iNdEx+skippy]...) + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} + +// Unmarshal LogGroupList +func (m *LogGroupList) Unmarshal(data []byte) error { + l := len(data) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowLog + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: LogGroupList: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: LogGroupList: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field LogGroups", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowLog + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthLog + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.LogGroups = append(m.LogGroups, &LogGroup{}) + if err := m.LogGroups[len(m.LogGroups)-1].Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipLog(data[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthLog + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXXUnrecognized = append(m.XXXUnrecognized, data[iNdEx:iNdEx+skippy]...) 
+ iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func skipLog(data []byte) (n int, err error) { + l := len(data) + iNdEx := 0 + for iNdEx < l { + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowLog + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + wireType := int(wire & 0x7) + switch wireType { + case 0: + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowLog + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + iNdEx++ + if data[iNdEx-1] < 0x80 { + break + } + } + return iNdEx, nil + case 1: + iNdEx += 8 + return iNdEx, nil + case 2: + var length int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowLog + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + length |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + iNdEx += length + if length < 0 { + return 0, ErrInvalidLengthLog + } + return iNdEx, nil + case 3: + for { + var innerWire uint64 + var start = iNdEx + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowLog + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + innerWire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + innerWireType := int(innerWire & 0x7) + if innerWireType == 4 { + break + } + next, err := skipLog(data[start:]) + if err != nil { + return 0, err + } + iNdEx = start + next + } + return iNdEx, nil + case 4: + return iNdEx, nil + case 5: + iNdEx += 4 + return iNdEx, nil + default: + return 0, fmt.Errorf("proto: illegal wireType %d", wireType) + } + } + panic("unreachable") +} diff --git a/vendor/src/github.com/galaxydi/go-loghub/log.proto b/vendor/src/github.com/galaxydi/go-loghub/log.proto new file mode 100644 index 0000000000000..d227a96f3ec1c --- /dev/null +++ b/vendor/src/github.com/galaxydi/go-loghub/log.proto @@ -0,0 +1,31 @@ +package sls; + +import "gogo.proto"; + +option (gogoproto.sizer_all) = true; +option (gogoproto.marshaler_all) = true; +option (gogoproto.unmarshaler_all) = true; + +message Log +{ + required uint32 Time = 1;// UNIX Time Format + message Content + { + required string Key = 1; + required string Value = 2; + } + repeated Content Contents= 2; + +} +message LogGroup +{ + repeated Log Logs= 1; + optional string Reserved = 2; // reserved fields + optional string Topic = 3; + optional string Source = 4; +} + +message LogGroupList +{ + repeated LogGroup logGroups = 1; +} diff --git a/vendor/src/github.com/galaxydi/go-loghub/log_config.go b/vendor/src/github.com/galaxydi/go-loghub/log_config.go new file mode 100644 index 0000000000000..d988ff9c2d2c4 --- /dev/null +++ b/vendor/src/github.com/galaxydi/go-loghub/log_config.go @@ -0,0 +1,43 @@ +package sls + +// InputDetail defines log_config input +type InputDetail struct { + LogType string `json:"logType"` + LogPath string `json:"logPath"` + FilePattern string `json:"filePattern"` + LocalStorage bool `json:"localStorage"` + TimeKey string `json:"timeKey"` + TimeFormat string `json:"timeFormat"` + LogBeginRegex string `json:"logBeginRegex"` + Regex string `json:"regex"` + Keys []string `json:"key"` + FilterKeys []string `json:"filterKey"` + FilterRegex []string `json:"filterRegex"` + TopicFormat string `json:"topicFormat"` +} + +// OutputDetail defines output +type OutputDetail struct { + 
ProjectName string `json:"projectName"` + LogStoreName string `json:"logstoreName"` +} + +// LogConfig defines log config +type LogConfig struct { + Name string `json:"configName"` + InputType string `json:"inputType"` + InputDetail InputDetail `json:"inputDetail"` + OutputType string `json:"outputType"` + OutputDetail OutputDetail `json:"outputDetail"` + + CreateTime uint32 + LastModifyTime uint32 + + project *LogProject +} + +// GetAppliedMachineGroup returns applied machine group of this config. +func (c *LogConfig) GetAppliedMachineGroup(confName string) (groupNames []string, err error) { + groupNames, err = c.project.GetAppliedMachineGroups(c.Name) + return +} diff --git a/vendor/src/github.com/galaxydi/go-loghub/log_project.go b/vendor/src/github.com/galaxydi/go-loghub/log_project.go new file mode 100644 index 0000000000000..6ee287dff3134 --- /dev/null +++ b/vendor/src/github.com/galaxydi/go-loghub/log_project.go @@ -0,0 +1,598 @@ +package sls + +import ( + "encoding/json" + "fmt" + "io/ioutil" + "net/http" +) + +// LogProject defines log project +type LogProject struct { + Name string // Project name + Endpoint string // IP or hostname of SLS endpoint + AccessKeyID string + AccessKeySecret string + SecurityToken string +} + +// NewLogProject creates a new SLS project. +func NewLogProject(name, endpoint, accessKeyID, accessKeySecret string) (p *LogProject, err error) { + p = &LogProject{ + Name: name, + Endpoint: endpoint, + AccessKeyID: accessKeyID, + AccessKeySecret: accessKeySecret, + } + return p, nil +} + +// WithToken add token parameter +func (p *LogProject) WithToken(token string) (*LogProject, error) { + p.SecurityToken = token + return p, nil +} + +// ListLogStore returns all logstore names of project p. +func (p *LogProject) ListLogStore() ([]string, error) { + h := map[string]string{ + "x-log-bodyrawsize": "0", + } + + uri := fmt.Sprintf("/logstores") + r, err := request(p, "GET", uri, h, nil) + if err != nil { + return nil, NewClientError(err.Error()) + } + + buf, _ := ioutil.ReadAll(r.Body) + if r.StatusCode != http.StatusOK { + err := new(Error) + json.Unmarshal(buf, err) + return nil, err + } + + type Body struct { + Count int + LogStores []string + } + body := &Body{} + json.Unmarshal(buf, body) + storeNames := body.LogStores + return storeNames, nil +} + +// GetLogStore returns logstore according by logstore name. +func (p *LogProject) GetLogStore(name string) (*LogStore, error) { + h := map[string]string{ + "x-log-bodyrawsize": "0", + } + + r, err := request(p, "GET", "/logstores/"+name, h, nil) + if err != nil { + return nil, NewClientError(err.Error()) + } + + buf, _ := ioutil.ReadAll(r.Body) + if r.StatusCode != http.StatusOK { + err := new(Error) + json.Unmarshal(buf, err) + return nil, err + } + + s := &LogStore{} + json.Unmarshal(buf, s) + s.Name = name + s.project = p + return s, nil +} + +// CreateLogStore creates a new logstore in SLS, +// where name is logstore name, +// and ttl is time-to-live(in day) of logs, +// and shardCnt is the number of shards. 
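+// A minimal usage sketch (the endpoint and names below are illustrative
+// assumptions, not taken from this repository):
+//
+//	p, _ := NewLogProject("my-project", "cn-hangzhou.log.aliyuncs.com", id, secret)
+//	// 30-day TTL, 2 shards
+//	if err := p.CreateLogStore("my-store", 30, 2); err != nil {
+//		// handle error
+//	}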
+func (p *LogProject) CreateLogStore(name string, ttl, shardCnt int) error { + type Body struct { + Name string `json:"logstoreName"` + TTL int `json:"ttl"` + ShardCount int `json:"shardCount"` + } + store := &Body{ + Name: name, + TTL: ttl, + ShardCount: shardCnt, + } + body, err := json.Marshal(store) + if err != nil { + return NewClientError(err.Error()) + } + + h := map[string]string{ + "x-log-bodyrawsize": fmt.Sprintf("%v", len(body)), + "Content-Type": "application/json", + "Accept-Encoding": "deflate", // TODO: support lz4 + } + + r, err := request(p, "POST", "/logstores", h, body) + if err != nil { + return NewClientError(err.Error()) + } + + body, _ = ioutil.ReadAll(r.Body) + if r.StatusCode != http.StatusOK { + err := new(Error) + json.Unmarshal(body, err) + return err + } + return nil +} + +// DeleteLogStore deletes a logstore according by logstore name. +func (p *LogProject) DeleteLogStore(name string) (err error) { + h := map[string]string{ + "x-log-bodyrawsize": "0", + } + + r, err := request(p, "DELETE", "/logstores/"+name, h, nil) + if err != nil { + return NewClientError(err.Error()) + } + + body, _ := ioutil.ReadAll(r.Body) + if r.StatusCode != http.StatusOK { + err := new(Error) + json.Unmarshal(body, err) + return err + } + return nil +} + +// UpdateLogStore updates a logstore according by logstore name, +// obviously we can't modify the logstore name itself. +func (p *LogProject) UpdateLogStore(name string, ttl, shardCnt int) (err error) { + type Body struct { + Name string `json:"logstoreName"` + TTL int `json:"ttl"` + ShardCount int `json:"shardCount"` + } + store := &Body{ + Name: name, + TTL: ttl, + ShardCount: shardCnt, + } + body, err := json.Marshal(store) + if err != nil { + return NewClientError(err.Error()) + } + + h := map[string]string{ + "x-log-bodyrawsize": fmt.Sprintf("%v", len(body)), + "Content-Type": "application/json", + "Accept-Encoding": "deflate", // TODO: support lz4 + } + r, err := request(p, "PUT", "/logstores/"+name, h, body) + if err != nil { + return NewClientError(err.Error()) + } + + body, _ = ioutil.ReadAll(r.Body) + if r.StatusCode != http.StatusOK { + err := new(Error) + json.Unmarshal(body, err) + return err + } + return nil +} + +// ListMachineGroup returns machine group name list and the total number of machine groups. +// The offset starts from 0 and the size is the max number of machine groups could be returned. 
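+// An illustrative paging sketch (p is a *LogProject; the page size is an
+// assumption):
+//
+//	groups, total, err := p.ListMachineGroup(0, 100)
+//	if err == nil && len(groups) < total {
+//		// fetch the next page starting at offset len(groups)
+//	}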
+func (p *LogProject) ListMachineGroup(offset, size int) (m []string, total int, err error) { + h := map[string]string{ + "x-log-bodyrawsize": "0", + } + if size <= 0 { + size = 500 + } + uri := fmt.Sprintf("/machinegroups?offset=%v&size=%v", offset, size) + r, err := request(p, "GET", uri, h, nil) + if err != nil { + return nil, 0, NewClientError(err.Error()) + } + + buf, _ := ioutil.ReadAll(r.Body) + if r.StatusCode != http.StatusOK { + err := new(Error) + json.Unmarshal(buf, err) + return nil, 0, err + } + + type Body struct { + MachineGroups []string + Count int + Total int + } + body := &Body{} + json.Unmarshal(buf, body) + m = body.MachineGroups + total = body.Total + return m, total, nil +} + +// CheckLogstoreExist check logstore exist or not +func (p *LogProject) CheckLogstoreExist(name string) (bool, error) { + h := map[string]string{ + "x-log-bodyrawsize": "0", + } + _, err := request(p, "GET", "/logstores/"+name, h, nil) + if err != nil { + if _, ok := err.(*Error); ok { + slsErr := err.(*Error) + if slsErr.Code == "LogStoreNotExist" { + return false, nil + } + return false, slsErr + } + return false, err + } + return true, nil +} + +// CheckMachineGroupExist check machine group exist or not +func (p *LogProject) CheckMachineGroupExist(name string) (bool, error) { + h := map[string]string{ + "x-log-bodyrawsize": "0", + } + _, err := request(p, "GET", "/machinegroups/"+name, h, nil) + + if err != nil { + if _, ok := err.(*Error); ok { + slsErr := err.(*Error) + if slsErr.Code == "MachineGroupNotExist" { + return false, nil + } + return false, slsErr + } + return false, err + } + return true, nil +} + +// GetMachineGroup retruns machine group according by machine group name. +func (p *LogProject) GetMachineGroup(name string) (m *MachineGroup, err error) { + h := map[string]string{ + "x-log-bodyrawsize": "0", + } + resp, err := request(p, "GET", "/machinegroups/"+name, h, nil) + if err != nil { + return nil, NewClientError(err.Error()) + } + + buf, _ := ioutil.ReadAll(resp.Body) + if resp.StatusCode != http.StatusOK { + err := new(Error) + json.Unmarshal(buf, err) + return nil, err + } + + m = new(MachineGroup) + json.Unmarshal(buf, m) + m.project = p + return m, nil +} + +// CreateMachineGroup creates a new machine group in SLS. +func (p *LogProject) CreateMachineGroup(m *MachineGroup) error { + body, err := json.Marshal(m) + if err != nil { + return NewClientError(err.Error()) + } + + h := map[string]string{ + "x-log-bodyrawsize": fmt.Sprintf("%v", len(body)), + "Content-Type": "application/json", + "Accept-Encoding": "deflate", // TODO: support lz4 + } + resp, err := request(p, "POST", "/machinegroups", h, body) + if err != nil { + return NewClientError(err.Error()) + } + + body, _ = ioutil.ReadAll(resp.Body) + if resp.StatusCode != http.StatusOK { + err := new(Error) + json.Unmarshal(body, err) + return err + } + return nil +} + +// UpdateMachineGroup updates a machine group. 
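+// An illustrative fetch-modify-update sketch (the group name and topic are
+// assumptions):
+//
+//	g, err := p.GetMachineGroup("my-group")
+//	if err == nil {
+//		g.Attribute.TopicName = "new-topic"
+//		err = p.UpdateMachineGroup(g)
+//	}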
+func (p *LogProject) UpdateMachineGroup(m *MachineGroup) (err error) { + body, err := json.Marshal(m) + if err != nil { + return NewClientError(err.Error()) + } + + h := map[string]string{ + "x-log-bodyrawsize": fmt.Sprintf("%v", len(body)), + "Content-Type": "application/json", + "Accept-Encoding": "deflate", // TODO: support lz4 + } + r, err := request(p, "PUT", "/machinegroups/"+m.Name, h, body) + if err != nil { + return NewClientError(err.Error()) + } + + body, _ = ioutil.ReadAll(r.Body) + if r.StatusCode != http.StatusOK { + err := new(Error) + json.Unmarshal(body, err) + return err + } + return nil +} + +// DeleteMachineGroup deletes machine group according machine group name. +func (p *LogProject) DeleteMachineGroup(name string) (err error) { + h := map[string]string{ + "x-log-bodyrawsize": "0", + } + r, err := request(p, "DELETE", "/machinegroups/"+name, h, nil) + if err != nil { + return NewClientError(err.Error()) + } + + body, _ := ioutil.ReadAll(r.Body) + if r.StatusCode != http.StatusOK { + err := new(Error) + json.Unmarshal(body, err) + return err + } + + return nil +} + +// ListConfig returns config names list and the total number of configs. +// The offset starts from 0 and the size is the max number of configs could be returned. +func (p *LogProject) ListConfig(offset, size int) (cfgNames []string, total int, err error) { + h := map[string]string{ + "x-log-bodyrawsize": "0", + } + if size <= 0 { + size = 100 + } + uri := fmt.Sprintf("/configs?offset=%v&size=%v", offset, size) + r, err := request(p, "GET", uri, h, nil) + if err != nil { + return nil, 0, NewClientError(err.Error()) + } + + buf, _ := ioutil.ReadAll(r.Body) + if r.StatusCode != http.StatusOK { + err := new(Error) + json.Unmarshal(buf, err) + return nil, 0, err + } + + type Body struct { + Total int + Configs []string + } + body := &Body{} + json.Unmarshal(buf, body) + cfgNames = body.Configs + total = body.Total + return cfgNames, total, nil +} + +// CheckConfigExist check config exist or not +func (p *LogProject) CheckConfigExist(name string) (bool, error) { + h := map[string]string{ + "x-log-bodyrawsize": "0", + } + _, err := request(p, "GET", "/configs/"+name, h, nil) + if err != nil { + if _, ok := err.(*Error); ok { + slsErr := err.(*Error) + if slsErr.Code == "ConfigNotExist" { + return false, nil + } + return false, slsErr + } + return false, err + } + return true, nil +} + +// GetConfig returns config according by config name. +func (p *LogProject) GetConfig(name string) (c *LogConfig, err error) { + h := map[string]string{ + "x-log-bodyrawsize": "0", + } + r, err := request(p, "GET", "/configs/"+name, h, nil) + if err != nil { + return nil, NewClientError(err.Error()) + } + + buf, _ := ioutil.ReadAll(r.Body) + if r.StatusCode != http.StatusOK { + err := new(Error) + json.Unmarshal(buf, err) + return nil, err + } + + c = &LogConfig{} + json.Unmarshal(buf, c) + c.project = p + return c, nil +} + +// UpdateConfig updates a config. 
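+// An illustrative sketch (the config name and file pattern are assumptions):
+//
+//	c, err := p.GetConfig("my-config")
+//	if err == nil {
+//		c.InputDetail.FilePattern = "*.log"
+//		err = p.UpdateConfig(c)
+//	}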
+func (p *LogProject) UpdateConfig(c *LogConfig) (err error) { + body, err := json.Marshal(c) + if err != nil { + return NewClientError(err.Error()) + } + + h := map[string]string{ + "x-log-bodyrawsize": fmt.Sprintf("%v", len(body)), + "Content-Type": "application/json", + "Accept-Encoding": "deflate", // TODO: support lz4 + } + r, err := request(p, "PUT", "/configs/"+c.Name, h, body) + if err != nil { + return NewClientError(err.Error()) + } + + body, _ = ioutil.ReadAll(r.Body) + if r.StatusCode != http.StatusOK { + err := new(Error) + json.Unmarshal(body, err) + return err + } + return nil +} + +// CreateConfig creates a new config in SLS. +func (p *LogProject) CreateConfig(c *LogConfig) (err error) { + body, err := json.Marshal(c) + if err != nil { + return NewClientError(err.Error()) + } + + h := map[string]string{ + "x-log-bodyrawsize": fmt.Sprintf("%v", len(body)), + "Content-Type": "application/json", + "Accept-Encoding": "deflate", // TODO: support lz4 + } + r, err := request(p, "POST", "/configs", h, body) + if err != nil { + return NewClientError(err.Error()) + } + + body, err = ioutil.ReadAll(r.Body) + if r.StatusCode != http.StatusOK { + err := new(Error) + json.Unmarshal(body, err) + return err + } + return nil +} + +// DeleteConfig deletes a config according by config name. +func (p *LogProject) DeleteConfig(name string) (err error) { + h := map[string]string{ + "x-log-bodyrawsize": "0", + } + r, err := request(p, "DELETE", "/configs/"+name, h, nil) + if err != nil { + return NewClientError(err.Error()) + } + + body, _ := ioutil.ReadAll(r.Body) + if r.StatusCode != http.StatusOK { + err := new(Error) + json.Unmarshal(body, err) + return err + } + + return nil +} + +// GetAppliedMachineGroups returns applied machine group names list according config name. +func (p *LogProject) GetAppliedMachineGroups(confName string) (groupNames []string, err error) { + h := map[string]string{ + "x-log-bodyrawsize": "0", + } + uri := fmt.Sprintf("/configs/%v/machinegroups", confName) + r, err := request(p, "GET", uri, h, nil) + if err != nil { + return nil, NewClientError(err.Error()) + } + + buf, _ := ioutil.ReadAll(r.Body) + if r.StatusCode != http.StatusOK { + err := new(Error) + json.Unmarshal(buf, err) + return nil, err + } + + type Body struct { + Count int + Machinegroups []string + } + body := &Body{} + json.Unmarshal(buf, body) + groupNames = body.Machinegroups + return groupNames, nil +} + +// GetAppliedConfigs returns applied config names list according machine group name groupName. +func (p *LogProject) GetAppliedConfigs(groupName string) (confNames []string, err error) { + h := map[string]string{ + "x-log-bodyrawsize": "0", + } + uri := fmt.Sprintf("/machinegroups/%v/configs", groupName) + r, err := request(p, "GET", uri, h, nil) + if err != nil { + return nil, NewClientError(err.Error()) + } + + buf, _ := ioutil.ReadAll(r.Body) + if r.StatusCode != http.StatusOK { + err := new(Error) + json.Unmarshal(buf, err) + return nil, err + } + + type Cfg struct { + Count int `json:"count"` + Configs []string `json:"configs"` + } + body := &Cfg{} + json.Unmarshal(buf, body) + confNames = body.Configs + return confNames, nil +} + +// ApplyConfigToMachineGroup applies config to machine group. 
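+// An illustrative sketch (both names are assumptions):
+//
+//	if err := p.ApplyConfigToMachineGroup("my-config", "my-group"); err != nil {
+//		// handle error
+//	}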
+func (p *LogProject) ApplyConfigToMachineGroup(confName, groupName string) (err error) { + h := map[string]string{ + "x-log-bodyrawsize": "0", + } + uri := fmt.Sprintf("/machinegroups/%v/configs/%v", groupName, confName) + r, err := request(p, "PUT", uri, h, nil) + if err != nil { + return NewClientError(err.Error()) + } + + buf, _ := ioutil.ReadAll(r.Body) + if r.StatusCode != http.StatusOK { + err := new(Error) + json.Unmarshal(buf, err) + return err + } + + return nil +} + +// RemoveConfigFromMachineGroup removes config from machine group. +func (p *LogProject) RemoveConfigFromMachineGroup(confName, groupName string) (err error) { + h := map[string]string{ + "x-log-bodyrawsize": "0", + } + uri := fmt.Sprintf("/machinegroups/%v/configs/%v", groupName, confName) + r, err := request(p, "DELETE", uri, h, nil) + if err != nil { + return NewClientError(err.Error()) + } + + buf, _ := ioutil.ReadAll(r.Body) + if r.StatusCode != http.StatusOK { + err := new(Error) + json.Unmarshal(buf, err) + return err + } + + return nil +} diff --git a/vendor/src/github.com/galaxydi/go-loghub/log_store.go b/vendor/src/github.com/galaxydi/go-loghub/log_store.go new file mode 100644 index 0000000000000..ec52b2996549e --- /dev/null +++ b/vendor/src/github.com/galaxydi/go-loghub/log_store.go @@ -0,0 +1,436 @@ +package sls + +import ( + "encoding/json" + "fmt" + "io/ioutil" + "net/http" + "net/http/httputil" + "strconv" + + lz4 "github.com/cloudflare/golz4" + "github.com/gogo/protobuf/proto" + "github.com/golang/glog" +) + +// LogStore defines LogStore struct +type LogStore struct { + Name string `json:"logstoreName"` + TTL int + ShardCount int + + CreateTime uint32 + LastModifyTime uint32 + + project *LogProject +} + +// Shard defines shard struct +type Shard struct { + ShardID int `json:"shardID"` +} + +// ListShards returns shard id list of this logstore. +func (s *LogStore) ListShards() (shardIDs []int, err error) { + h := map[string]string{ + "x-log-bodyrawsize": "0", + } + uri := fmt.Sprintf("/logstores/%v/shards", s.Name) + r, err := request(s.project, "GET", uri, h, nil) + if err != nil { + return nil, NewClientError(err.Error()) + } + buf, _ := ioutil.ReadAll(r.Body) + if r.StatusCode != http.StatusOK { + err := &Error{} + json.Unmarshal(buf, err) + return nil, err + } + + var shards []*Shard + json.Unmarshal(buf, &shards) + for _, v := range shards { + shardIDs = append(shardIDs, v.ShardID) + } + return shardIDs, nil +} + +// PutLogs put logs into logstore. +// The callers should transform user logs into LogGroup. +func (s *LogStore) PutLogs(lg *LogGroup) (err error) { + if len(lg.Logs) == 0 { + // empty log group + return nil + } + + body, err := proto.Marshal(lg) + if err != nil { + return NewClientError(err.Error()) + } + + // Compresse body with lz4 + out := make([]byte, lz4.CompressBound(body)) + n, err := lz4.Compress(body, out) + if err != nil { + return NewClientError(err.Error()) + } + + h := map[string]string{ + "x-log-compresstype": "lz4", + "x-log-bodyrawsize": fmt.Sprintf("%v", len(body)), + "Content-Type": "application/x-protobuf", + } + + uri := fmt.Sprintf("/logstores/%v", s.Name) + r, err := request(s.project, "POST", uri, h, out[:n]) + if err != nil { + return NewClientError(err.Error()) + } + + body, _ = ioutil.ReadAll(r.Body) + if r.StatusCode != http.StatusOK { + err := new(Error) + json.Unmarshal(body, err) + return err + } + return nil +} + +// GetCursor gets log cursor of one shard specified by shardId. 
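+// An illustrative call such as s.GetCursor(0, "begin") (the shard id is an
+// assumption) returns a cursor at the head of shard 0.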
+// The from can be in three form: a) unix timestamp in seccond, b) "begin", c) "end". +// For more detail please read: http://gitlab.alibaba-inc.com/sls/doc/blob/master/api/shard.md#logstore +func (s *LogStore) GetCursor(shardID int, from string) (cursor string, err error) { + h := map[string]string{ + "x-log-bodyrawsize": "0", + } + uri := fmt.Sprintf("/logstores/%v/shards/%v?type=cursor&from=%v", + s.Name, shardID, from) + r, err := request(s.project, "GET", uri, h, nil) + if err != nil { + return + } + + buf, err := ioutil.ReadAll(r.Body) + if err != nil { + return + } + + if r.StatusCode != http.StatusOK { + errMsg := &Error{} + err = json.Unmarshal(buf, errMsg) + if err != nil { + err = fmt.Errorf("failed to get cursor") + dump, _ := httputil.DumpResponse(r, true) + if glog.V(1) { + glog.Error(string(dump)) + } + return + } + err = fmt.Errorf("%v:%v", errMsg.Code, errMsg.Message) + return + } + + type Body struct { + Cursor string + } + body := &Body{} + + err = json.Unmarshal(buf, body) + if err != nil { + return + } + cursor = body.Cursor + return +} + +// GetLogsBytes gets logs binary data from shard specified by shardId according cursor and endCursor. +// The logGroupMaxCount is the max number of logGroup could be returned. +// The nextCursor is the next curosr can be used to read logs at next time. +func (s *LogStore) GetLogsBytes(shardID int, cursor, endCursor string, + logGroupMaxCount int) (out []byte, nextCursor string, err error) { + h := map[string]string{ + "x-log-bodyrawsize": "0", + "Accept": "application/x-protobuf", + "Accept-Encoding": "lz4", + } + + uri := "" + if endCursor == "" { + uri = fmt.Sprintf("/logstores/%v/shards/%v?type=logs&cursor=%v&count=%v", + s.Name, shardID, cursor, logGroupMaxCount) + } else { + uri = fmt.Sprintf("/logstores/%v/shards/%v?type=logs&cursor=%v&end_cursor=%v&count=%v", + s.Name, shardID, cursor, endCursor, logGroupMaxCount) + } + + r, err := request(s.project, "GET", uri, h, nil) + if err != nil { + return + } + + buf, err := ioutil.ReadAll(r.Body) + if err != nil { + return + } + + if r.StatusCode != http.StatusOK { + errMsg := &Error{} + err = json.Unmarshal(buf, errMsg) + if err != nil { + err = fmt.Errorf("failed to get cursor") + dump, _ := httputil.DumpResponse(r, true) + if glog.V(1) { + glog.Error(string(dump)) + } + return + } + err = fmt.Errorf("%v:%v", errMsg.Code, errMsg.Message) + return + } + v, ok := r.Header["X-Log-Compresstype"] + if !ok || len(v) == 0 { + err = fmt.Errorf("can't find 'x-log-compresstype' header") + return + } + if v[0] != "lz4" { + err = fmt.Errorf("unexpected compress type:%v", v[0]) + return + } + + v, ok = r.Header["X-Log-Cursor"] + if !ok || len(v) == 0 { + err = fmt.Errorf("can't find 'x-log-cursor' header") + return + } + nextCursor = v[0] + + v, ok = r.Header["X-Log-Bodyrawsize"] + if !ok || len(v) == 0 { + err = fmt.Errorf("can't find 'x-log-bodyrawsize' header") + return + } + bodyRawSize, err := strconv.Atoi(v[0]) + if err != nil { + return + } + + out = make([]byte, bodyRawSize) + err = lz4.Uncompress(buf, out) + if err != nil { + return + } + + return +} + +// LogsBytesDecode decodes logs binary data returned by GetLogsBytes API +func LogsBytesDecode(data []byte) (gl *LogGroupList, err error) { + + gl = &LogGroupList{} + err = proto.Unmarshal(data, gl) + if err != nil { + return nil, err + } + + return gl, nil +} + +// PullLogs gets logs from shard specified by shardId according cursor and endCursor. +// The logGroupMaxCount is the max number of logGroup could be returned. 
+// The nextCursor is the next cursor can be used to read logs at next time. +func (s *LogStore) PullLogs(shardID int, cursor, endCursor string, + logGroupMaxCount int) (gl *LogGroupList, nextCursor string, err error) { + + out, nextCursor, err := s.GetLogsBytes(shardID, cursor, endCursor, logGroupMaxCount) + if err != nil { + return nil, "", err + } + + gl, err = LogsBytesDecode(out) + if err != nil { + return nil, "", err + } + + return gl, nextCursor, nil +} + +// GetHistograms query logs with [from, to) time range +func (s *LogStore) GetHistograms(topic string, from int64, to int64, queryExp string) (*GetHistogramsResponse, error) { + + h := map[string]string{ + "x-log-bodyrawsize": "0", + "Accept": "application/json", + } + + uri := fmt.Sprintf("/logstores/%v?type=histogram&topic=%v&from=%v&to=%v&query=%v", s.Name, topic, from, to, queryExp) + + r, err := request(s.project, "GET", uri, h, nil) + if err != nil { + return nil, NewClientError(err.Error()) + } + + body, _ := ioutil.ReadAll(r.Body) + if r.StatusCode != http.StatusOK { + err := new(Error) + json.Unmarshal(body, err) + return nil, err + } + + histograms := []SingleHistogram{} + err = json.Unmarshal(body, &histograms) + if err != nil { + return nil, err + } + + count, err := strconv.ParseInt(r.Header[GetLogsCountHeader][0], 10, 32) + if err != nil { + return nil, err + } + getHistogramsResponse := GetHistogramsResponse{ + Progress: r.Header[ProgressHeader][0], + Count: count, + Histograms: histograms, + } + + return &getHistogramsResponse, nil +} + +// GetLogs query logs with [from, to) time range +func (s *LogStore) GetLogs(topic string, from int64, to int64, queryExp string, + maxLineNum int64, offset int64, reverse bool) (*GetLogsResponse, error) { + + h := map[string]string{ + "x-log-bodyrawsize": "0", + "Accept": "application/json", + } + + uri := fmt.Sprintf("/logstores/%v?type=log&topic=%v&from=%v&to=%v&query=%v&line=%v&offset=%v&reverse=%v", s.Name, topic, from, to, queryExp, maxLineNum, offset, reverse) + + r, err := request(s.project, "GET", uri, h, nil) + if err != nil { + return nil, NewClientError(err.Error()) + } + + body, _ := ioutil.ReadAll(r.Body) + if r.StatusCode != http.StatusOK { + err := new(Error) + json.Unmarshal(body, err) + return nil, err + } + + logs := []map[string]string{} + err = json.Unmarshal(body, &logs) + if err != nil { + return nil, err + } + + count, err := strconv.ParseInt(r.Header[GetLogsCountHeader][0], 10, 32) + if err != nil { + return nil, err + } + + getLogsResponse := GetLogsResponse{ + Progress: r.Header[ProgressHeader][0], + Count: count, + Logs: logs, + } + + return &getLogsResponse, nil +} + +// CreateIndex ... +func (s *LogStore) CreateIndex(index Index) error { + body, err := json.Marshal(index) + if err != nil { + return err + } + + h := map[string]string{ + "x-log-bodyrawsize": fmt.Sprintf("%v", len(body)), + "Content-Type": "application/json", + "Accept-Encoding": "deflate", // TODO: support lz4 + } + + uri := fmt.Sprintf("/logstores/%s/index", s.Name) + _, err = request(s.project, "POST", uri, h, body) + return err +} + +// UpdateIndex ... 
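+// An illustrative sketch that builds a full-text line index and applies it
+// (the token set and TTL are assumptions):
+//
+//	idx := Index{
+//		TTL:  7,
+//		Line: &IndexLine{Token: []string{" ", ",", ":"}, CaseSensitive: false},
+//	}
+//	if err := s.CreateIndex(idx); err != nil {
+//		// the index may already exist; try updating it instead
+//		err = s.UpdateIndex(idx)
+//	}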
+func (s *LogStore) UpdateIndex(index Index) error {
+	body, err := json.Marshal(index)
+	if err != nil {
+		return err
+	}
+
+	h := map[string]string{
+		"x-log-bodyrawsize": fmt.Sprintf("%v", len(body)),
+		"Content-Type":      "application/json",
+		"Accept-Encoding":   "deflate", // TODO: support lz4
+	}
+
+	uri := fmt.Sprintf("/logstores/%s/index", s.Name)
+	_, err = request(s.project, "PUT", uri, h, body)
+	return err
+}
+
+// DeleteIndex ...
+func (s *LogStore) DeleteIndex() error {
+	type Body struct {
+		// Fields must be exported, otherwise encoding/json silently
+		// marshals an empty object despite the struct tags.
+		Project string `json:"projectName"`
+		Store   string `json:"logstoreName"`
+	}
+
+	body, err := json.Marshal(Body{
+		Project: s.project.Name,
+		Store:   s.Name,
+	})
+	if err != nil {
+		return err
+	}
+
+	h := map[string]string{
+		"x-log-bodyrawsize": fmt.Sprintf("%v", len(body)),
+		"Content-Type":      "application/json",
+		"Accept-Encoding":   "deflate", // TODO: support lz4
+	}
+
+	uri := fmt.Sprintf("/logstores/%s/index", s.Name)
+	_, err = request(s.project, "DELETE", uri, h, body)
+	return err
+}
+
+// GetIndex ...
+func (s *LogStore) GetIndex() (*Index, error) {
+	type Body struct {
+		Project string `json:"projectName"`
+		Store   string `json:"logstoreName"`
+	}
+
+	body, err := json.Marshal(Body{
+		Project: s.project.Name,
+		Store:   s.Name,
+	})
+	if err != nil {
+		return nil, err
+	}
+
+	h := map[string]string{
+		"x-log-bodyrawsize": fmt.Sprintf("%v", len(body)),
+		"Content-Type":      "application/json",
+		"Accept-Encoding":   "deflate", // TODO: support lz4
+	}
+
+	uri := fmt.Sprintf("/logstores/%s/index", s.Name)
+	resp, err := request(s.project, "GET", uri, h, body)
+	if err != nil {
+		return nil, err
+	}
+
+	index := &Index{}
+	data, _ := ioutil.ReadAll(resp.Body)
+	err = json.Unmarshal(data, index)
+	if err != nil {
+		return nil, err
+	}
+
+	return index, err
+}
diff --git a/vendor/src/github.com/galaxydi/go-loghub/machine_group.go b/vendor/src/github.com/galaxydi/go-loghub/machine_group.go
new file mode 100644
index 0000000000000..1c96b28c5f406
--- /dev/null
+++ b/vendor/src/github.com/galaxydi/go-loghub/machine_group.go
@@ -0,0 +1,95 @@
+package sls
+
+import (
+	"encoding/json"
+	"fmt"
+	"io/ioutil"
+	"net/http"
+	"net/http/httputil"
+
+	"github.com/golang/glog"
+)
+
+// MachinGroupAttribute defines machine group attribute
+type MachinGroupAttribute struct {
+	ExternalName string `json:"externalName"`
+	TopicName    string `json:"groupTopic"`
+}
+
+// MachineGroup defines machine group
+type MachineGroup struct {
+	Name          string   `json:"groupName"`
+	Type          string   `json:"groupType"`
+	MachineIDType string   `json:"machineIdentifyType"`
+	MachineIDList []string `json:"machineList"`
+
+	Attribute MachinGroupAttribute `json:"groupAttribute"`
+
+	CreateTime     uint32
+	LastModifyTime uint32
+
+	project *LogProject
+}
+
+// Machine defines machine struct
+type Machine struct {
+	IP            string
+	UniqueID      string `json:"machine-uniqueid"`
+	UserdefinedID string `json:"userdefined-id"`
+}
+
+// MachineList defines machine list
+type MachineList struct {
+	Total    int
+	Machines []*Machine
+}
+
+// ListMachines returns machine list of this machine group.
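+// An illustrative sketch (g is a *MachineGroup obtained via GetMachineGroup):
+//
+//	machines, total, err := g.ListMachines()
+//	if err == nil {
+//		fmt.Println("total machines:", total)
+//		for _, mc := range machines {
+//			fmt.Println(mc.IP, mc.UniqueID)
+//		}
+//	}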
+func (m *MachineGroup) ListMachines() (ms []*Machine, total int, err error) {
+	h := map[string]string{
+		"x-log-bodyrawsize": "0",
+	}
+
+	uri := fmt.Sprintf("/machinegroups/%v/machines", m.Name)
+	r, err := request(m.project, "GET", uri, h, nil)
+	if err != nil {
+		return
+	}
+
+	buf, err := ioutil.ReadAll(r.Body)
+	if err != nil {
+		return
+	}
+
+	if r.StatusCode != http.StatusOK {
+		errMsg := &Error{}
+		err = json.Unmarshal(buf, errMsg)
+		if err != nil {
+			err = fmt.Errorf("failed to list machines of machine group")
+			dump, _ := httputil.DumpResponse(r, true)
+			if glog.V(1) {
+				glog.Error(string(dump))
+			}
+			return
+		}
+		err = fmt.Errorf("%v:%v", errMsg.Code, errMsg.Message)
+		return
+	}
+
+	body := &MachineList{}
+	err = json.Unmarshal(buf, body)
+	if err != nil {
+		return
+	}
+
+	ms = body.Machines
+	total = body.Total
+
+	return
+}
+
+// GetAppliedConfigs returns applied configs of this machine group.
+func (m *MachineGroup) GetAppliedConfigs() (confNames []string, err error) {
+	confNames, err = m.project.GetAppliedConfigs(m.Name)
+	return
+}
diff --git a/vendor/src/github.com/galaxydi/go-loghub/model.go b/vendor/src/github.com/galaxydi/go-loghub/model.go
new file mode 100644
index 0000000000000..31f5a1a7a2570
--- /dev/null
+++ b/vendor/src/github.com/galaxydi/go-loghub/model.go
@@ -0,0 +1,43 @@
+package sls
+
+// SingleHistogram defines one histogram bucket in a GetHistograms response
+type SingleHistogram struct {
+	Progress string `json:"progress"`
+	Count    int64  `json:"count"`
+	From     int64  `json:"from"`
+	To       int64  `json:"to"`
+}
+
+// GetHistogramsResponse defines response from GetHistograms call
+type GetHistogramsResponse struct {
+	Progress   string            `json:"progress"`
+	Count      int64             `json:"count"`
+	Histograms []SingleHistogram `json:"histograms"`
+}
+
+// GetLogsResponse defines response from GetLogs call
+type GetLogsResponse struct {
+	Progress string              `json:"progress"`
+	Count    int64               `json:"count"`
+	Logs     []map[string]string `json:"logs"`
+}
+
+// IndexKey ...
+type IndexKey struct {
+	Token         []string `json:"token"` // tokens that split the log line.
+	CaseSensitive bool     `json:"caseSensitive"`
+	Type          string   `json:"type"` // text, long, double
+}
+
+// IndexLine defines the full-text (line) index configuration
+type IndexLine struct {
+	Token         []string `json:"token"`
+	CaseSensitive bool     `json:"caseSensitive"`
+	IncludeKeys   []string `json:"include_keys,omitempty"`
+	ExcludeKeys   []string `json:"exclude_keys,omitempty"`
+}
+
+// Index is an index config for a log store.
+type Index struct {
+	TTL  int                 `json:"ttl"`
+	Keys map[string]IndexKey `json:"keys,omitempty"`
+	Line *IndexLine          `json:"line,omitempty"`
+}
diff --git a/vendor/src/github.com/galaxydi/go-loghub/request.go b/vendor/src/github.com/galaxydi/go-loghub/request.go
new file mode 100644
index 0000000000000..279a6d4bbb872
--- /dev/null
+++ b/vendor/src/github.com/galaxydi/go-loghub/request.go
@@ -0,0 +1,95 @@
+package sls
+
+import (
+	"bytes"
+	"crypto/md5"
+	"fmt"
+	"net/http"
+	"net/http/httputil"
+
+	"encoding/json"
+	"io/ioutil"
+
+	"github.com/golang/glog"
+)
+
+// request sends a request to SLS.
+func request(project *LogProject, method, uri string, headers map[string]string,
+	body []byte) (*http.Response, error) {
+
+	// The caller should provide 'x-log-bodyrawsize' header
+	if _, ok := headers["x-log-bodyrawsize"]; !ok {
+		return nil, fmt.Errorf("Can't find 'x-log-bodyrawsize' header")
+	}
+
+	// SLS public request headers
+	headers["Host"] = project.Name + "."
+ project.Endpoint + headers["Date"] = nowRFC1123() + headers["x-log-apiversion"] = version + headers["x-log-signaturemethod"] = signatureMethod + + // Access with token + if project.SecurityToken != "" { + headers["x-acs-security-token"] = project.SecurityToken + } + + if body != nil { + bodyMD5 := fmt.Sprintf("%X", md5.Sum(body)) + headers["Content-MD5"] = bodyMD5 + if _, ok := headers["Content-Type"]; !ok { + return nil, fmt.Errorf("Can't find 'Content-Type' header") + } + } + + // Calc Authorization + // Authorization = "SLS :" + digest, err := signature(project, method, uri, headers) + if err != nil { + return nil, err + } + auth := fmt.Sprintf("SLS %v:%v", project.AccessKeyID, digest) + headers["Authorization"] = auth + + // Initialize http request + reader := bytes.NewReader(body) + urlStr := fmt.Sprintf("https://%v.%v%v", project.Name, project.Endpoint, uri) + req, err := http.NewRequest(method, urlStr, reader) + if err != nil { + return nil, err + } + for k, v := range headers { + req.Header.Add(k, v) + } + + if glog.V(1) { + dump, e := httputil.DumpRequest(req, true) + if e != nil { + glog.Info(e) + } + glog.Infof("HTTP Request:\n%v", string(dump)) + } + + // Get ready to do request + resp, err := http.DefaultClient.Do(req) + if err != nil { + return nil, err + } + + // Parse the sls error from body. + if resp.StatusCode != http.StatusOK { + err := &Error{} + buf, _ := ioutil.ReadAll(resp.Body) + json.Unmarshal(buf, err) + err.RequestID = resp.Header.Get("x-log-requestid") + return nil, err + } + + if glog.V(1) { + dump, e := httputil.DumpResponse(resp, true) + if e != nil { + glog.Info(e) + } + glog.Infof("HTTP Response:\n%v", string(dump)) + } + return resp, nil +} diff --git a/vendor/src/github.com/galaxydi/go-loghub/signature.go b/vendor/src/github.com/galaxydi/go-loghub/signature.go new file mode 100644 index 0000000000000..4d17a7360889f --- /dev/null +++ b/vendor/src/github.com/galaxydi/go-loghub/signature.go @@ -0,0 +1,104 @@ +package sls + +import ( + "crypto/hmac" + "crypto/sha1" + "encoding/base64" + "fmt" + "net/url" + "sort" + "strings" + "time" +) + +// GMT location +var gmtLoc = time.FixedZone("GMT", 0) + +// NowRFC1123 returns now time in RFC1123 format with GMT timezone, +// eg. "Mon, 02 Jan 2006 15:04:05 GMT". +func nowRFC1123() string { + return time.Now().In(gmtLoc).Format(time.RFC1123) +} + +// signature calculates a request's signature digest. 
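+// For reference, the string-to-sign assembled below has this layout:
+//
+//	VERB + "\n" +
+//	CONTENT-MD5 + "\n" +
+//	CONTENT-TYPE + "\n" +
+//	DATE + "\n" +
+//	CanonicalizedSLSHeaders + "\n" +
+//	CanonicalizedResource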
+func signature(project *LogProject, method, uri string, + headers map[string]string) (digest string, err error) { + var contentMD5, contentType, date, canoHeaders, canoResource string + var slsHeaderKeys sort.StringSlice + + if val, ok := headers["Content-MD5"]; ok { + contentMD5 = val + } + + if val, ok := headers["Content-Type"]; ok { + contentType = val + } + + date, ok := headers["Date"] + if !ok { + err = fmt.Errorf("Can't find 'Date' header") + return + } + + // Calc CanonicalizedSLSHeaders + slsHeaders := make(map[string]string, len(headers)) + for k, v := range headers { + l := strings.TrimSpace(strings.ToLower(k)) + if strings.HasPrefix(l, "x-log-") || strings.HasPrefix(l, "x-acs-") { + slsHeaders[l] = strings.TrimSpace(v) + slsHeaderKeys = append(slsHeaderKeys, l) + } + } + + sort.Sort(slsHeaderKeys) + for i, k := range slsHeaderKeys { + canoHeaders += k + ":" + slsHeaders[k] + if i+1 < len(slsHeaderKeys) { + canoHeaders += "\n" + } + } + + // Calc CanonicalizedResource + u, err := url.Parse(uri) + if err != nil { + return + } + + canoResource += u.EscapedPath() + if u.RawQuery != "" { + var keys sort.StringSlice + + vals := u.Query() + for k := range vals { + keys = append(keys, k) + } + + sort.Sort(keys) + canoResource += "?" + for i, k := range keys { + if i > 0 { + canoResource += "&" + } + + for _, v := range vals[k] { + canoResource += k + "=" + v + } + } + } + + signStr := method + "\n" + + contentMD5 + "\n" + + contentType + "\n" + + date + "\n" + + canoHeaders + "\n" + + canoResource + + // Signature = base64(hmac-sha1(UTF8-Encoding-Of(SignString),AccessKeySecret)) + mac := hmac.New(sha1.New, []byte(project.AccessKeySecret)) + _, err = mac.Write([]byte(signStr)) + if err != nil { + return + } + digest = base64.StdEncoding.EncodeToString(mac.Sum(nil)) + return +} diff --git a/vendor/src/github.com/golang/glog/LICENSE b/vendor/src/github.com/golang/glog/LICENSE new file mode 100644 index 0000000000000..37ec93a14fdcd --- /dev/null +++ b/vendor/src/github.com/golang/glog/LICENSE @@ -0,0 +1,191 @@ +Apache License +Version 2.0, January 2004 +http://www.apache.org/licenses/ + +TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + +1. Definitions. + +"License" shall mean the terms and conditions for use, reproduction, and +distribution as defined by Sections 1 through 9 of this document. + +"Licensor" shall mean the copyright owner or entity authorized by the copyright +owner that is granting the License. + +"Legal Entity" shall mean the union of the acting entity and all other entities +that control, are controlled by, or are under common control with that entity. +For the purposes of this definition, "control" means (i) the power, direct or +indirect, to cause the direction or management of such entity, whether by +contract or otherwise, or (ii) ownership of fifty percent (50%) or more of the +outstanding shares, or (iii) beneficial ownership of such entity. + +"You" (or "Your") shall mean an individual or Legal Entity exercising +permissions granted by this License. + +"Source" form shall mean the preferred form for making modifications, including +but not limited to software source code, documentation source, and configuration +files. + +"Object" form shall mean any form resulting from mechanical transformation or +translation of a Source form, including but not limited to compiled object code, +generated documentation, and conversions to other media types. 
+ +"Work" shall mean the work of authorship, whether in Source or Object form, made +available under the License, as indicated by a copyright notice that is included +in or attached to the work (an example is provided in the Appendix below). + +"Derivative Works" shall mean any work, whether in Source or Object form, that +is based on (or derived from) the Work and for which the editorial revisions, +annotations, elaborations, or other modifications represent, as a whole, an +original work of authorship. For the purposes of this License, Derivative Works +shall not include works that remain separable from, or merely link (or bind by +name) to the interfaces of, the Work and Derivative Works thereof. + +"Contribution" shall mean any work of authorship, including the original version +of the Work and any modifications or additions to that Work or Derivative Works +thereof, that is intentionally submitted to Licensor for inclusion in the Work +by the copyright owner or by an individual or Legal Entity authorized to submit +on behalf of the copyright owner. For the purposes of this definition, +"submitted" means any form of electronic, verbal, or written communication sent +to the Licensor or its representatives, including but not limited to +communication on electronic mailing lists, source code control systems, and +issue tracking systems that are managed by, or on behalf of, the Licensor for +the purpose of discussing and improving the Work, but excluding communication +that is conspicuously marked or otherwise designated in writing by the copyright +owner as "Not a Contribution." + +"Contributor" shall mean Licensor and any individual or Legal Entity on behalf +of whom a Contribution has been received by Licensor and subsequently +incorporated within the Work. + +2. Grant of Copyright License. + +Subject to the terms and conditions of this License, each Contributor hereby +grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free, +irrevocable copyright license to reproduce, prepare Derivative Works of, +publicly display, publicly perform, sublicense, and distribute the Work and such +Derivative Works in Source or Object form. + +3. Grant of Patent License. + +Subject to the terms and conditions of this License, each Contributor hereby +grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free, +irrevocable (except as stated in this section) patent license to make, have +made, use, offer to sell, sell, import, and otherwise transfer the Work, where +such license applies only to those patent claims licensable by such Contributor +that are necessarily infringed by their Contribution(s) alone or by combination +of their Contribution(s) with the Work to which such Contribution(s) was +submitted. If You institute patent litigation against any entity (including a +cross-claim or counterclaim in a lawsuit) alleging that the Work or a +Contribution incorporated within the Work constitutes direct or contributory +patent infringement, then any patent licenses granted to You under this License +for that Work shall terminate as of the date such litigation is filed. + +4. Redistribution. 
+ +You may reproduce and distribute copies of the Work or Derivative Works thereof +in any medium, with or without modifications, and in Source or Object form, +provided that You meet the following conditions: + +You must give any other recipients of the Work or Derivative Works a copy of +this License; and +You must cause any modified files to carry prominent notices stating that You +changed the files; and +You must retain, in the Source form of any Derivative Works that You distribute, +all copyright, patent, trademark, and attribution notices from the Source form +of the Work, excluding those notices that do not pertain to any part of the +Derivative Works; and +If the Work includes a "NOTICE" text file as part of its distribution, then any +Derivative Works that You distribute must include a readable copy of the +attribution notices contained within such NOTICE file, excluding those notices +that do not pertain to any part of the Derivative Works, in at least one of the +following places: within a NOTICE text file distributed as part of the +Derivative Works; within the Source form or documentation, if provided along +with the Derivative Works; or, within a display generated by the Derivative +Works, if and wherever such third-party notices normally appear. The contents of +the NOTICE file are for informational purposes only and do not modify the +License. You may add Your own attribution notices within Derivative Works that +You distribute, alongside or as an addendum to the NOTICE text from the Work, +provided that such additional attribution notices cannot be construed as +modifying the License. +You may add Your own copyright statement to Your modifications and may provide +additional or different license terms and conditions for use, reproduction, or +distribution of Your modifications, or for any such Derivative Works as a whole, +provided Your use, reproduction, and distribution of the Work otherwise complies +with the conditions stated in this License. + +5. Submission of Contributions. + +Unless You explicitly state otherwise, any Contribution intentionally submitted +for inclusion in the Work by You to the Licensor shall be under the terms and +conditions of this License, without any additional terms or conditions. +Notwithstanding the above, nothing herein shall supersede or modify the terms of +any separate license agreement you may have executed with Licensor regarding +such Contributions. + +6. Trademarks. + +This License does not grant permission to use the trade names, trademarks, +service marks, or product names of the Licensor, except as required for +reasonable and customary use in describing the origin of the Work and +reproducing the content of the NOTICE file. + +7. Disclaimer of Warranty. + +Unless required by applicable law or agreed to in writing, Licensor provides the +Work (and each Contributor provides its Contributions) on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied, +including, without limitation, any warranties or conditions of TITLE, +NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A PARTICULAR PURPOSE. You are +solely responsible for determining the appropriateness of using or +redistributing the Work and assume any risks associated with Your exercise of +permissions under this License. + +8. Limitation of Liability. 
+ +In no event and under no legal theory, whether in tort (including negligence), +contract, or otherwise, unless required by applicable law (such as deliberate +and grossly negligent acts) or agreed to in writing, shall any Contributor be +liable to You for damages, including any direct, indirect, special, incidental, +or consequential damages of any character arising as a result of this License or +out of the use or inability to use the Work (including but not limited to +damages for loss of goodwill, work stoppage, computer failure or malfunction, or +any and all other commercial damages or losses), even if such Contributor has +been advised of the possibility of such damages. + +9. Accepting Warranty or Additional Liability. + +While redistributing the Work or Derivative Works thereof, You may choose to +offer, and charge a fee for, acceptance of support, warranty, indemnity, or +other liability obligations and/or rights consistent with this License. However, +in accepting such obligations, You may act only on Your own behalf and on Your +sole responsibility, not on behalf of any other Contributor, and only if You +agree to indemnify, defend, and hold each Contributor harmless for any liability +incurred by, or claims asserted against, such Contributor by reason of your +accepting any such warranty or additional liability. + +END OF TERMS AND CONDITIONS + +APPENDIX: How to apply the Apache License to your work + +To apply the Apache License to your work, attach the following boilerplate +notice, with the fields enclosed by brackets "[]" replaced with your own +identifying information. (Don't include the brackets!) The text should be +enclosed in the appropriate comment syntax for the file format. We also +recommend that a file or class name and description of purpose be included on +the same "printed page" as the copyright notice for easier identification within +third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/vendor/src/github.com/golang/glog/README b/vendor/src/github.com/golang/glog/README new file mode 100644 index 0000000000000..387b4eb68909b --- /dev/null +++ b/vendor/src/github.com/golang/glog/README @@ -0,0 +1,44 @@ +glog +==== + +Leveled execution logs for Go. + +This is an efficient pure Go implementation of leveled logs in the +manner of the open source C++ package + https://github.com/google/glog + +By binding methods to booleans it is possible to use the log package +without paying the expense of evaluating the arguments to the log. +Through the -vmodule flag, the package also provides fine-grained +control over logging at the file level. + +The comment from glog.go introduces the ideas: + + Package glog implements logging analogous to the Google-internal + C++ INFO/ERROR/V setup. It provides functions Info, Warning, + Error, Fatal, plus formatting variants such as Infof. It + also provides V-style logging controlled by the -v and + -vmodule=file=2 flags. 
+ + Basic examples: + + glog.Info("Prepare to repel boarders") + + glog.Fatalf("Initialization failed: %s", err) + + See the documentation for the V function for an explanation + of these examples: + + if glog.V(2) { + glog.Info("Starting transaction...") + } + + glog.V(2).Infoln("Processed", nItems, "elements") + + +The repository contains an open source version of the log package +used inside Google. The master copy of the source lives inside +Google, not here. The code in this repo is for export only and is not itself +under development. Feature requests will be ignored. + +Send bug reports to golang-nuts@googlegroups.com. diff --git a/vendor/src/github.com/golang/glog/glog.go b/vendor/src/github.com/golang/glog/glog.go new file mode 100644 index 0000000000000..54bd7afdcabec --- /dev/null +++ b/vendor/src/github.com/golang/glog/glog.go @@ -0,0 +1,1180 @@ +// Go support for leveled logs, analogous to https://code.google.com/p/google-glog/ +// +// Copyright 2013 Google Inc. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Package glog implements logging analogous to the Google-internal C++ INFO/ERROR/V setup. +// It provides functions Info, Warning, Error, Fatal, plus formatting variants such as +// Infof. It also provides V-style logging controlled by the -v and -vmodule=file=2 flags. +// +// Basic examples: +// +// glog.Info("Prepare to repel boarders") +// +// glog.Fatalf("Initialization failed: %s", err) +// +// See the documentation for the V function for an explanation of these examples: +// +// if glog.V(2) { +// glog.Info("Starting transaction...") +// } +// +// glog.V(2).Infoln("Processed", nItems, "elements") +// +// Log output is buffered and written periodically using Flush. Programs +// should call Flush before exiting to guarantee all log output is written. +// +// By default, all log statements write to files in a temporary directory. +// This package provides several flags that modify this behavior. +// As a result, flag.Parse must be called before any logging is done. +// +// -logtostderr=false +// Logs are written to standard error instead of to files. +// -alsologtostderr=false +// Logs are written to standard error as well as to files. +// -stderrthreshold=ERROR +// Log events at or above this severity are logged to standard +// error as well as to files. +// -log_dir="" +// Log files will be written to this directory instead of the +// default temporary directory. +// +// Other flags provide aids to debugging. +// +// -log_backtrace_at="" +// When set to a file and line number holding a logging statement, +// such as +// -log_backtrace_at=gopherflakes.go:234 +// a stack trace will be written to the Info log whenever execution +// hits that statement. (Unlike with -vmodule, the ".go" must be +// present.) +// -v=0 +// Enable V-leveled logging at the specified level. 
+// -vmodule="" +// The syntax of the argument is a comma-separated list of pattern=N, +// where pattern is a literal file name (minus the ".go" suffix) or +// "glob" pattern and N is a V level. For instance, +// -vmodule=gopher*=3 +// sets the V level to 3 in all Go files whose names begin "gopher". +// +package glog + +import ( + "bufio" + "bytes" + "errors" + "flag" + "fmt" + "io" + stdLog "log" + "os" + "path/filepath" + "runtime" + "strconv" + "strings" + "sync" + "sync/atomic" + "time" +) + +// severity identifies the sort of log: info, warning etc. It also implements +// the flag.Value interface. The -stderrthreshold flag is of type severity and +// should be modified only through the flag.Value interface. The values match +// the corresponding constants in C++. +type severity int32 // sync/atomic int32 + +// These constants identify the log levels in order of increasing severity. +// A message written to a high-severity log file is also written to each +// lower-severity log file. +const ( + infoLog severity = iota + warningLog + errorLog + fatalLog + numSeverity = 4 +) + +const severityChar = "IWEF" + +var severityName = []string{ + infoLog: "INFO", + warningLog: "WARNING", + errorLog: "ERROR", + fatalLog: "FATAL", +} + +// get returns the value of the severity. +func (s *severity) get() severity { + return severity(atomic.LoadInt32((*int32)(s))) +} + +// set sets the value of the severity. +func (s *severity) set(val severity) { + atomic.StoreInt32((*int32)(s), int32(val)) +} + +// String is part of the flag.Value interface. +func (s *severity) String() string { + return strconv.FormatInt(int64(*s), 10) +} + +// Get is part of the flag.Value interface. +func (s *severity) Get() interface{} { + return *s +} + +// Set is part of the flag.Value interface. +func (s *severity) Set(value string) error { + var threshold severity + // Is it a known name? + if v, ok := severityByName(value); ok { + threshold = v + } else { + v, err := strconv.Atoi(value) + if err != nil { + return err + } + threshold = severity(v) + } + logging.stderrThreshold.set(threshold) + return nil +} + +func severityByName(s string) (severity, bool) { + s = strings.ToUpper(s) + for i, name := range severityName { + if name == s { + return severity(i), true + } + } + return 0, false +} + +// OutputStats tracks the number of output lines and bytes written. +type OutputStats struct { + lines int64 + bytes int64 +} + +// Lines returns the number of lines written. +func (s *OutputStats) Lines() int64 { + return atomic.LoadInt64(&s.lines) +} + +// Bytes returns the number of bytes written. +func (s *OutputStats) Bytes() int64 { + return atomic.LoadInt64(&s.bytes) +} + +// Stats tracks the number of lines of output and number of bytes +// per severity level. Values must be read with atomic.LoadInt64. +var Stats struct { + Info, Warning, Error OutputStats +} + +var severityStats = [numSeverity]*OutputStats{ + infoLog: &Stats.Info, + warningLog: &Stats.Warning, + errorLog: &Stats.Error, +} + +// Level is exported because it appears in the arguments to V and is +// the type of the v flag, which can be set programmatically. +// It's a distinct type because we want to discriminate it from logType. +// Variables of type level are only changed under logging.mu. +// The -v flag is read only with atomic ops, so the state of the logging +// module is consistent. + +// Level is treated as a sync/atomic int32. + +// Level specifies a level of verbosity for V logs. 
*Level implements +// flag.Value; the -v flag is of type Level and should be modified +// only through the flag.Value interface. +type Level int32 + +// get returns the value of the Level. +func (l *Level) get() Level { + return Level(atomic.LoadInt32((*int32)(l))) +} + +// set sets the value of the Level. +func (l *Level) set(val Level) { + atomic.StoreInt32((*int32)(l), int32(val)) +} + +// String is part of the flag.Value interface. +func (l *Level) String() string { + return strconv.FormatInt(int64(*l), 10) +} + +// Get is part of the flag.Value interface. +func (l *Level) Get() interface{} { + return *l +} + +// Set is part of the flag.Value interface. +func (l *Level) Set(value string) error { + v, err := strconv.Atoi(value) + if err != nil { + return err + } + logging.mu.Lock() + defer logging.mu.Unlock() + logging.setVState(Level(v), logging.vmodule.filter, false) + return nil +} + +// moduleSpec represents the setting of the -vmodule flag. +type moduleSpec struct { + filter []modulePat +} + +// modulePat contains a filter for the -vmodule flag. +// It holds a verbosity level and a file pattern to match. +type modulePat struct { + pattern string + literal bool // The pattern is a literal string + level Level +} + +// match reports whether the file matches the pattern. It uses a string +// comparison if the pattern contains no metacharacters. +func (m *modulePat) match(file string) bool { + if m.literal { + return file == m.pattern + } + match, _ := filepath.Match(m.pattern, file) + return match +} + +func (m *moduleSpec) String() string { + // Lock because the type is not atomic. TODO: clean this up. + logging.mu.Lock() + defer logging.mu.Unlock() + var b bytes.Buffer + for i, f := range m.filter { + if i > 0 { + b.WriteRune(',') + } + fmt.Fprintf(&b, "%s=%d", f.pattern, f.level) + } + return b.String() +} + +// Get is part of the (Go 1.2) flag.Getter interface. It always returns nil for this flag type since the +// struct is not exported. +func (m *moduleSpec) Get() interface{} { + return nil +} + +var errVmoduleSyntax = errors.New("syntax error: expect comma-separated list of filename=N") + +// Syntax: -vmodule=recordio=2,file=1,gfs*=3 +func (m *moduleSpec) Set(value string) error { + var filter []modulePat + for _, pat := range strings.Split(value, ",") { + if len(pat) == 0 { + // Empty strings such as from a trailing comma can be ignored. + continue + } + patLev := strings.Split(pat, "=") + if len(patLev) != 2 || len(patLev[0]) == 0 || len(patLev[1]) == 0 { + return errVmoduleSyntax + } + pattern := patLev[0] + v, err := strconv.Atoi(patLev[1]) + if err != nil { + return errors.New("syntax error: expect comma-separated list of filename=N") + } + if v < 0 { + return errors.New("negative value for vmodule level") + } + if v == 0 { + continue // Ignore. It's harmless but no point in paying the overhead. + } + // TODO: check syntax of filter? + filter = append(filter, modulePat{pattern, isLiteral(pattern), Level(v)}) + } + logging.mu.Lock() + defer logging.mu.Unlock() + logging.setVState(logging.verbosity, filter, true) + return nil +} + +// isLiteral reports whether the pattern is a literal string, that is, has no metacharacters +// that require filepath.Match to be called to match the pattern. +func isLiteral(pattern string) bool { + return !strings.ContainsAny(pattern, `\*?[]`) +} + +// traceLocation represents the setting of the -log_backtrace_at flag. +type traceLocation struct { + file string + line int +} + +// isSet reports whether the trace location has been specified. 
+// logging.mu is held. +func (t *traceLocation) isSet() bool { + return t.line > 0 +} + +// match reports whether the specified file and line matches the trace location. +// The argument file name is the full path, not the basename specified in the flag. +// logging.mu is held. +func (t *traceLocation) match(file string, line int) bool { + if t.line != line { + return false + } + if i := strings.LastIndex(file, "/"); i >= 0 { + file = file[i+1:] + } + return t.file == file +} + +func (t *traceLocation) String() string { + // Lock because the type is not atomic. TODO: clean this up. + logging.mu.Lock() + defer logging.mu.Unlock() + return fmt.Sprintf("%s:%d", t.file, t.line) +} + +// Get is part of the (Go 1.2) flag.Getter interface. It always returns nil for this flag type since the +// struct is not exported +func (t *traceLocation) Get() interface{} { + return nil +} + +var errTraceSyntax = errors.New("syntax error: expect file.go:234") + +// Syntax: -log_backtrace_at=gopherflakes.go:234 +// Note that unlike vmodule the file extension is included here. +func (t *traceLocation) Set(value string) error { + if value == "" { + // Unset. + t.line = 0 + t.file = "" + } + fields := strings.Split(value, ":") + if len(fields) != 2 { + return errTraceSyntax + } + file, line := fields[0], fields[1] + if !strings.Contains(file, ".") { + return errTraceSyntax + } + v, err := strconv.Atoi(line) + if err != nil { + return errTraceSyntax + } + if v <= 0 { + return errors.New("negative or zero value for level") + } + logging.mu.Lock() + defer logging.mu.Unlock() + t.line = v + t.file = file + return nil +} + +// flushSyncWriter is the interface satisfied by logging destinations. +type flushSyncWriter interface { + Flush() error + Sync() error + io.Writer +} + +func init() { + flag.BoolVar(&logging.toStderr, "logtostderr", false, "log to standard error instead of files") + flag.BoolVar(&logging.alsoToStderr, "alsologtostderr", false, "log to standard error as well as files") + flag.Var(&logging.verbosity, "v", "log level for V logs") + flag.Var(&logging.stderrThreshold, "stderrthreshold", "logs at or above this threshold go to stderr") + flag.Var(&logging.vmodule, "vmodule", "comma-separated list of pattern=N settings for file-filtered logging") + flag.Var(&logging.traceLocation, "log_backtrace_at", "when logging hits line file:N, emit a stack trace") + + // Default stderrThreshold is ERROR. + logging.stderrThreshold = errorLog + + logging.setVState(0, nil, false) + go logging.flushDaemon() +} + +// Flush flushes all pending log I/O. +func Flush() { + logging.lockAndFlushAll() +} + +// loggingT collects all the global state of the logging setup. +type loggingT struct { + // Boolean flags. Not handled atomically because the flag.Value interface + // does not let us avoid the =true, and that shorthand is necessary for + // compatibility. TODO: does this matter enough to fix? Seems unlikely. + toStderr bool // The -logtostderr flag. + alsoToStderr bool // The -alsologtostderr flag. + + // Level flag. Handled atomically. + stderrThreshold severity // The -stderrthreshold flag. + + // freeList is a list of byte buffers, maintained under freeListMu. + freeList *buffer + // freeListMu maintains the free list. It is separate from the main mutex + // so buffers can be grabbed and printed to without holding the main lock, + // for better parallelization. + freeListMu sync.Mutex + + // mu protects the remaining elements of this structure and is + // used to synchronize logging. 
+ mu sync.Mutex + // file holds writer for each of the log types. + file [numSeverity]flushSyncWriter + // pcs is used in V to avoid an allocation when computing the caller's PC. + pcs [1]uintptr + // vmap is a cache of the V Level for each V() call site, identified by PC. + // It is wiped whenever the vmodule flag changes state. + vmap map[uintptr]Level + // filterLength stores the length of the vmodule filter chain. If greater + // than zero, it means vmodule is enabled. It may be read safely + // using sync.LoadInt32, but is only modified under mu. + filterLength int32 + // traceLocation is the state of the -log_backtrace_at flag. + traceLocation traceLocation + // These flags are modified only under lock, although verbosity may be fetched + // safely using atomic.LoadInt32. + vmodule moduleSpec // The state of the -vmodule flag. + verbosity Level // V logging level, the value of the -v flag/ +} + +// buffer holds a byte Buffer for reuse. The zero value is ready for use. +type buffer struct { + bytes.Buffer + tmp [64]byte // temporary byte array for creating headers. + next *buffer +} + +var logging loggingT + +// setVState sets a consistent state for V logging. +// l.mu is held. +func (l *loggingT) setVState(verbosity Level, filter []modulePat, setFilter bool) { + // Turn verbosity off so V will not fire while we are in transition. + logging.verbosity.set(0) + // Ditto for filter length. + atomic.StoreInt32(&logging.filterLength, 0) + + // Set the new filters and wipe the pc->Level map if the filter has changed. + if setFilter { + logging.vmodule.filter = filter + logging.vmap = make(map[uintptr]Level) + } + + // Things are consistent now, so enable filtering and verbosity. + // They are enabled in order opposite to that in V. + atomic.StoreInt32(&logging.filterLength, int32(len(filter))) + logging.verbosity.set(verbosity) +} + +// getBuffer returns a new, ready-to-use buffer. +func (l *loggingT) getBuffer() *buffer { + l.freeListMu.Lock() + b := l.freeList + if b != nil { + l.freeList = b.next + } + l.freeListMu.Unlock() + if b == nil { + b = new(buffer) + } else { + b.next = nil + b.Reset() + } + return b +} + +// putBuffer returns a buffer to the free list. +func (l *loggingT) putBuffer(b *buffer) { + if b.Len() >= 256 { + // Let big buffers die a natural death. + return + } + l.freeListMu.Lock() + b.next = l.freeList + l.freeList = b + l.freeListMu.Unlock() +} + +var timeNow = time.Now // Stubbed out for testing. + +/* +header formats a log header as defined by the C++ implementation. +It returns a buffer containing the formatted header and the user's file and line number. +The depth specifies how many stack frames above lives the source line to be identified in the log message. + +Log lines have this form: + Lmmdd hh:mm:ss.uuuuuu threadid file:line] msg... +where the fields are defined as follows: + L A single character, representing the log level (eg 'I' for INFO) + mm The month (zero padded; ie May is '05') + dd The day (zero padded) + hh:mm:ss.uuuuuu Time in hours, minutes and fractional seconds + threadid The space-padded thread ID as returned by GetTID() + file The file name + line The line number + msg The user-supplied message +*/ +func (l *loggingT) header(s severity, depth int) (*buffer, string, int) { + _, file, line, ok := runtime.Caller(3 + depth) + if !ok { + file = "???" 
+ line = 1 + } else { + slash := strings.LastIndex(file, "/") + if slash >= 0 { + file = file[slash+1:] + } + } + return l.formatHeader(s, file, line), file, line +} + +// formatHeader formats a log header using the provided file name and line number. +func (l *loggingT) formatHeader(s severity, file string, line int) *buffer { + now := timeNow() + if line < 0 { + line = 0 // not a real line number, but acceptable to someDigits + } + if s > fatalLog { + s = infoLog // for safety. + } + buf := l.getBuffer() + + // Avoid Fprintf, for speed. The format is so simple that we can do it quickly by hand. + // It's worth about 3X. Fprintf is hard. + _, month, day := now.Date() + hour, minute, second := now.Clock() + // Lmmdd hh:mm:ss.uuuuuu threadid file:line] + buf.tmp[0] = severityChar[s] + buf.twoDigits(1, int(month)) + buf.twoDigits(3, day) + buf.tmp[5] = ' ' + buf.twoDigits(6, hour) + buf.tmp[8] = ':' + buf.twoDigits(9, minute) + buf.tmp[11] = ':' + buf.twoDigits(12, second) + buf.tmp[14] = '.' + buf.nDigits(6, 15, now.Nanosecond()/1000, '0') + buf.tmp[21] = ' ' + buf.nDigits(7, 22, pid, ' ') // TODO: should be TID + buf.tmp[29] = ' ' + buf.Write(buf.tmp[:30]) + buf.WriteString(file) + buf.tmp[0] = ':' + n := buf.someDigits(1, line) + buf.tmp[n+1] = ']' + buf.tmp[n+2] = ' ' + buf.Write(buf.tmp[:n+3]) + return buf +} + +// Some custom tiny helper functions to print the log header efficiently. + +const digits = "0123456789" + +// twoDigits formats a zero-prefixed two-digit integer at buf.tmp[i]. +func (buf *buffer) twoDigits(i, d int) { + buf.tmp[i+1] = digits[d%10] + d /= 10 + buf.tmp[i] = digits[d%10] +} + +// nDigits formats an n-digit integer at buf.tmp[i], +// padding with pad on the left. +// It assumes d >= 0. +func (buf *buffer) nDigits(n, i, d int, pad byte) { + j := n - 1 + for ; j >= 0 && d > 0; j-- { + buf.tmp[i+j] = digits[d%10] + d /= 10 + } + for ; j >= 0; j-- { + buf.tmp[i+j] = pad + } +} + +// someDigits formats a zero-prefixed variable-width integer at buf.tmp[i]. +func (buf *buffer) someDigits(i, d int) int { + // Print into the top, then copy down. We know there's space for at least + // a 10-digit number. + j := len(buf.tmp) + for { + j-- + buf.tmp[j] = digits[d%10] + d /= 10 + if d == 0 { + break + } + } + return copy(buf.tmp[i:], buf.tmp[j:]) +} + +func (l *loggingT) println(s severity, args ...interface{}) { + buf, file, line := l.header(s, 0) + fmt.Fprintln(buf, args...) + l.output(s, buf, file, line, false) +} + +func (l *loggingT) print(s severity, args ...interface{}) { + l.printDepth(s, 1, args...) +} + +func (l *loggingT) printDepth(s severity, depth int, args ...interface{}) { + buf, file, line := l.header(s, depth) + fmt.Fprint(buf, args...) + if buf.Bytes()[buf.Len()-1] != '\n' { + buf.WriteByte('\n') + } + l.output(s, buf, file, line, false) +} + +func (l *loggingT) printf(s severity, format string, args ...interface{}) { + buf, file, line := l.header(s, 0) + fmt.Fprintf(buf, format, args...) + if buf.Bytes()[buf.Len()-1] != '\n' { + buf.WriteByte('\n') + } + l.output(s, buf, file, line, false) +} + +// printWithFileLine behaves like print but uses the provided file and line number. If +// alsoLogToStderr is true, the log message always appears on standard error; it +// will also appear in the log file unless --logtostderr is set. +func (l *loggingT) printWithFileLine(s severity, file string, line int, alsoToStderr bool, args ...interface{}) { + buf := l.formatHeader(s, file, line) + fmt.Fprint(buf, args...) 
+ if buf.Bytes()[buf.Len()-1] != '\n' { + buf.WriteByte('\n') + } + l.output(s, buf, file, line, alsoToStderr) +} + +// output writes the data to the log files and releases the buffer. +func (l *loggingT) output(s severity, buf *buffer, file string, line int, alsoToStderr bool) { + l.mu.Lock() + if l.traceLocation.isSet() { + if l.traceLocation.match(file, line) { + buf.Write(stacks(false)) + } + } + data := buf.Bytes() + if !flag.Parsed() { + os.Stderr.Write([]byte("ERROR: logging before flag.Parse: ")) + os.Stderr.Write(data) + } else if l.toStderr { + os.Stderr.Write(data) + } else { + if alsoToStderr || l.alsoToStderr || s >= l.stderrThreshold.get() { + os.Stderr.Write(data) + } + if l.file[s] == nil { + if err := l.createFiles(s); err != nil { + os.Stderr.Write(data) // Make sure the message appears somewhere. + l.exit(err) + } + } + switch s { + case fatalLog: + l.file[fatalLog].Write(data) + fallthrough + case errorLog: + l.file[errorLog].Write(data) + fallthrough + case warningLog: + l.file[warningLog].Write(data) + fallthrough + case infoLog: + l.file[infoLog].Write(data) + } + } + if s == fatalLog { + // If we got here via Exit rather than Fatal, print no stacks. + if atomic.LoadUint32(&fatalNoStacks) > 0 { + l.mu.Unlock() + timeoutFlush(10 * time.Second) + os.Exit(1) + } + // Dump all goroutine stacks before exiting. + // First, make sure we see the trace for the current goroutine on standard error. + // If -logtostderr has been specified, the loop below will do that anyway + // as the first stack in the full dump. + if !l.toStderr { + os.Stderr.Write(stacks(false)) + } + // Write the stack trace for all goroutines to the files. + trace := stacks(true) + logExitFunc = func(error) {} // If we get a write error, we'll still exit below. + for log := fatalLog; log >= infoLog; log-- { + if f := l.file[log]; f != nil { // Can be nil if -logtostderr is set. + f.Write(trace) + } + } + l.mu.Unlock() + timeoutFlush(10 * time.Second) + os.Exit(255) // C++ uses -1, which is silly because it's anded with 255 anyway. + } + l.putBuffer(buf) + l.mu.Unlock() + if stats := severityStats[s]; stats != nil { + atomic.AddInt64(&stats.lines, 1) + atomic.AddInt64(&stats.bytes, int64(len(data))) + } +} + +// timeoutFlush calls Flush and returns when it completes or after timeout +// elapses, whichever happens first. This is needed because the hooks invoked +// by Flush may deadlock when glog.Fatal is called from a hook that holds +// a lock. +func timeoutFlush(timeout time.Duration) { + done := make(chan bool, 1) + go func() { + Flush() // calls logging.lockAndFlushAll() + done <- true + }() + select { + case <-done: + case <-time.After(timeout): + fmt.Fprintln(os.Stderr, "glog: Flush took longer than", timeout) + } +} + +// stacks is a wrapper for runtime.Stack that attempts to recover the data for all goroutines. +func stacks(all bool) []byte { + // We don't know how big the traces are, so grow a few times if they don't fit. Start large, though. + n := 10000 + if all { + n = 100000 + } + var trace []byte + for i := 0; i < 5; i++ { + trace = make([]byte, n) + nbytes := runtime.Stack(trace, all) + if nbytes < len(trace) { + return trace[:nbytes] + } + n *= 2 + } + return trace +} + +// logExitFunc provides a simple mechanism to override the default behavior +// of exiting on error. Used in testing and to guarantee we reach a required exit +// for fatal logs. Instead, exit could be a function rather than a method but that +// would make its use clumsier. 
+var logExitFunc func(error) + +// exit is called if there is trouble creating or writing log files. +// It flushes the logs and exits the program; there's no point in hanging around. +// l.mu is held. +func (l *loggingT) exit(err error) { + fmt.Fprintf(os.Stderr, "log: exiting because of error: %s\n", err) + // If logExitFunc is set, we do that instead of exiting. + if logExitFunc != nil { + logExitFunc(err) + return + } + l.flushAll() + os.Exit(2) +} + +// syncBuffer joins a bufio.Writer to its underlying file, providing access to the +// file's Sync method and providing a wrapper for the Write method that provides log +// file rotation. There are conflicting methods, so the file cannot be embedded. +// l.mu is held for all its methods. +type syncBuffer struct { + logger *loggingT + *bufio.Writer + file *os.File + sev severity + nbytes uint64 // The number of bytes written to this file +} + +func (sb *syncBuffer) Sync() error { + return sb.file.Sync() +} + +func (sb *syncBuffer) Write(p []byte) (n int, err error) { + if sb.nbytes+uint64(len(p)) >= MaxSize { + if err := sb.rotateFile(time.Now()); err != nil { + sb.logger.exit(err) + } + } + n, err = sb.Writer.Write(p) + sb.nbytes += uint64(n) + if err != nil { + sb.logger.exit(err) + } + return +} + +// rotateFile closes the syncBuffer's file and starts a new one. +func (sb *syncBuffer) rotateFile(now time.Time) error { + if sb.file != nil { + sb.Flush() + sb.file.Close() + } + var err error + sb.file, _, err = create(severityName[sb.sev], now) + sb.nbytes = 0 + if err != nil { + return err + } + + sb.Writer = bufio.NewWriterSize(sb.file, bufferSize) + + // Write header. + var buf bytes.Buffer + fmt.Fprintf(&buf, "Log file created at: %s\n", now.Format("2006/01/02 15:04:05")) + fmt.Fprintf(&buf, "Running on machine: %s\n", host) + fmt.Fprintf(&buf, "Binary: Built with %s %s for %s/%s\n", runtime.Compiler, runtime.Version(), runtime.GOOS, runtime.GOARCH) + fmt.Fprintf(&buf, "Log line format: [IWEF]mmdd hh:mm:ss.uuuuuu threadid file:line] msg\n") + n, err := sb.file.Write(buf.Bytes()) + sb.nbytes += uint64(n) + return err +} + +// bufferSize sizes the buffer associated with each log file. It's large +// so that log records can accumulate without the logging thread blocking +// on disk I/O. The flushDaemon will block instead. +const bufferSize = 256 * 1024 + +// createFiles creates all the log files for severity from sev down to infoLog. +// l.mu is held. +func (l *loggingT) createFiles(sev severity) error { + now := time.Now() + // Files are created in decreasing severity order, so as soon as we find one + // has already been created, we can stop. + for s := sev; s >= infoLog && l.file[s] == nil; s-- { + sb := &syncBuffer{ + logger: l, + sev: s, + } + if err := sb.rotateFile(now); err != nil { + return err + } + l.file[s] = sb + } + return nil +} + +const flushInterval = 30 * time.Second + +// flushDaemon periodically flushes the log file buffers. +func (l *loggingT) flushDaemon() { + for _ = range time.NewTicker(flushInterval).C { + l.lockAndFlushAll() + } +} + +// lockAndFlushAll is like flushAll but locks l.mu first. +func (l *loggingT) lockAndFlushAll() { + l.mu.Lock() + l.flushAll() + l.mu.Unlock() +} + +// flushAll flushes all the logs and attempts to "sync" their data to disk. +// l.mu is held. +func (l *loggingT) flushAll() { + // Flush from fatal down, in case there's trouble flushing. 
+ for s := fatalLog; s >= infoLog; s-- { + file := l.file[s] + if file != nil { + file.Flush() // ignore error + file.Sync() // ignore error + } + } +} + +// CopyStandardLogTo arranges for messages written to the Go "log" package's +// default logs to also appear in the Google logs for the named and lower +// severities. Subsequent changes to the standard log's default output location +// or format may break this behavior. +// +// Valid names are "INFO", "WARNING", "ERROR", and "FATAL". If the name is not +// recognized, CopyStandardLogTo panics. +func CopyStandardLogTo(name string) { + sev, ok := severityByName(name) + if !ok { + panic(fmt.Sprintf("log.CopyStandardLogTo(%q): unrecognized severity name", name)) + } + // Set a log format that captures the user's file and line: + // d.go:23: message + stdLog.SetFlags(stdLog.Lshortfile) + stdLog.SetOutput(logBridge(sev)) +} + +// logBridge provides the Write method that enables CopyStandardLogTo to connect +// Go's standard logs to the logs provided by this package. +type logBridge severity + +// Write parses the standard logging line and passes its components to the +// logger for severity(lb). +func (lb logBridge) Write(b []byte) (n int, err error) { + var ( + file = "???" + line = 1 + text string + ) + // Split "d.go:23: message" into "d.go", "23", and "message". + if parts := bytes.SplitN(b, []byte{':'}, 3); len(parts) != 3 || len(parts[0]) < 1 || len(parts[2]) < 1 { + text = fmt.Sprintf("bad log format: %s", b) + } else { + file = string(parts[0]) + text = string(parts[2][1:]) // skip leading space + line, err = strconv.Atoi(string(parts[1])) + if err != nil { + text = fmt.Sprintf("bad line number: %s", b) + line = 1 + } + } + // printWithFileLine with alsoToStderr=true, so standard log messages + // always appear on standard error. + logging.printWithFileLine(severity(lb), file, line, true, text) + return len(b), nil +} + +// setV computes and remembers the V level for a given PC +// when vmodule is enabled. +// File pattern matching takes the basename of the file, stripped +// of its .go suffix, and uses filepath.Match, which is a little more +// general than the *? matching used in C++. +// l.mu is held. +func (l *loggingT) setV(pc uintptr) Level { + fn := runtime.FuncForPC(pc) + file, _ := fn.FileLine(pc) + // The file is something like /a/b/c/d.go. We want just the d. + if strings.HasSuffix(file, ".go") { + file = file[:len(file)-3] + } + if slash := strings.LastIndex(file, "/"); slash >= 0 { + file = file[slash+1:] + } + for _, filter := range l.vmodule.filter { + if filter.match(file) { + l.vmap[pc] = filter.level + return filter.level + } + } + l.vmap[pc] = 0 + return 0 +} + +// Verbose is a boolean type that implements Infof (like Printf) etc. +// See the documentation of V for more information. +type Verbose bool + +// V reports whether verbosity at the call site is at least the requested level. +// The returned value is a boolean of type Verbose, which implements Info, Infoln +// and Infof. These methods will write to the Info log if called. +// Thus, one may write either +// if glog.V(2) { glog.Info("log this") } +// or +// glog.V(2).Info("log this") +// The second form is shorter but the first is cheaper if logging is off because it does +// not evaluate its arguments. +// +// Whether an individual call to V generates a log record depends on the setting of +// the -v and --vmodule flags; both are off by default. 
If the level in the call to +// V is at least the value of -v, or of -vmodule for the source file containing the +// call, the V call will log. +func V(level Level) Verbose { + // This function tries hard to be cheap unless there's work to do. + // The fast path is two atomic loads and compares. + + // Here is a cheap but safe test to see if V logging is enabled globally. + if logging.verbosity.get() >= level { + return Verbose(true) + } + + // It's off globally but it vmodule may still be set. + // Here is another cheap but safe test to see if vmodule is enabled. + if atomic.LoadInt32(&logging.filterLength) > 0 { + // Now we need a proper lock to use the logging structure. The pcs field + // is shared so we must lock before accessing it. This is fairly expensive, + // but if V logging is enabled we're slow anyway. + logging.mu.Lock() + defer logging.mu.Unlock() + if runtime.Callers(2, logging.pcs[:]) == 0 { + return Verbose(false) + } + v, ok := logging.vmap[logging.pcs[0]] + if !ok { + v = logging.setV(logging.pcs[0]) + } + return Verbose(v >= level) + } + return Verbose(false) +} + +// Info is equivalent to the global Info function, guarded by the value of v. +// See the documentation of V for usage. +func (v Verbose) Info(args ...interface{}) { + if v { + logging.print(infoLog, args...) + } +} + +// Infoln is equivalent to the global Infoln function, guarded by the value of v. +// See the documentation of V for usage. +func (v Verbose) Infoln(args ...interface{}) { + if v { + logging.println(infoLog, args...) + } +} + +// Infof is equivalent to the global Infof function, guarded by the value of v. +// See the documentation of V for usage. +func (v Verbose) Infof(format string, args ...interface{}) { + if v { + logging.printf(infoLog, format, args...) + } +} + +// Info logs to the INFO log. +// Arguments are handled in the manner of fmt.Print; a newline is appended if missing. +func Info(args ...interface{}) { + logging.print(infoLog, args...) +} + +// InfoDepth acts as Info but uses depth to determine which call frame to log. +// InfoDepth(0, "msg") is the same as Info("msg"). +func InfoDepth(depth int, args ...interface{}) { + logging.printDepth(infoLog, depth, args...) +} + +// Infoln logs to the INFO log. +// Arguments are handled in the manner of fmt.Println; a newline is appended if missing. +func Infoln(args ...interface{}) { + logging.println(infoLog, args...) +} + +// Infof logs to the INFO log. +// Arguments are handled in the manner of fmt.Printf; a newline is appended if missing. +func Infof(format string, args ...interface{}) { + logging.printf(infoLog, format, args...) +} + +// Warning logs to the WARNING and INFO logs. +// Arguments are handled in the manner of fmt.Print; a newline is appended if missing. +func Warning(args ...interface{}) { + logging.print(warningLog, args...) +} + +// WarningDepth acts as Warning but uses depth to determine which call frame to log. +// WarningDepth(0, "msg") is the same as Warning("msg"). +func WarningDepth(depth int, args ...interface{}) { + logging.printDepth(warningLog, depth, args...) +} + +// Warningln logs to the WARNING and INFO logs. +// Arguments are handled in the manner of fmt.Println; a newline is appended if missing. +func Warningln(args ...interface{}) { + logging.println(warningLog, args...) +} + +// Warningf logs to the WARNING and INFO logs. +// Arguments are handled in the manner of fmt.Printf; a newline is appended if missing. 
+func Warningf(format string, args ...interface{}) { + logging.printf(warningLog, format, args...) +} + +// Error logs to the ERROR, WARNING, and INFO logs. +// Arguments are handled in the manner of fmt.Print; a newline is appended if missing. +func Error(args ...interface{}) { + logging.print(errorLog, args...) +} + +// ErrorDepth acts as Error but uses depth to determine which call frame to log. +// ErrorDepth(0, "msg") is the same as Error("msg"). +func ErrorDepth(depth int, args ...interface{}) { + logging.printDepth(errorLog, depth, args...) +} + +// Errorln logs to the ERROR, WARNING, and INFO logs. +// Arguments are handled in the manner of fmt.Println; a newline is appended if missing. +func Errorln(args ...interface{}) { + logging.println(errorLog, args...) +} + +// Errorf logs to the ERROR, WARNING, and INFO logs. +// Arguments are handled in the manner of fmt.Printf; a newline is appended if missing. +func Errorf(format string, args ...interface{}) { + logging.printf(errorLog, format, args...) +} + +// Fatal logs to the FATAL, ERROR, WARNING, and INFO logs, +// including a stack trace of all running goroutines, then calls os.Exit(255). +// Arguments are handled in the manner of fmt.Print; a newline is appended if missing. +func Fatal(args ...interface{}) { + logging.print(fatalLog, args...) +} + +// FatalDepth acts as Fatal but uses depth to determine which call frame to log. +// FatalDepth(0, "msg") is the same as Fatal("msg"). +func FatalDepth(depth int, args ...interface{}) { + logging.printDepth(fatalLog, depth, args...) +} + +// Fatalln logs to the FATAL, ERROR, WARNING, and INFO logs, +// including a stack trace of all running goroutines, then calls os.Exit(255). +// Arguments are handled in the manner of fmt.Println; a newline is appended if missing. +func Fatalln(args ...interface{}) { + logging.println(fatalLog, args...) +} + +// Fatalf logs to the FATAL, ERROR, WARNING, and INFO logs, +// including a stack trace of all running goroutines, then calls os.Exit(255). +// Arguments are handled in the manner of fmt.Printf; a newline is appended if missing. +func Fatalf(format string, args ...interface{}) { + logging.printf(fatalLog, format, args...) +} + +// fatalNoStacks is non-zero if we are to exit without dumping goroutine stacks. +// It allows Exit and relatives to use the Fatal logs. +var fatalNoStacks uint32 + +// Exit logs to the FATAL, ERROR, WARNING, and INFO logs, then calls os.Exit(1). +// Arguments are handled in the manner of fmt.Print; a newline is appended if missing. +func Exit(args ...interface{}) { + atomic.StoreUint32(&fatalNoStacks, 1) + logging.print(fatalLog, args...) +} + +// ExitDepth acts as Exit but uses depth to determine which call frame to log. +// ExitDepth(0, "msg") is the same as Exit("msg"). +func ExitDepth(depth int, args ...interface{}) { + atomic.StoreUint32(&fatalNoStacks, 1) + logging.printDepth(fatalLog, depth, args...) +} + +// Exitln logs to the FATAL, ERROR, WARNING, and INFO logs, then calls os.Exit(1). +func Exitln(args ...interface{}) { + atomic.StoreUint32(&fatalNoStacks, 1) + logging.println(fatalLog, args...) +} + +// Exitf logs to the FATAL, ERROR, WARNING, and INFO logs, then calls os.Exit(1). +// Arguments are handled in the manner of fmt.Printf; a newline is appended if missing. +func Exitf(format string, args ...interface{}) { + atomic.StoreUint32(&fatalNoStacks, 1) + logging.printf(fatalLog, format, args...) 
+} diff --git a/vendor/src/github.com/golang/glog/glog_file.go b/vendor/src/github.com/golang/glog/glog_file.go new file mode 100644 index 0000000000000..65075d281110c --- /dev/null +++ b/vendor/src/github.com/golang/glog/glog_file.go @@ -0,0 +1,124 @@ +// Go support for leveled logs, analogous to https://code.google.com/p/google-glog/ +// +// Copyright 2013 Google Inc. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// File I/O for logs. + +package glog + +import ( + "errors" + "flag" + "fmt" + "os" + "os/user" + "path/filepath" + "strings" + "sync" + "time" +) + +// MaxSize is the maximum size of a log file in bytes. +var MaxSize uint64 = 1024 * 1024 * 1800 + +// logDirs lists the candidate directories for new log files. +var logDirs []string + +// If non-empty, overrides the choice of directory in which to write logs. +// See createLogDirs for the full list of possible destinations. +var logDir = flag.String("log_dir", "", "If non-empty, write log files in this directory") + +func createLogDirs() { + if *logDir != "" { + logDirs = append(logDirs, *logDir) + } + logDirs = append(logDirs, os.TempDir()) +} + +var ( + pid = os.Getpid() + program = filepath.Base(os.Args[0]) + host = "unknownhost" + userName = "unknownuser" +) + +func init() { + h, err := os.Hostname() + if err == nil { + host = shortHostname(h) + } + + current, err := user.Current() + if err == nil { + userName = current.Username + } + + // Sanitize userName since it may contain filepath separators on Windows. + userName = strings.Replace(userName, `\`, "_", -1) +} + +// shortHostname returns its argument, truncating at the first period. +// For instance, given "www.google.com" it returns "www". +func shortHostname(hostname string) string { + if i := strings.Index(hostname, "."); i >= 0 { + return hostname[:i] + } + return hostname +} + +// logName returns a new log file name containing tag, with start time t, and +// the name for the symlink for tag. +func logName(tag string, t time.Time) (name, link string) { + name = fmt.Sprintf("%s.%s.%s.log.%s.%04d%02d%02d-%02d%02d%02d.%d", + program, + host, + userName, + tag, + t.Year(), + t.Month(), + t.Day(), + t.Hour(), + t.Minute(), + t.Second(), + pid) + return name, program + "." + tag +} + +var onceLogDirs sync.Once + +// create creates a new log file and returns the file and its filename, which +// contains tag ("INFO", "FATAL", etc.) and t. If the file is created +// successfully, create also attempts to update the symlink for that tag, ignoring +// errors. 
+func create(tag string, t time.Time) (f *os.File, filename string, err error) { + onceLogDirs.Do(createLogDirs) + if len(logDirs) == 0 { + return nil, "", errors.New("log: no log dirs") + } + name, link := logName(tag, t) + var lastErr error + for _, dir := range logDirs { + fname := filepath.Join(dir, name) + f, err := os.Create(fname) + if err == nil { + symlink := filepath.Join(dir, link) + os.Remove(symlink) // ignore err + os.Symlink(name, symlink) // ignore err + return f, fname, nil + } + lastErr = err + } + return nil, "", fmt.Errorf("log: cannot create log: %v", lastErr) +}
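
For reference, the request-signing scheme implemented by the vendored go-loghub `signature` function above can be reproduced in isolation. The following is a minimal sketch, not code from this change: the access key secret, header values, and logstore path are invented for illustration, `example-access-key-id` is a hypothetical credential, and the final `LOG <AccessKeyID>:<Signature>` line mirrors the Log Service authorization convention rather than anything added in this diff.

```
// Minimal, self-contained sketch of the SLS signing scheme used by the
// vendored signature() function. All concrete values here are invented.
package main

import (
	"crypto/hmac"
	"crypto/sha1"
	"encoding/base64"
	"fmt"
	"sort"
	"strings"
)

func main() {
	secret := "example-access-key-secret" // hypothetical credential

	method := "POST"
	contentMD5 := "D41D8CD98F00B204E9800998ECF8427E" // MD5 of an empty body
	contentType := "application/x-protobuf"
	date := "Mon, 02 Jan 2006 15:04:05 GMT"
	canoResource := "/logstores/example-logstore" // no query string here

	// CanonicalizedSLSHeaders: the x-log-*/x-acs-* headers, lower-cased,
	// sorted by key, and joined as "key:value" lines.
	slsHeaders := map[string]string{
		"x-log-apiversion":      "0.6.0",
		"x-log-bodyrawsize":     "0",
		"x-log-signaturemethod": "hmac-sha1",
	}
	keys := make([]string, 0, len(slsHeaders))
	for k := range slsHeaders {
		keys = append(keys, k)
	}
	sort.Strings(keys)
	lines := make([]string, 0, len(keys))
	for _, k := range keys {
		lines = append(lines, k+":"+slsHeaders[k])
	}
	canoHeaders := strings.Join(lines, "\n")

	// SignString = VERB \n CONTENT-MD5 \n CONTENT-TYPE \n DATE \n
	//              CanonicalizedSLSHeaders \n CanonicalizedResource
	signStr := strings.Join([]string{
		method, contentMD5, contentType, date, canoHeaders, canoResource,
	}, "\n")

	// Signature = base64(hmac-sha1(SignString, AccessKeySecret))
	mac := hmac.New(sha1.New, []byte(secret))
	mac.Write([]byte(signStr)) // hash.Hash.Write never returns an error
	sig := base64.StdEncoding.EncodeToString(mac.Sum(nil))

	fmt.Println("Authorization: LOG example-access-key-id:" + sig)
}
```

Because the digest covers the sorted, canonicalized `x-log-`/`x-acs-` headers and the canonicalized resource, client and server must canonicalize byte for byte; any divergence in ordering, casing, or separators surfaces as a signature mismatch rather than a clearer error.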