diff --git a/cmd/thanos/compact.go b/cmd/thanos/compact.go index b8f95498d3..94fabfcb1d 100644 --- a/cmd/thanos/compact.go +++ b/cmd/thanos/compact.go @@ -38,7 +38,7 @@ import ( "github.com/thanos-io/thanos/pkg/extprom" extpromhttp "github.com/thanos-io/thanos/pkg/extprom/http" "github.com/thanos-io/thanos/pkg/logging" - "github.com/thanos-io/thanos/pkg/objstore/client" + "github.com/thanos-io/thanos/pkg/objstoreutil" "github.com/thanos-io/thanos/pkg/prober" "github.com/thanos-io/thanos/pkg/runutil" httpserver "github.com/thanos-io/thanos/pkg/server/http" @@ -202,7 +202,7 @@ func runCompact( return err } - bkt, err := client.NewBucket(logger, confContentYaml, reg, component.String()) + bkt, err := objstoreutil.NewBucket(logger, confContentYaml, reg, component.String()) if err != nil { return err } diff --git a/cmd/thanos/downsample.go b/cmd/thanos/downsample.go index bd299c1e3e..a511f561b7 100644 --- a/cmd/thanos/downsample.go +++ b/cmd/thanos/downsample.go @@ -22,14 +22,14 @@ import ( "github.com/prometheus/prometheus/tsdb" "github.com/prometheus/prometheus/tsdb/chunkenc" + "github.com/thanos-io/objstore" "github.com/thanos-io/thanos/pkg/block" "github.com/thanos-io/thanos/pkg/block/metadata" "github.com/thanos-io/thanos/pkg/compact/downsample" "github.com/thanos-io/thanos/pkg/component" "github.com/thanos-io/thanos/pkg/errutil" "github.com/thanos-io/thanos/pkg/extprom" - "github.com/thanos-io/thanos/pkg/objstore" - "github.com/thanos-io/thanos/pkg/objstore/client" + "github.com/thanos-io/thanos/pkg/objstoreutil" "github.com/thanos-io/thanos/pkg/prober" "github.com/thanos-io/thanos/pkg/runutil" httpserver "github.com/thanos-io/thanos/pkg/server/http" @@ -80,7 +80,7 @@ func RunDownsample( return err } - bkt, err := client.NewBucket(logger, confContentYaml, reg, component.Downsample.String()) + bkt, err := objstoreutil.NewBucket(logger, confContentYaml, reg, component.Downsample.String()) if err != nil { return err } diff --git a/cmd/thanos/main_test.go b/cmd/thanos/main_test.go index ce383c81f7..d1e75a2c4c 100644 --- a/cmd/thanos/main_test.go +++ b/cmd/thanos/main_test.go @@ -23,7 +23,7 @@ import ( "github.com/thanos-io/thanos/pkg/block" "github.com/thanos-io/thanos/pkg/block/metadata" "github.com/thanos-io/thanos/pkg/compact/downsample" - "github.com/thanos-io/thanos/pkg/objstore" + "github.com/thanos-io/objstore" "github.com/thanos-io/thanos/pkg/testutil" "github.com/thanos-io/thanos/pkg/testutil/e2eutil" ) diff --git a/cmd/thanos/receive.go b/cmd/thanos/receive.go index 73f7d03156..1d8714cb94 100644 --- a/cmd/thanos/receive.go +++ b/cmd/thanos/receive.go @@ -24,6 +24,7 @@ import ( "github.com/prometheus/prometheus/model/labels" "github.com/prometheus/prometheus/tsdb" + "github.com/thanos-io/objstore" "github.com/thanos-io/thanos/pkg/block/metadata" "github.com/thanos-io/thanos/pkg/component" "github.com/thanos-io/thanos/pkg/exemplars" @@ -33,8 +34,7 @@ import ( "github.com/thanos-io/thanos/pkg/info" "github.com/thanos-io/thanos/pkg/info/infopb" "github.com/thanos-io/thanos/pkg/logging" - "github.com/thanos-io/thanos/pkg/objstore" - "github.com/thanos-io/thanos/pkg/objstore/client" + "github.com/thanos-io/thanos/pkg/objstoreutil" "github.com/thanos-io/thanos/pkg/prober" "github.com/thanos-io/thanos/pkg/receive" "github.com/thanos-io/thanos/pkg/runutil" @@ -158,7 +158,7 @@ func runReceive( } // The background shipper continuously scans the data directory and uploads // new blocks to object storage service. 
- bkt, err = client.NewBucket(logger, confContentYaml, reg, comp.String()) + bkt, err = objstoreutil.NewBucket(logger, confContentYaml, reg, comp.String()) if err != nil { return err } diff --git a/cmd/thanos/rule.go b/cmd/thanos/rule.go index 5fc408cbe6..2f33018235 100644 --- a/cmd/thanos/rule.go +++ b/cmd/thanos/rule.go @@ -53,7 +53,7 @@ import ( "github.com/thanos-io/thanos/pkg/info" "github.com/thanos-io/thanos/pkg/info/infopb" "github.com/thanos-io/thanos/pkg/logging" - "github.com/thanos-io/thanos/pkg/objstore/client" + "github.com/thanos-io/thanos/pkg/objstoreutil" "github.com/thanos-io/thanos/pkg/prober" "github.com/thanos-io/thanos/pkg/promclient" thanosrules "github.com/thanos-io/thanos/pkg/rules" @@ -688,7 +688,7 @@ func runRule( if len(confContentYaml) > 0 { // The background shipper continuously scans the data directory and uploads // new blocks to Google Cloud Storage or an S3-compatible storage service. - bkt, err := client.NewBucket(logger, confContentYaml, reg, component.Rule.String()) + bkt, err := objstoreutil.NewBucket(logger, confContentYaml, reg, component.Rule.String()) if err != nil { return err } diff --git a/cmd/thanos/sidecar.go b/cmd/thanos/sidecar.go index 291782e9a3..33f4307b3c 100644 --- a/cmd/thanos/sidecar.go +++ b/cmd/thanos/sidecar.go @@ -34,7 +34,7 @@ import ( "github.com/thanos-io/thanos/pkg/logging" meta "github.com/thanos-io/thanos/pkg/metadata" thanosmodel "github.com/thanos-io/thanos/pkg/model" - "github.com/thanos-io/thanos/pkg/objstore/client" + "github.com/thanos-io/thanos/pkg/objstoreutil" "github.com/thanos-io/thanos/pkg/prober" "github.com/thanos-io/thanos/pkg/promclient" "github.com/thanos-io/thanos/pkg/reloader" @@ -300,7 +300,7 @@ func runSidecar( if uploads { // The background shipper continuously scans the data directory and uploads // new blocks to Google Cloud Storage or an S3-compatible storage service. 
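Reviewer note: the call-site change above repeats verbatim in every component entrypoint — the bucket factory moves from `pkg/objstore/client` to the new `pkg/objstoreutil` package with an unchanged argument list. The wrapper itself is not shown in this diff, so the sketch below is an assumption about what `pkg/objstoreutil` minimally contains: a thin delegation to the factory that now lives in the extracted `github.com/thanos-io/objstore` module.

```go
// Hedged sketch of pkg/objstoreutil (not part of this diff): the signature is
// taken from the call sites above; the body and return type are assumptions.
package objstoreutil

import (
	"github.com/go-kit/log"
	"github.com/prometheus/client_golang/prometheus"

	"github.com/thanos-io/objstore"
	"github.com/thanos-io/objstore/client"
)

// NewBucket builds a bucket client from raw YAML configuration, assuming the
// factory kept its pre-move signature after extraction to the objstore module.
func NewBucket(logger log.Logger, confContentYaml []byte, reg prometheus.Registerer, component string) (objstore.Bucket, error) {
	return client.NewBucket(logger, confContentYaml, reg, component)
}
```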
- bkt, err := client.NewBucket(logger, confContentYaml, reg, component.Sidecar.String()) + bkt, err := objstoreutil.NewBucket(logger, confContentYaml, reg, component.Sidecar.String()) if err != nil { return err } diff --git a/cmd/thanos/store.go b/cmd/thanos/store.go index b7d69307dc..3f4c0e9d7f 100644 --- a/cmd/thanos/store.go +++ b/cmd/thanos/store.go @@ -36,7 +36,7 @@ import ( "github.com/thanos-io/thanos/pkg/info/infopb" "github.com/thanos-io/thanos/pkg/logging" "github.com/thanos-io/thanos/pkg/model" - "github.com/thanos-io/thanos/pkg/objstore/client" + "github.com/thanos-io/thanos/pkg/objstoreutil" "github.com/thanos-io/thanos/pkg/prober" "github.com/thanos-io/thanos/pkg/runutil" grpcserver "github.com/thanos-io/thanos/pkg/server/grpc" @@ -248,7 +248,7 @@ func runStore( return err } - bkt, err := client.NewBucket(logger, confContentYaml, reg, conf.component.String()) + bkt, err := objstoreutil.NewBucket(logger, confContentYaml, reg, conf.component.String()) if err != nil { return errors.Wrap(err, "create bucket client") } diff --git a/cmd/thanos/tools_bucket.go b/cmd/thanos/tools_bucket.go index 959d2a146c..bdee4f4ce9 100644 --- a/cmd/thanos/tools_bucket.go +++ b/cmd/thanos/tools_bucket.go @@ -41,6 +41,7 @@ import ( "golang.org/x/text/message" "gopkg.in/yaml.v3" + "github.com/thanos-io/objstore" v1 "github.com/thanos-io/thanos/pkg/api/blocks" "github.com/thanos-io/thanos/pkg/block" "github.com/thanos-io/thanos/pkg/block/metadata" @@ -53,8 +54,7 @@ import ( extpromhttp "github.com/thanos-io/thanos/pkg/extprom/http" "github.com/thanos-io/thanos/pkg/logging" "github.com/thanos-io/thanos/pkg/model" - "github.com/thanos-io/thanos/pkg/objstore" - "github.com/thanos-io/thanos/pkg/objstore/client" + "github.com/thanos-io/thanos/pkg/objstoreutil" "github.com/thanos-io/thanos/pkg/prober" "github.com/thanos-io/thanos/pkg/replicate" "github.com/thanos-io/thanos/pkg/runutil" @@ -299,7 +299,7 @@ func registerBucketVerify(app extkingpin.AppClause, objStoreConfig *extflag.Path return err } - bkt, err := client.NewBucket(logger, confContentYaml, reg, component.Bucket.String()) + bkt, err := objstoreutil.NewBucket(logger, confContentYaml, reg, component.Bucket.String()) if err != nil { return err } @@ -317,7 +317,7 @@ func registerBucketVerify(app extkingpin.AppClause, objStoreConfig *extflag.Path } } else { // nil Prometheus registerer: don't create conflicting metrics. 
- backupBkt, err = client.NewBucket(logger, backupconfContentYaml, nil, component.Bucket.String()) + backupBkt, err = objstoreutil.NewBucket(logger, backupconfContentYaml, nil, component.Bucket.String()) if err != nil { return err } @@ -380,7 +380,7 @@ func registerBucketLs(app extkingpin.AppClause, objStoreConfig *extflag.PathOrCo return err } - bkt, err := client.NewBucket(logger, confContentYaml, reg, component.Bucket.String()) + bkt, err := objstoreutil.NewBucket(logger, confContentYaml, reg, component.Bucket.String()) if err != nil { return err } @@ -486,7 +486,7 @@ func registerBucketInspect(app extkingpin.AppClause, objStoreConfig *extflag.Pat return err } - bkt, err := client.NewBucket(logger, confContentYaml, reg, component.Bucket.String()) + bkt, err := objstoreutil.NewBucket(logger, confContentYaml, reg, component.Bucket.String()) if err != nil { return err } @@ -594,7 +594,7 @@ func registerBucketWeb(app extkingpin.AppClause, objStoreConfig *extflag.PathOrC return err } - bkt, err := client.NewBucket(logger, confContentYaml, reg, component.Bucket.String()) + bkt, err := objstoreutil.NewBucket(logger, confContentYaml, reg, component.Bucket.String()) if err != nil { return errors.Wrap(err, "bucket client") } @@ -780,7 +780,7 @@ func registerBucketCleanup(app extkingpin.AppClause, objStoreConfig *extflag.Pat return err } - bkt, err := client.NewBucket(logger, confContentYaml, reg, component.Cleanup.String()) + bkt, err := objstoreutil.NewBucket(logger, confContentYaml, reg, component.Cleanup.String()) if err != nil { return err } @@ -1035,7 +1035,7 @@ func registerBucketMarkBlock(app extkingpin.AppClause, objStoreConfig *extflag.P return err } - bkt, err := client.NewBucket(logger, confContentYaml, reg, component.Mark.String()) + bkt, err := objstoreutil.NewBucket(logger, confContentYaml, reg, component.Mark.String()) if err != nil { return err } @@ -1099,7 +1099,7 @@ func registerBucketRewrite(app extkingpin.AppClause, objStoreConfig *extflag.Pat return err } - bkt, err := client.NewBucket(logger, confContentYaml, reg, component.Rewrite.String()) + bkt, err := objstoreutil.NewBucket(logger, confContentYaml, reg, component.Rewrite.String()) if err != nil { return err } @@ -1306,7 +1306,7 @@ func registerBucketRetention(app extkingpin.AppClause, objStoreConfig *extflag.P return err } - bkt, err := client.NewBucket(logger, confContentYaml, reg, component.Retention.String()) + bkt, err := objstoreutil.NewBucket(logger, confContentYaml, reg, component.Retention.String()) if err != nil { return err } diff --git a/examples/interactive/interactive_test.go b/examples/interactive/interactive_test.go index a635384cf4..3dd1f20d5a 100644 --- a/examples/interactive/interactive_test.go +++ b/examples/interactive/interactive_test.go @@ -15,8 +15,8 @@ import ( e2einteractive "github.com/efficientgo/e2e/interactive" e2emonitoring "github.com/efficientgo/e2e/monitoring" "github.com/pkg/errors" - "github.com/thanos-io/thanos/pkg/objstore/client" - "github.com/thanos-io/thanos/pkg/objstore/s3" + "github.com/thanos-io/objstore/client" + "github.com/thanos-io/objstore/s3" "github.com/thanos-io/thanos/pkg/testutil" tracingclient "github.com/thanos-io/thanos/pkg/tracing/client" "github.com/thanos-io/thanos/pkg/tracing/jaeger" diff --git a/pkg/api/blocks/v1.go b/pkg/api/blocks/v1.go index 858ecdb49a..aef171c53d 100644 --- a/pkg/api/blocks/v1.go +++ b/pkg/api/blocks/v1.go @@ -20,7 +20,7 @@ import ( "github.com/thanos-io/thanos/pkg/block/metadata" extpromhttp "github.com/thanos-io/thanos/pkg/extprom/http" 
"github.com/thanos-io/thanos/pkg/logging" - "github.com/thanos-io/thanos/pkg/objstore" + "github.com/thanos-io/objstore" ) // BlocksAPI is a very simple API used by Thanos Block Viewer. diff --git a/pkg/api/blocks/v1_test.go b/pkg/api/blocks/v1_test.go index c2a349da76..b27a9935be 100644 --- a/pkg/api/blocks/v1_test.go +++ b/pkg/api/blocks/v1_test.go @@ -25,7 +25,7 @@ import ( baseAPI "github.com/thanos-io/thanos/pkg/api" "github.com/thanos-io/thanos/pkg/block" "github.com/thanos-io/thanos/pkg/block/metadata" - "github.com/thanos-io/thanos/pkg/objstore" + "github.com/thanos-io/objstore" "github.com/thanos-io/thanos/pkg/testutil" "github.com/thanos-io/thanos/pkg/testutil/e2eutil" ) diff --git a/pkg/block/block.go b/pkg/block/block.go index d29478ac10..c06773d5d1 100644 --- a/pkg/block/block.go +++ b/pkg/block/block.go @@ -24,7 +24,7 @@ import ( "github.com/prometheus/client_golang/prometheus" "github.com/thanos-io/thanos/pkg/block/metadata" - "github.com/thanos-io/thanos/pkg/objstore" + "github.com/thanos-io/objstore" "github.com/thanos-io/thanos/pkg/runutil" ) diff --git a/pkg/block/block_test.go b/pkg/block/block_test.go index 066f95616f..e45876f0aa 100644 --- a/pkg/block/block_test.go +++ b/pkg/block/block_test.go @@ -25,7 +25,7 @@ import ( "github.com/prometheus/prometheus/model/labels" "github.com/thanos-io/thanos/pkg/block/metadata" - "github.com/thanos-io/thanos/pkg/objstore" + "github.com/thanos-io/objstore" "github.com/thanos-io/thanos/pkg/testutil" "github.com/thanos-io/thanos/pkg/testutil/e2eutil" ) diff --git a/pkg/block/fetcher.go b/pkg/block/fetcher.go index 24967c05c4..31fac28fa1 100644 --- a/pkg/block/fetcher.go +++ b/pkg/block/fetcher.go @@ -30,7 +30,7 @@ import ( "github.com/thanos-io/thanos/pkg/errutil" "github.com/thanos-io/thanos/pkg/extprom" "github.com/thanos-io/thanos/pkg/model" - "github.com/thanos-io/thanos/pkg/objstore" + "github.com/thanos-io/objstore" "github.com/thanos-io/thanos/pkg/runutil" ) diff --git a/pkg/block/fetcher_test.go b/pkg/block/fetcher_test.go index a826856b04..427310843b 100644 --- a/pkg/block/fetcher_test.go +++ b/pkg/block/fetcher_test.go @@ -28,8 +28,8 @@ import ( "github.com/thanos-io/thanos/pkg/block/metadata" "github.com/thanos-io/thanos/pkg/extprom" "github.com/thanos-io/thanos/pkg/model" - "github.com/thanos-io/thanos/pkg/objstore" - "github.com/thanos-io/thanos/pkg/objstore/objtesting" + "github.com/thanos-io/objstore" + "github.com/thanos-io/objstore/objtesting" "github.com/thanos-io/thanos/pkg/testutil" ) diff --git a/pkg/block/indexheader/binary_reader.go b/pkg/block/indexheader/binary_reader.go index a945d53fa2..db81496a5f 100644 --- a/pkg/block/indexheader/binary_reader.go +++ b/pkg/block/indexheader/binary_reader.go @@ -28,7 +28,7 @@ import ( "github.com/prometheus/prometheus/tsdb/index" "github.com/thanos-io/thanos/pkg/block" - "github.com/thanos-io/thanos/pkg/objstore" + "github.com/thanos-io/objstore" "github.com/thanos-io/thanos/pkg/runutil" ) diff --git a/pkg/block/indexheader/header_test.go b/pkg/block/indexheader/header_test.go index a53842077e..580fa98a3e 100644 --- a/pkg/block/indexheader/header_test.go +++ b/pkg/block/indexheader/header_test.go @@ -23,8 +23,8 @@ import ( "github.com/thanos-io/thanos/pkg/block" "github.com/thanos-io/thanos/pkg/block/metadata" - "github.com/thanos-io/thanos/pkg/objstore" - "github.com/thanos-io/thanos/pkg/objstore/filesystem" + "github.com/thanos-io/objstore" + "github.com/thanos-io/objstore/filesystem" "github.com/thanos-io/thanos/pkg/testutil" 
"github.com/thanos-io/thanos/pkg/testutil/e2eutil" ) diff --git a/pkg/block/indexheader/lazy_binary_reader.go b/pkg/block/indexheader/lazy_binary_reader.go index 30069e2cd7..61895d8391 100644 --- a/pkg/block/indexheader/lazy_binary_reader.go +++ b/pkg/block/indexheader/lazy_binary_reader.go @@ -20,7 +20,7 @@ import ( "go.uber.org/atomic" "github.com/thanos-io/thanos/pkg/block" - "github.com/thanos-io/thanos/pkg/objstore" + "github.com/thanos-io/objstore" ) var ( diff --git a/pkg/block/indexheader/lazy_binary_reader_test.go b/pkg/block/indexheader/lazy_binary_reader_test.go index 1fcb65a631..316109292a 100644 --- a/pkg/block/indexheader/lazy_binary_reader_test.go +++ b/pkg/block/indexheader/lazy_binary_reader_test.go @@ -19,7 +19,7 @@ import ( "github.com/thanos-io/thanos/pkg/block" "github.com/thanos-io/thanos/pkg/block/metadata" - "github.com/thanos-io/thanos/pkg/objstore/filesystem" + "github.com/thanos-io/objstore/filesystem" "github.com/thanos-io/thanos/pkg/testutil" "github.com/thanos-io/thanos/pkg/testutil/e2eutil" ) diff --git a/pkg/block/indexheader/reader_pool.go b/pkg/block/indexheader/reader_pool.go index c1742d5d4c..da4e703429 100644 --- a/pkg/block/indexheader/reader_pool.go +++ b/pkg/block/indexheader/reader_pool.go @@ -14,7 +14,7 @@ import ( "github.com/pkg/errors" "github.com/prometheus/client_golang/prometheus" - "github.com/thanos-io/thanos/pkg/objstore" + "github.com/thanos-io/objstore" ) // ReaderPoolMetrics holds metrics tracked by ReaderPool. diff --git a/pkg/block/indexheader/reader_pool_test.go b/pkg/block/indexheader/reader_pool_test.go index 2beaecf9ff..4b0ac0b5e7 100644 --- a/pkg/block/indexheader/reader_pool_test.go +++ b/pkg/block/indexheader/reader_pool_test.go @@ -17,7 +17,7 @@ import ( "github.com/thanos-io/thanos/pkg/block" "github.com/thanos-io/thanos/pkg/block/metadata" - "github.com/thanos-io/thanos/pkg/objstore/filesystem" + "github.com/thanos-io/objstore/filesystem" "github.com/thanos-io/thanos/pkg/testutil" "github.com/thanos-io/thanos/pkg/testutil/e2eutil" ) diff --git a/pkg/block/metadata/markers.go b/pkg/block/metadata/markers.go index 81480652fc..2de5fcda43 100644 --- a/pkg/block/metadata/markers.go +++ b/pkg/block/metadata/markers.go @@ -13,7 +13,7 @@ import ( "github.com/oklog/ulid" "github.com/pkg/errors" - "github.com/thanos-io/thanos/pkg/objstore" + "github.com/thanos-io/objstore" "github.com/thanos-io/thanos/pkg/runutil" ) diff --git a/pkg/block/metadata/markers_test.go b/pkg/block/metadata/markers_test.go index 1da73e2c98..ea84db9f67 100644 --- a/pkg/block/metadata/markers_test.go +++ b/pkg/block/metadata/markers_test.go @@ -18,7 +18,7 @@ import ( "github.com/pkg/errors" "go.uber.org/goleak" - "github.com/thanos-io/thanos/pkg/objstore" + "github.com/thanos-io/objstore" "github.com/thanos-io/thanos/pkg/testutil" ) diff --git a/pkg/cache/caching_bucket_config.go b/pkg/cache/caching_bucket_config.go index d8a22db297..3422783d6b 100644 --- a/pkg/cache/caching_bucket_config.go +++ b/pkg/cache/caching_bucket_config.go @@ -6,7 +6,7 @@ package cache import ( "time" - "github.com/thanos-io/thanos/pkg/objstore" + "github.com/thanos-io/objstore" ) // Codec for encoding and decoding results of Iter call. 
diff --git a/pkg/cache/groupcache.go b/pkg/cache/groupcache.go index 81d11504ac..a23aee79ad 100644 --- a/pkg/cache/groupcache.go +++ b/pkg/cache/groupcache.go @@ -23,7 +23,7 @@ import ( "github.com/thanos-io/thanos/pkg/discovery/dns" "github.com/thanos-io/thanos/pkg/extprom" "github.com/thanos-io/thanos/pkg/model" - "github.com/thanos-io/thanos/pkg/objstore" + "github.com/thanos-io/objstore" "github.com/thanos-io/thanos/pkg/runutil" "github.com/thanos-io/thanos/pkg/store/cache/cachekey" "github.com/vimeo/galaxycache" diff --git a/pkg/cache/groupcache_test.go b/pkg/cache/groupcache_test.go index 06df4cf642..b38ca53b1a 100644 --- a/pkg/cache/groupcache_test.go +++ b/pkg/cache/groupcache_test.go @@ -20,7 +20,7 @@ import ( "github.com/thanos-io/thanos/pkg/component" "github.com/thanos-io/thanos/pkg/discovery/dns" "github.com/thanos-io/thanos/pkg/model" - "github.com/thanos-io/thanos/pkg/objstore" + "github.com/thanos-io/objstore" "github.com/thanos-io/thanos/pkg/prober" httpserver "github.com/thanos-io/thanos/pkg/server/http" "github.com/thanos-io/thanos/pkg/store/cache/cachekey" diff --git a/pkg/compact/blocks_cleaner.go b/pkg/compact/blocks_cleaner.go index 6b86c15f6c..28978330d2 100644 --- a/pkg/compact/blocks_cleaner.go +++ b/pkg/compact/blocks_cleaner.go @@ -13,7 +13,7 @@ import ( "github.com/prometheus/client_golang/prometheus" "github.com/thanos-io/thanos/pkg/block" - "github.com/thanos-io/thanos/pkg/objstore" + "github.com/thanos-io/objstore" ) // BlocksCleaner is a struct that deletes blocks from bucket which are marked for deletion. diff --git a/pkg/compact/clean.go b/pkg/compact/clean.go index 9a7cce5b92..d1cc25bfc8 100644 --- a/pkg/compact/clean.go +++ b/pkg/compact/clean.go @@ -13,7 +13,7 @@ import ( "github.com/prometheus/client_golang/prometheus" "github.com/thanos-io/thanos/pkg/block" - "github.com/thanos-io/thanos/pkg/objstore" + "github.com/thanos-io/objstore" ) const ( diff --git a/pkg/compact/clean_test.go b/pkg/compact/clean_test.go index 1493846ae5..ce3a37181c 100644 --- a/pkg/compact/clean_test.go +++ b/pkg/compact/clean_test.go @@ -19,7 +19,7 @@ import ( "github.com/thanos-io/thanos/pkg/block" "github.com/thanos-io/thanos/pkg/block/metadata" - "github.com/thanos-io/thanos/pkg/objstore" + "github.com/thanos-io/objstore" "github.com/thanos-io/thanos/pkg/testutil" ) diff --git a/pkg/compact/compact.go b/pkg/compact/compact.go index 5e55262f5e..9f123ed56f 100644 --- a/pkg/compact/compact.go +++ b/pkg/compact/compact.go @@ -30,7 +30,7 @@ import ( "github.com/thanos-io/thanos/pkg/compact/downsample" "github.com/thanos-io/thanos/pkg/errutil" "github.com/thanos-io/thanos/pkg/extprom" - "github.com/thanos-io/thanos/pkg/objstore" + "github.com/thanos-io/objstore" "github.com/thanos-io/thanos/pkg/runutil" "github.com/thanos-io/thanos/pkg/tracing" ) diff --git a/pkg/compact/compact_e2e_test.go b/pkg/compact/compact_e2e_test.go index 2dc783b9e7..a07338c5d5 100644 --- a/pkg/compact/compact_e2e_test.go +++ b/pkg/compact/compact_e2e_test.go @@ -28,8 +28,8 @@ import ( "github.com/thanos-io/thanos/pkg/block" "github.com/thanos-io/thanos/pkg/block/metadata" "github.com/thanos-io/thanos/pkg/dedup" - "github.com/thanos-io/thanos/pkg/objstore" - "github.com/thanos-io/thanos/pkg/objstore/objtesting" + "github.com/thanos-io/objstore" + "github.com/thanos-io/objstore/objtesting" "github.com/thanos-io/thanos/pkg/testutil" "github.com/thanos-io/thanos/pkg/testutil/e2eutil" ) diff --git a/pkg/compact/compact_test.go b/pkg/compact/compact_test.go index adf9201b3e..bf70522752 100644 --- 
a/pkg/compact/compact_test.go +++ b/pkg/compact/compact_test.go @@ -25,7 +25,7 @@ import ( "github.com/thanos-io/thanos/pkg/compact/downsample" "github.com/thanos-io/thanos/pkg/errutil" "github.com/thanos-io/thanos/pkg/extprom" - "github.com/thanos-io/thanos/pkg/objstore" + "github.com/thanos-io/objstore" "github.com/thanos-io/thanos/pkg/testutil" ) diff --git a/pkg/compact/planner.go b/pkg/compact/planner.go index d316994f54..24b07472fc 100644 --- a/pkg/compact/planner.go +++ b/pkg/compact/planner.go @@ -16,7 +16,7 @@ import ( "github.com/thanos-io/thanos/pkg/block" "github.com/thanos-io/thanos/pkg/block/metadata" - "github.com/thanos-io/thanos/pkg/objstore" + "github.com/thanos-io/objstore" ) type tsdbBasedPlanner struct { diff --git a/pkg/compact/planner_test.go b/pkg/compact/planner_test.go index a54ca1af0f..b7fe805355 100644 --- a/pkg/compact/planner_test.go +++ b/pkg/compact/planner_test.go @@ -22,7 +22,7 @@ import ( "github.com/thanos-io/thanos/pkg/block" "github.com/thanos-io/thanos/pkg/block/metadata" - "github.com/thanos-io/thanos/pkg/objstore" + "github.com/thanos-io/objstore" "github.com/thanos-io/thanos/pkg/testutil" ) diff --git a/pkg/compact/retention.go b/pkg/compact/retention.go index 691fd8532d..3c789e8e75 100644 --- a/pkg/compact/retention.go +++ b/pkg/compact/retention.go @@ -16,7 +16,7 @@ import ( "github.com/thanos-io/thanos/pkg/block" "github.com/thanos-io/thanos/pkg/block/metadata" - "github.com/thanos-io/thanos/pkg/objstore" + "github.com/thanos-io/objstore" ) // ApplyRetentionPolicyByResolution removes blocks depending on the specified retentionByResolution based on blocks MaxTime. diff --git a/pkg/compact/retention_test.go b/pkg/compact/retention_test.go index cc5cf4d215..13d1de91cb 100644 --- a/pkg/compact/retention_test.go +++ b/pkg/compact/retention_test.go @@ -22,7 +22,7 @@ import ( "github.com/thanos-io/thanos/pkg/block" "github.com/thanos-io/thanos/pkg/block/metadata" "github.com/thanos-io/thanos/pkg/compact" - "github.com/thanos-io/thanos/pkg/objstore" + "github.com/thanos-io/objstore" "github.com/thanos-io/thanos/pkg/testutil" ) diff --git a/pkg/objstore/azure/azure.go b/pkg/objstore/azure/azure.go deleted file mode 100644 index f79b7df237..0000000000 --- a/pkg/objstore/azure/azure.go +++ /dev/null @@ -1,462 +0,0 @@ -// Copyright (c) The Thanos Authors. -// Licensed under the Apache License 2.0. - -package azure - -import ( - "bytes" - "context" - "io" - "io/ioutil" - "os" - "strings" - "testing" - "time" - - blob "github.com/Azure/azure-storage-blob-go/azblob" - "github.com/go-kit/log" - "github.com/go-kit/log/level" - "github.com/pkg/errors" - "github.com/prometheus/common/model" - "gopkg.in/yaml.v2" - - "github.com/thanos-io/thanos/pkg/objstore" -) - -const ( - azureDefaultEndpoint = "blob.core.windows.net" -) - -// Set default retry values to default Azure values. 0 = use Default Azure. -var DefaultConfig = Config{ - PipelineConfig: PipelineConfig{ - MaxTries: 0, - TryTimeout: 0, - RetryDelay: 0, - MaxRetryDelay: 0, - }, - ReaderConfig: ReaderConfig{ - MaxRetryRequests: 0, - }, - HTTPConfig: HTTPConfig{ - IdleConnTimeout: model.Duration(90 * time.Second), - ResponseHeaderTimeout: model.Duration(2 * time.Minute), - TLSHandshakeTimeout: model.Duration(10 * time.Second), - ExpectContinueTimeout: model.Duration(1 * time.Second), - MaxIdleConns: 100, - MaxIdleConnsPerHost: 100, - MaxConnsPerHost: 0, - DisableCompression: false, - }, -} - -// Config Azure storage configuration. 
-type Config struct { - StorageAccountName string `yaml:"storage_account"` - StorageAccountKey string `yaml:"storage_account_key"` - ContainerName string `yaml:"container"` - Endpoint string `yaml:"endpoint"` - MaxRetries int `yaml:"max_retries"` - MSIResource string `yaml:"msi_resource"` - UserAssignedID string `yaml:"user_assigned_id"` - PipelineConfig PipelineConfig `yaml:"pipeline_config"` - ReaderConfig ReaderConfig `yaml:"reader_config"` - HTTPConfig HTTPConfig `yaml:"http_config"` -} - -type ReaderConfig struct { - MaxRetryRequests int `yaml:"max_retry_requests"` -} - -type PipelineConfig struct { - MaxTries int32 `yaml:"max_tries"` - TryTimeout model.Duration `yaml:"try_timeout"` - RetryDelay model.Duration `yaml:"retry_delay"` - MaxRetryDelay model.Duration `yaml:"max_retry_delay"` -} - -type HTTPConfig struct { - IdleConnTimeout model.Duration `yaml:"idle_conn_timeout"` - ResponseHeaderTimeout model.Duration `yaml:"response_header_timeout"` - InsecureSkipVerify bool `yaml:"insecure_skip_verify"` - - TLSHandshakeTimeout model.Duration `yaml:"tls_handshake_timeout"` - ExpectContinueTimeout model.Duration `yaml:"expect_continue_timeout"` - MaxIdleConns int `yaml:"max_idle_conns"` - MaxIdleConnsPerHost int `yaml:"max_idle_conns_per_host"` - MaxConnsPerHost int `yaml:"max_conns_per_host"` - DisableCompression bool `yaml:"disable_compression"` - - TLSConfig objstore.TLSConfig `yaml:"tls_config"` -} - -// Bucket implements the store.Bucket interface against Azure APIs. -type Bucket struct { - logger log.Logger - containerURL blob.ContainerURL - config *Config -} - -// Validate checks to see if any of the config options are set. -func (conf *Config) validate() error { - - var errMsg []string - if conf.MSIResource == "" { - if conf.UserAssignedID == "" { - if conf.StorageAccountName == "" || - conf.StorageAccountKey == "" { - errMsg = append(errMsg, "invalid Azure storage configuration") - } - if conf.StorageAccountName == "" && conf.StorageAccountKey != "" { - errMsg = append(errMsg, "no Azure storage_account specified while storage_account_key is present in config file; both should be present") - } - if conf.StorageAccountName != "" && conf.StorageAccountKey == "" { - errMsg = append(errMsg, "no Azure storage_account_key specified while storage_account is present in config file; both should be present") - } - } else { - if conf.StorageAccountName == "" { - errMsg = append(errMsg, "UserAssignedID is configured but storage account name is missing") - } - if conf.StorageAccountKey != "" { - errMsg = append(errMsg, "UserAssignedID is configured but storage account key is used") - } - } - } else { - if conf.StorageAccountName == "" { - errMsg = append(errMsg, "MSI resource is configured but storage account name is missing") - } - if conf.StorageAccountKey != "" { - errMsg = append(errMsg, "MSI resource is configured but storage account key is used") - } - } - - if conf.ContainerName == "" { - errMsg = append(errMsg, "no Azure container specified") - } - if conf.Endpoint == "" { - conf.Endpoint = azureDefaultEndpoint - } - - if conf.PipelineConfig.MaxTries < 0 { - errMsg = append(errMsg, "The value of max_tries must be greater than or equal to 0 in the config file") - } - - if conf.ReaderConfig.MaxRetryRequests < 0 { - errMsg = append(errMsg, "The value of max_retry_requests must be greater than or equal to 0 in the config file") - } - - if len(errMsg) > 0 { - return errors.New(strings.Join(errMsg, ", ")) - } - - return nil -} - -// parseConfig unmarshals a buffer into a Config with default 
values. -func parseConfig(conf []byte) (Config, error) { - config := DefaultConfig - if err := yaml.UnmarshalStrict(conf, &config); err != nil { - return Config{}, err - } - - // If we don't have config specific retry values but we do have the generic MaxRetries. - // This is for backwards compatibility but also ease of configuration. - if config.MaxRetries > 0 { - if config.PipelineConfig.MaxTries == 0 { - config.PipelineConfig.MaxTries = int32(config.MaxRetries) - } - if config.ReaderConfig.MaxRetryRequests == 0 { - config.ReaderConfig.MaxRetryRequests = config.MaxRetries - } - } - - return config, nil -} - -// NewBucket returns a new Bucket using the provided Azure config. -func NewBucket(logger log.Logger, azureConfig []byte, component string) (*Bucket, error) { - level.Debug(logger).Log("msg", "creating new Azure bucket connection", "component", component) - - conf, err := parseConfig(azureConfig) - if err != nil { - return nil, err - } - - return NewBucketWithConfig(logger, conf, component) -} - -// NewBucketWithConfig returns a new Bucket using the provided Azure config struct. -func NewBucketWithConfig(logger log.Logger, conf Config, component string) (*Bucket, error) { - if err := conf.validate(); err != nil { - return nil, err - } - - ctx := context.Background() - container, err := createContainer(ctx, logger, conf) - if err != nil { - ret, ok := err.(blob.StorageError) - if !ok { - return nil, errors.Wrapf(err, "Azure API return unexpected error: %T\n", err) - } - if ret.ServiceCode() == "ContainerAlreadyExists" { - level.Debug(logger).Log("msg", "Getting connection to existing Azure blob container", "container", conf.ContainerName) - container, err = getContainer(ctx, logger, conf) - if err != nil { - return nil, errors.Wrapf(err, "cannot get existing Azure blob container: %s", container) - } - } else { - return nil, errors.Wrapf(err, "error creating Azure blob container: %s", container) - } - } else { - level.Info(logger).Log("msg", "Azure blob container successfully created", "address", container) - } - - bkt := &Bucket{ - logger: logger, - containerURL: container, - config: &conf, - } - return bkt, nil -} - -// Iter calls f for each entry in the given directory. The argument to f is the full -// object name including the prefix of the inspected directory. -func (b *Bucket) Iter(ctx context.Context, dir string, f func(string) error, options ...objstore.IterOption) error { - prefix := dir - if prefix != "" && !strings.HasSuffix(prefix, DirDelim) { - prefix += DirDelim - } - - marker := blob.Marker{} - params := objstore.ApplyIterOptions(options...) 
- listOptions := blob.ListBlobsSegmentOptions{Prefix: prefix} - - for i := 1; ; i++ { - var ( - blobPrefixes []blob.BlobPrefix - blobItems []blob.BlobItemInternal - ) - - if params.Recursive { - list, err := b.containerURL.ListBlobsFlatSegment(ctx, marker, listOptions) - if err != nil { - return errors.Wrapf(err, "cannot list flat blobs with prefix %s (iteration #%d)", dir, i) - } - - marker = list.NextMarker - blobItems = list.Segment.BlobItems - blobPrefixes = nil - } else { - list, err := b.containerURL.ListBlobsHierarchySegment(ctx, marker, DirDelim, listOptions) - if err != nil { - return errors.Wrapf(err, "cannot list hierarchy blobs with prefix %s (iteration #%d)", dir, i) - } - - marker = list.NextMarker - blobItems = list.Segment.BlobItems - blobPrefixes = list.Segment.BlobPrefixes - } - - var listNames []string - - for _, blob := range blobItems { - listNames = append(listNames, blob.Name) - } - - for _, blobPrefix := range blobPrefixes { - listNames = append(listNames, blobPrefix.Name) - } - - for _, name := range listNames { - if err := f(name); err != nil { - return err - } - } - - // Continue iterating if we are not done. - if !marker.NotDone() { - break - } - - level.Debug(b.logger).Log("msg", "requesting next iteration of listing blobs", "last_entries", len(listNames), "iteration", i) - } - - return nil -} - -// IsObjNotFoundErr returns true if error means that object is not found. Relevant to Get operations. -func (b *Bucket) IsObjNotFoundErr(err error) bool { - if err == nil { - return false - } - - errorCode := parseError(err.Error()) - if errorCode == "InvalidUri" || errorCode == "BlobNotFound" { - return true - } - - return false -} - -func (b *Bucket) getBlobReader(ctx context.Context, name string, offset, length int64) (io.ReadCloser, error) { - level.Debug(b.logger).Log("msg", "getting blob", "blob", name, "offset", offset, "length", length) - if name == "" { - return nil, errors.New("X-Ms-Error-Code: [EmptyContainerName]") - } - exists, err := b.Exists(ctx, name) - if err != nil { - return nil, errors.Wrapf(err, "cannot get blob reader: %s", name) - } - - if !exists { - return nil, errors.New("X-Ms-Error-Code: [BlobNotFound]") - } - - blobURL := getBlobURL(name, b.containerURL) - if err != nil { - return nil, errors.Wrapf(err, "cannot get Azure blob URL, address: %s", name) - } - var props *blob.BlobGetPropertiesResponse - props, err = blobURL.GetProperties(ctx, blob.BlobAccessConditions{}, blob.ClientProvidedKeyOptions{}) - if err != nil { - return nil, errors.Wrapf(err, "cannot get properties for container: %s", name) - } - - var size int64 - // If a length is specified and it won't go past the end of the file, - // then set it as the size. 
- if length > 0 && length <= props.ContentLength()-offset { - size = length - level.Debug(b.logger).Log("msg", "set size to length", "size", size, "length", length, "offset", offset, "name", name) - } else { - size = props.ContentLength() - offset - level.Debug(b.logger).Log("msg", "set size to go to EOF", "contentlength", props.ContentLength(), "size", size, "length", length, "offset", offset, "name", name) - } - - destBuffer := make([]byte, size) - - if err := blob.DownloadBlobToBuffer(context.Background(), blobURL.BlobURL, offset, size, - destBuffer, blob.DownloadFromBlobOptions{ - BlockSize: blob.BlobDefaultDownloadBlockSize, - Parallelism: uint16(3), - Progress: nil, - RetryReaderOptionsPerBlock: blob.RetryReaderOptions{ - MaxRetryRequests: b.config.ReaderConfig.MaxRetryRequests, - }, - }, - ); err != nil { - return nil, errors.Wrapf(err, "cannot download blob, address: %s", blobURL.BlobURL) - } - - return ioutil.NopCloser(bytes.NewReader(destBuffer)), nil -} - -// Get returns a reader for the given object name. -func (b *Bucket) Get(ctx context.Context, name string) (io.ReadCloser, error) { - return b.getBlobReader(ctx, name, 0, blob.CountToEnd) -} - -// GetRange returns a new range reader for the given object name and range. -func (b *Bucket) GetRange(ctx context.Context, name string, off, length int64) (io.ReadCloser, error) { - return b.getBlobReader(ctx, name, off, length) -} - -// Attributes returns information about the specified object. -func (b *Bucket) Attributes(ctx context.Context, name string) (objstore.ObjectAttributes, error) { - blobURL := getBlobURL(name, b.containerURL) - - props, err := blobURL.GetProperties(ctx, blob.BlobAccessConditions{}, blob.ClientProvidedKeyOptions{}) - if err != nil { - return objstore.ObjectAttributes{}, err - } - - return objstore.ObjectAttributes{ - Size: props.ContentLength(), - LastModified: props.LastModified(), - }, nil -} - -// Exists checks if the given object exists. -func (b *Bucket) Exists(ctx context.Context, name string) (bool, error) { - level.Debug(b.logger).Log("msg", "check if blob exists", "blob", name) - blobURL := getBlobURL(name, b.containerURL) - - if _, err := blobURL.GetProperties(ctx, blob.BlobAccessConditions{}, blob.ClientProvidedKeyOptions{}); err != nil { - if b.IsObjNotFoundErr(err) { - return false, nil - } - return false, errors.Wrapf(err, "cannot get properties for Azure blob, address: %s", name) - } - - return true, nil -} - -// Upload the contents of the reader as an object into the bucket. -func (b *Bucket) Upload(ctx context.Context, name string, r io.Reader) error { - level.Debug(b.logger).Log("msg", "Uploading blob", "blob", name) - blobURL := getBlobURL(name, b.containerURL) - - if _, err := blob.UploadStreamToBlockBlob(ctx, r, blobURL, - blob.UploadStreamToBlockBlobOptions{ - BufferSize: 3 * 1024 * 1024, - MaxBuffers: 4, - }, - ); err != nil { - return errors.Wrapf(err, "cannot upload Azure blob, address: %s", name) - } - return nil -} - -// Delete removes the object with the given name. -func (b *Bucket) Delete(ctx context.Context, name string) error { - level.Debug(b.logger).Log("msg", "Deleting blob", "blob", name) - blobURL := getBlobURL(name, b.containerURL) - - if _, err := blobURL.Delete(ctx, blob.DeleteSnapshotsOptionInclude, blob.BlobAccessConditions{}); err != nil { - return errors.Wrapf(err, "error deleting blob, address: %s", name) - } - return nil -} - -// Name returns Azure container name. 
-func (b *Bucket) Name() string { - return b.config.ContainerName -} - -// NewTestBucket creates test bkt client that before returning creates temporary bucket. -// In a close function it empties and deletes the bucket. -func NewTestBucket(t testing.TB, component string) (objstore.Bucket, func(), error) { - t.Log("Using test Azure bucket.") - - conf := &Config{ - StorageAccountName: os.Getenv("AZURE_STORAGE_ACCOUNT"), - StorageAccountKey: os.Getenv("AZURE_STORAGE_ACCESS_KEY"), - ContainerName: objstore.CreateTemporaryTestBucketName(t), - } - - bc, err := yaml.Marshal(conf) - if err != nil { - return nil, nil, err - } - - ctx := context.Background() - - bkt, err := NewBucket(log.NewNopLogger(), bc, component) - if err != nil { - t.Errorf("Cannot create Azure storage container:") - return nil, nil, err - } - - return bkt, func() { - objstore.EmptyBucket(t, ctx, bkt) - err = bkt.Delete(ctx, conf.ContainerName) - if err != nil { - t.Logf("deleting bucket failed: %s", err) - } - }, nil -} - -// Close bucket. -func (b *Bucket) Close() error { - return nil -} diff --git a/pkg/objstore/azure/azure_test.go b/pkg/objstore/azure/azure_test.go deleted file mode 100644 index ffef1c8a0e..0000000000 --- a/pkg/objstore/azure/azure_test.go +++ /dev/null @@ -1,241 +0,0 @@ -// Copyright (c) The Thanos Authors. -// Licensed under the Apache License 2.0. - -package azure - -import ( - "testing" - "time" - - "github.com/thanos-io/thanos/pkg/testutil" -) - -type TestCase struct { - name string - config []byte - wantFailParse bool - wantFailValidate bool -} - -var validConfig = []byte(`storage_account: "myStorageAccount" -storage_account_key: "abc123" -container: "MyContainer" -endpoint: "blob.core.windows.net" -reader_config: - "max_retry_requests": 100 -pipeline_config: - "try_timeout": 0`) - -var tests = []TestCase{ - { - name: "validConfig", - config: validConfig, - wantFailParse: false, - wantFailValidate: false, - }, - { - name: "Missing storage account", - config: []byte(`storage_account: "" -storage_account_key: "abc123" -container: "MyContainer" -endpoint: "blob.core.windows.net" -reader_config: - "max_retry_requests": 100 -pipeline_config: - "try_timeout": 0`), - wantFailParse: false, - wantFailValidate: true, - }, - { - name: "Missing storage account key", - config: []byte(`storage_account: "asdfasdf" -storage_account_key: "" -container: "MyContainer" -endpoint: "blob.core.windows.net" -reader_config: - "max_retry_requests": 100 -pipeline_config: - "try_timeout": 0`), - wantFailParse: false, - wantFailValidate: true, - }, - { - name: "Negative max_tries", - config: []byte(`storage_account: "asdfasdf" -storage_account_key: "asdfsdf" -container: "MyContainer" -endpoint: "not.valid" -reader_config: - "max_retry_requests": 100 -pipeline_config: - "max_tries": -1 - "try_timeout": 0`), - wantFailParse: false, - wantFailValidate: true, - }, - { - name: "Negative max_retry_requests", - config: []byte(`storage_account: "asdfasdf" -storage_account_key: "asdfsdf" -container: "MyContainer" -endpoint: "not.valid" -reader_config: - "max_retry_requests": -100 -pipeline_config: - "try_timeout": 0`), - wantFailParse: false, - wantFailValidate: true, - }, - { - name: "Not a Duration", - config: []byte(`storage_account: "asdfasdf" -storage_account_key: "asdfsdf" -container: "MyContainer" -endpoint: "not.valid" -reader_config: - "max_retry_requests": 100 -pipeline_config: - "try_timeout": 10`), - wantFailParse: true, - wantFailValidate: true, - }, - { - name: "Valid Duration", - config: []byte(`storage_account: "asdfasdf" 
-storage_account_key: "asdfsdf" -container: "MyContainer" -endpoint: "not.valid" -reader_config: - "max_retry_requests": 100 -pipeline_config: - "try_timeout": "10s"`), - wantFailParse: false, - wantFailValidate: false, - }, - { - name: "msi resource used with storage accounts", - config: []byte(`storage_account: "asdfasdf" -storage_account_key: "asdfsdf" -msi_resource: "https://example.blob.core.windows.net" -container: "MyContainer" -endpoint: "not.valid" -reader_config: - "max_retry_requests": 100 -pipeline_config: - "try_timeout": "10s"`), - wantFailParse: false, - wantFailValidate: true, - }, - { - name: "Valid MSI Resource", - config: []byte(`storage_account: "myAccount" -storage_account_key: "" -msi_resource: "https://example.blob.core.windows.net" -container: "MyContainer" -endpoint: "not.valid" -reader_config: - "max_retry_requests": 100 -pipeline_config: - "try_timeout": "10s"`), - wantFailParse: false, - wantFailValidate: false, - }, - { - name: "Valid User Assigned Identity Config without Resource", - config: []byte(`storage_account: "myAccount" -storage_account_key: "" -user_assigned_id: "1234-56578678-655" -container: "MyContainer"`), - wantFailParse: false, - wantFailValidate: false, - }, - { - name: "Valid User Assigned Identity Config with Resource", - config: []byte(`storage_account: "myAccount" -storage_account_key: "" -user_assigned_id: "1234-56578678-655" -msi_resource: "https://example.blob.core.windows.net" -container: "MyContainer"`), - wantFailParse: false, - wantFailValidate: false, - }, -} - -func TestConfig_validate(t *testing.T) { - - for _, testCase := range tests { - - conf, err := parseConfig(testCase.config) - - if (err != nil) != testCase.wantFailParse { - t.Errorf("%s error = %v, wantFailParse %v", testCase.name, err, testCase.wantFailParse) - continue - } - - validateErr := conf.validate() - if (validateErr != nil) != testCase.wantFailValidate { - t.Errorf("%s error = %v, wantFailValidate %v", testCase.name, validateErr, testCase.wantFailValidate) - } - } - -} - -func TestParseConfig_DefaultHTTPConfig(t *testing.T) { - - cfg, err := parseConfig(validConfig) - testutil.Ok(t, err) - - if time.Duration(cfg.HTTPConfig.IdleConnTimeout) != time.Duration(90*time.Second) { - t.Errorf("parsing of idle_conn_timeout failed: got %v, expected %v", - time.Duration(cfg.HTTPConfig.IdleConnTimeout), time.Duration(90*time.Second)) - } - - if time.Duration(cfg.HTTPConfig.ResponseHeaderTimeout) != time.Duration(2*time.Minute) { - t.Errorf("parsing of response_header_timeout failed: got %v, expected %v", - time.Duration(cfg.HTTPConfig.IdleConnTimeout), time.Duration(2*time.Minute)) - } - - if cfg.HTTPConfig.InsecureSkipVerify { - t.Errorf("parsing of insecure_skip_verify failed: got %v, expected %v", cfg.HTTPConfig.InsecureSkipVerify, false) - } -} - -func TestParseConfig_CustomHTTPConfigWithTLS(t *testing.T) { - input := []byte(`storage_account: "myStorageAccount" -storage_account_key: "abc123" -container: "MyContainer" -endpoint: "blob.core.windows.net" -http_config: - tls_config: - ca_file: /certs/ca.crt - cert_file: /certs/cert.crt - key_file: /certs/key.key - server_name: server - insecure_skip_verify: false - `) - cfg, err := parseConfig(input) - testutil.Ok(t, err) - - testutil.Equals(t, "/certs/ca.crt", cfg.HTTPConfig.TLSConfig.CAFile) - testutil.Equals(t, "/certs/cert.crt", cfg.HTTPConfig.TLSConfig.CertFile) - testutil.Equals(t, "/certs/key.key", cfg.HTTPConfig.TLSConfig.KeyFile) - testutil.Equals(t, "server", cfg.HTTPConfig.TLSConfig.ServerName) - 
testutil.Equals(t, false, cfg.HTTPConfig.TLSConfig.InsecureSkipVerify) -} - -func TestParseConfig_CustomLegacyInsecureSkipVerify(t *testing.T) { - input := []byte(`storage_account: "myStorageAccount" -storage_account_key: "abc123" -container: "MyContainer" -endpoint: "blob.core.windows.net" -http_config: - insecure_skip_verify: true - tls_config: - insecure_skip_verify: false - `) - cfg, err := parseConfig(input) - testutil.Ok(t, err) - transport, err := DefaultTransport(cfg) - testutil.Ok(t, err) - testutil.Equals(t, true, transport.TLSClientConfig.InsecureSkipVerify) -} diff --git a/pkg/objstore/azure/helpers.go b/pkg/objstore/azure/helpers.go deleted file mode 100644 index 3a2e31954f..0000000000 --- a/pkg/objstore/azure/helpers.go +++ /dev/null @@ -1,205 +0,0 @@ -// Copyright (c) The Thanos Authors. -// Licensed under the Apache License 2.0. - -package azure - -import ( - "context" - "fmt" - "net" - "net/http" - "net/url" - "regexp" - "time" - - "github.com/Azure/azure-pipeline-go/pipeline" - blob "github.com/Azure/azure-storage-blob-go/azblob" - "github.com/Azure/go-autorest/autorest/adal" - "github.com/Azure/go-autorest/autorest/azure/auth" - "github.com/go-kit/log" - "github.com/go-kit/log/level" - "github.com/thanos-io/thanos/pkg/objstore" -) - -// DirDelim is the delimiter used to model a directory structure in an object store bucket. -const DirDelim = "/" - -var errorCodeRegex = regexp.MustCompile(`X-Ms-Error-Code:\D*\[(\w+)\]`) - -func init() { - // Disable `ForceLog` in Azure storage module - // As the time of this patch, the logging function in the storage module isn't correctly - // detecting expected REST errors like 404 and so outputs them to syslog along with a stacktrace. - // https://github.com/Azure/azure-storage-blob-go/issues/214 - // - // This needs to be done at startup because the underlying variable is not thread safe. 
- // https://github.com/Azure/azure-pipeline-go/blob/dc95902f1d32034f8f743ccc6c3f2eb36b84da27/pipeline/core.go#L276-L283 - pipeline.SetForceLogEnabled(false) -} - -func getAzureStorageCredentials(logger log.Logger, conf Config) (blob.Credential, error) { - if conf.MSIResource != "" || conf.UserAssignedID != "" { - spt, err := getServicePrincipalToken(logger, conf) - if err != nil { - return nil, err - } - if err := spt.Refresh(); err != nil { - return nil, err - } - - return blob.NewTokenCredential(spt.Token().AccessToken, func(tc blob.TokenCredential) time.Duration { - err := spt.Refresh() - if err != nil { - level.Error(logger).Log("msg", "could not refresh MSI token", "err", err) - // Retry later as the error can be related to API throttling - return 30 * time.Second - } - tc.SetToken(spt.Token().AccessToken) - return spt.Token().Expires().Sub(time.Now().Add(2 * time.Minute)) - }), nil - } - - credential, err := blob.NewSharedKeyCredential(conf.StorageAccountName, conf.StorageAccountKey) - if err != nil { - return nil, err - } - return credential, nil -} - -func getServicePrincipalToken(logger log.Logger, conf Config) (*adal.ServicePrincipalToken, error) { - resource := conf.MSIResource - if resource == "" { - resource = fmt.Sprintf("https://%s.%s", conf.StorageAccountName, conf.Endpoint) - } - - msiConfig := auth.MSIConfig{ - Resource: resource, - } - - if conf.UserAssignedID != "" { - level.Debug(logger).Log("msg", "using user assigned identity", "clientId", conf.UserAssignedID) - msiConfig.ClientID = conf.UserAssignedID - } else { - level.Debug(logger).Log("msg", "using system assigned identity") - } - - return msiConfig.ServicePrincipalToken() -} - -func getContainerURL(ctx context.Context, logger log.Logger, conf Config) (blob.ContainerURL, error) { - credentials, err := getAzureStorageCredentials(logger, conf) - - if err != nil { - return blob.ContainerURL{}, err - } - - retryOptions := blob.RetryOptions{ - MaxTries: conf.PipelineConfig.MaxTries, - TryTimeout: time.Duration(conf.PipelineConfig.TryTimeout), - RetryDelay: time.Duration(conf.PipelineConfig.RetryDelay), - MaxRetryDelay: time.Duration(conf.PipelineConfig.MaxRetryDelay), - } - - if deadline, ok := ctx.Deadline(); ok { - retryOptions.TryTimeout = time.Until(deadline) - } - - dt, err := DefaultTransport(conf) - if err != nil { - return blob.ContainerURL{}, err - } - client := http.Client{ - Transport: dt, - } - - p := blob.NewPipeline(credentials, blob.PipelineOptions{ - Retry: retryOptions, - Telemetry: blob.TelemetryOptions{Value: "Thanos"}, - RequestLog: blob.RequestLogOptions{ - // Log a warning if an operation takes longer than the specified duration. 
- // (-1=no logging; 0=default 3s threshold) - LogWarningIfTryOverThreshold: -1, - }, - Log: pipeline.LogOptions{ - ShouldLog: nil, - }, - HTTPSender: pipeline.FactoryFunc(func(next pipeline.Policy, po *pipeline.PolicyOptions) pipeline.PolicyFunc { - return func(ctx context.Context, request pipeline.Request) (pipeline.Response, error) { - resp, err := client.Do(request.WithContext(ctx)) - - return pipeline.NewHTTPResponse(resp), err - } - }), - }) - u, err := url.Parse(fmt.Sprintf("https://%s.%s", conf.StorageAccountName, conf.Endpoint)) - if err != nil { - return blob.ContainerURL{}, err - } - service := blob.NewServiceURL(*u, p) - - return service.NewContainerURL(conf.ContainerName), nil -} - -func DefaultTransport(config Config) (*http.Transport, error) { - tlsConfig, err := objstore.NewTLSConfig(&config.HTTPConfig.TLSConfig) - if err != nil { - return nil, err - } - - if config.HTTPConfig.InsecureSkipVerify { - tlsConfig.InsecureSkipVerify = true - } - return &http.Transport{ - Proxy: http.ProxyFromEnvironment, - DialContext: (&net.Dialer{ - Timeout: 30 * time.Second, - KeepAlive: 30 * time.Second, - DualStack: true, - }).DialContext, - - MaxIdleConns: config.HTTPConfig.MaxIdleConns, - MaxIdleConnsPerHost: config.HTTPConfig.MaxIdleConnsPerHost, - IdleConnTimeout: time.Duration(config.HTTPConfig.IdleConnTimeout), - MaxConnsPerHost: config.HTTPConfig.MaxConnsPerHost, - TLSHandshakeTimeout: time.Duration(config.HTTPConfig.TLSHandshakeTimeout), - ExpectContinueTimeout: time.Duration(config.HTTPConfig.ExpectContinueTimeout), - - ResponseHeaderTimeout: time.Duration(config.HTTPConfig.ResponseHeaderTimeout), - DisableCompression: config.HTTPConfig.DisableCompression, - TLSClientConfig: tlsConfig, - }, nil -} - -func getContainer(ctx context.Context, logger log.Logger, conf Config) (blob.ContainerURL, error) { - c, err := getContainerURL(ctx, logger, conf) - if err != nil { - return blob.ContainerURL{}, err - } - // Getting container properties to check if it exists or not. Returns error which will be parsed further. - _, err = c.GetProperties(ctx, blob.LeaseAccessConditions{}) - return c, err -} - -func createContainer(ctx context.Context, logger log.Logger, conf Config) (blob.ContainerURL, error) { - c, err := getContainerURL(ctx, logger, conf) - if err != nil { - return blob.ContainerURL{}, err - } - _, err = c.Create( - ctx, - blob.Metadata{}, - blob.PublicAccessNone) - return c, err -} - -func getBlobURL(blobName string, c blob.ContainerURL) blob.BlockBlobURL { - return c.NewBlockBlobURL(blobName) -} - -func parseError(errorCode string) string { - match := errorCodeRegex.FindStringSubmatch(errorCode) - if len(match) == 2 { - return match[1] - } - return errorCode -} diff --git a/pkg/objstore/azure/helpers_test.go b/pkg/objstore/azure/helpers_test.go deleted file mode 100644 index c3139a8c07..0000000000 --- a/pkg/objstore/azure/helpers_test.go +++ /dev/null @@ -1,63 +0,0 @@ -// Copyright (c) The Thanos Authors. -// Licensed under the Apache License 2.0. 
- -package azure - -import ( - "context" - "testing" - - "github.com/go-kit/log" - - "github.com/thanos-io/thanos/pkg/testutil" -) - -func Test_getContainerURL(t *testing.T) { - type args struct { - conf Config - } - tests := []struct { - name string - args args - want string - wantErr bool - }{ - { - name: "default", - args: args{ - conf: Config{ - StorageAccountName: "foo", - StorageAccountKey: "Zm9vCg==", - ContainerName: "roo", - Endpoint: azureDefaultEndpoint, - }, - }, - want: "https://foo.blob.core.windows.net/roo", - wantErr: false, - }, - { - name: "azure china", - args: args{ - conf: Config{ - StorageAccountName: "foo", - StorageAccountKey: "Zm9vCg==", - ContainerName: "roo", - Endpoint: "blob.core.chinacloudapi.cn", - }, - }, - want: "https://foo.blob.core.chinacloudapi.cn/roo", - wantErr: false, - }, - } - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - ctx := context.Background() - got, err := getContainerURL(ctx, log.NewNopLogger(), tt.args.conf) - if (err != nil) != tt.wantErr { - t.Errorf("getContainerURL() error = %v, wantErr %v", err, tt.wantErr) - return - } - testutil.Equals(t, tt.want, got.String()) - }) - } -} diff --git a/pkg/objstore/bos/bos.go b/pkg/objstore/bos/bos.go deleted file mode 100644 index a34f3eccda..0000000000 --- a/pkg/objstore/bos/bos.go +++ /dev/null @@ -1,393 +0,0 @@ -// Copyright (c) The Thanos Authors. -// Licensed under the Apache License 2.0. - -package bos - -import ( - "context" - "fmt" - "io" - "math" - "math/rand" - "net/http" - "os" - "strings" - "testing" - "time" - - "github.com/baidubce/bce-sdk-go/bce" - "github.com/baidubce/bce-sdk-go/services/bos" - "github.com/baidubce/bce-sdk-go/services/bos/api" - "github.com/go-kit/log" - "github.com/pkg/errors" - "gopkg.in/yaml.v2" - - "github.com/thanos-io/thanos/pkg/objstore" -) - -// partSize 128MB. -const partSize = 1024 * 1024 * 128 - -// Bucket implements the store.Bucket interface against bos-compatible(Baidu Object Storage) APIs. -type Bucket struct { - logger log.Logger - client *bos.Client - name string -} - -// Config encapsulates the necessary config values to instantiate an bos client. -type Config struct { - Bucket string `yaml:"bucket"` - Endpoint string `yaml:"endpoint"` - AccessKey string `yaml:"access_key"` - SecretKey string `yaml:"secret_key"` -} - -func (conf *Config) validate() error { - if conf.Bucket == "" || - conf.Endpoint == "" || - conf.AccessKey == "" || - conf.SecretKey == "" { - return errors.New("insufficient BOS configuration information") - } - - return nil -} - -// parseConfig unmarshal a buffer into a Config with default HTTPConfig values. -func parseConfig(conf []byte) (Config, error) { - config := Config{} - if err := yaml.Unmarshal(conf, &config); err != nil { - return Config{}, err - } - - return config, nil -} - -// NewBucket new bos bucket. -func NewBucket(logger log.Logger, conf []byte, component string) (*Bucket, error) { - if logger == nil { - logger = log.NewNopLogger() - } - - config, err := parseConfig(conf) - if err != nil { - return nil, errors.Wrap(err, "parsing BOS configuration") - } - - return NewBucketWithConfig(logger, config, component) -} - -// NewBucketWithConfig returns a new Bucket using the provided bos config struct. 
-func NewBucketWithConfig(logger log.Logger, config Config, component string) (*Bucket, error) { - if err := config.validate(); err != nil { - return nil, errors.Wrap(err, "validating BOS configuration") - } - - client, err := bos.NewClient(config.AccessKey, config.SecretKey, config.Endpoint) - if err != nil { - return nil, errors.Wrap(err, "creating BOS client") - } - - client.Config.UserAgent = fmt.Sprintf("thanos-%s", component) - - bkt := &Bucket{ - logger: logger, - client: client, - name: config.Bucket, - } - return bkt, nil -} - -// Name returns the bucket name for the provider. -func (b *Bucket) Name() string { - return b.name -} - -// Delete removes the object with the given name. -func (b *Bucket) Delete(_ context.Context, name string) error { - return b.client.DeleteObject(b.name, name) -} - -// Upload the contents of the reader as an object into the bucket. -func (b *Bucket) Upload(_ context.Context, name string, r io.Reader) error { - size, err := objstore.TryToGetSize(r) - if err != nil { - return errors.Wrapf(err, "getting size of %s", name) - } - - partNums, lastSlice := int(math.Floor(float64(size)/partSize)), size%partSize - if partNums == 0 { - body, err := bce.NewBodyFromSizedReader(r, lastSlice) - if err != nil { - return errors.Wrapf(err, "failed to create SizedReader for %s", name) - } - - if _, err := b.client.PutObject(b.name, name, body, nil); err != nil { - return errors.Wrapf(err, "failed to upload %s", name) - } - - return nil - } - - result, err := b.client.BasicInitiateMultipartUpload(b.name, name) - if err != nil { - return errors.Wrapf(err, "failed to initiate MultipartUpload for %s", name) - } - - uploadEveryPart := func(partSize int64, part int, uploadId string) (string, error) { - body, err := bce.NewBodyFromSizedReader(r, partSize) - if err != nil { - return "", err - } - - etag, err := b.client.UploadPart(b.name, name, uploadId, part, body, nil) - if err != nil { - if err := b.client.AbortMultipartUpload(b.name, name, uploadId); err != nil { - return etag, err - } - return etag, err - } - return etag, nil - } - - var parts []api.UploadInfoType - - for part := 1; part <= partNums; part++ { - etag, err := uploadEveryPart(partSize, part, result.UploadId) - if err != nil { - return errors.Wrapf(err, "failed to upload part %d for %s", part, name) - } - parts = append(parts, api.UploadInfoType{PartNumber: part, ETag: etag}) - } - - if lastSlice != 0 { - etag, err := uploadEveryPart(lastSlice, partNums+1, result.UploadId) - if err != nil { - return errors.Wrapf(err, "failed to upload the last part for %s", name) - } - parts = append(parts, api.UploadInfoType{PartNumber: partNums + 1, ETag: etag}) - } - - if _, err := b.client.CompleteMultipartUploadFromStruct(b.name, name, result.UploadId, &api.CompleteMultipartUploadArgs{Parts: parts}); err != nil { - return errors.Wrapf(err, "failed to set %s upload completed", name) - } - return nil -} - -// Iter calls f for each entry in the given directory (not recursive). The argument to f is the full -// object name including the prefix of the inspected directory. 
-func (b *Bucket) Iter(ctx context.Context, dir string, f func(string) error, opt ...objstore.IterOption) error { - if dir != "" { - dir = strings.TrimSuffix(dir, objstore.DirDelim) + objstore.DirDelim - } - - delimiter := objstore.DirDelim - - if objstore.ApplyIterOptions(opt...).Recursive { - delimiter = "" - } - - var marker string - for { - if err := ctx.Err(); err != nil { - return err - } - - objects, err := b.client.ListObjects(b.name, &api.ListObjectsArgs{ - Delimiter: delimiter, - Marker: marker, - MaxKeys: 1000, - Prefix: dir, - }) - if err != nil { - return err - } - - marker = objects.NextMarker - for _, object := range objects.Contents { - if err := f(object.Key); err != nil { - return err - } - } - - for _, object := range objects.CommonPrefixes { - if err := f(object.Prefix); err != nil { - return err - } - } - if !objects.IsTruncated { - break - } - } - return nil -} - -// Get returns a reader for the given object name. -func (b *Bucket) Get(ctx context.Context, name string) (io.ReadCloser, error) { - return b.getRange(ctx, b.name, name, 0, -1) -} - -// GetRange returns a new range reader for the given object name and range. -func (b *Bucket) GetRange(ctx context.Context, name string, off, length int64) (io.ReadCloser, error) { - return b.getRange(ctx, b.name, name, off, length) -} - -// Exists checks if the given object exists in the bucket. -func (b *Bucket) Exists(_ context.Context, name string) (bool, error) { - _, err := b.client.GetObjectMeta(b.name, name) - if err != nil { - if b.IsObjNotFoundErr(err) { - return false, nil - } - return false, errors.Wrapf(err, "getting object metadata of %s", name) - } - return true, nil -} - -func (b *Bucket) Close() error { - return nil -} - -// ObjectSize returns the size of the specified object. -func (b *Bucket) ObjectSize(_ context.Context, name string) (uint64, error) { - objMeta, err := b.client.GetObjectMeta(b.name, name) - if err != nil { - return 0, err - } - return uint64(objMeta.ContentLength), nil -} - -// Attributes returns information about the specified object. -func (b *Bucket) Attributes(_ context.Context, name string) (objstore.ObjectAttributes, error) { - objMeta, err := b.client.GetObjectMeta(b.name, name) - if err != nil { - return objstore.ObjectAttributes{}, errors.Wrapf(err, "gettting objectmeta of %s", name) - } - - lastModified, err := time.Parse(time.RFC1123, objMeta.LastModified) - if err != nil { - return objstore.ObjectAttributes{}, err - } - - return objstore.ObjectAttributes{ - Size: objMeta.ContentLength, - LastModified: lastModified, - }, nil -} - -// IsObjNotFoundErr returns true if error means that object is not found. Relevant to Get operations. -func (b *Bucket) IsObjNotFoundErr(err error) bool { - switch bosErr := errors.Cause(err).(type) { - case *bce.BceServiceError: - if bosErr.StatusCode == http.StatusNotFound || bosErr.Code == "NoSuchKey" { - return true - } - } - return false -} - -func (b *Bucket) getRange(_ context.Context, bucketName, objectKey string, off, length int64) (io.ReadCloser, error) { - if len(objectKey) == 0 { - return nil, errors.Errorf("given object name should not empty") - } - - ranges := []int64{off} - if length != -1 { - ranges = append(ranges, off+length-1) - } - - obj, err := b.client.GetObject(bucketName, objectKey, map[string]string{}, ranges...) 
- if err != nil { - return nil, err - } - - return obj.Body, nil -} - -func configFromEnv() Config { - c := Config{ - Bucket: os.Getenv("BOS_BUCKET"), - Endpoint: os.Getenv("BOS_ENDPOINT"), - AccessKey: os.Getenv("BOS_ACCESS_KEY"), - SecretKey: os.Getenv("BOS_SECRET_KEY"), - } - return c -} - -// NewTestBucket creates test bkt client that before returning creates temporary bucket. -// In a close function it empties and deletes the bucket. -func NewTestBucket(t testing.TB) (objstore.Bucket, func(), error) { - c := configFromEnv() - if err := validateForTest(c); err != nil { - return nil, nil, err - } - - if c.Bucket != "" { - if os.Getenv("THANOS_ALLOW_EXISTING_BUCKET_USE") == "" { - return nil, nil, errors.New("BOS_BUCKET is defined. Normally this tests will create temporary bucket " + - "and delete it after test. Unset BOS_BUCKET env variable to use default logic. If you really want to run " + - "tests against provided (NOT USED!) bucket, set THANOS_ALLOW_EXISTING_BUCKET_USE=true. WARNING: That bucket " + - "needs to be manually cleared. This means that it is only useful to run one test in a time. This is due " + - "to safety (accidentally pointing prod bucket for test) as well as BOS not being fully strong consistent.") - } - - bc, err := yaml.Marshal(c) - if err != nil { - return nil, nil, err - } - - b, err := NewBucket(log.NewNopLogger(), bc, "thanos-e2e-test") - if err != nil { - return nil, nil, err - } - - if err := b.Iter(context.Background(), "", func(f string) error { - return errors.Errorf("bucket %s is not empty", c.Bucket) - }); err != nil { - return nil, nil, errors.Wrapf(err, "checking bucket %s", c.Bucket) - } - - t.Log("WARNING. Reusing", c.Bucket, "BOS bucket for BOS tests. Manual cleanup afterwards is required") - return b, func() {}, nil - } - - src := rand.NewSource(time.Now().UnixNano()) - tmpBucketName := strings.Replace(fmt.Sprintf("test_%x", src.Int63()), "_", "-", -1) - - if len(tmpBucketName) >= 31 { - tmpBucketName = tmpBucketName[:31] - } - - c.Bucket = tmpBucketName - bc, err := yaml.Marshal(c) - if err != nil { - return nil, nil, err - } - - b, err := NewBucket(log.NewNopLogger(), bc, "thanos-e2e-test") - if err != nil { - return nil, nil, err - } - - if _, err := b.client.PutBucket(b.name); err != nil { - return nil, nil, err - } - - t.Log("created temporary BOS bucket for BOS tests with name", tmpBucketName) - return b, func() { - objstore.EmptyBucket(t, context.Background(), b) - if err := b.client.DeleteBucket(b.name); err != nil { - t.Logf("deleting bucket %s failed: %s", tmpBucketName, err) - } - }, nil -} - -func validateForTest(conf Config) error { - if conf.Endpoint == "" || - conf.AccessKey == "" || - conf.SecretKey == "" { - return errors.New("insufficient BOS configuration information") - } - return nil -} diff --git a/pkg/objstore/client/factory.go b/pkg/objstore/client/factory.go deleted file mode 100644 index f066bde362..0000000000 --- a/pkg/objstore/client/factory.go +++ /dev/null @@ -1,85 +0,0 @@ -// Copyright (c) The Thanos Authors. -// Licensed under the Apache License 2.0. 
- -package client - -import ( - "context" - "fmt" - "strings" - - "github.com/go-kit/log" - "github.com/go-kit/log/level" - "github.com/pkg/errors" - "github.com/prometheus/client_golang/prometheus" - yaml "gopkg.in/yaml.v2" - - "github.com/thanos-io/thanos/pkg/objstore" - "github.com/thanos-io/thanos/pkg/objstore/azure" - "github.com/thanos-io/thanos/pkg/objstore/bos" - "github.com/thanos-io/thanos/pkg/objstore/cos" - "github.com/thanos-io/thanos/pkg/objstore/filesystem" - "github.com/thanos-io/thanos/pkg/objstore/gcs" - "github.com/thanos-io/thanos/pkg/objstore/oss" - "github.com/thanos-io/thanos/pkg/objstore/s3" - "github.com/thanos-io/thanos/pkg/objstore/swift" -) - -type ObjProvider string - -const ( - FILESYSTEM ObjProvider = "FILESYSTEM" - GCS ObjProvider = "GCS" - S3 ObjProvider = "S3" - AZURE ObjProvider = "AZURE" - SWIFT ObjProvider = "SWIFT" - COS ObjProvider = "COS" - ALIYUNOSS ObjProvider = "ALIYUNOSS" - BOS ObjProvider = "BOS" -) - -type BucketConfig struct { - Type ObjProvider `yaml:"type"` - Config interface{} `yaml:"config"` -} - -// NewBucket initializes and returns new object storage clients. -// NOTE: confContentYaml can contain secrets. -func NewBucket(logger log.Logger, confContentYaml []byte, reg prometheus.Registerer, component string) (objstore.InstrumentedBucket, error) { - level.Info(logger).Log("msg", "loading bucket configuration") - bucketConf := &BucketConfig{} - if err := yaml.UnmarshalStrict(confContentYaml, bucketConf); err != nil { - return nil, errors.Wrap(err, "parsing config YAML file") - } - - config, err := yaml.Marshal(bucketConf.Config) - if err != nil { - return nil, errors.Wrap(err, "marshal content of bucket configuration") - } - - var bucket objstore.Bucket - switch strings.ToUpper(string(bucketConf.Type)) { - case string(GCS): - bucket, err = gcs.NewBucket(context.Background(), logger, config, component) - case string(S3): - bucket, err = s3.NewBucket(logger, config, component) - case string(AZURE): - bucket, err = azure.NewBucket(logger, config, component) - case string(SWIFT): - bucket, err = swift.NewContainer(logger, config) - case string(COS): - bucket, err = cos.NewBucket(logger, config, component) - case string(ALIYUNOSS): - bucket, err = oss.NewBucket(logger, config, component) - case string(FILESYSTEM): - bucket, err = filesystem.NewBucketFromConfig(config) - case string(BOS): - bucket, err = bos.NewBucket(logger, config, component) - default: - return nil, errors.Errorf("bucket with type %s is not supported", bucketConf.Type) - } - if err != nil { - return nil, errors.Wrap(err, fmt.Sprintf("create %s client", bucketConf.Type)) - } - return objstore.NewTracingBucket(objstore.BucketWithMetrics(bucket.Name(), bucket, reg)), nil -} diff --git a/pkg/objstore/client/testconf/blank-gcs.conf.yml b/pkg/objstore/client/testconf/blank-gcs.conf.yml deleted file mode 100644 index cb5ef588cc..0000000000 --- a/pkg/objstore/client/testconf/blank-gcs.conf.yml +++ /dev/null @@ -1 +0,0 @@ -type: GCS \ No newline at end of file diff --git a/pkg/objstore/client/testconf/fake-gcs.conf.yml b/pkg/objstore/client/testconf/fake-gcs.conf.yml deleted file mode 100644 index 538c832788..0000000000 --- a/pkg/objstore/client/testconf/fake-gcs.conf.yml +++ /dev/null @@ -1,3 +0,0 @@ -type: FAKE-GCS -config: - bucket: test-bucket \ No newline at end of file diff --git a/pkg/objstore/clientutil/parse.go b/pkg/objstore/clientutil/parse.go deleted file mode 100644 index 759c42d29c..0000000000 --- a/pkg/objstore/clientutil/parse.go +++ /dev/null @@ -1,65 +0,0 @@ -// 
Copyright (c) The Thanos Authors. -// Licensed under the Apache License 2.0. - -package clientutil - -import ( - "net/http" - "strconv" - "time" - - "github.com/pkg/errors" -) - -// ParseContentLength returns the content length (in bytes) parsed from the Content-Length -// HTTP header in input. -func ParseContentLength(m http.Header) (int64, error) { - const name = "Content-Length" - - v, ok := m[name] - if !ok { - return 0, errors.Errorf("%s header not found", name) - } - - if len(v) == 0 { - return 0, errors.Errorf("%s header has no values", name) - } - - ret, err := strconv.ParseInt(v[0], 10, 64) - if err != nil { - return 0, errors.Wrapf(err, "convert %s", name) - } - - return ret, nil -} - -// ParseLastModified returns the timestamp parsed from the Last-Modified -// HTTP header in input. -// Passing an second parameter, named f, to specify the time format. -// If f is empty then RFC3339 will be used as default format. -func ParseLastModified(m http.Header, f string) (time.Time, error) { - const ( - name = "Last-Modified" - defaultFormat = time.RFC3339 - ) - - v, ok := m[name] - if !ok { - return time.Time{}, errors.Errorf("%s header not found", name) - } - - if len(v) == 0 { - return time.Time{}, errors.Errorf("%s header has no values", name) - } - - if f == "" { - f = defaultFormat - } - - mod, err := time.Parse(f, v[0]) - if err != nil { - return time.Time{}, errors.Wrapf(err, "parse %s", name) - } - - return mod, nil -} diff --git a/pkg/objstore/clientutil/parse_test.go b/pkg/objstore/clientutil/parse_test.go deleted file mode 100644 index e2c44d8aaa..0000000000 --- a/pkg/objstore/clientutil/parse_test.go +++ /dev/null @@ -1,110 +0,0 @@ -// Copyright (c) The Thanos Authors. -// Licensed under the Apache License 2.0. - -package clientutil - -import ( - "net/http" - "testing" - "time" - - alioss "github.com/aliyun/aliyun-oss-go-sdk/oss" - "github.com/thanos-io/thanos/pkg/testutil" -) - -func TestParseLastModified(t *testing.T) { - location, _ := time.LoadLocation("GMT") - tests := map[string]struct { - headerValue string - expectedVal time.Time - expectedErr string - format string - }{ - "no header": { - expectedErr: "Last-Modified header not found", - }, - "empty format string to default RFC3339 format": { - headerValue: "2015-11-06T10:07:11.000Z", - expectedVal: time.Date(2015, time.November, 6, 10, 7, 11, 0, time.UTC), - format: "", - }, - "valid RFC3339 header value": { - headerValue: "2015-11-06T10:07:11.000Z", - expectedVal: time.Date(2015, time.November, 6, 10, 7, 11, 0, time.UTC), - format: time.RFC3339, - }, - "invalid RFC3339 header value": { - headerValue: "invalid", - expectedErr: `parse Last-Modified: parsing time "invalid" as "2006-01-02T15:04:05Z07:00": cannot parse "invalid" as "2006"`, - format: time.RFC3339, - }, - "valid RFC1123 header value": { - headerValue: "Fri, 24 Feb 2012 06:07:48 GMT", - expectedVal: time.Date(2012, time.February, 24, 6, 7, 48, 0, location), - format: time.RFC1123, - }, - "invalid RFC1123 header value": { - headerValue: "invalid", - expectedErr: `parse Last-Modified: parsing time "invalid" as "Mon, 02 Jan 2006 15:04:05 MST": cannot parse "invalid" as "Mon"`, - format: time.RFC1123, - }, - } - - for testName, testData := range tests { - t.Run(testName, func(t *testing.T) { - meta := http.Header{} - if testData.headerValue != "" { - meta.Add(alioss.HTTPHeaderLastModified, testData.headerValue) - } - - actual, err := ParseLastModified(meta, testData.format) - - if testData.expectedErr != "" { - testutil.NotOk(t, err) - testutil.Equals(t, 
testData.expectedErr, err.Error()) - } else { - testutil.Ok(t, err) - testutil.Assert(t, testData.expectedVal.Equal(actual)) - } - }) - } -} - -func TestParseContentLength(t *testing.T) { - tests := map[string]struct { - headerValue string - expectedVal int64 - expectedErr string - }{ - "no header": { - expectedErr: "Content-Length header not found", - }, - "invalid header value": { - headerValue: "invalid", - expectedErr: `convert Content-Length: strconv.ParseInt: parsing "invalid": invalid syntax`, - }, - "valid header value": { - headerValue: "12345", - expectedVal: 12345, - }, - } - - for testName, testData := range tests { - t.Run(testName, func(t *testing.T) { - meta := http.Header{} - if testData.headerValue != "" { - meta.Add(alioss.HTTPHeaderContentLength, testData.headerValue) - } - - actual, err := ParseContentLength(meta) - - if testData.expectedErr != "" { - testutil.NotOk(t, err) - testutil.Equals(t, testData.expectedErr, err.Error()) - } else { - testutil.Ok(t, err) - testutil.Equals(t, testData.expectedVal, actual) - } - }) - } -} diff --git a/pkg/objstore/cos/cos.go b/pkg/objstore/cos/cos.go deleted file mode 100644 index 91529db2a5..0000000000 --- a/pkg/objstore/cos/cos.go +++ /dev/null @@ -1,573 +0,0 @@ -// Copyright (c) The Thanos Authors. -// Licensed under the Apache License 2.0. - -package cos - -import ( - "context" - "fmt" - "io" - "math" - "math/rand" - "net/http" - "net/url" - "os" - "strings" - "testing" - "time" - - "github.com/go-kit/log" - "github.com/pkg/errors" - "github.com/prometheus/common/model" - "github.com/tencentyun/cos-go-sdk-v5" - "gopkg.in/yaml.v2" - - "github.com/thanos-io/thanos/pkg/exthttp" - "github.com/thanos-io/thanos/pkg/objstore" - "github.com/thanos-io/thanos/pkg/objstore/clientutil" - "github.com/thanos-io/thanos/pkg/runutil" -) - -// DirDelim is the delimiter used to model a directory structure in an object store bucket. -const dirDelim = "/" - -// Bucket implements the store.Bucket interface against cos-compatible(Tencent Object Storage) APIs. -type Bucket struct { - logger log.Logger - client *cos.Client - name string -} - -// DefaultConfig is the default config for an cos client. default tune the `MaxIdleConnsPerHost`. -var DefaultConfig = Config{ - HTTPConfig: HTTPConfig{ - IdleConnTimeout: model.Duration(90 * time.Second), - ResponseHeaderTimeout: model.Duration(2 * time.Minute), - TLSHandshakeTimeout: model.Duration(10 * time.Second), - ExpectContinueTimeout: model.Duration(1 * time.Second), - MaxIdleConns: 100, - MaxIdleConnsPerHost: 100, - MaxConnsPerHost: 0, - }, -} - -// Config encapsulates the necessary config values to instantiate an cos client. -type Config struct { - Bucket string `yaml:"bucket"` - Region string `yaml:"region"` - AppId string `yaml:"app_id"` - Endpoint string `yaml:"endpoint"` - SecretKey string `yaml:"secret_key"` - SecretId string `yaml:"secret_id"` - HTTPConfig HTTPConfig `yaml:"http_config"` -} - -// Validate checks to see if mandatory cos config options are set. 
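
Stepping back to the pkg/objstore/clientutil helpers removed just above: they are thin wrappers over strconv.ParseInt and time.Parse keyed on well-known HTTP headers. An inline equivalent, with made-up header values:

package main

import (
	"fmt"
	"net/http"
	"strconv"
	"time"
)

func main() {
	// Illustrative values; COS, for example, sends Last-Modified in RFC1123.
	h := http.Header{}
	h.Set("Content-Length", "12345")
	h.Set("Last-Modified", "Fri, 24 Feb 2012 06:07:48 GMT")

	size, err := strconv.ParseInt(h.Get("Content-Length"), 10, 64)
	if err != nil {
		panic(err)
	}
	mod, err := time.Parse(time.RFC1123, h.Get("Last-Modified"))
	if err != nil {
		panic(err)
	}
	fmt.Println(size, mod.UTC()) // 12345 2012-02-24 06:07:48 +0000 UTC
}
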
-func (conf *Config) validate() error { - if conf.Endpoint != "" { - if _, err := url.Parse(conf.Endpoint); err != nil { - return errors.Wrap(err, "parse endpoint") - } - if conf.SecretId == "" || - conf.SecretKey == "" { - return errors.New("secret_id or secret_key is empty") - } - return nil - } - if conf.Bucket == "" || - conf.AppId == "" || - conf.Region == "" || - conf.SecretId == "" || - conf.SecretKey == "" { - return errors.New("insufficient cos configuration information") - } - return nil -} - -// parseConfig unmarshal a buffer into a Config with default HTTPConfig values. -func parseConfig(conf []byte) (Config, error) { - config := DefaultConfig - if err := yaml.Unmarshal(conf, &config); err != nil { - return Config{}, err - } - - return config, nil -} - -// HTTPConfig stores the http.Transport configuration for the cos client. -type HTTPConfig struct { - IdleConnTimeout model.Duration `yaml:"idle_conn_timeout"` - ResponseHeaderTimeout model.Duration `yaml:"response_header_timeout"` - TLSHandshakeTimeout model.Duration `yaml:"tls_handshake_timeout"` - ExpectContinueTimeout model.Duration `yaml:"expect_continue_timeout"` - MaxIdleConns int `yaml:"max_idle_conns"` - MaxIdleConnsPerHost int `yaml:"max_idle_conns_per_host"` - MaxConnsPerHost int `yaml:"max_conns_per_host"` -} - -// DefaultTransport build http.Transport from config. -func DefaultTransport(c HTTPConfig) *http.Transport { - transport := exthttp.NewTransport() - transport.IdleConnTimeout = time.Duration(c.IdleConnTimeout) - transport.ResponseHeaderTimeout = time.Duration(c.ResponseHeaderTimeout) - transport.TLSHandshakeTimeout = time.Duration(c.TLSHandshakeTimeout) - transport.ExpectContinueTimeout = time.Duration(c.ExpectContinueTimeout) - transport.MaxIdleConns = c.MaxIdleConns - transport.MaxIdleConnsPerHost = c.MaxIdleConnsPerHost - transport.MaxConnsPerHost = c.MaxConnsPerHost - return transport -} - -// NewBucket returns a new Bucket using the provided cos configuration. -func NewBucket(logger log.Logger, conf []byte, component string) (*Bucket, error) { - if logger == nil { - logger = log.NewNopLogger() - } - - config, err := parseConfig(conf) - if err != nil { - return nil, errors.Wrap(err, "parsing cos configuration") - } - - return NewBucketWithConfig(logger, config, component) -} - -// NewBucketWithConfig returns a new Bucket using the provided cos config values. -func NewBucketWithConfig(logger log.Logger, config Config, component string) (*Bucket, error) { - if err := config.validate(); err != nil { - return nil, errors.Wrap(err, "validate cos configuration") - } - - var bucketURL *url.URL - var err error - if config.Endpoint != "" { - bucketURL, err = url.Parse(config.Endpoint) - if err != nil { - return nil, errors.Wrap(err, "parse endpoint") - } - } else { - bucketURL = cos.NewBucketURL(fmt.Sprintf("%s-%s", config.Bucket, config.AppId), config.Region, true) - } - b := &cos.BaseURL{BucketURL: bucketURL} - client := cos.NewClient(b, &http.Client{ - Transport: &cos.AuthorizationTransport{ - SecretID: config.SecretId, - SecretKey: config.SecretKey, - Transport: DefaultTransport(config.HTTPConfig), - }, - }) - - bkt := &Bucket{ - logger: logger, - client: client, - name: config.Bucket, - } - return bkt, nil -} - -// Name returns the bucket name for COS. -func (b *Bucket) Name() string { - return b.name -} - -// Attributes returns information about the specified object. 
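
The validate method above admits two distinct configuration shapes. A sketch of both, with placeholder values mirroring the Config fields deleted above:

package main

import "fmt"

// cosConfig mirrors the fields of the COS Config struct deleted above.
type cosConfig struct {
	Bucket, Region, AppId, Endpoint, SecretKey, SecretId string
}

func main() {
	// Mode 1: an explicit bucket URL endpoint plus credentials.
	byEndpoint := cosConfig{
		Endpoint:  "http://bucket-123.cos.ap-beijing.myqcloud.com",
		SecretId:  "sid",
		SecretKey: "skey",
	}

	// Mode 2: bucket + app_id + region, from which NewBucketWithConfig derives
	// the bucket URL (roughly https://<bucket>-<app_id>.cos.<region>.myqcloud.com).
	byRegion := cosConfig{
		Bucket:    "bucket",
		AppId:     "123",
		Region:    "ap-beijing",
		SecretId:  "sid",
		SecretKey: "skey",
	}

	fmt.Println(byEndpoint.Endpoint, byRegion.Bucket)
}
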
-func (b *Bucket) Attributes(ctx context.Context, name string) (objstore.ObjectAttributes, error) { - resp, err := b.client.Object.Head(ctx, name, nil) - if err != nil { - return objstore.ObjectAttributes{}, err - } - - size, err := clientutil.ParseContentLength(resp.Header) - if err != nil { - return objstore.ObjectAttributes{}, err - } - - // tencent cos return Last-Modified header in RFC1123 format. - // see api doc for details: https://intl.cloud.tencent.com/document/product/436/7729 - mod, err := clientutil.ParseLastModified(resp.Header, time.RFC1123) - if err != nil { - return objstore.ObjectAttributes{}, err - } - - return objstore.ObjectAttributes{ - Size: size, - LastModified: mod, - }, nil -} - -var ( - _ cos.FixedLengthReader = (*fixedLengthReader)(nil) -) - -type fixedLengthReader struct { - io.Reader - size int64 -} - -func newFixedLengthReader(r io.Reader, size int64) io.Reader { - return fixedLengthReader{ - Reader: io.LimitReader(r, size), - size: size, - } -} - -// Size implement cos.FixedLengthReader interface. -func (r fixedLengthReader) Size() int64 { - return r.size -} - -// Upload the contents of the reader as an object into the bucket. -func (b *Bucket) Upload(ctx context.Context, name string, r io.Reader) error { - size, err := objstore.TryToGetSize(r) - if err != nil { - return errors.Wrapf(err, "getting size of %s", name) - } - // partSize 128MB. - const partSize = 1024 * 1024 * 128 - partNums, lastSlice := int(math.Floor(float64(size)/partSize)), size%partSize - if partNums == 0 { - if _, err := b.client.Object.Put(ctx, name, r, nil); err != nil { - return errors.Wrapf(err, "Put object: %s", name) - } - return nil - } - // 1. init. - result, _, err := b.client.Object.InitiateMultipartUpload(ctx, name, nil) - if err != nil { - return errors.Wrapf(err, "InitiateMultipartUpload %s", name) - } - uploadEveryPart := func(partSize int64, part int, uploadID string) (string, error) { - r := newFixedLengthReader(r, partSize) - resp, err := b.client.Object.UploadPart(ctx, name, uploadID, part, r, &cos.ObjectUploadPartOptions{ - ContentLength: partSize, - }) - if err != nil { - if _, err := b.client.Object.AbortMultipartUpload(ctx, name, uploadID); err != nil { - return "", err - } - return "", err - } - etag := resp.Header.Get("ETag") - return etag, nil - } - optcom := &cos.CompleteMultipartUploadOptions{} - // 2. upload parts. - for part := 1; part <= partNums; part++ { - etag, err := uploadEveryPart(partSize, part, result.UploadID) - if err != nil { - return errors.Wrapf(err, "uploadPart %d, %s", part, name) - } - optcom.Parts = append(optcom.Parts, cos.Object{ - PartNumber: part, ETag: etag}, - ) - } - // 3. upload last part. - if lastSlice != 0 { - part := partNums + 1 - etag, err := uploadEveryPart(lastSlice, part, result.UploadID) - if err != nil { - return errors.Wrapf(err, "uploadPart %d, %s", part, name) - } - optcom.Parts = append(optcom.Parts, cos.Object{ - PartNumber: part, ETag: etag}, - ) - } - // 4. complete. - if _, _, err := b.client.Object.CompleteMultipartUpload(ctx, name, result.UploadID, optcom); err != nil { - return errors.Wrapf(err, "CompleteMultipartUpload %s", name) - } - return nil -} - -// Delete removes the object with the given name. -func (b *Bucket) Delete(ctx context.Context, name string) error { - if _, err := b.client.Object.Delete(ctx, name); err != nil { - return errors.Wrap(err, "delete cos object") - } - return nil -} - -// Iter calls f for each entry in the given directory (not recursive.). 
The argument to f is the full -// object name including the prefix of the inspected directory. -func (b *Bucket) Iter(ctx context.Context, dir string, f func(string) error, options ...objstore.IterOption) error { - if dir != "" { - dir = strings.TrimSuffix(dir, dirDelim) + dirDelim - } - - for object := range b.listObjects(ctx, dir, options...) { - if object.err != nil { - return object.err - } - if object.key == "" { - continue - } - if err := f(object.key); err != nil { - return err - } - } - - return nil -} - -func (b *Bucket) getRange(ctx context.Context, name string, off, length int64) (io.ReadCloser, error) { - if name == "" { - return nil, errors.New("given object name should not empty") - } - - opts := &cos.ObjectGetOptions{} - if length != -1 { - if err := setRange(opts, off, off+length-1); err != nil { - return nil, err - } - } else if off > 0 { - if err := setRange(opts, off, 0); err != nil { - return nil, err - } - } - - resp, err := b.client.Object.Get(ctx, name, opts) - if err != nil { - return nil, err - } - if _, err := resp.Body.Read(nil); err != nil { - runutil.ExhaustCloseWithLogOnErr(b.logger, resp.Body, "cos get range obj close") - return nil, err - } - // Add size info into reader to pass it to Upload function. - r := objectSizerReadCloser{ReadCloser: resp.Body, size: resp.ContentLength} - return r, nil -} - -type objectSizerReadCloser struct { - io.ReadCloser - size int64 -} - -// ObjectSize implement objstore.ObjectSizer. -func (o objectSizerReadCloser) ObjectSize() (int64, error) { - return o.size, nil -} - -// Get returns a reader for the given object name. -func (b *Bucket) Get(ctx context.Context, name string) (io.ReadCloser, error) { - return b.getRange(ctx, name, 0, -1) -} - -// GetRange returns a new range reader for the given object name and range. -func (b *Bucket) GetRange(ctx context.Context, name string, off, length int64) (io.ReadCloser, error) { - return b.getRange(ctx, name, off, length) -} - -// Exists checks if the given object exists in the bucket. -func (b *Bucket) Exists(ctx context.Context, name string) (bool, error) { - if _, err := b.client.Object.Head(ctx, name, nil); err != nil { - if b.IsObjNotFoundErr(err) { - return false, nil - } - return false, errors.Wrap(err, "head cos object") - } - - return true, nil -} - -// IsObjNotFoundErr returns true if error means that object is not found. Relevant to Get operations. -func (b *Bucket) IsObjNotFoundErr(err error) bool { - switch tmpErr := errors.Cause(err).(type) { - case *cos.ErrorResponse: - if tmpErr.Code == "NoSuchKey" || - (tmpErr.Response != nil && tmpErr.Response.StatusCode == http.StatusNotFound) { - return true - } - return false - default: - return false - } -} - -func (b *Bucket) Close() error { return nil } - -type objectInfo struct { - key string - err error -} - -func (b *Bucket) listObjects(ctx context.Context, objectPrefix string, options ...objstore.IterOption) <-chan objectInfo { - objectsCh := make(chan objectInfo, 1) - - // If recursive iteration is enabled we should pass an empty delimiter. 
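
A note on the fixedLengthReader defined a little above: the COS SDK wants each uploaded part to be a reader that reports its exact size, and the type gets that by combining io.LimitReader with a stored size. A standalone sketch of the same trick:

package main

import (
	"fmt"
	"io"
	"io/ioutil"
	"strings"
)

// sizedReader mirrors the deleted fixedLengthReader: it caps the underlying
// reader at size bytes and reports that size via Size().
type sizedReader struct {
	io.Reader
	size int64
}

func (r sizedReader) Size() int64 { return r.size }

func main() {
	src := strings.NewReader("0123456789")

	part := sizedReader{Reader: io.LimitReader(src, 4), size: 4}
	buf, err := ioutil.ReadAll(part)
	if err != nil {
		panic(err)
	}
	fmt.Println(string(buf), part.Size()) // 0123 4
}
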
- delimiter := dirDelim - if objstore.ApplyIterOptions(options...).Recursive { - delimiter = "" - } - - go func(objectsCh chan<- objectInfo) { - defer close(objectsCh) - var marker string - for { - result, _, err := b.client.Bucket.Get(ctx, &cos.BucketGetOptions{ - Prefix: objectPrefix, - MaxKeys: 1000, - Marker: marker, - Delimiter: delimiter, - }) - if err != nil { - select { - case objectsCh <- objectInfo{ - err: err, - }: - case <-ctx.Done(): - } - return - } - - for _, object := range result.Contents { - select { - case objectsCh <- objectInfo{ - key: object.Key, - }: - case <-ctx.Done(): - return - } - } - - // The result of CommonPrefixes contains the objects - // that have the same keys between Prefix and the key specified by delimiter. - for _, obj := range result.CommonPrefixes { - select { - case objectsCh <- objectInfo{ - key: obj, - }: - case <-ctx.Done(): - return - } - } - - if !result.IsTruncated { - return - } - - marker = result.NextMarker - } - }(objectsCh) - return objectsCh -} - -func setRange(opts *cos.ObjectGetOptions, start, end int64) error { - if start == 0 && end < 0 { - opts.Range = fmt.Sprintf("bytes=%d", end) - } else if 0 < start && end == 0 { - opts.Range = fmt.Sprintf("bytes=%d-", start) - } else if 0 <= start && start <= end { - opts.Range = fmt.Sprintf("bytes=%d-%d", start, end) - } else { - return errors.Errorf("Invalid range specified: start=%d end=%d", start, end) - } - return nil -} - -func configFromEnv() Config { - c := Config{ - Bucket: os.Getenv("COS_BUCKET"), - AppId: os.Getenv("COS_APP_ID"), - Region: os.Getenv("COS_REGION"), - Endpoint: os.Getenv("COS_ENDPOINT"), - SecretId: os.Getenv("COS_SECRET_ID"), - SecretKey: os.Getenv("COS_SECRET_KEY"), - } - - return c -} - -// NewTestBucket creates test bkt client that before returning creates temporary bucket. -// In a close function it empties and deletes the bucket. -func NewTestBucket(t testing.TB) (objstore.Bucket, func(), error) { - c := configFromEnv() - if err := validateForTest(c); err != nil { - return nil, nil, err - } - - if c.Bucket != "" { - if os.Getenv("THANOS_ALLOW_EXISTING_BUCKET_USE") == "" { - return nil, nil, errors.New("COS_BUCKET is defined. Normally this tests will create temporary bucket " + - "and delete it after test. Unset COS_BUCKET env variable to use default logic. If you really want to run " + - "tests against provided (NOT USED!) bucket, set THANOS_ALLOW_EXISTING_BUCKET_USE=true. WARNING: That bucket " + - "needs to be manually cleared. This means that it is only useful to run one test in a time. This is due " + - "to safety (accidentally pointing prod bucket for test) as well as COS not being fully strong consistent.") - } - - bc, err := yaml.Marshal(c) - if err != nil { - return nil, nil, err - } - - b, err := NewBucket(log.NewNopLogger(), bc, "thanos-e2e-test") - if err != nil { - return nil, nil, err - } - - if err := b.Iter(context.Background(), "", func(f string) error { - return errors.Errorf("bucket %s is not empty", c.Bucket) - }); err != nil { - return nil, nil, errors.Wrapf(err, "cos check bucket %s", c.Bucket) - } - - t.Log("WARNING. Reusing", c.Bucket, "COS bucket for COS tests. 
Manual cleanup afterwards is required") - return b, func() {}, nil - } - c.Bucket = createTemporaryTestBucketName(t) - - bc, err := yaml.Marshal(c) - if err != nil { - return nil, nil, err - } - - b, err := NewBucket(log.NewNopLogger(), bc, "thanos-e2e-test") - if err != nil { - return nil, nil, err - } - - if _, err := b.client.Bucket.Put(context.Background(), nil); err != nil { - return nil, nil, err - } - t.Log("created temporary COS bucket for COS tests with name", c.Bucket) - - return b, func() { - objstore.EmptyBucket(t, context.Background(), b) - if _, err := b.client.Bucket.Delete(context.Background()); err != nil { - t.Logf("deleting bucket %s failed: %s", c.Bucket, err) - } - }, nil -} - -func validateForTest(conf Config) error { - if conf.Endpoint != "" { - if _, err := url.Parse(conf.Endpoint); err != nil { - return errors.Wrap(err, "parse endpoint") - } - if conf.SecretId == "" || - conf.SecretKey == "" { - return errors.New("secret_id or secret_key is empty") - } - return nil - } - if conf.AppId == "" || - conf.Region == "" || - conf.SecretId == "" || - conf.SecretKey == "" { - return errors.New("insufficient cos configuration information") - } - return nil -} - -// createTemporaryTestBucketName create a temp cos bucket for test. -// Bucket Naming Conventions: https://intl.cloud.tencent.com/document/product/436/13312#overview -func createTemporaryTestBucketName(t testing.TB) string { - src := rand.New(rand.NewSource(time.Now().UnixNano())) - name := fmt.Sprintf("test_%x_%s", src.Int31(), strings.ToLower(t.Name())) - name = strings.NewReplacer("_", "-", "/", "-").Replace(name) - const maxLength = 50 - if len(name) >= maxLength { - name = name[:maxLength] - } - return strings.TrimSuffix(name, "-") -} diff --git a/pkg/objstore/cos/cos_test.go b/pkg/objstore/cos/cos_test.go deleted file mode 100644 index 12f175de1a..0000000000 --- a/pkg/objstore/cos/cos_test.go +++ /dev/null @@ -1,137 +0,0 @@ -// Copyright (c) The Thanos Authors. -// Licensed under the Apache License 2.0. 
- -package cos - -import ( - "testing" - "time" - - "github.com/prometheus/common/model" - "github.com/thanos-io/thanos/pkg/testutil" -) - -func Test_parseConfig(t *testing.T) { - type args struct { - conf []byte - } - tests := []struct { - name string - args args - want Config - wantErr bool - }{ - { - name: "empty", - args: args{ - conf: []byte(""), - }, - want: DefaultConfig, - wantErr: false, - }, - { - name: "max_idle_conns", - args: args{ - conf: []byte(` -http_config: - max_idle_conns: 200 -`), - }, - want: Config{ - HTTPConfig: HTTPConfig{ - IdleConnTimeout: model.Duration(90 * time.Second), - ResponseHeaderTimeout: model.Duration(2 * time.Minute), - TLSHandshakeTimeout: model.Duration(10 * time.Second), - ExpectContinueTimeout: model.Duration(1 * time.Second), - MaxIdleConns: 200, - MaxIdleConnsPerHost: 100, - MaxConnsPerHost: 0, - }, - }, - wantErr: false, - }, - } - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - got, err := parseConfig(tt.args.conf) - if (err != nil) != tt.wantErr { - t.Errorf("parseConfig() error = %v, wantErr %v", err, tt.wantErr) - return - } - testutil.Equals(t, tt.want, got) - }) - } -} - -func TestConfig_validate(t *testing.T) { - type fields struct { - Bucket string - Region string - AppId string - Endpoint string - SecretKey string - SecretId string - HTTPConfig HTTPConfig - } - tests := []struct { - name string - fields fields - wantErr bool - }{ - { - name: "ok endpoint", - fields: fields{ - Endpoint: "http://bucket-123.cos.ap-beijing.myqcloud.com", - SecretId: "sid", - SecretKey: "skey", - }, - wantErr: false, - }, - { - name: "ok bucket-appid-region", - fields: fields{ - Bucket: "bucket", - AppId: "123", - Region: "ap-beijing", - SecretId: "sid", - SecretKey: "skey", - }, - wantErr: false, - }, - { - name: "missing skey", - fields: fields{ - Bucket: "bucket", - AppId: "123", - Region: "ap-beijing", - }, - wantErr: true, - }, - { - name: "missing bucket", - fields: fields{ - AppId: "123", - Region: "ap-beijing", - SecretId: "sid", - SecretKey: "skey", - }, - wantErr: true, - }, - } - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - conf := &Config{ - Bucket: tt.fields.Bucket, - Region: tt.fields.Region, - AppId: tt.fields.AppId, - Endpoint: tt.fields.Endpoint, - SecretKey: tt.fields.SecretKey, - SecretId: tt.fields.SecretId, - HTTPConfig: tt.fields.HTTPConfig, - } - if err := conf.validate(); (err != nil) != tt.wantErr { - t.Errorf("validate() error = %v, wantErr %v", err, tt.wantErr) - } - }) - } -} diff --git a/pkg/objstore/filesystem/filesystem.go b/pkg/objstore/filesystem/filesystem.go deleted file mode 100644 index 075a87a3cb..0000000000 --- a/pkg/objstore/filesystem/filesystem.go +++ /dev/null @@ -1,243 +0,0 @@ -// Copyright (c) The Thanos Authors. -// Licensed under the Apache License 2.0. - -package filesystem - -import ( - "context" - "fmt" - "io" - "io/ioutil" - "os" - "path/filepath" - - "github.com/pkg/errors" - "gopkg.in/yaml.v2" - - "github.com/thanos-io/thanos/pkg/objstore" - "github.com/thanos-io/thanos/pkg/runutil" -) - -// Config stores the configuration for storing and accessing blobs in filesystem. -type Config struct { - Directory string `yaml:"directory"` -} - -// Bucket implements the objstore.Bucket interfaces against filesystem that binary runs on. -// Methods from Bucket interface are thread-safe. Objects are assumed to be immutable. -// NOTE: It does not follow symbolic links. -type Bucket struct { - rootDir string -} - -// NewBucketFromConfig returns a new filesystem.Bucket from config. 
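
The filesystem provider deleted here is the simplest of the set: its whole configuration is one directory. A minimal usage sketch, assuming the pre-move import path shown in the deleted file header and a placeholder directory:

package main

import (
	"fmt"

	"github.com/thanos-io/thanos/pkg/objstore/filesystem" // path as it stood before this PR
)

func main() {
	// NewBucketFromConfig takes YAML with a single "directory" key.
	bkt, err := filesystem.NewBucketFromConfig([]byte("directory: /tmp/thanos-bucket"))
	if err != nil {
		panic(err)
	}
	defer bkt.Close()
	fmt.Println(bkt.Name()) // fs: /tmp/thanos-bucket
}
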
-func NewBucketFromConfig(conf []byte) (*Bucket, error) { - var c Config - if err := yaml.Unmarshal(conf, &c); err != nil { - return nil, err - } - if c.Directory == "" { - return nil, errors.New("missing directory for filesystem bucket") - } - return NewBucket(c.Directory) -} - -// NewBucket returns a new filesystem.Bucket. -func NewBucket(rootDir string) (*Bucket, error) { - absDir, err := filepath.Abs(rootDir) - if err != nil { - return nil, err - } - return &Bucket{rootDir: absDir}, nil -} - -// Iter calls f for each entry in the given directory. The argument to f is the full -// object name including the prefix of the inspected directory. -func (b *Bucket) Iter(ctx context.Context, dir string, f func(string) error, options ...objstore.IterOption) error { - params := objstore.ApplyIterOptions(options...) - absDir := filepath.Join(b.rootDir, dir) - info, err := os.Stat(absDir) - if err != nil { - if os.IsNotExist(err) { - return nil - } - return errors.Wrapf(err, "stat %s", absDir) - } - if !info.IsDir() { - return nil - } - - files, err := ioutil.ReadDir(absDir) - if err != nil { - return err - } - for _, file := range files { - name := filepath.Join(dir, file.Name()) - - if file.IsDir() { - empty, err := isDirEmpty(filepath.Join(absDir, file.Name())) - if err != nil { - return err - } - - if empty { - // Skip empty directories. - continue - } - - name += objstore.DirDelim - - if params.Recursive { - // Recursively list files in the subdirectory. - if err := b.Iter(ctx, name, f, options...); err != nil { - return err - } - - // The callback f() has already been called for the subdirectory - // files so we should skip to next filesystem entry. - continue - } - } - if err := f(name); err != nil { - return err - } - } - return nil -} - -// Get returns a reader for the given object name. -func (b *Bucket) Get(ctx context.Context, name string) (io.ReadCloser, error) { - return b.GetRange(ctx, name, 0, -1) -} - -type rangeReaderCloser struct { - io.Reader - f *os.File -} - -func (r *rangeReaderCloser) Close() error { - return r.f.Close() -} - -// Attributes returns information about the specified object. -func (b *Bucket) Attributes(_ context.Context, name string) (objstore.ObjectAttributes, error) { - file := filepath.Join(b.rootDir, name) - stat, err := os.Stat(file) - if err != nil { - return objstore.ObjectAttributes{}, errors.Wrapf(err, "stat %s", file) - } - - return objstore.ObjectAttributes{ - Size: stat.Size(), - LastModified: stat.ModTime(), - }, nil -} - -// GetRange returns a new range reader for the given object name and range. -func (b *Bucket) GetRange(_ context.Context, name string, off, length int64) (io.ReadCloser, error) { - if name == "" { - return nil, errors.New("object name is empty") - } - - file := filepath.Join(b.rootDir, name) - if _, err := os.Stat(file); err != nil { - return nil, errors.Wrapf(err, "stat %s", file) - } - - f, err := os.OpenFile(filepath.Clean(file), os.O_RDONLY, 0600) - if err != nil { - return nil, err - } - - if off > 0 { - _, err := f.Seek(off, 0) - if err != nil { - return nil, errors.Wrapf(err, "seek %v", off) - } - } - - if length == -1 { - return f, nil - } - - return &rangeReaderCloser{Reader: io.LimitReader(f, length), f: f}, nil -} - -// Exists checks if the given directory exists in memory. 
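
The Iter implementation above walks one directory level by default and only descends when the recursive option is set. A sketch of driving it with WithRecursiveIter, again assuming the pre-move import paths:

package main

import (
	"context"
	"fmt"

	"github.com/thanos-io/thanos/pkg/objstore"
	"github.com/thanos-io/thanos/pkg/objstore/filesystem"
)

func main() {
	bkt, err := filesystem.NewBucket("/tmp/thanos-bucket") // placeholder directory
	if err != nil {
		panic(err)
	}
	defer bkt.Close()

	// Without options, Iter stops at directory boundaries; WithRecursiveIter
	// descends and yields every object underneath the prefix.
	err = bkt.Iter(context.Background(), "", func(name string) error {
		fmt.Println(name)
		return nil
	}, objstore.WithRecursiveIter)
	if err != nil {
		panic(err)
	}
}
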
-func (b *Bucket) Exists(_ context.Context, name string) (bool, error) { - info, err := os.Stat(filepath.Join(b.rootDir, name)) - if err != nil { - if os.IsNotExist(err) { - return false, nil - } - return false, errors.Wrapf(err, "stat %s", filepath.Join(b.rootDir, name)) - } - return !info.IsDir(), nil -} - -// Upload writes the file specified in src to into the memory. -func (b *Bucket) Upload(_ context.Context, name string, r io.Reader) (err error) { - file := filepath.Join(b.rootDir, name) - if err := os.MkdirAll(filepath.Dir(file), os.ModePerm); err != nil { - return err - } - - f, err := os.Create(file) - if err != nil { - return err - } - defer runutil.CloseWithErrCapture(&err, f, "close") - - if _, err := io.Copy(f, r); err != nil { - return errors.Wrapf(err, "copy to %s", file) - } - return nil -} - -func isDirEmpty(name string) (ok bool, err error) { - f, err := os.Open(filepath.Clean(name)) - if os.IsNotExist(err) { - // The directory doesn't exist. We don't consider it an error and we treat it like empty. - return true, nil - } - if err != nil { - return false, err - } - defer runutil.CloseWithErrCapture(&err, f, "dir open") - - if _, err = f.Readdir(1); err == io.EOF || os.IsNotExist(err) { - return true, nil - } - return false, err -} - -// Delete removes all data prefixed with the dir. -func (b *Bucket) Delete(_ context.Context, name string) error { - file := filepath.Join(b.rootDir, name) - for file != b.rootDir { - if err := os.RemoveAll(file); err != nil { - return errors.Wrapf(err, "rm %s", file) - } - file = filepath.Dir(file) - empty, err := isDirEmpty(file) - if err != nil { - return err - } - if !empty { - break - } - } - return nil -} - -// IsObjNotFoundErr returns true if error means that object is not found. Relevant to Get operations. -func (b *Bucket) IsObjNotFoundErr(err error) bool { - return os.IsNotExist(errors.Cause(err)) -} - -func (b *Bucket) Close() error { return nil } - -// Name returns the bucket name. -func (b *Bucket) Name() string { - return fmt.Sprintf("fs: %s", b.rootDir) -} diff --git a/pkg/objstore/filesystem/filesystem_test.go b/pkg/objstore/filesystem/filesystem_test.go deleted file mode 100644 index 7dcf3e3ea5..0000000000 --- a/pkg/objstore/filesystem/filesystem_test.go +++ /dev/null @@ -1,46 +0,0 @@ -// Copyright (c) The Thanos Authors. -// Licensed under the Apache License 2.0. - -package filesystem - -import ( - "context" - "strings" - "sync" - "testing" - - "github.com/thanos-io/thanos/pkg/testutil" -) - -func TestDelete_EmptyDirDeletionRaceCondition(t *testing.T) { - const runs = 1000 - - ctx := context.Background() - - for r := 0; r < runs; r++ { - b, err := NewBucket(t.TempDir()) - testutil.Ok(t, err) - - // Upload 2 objects in a subfolder. - testutil.Ok(t, b.Upload(ctx, "subfolder/first", strings.NewReader("first"))) - testutil.Ok(t, b.Upload(ctx, "subfolder/second", strings.NewReader("second"))) - - // Prepare goroutines to concurrently delete the 2 objects (each one deletes a different object) - start := make(chan struct{}) - group := sync.WaitGroup{} - group.Add(2) - - for _, object := range []string{"first", "second"} { - go func(object string) { - defer group.Done() - - <-start - testutil.Ok(t, b.Delete(ctx, "subfolder/"+object)) - }(object) - } - - // Go! - close(start) - group.Wait() - } -} diff --git a/pkg/objstore/gcs/gcs.go b/pkg/objstore/gcs/gcs.go deleted file mode 100644 index ce93f42c0c..0000000000 --- a/pkg/objstore/gcs/gcs.go +++ /dev/null @@ -1,229 +0,0 @@ -// Copyright (c) The Thanos Authors. 
-// Licensed under the Apache License 2.0. - -// Package gcs implements common object storage abstractions against Google Cloud Storage. -package gcs - -import ( - "context" - "fmt" - "io" - "runtime" - "strings" - "testing" - - "cloud.google.com/go/storage" - "github.com/go-kit/log" - "github.com/pkg/errors" - "github.com/prometheus/common/version" - "golang.org/x/oauth2/google" - "google.golang.org/api/iterator" - "google.golang.org/api/option" - "gopkg.in/yaml.v2" - - "github.com/thanos-io/thanos/pkg/objstore" -) - -// DirDelim is the delimiter used to model a directory structure in an object store bucket. -const DirDelim = "/" - -// Config stores the configuration for gcs bucket. -type Config struct { - Bucket string `yaml:"bucket"` - ServiceAccount string `yaml:"service_account"` -} - -// Bucket implements the store.Bucket and shipper.Bucket interfaces against GCS. -type Bucket struct { - logger log.Logger - bkt *storage.BucketHandle - name string - - closer io.Closer -} - -// NewBucket returns a new Bucket against the given bucket handle. -func NewBucket(ctx context.Context, logger log.Logger, conf []byte, component string) (*Bucket, error) { - var gc Config - if err := yaml.Unmarshal(conf, &gc); err != nil { - return nil, err - } - - return NewBucketWithConfig(ctx, logger, gc, component) -} - -// NewBucketWithConfig returns a new Bucket with gcs Config struct. -func NewBucketWithConfig(ctx context.Context, logger log.Logger, gc Config, component string) (*Bucket, error) { - if gc.Bucket == "" { - return nil, errors.New("missing Google Cloud Storage bucket name for stored blocks") - } - - var opts []option.ClientOption - - // If ServiceAccount is provided, use them in GCS client, otherwise fallback to Google default logic. - if gc.ServiceAccount != "" { - credentials, err := google.CredentialsFromJSON(ctx, []byte(gc.ServiceAccount), storage.ScopeFullControl) - if err != nil { - return nil, errors.Wrap(err, "failed to create credentials from JSON") - } - opts = append(opts, option.WithCredentials(credentials)) - } - - opts = append(opts, - option.WithUserAgent(fmt.Sprintf("thanos-%s/%s (%s)", component, version.Version, runtime.Version())), - ) - - gcsClient, err := storage.NewClient(ctx, opts...) - if err != nil { - return nil, err - } - bkt := &Bucket{ - logger: logger, - bkt: gcsClient.Bucket(gc.Bucket), - closer: gcsClient, - name: gc.Bucket, - } - return bkt, nil -} - -// Name returns the bucket name for gcs. -func (b *Bucket) Name() string { - return b.name -} - -// Iter calls f for each entry in the given directory. The argument to f is the full -// object name including the prefix of the inspected directory. -func (b *Bucket) Iter(ctx context.Context, dir string, f func(string) error, options ...objstore.IterOption) error { - // Ensure the object name actually ends with a dir suffix. Otherwise we'll just iterate the - // object itself as one prefix item. - if dir != "" { - dir = strings.TrimSuffix(dir, DirDelim) + DirDelim - } - - // If recursive iteration is enabled we should pass an empty delimiter. 
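
For the GCS provider being removed here, credentials come either from inline service-account JSON or from Google's default resolution. A construction sketch under the pre-move import path; the bucket name is a placeholder, and with ServiceAccount empty the client only comes up where Application Default Credentials are available:

package main

import (
	"context"
	"fmt"

	"github.com/go-kit/log"

	"github.com/thanos-io/thanos/pkg/objstore/gcs" // pre-move path
)

func main() {
	// Empty ServiceAccount falls back to Google default credential logic,
	// per NewBucketWithConfig above.
	cfg := gcs.Config{Bucket: "my-bucket", ServiceAccount: ""}

	bkt, err := gcs.NewBucketWithConfig(context.Background(), log.NewNopLogger(), cfg, "example")
	if err != nil {
		panic(err)
	}
	defer bkt.Close()
	fmt.Println(bkt.Name())
}
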
- delimiter := DirDelim - if objstore.ApplyIterOptions(options...).Recursive { - delimiter = "" - } - - it := b.bkt.Objects(ctx, &storage.Query{ - Prefix: dir, - Delimiter: delimiter, - }) - for { - select { - case <-ctx.Done(): - return ctx.Err() - default: - } - attrs, err := it.Next() - if err == iterator.Done { - return nil - } - if err != nil { - return err - } - if err := f(attrs.Prefix + attrs.Name); err != nil { - return err - } - } -} - -// Get returns a reader for the given object name. -func (b *Bucket) Get(ctx context.Context, name string) (io.ReadCloser, error) { - return b.bkt.Object(name).NewReader(ctx) -} - -// GetRange returns a new range reader for the given object name and range. -func (b *Bucket) GetRange(ctx context.Context, name string, off, length int64) (io.ReadCloser, error) { - return b.bkt.Object(name).NewRangeReader(ctx, off, length) -} - -// Attributes returns information about the specified object. -func (b *Bucket) Attributes(ctx context.Context, name string) (objstore.ObjectAttributes, error) { - attrs, err := b.bkt.Object(name).Attrs(ctx) - if err != nil { - return objstore.ObjectAttributes{}, err - } - - return objstore.ObjectAttributes{ - Size: attrs.Size, - LastModified: attrs.Updated, - }, nil -} - -// Handle returns the underlying GCS bucket handle. -// Used for testing purposes (we return handle, so it is not instrumented). -func (b *Bucket) Handle() *storage.BucketHandle { - return b.bkt -} - -// Exists checks if the given object exists. -func (b *Bucket) Exists(ctx context.Context, name string) (bool, error) { - if _, err := b.bkt.Object(name).Attrs(ctx); err == nil { - return true, nil - } else if err != storage.ErrObjectNotExist { - return false, err - } - return false, nil -} - -// Upload writes the file specified in src to remote GCS location specified as target. -func (b *Bucket) Upload(ctx context.Context, name string, r io.Reader) error { - w := b.bkt.Object(name).NewWriter(ctx) - - if _, err := io.Copy(w, r); err != nil { - return err - } - return w.Close() -} - -// Delete removes the object with the given name. -func (b *Bucket) Delete(ctx context.Context, name string) error { - return b.bkt.Object(name).Delete(ctx) -} - -// IsObjNotFoundErr returns true if error means that object is not found. Relevant to Get operations. -func (b *Bucket) IsObjNotFoundErr(err error) bool { - return errors.Is(err, storage.ErrObjectNotExist) -} - -func (b *Bucket) Close() error { - return b.closer.Close() -} - -// NewTestBucket creates test bkt client that before returning creates temporary bucket. -// In a close function it empties and deletes the bucket. 
-func NewTestBucket(t testing.TB, project string) (objstore.Bucket, func(), error) { - ctx, cancel := context.WithCancel(context.Background()) - defer cancel() - gTestConfig := Config{ - Bucket: objstore.CreateTemporaryTestBucketName(t), - } - - bc, err := yaml.Marshal(gTestConfig) - if err != nil { - return nil, nil, err - } - - b, err := NewBucket(ctx, log.NewNopLogger(), bc, "thanos-e2e-test") - if err != nil { - return nil, nil, err - } - - if err = b.bkt.Create(ctx, project, nil); err != nil { - _ = b.Close() - return nil, nil, err - } - - t.Log("created temporary GCS bucket for GCS tests with name", b.name, "in project", project) - return b, func() { - objstore.EmptyBucket(t, ctx, b) - if err := b.bkt.Delete(ctx); err != nil { - t.Logf("deleting bucket failed: %s", err) - } - if err := b.Close(); err != nil { - t.Logf("closing bucket failed: %s", err) - } - }, nil -} diff --git a/pkg/objstore/gcs/gcs_test.go b/pkg/objstore/gcs/gcs_test.go deleted file mode 100644 index 417c1fe4b1..0000000000 --- a/pkg/objstore/gcs/gcs_test.go +++ /dev/null @@ -1,47 +0,0 @@ -// Copyright (c) The Thanos Authors. -// Licensed under the Apache License 2.0. - -package gcs - -import ( - "context" - "io" - "io/ioutil" - "net/http" - "net/http/httptest" - "os" - "testing" - - "github.com/go-kit/log" - - "github.com/thanos-io/thanos/pkg/testutil" -) - -func TestBucket_Get_ShouldReturnErrorIfServerTruncateResponse(t *testing.T) { - srv := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { - w.Header().Set("Last-Modified", "Wed, 21 Oct 2015 07:28:00 GMT") - w.Header().Set("Content-Length", "100") - - // Write less bytes than the content length. - _, err := w.Write([]byte("12345")) - testutil.Ok(t, err) - })) - defer srv.Close() - - os.Setenv("STORAGE_EMULATOR_HOST", srv.Listener.Addr().String()) - - cfg := Config{ - Bucket: "test-bucket", - ServiceAccount: "", - } - - bkt, err := NewBucketWithConfig(context.Background(), log.NewNopLogger(), cfg, "test") - testutil.Ok(t, err) - - reader, err := bkt.Get(context.Background(), "test") - testutil.Ok(t, err) - - // We expect an error when reading back. - _, err = ioutil.ReadAll(reader) - testutil.Equals(t, io.ErrUnexpectedEOF, err) -} diff --git a/pkg/objstore/inmem.go b/pkg/objstore/inmem.go deleted file mode 100644 index f90ed6d90c..0000000000 --- a/pkg/objstore/inmem.go +++ /dev/null @@ -1,208 +0,0 @@ -// Copyright (c) The Thanos Authors. -// Licensed under the Apache License 2.0. - -package objstore - -import ( - "bytes" - "context" - "io" - "io/ioutil" - "sort" - "strings" - "sync" - "time" - - "github.com/pkg/errors" -) - -var errNotFound = errors.New("inmem: object not found") - -// InMemBucket implements the objstore.Bucket interfaces against local memory. -// Methods from Bucket interface are thread-safe. Objects are assumed to be immutable. -type InMemBucket struct { - mtx sync.RWMutex - objects map[string][]byte - attrs map[string]ObjectAttributes -} - -// NewInMemBucket returns a new in memory Bucket. -// NOTE: Returned bucket is just a naive in memory bucket implementation. For test use cases only. -func NewInMemBucket() *InMemBucket { - return &InMemBucket{ - objects: map[string][]byte{}, - attrs: map[string]ObjectAttributes{}, - } -} - -// Objects returns internally stored objects. -// NOTE: For assert purposes. -func (b *InMemBucket) Objects() map[string][]byte { - return b.objects -} - -// Iter calls f for each entry in the given directory. 
The argument to f is the full -// object name including the prefix of the inspected directory. -func (b *InMemBucket) Iter(_ context.Context, dir string, f func(string) error, options ...IterOption) error { - unique := map[string]struct{}{} - params := ApplyIterOptions(options...) - - var dirPartsCount int - dirParts := strings.SplitAfter(dir, DirDelim) - for _, p := range dirParts { - if p == "" { - continue - } - dirPartsCount++ - } - - b.mtx.RLock() - for filename := range b.objects { - if !strings.HasPrefix(filename, dir) || dir == filename { - continue - } - - if params.Recursive { - // Any object matching the prefix should be included. - unique[filename] = struct{}{} - continue - } - - parts := strings.SplitAfter(filename, DirDelim) - unique[strings.Join(parts[:dirPartsCount+1], "")] = struct{}{} - } - b.mtx.RUnlock() - - var keys []string - for n := range unique { - keys = append(keys, n) - } - sort.Slice(keys, func(i, j int) bool { - if strings.HasSuffix(keys[i], DirDelim) && strings.HasSuffix(keys[j], DirDelim) { - return strings.Compare(keys[i], keys[j]) < 0 - } - if strings.HasSuffix(keys[i], DirDelim) { - return false - } - if strings.HasSuffix(keys[j], DirDelim) { - return true - } - - return strings.Compare(keys[i], keys[j]) < 0 - }) - - for _, k := range keys { - if err := f(k); err != nil { - return err - } - } - return nil -} - -// Get returns a reader for the given object name. -func (b *InMemBucket) Get(_ context.Context, name string) (io.ReadCloser, error) { - if name == "" { - return nil, errors.New("inmem: object name is empty") - } - - b.mtx.RLock() - file, ok := b.objects[name] - b.mtx.RUnlock() - if !ok { - return nil, errNotFound - } - - return ioutil.NopCloser(bytes.NewReader(file)), nil -} - -// GetRange returns a new range reader for the given object name and range. -func (b *InMemBucket) GetRange(_ context.Context, name string, off, length int64) (io.ReadCloser, error) { - if name == "" { - return nil, errors.New("inmem: object name is empty") - } - - b.mtx.RLock() - file, ok := b.objects[name] - b.mtx.RUnlock() - if !ok { - return nil, errNotFound - } - - if int64(len(file)) < off { - return ioutil.NopCloser(bytes.NewReader(nil)), nil - } - - if length == -1 { - return ioutil.NopCloser(bytes.NewReader(file[off:])), nil - } - - if length <= 0 { - return ioutil.NopCloser(bytes.NewReader(nil)), errors.New("length cannot be smaller or equal 0") - } - - if int64(len(file)) <= off+length { - // Just return maximum of what we have. - length = int64(len(file)) - off - } - - return ioutil.NopCloser(bytes.NewReader(file[off : off+length])), nil -} - -// Exists checks if the given directory exists in memory. -func (b *InMemBucket) Exists(_ context.Context, name string) (bool, error) { - b.mtx.RLock() - defer b.mtx.RUnlock() - _, ok := b.objects[name] - return ok, nil -} - -// Attributes returns information about the specified object. -func (b *InMemBucket) Attributes(_ context.Context, name string) (ObjectAttributes, error) { - b.mtx.RLock() - attrs, ok := b.attrs[name] - b.mtx.RUnlock() - if !ok { - return ObjectAttributes{}, errNotFound - } - return attrs, nil -} - -// Upload writes the file specified in src to into the memory. 
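
The InMemBucket GetRange above clamps over-long ranges rather than erroring, which makes it convenient in tests. A quick sketch of that behavior, assuming the pre-move import path:

package main

import (
	"context"
	"fmt"
	"io/ioutil"
	"strings"

	"github.com/thanos-io/thanos/pkg/objstore" // pre-move path
)

func main() {
	ctx := context.Background()
	b := objstore.NewInMemBucket()
	if err := b.Upload(ctx, "obj", strings.NewReader("0123456789")); err != nil {
		panic(err)
	}

	// Per GetRange above, a length reaching past the end of the object is
	// clamped to the bytes that actually exist.
	rc, err := b.GetRange(ctx, "obj", 5, 100)
	if err != nil {
		panic(err)
	}
	defer rc.Close()

	data, _ := ioutil.ReadAll(rc)
	fmt.Println(string(data)) // 56789
}
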
-func (b *InMemBucket) Upload(_ context.Context, name string, r io.Reader) error { - b.mtx.Lock() - defer b.mtx.Unlock() - body, err := ioutil.ReadAll(r) - if err != nil { - return err - } - b.objects[name] = body - b.attrs[name] = ObjectAttributes{ - Size: int64(len(body)), - LastModified: time.Now(), - } - return nil -} - -// Delete removes all data prefixed with the dir. -func (b *InMemBucket) Delete(_ context.Context, name string) error { - b.mtx.Lock() - defer b.mtx.Unlock() - if _, ok := b.objects[name]; !ok { - return errNotFound - } - delete(b.objects, name) - delete(b.attrs, name) - return nil -} - -// IsObjNotFoundErr returns true if error means that object is not found. Relevant to Get operations. -func (b *InMemBucket) IsObjNotFoundErr(err error) bool { - return errors.Is(err, errNotFound) -} - -func (b *InMemBucket) Close() error { return nil } - -// Name returns the bucket name. -func (b *InMemBucket) Name() string { - return "inmem" -} diff --git a/pkg/objstore/objstore.go b/pkg/objstore/objstore.go deleted file mode 100644 index 5089e492e0..0000000000 --- a/pkg/objstore/objstore.go +++ /dev/null @@ -1,550 +0,0 @@ -// Copyright (c) The Thanos Authors. -// Licensed under the Apache License 2.0. - -package objstore - -import ( - "bytes" - "context" - "io" - "os" - "path/filepath" - "strings" - "time" - - "github.com/go-kit/log" - "github.com/go-kit/log/level" - "github.com/pkg/errors" - "github.com/prometheus/client_golang/prometheus" - "github.com/prometheus/client_golang/prometheus/promauto" - - "github.com/thanos-io/thanos/pkg/runutil" -) - -const ( - OpIter = "iter" - OpGet = "get" - OpGetRange = "get_range" - OpExists = "exists" - OpUpload = "upload" - OpDelete = "delete" - OpAttributes = "attributes" -) - -// Bucket provides read and write access to an object storage bucket. -// NOTE: We assume strong consistency for write-read flow. -type Bucket interface { - io.Closer - BucketReader - - // Upload the contents of the reader as an object into the bucket. - // Upload should be idempotent. - Upload(ctx context.Context, name string, r io.Reader) error - - // Delete removes the object with the given name. - // If object does not exists in the moment of deletion, Delete should throw error. - Delete(ctx context.Context, name string) error - - // Name returns the bucket name for the provider. - Name() string -} - -// InstrumentedBucket is a Bucket with optional instrumentation control on reader. -type InstrumentedBucket interface { - Bucket - - // WithExpectedErrs allows to specify a filter that marks certain errors as expected, so it will not increment - // thanos_objstore_bucket_operation_failures_total metric. - WithExpectedErrs(IsOpFailureExpectedFunc) Bucket - - // ReaderWithExpectedErrs allows to specify a filter that marks certain errors as expected, so it will not increment - // thanos_objstore_bucket_operation_failures_total metric. - // TODO(bwplotka): Remove this when moved to Go 1.14 and replace with InstrumentedBucketReader. - ReaderWithExpectedErrs(IsOpFailureExpectedFunc) BucketReader -} - -// BucketReader provides read access to an object storage bucket. -type BucketReader interface { - // Iter calls f for each entry in the given directory (not recursive.). The argument to f is the full - // object name including the prefix of the inspected directory. - // Entries are passed to function in sorted order. - Iter(ctx context.Context, dir string, f func(string) error, options ...IterOption) error - - // Get returns a reader for the given object name. 
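
The InstrumentedBucket interface above is what the deleted factory handed back: every provider was wrapped in metrics, then tracing. A sketch of that same composition on a plain in-memory bucket, assuming the pre-move import path and the call shapes used by the removed factory:

package main

import (
	"fmt"

	"github.com/prometheus/client_golang/prometheus"

	"github.com/thanos-io/thanos/pkg/objstore" // pre-move path
)

func main() {
	var raw objstore.Bucket = objstore.NewInMemBucket()
	reg := prometheus.NewRegistry()

	// Metrics first, then tracing, exactly as in the deleted client.NewBucket.
	instr := objstore.NewTracingBucket(objstore.BucketWithMetrics(raw.Name(), raw, reg))

	// Readers that legitimately hit missing objects can mark those errors as
	// expected so they do not count toward the operation-failure metric.
	_ = instr.ReaderWithExpectedErrs(raw.IsObjNotFoundErr)
	fmt.Println(instr.Name())
}
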
- Get(ctx context.Context, name string) (io.ReadCloser, error) - - // GetRange returns a new range reader for the given object name and range. - GetRange(ctx context.Context, name string, off, length int64) (io.ReadCloser, error) - - // Exists checks if the given object exists in the bucket. - Exists(ctx context.Context, name string) (bool, error) - - // IsObjNotFoundErr returns true if error means that object is not found. Relevant to Get operations. - IsObjNotFoundErr(err error) bool - - // Attributes returns information about the specified object. - Attributes(ctx context.Context, name string) (ObjectAttributes, error) -} - -// InstrumentedBucket is a BucketReader with optional instrumentation control. -type InstrumentedBucketReader interface { - BucketReader - - // ReaderWithExpectedErrs allows to specify a filter that marks certain errors as expected, so it will not increment - // thanos_objstore_bucket_operation_failures_total metric. - ReaderWithExpectedErrs(IsOpFailureExpectedFunc) BucketReader -} - -// IterOption configures the provided params. -type IterOption func(params *IterParams) - -// WithRecursiveIter is an option that can be applied to Iter() to recursively list objects -// in the bucket. -func WithRecursiveIter(params *IterParams) { - params.Recursive = true -} - -// IterParams holds the Iter() parameters and is used by objstore clients implementations. -type IterParams struct { - Recursive bool -} - -func ApplyIterOptions(options ...IterOption) IterParams { - out := IterParams{} - for _, opt := range options { - opt(&out) - } - return out -} - -type ObjectAttributes struct { - // Size is the object size in bytes. - Size int64 `json:"size"` - - // LastModified is the timestamp the object was last modified. - LastModified time.Time `json:"last_modified"` -} - -// TryToGetSize tries to get upfront size from reader. -// Some implementations may return only size of unread data in the reader, so it's best to call this method before -// doing any reading. -// -// TODO(https://github.com/thanos-io/thanos/issues/678): Remove guessing length when minio provider will support multipart upload without this. -func TryToGetSize(r io.Reader) (int64, error) { - switch f := r.(type) { - case *os.File: - fileInfo, err := f.Stat() - if err != nil { - return 0, errors.Wrap(err, "os.File.Stat()") - } - return fileInfo.Size(), nil - case *bytes.Buffer: - return int64(f.Len()), nil - case *bytes.Reader: - // Returns length of unread data only. - return int64(f.Len()), nil - case *strings.Reader: - return f.Size(), nil - case ObjectSizer: - return f.ObjectSize() - } - return 0, errors.Errorf("unsupported type of io.Reader: %T", r) -} - -// ObjectSizer can return size of object. -type ObjectSizer interface { - // ObjectSize returns the size of the object in bytes, or error if it is not available. - ObjectSize() (int64, error) -} - -type nopCloserWithObjectSize struct{ io.Reader } - -func (nopCloserWithObjectSize) Close() error { return nil } -func (n nopCloserWithObjectSize) ObjectSize() (int64, error) { return TryToGetSize(n.Reader) } - -// NopCloserWithSize returns a ReadCloser with a no-op Close method wrapping -// the provided Reader r. Returned ReadCloser also implements Size method. -func NopCloserWithSize(r io.Reader) io.ReadCloser { - return nopCloserWithObjectSize{r} -} - -// UploadDir uploads all files in srcdir to the bucket with into a top-level directory -// named dstdir. It is a caller responsibility to clean partial upload in case of failure. 
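TryToGetSize above only recognizes a fixed set of concrete reader types, so any wrapper hides the size unless it re-exposes ObjectSizer; that is the whole point of NopCloserWithSize. A short sketch of the three cases (import path as published after this change, an assumption):

package main

import (
	"bufio"
	"bytes"
	"fmt"

	"github.com/thanos-io/objstore"
)

func main() {
	payload := []byte("0123456789")

	// Known concrete type: the size is available upfront.
	size, err := objstore.TryToGetSize(bytes.NewReader(payload))
	fmt.Println(size, err) // 10 <nil>

	// Wrapping hides the concrete type, so sizing fails...
	_, err = objstore.TryToGetSize(bufio.NewReader(bytes.NewReader(payload)))
	fmt.Println(err) // unsupported type of io.Reader: *bufio.Reader

	// ...unless the wrapper implements ObjectSizer, as NopCloserWithSize does.
	size, err = objstore.TryToGetSize(objstore.NopCloserWithSize(bytes.NewReader(payload)))
	fmt.Println(size, err) // 10 <nil>
}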
-func UploadDir(ctx context.Context, logger log.Logger, bkt Bucket, srcdir, dstdir string) error { - df, err := os.Stat(srcdir) - if err != nil { - return errors.Wrap(err, "stat dir") - } - if !df.IsDir() { - return errors.Errorf("%s is not a directory", srcdir) - } - return filepath.Walk(srcdir, func(src string, fi os.FileInfo, err error) error { - if err != nil { - return err - } - if fi.IsDir() { - return nil - } - dst := filepath.Join(dstdir, strings.TrimPrefix(src, srcdir)) - - return UploadFile(ctx, logger, bkt, src, dst) - }) -} - -// UploadFile uploads the file with the given name to the bucket. -// It is a caller responsibility to clean partial upload in case of failure. -func UploadFile(ctx context.Context, logger log.Logger, bkt Bucket, src, dst string) error { - r, err := os.Open(filepath.Clean(src)) - if err != nil { - return errors.Wrapf(err, "open file %s", src) - } - defer runutil.CloseWithLogOnErr(logger, r, "close file %s", src) - - if err := bkt.Upload(ctx, dst, r); err != nil { - return errors.Wrapf(err, "upload file %s as %s", src, dst) - } - level.Debug(logger).Log("msg", "uploaded file", "from", src, "dst", dst, "bucket", bkt.Name()) - return nil -} - -// DirDelim is the delimiter used to model a directory structure in an object store bucket. -const DirDelim = "/" - -// DownloadFile downloads the src file from the bucket to dst. If dst is an existing -// directory, a file with the same name as the source is created in dst. -// If destination file is already existing, download file will overwrite it. -func DownloadFile(ctx context.Context, logger log.Logger, bkt BucketReader, src, dst string) (err error) { - if fi, err := os.Stat(dst); err == nil { - if fi.IsDir() { - dst = filepath.Join(dst, filepath.Base(src)) - } - } else if !os.IsNotExist(err) { - return err - } - - rc, err := bkt.Get(ctx, src) - if err != nil { - return errors.Wrapf(err, "get file %s", src) - } - defer runutil.CloseWithLogOnErr(logger, rc, "download block's file reader") - - f, err := os.Create(dst) - if err != nil { - return errors.Wrap(err, "create file") - } - defer func() { - if err != nil { - if rerr := os.Remove(dst); rerr != nil { - level.Warn(logger).Log("msg", "failed to remove partially downloaded file", "file", dst, "err", rerr) - } - } - }() - defer runutil.CloseWithLogOnErr(logger, f, "download block's output file") - - if _, err = io.Copy(f, rc); err != nil { - return errors.Wrap(err, "copy object to file") - } - return nil -} - -// DownloadDir downloads all object found in the directory into the local directory. -func DownloadDir(ctx context.Context, logger log.Logger, bkt BucketReader, originalSrc, src, dst string, ignoredPaths ...string) error { - if err := os.MkdirAll(dst, 0750); err != nil { - return errors.Wrap(err, "create dir") - } - - var downloadedFiles []string - if err := bkt.Iter(ctx, src, func(name string) error { - if strings.HasSuffix(name, DirDelim) { - return DownloadDir(ctx, logger, bkt, originalSrc, name, filepath.Join(dst, filepath.Base(name)), ignoredPaths...) - } - for _, ignoredPath := range ignoredPaths { - if ignoredPath == strings.TrimPrefix(name, string(originalSrc)+DirDelim) { - level.Debug(logger).Log("msg", "not downloading again because a provided path matches this one", "file", name) - return nil - } - } - if err := DownloadFile(ctx, logger, bkt, name, dst); err != nil { - return err - } - - downloadedFiles = append(downloadedFiles, dst) - return nil - }); err != nil { - // Best-effort cleanup if the download failed. 
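The directory helpers here are thin recursion over Iter plus the single-file functions: UploadDir walks the source tree and strips the srcdir prefix, while DownloadDir recurses whenever an entry carries the DirDelim suffix. A round-trip sketch (local paths and the in-memory bucket are hypothetical stand-ins for a real store):

package main

import (
	"context"

	"github.com/go-kit/log"

	"github.com/thanos-io/objstore"
)

func main() {
	ctx := context.Background()
	logger := log.NewNopLogger()
	bkt := objstore.NewInMemBucket()

	// Uploads every regular file under blocks/ as backup/<relative path>.
	if err := objstore.UploadDir(ctx, logger, bkt, "blocks", "backup"); err != nil {
		panic(err) // partial uploads are the caller's responsibility to clean up
	}

	// Mirrors the prefix back to disk; extra trailing args would name paths to skip.
	if err := objstore.DownloadDir(ctx, logger, bkt, "backup", "backup", "restore"); err != nil {
		panic(err)
	}
}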
- for _, f := range downloadedFiles { - if rerr := os.Remove(f); rerr != nil { - level.Warn(logger).Log("msg", "failed to remove file on partial dir download error", "file", f, "err", rerr) - } - } - return err - } - - return nil -} - -// IsOpFailureExpectedFunc allows to mark certain errors as expected, so they will not increment thanos_objstore_bucket_operation_failures_total metric. -type IsOpFailureExpectedFunc func(error) bool - -var _ InstrumentedBucket = &metricBucket{} - -// BucketWithMetrics takes a bucket and registers metrics with the given registry for -// operations run against the bucket. -func BucketWithMetrics(name string, b Bucket, reg prometheus.Registerer) *metricBucket { - bkt := &metricBucket{ - bkt: b, - isOpFailureExpected: func(err error) bool { return false }, - ops: promauto.With(reg).NewCounterVec(prometheus.CounterOpts{ - Name: "thanos_objstore_bucket_operations_total", - Help: "Total number of all attempted operations against a bucket.", - ConstLabels: prometheus.Labels{"bucket": name}, - }, []string{"operation"}), - - opsFailures: promauto.With(reg).NewCounterVec(prometheus.CounterOpts{ - Name: "thanos_objstore_bucket_operation_failures_total", - Help: "Total number of operations against a bucket that failed, but were not expected to fail in certain way from caller perspective. Those errors have to be investigated.", - ConstLabels: prometheus.Labels{"bucket": name}, - }, []string{"operation"}), - - opsDuration: promauto.With(reg).NewHistogramVec(prometheus.HistogramOpts{ - Name: "thanos_objstore_bucket_operation_duration_seconds", - Help: "Duration of successful operations against the bucket", - ConstLabels: prometheus.Labels{"bucket": name}, - Buckets: []float64{0.001, 0.01, 0.1, 0.3, 0.6, 1, 3, 6, 9, 20, 30, 60, 90, 120}, - }, []string{"operation"}), - - lastSuccessfulUploadTime: promauto.With(reg).NewGaugeVec(prometheus.GaugeOpts{ - Name: "thanos_objstore_bucket_last_successful_upload_time", - Help: "Second timestamp of the last successful upload to the bucket.", - }, []string{"bucket"}), - } - for _, op := range []string{ - OpIter, - OpGet, - OpGetRange, - OpExists, - OpUpload, - OpDelete, - OpAttributes, - } { - bkt.ops.WithLabelValues(op) - bkt.opsFailures.WithLabelValues(op) - bkt.opsDuration.WithLabelValues(op) - } - bkt.lastSuccessfulUploadTime.WithLabelValues(b.Name()) - return bkt -} - -type metricBucket struct { - bkt Bucket - - ops *prometheus.CounterVec - opsFailures *prometheus.CounterVec - isOpFailureExpected IsOpFailureExpectedFunc - - opsDuration *prometheus.HistogramVec - lastSuccessfulUploadTime *prometheus.GaugeVec -} - -func (b *metricBucket) WithExpectedErrs(fn IsOpFailureExpectedFunc) Bucket { - return &metricBucket{ - bkt: b.bkt, - ops: b.ops, - opsFailures: b.opsFailures, - isOpFailureExpected: fn, - opsDuration: b.opsDuration, - lastSuccessfulUploadTime: b.lastSuccessfulUploadTime, - } -} - -func (b *metricBucket) ReaderWithExpectedErrs(fn IsOpFailureExpectedFunc) BucketReader { - return b.WithExpectedErrs(fn) -} - -func (b *metricBucket) Iter(ctx context.Context, dir string, f func(name string) error, options ...IterOption) error { - const op = OpIter - b.ops.WithLabelValues(op).Inc() - - err := b.bkt.Iter(ctx, dir, f, options...) 
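BucketWithMetrics is a pure decorator: each call increments the operation counter before delegating, and a failure only counts when the configured isOpFailureExpected filter rejects it. A sketch of the wiring and of how WithExpectedErrs changes the accounting (registry and object names are hypothetical):

package main

import (
	"context"
	"fmt"

	"github.com/prometheus/client_golang/prometheus"

	"github.com/thanos-io/objstore"
)

func main() {
	reg := prometheus.NewRegistry()
	bkt := objstore.BucketWithMetrics("demo", objstore.NewInMemBucket(), reg)
	ctx := context.Background()

	// Counts as an operation and a failure: the default filter expects no errors.
	_, _ = bkt.Get(ctx, "missing")

	// Counts as an operation only: not-found is declared expected here.
	_, _ = bkt.WithExpectedErrs(bkt.IsObjNotFoundErr).Get(ctx, "missing")

	// Expect thanos_objstore_bucket_operations_total{operation="get"} == 2 and
	// thanos_objstore_bucket_operation_failures_total{operation="get"} == 1.
	mfs, _ := reg.Gather()
	for _, mf := range mfs {
		fmt.Println(mf.GetName())
	}
}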
- if err != nil { - if !b.isOpFailureExpected(err) && ctx.Err() != context.Canceled { - b.opsFailures.WithLabelValues(op).Inc() - } - } - return err -} - -func (b *metricBucket) Attributes(ctx context.Context, name string) (ObjectAttributes, error) { - const op = OpAttributes - b.ops.WithLabelValues(op).Inc() - - start := time.Now() - attrs, err := b.bkt.Attributes(ctx, name) - if err != nil { - if !b.isOpFailureExpected(err) && ctx.Err() != context.Canceled { - b.opsFailures.WithLabelValues(op).Inc() - } - return attrs, err - } - b.opsDuration.WithLabelValues(op).Observe(time.Since(start).Seconds()) - return attrs, nil -} - -func (b *metricBucket) Get(ctx context.Context, name string) (io.ReadCloser, error) { - const op = OpGet - b.ops.WithLabelValues(op).Inc() - - rc, err := b.bkt.Get(ctx, name) - if err != nil { - if !b.isOpFailureExpected(err) && ctx.Err() != context.Canceled { - b.opsFailures.WithLabelValues(op).Inc() - } - return nil, err - } - return newTimingReadCloser( - rc, - op, - b.opsDuration, - b.opsFailures, - b.isOpFailureExpected, - ), nil -} - -func (b *metricBucket) GetRange(ctx context.Context, name string, off, length int64) (io.ReadCloser, error) { - const op = OpGetRange - b.ops.WithLabelValues(op).Inc() - - rc, err := b.bkt.GetRange(ctx, name, off, length) - if err != nil { - if !b.isOpFailureExpected(err) && ctx.Err() != context.Canceled { - b.opsFailures.WithLabelValues(op).Inc() - } - return nil, err - } - return newTimingReadCloser( - rc, - op, - b.opsDuration, - b.opsFailures, - b.isOpFailureExpected, - ), nil -} - -func (b *metricBucket) Exists(ctx context.Context, name string) (bool, error) { - const op = OpExists - b.ops.WithLabelValues(op).Inc() - - start := time.Now() - ok, err := b.bkt.Exists(ctx, name) - if err != nil { - if !b.isOpFailureExpected(err) && ctx.Err() != context.Canceled { - b.opsFailures.WithLabelValues(op).Inc() - } - return false, err - } - b.opsDuration.WithLabelValues(op).Observe(time.Since(start).Seconds()) - return ok, nil -} - -func (b *metricBucket) Upload(ctx context.Context, name string, r io.Reader) error { - const op = OpUpload - b.ops.WithLabelValues(op).Inc() - - start := time.Now() - if err := b.bkt.Upload(ctx, name, r); err != nil { - if !b.isOpFailureExpected(err) && ctx.Err() != context.Canceled { - b.opsFailures.WithLabelValues(op).Inc() - } - return err - } - b.lastSuccessfulUploadTime.WithLabelValues(b.bkt.Name()).SetToCurrentTime() - b.opsDuration.WithLabelValues(op).Observe(time.Since(start).Seconds()) - return nil -} - -func (b *metricBucket) Delete(ctx context.Context, name string) error { - const op = OpDelete - b.ops.WithLabelValues(op).Inc() - - start := time.Now() - if err := b.bkt.Delete(ctx, name); err != nil { - if !b.isOpFailureExpected(err) && ctx.Err() != context.Canceled { - b.opsFailures.WithLabelValues(op).Inc() - } - return err - } - b.opsDuration.WithLabelValues(op).Observe(time.Since(start).Seconds()) - - return nil -} - -func (b *metricBucket) IsObjNotFoundErr(err error) bool { - return b.bkt.IsObjNotFoundErr(err) -} - -func (b *metricBucket) Close() error { - return b.bkt.Close() -} - -func (b *metricBucket) Name() string { - return b.bkt.Name() -} - -type timingReadCloser struct { - io.ReadCloser - objSize int64 - objSizeErr error - - alreadyGotErr bool - - start time.Time - op string - duration *prometheus.HistogramVec - failed *prometheus.CounterVec - isFailureExpected IsOpFailureExpectedFunc -} - -func newTimingReadCloser(rc io.ReadCloser, op string, dur *prometheus.HistogramVec, failed 
*prometheus.CounterVec, isFailureExpected IsOpFailureExpectedFunc) *timingReadCloser { - // Initialize the metrics with 0. - dur.WithLabelValues(op) - failed.WithLabelValues(op) - objSize, objSizeErr := TryToGetSize(rc) - return &timingReadCloser{ - ReadCloser: rc, - objSize: objSize, - objSizeErr: objSizeErr, - start: time.Now(), - op: op, - duration: dur, - failed: failed, - isFailureExpected: isFailureExpected, - } -} - -func (t *timingReadCloser) ObjectSize() (int64, error) { - return t.objSize, t.objSizeErr -} - -func (rc *timingReadCloser) Close() error { - err := rc.ReadCloser.Close() - if !rc.alreadyGotErr && err != nil { - rc.failed.WithLabelValues(rc.op).Inc() - } - if !rc.alreadyGotErr && err == nil { - rc.duration.WithLabelValues(rc.op).Observe(time.Since(rc.start).Seconds()) - rc.alreadyGotErr = true - } - return err -} - -func (rc *timingReadCloser) Read(b []byte) (n int, err error) { - n, err = rc.ReadCloser.Read(b) - // Report metric just once. - if !rc.alreadyGotErr && err != nil && err != io.EOF { - if !rc.isFailureExpected(err) { - rc.failed.WithLabelValues(rc.op).Inc() - } - rc.alreadyGotErr = true - } - return n, err -} diff --git a/pkg/objstore/objstore_test.go b/pkg/objstore/objstore_test.go deleted file mode 100644 index e57e23324e..0000000000 --- a/pkg/objstore/objstore_test.go +++ /dev/null @@ -1,115 +0,0 @@ -// Copyright (c) The Thanos Authors. -// Licensed under the Apache License 2.0. - -package objstore - -import ( - "bytes" - "io" - "testing" - - promtest "github.com/prometheus/client_golang/prometheus/testutil" - - "github.com/thanos-io/thanos/pkg/testutil" -) - -func TestMetricBucket_Close(t *testing.T) { - bkt := BucketWithMetrics("abc", NewInMemBucket(), nil) - // Expected initialized metrics. - testutil.Equals(t, 7, promtest.CollectAndCount(bkt.ops)) - testutil.Equals(t, 7, promtest.CollectAndCount(bkt.opsFailures)) - testutil.Equals(t, 7, promtest.CollectAndCount(bkt.opsDuration)) - - AcceptanceTest(t, bkt.WithExpectedErrs(bkt.IsObjNotFoundErr)) - testutil.Equals(t, float64(9), promtest.ToFloat64(bkt.ops.WithLabelValues(OpIter))) - testutil.Equals(t, float64(2), promtest.ToFloat64(bkt.ops.WithLabelValues(OpAttributes))) - testutil.Equals(t, float64(3), promtest.ToFloat64(bkt.ops.WithLabelValues(OpGet))) - testutil.Equals(t, float64(3), promtest.ToFloat64(bkt.ops.WithLabelValues(OpGetRange))) - testutil.Equals(t, float64(2), promtest.ToFloat64(bkt.ops.WithLabelValues(OpExists))) - testutil.Equals(t, float64(9), promtest.ToFloat64(bkt.ops.WithLabelValues(OpUpload))) - testutil.Equals(t, float64(3), promtest.ToFloat64(bkt.ops.WithLabelValues(OpDelete))) - testutil.Equals(t, 7, promtest.CollectAndCount(bkt.ops)) - testutil.Equals(t, float64(0), promtest.ToFloat64(bkt.opsFailures.WithLabelValues(OpIter))) - testutil.Equals(t, float64(0), promtest.ToFloat64(bkt.opsFailures.WithLabelValues(OpAttributes))) - testutil.Equals(t, float64(1), promtest.ToFloat64(bkt.opsFailures.WithLabelValues(OpGet))) - testutil.Equals(t, float64(0), promtest.ToFloat64(bkt.opsFailures.WithLabelValues(OpGetRange))) - testutil.Equals(t, float64(0), promtest.ToFloat64(bkt.opsFailures.WithLabelValues(OpExists))) - testutil.Equals(t, float64(0), promtest.ToFloat64(bkt.opsFailures.WithLabelValues(OpUpload))) - testutil.Equals(t, float64(0), promtest.ToFloat64(bkt.opsFailures.WithLabelValues(OpDelete))) - testutil.Equals(t, 7, promtest.CollectAndCount(bkt.opsFailures)) - testutil.Equals(t, 7, promtest.CollectAndCount(bkt.opsDuration)) - lastUpload := 
promtest.ToFloat64(bkt.lastSuccessfulUploadTime) - testutil.Assert(t, lastUpload > 0, "last upload not greater than 0, val: %f", lastUpload) - - // Clear bucket, but don't clear metrics to ensure we use same. - bkt.bkt = NewInMemBucket() - AcceptanceTest(t, bkt) - testutil.Equals(t, float64(18), promtest.ToFloat64(bkt.ops.WithLabelValues(OpIter))) - testutil.Equals(t, float64(4), promtest.ToFloat64(bkt.ops.WithLabelValues(OpAttributes))) - testutil.Equals(t, float64(6), promtest.ToFloat64(bkt.ops.WithLabelValues(OpGet))) - testutil.Equals(t, float64(6), promtest.ToFloat64(bkt.ops.WithLabelValues(OpGetRange))) - testutil.Equals(t, float64(4), promtest.ToFloat64(bkt.ops.WithLabelValues(OpExists))) - testutil.Equals(t, float64(18), promtest.ToFloat64(bkt.ops.WithLabelValues(OpUpload))) - testutil.Equals(t, float64(6), promtest.ToFloat64(bkt.ops.WithLabelValues(OpDelete))) - testutil.Equals(t, 7, promtest.CollectAndCount(bkt.ops)) - testutil.Equals(t, float64(0), promtest.ToFloat64(bkt.opsFailures.WithLabelValues(OpIter))) - // Not expected not found error here. - testutil.Equals(t, float64(1), promtest.ToFloat64(bkt.opsFailures.WithLabelValues(OpAttributes))) - // Not expected not found errors, this should increment failure metric on get for not found as well, so +2. - testutil.Equals(t, float64(3), promtest.ToFloat64(bkt.opsFailures.WithLabelValues(OpGet))) - testutil.Equals(t, float64(0), promtest.ToFloat64(bkt.opsFailures.WithLabelValues(OpGetRange))) - testutil.Equals(t, float64(0), promtest.ToFloat64(bkt.opsFailures.WithLabelValues(OpExists))) - testutil.Equals(t, float64(0), promtest.ToFloat64(bkt.opsFailures.WithLabelValues(OpUpload))) - testutil.Equals(t, float64(0), promtest.ToFloat64(bkt.opsFailures.WithLabelValues(OpDelete))) - testutil.Equals(t, 7, promtest.CollectAndCount(bkt.opsFailures)) - testutil.Equals(t, 7, promtest.CollectAndCount(bkt.opsDuration)) - testutil.Assert(t, promtest.ToFloat64(bkt.lastSuccessfulUploadTime) > lastUpload) -} - -func TestTracingReader(t *testing.T) { - r := bytes.NewReader([]byte("hello world")) - tr := newTracingReadCloser(NopCloserWithSize(r), nil) - - size, err := TryToGetSize(tr) - - testutil.Ok(t, err) - testutil.Equals(t, int64(11), size) - - smallBuf := make([]byte, 4) - n, err := io.ReadFull(tr, smallBuf) - testutil.Ok(t, err) - testutil.Equals(t, 4, n) - - // Verify that size is still the same, after reading 4 bytes. - size, err = TryToGetSize(tr) - - testutil.Ok(t, err) - testutil.Equals(t, int64(11), size) -} - -func TestTimingTracingReader(t *testing.T) { - m := BucketWithMetrics("", NewInMemBucket(), nil) - r := bytes.NewReader([]byte("hello world")) - - tr := NopCloserWithSize(r) - tr = newTimingReadCloser(tr, "", m.opsDuration, m.opsFailures, func(err error) bool { - return false - }) - tr = newTracingReadCloser(tr, nil) - - size, err := TryToGetSize(tr) - - testutil.Ok(t, err) - testutil.Equals(t, int64(11), size) - - smallBuf := make([]byte, 4) - n, err := io.ReadFull(tr, smallBuf) - testutil.Ok(t, err) - testutil.Equals(t, 4, n) - - // Verify that size is still the same, after reading 4 bytes. - size, err = TryToGetSize(tr) - - testutil.Ok(t, err) - testutil.Equals(t, int64(11), size) -} diff --git a/pkg/objstore/objtesting/acceptance_e2e_test.go b/pkg/objstore/objtesting/acceptance_e2e_test.go deleted file mode 100644 index 4b2c6a2030..0000000000 --- a/pkg/objstore/objtesting/acceptance_e2e_test.go +++ /dev/null @@ -1,18 +0,0 @@ -// Copyright (c) The Thanos Authors. -// Licensed under the Apache License 2.0. 
- -package objtesting - -import ( - "testing" - - "github.com/thanos-io/thanos/pkg/objstore" -) - -// TestObjStoreAcceptanceTest_e2e tests all known implementation against interface behavior contract we agreed on. -// This ensures consistent behavior across all implementations. -// NOTE: This test assumes strong consistency, but in the same way it does not guarantee that if it passes, the -// used object store is strongly consistent. -func TestObjStore_AcceptanceTest_e2e(t *testing.T) { - ForeachStore(t, objstore.AcceptanceTest) -} diff --git a/pkg/objstore/objtesting/foreach.go b/pkg/objstore/objtesting/foreach.go deleted file mode 100644 index 6d1cad859f..0000000000 --- a/pkg/objstore/objtesting/foreach.go +++ /dev/null @@ -1,165 +0,0 @@ -// Copyright (c) The Thanos Authors. -// Licensed under the Apache License 2.0. - -package objtesting - -import ( - "io/ioutil" - "os" - "strings" - "testing" - - "github.com/thanos-io/thanos/pkg/objstore/bos" - "github.com/thanos-io/thanos/pkg/objstore/client" - "github.com/thanos-io/thanos/pkg/objstore/filesystem" - - "github.com/thanos-io/thanos/pkg/objstore" - "github.com/thanos-io/thanos/pkg/objstore/azure" - "github.com/thanos-io/thanos/pkg/objstore/cos" - "github.com/thanos-io/thanos/pkg/objstore/gcs" - "github.com/thanos-io/thanos/pkg/objstore/oss" - "github.com/thanos-io/thanos/pkg/objstore/s3" - "github.com/thanos-io/thanos/pkg/objstore/swift" - "github.com/thanos-io/thanos/pkg/testutil" -) - -// IsObjStoreSkipped returns true if given provider ID is found in THANOS_TEST_OBJSTORE_SKIP array delimited by comma e.g: -// THANOS_TEST_OBJSTORE_SKIP=GCS,S3,AZURE,SWIFT,COS,ALIYUNOSS,BOS. -func IsObjStoreSkipped(t *testing.T, provider client.ObjProvider) bool { - if e, ok := os.LookupEnv("THANOS_TEST_OBJSTORE_SKIP"); ok { - obstores := strings.Split(e, ",") - for _, objstore := range obstores { - if objstore == string(provider) { - t.Logf("%s found in THANOS_TEST_OBJSTORE_SKIP array. Skipping.", provider) - return true - } - } - } - - return false -} - -// ForeachStore runs given test using all available objstore implementations. -// For each it creates a new bucket with a random name and a cleanup function -// that deletes it after test was run. -// Use THANOS_TEST_OBJSTORE_SKIP to skip explicitly certain object storages. -func ForeachStore(t *testing.T, testFn func(t *testing.T, bkt objstore.Bucket)) { - t.Parallel() - - // Mandatory Inmem. Not parallel, to detect problem early. - if ok := t.Run("inmem", func(t *testing.T) { - testFn(t, objstore.NewInMemBucket()) - }); !ok { - return - } - - // Mandatory Filesystem. - t.Run("filesystem", func(t *testing.T) { - t.Parallel() - - dir, err := ioutil.TempDir("", "filesystem-foreach-store-test") - testutil.Ok(t, err) - defer testutil.Ok(t, os.RemoveAll(dir)) - - b, err := filesystem.NewBucket(dir) - testutil.Ok(t, err) - testFn(t, b) - }) - - // Optional GCS. - if !IsObjStoreSkipped(t, client.GCS) { - t.Run("gcs", func(t *testing.T) { - bkt, closeFn, err := gcs.NewTestBucket(t, os.Getenv("GCP_PROJECT")) - testutil.Ok(t, err) - - t.Parallel() - defer closeFn() - - // TODO(bwplotka): Add goleak when https://github.com/GoogleCloudPlatform/google-cloud-go/issues/1025 is resolved. - testFn(t, bkt) - }) - } - - // Optional S3. - if !IsObjStoreSkipped(t, client.S3) { - t.Run("aws s3", func(t *testing.T) { - // TODO(bwplotka): Allow taking location from envvar. 
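ForeachStore makes provider coverage opt-out rather than opt-in: inmem and filesystem always run, everything else runs unless listed in THANOS_TEST_OBJSTORE_SKIP. A hedged sketch of a test consuming it from inside this (deleted) objtesting package; the test name and assertions are illustrative:

// THANOS_TEST_OBJSTORE_SKIP=GCS,S3,AZURE,SWIFT,COS,ALIYUNOSS,BOS go test ./...
package objtesting

import (
	"context"
	"strings"
	"testing"

	"github.com/thanos-io/thanos/pkg/objstore"
	"github.com/thanos-io/thanos/pkg/testutil"
)

// TestUploadVisibleToIter exercises the write-read contract on every store
// that the environment allows.
func TestUploadVisibleToIter(t *testing.T) {
	ForeachStore(t, func(t *testing.T, bkt objstore.Bucket) {
		testutil.Ok(t, bkt.Upload(context.Background(), "a/b", strings.NewReader("x")))

		var seen []string
		testutil.Ok(t, bkt.Iter(context.Background(), "a/", func(name string) error {
			seen = append(seen, name)
			return nil
		}))
		testutil.Equals(t, []string{"a/b"}, seen)
	})
}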
- bkt, closeFn, err := s3.NewTestBucket(t, "us-west-2") - testutil.Ok(t, err) - - t.Parallel() - defer closeFn() - - // TODO(bwplotka): Add goleak when we fix potential leak in minio library. - // We cannot use goleak for detecting our own potential leaks, when goleak detects leaks in minio itself. - // This needs to be investigated more. - - testFn(t, bkt) - }) - } - - // Optional Azure. - if !IsObjStoreSkipped(t, client.AZURE) { - t.Run("azure", func(t *testing.T) { - bkt, closeFn, err := azure.NewTestBucket(t, "e2e-tests") - testutil.Ok(t, err) - - t.Parallel() - defer closeFn() - - testFn(t, bkt) - }) - } - - // Optional SWIFT. - if !IsObjStoreSkipped(t, client.SWIFT) { - t.Run("swift", func(t *testing.T) { - container, closeFn, err := swift.NewTestContainer(t) - testutil.Ok(t, err) - - t.Parallel() - defer closeFn() - - testFn(t, container) - }) - } - - // Optional COS. - if !IsObjStoreSkipped(t, client.COS) { - t.Run("Tencent cos", func(t *testing.T) { - bkt, closeFn, err := cos.NewTestBucket(t) - testutil.Ok(t, err) - - t.Parallel() - defer closeFn() - - testFn(t, bkt) - }) - } - - // Optional OSS. - if !IsObjStoreSkipped(t, client.ALIYUNOSS) { - t.Run("AliYun oss", func(t *testing.T) { - bkt, closeFn, err := oss.NewTestBucket(t) - testutil.Ok(t, err) - - t.Parallel() - defer closeFn() - - testFn(t, bkt) - }) - } - - // Optional BOS. - if !IsObjStoreSkipped(t, client.BOS) { - t.Run("Baidu BOS", func(t *testing.T) { - bkt, closeFn, err := bos.NewTestBucket(t) - testutil.Ok(t, err) - - t.Parallel() - defer closeFn() - - testFn(t, bkt) - }) - } -} diff --git a/pkg/objstore/oss/oss.go b/pkg/objstore/oss/oss.go deleted file mode 100644 index 5e96c3ddf1..0000000000 --- a/pkg/objstore/oss/oss.go +++ /dev/null @@ -1,380 +0,0 @@ -// Copyright (c) The Thanos Authors. -// Licensed under the Apache License 2.0. - -package oss - -import ( - "context" - "fmt" - "io" - "io/ioutil" - "math" - "math/rand" - "net/http" - "os" - "strconv" - "strings" - "testing" - "time" - - alioss "github.com/aliyun/aliyun-oss-go-sdk/oss" - "github.com/go-kit/log" - "github.com/pkg/errors" - "gopkg.in/yaml.v2" - - "github.com/thanos-io/thanos/pkg/objstore" - "github.com/thanos-io/thanos/pkg/objstore/clientutil" -) - -// Part size for multi part upload. -const PartSize = 1024 * 1024 * 128 - -// Config stores the configuration for oss bucket. -type Config struct { - Endpoint string `yaml:"endpoint"` - Bucket string `yaml:"bucket"` - AccessKeyID string `yaml:"access_key_id"` - AccessKeySecret string `yaml:"access_key_secret"` -} - -// Bucket implements the store.Bucket interface. -type Bucket struct { - name string - logger log.Logger - client *alioss.Client - config Config - bucket *alioss.Bucket -} - -func NewTestBucket(t testing.TB) (objstore.Bucket, func(), error) { - c := Config{ - Endpoint: os.Getenv("ALIYUNOSS_ENDPOINT"), - Bucket: os.Getenv("ALIYUNOSS_BUCKET"), - AccessKeyID: os.Getenv("ALIYUNOSS_ACCESS_KEY_ID"), - AccessKeySecret: os.Getenv("ALIYUNOSS_ACCESS_KEY_SECRET"), - } - - if c.Endpoint == "" || c.AccessKeyID == "" || c.AccessKeySecret == "" { - return nil, nil, errors.New("aliyun oss endpoint or access_key_id or access_key_secret " + - "is not present in config file") - } - if c.Bucket != "" && os.Getenv("THANOS_ALLOW_EXISTING_BUCKET_USE") == "true" { - t.Log("ALIYUNOSS_BUCKET is defined. Normally this tests will create temporary bucket " + - "and delete it after test. Unset ALIYUNOSS_BUCKET env variable to use default logic. If you really want to run " + - "tests against provided (NOT USED!) 
bucket, set THANOS_ALLOW_EXISTING_BUCKET_USE=true.") - return NewTestBucketFromConfig(t, c, true) - } - return NewTestBucketFromConfig(t, c, false) -} - -// Upload the contents of the reader as an object into the bucket. -func (b *Bucket) Upload(_ context.Context, name string, r io.Reader) error { - // TODO(https://github.com/thanos-io/thanos/issues/678): Remove guessing length when minio provider will support multipart upload without this. - size, err := objstore.TryToGetSize(r) - if err != nil { - return errors.Wrapf(err, "failed to get size apriori to upload %s", name) - } - - chunksnum, lastslice := int(math.Floor(float64(size)/PartSize)), size%PartSize - - ncloser := ioutil.NopCloser(r) - switch chunksnum { - case 0: - if err := b.bucket.PutObject(name, ncloser); err != nil { - return errors.Wrap(err, "failed to upload oss object") - } - default: - { - init, err := b.bucket.InitiateMultipartUpload(name) - if err != nil { - return errors.Wrap(err, "failed to initiate multi-part upload") - } - chunk := 0 - uploadEveryPart := func(everypartsize int64, cnk int) (alioss.UploadPart, error) { - prt, err := b.bucket.UploadPart(init, ncloser, everypartsize, cnk) - if err != nil { - if err := b.bucket.AbortMultipartUpload(init); err != nil { - return prt, errors.Wrap(err, "failed to abort multi-part upload") - } - - return prt, errors.Wrap(err, "failed to upload multi-part chunk") - } - return prt, nil - } - var parts []alioss.UploadPart - for ; chunk < chunksnum; chunk++ { - part, err := uploadEveryPart(PartSize, chunk+1) - if err != nil { - return errors.Wrap(err, "failed to upload every part") - } - parts = append(parts, part) - } - if lastslice != 0 { - part, err := uploadEveryPart(lastslice, chunksnum+1) - if err != nil { - return errors.Wrap(err, "failed to upload the last chunk") - } - parts = append(parts, part) - } - if _, err := b.bucket.CompleteMultipartUpload(init, parts); err != nil { - return errors.Wrap(err, "failed to set multi-part upload completive") - } - } - } - return nil -} - -// Delete removes the object with the given name. -func (b *Bucket) Delete(ctx context.Context, name string) error { - if err := b.bucket.DeleteObject(name); err != nil { - return errors.Wrap(err, "delete oss object") - } - return nil -} - -// Attributes returns information about the specified object. -func (b *Bucket) Attributes(ctx context.Context, name string) (objstore.ObjectAttributes, error) { - m, err := b.bucket.GetObjectMeta(name) - if err != nil { - return objstore.ObjectAttributes{}, err - } - - size, err := clientutil.ParseContentLength(m) - if err != nil { - return objstore.ObjectAttributes{}, err - } - - // aliyun oss return Last-Modified header in RFC1123 format. - // see api doc for details: https://www.alibabacloud.com/help/doc-detail/31985.htm - mod, err := clientutil.ParseLastModified(m, time.RFC1123) - if err != nil { - return objstore.ObjectAttributes{}, err - } - - return objstore.ObjectAttributes{ - Size: size, - LastModified: mod, - }, nil -} - -// NewBucket returns a new Bucket using the provided oss config values. -func NewBucket(logger log.Logger, conf []byte, component string) (*Bucket, error) { - var config Config - if err := yaml.Unmarshal(conf, &config); err != nil { - return nil, errors.Wrap(err, "parse aliyun oss config file failed") - } - - return NewBucketWithConfig(logger, config, component) -} - -// NewBucketWithConfig returns a new Bucket using the provided oss config struct. 
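The Upload above splits strictly by the fixed 128 MiB PartSize: size/PartSize full chunks (the math.Floor is just integer division for non-negative sizes), plus one trailing part for any remainder; a size below PartSize degenerates to a single PutObject. The arithmetic in isolation, with hypothetical sizes:

package main

import "fmt"

// PartSize mirrors the constant in the deleted oss package: 128 MiB.
const PartSize = 1024 * 1024 * 128

func main() {
	for _, size := range []int64{0, PartSize - 1, PartSize, 3*PartSize + 5} {
		chunks, last := size/PartSize, size%PartSize
		// Parts 1..chunks are full; a non-zero remainder becomes part chunks+1.
		fmt.Printf("size=%d -> full parts=%d, trailing bytes=%d\n", size, chunks, last)
	}
}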
-func NewBucketWithConfig(logger log.Logger, config Config, component string) (*Bucket, error) { - if err := validate(config); err != nil { - return nil, err - } - - client, err := alioss.New(config.Endpoint, config.AccessKeyID, config.AccessKeySecret) - if err != nil { - return nil, errors.Wrap(err, "create aliyun oss client failed") - } - bk, err := client.Bucket(config.Bucket) - if err != nil { - return nil, errors.Wrapf(err, "use aliyun oss bucket %s failed", config.Bucket) - } - - bkt := &Bucket{ - logger: logger, - client: client, - name: config.Bucket, - config: config, - bucket: bk, - } - return bkt, nil -} - -// validate checks to see the config options are set. -func validate(config Config) error { - if config.Endpoint == "" || config.Bucket == "" { - return errors.New("aliyun oss endpoint or bucket is not present in config file") - } - if config.AccessKeyID == "" || config.AccessKeySecret == "" { - return errors.New("aliyun oss access_key_id or access_key_secret is not present in config file") - } - - return nil -} - -// Iter calls f for each entry in the given directory (not recursive). The argument to f is the full -// object name including the prefix of the inspected directory. -func (b *Bucket) Iter(ctx context.Context, dir string, f func(string) error, options ...objstore.IterOption) error { - if dir != "" { - dir = strings.TrimSuffix(dir, objstore.DirDelim) + objstore.DirDelim - } - - delimiter := alioss.Delimiter(objstore.DirDelim) - if objstore.ApplyIterOptions(options...).Recursive { - delimiter = nil - } - - marker := alioss.Marker("") - for { - if err := ctx.Err(); err != nil { - return errors.Wrap(err, "context closed while iterating bucket") - } - objects, err := b.bucket.ListObjects(alioss.Prefix(dir), delimiter, marker) - if err != nil { - return errors.Wrap(err, "listing aliyun oss bucket failed") - } - marker = alioss.Marker(objects.NextMarker) - - for _, object := range objects.Objects { - if err := f(object.Key); err != nil { - return errors.Wrapf(err, "callback func invoke for object %s failed ", object.Key) - } - } - - for _, object := range objects.CommonPrefixes { - if err := f(object); err != nil { - return errors.Wrapf(err, "callback func invoke for directory %s failed", object) - } - } - if !objects.IsTruncated { - break - } - } - - return nil -} - -func (b *Bucket) Name() string { - return b.name -} - -func NewTestBucketFromConfig(t testing.TB, c Config, reuseBucket bool) (objstore.Bucket, func(), error) { - if c.Bucket == "" { - src := rand.NewSource(time.Now().UnixNano()) - - bktToCreate := strings.ReplaceAll(fmt.Sprintf("test_%s_%x", strings.ToLower(t.Name()), src.Int63()), "_", "-") - if len(bktToCreate) >= 63 { - bktToCreate = bktToCreate[:63] - } - testclient, err := alioss.New(c.Endpoint, c.AccessKeyID, c.AccessKeySecret) - if err != nil { - return nil, nil, errors.Wrap(err, "create aliyun oss client failed") - } - - if err := testclient.CreateBucket(bktToCreate); err != nil { - return nil, nil, errors.Wrapf(err, "create aliyun oss bucket %s failed", bktToCreate) - } - c.Bucket = bktToCreate - } - - bc, err := yaml.Marshal(c) - if err != nil { - return nil, nil, err - } - - b, err := NewBucket(log.NewNopLogger(), bc, "thanos-aliyun-oss-test") - if err != nil { - return nil, nil, err - } - - if reuseBucket { - if err := b.Iter(context.Background(), "", func(f string) error { - return errors.Errorf("bucket %s is not empty", c.Bucket) - }); err != nil { - return nil, nil, errors.Wrapf(err, "oss check bucket %s", c.Bucket) - } - - t.Log("WARNING. 
Reusing", c.Bucket, "Aliyun OSS bucket for OSS tests. Manual cleanup afterwards is required") - return b, func() {}, nil - } - - return b, func() { - objstore.EmptyBucket(t, context.Background(), b) - if err := b.client.DeleteBucket(c.Bucket); err != nil { - t.Logf("deleting bucket %s failed: %s", c.Bucket, err) - } - }, nil -} - -func (b *Bucket) Close() error { return nil } - -func (b *Bucket) setRange(start, end int64, name string) (alioss.Option, error) { - var opt alioss.Option - if 0 <= start && start <= end { - header, err := b.bucket.GetObjectMeta(name) - if err != nil { - return nil, err - } - - size, err := strconv.ParseInt(header["Content-Length"][0], 10, 64) - if err != nil { - return nil, err - } - - if end > size { - end = size - 1 - } - - opt = alioss.Range(start, end) - } else { - return nil, errors.Errorf("Invalid range specified: start=%d end=%d", start, end) - } - return opt, nil -} - -func (b *Bucket) getRange(_ context.Context, name string, off, length int64) (io.ReadCloser, error) { - if name == "" { - return nil, errors.New("given object name should not empty") - } - - var opts []alioss.Option - if length != -1 { - opt, err := b.setRange(off, off+length-1, name) - if err != nil { - return nil, err - } - opts = append(opts, opt) - } - - resp, err := b.bucket.GetObject(name, opts...) - if err != nil { - return nil, err - } - - return resp, nil -} - -// Get returns a reader for the given object name. -func (b *Bucket) Get(ctx context.Context, name string) (io.ReadCloser, error) { - return b.getRange(ctx, name, 0, -1) -} - -func (b *Bucket) GetRange(ctx context.Context, name string, off, length int64) (io.ReadCloser, error) { - return b.getRange(ctx, name, off, length) -} - -// Exists checks if the given object exists in the bucket. -func (b *Bucket) Exists(ctx context.Context, name string) (bool, error) { - exists, err := b.bucket.IsObjectExist(name) - if err != nil { - if b.IsObjNotFoundErr(err) { - return false, nil - } - return false, errors.Wrap(err, "cloud not check if object exists") - } - - return exists, nil -} - -// IsObjNotFoundErr returns true if error means that object is not found. Relevant to Get operations. -func (b *Bucket) IsObjNotFoundErr(err error) bool { - switch aliErr := errors.Cause(err).(type) { - case alioss.ServiceError: - if aliErr.StatusCode == http.StatusNotFound { - return true - } - } - return false -} diff --git a/pkg/objstore/s3/s3.go b/pkg/objstore/s3/s3.go deleted file mode 100644 index 1ece1a51f8..0000000000 --- a/pkg/objstore/s3/s3.go +++ /dev/null @@ -1,622 +0,0 @@ -// Copyright (c) The Thanos Authors. -// Licensed under the Apache License 2.0. - -// Package s3 implements common object storage abstractions against s3-compatible APIs. -package s3 - -import ( - "context" - "fmt" - "io" - "io/ioutil" - "net" - "net/http" - "os" - "runtime" - "strconv" - "strings" - "testing" - "time" - - "github.com/go-kit/log" - "github.com/go-kit/log/level" - "github.com/minio/minio-go/v7" - "github.com/minio/minio-go/v7/pkg/credentials" - "github.com/minio/minio-go/v7/pkg/encrypt" - "github.com/pkg/errors" - "github.com/prometheus/common/model" - "github.com/prometheus/common/version" - "gopkg.in/yaml.v2" - - "github.com/thanos-io/thanos/pkg/objstore" - "github.com/thanos-io/thanos/pkg/runutil" -) - -type ctxKey int - -const ( - // DirDelim is the delimiter used to model a directory structure in an object store bucket. - DirDelim = "/" - - // SSEKMS is the name of the SSE-KMS method for objectstore encryption. 
- SSEKMS = "SSE-KMS" - - // SSEC is the name of the SSE-C method for objstore encryption. - SSEC = "SSE-C" - - // SSES3 is the name of the SSE-S3 method for objstore encryption. - SSES3 = "SSE-S3" - - // sseConfigKey is the context key to override SSE config. This feature is used by downstream - // projects (eg. Cortex) to inject custom SSE config on a per-request basis. Future work or - // refactoring can introduce breaking changes as far as the functionality is preserved. - // NOTE: we're using a context value only because it's a very specific S3 option. If SSE will - // be available to wider set of backends we should probably add a variadic option to Get() and Upload(). - sseConfigKey = ctxKey(0) -) - -var DefaultConfig = Config{ - PutUserMetadata: map[string]string{}, - HTTPConfig: HTTPConfig{ - IdleConnTimeout: model.Duration(90 * time.Second), - ResponseHeaderTimeout: model.Duration(2 * time.Minute), - TLSHandshakeTimeout: model.Duration(10 * time.Second), - ExpectContinueTimeout: model.Duration(1 * time.Second), - MaxIdleConns: 100, - MaxIdleConnsPerHost: 100, - MaxConnsPerHost: 0, - }, - PartSize: 1024 * 1024 * 64, // 64MB. -} - -// Config stores the configuration for s3 bucket. -type Config struct { - Bucket string `yaml:"bucket"` - Endpoint string `yaml:"endpoint"` - Region string `yaml:"region"` - AWSSDKAuth bool `yaml:"aws_sdk_auth"` - AccessKey string `yaml:"access_key"` - Insecure bool `yaml:"insecure"` - SignatureV2 bool `yaml:"signature_version2"` - SecretKey string `yaml:"secret_key"` - PutUserMetadata map[string]string `yaml:"put_user_metadata"` - HTTPConfig HTTPConfig `yaml:"http_config"` - TraceConfig TraceConfig `yaml:"trace"` - ListObjectsVersion string `yaml:"list_objects_version"` - // PartSize used for multipart upload. Only used if uploaded object size is known and larger than configured PartSize. - // NOTE we need to make sure this number does not produce more parts than 10 000. - PartSize uint64 `yaml:"part_size"` - SSEConfig SSEConfig `yaml:"sse_config"` - STSEndpoint string `yaml:"sts_endpoint"` -} - -// SSEConfig deals with the configuration of SSE for Minio. The following options are valid: -// kmsencryptioncontext == https://docs.aws.amazon.com/kms/latest/developerguide/services-s3.html#s3-encryption-context -type SSEConfig struct { - Type string `yaml:"type"` - KMSKeyID string `yaml:"kms_key_id"` - KMSEncryptionContext map[string]string `yaml:"kms_encryption_context"` - EncryptionKey string `yaml:"encryption_key"` -} - -type TraceConfig struct { - Enable bool `yaml:"enable"` -} - -// HTTPConfig stores the http.Transport configuration for the s3 minio client. -type HTTPConfig struct { - IdleConnTimeout model.Duration `yaml:"idle_conn_timeout"` - ResponseHeaderTimeout model.Duration `yaml:"response_header_timeout"` - InsecureSkipVerify bool `yaml:"insecure_skip_verify"` - - TLSHandshakeTimeout model.Duration `yaml:"tls_handshake_timeout"` - ExpectContinueTimeout model.Duration `yaml:"expect_continue_timeout"` - MaxIdleConns int `yaml:"max_idle_conns"` - MaxIdleConnsPerHost int `yaml:"max_idle_conns_per_host"` - MaxConnsPerHost int `yaml:"max_conns_per_host"` - - // Allow upstream callers to inject a round tripper - Transport http.RoundTripper `yaml:"-"` - - TLSConfig objstore.TLSConfig `yaml:"tls_config"` -} - -// DefaultTransport - this default transport is based on the Minio -// DefaultTransport up until the following commit: -// https://github.com/minio/minio-go/commit/008c7aa71fc17e11bf980c209a4f8c4d687fc884 -// The values have since diverged. 
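NewBucket consumes raw YAML matching the Config struct above, with DefaultConfig filling in the HTTP timeouts and the 64 MB part size before the strict unmarshal. A minimal construction; bucket, endpoint, and component label are placeholders, and the import path is the one being deleted in this diff:

package main

import (
	"github.com/go-kit/log"

	"github.com/thanos-io/thanos/pkg/objstore/s3"
)

func main() {
	conf := []byte(`
bucket: my-bucket
endpoint: s3.us-west-2.amazonaws.com
region: us-west-2
# No access_key/secret_key: the credential chain falls back to env vars,
# the shared credentials file, and finally IAM.
`)
	bkt, err := s3.NewBucket(log.NewNopLogger(), conf, "example")
	if err != nil {
		panic(err)
	}
	defer bkt.Close()
}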
-func DefaultTransport(config Config) (*http.Transport, error) { - tlsConfig, err := objstore.NewTLSConfig(&config.HTTPConfig.TLSConfig) - if err != nil { - return nil, err - } - - if config.HTTPConfig.InsecureSkipVerify { - tlsConfig.InsecureSkipVerify = true - } - - return &http.Transport{ - Proxy: http.ProxyFromEnvironment, - DialContext: (&net.Dialer{ - Timeout: 30 * time.Second, - KeepAlive: 30 * time.Second, - DualStack: true, - }).DialContext, - - MaxIdleConns: config.HTTPConfig.MaxIdleConns, - MaxIdleConnsPerHost: config.HTTPConfig.MaxIdleConnsPerHost, - IdleConnTimeout: time.Duration(config.HTTPConfig.IdleConnTimeout), - MaxConnsPerHost: config.HTTPConfig.MaxConnsPerHost, - TLSHandshakeTimeout: time.Duration(config.HTTPConfig.TLSHandshakeTimeout), - ExpectContinueTimeout: time.Duration(config.HTTPConfig.ExpectContinueTimeout), - // A custom ResponseHeaderTimeout was introduced - // to cover cases where the tcp connection works but - // the server never answers. Defaults to 2 minutes. - ResponseHeaderTimeout: time.Duration(config.HTTPConfig.ResponseHeaderTimeout), - // Set this value so that the underlying transport round-tripper - // doesn't try to auto decode the body of objects with - // content-encoding set to `gzip`. - // - // Refer: https://golang.org/src/net/http/transport.go?h=roundTrip#L1843. - DisableCompression: true, - TLSClientConfig: tlsConfig, - }, nil -} - -// Bucket implements the store.Bucket interface against s3-compatible APIs. -type Bucket struct { - logger log.Logger - name string - client *minio.Client - defaultSSE encrypt.ServerSide - putUserMetadata map[string]string - partSize uint64 - listObjectsV1 bool -} - -// parseConfig unmarshals a buffer into a Config with default HTTPConfig values. -func parseConfig(conf []byte) (Config, error) { - config := DefaultConfig - if err := yaml.UnmarshalStrict(conf, &config); err != nil { - return Config{}, err - } - - return config, nil -} - -// NewBucket returns a new Bucket using the provided s3 config values. -func NewBucket(logger log.Logger, conf []byte, component string) (*Bucket, error) { - config, err := parseConfig(conf) - if err != nil { - return nil, err - } - - return NewBucketWithConfig(logger, config, component) -} - -type overrideSignerType struct { - credentials.Provider - signerType credentials.SignatureType -} - -func (s *overrideSignerType) Retrieve() (credentials.Value, error) { - v, err := s.Provider.Retrieve() - if err != nil { - return v, err - } - if !v.SignerType.IsAnonymous() { - v.SignerType = s.signerType - } - return v, nil -} - -// NewBucketWithConfig returns a new Bucket using the provided s3 config values. 
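Because HTTPConfig.Transport is tagged yaml:"-", a custom round tripper can only be injected programmatically, and NewBucketWithConfig below checks for one before falling back to DefaultTransport. A sketch (the transport choice and pre-move import path are illustrative):

package main

import (
	"net/http"

	"github.com/go-kit/log"

	"github.com/thanos-io/thanos/pkg/objstore/s3"
)

func main() {
	cfg := s3.DefaultConfig
	cfg.Bucket = "my-bucket"
	cfg.Endpoint = "s3.us-west-2.amazonaws.com"

	// An injected transport bypasses DefaultTransport entirely, so the
	// timeout and TLS fields of HTTPConfig no longer apply.
	cfg.HTTPConfig.Transport = http.DefaultTransport

	bkt, err := s3.NewBucketWithConfig(log.NewNopLogger(), cfg, "example")
	if err != nil {
		panic(err)
	}
	defer bkt.Close()
}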
-func NewBucketWithConfig(logger log.Logger, config Config, component string) (*Bucket, error) { - var chain []credentials.Provider - - // TODO(bwplotka): Don't do flags as they won't scale, use actual params like v2, v4 instead - wrapCredentialsProvider := func(p credentials.Provider) credentials.Provider { return p } - if config.SignatureV2 { - wrapCredentialsProvider = func(p credentials.Provider) credentials.Provider { - return &overrideSignerType{Provider: p, signerType: credentials.SignatureV2} - } - } - - if err := validate(config); err != nil { - return nil, err - } - - if config.AWSSDKAuth { - chain = []credentials.Provider{ - wrapCredentialsProvider(&AWSSDKAuth{Region: config.Region}), - } - } else if config.AccessKey != "" { - chain = []credentials.Provider{wrapCredentialsProvider(&credentials.Static{ - Value: credentials.Value{ - AccessKeyID: config.AccessKey, - SecretAccessKey: config.SecretKey, - SignerType: credentials.SignatureV4, - }, - })} - } else { - chain = []credentials.Provider{ - wrapCredentialsProvider(&credentials.EnvAWS{}), - wrapCredentialsProvider(&credentials.FileAWSCredentials{}), - wrapCredentialsProvider(&credentials.IAM{ - Client: &http.Client{ - Transport: http.DefaultTransport, - }, - Endpoint: config.STSEndpoint, - }), - } - } - - // Check if a roundtripper has been set in the config - // otherwise build the default transport. - var rt http.RoundTripper - if config.HTTPConfig.Transport != nil { - rt = config.HTTPConfig.Transport - } else { - var err error - rt, err = DefaultTransport(config) - if err != nil { - return nil, err - } - } - - client, err := minio.New(config.Endpoint, &minio.Options{ - Creds: credentials.NewChainCredentials(chain), - Secure: !config.Insecure, - Region: config.Region, - Transport: rt, - }) - if err != nil { - return nil, errors.Wrap(err, "initialize s3 client") - } - client.SetAppInfo(fmt.Sprintf("thanos-%s", component), fmt.Sprintf("%s (%s)", version.Version, runtime.Version())) - - var sse encrypt.ServerSide - if config.SSEConfig.Type != "" { - switch config.SSEConfig.Type { - case SSEKMS: - // If the KMSEncryptionContext is a nil map the header that is - // constructed by the encrypt.ServerSide object will be base64 - // encoded "nil" which is not accepted by AWS. - if config.SSEConfig.KMSEncryptionContext == nil { - config.SSEConfig.KMSEncryptionContext = make(map[string]string) - } - sse, err = encrypt.NewSSEKMS(config.SSEConfig.KMSKeyID, config.SSEConfig.KMSEncryptionContext) - if err != nil { - return nil, errors.Wrap(err, "initialize s3 client SSE-KMS") - } - - case SSEC: - key, err := ioutil.ReadFile(config.SSEConfig.EncryptionKey) - if err != nil { - return nil, err - } - - sse, err = encrypt.NewSSEC(key) - if err != nil { - return nil, errors.Wrap(err, "initialize s3 client SSE-C") - } - - case SSES3: - sse = encrypt.NewSSE() - - default: - sseErrMsg := errors.Errorf("Unsupported type %q was provided. Supported types are SSE-S3, SSE-KMS, SSE-C", config.SSEConfig.Type) - return nil, errors.Wrap(sseErrMsg, "Initialize s3 client SSE Config") - } - } - - if config.TraceConfig.Enable { - logWriter := log.NewStdlibAdapter(level.Debug(logger), log.MessageKey("s3TraceMsg")) - client.TraceOn(logWriter) - } - - if config.ListObjectsVersion != "" && config.ListObjectsVersion != "v1" && config.ListObjectsVersion != "v2" { - return nil, errors.Errorf("Initialize s3 client list objects version: Unsupported version %q was provided. 
Supported values are v1, v2", config.ListObjectsVersion) - } - - bkt := &Bucket{ - logger: logger, - name: config.Bucket, - client: client, - defaultSSE: sse, - putUserMetadata: config.PutUserMetadata, - partSize: config.PartSize, - listObjectsV1: config.ListObjectsVersion == "v1", - } - return bkt, nil -} - -// Name returns the bucket name for s3. -func (b *Bucket) Name() string { - return b.name -} - -// validate checks to see the config options are set. -func validate(conf Config) error { - if conf.Endpoint == "" { - return errors.New("no s3 endpoint in config file") - } - - if conf.AWSSDKAuth && conf.AccessKey != "" { - return errors.New("aws_sdk_auth and access_key are mutually exclusive configurations") - } - - if conf.AccessKey == "" && conf.SecretKey != "" { - return errors.New("no s3 access_key specified while secret_key is present in config file; either both should be present in config or envvars/IAM should be used.") - } - - if conf.AccessKey != "" && conf.SecretKey == "" { - return errors.New("no s3 secret_key specified while access_key is present in config file; either both should be present in config or envvars/IAM should be used.") - } - - if conf.SSEConfig.Type == SSEC && conf.SSEConfig.EncryptionKey == "" { - return errors.New("encryption_key must be set if sse_config.type is set to 'SSE-C'") - } - - if conf.SSEConfig.Type == SSEKMS && conf.SSEConfig.KMSKeyID == "" { - return errors.New("kms_key_id must be set if sse_config.type is set to 'SSE-KMS'") - } - - return nil -} - -// ValidateForTests checks to see the config options for tests are set. -func ValidateForTests(conf Config) error { - if conf.Endpoint == "" || - conf.AccessKey == "" || - conf.SecretKey == "" { - return errors.New("insufficient s3 test configuration information") - } - return nil -} - -// Iter calls f for each entry in the given directory. The argument to f is the full -// object name including the prefix of the inspected directory. -func (b *Bucket) Iter(ctx context.Context, dir string, f func(string) error, options ...objstore.IterOption) error { - // Ensure the object name actually ends with a dir suffix. Otherwise we'll just iterate the - // object itself as one prefix item. - if dir != "" { - dir = strings.TrimSuffix(dir, DirDelim) + DirDelim - } - - opts := minio.ListObjectsOptions{ - Prefix: dir, - Recursive: objstore.ApplyIterOptions(options...).Recursive, - UseV1: b.listObjectsV1, - } - - for object := range b.client.ListObjects(ctx, b.name, opts) { - // Catch the error when failed to list objects. - if object.Err != nil { - return object.Err - } - // This sometimes happens with empty buckets. - if object.Key == "" { - continue - } - // The s3 client can also return the directory itself in the ListObjects call above. - if object.Key == dir { - continue - } - if err := f(object.Key); err != nil { - return err - } - } - - return nil -} - -func (b *Bucket) getRange(ctx context.Context, name string, off, length int64) (io.ReadCloser, error) { - sse, err := b.getServerSideEncryption(ctx) - if err != nil { - return nil, err - } - - opts := &minio.GetObjectOptions{ServerSideEncryption: sse} - if length != -1 { - if err := opts.SetRange(off, off+length-1); err != nil { - return nil, err - } - } else if off > 0 { - if err := opts.SetRange(off, 0); err != nil { - return nil, err - } - } - r, err := b.client.GetObject(ctx, b.name, name, *opts) - if err != nil { - return nil, err - } - - // NotFoundObject error is revealed only after first Read. This does the initial GetRequest. 
Prefetch this here - // for convenience. - if _, err := r.Read(nil); err != nil { - runutil.CloseWithLogOnErr(b.logger, r, "s3 get range obj close") - - // First GET Object request error. - return nil, err - } - - return r, nil -} - -// Get returns a reader for the given object name. -func (b *Bucket) Get(ctx context.Context, name string) (io.ReadCloser, error) { - return b.getRange(ctx, name, 0, -1) -} - -// GetRange returns a new range reader for the given object name and range. -func (b *Bucket) GetRange(ctx context.Context, name string, off, length int64) (io.ReadCloser, error) { - return b.getRange(ctx, name, off, length) -} - -// Exists checks if the given object exists. -func (b *Bucket) Exists(ctx context.Context, name string) (bool, error) { - _, err := b.client.StatObject(ctx, b.name, name, minio.StatObjectOptions{}) - if err != nil { - if b.IsObjNotFoundErr(err) { - return false, nil - } - return false, errors.Wrap(err, "stat s3 object") - } - - return true, nil -} - -// Upload the contents of the reader as an object into the bucket. -func (b *Bucket) Upload(ctx context.Context, name string, r io.Reader) error { - sse, err := b.getServerSideEncryption(ctx) - if err != nil { - return err - } - - // TODO(https://github.com/thanos-io/thanos/issues/678): Remove guessing length when minio provider will support multipart upload without this. - size, err := objstore.TryToGetSize(r) - if err != nil { - level.Warn(b.logger).Log("msg", "could not guess file size for multipart upload; upload might be not optimized", "name", name, "err", err) - size = -1 - } - - partSize := b.partSize - if size < int64(partSize) { - partSize = 0 - } - if _, err := b.client.PutObject( - ctx, - b.name, - name, - r, - size, - minio.PutObjectOptions{ - PartSize: partSize, - ServerSideEncryption: sse, - UserMetadata: b.putUserMetadata, - }, - ); err != nil { - return errors.Wrap(err, "upload s3 object") - } - - return nil -} - -// Attributes returns information about the specified object. -func (b *Bucket) Attributes(ctx context.Context, name string) (objstore.ObjectAttributes, error) { - objInfo, err := b.client.StatObject(ctx, b.name, name, minio.StatObjectOptions{}) - if err != nil { - return objstore.ObjectAttributes{}, err - } - - return objstore.ObjectAttributes{ - Size: objInfo.Size, - LastModified: objInfo.LastModified, - }, nil -} - -// Delete removes the object with the given name. -func (b *Bucket) Delete(ctx context.Context, name string) error { - return b.client.RemoveObject(ctx, b.name, name, minio.RemoveObjectOptions{}) -} - -// IsObjNotFoundErr returns true if error means that object is not found. Relevant to Get operations. -func (b *Bucket) IsObjNotFoundErr(err error) bool { - return minio.ToErrorResponse(errors.Cause(err)).Code == "NoSuchKey" -} - -func (b *Bucket) Close() error { return nil } - -// getServerSideEncryption returns the SSE to use. 
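minio-go surfaces NoSuchKey only after the first read, hence the empty Read that getRange above issues before handing the object out; the not-found classification itself is a single comparison. Mirrored in isolation (constructing the error by hand is for illustration only):

package main

import (
	"fmt"

	"github.com/minio/minio-go/v7"
	"github.com/pkg/errors"
)

// isNotFound mirrors the deleted s3 Bucket's IsObjNotFoundErr: unwrap the
// cause, convert it to a minio ErrorResponse, compare the S3 error code.
func isNotFound(err error) bool {
	return minio.ToErrorResponse(errors.Cause(err)).Code == "NoSuchKey"
}

func main() {
	err := errors.Wrap(minio.ErrorResponse{Code: "NoSuchKey"}, "get object")
	fmt.Println(isNotFound(err)) // true
}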
-func (b *Bucket) getServerSideEncryption(ctx context.Context) (encrypt.ServerSide, error) { - if value := ctx.Value(sseConfigKey); value != nil { - if sse, ok := value.(encrypt.ServerSide); ok { - return sse, nil - } - return nil, errors.New("invalid SSE config override provided in the context") - } - - return b.defaultSSE, nil -} - -func configFromEnv() Config { - c := Config{ - Bucket: os.Getenv("S3_BUCKET"), - Endpoint: os.Getenv("S3_ENDPOINT"), - AccessKey: os.Getenv("S3_ACCESS_KEY"), - SecretKey: os.Getenv("S3_SECRET_KEY"), - } - - c.Insecure, _ = strconv.ParseBool(os.Getenv("S3_INSECURE")) - c.HTTPConfig.InsecureSkipVerify, _ = strconv.ParseBool(os.Getenv("S3_INSECURE_SKIP_VERIFY")) - c.SignatureV2, _ = strconv.ParseBool(os.Getenv("S3_SIGNATURE_VERSION2")) - return c -} - -// NewTestBucket creates test bkt client that before returning creates temporary bucket. -// In a close function it empties and deletes the bucket. -func NewTestBucket(t testing.TB, location string) (objstore.Bucket, func(), error) { - c := configFromEnv() - if err := ValidateForTests(c); err != nil { - return nil, nil, err - } - - if c.Bucket != "" && os.Getenv("THANOS_ALLOW_EXISTING_BUCKET_USE") == "" { - return nil, nil, errors.New("S3_BUCKET is defined. Normally this tests will create temporary bucket " + - "and delete it after test. Unset S3_BUCKET env variable to use default logic. If you really want to run " + - "tests against provided (NOT USED!) bucket, set THANOS_ALLOW_EXISTING_BUCKET_USE=true. WARNING: That bucket " + - "needs to be manually cleared. This means that it is only useful to run one test in a time. This is due " + - "to safety (accidentally pointing prod bucket for test) as well as aws s3 not being fully strong consistent.") - } - - return NewTestBucketFromConfig(t, location, c, true) -} - -func NewTestBucketFromConfig(t testing.TB, location string, c Config, reuseBucket bool) (objstore.Bucket, func(), error) { - ctx := context.Background() - - bc, err := yaml.Marshal(c) - if err != nil { - return nil, nil, err - } - b, err := NewBucket(log.NewNopLogger(), bc, "thanos-e2e-test") - if err != nil { - return nil, nil, err - } - - bktToCreate := c.Bucket - if c.Bucket != "" && reuseBucket { - if err := b.Iter(ctx, "", func(f string) error { - return errors.Errorf("bucket %s is not empty", c.Bucket) - }); err != nil { - return nil, nil, errors.Wrapf(err, "s3 check bucket %s", c.Bucket) - } - - t.Log("WARNING. Reusing", c.Bucket, "AWS bucket for AWS tests. Manual cleanup afterwards is required") - return b, func() {}, nil - } - - if c.Bucket == "" { - bktToCreate = objstore.CreateTemporaryTestBucketName(t) - } - - if err := b.client.MakeBucket(ctx, bktToCreate, minio.MakeBucketOptions{Region: location}); err != nil { - return nil, nil, err - } - b.name = bktToCreate - t.Log("created temporary AWS bucket for AWS tests with name", bktToCreate, "in", location) - - return b, func() { - objstore.EmptyBucket(t, ctx, b) - if err := b.client.RemoveBucket(ctx, bktToCreate); err != nil { - t.Logf("deleting bucket %s failed: %s", bktToCreate, err) - } - }, nil -} - -// ContextWithSSEConfig returns a context with a custom SSE config set. The returned context should be -// provided to S3 objstore client functions to override the default SSE config. 
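The context override keeps SSE a per-request concern without widening the Bucket interface, which is how downstream projects such as Cortex consume it. A sketch assuming an already-constructed *s3.Bucket (import path as it stood before this removal):

package main

import (
	"context"
	"strings"

	"github.com/minio/minio-go/v7/pkg/encrypt"

	"github.com/thanos-io/thanos/pkg/objstore/s3"
)

// uploadWithSSES3 encrypts this one request with SSE-S3; other requests on
// the same bucket keep its configured default SSE.
func uploadWithSSES3(bkt *s3.Bucket) error {
	ctx := s3.ContextWithSSEConfig(context.Background(), encrypt.NewSSE())
	return bkt.Upload(ctx, "obj", strings.NewReader("payload"))
}

func main() {} // bucket construction elided; see NewBucketWithConfig above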
-func ContextWithSSEConfig(ctx context.Context, value encrypt.ServerSide) context.Context { - return context.WithValue(ctx, sseConfigKey, value) -} diff --git a/pkg/objstore/s3/s3_aws_sdk_auth.go b/pkg/objstore/s3/s3_aws_sdk_auth.go deleted file mode 100644 index 393a931d17..0000000000 --- a/pkg/objstore/s3/s3_aws_sdk_auth.go +++ /dev/null @@ -1,54 +0,0 @@ -// Copyright (c) The Thanos Authors. -// Licensed under the Apache License 2.0. - -package s3 - -import ( - "context" - - aws "github.com/aws/aws-sdk-go-v2/aws" - awsconfig "github.com/aws/aws-sdk-go-v2/config" - "github.com/minio/minio-go/v7/pkg/credentials" - "github.com/pkg/errors" -) - -// AWSSDKAuth retrieves credentials from aws-sdk-go. -type AWSSDKAuth struct { - Region string - creds aws.Credentials -} - -// NewAWSSDKAuth returns a pointer to a new Credentials object -// wrapping the AWS SDK's default credential chain. -func NewAWSSDKAuth(region string) *credentials.Credentials { - return credentials.New(&AWSSDKAuth{ - Region: region, - }) -} - -// Retrieve retrieves the keys from the AWS SDK's default credential chain. -func (a *AWSSDKAuth) Retrieve() (credentials.Value, error) { - cfg, err := awsconfig.LoadDefaultConfig(context.TODO(), awsconfig.WithRegion(a.Region)) - if err != nil { - return credentials.Value{}, errors.Wrap(err, "load AWS SDK config") - } - - creds, err := cfg.Credentials.Retrieve(context.TODO()) - if err != nil { - return credentials.Value{}, errors.Wrap(err, "retrieve AWS SDK credentials") - } - - a.creds = creds - - return credentials.Value{ - AccessKeyID: creds.AccessKeyID, - SecretAccessKey: creds.SecretAccessKey, - SessionToken: creds.SessionToken, - SignerType: credentials.SignatureV4, - }, nil -} - -// IsExpired returns whether the retrieved credentials have expired. -func (a *AWSSDKAuth) IsExpired() bool { - return a.creds.Expired() -} diff --git a/pkg/objstore/s3/s3_e2e_test.go b/pkg/objstore/s3/s3_e2e_test.go deleted file mode 100644 index 97ef4a86c4..0000000000 --- a/pkg/objstore/s3/s3_e2e_test.go +++ /dev/null @@ -1,51 +0,0 @@ -// Copyright (c) The Thanos Authors. -// Licensed under the Apache License 2.0. - -package s3_test - -import ( - "bytes" - "context" - "strings" - "testing" - - "github.com/efficientgo/e2e" - "github.com/go-kit/log" - - "github.com/thanos-io/thanos/pkg/objstore/s3" - "github.com/thanos-io/thanos/test/e2e/e2ethanos" - - "github.com/thanos-io/thanos/pkg/testutil" -) - -// Regression benchmark for https://github.com/thanos-io/thanos/issues/3917. -func BenchmarkUpload(b *testing.B) { - b.ReportAllocs() - ctx := context.Background() - - e, err := e2e.NewDockerEnvironment("e2e_bench_minio_client") - testutil.Ok(b, err) - b.Cleanup(e2ethanos.CleanScenario(b, e)) - - const bucket = "benchmark" - m, err := e2ethanos.NewMinio(e, "benchmark", bucket) - testutil.Ok(b, err) - testutil.Ok(b, e2e.StartAndWaitReady(m)) - - bkt, err := s3.NewBucketWithConfig(log.NewNopLogger(), - e2ethanos.NewS3Config(bucket, m.Endpoint("https"), e.SharedDir()), "test-feed") - testutil.Ok(b, err) - - buf := bytes.Buffer{} - buf.Grow(1024 * 1024 * 100) // 100MB. - word := "abcdefghij" - for i := 0; i < buf.Cap()/len(word); i++ { - _, _ = buf.WriteString(word) - } - str := buf.String() - - b.ResetTimer() - for i := 0; i < b.N; i++ { - testutil.Ok(b, bkt.Upload(ctx, "test", strings.NewReader(str))) - } -} diff --git a/pkg/objstore/s3/s3_test.go b/pkg/objstore/s3/s3_test.go deleted file mode 100644 index f72ae89088..0000000000 --- a/pkg/objstore/s3/s3_test.go +++ /dev/null @@ -1,420 +0,0 @@ -// Copyright (c) The Thanos Authors.
-// Licensed under the Apache License 2.0. - -package s3 - -import ( - "context" - "encoding/base64" - "encoding/json" - "io" - "io/ioutil" - "net/http" - "net/http/httptest" - "testing" - "time" - - "github.com/go-kit/log" - "github.com/minio/minio-go/v7/pkg/encrypt" - - "github.com/thanos-io/thanos/pkg/testutil" -) - -const endpoint string = "localhost:80" - -func TestParseConfig(t *testing.T) { - input := []byte(`bucket: abcd -insecure: false`) - cfg, err := parseConfig(input) - testutil.Ok(t, err) - - if cfg.Bucket != "abcd" { - t.Errorf("parsing of bucket failed: got %v, expected %v", cfg.Bucket, "abcd") - } - if cfg.Insecure { - t.Errorf("parsing of insecure failed: got %v, expected %v", cfg.Insecure, false) - } -} - -func TestParseConfig_SSEConfig(t *testing.T) { - input := []byte(`bucket: abdd -endpoint: "s3-endpoint" -sse_config: - type: SSE-S3`) - - cfg, err := parseConfig(input) - testutil.Ok(t, err) - testutil.Ok(t, validate(cfg)) - - input2 := []byte(`bucket: abdd -endpoint: "s3-endpoint" -sse_config: - type: SSE-C`) - - cfg, err = parseConfig(input2) - testutil.Ok(t, err) - testutil.NotOk(t, validate(cfg)) - - input3 := []byte(`bucket: abdd -endpoint: "s3-endpoint" -sse_config: - type: SSE-C - kms_key_id: qweasd`) - - cfg, err = parseConfig(input3) - testutil.Ok(t, err) - testutil.NotOk(t, validate(cfg)) - - input4 := []byte(`bucket: abdd -endpoint: "s3-endpoint" -sse_config: - type: SSE-C - encryption_key: /some/file`) - - cfg, err = parseConfig(input4) - testutil.Ok(t, err) - testutil.Ok(t, validate(cfg)) - - input5 := []byte(`bucket: abdd -endpoint: "s3-endpoint" -sse_config: - type: SSE-KMS`) - - cfg, err = parseConfig(input5) - testutil.Ok(t, err) - testutil.NotOk(t, validate(cfg)) - - input6 := []byte(`bucket: abdd -endpoint: "s3-endpoint" -sse_config: - type: SSE-KMS - kms_key_id: abcd1234-ab12-cd34-1234567890ab`) - - cfg, err = parseConfig(input6) - testutil.Ok(t, err) - testutil.Ok(t, validate(cfg)) - - input7 := []byte(`bucket: abdd -endpoint: "s3-endpoint" -sse_config: - type: SSE-KMS - kms_key_id: abcd1234-ab12-cd34-1234567890ab - kms_encryption_context: - key: value - something: else - a: b`) - - cfg, err = parseConfig(input7) - testutil.Ok(t, err) - testutil.Ok(t, validate(cfg)) - - input8 := []byte(`bucket: abdd -endpoint: "s3-endpoint" -sse_config: - type: SSE-MagicKey - kms_key_id: abcd1234-ab12-cd34-1234567890ab - encryption_key: /some/file`) - - cfg, err = parseConfig(input8) - testutil.Ok(t, err) - // Error handling for the "proper type" is done as we're setting up the bucket, so validation passes here.
- testutil.Ok(t, validate(cfg)) -} - -func TestParseConfig_DefaultHTTPConfig(t *testing.T) { - input := []byte(`bucket: abcd -insecure: false`) - cfg, err := parseConfig(input) - testutil.Ok(t, err) - - if time.Duration(cfg.HTTPConfig.IdleConnTimeout) != time.Duration(90*time.Second) { - t.Errorf("parsing of idle_conn_timeout failed: got %v, expected %v", - time.Duration(cfg.HTTPConfig.IdleConnTimeout), time.Duration(90*time.Second)) - } - - if time.Duration(cfg.HTTPConfig.ResponseHeaderTimeout) != time.Duration(2*time.Minute) { - t.Errorf("parsing of response_header_timeout failed: got %v, expected %v", - time.Duration(cfg.HTTPConfig.ResponseHeaderTimeout), time.Duration(2*time.Minute)) - } - - if cfg.HTTPConfig.InsecureSkipVerify { - t.Errorf("parsing of insecure_skip_verify failed: got %v, expected %v", cfg.HTTPConfig.InsecureSkipVerify, false) - } -} - -func TestParseConfig_CustomHTTPConfig(t *testing.T) { - input := []byte(`bucket: abcd -insecure: false -http_config: - insecure_skip_verify: true - idle_conn_timeout: 50s - response_header_timeout: 1m`) - cfg, err := parseConfig(input) - testutil.Ok(t, err) - - if time.Duration(cfg.HTTPConfig.IdleConnTimeout) != time.Duration(50*time.Second) { - t.Errorf("parsing of idle_conn_timeout failed: got %v, expected %v", - time.Duration(cfg.HTTPConfig.IdleConnTimeout), time.Duration(50*time.Second)) - } - - if time.Duration(cfg.HTTPConfig.ResponseHeaderTimeout) != time.Duration(1*time.Minute) { - t.Errorf("parsing of response_header_timeout failed: got %v, expected %v", - time.Duration(cfg.HTTPConfig.ResponseHeaderTimeout), time.Duration(1*time.Minute)) - } - - if !cfg.HTTPConfig.InsecureSkipVerify { - t.Errorf("parsing of insecure_skip_verify failed: got %v, expected %v", cfg.HTTPConfig.InsecureSkipVerify, true) - } -} - -func TestParseConfig_CustomHTTPConfigWithTLS(t *testing.T) { - input := []byte(`bucket: abcd -insecure: false -http_config: - tls_config: - ca_file: /certs/ca.crt - cert_file: /certs/cert.crt - key_file: /certs/key.key - server_name: server - insecure_skip_verify: false - `) - cfg, err := parseConfig(input) - testutil.Ok(t, err) - - testutil.Equals(t, "/certs/ca.crt", cfg.HTTPConfig.TLSConfig.CAFile) - testutil.Equals(t, "/certs/cert.crt", cfg.HTTPConfig.TLSConfig.CertFile) - testutil.Equals(t, "/certs/key.key", cfg.HTTPConfig.TLSConfig.KeyFile) - testutil.Equals(t, "server", cfg.HTTPConfig.TLSConfig.ServerName) - testutil.Equals(t, false, cfg.HTTPConfig.TLSConfig.InsecureSkipVerify) -} - -func TestParseConfig_CustomLegacyInsecureSkipVerify(t *testing.T) { - input := []byte(`bucket: abcd -insecure: false -http_config: - insecure_skip_verify: true - tls_config: - insecure_skip_verify: false - `) - cfg, err := parseConfig(input) - testutil.Ok(t, err) - transport, err := DefaultTransport(cfg) - testutil.Ok(t, err) - testutil.Equals(t, true, transport.TLSClientConfig.InsecureSkipVerify) -} - -func TestValidate_OK(t *testing.T) { - input := []byte(`bucket: "bucket-name" -endpoint: "s3-endpoint" -access_key: "access_key" -insecure: false -signature_version2: false -secret_key: "secret_key" -http_config: - insecure_skip_verify: false - idle_conn_timeout: 50s`) - cfg, err := parseConfig(input) - testutil.Ok(t, err) - testutil.Ok(t, validate(cfg)) - testutil.Assert(t, cfg.PutUserMetadata != nil, "map should not be nil") - - input2 := []byte(`bucket: "bucket-name" -endpoint: "s3-endpoint" -access_key: "access_key" -insecure: false -signature_version2: false -secret_key: "secret_key" -put_user_metadata: - "X-Amz-Acl": "bucket-owner-full-control"
-http_config: - idle_conn_timeout: 0s`) - cfg2, err := parseConfig(input2) - testutil.Ok(t, err) - testutil.Ok(t, validate(cfg2)) - - testutil.Equals(t, "bucket-owner-full-control", cfg2.PutUserMetadata["X-Amz-Acl"]) -} - -func TestParseConfig_PartSize(t *testing.T) { - input := []byte(`bucket: "bucket-name" -endpoint: "s3-endpoint" -access_key: "access_key" -insecure: false -signature_version2: false -secret_key: "secret_key" -http_config: - insecure_skip_verify: false - idle_conn_timeout: 50s`) - - cfg, err := parseConfig(input) - testutil.Ok(t, err) - testutil.Assert(t, cfg.PartSize == 1024*1024*64, "when part size is not set it should default to 64MiB") - - input2 := []byte(`bucket: "bucket-name" -endpoint: "s3-endpoint" -access_key: "access_key" -insecure: false -signature_version2: false -secret_key: "secret_key" -part_size: 104857600 -http_config: - insecure_skip_verify: false - idle_conn_timeout: 50s`) - cfg2, err := parseConfig(input2) - testutil.Ok(t, err) - testutil.Assert(t, cfg2.PartSize == 1024*1024*100, "part size should be set to 100MiB when configured") -} - -func TestParseConfig_OldSEEncryptionFieldShouldFail(t *testing.T) { - input := []byte(`bucket: "bucket-name" -endpoint: "s3-endpoint" -access_key: "access_key" -insecure: false -signature_version2: false -encrypt_sse: false -secret_key: "secret_key" -see_encryption: true -put_user_metadata: - "X-Amz-Acl": "bucket-owner-full-control" -http_config: - idle_conn_timeout: 0s`) - _, err := parseConfig(input) - testutil.NotOk(t, err) -} - -func TestParseConfig_ListObjectsV1(t *testing.T) { - input := []byte(`bucket: "bucket-name" -endpoint: "s3-endpoint"`) - - cfg, err := parseConfig(input) - testutil.Ok(t, err) - - if cfg.ListObjectsVersion != "" { - t.Errorf("when list_objects_version is not set, it should default to empty") - } - - input2 := []byte(`bucket: "bucket-name" -endpoint: "s3-endpoint" -list_objects_version: "abcd"`) - - cfg2, err := parseConfig(input2) - testutil.Ok(t, err) - - if cfg2.ListObjectsVersion != "abcd" { - t.Errorf("parsing of list_objects_version failed: got %v, expected %v", cfg2.ListObjectsVersion, "abcd") - } -} - -func TestBucket_getServerSideEncryption(t *testing.T) { - // Default config should return no SSE config. - cfg := DefaultConfig - cfg.Endpoint = endpoint - bkt, err := NewBucketWithConfig(log.NewNopLogger(), cfg, "test") - testutil.Ok(t, err) - - sse, err := bkt.getServerSideEncryption(context.Background()) - testutil.Ok(t, err) - testutil.Equals(t, nil, sse) - - // If SSE is configured in the client config it should be used.
- cfg = DefaultConfig - cfg.Endpoint = endpoint - cfg.SSEConfig = SSEConfig{Type: SSES3} - bkt, err = NewBucketWithConfig(log.NewNopLogger(), cfg, "test") - testutil.Ok(t, err) - - sse, err = bkt.getServerSideEncryption(context.Background()) - testutil.Ok(t, err) - testutil.Equals(t, encrypt.S3, sse.Type()) - - // SSE-KMS can be configured in the client config with an optional - // KMSEncryptionContext. In this case the encryptionContextHeader should be - // a base64-encoded string which represents an empty string-string map "{}". - cfg = DefaultConfig - cfg.Endpoint = endpoint - cfg.SSEConfig = SSEConfig{ - Type: SSEKMS, - KMSKeyID: "key", - } - bkt, err = NewBucketWithConfig(log.NewNopLogger(), cfg, "test") - testutil.Ok(t, err) - - sse, err = bkt.getServerSideEncryption(context.Background()) - testutil.Ok(t, err) - testutil.Equals(t, encrypt.KMS, sse.Type()) - - encryptionContextHeader := "X-Amz-Server-Side-Encryption-Context" - headers := make(http.Header) - sse.Marshal(headers) - wantJson, err := json.Marshal(make(map[string]string)) - testutil.Ok(t, err) - want := base64.StdEncoding.EncodeToString(wantJson) - testutil.Equals(t, want, headers.Get(encryptionContextHeader)) - - // If the KMSEncryptionContext is set then the header should reflect its - // value. - cfg = DefaultConfig - cfg.Endpoint = endpoint - cfg.SSEConfig = SSEConfig{ - Type: SSEKMS, - KMSKeyID: "key", - KMSEncryptionContext: map[string]string{"foo": "bar"}, - } - bkt, err = NewBucketWithConfig(log.NewNopLogger(), cfg, "test") - testutil.Ok(t, err) - - sse, err = bkt.getServerSideEncryption(context.Background()) - testutil.Ok(t, err) - testutil.Equals(t, encrypt.KMS, sse.Type()) - - headers = make(http.Header) - sse.Marshal(headers) - wantJson, err = json.Marshal(cfg.SSEConfig.KMSEncryptionContext) - testutil.Ok(t, err) - want = base64.StdEncoding.EncodeToString(wantJson) - testutil.Equals(t, want, headers.Get(encryptionContextHeader)) - - // If SSE is configured in the context it should win. - cfg = DefaultConfig - cfg.Endpoint = endpoint - cfg.SSEConfig = SSEConfig{Type: SSES3} - override, err := encrypt.NewSSEKMS("test", nil) - testutil.Ok(t, err) - - bkt, err = NewBucketWithConfig(log.NewNopLogger(), cfg, "test") - testutil.Ok(t, err) - - sse, err = bkt.getServerSideEncryption(context.WithValue(context.Background(), sseConfigKey, override)) - testutil.Ok(t, err) - testutil.Equals(t, encrypt.KMS, sse.Type()) -} - -func TestBucket_Get_ShouldReturnErrorIfServerTruncateResponse(t *testing.T) { - srv := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { - w.Header().Set("Last-Modified", "Wed, 21 Oct 2015 07:28:00 GMT") - w.Header().Set("Content-Length", "100") - - // Write fewer bytes than the content length. - _, err := w.Write([]byte("12345")) - testutil.Ok(t, err) - })) - defer srv.Close() - - cfg := DefaultConfig - cfg.Bucket = "test-bucket" - cfg.Endpoint = srv.Listener.Addr().String() - cfg.Insecure = true - cfg.Region = "test" - cfg.AccessKey = "test" - cfg.SecretKey = "test" - - bkt, err := NewBucketWithConfig(log.NewNopLogger(), cfg, "test") - testutil.Ok(t, err) - - reader, err := bkt.Get(context.Background(), "test") - testutil.Ok(t, err) - - // We expect an error when reading back.
- _, err = ioutil.ReadAll(reader) - testutil.Equals(t, io.ErrUnexpectedEOF, err) -} diff --git a/pkg/objstore/swift/swift.go b/pkg/objstore/swift/swift.go deleted file mode 100644 index eb757ae9ac..0000000000 --- a/pkg/objstore/swift/swift.go +++ /dev/null @@ -1,377 +0,0 @@ -// Copyright (c) The Thanos Authors. -// Licensed under the Apache License 2.0. - -// Package swift implements common object storage abstractions against OpenStack swift APIs. -package swift - -import ( - "context" - "fmt" - "io" - "os" - "strconv" - "strings" - "testing" - "time" - - "github.com/go-kit/log" - "github.com/go-kit/log/level" - "github.com/ncw/swift" - "github.com/pkg/errors" - "github.com/prometheus/common/model" - "gopkg.in/yaml.v2" - - "github.com/thanos-io/thanos/pkg/objstore" - "github.com/thanos-io/thanos/pkg/runutil" -) - -const ( - // DirDelim is the delimiter used to model a directory structure in an object store bucket. - DirDelim = '/' - // Name of the directory in bucket, where to store file parts of SLO and DLO. - SegmentsDir = "segments/" -) - -var DefaultConfig = Config{ - AuthVersion: 0, // Means autodetect of the auth API version by the library. - ChunkSize: 1024 * 1024 * 1024, - Retries: 3, - ConnectTimeout: model.Duration(10 * time.Second), - Timeout: model.Duration(5 * time.Minute), -} - -type Config struct { - AuthVersion int `yaml:"auth_version"` - AuthUrl string `yaml:"auth_url"` - Username string `yaml:"username"` - UserDomainName string `yaml:"user_domain_name"` - UserDomainID string `yaml:"user_domain_id"` - UserId string `yaml:"user_id"` - Password string `yaml:"password"` - DomainId string `yaml:"domain_id"` - DomainName string `yaml:"domain_name"` - ProjectID string `yaml:"project_id"` - ProjectName string `yaml:"project_name"` - ProjectDomainID string `yaml:"project_domain_id"` - ProjectDomainName string `yaml:"project_domain_name"` - RegionName string `yaml:"region_name"` - ContainerName string `yaml:"container_name"` - ChunkSize int64 `yaml:"large_object_chunk_size"` - SegmentContainerName string `yaml:"large_object_segments_container_name"` - Retries int `yaml:"retries"` - ConnectTimeout model.Duration `yaml:"connect_timeout"` - Timeout model.Duration `yaml:"timeout"` - UseDynamicLargeObjects bool `yaml:"use_dynamic_large_objects"` -} - -func parseConfig(conf []byte) (*Config, error) { - sc := DefaultConfig - err := yaml.UnmarshalStrict(conf, &sc) - return &sc, err -} - -func configFromEnv() (*Config, error) { - c := swift.Connection{} - if err := c.ApplyEnvironment(); err != nil { - return nil, err - } - - config := Config{ - AuthVersion: c.AuthVersion, - AuthUrl: c.AuthUrl, - Password: c.ApiKey, - Username: c.UserName, - UserId: c.UserId, - DomainId: c.DomainId, - DomainName: c.Domain, - ProjectID: c.TenantId, - ProjectName: c.Tenant, - ProjectDomainID: c.TenantDomainId, - ProjectDomainName: c.TenantDomain, - RegionName: c.Region, - ContainerName: os.Getenv("OS_CONTAINER_NAME"), - ChunkSize: DefaultConfig.ChunkSize, - SegmentContainerName: os.Getenv("SWIFT_SEGMENTS_CONTAINER_NAME"), - Retries: c.Retries, - ConnectTimeout: model.Duration(c.ConnectTimeout), - Timeout: model.Duration(c.Timeout), - UseDynamicLargeObjects: false, - } - if os.Getenv("SWIFT_CHUNK_SIZE") != "" { - var err error - config.ChunkSize, err = strconv.ParseInt(os.Getenv("SWIFT_CHUNK_SIZE"), 10, 64) - if err != nil { - return nil, errors.Wrap(err, "parsing chunk size") - } - } - if strings.ToLower(os.Getenv("SWIFT_USE_DYNAMIC_LARGE_OBJECTS")) == "true" { - config.UseDynamicLargeObjects = true - } - 
return &config, nil -} - -func connectionFromConfig(sc *Config) *swift.Connection { - connection := swift.Connection{ - Domain: sc.DomainName, - DomainId: sc.DomainId, - UserName: sc.Username, - UserId: sc.UserId, - ApiKey: sc.Password, - AuthUrl: sc.AuthUrl, - Retries: sc.Retries, - Region: sc.RegionName, - AuthVersion: sc.AuthVersion, - Tenant: sc.ProjectName, - TenantId: sc.ProjectID, - TenantDomain: sc.ProjectDomainName, - TenantDomainId: sc.ProjectDomainID, - ConnectTimeout: time.Duration(sc.ConnectTimeout), - Timeout: time.Duration(sc.Timeout), - } - return &connection -} - -type Container struct { - logger log.Logger - name string - connection *swift.Connection - chunkSize int64 - useDynamicLargeObjects bool - segmentsContainer string -} - -func NewContainer(logger log.Logger, conf []byte) (*Container, error) { - sc, err := parseConfig(conf) - if err != nil { - return nil, errors.Wrap(err, "parse config") - } - return NewContainerFromConfig(logger, sc, false) -} - -func ensureContainer(connection *swift.Connection, name string, createIfNotExist bool) error { - if _, _, err := connection.Container(name); err != nil { - if err != swift.ContainerNotFound { - return errors.Wrapf(err, "verify container %s", name) - } - if !createIfNotExist { - return fmt.Errorf("unable to find the expected container %s", name) - } - if err = connection.ContainerCreate(name, swift.Headers{}); err != nil { - return errors.Wrapf(err, "create container %s", name) - } - return nil - } - return nil -} - -func NewContainerFromConfig(logger log.Logger, sc *Config, createContainer bool) (*Container, error) { - connection := connectionFromConfig(sc) - if err := connection.Authenticate(); err != nil { - return nil, errors.Wrap(err, "authentication") - } - - if err := ensureContainer(connection, sc.ContainerName, createContainer); err != nil { - return nil, err - } - if sc.SegmentContainerName == "" { - sc.SegmentContainerName = sc.ContainerName - } else if err := ensureContainer(connection, sc.SegmentContainerName, createContainer); err != nil { - return nil, err - } - - return &Container{ - logger: logger, - name: sc.ContainerName, - connection: connection, - chunkSize: sc.ChunkSize, - useDynamicLargeObjects: sc.UseDynamicLargeObjects, - segmentsContainer: sc.SegmentContainerName, - }, nil -} - -// Name returns the container name for swift. -func (c *Container) Name() string { - return c.name -} - -// Iter calls f for each entry in the given directory. The argument to f is the full -// object name including the prefix of the inspected directory. 
-func (c *Container) Iter(_ context.Context, dir string, f func(string) error, options ...objstore.IterOption) error { - if dir != "" { - dir = strings.TrimSuffix(dir, string(DirDelim)) + string(DirDelim) - } - - listOptions := &swift.ObjectsOpts{ - Prefix: dir, - Delimiter: DirDelim, - } - if objstore.ApplyIterOptions(options...).Recursive { - listOptions.Delimiter = rune(0) - } - - return c.connection.ObjectsWalk(c.name, listOptions, func(opts *swift.ObjectsOpts) (interface{}, error) { - objects, err := c.connection.ObjectNames(c.name, opts) - if err != nil { - return objects, errors.Wrap(err, "list object names") - } - for _, object := range objects { - if object == SegmentsDir { - continue - } - if err := f(object); err != nil { - return objects, errors.Wrap(err, "iteration over objects") - } - } - return objects, nil - }) -} - -func (c *Container) get(name string, headers swift.Headers, checkHash bool) (io.ReadCloser, error) { - if name == "" { - return nil, errors.New("object name cannot be empty") - } - file, _, err := c.connection.ObjectOpen(c.name, name, checkHash, headers) - if err != nil { - return nil, errors.Wrap(err, "open object") - } - return file, err -} - -// Get returns a reader for the given object name. -func (c *Container) Get(_ context.Context, name string) (io.ReadCloser, error) { - return c.get(name, swift.Headers{}, true) -} - -func (c *Container) GetRange(_ context.Context, name string, off, length int64) (io.ReadCloser, error) { - // Set Range HTTP header, see the docs https://docs.openstack.org/api-ref/object-store/?expanded=show-container-details-and-list-objects-detail,get-object-content-and-metadata-detail#id76. - bytesRange := fmt.Sprintf("bytes=%d-", off) - if length != -1 { - bytesRange = fmt.Sprintf("%s%d", bytesRange, off+length-1) - } - return c.get(name, swift.Headers{"Range": bytesRange}, false) -} - -// Attributes returns information about the specified object. -func (c *Container) Attributes(_ context.Context, name string) (objstore.ObjectAttributes, error) { - if name == "" { - return objstore.ObjectAttributes{}, errors.New("object name cannot be empty") - } - info, _, err := c.connection.Object(c.name, name) - if err != nil { - return objstore.ObjectAttributes{}, errors.Wrap(err, "get object attributes") - } - return objstore.ObjectAttributes{ - Size: info.Bytes, - LastModified: info.LastModified, - }, nil -} - -// Exists checks if the given object exists. -func (c *Container) Exists(_ context.Context, name string) (bool, error) { - found := true - _, _, err := c.connection.Object(c.name, name) - if c.IsObjNotFoundErr(err) { - err = nil - found = false - } - return found, err -} - -// IsObjNotFoundErr returns true if the error means that the object is not found. Relevant to Get operations. -func (c *Container) IsObjNotFoundErr(err error) bool { - return errors.Is(err, swift.ObjectNotFound) -} - -// Upload writes the contents of the reader as an object into the container. -func (c *Container) Upload(_ context.Context, name string, r io.Reader) (err error) { - size, err := objstore.TryToGetSize(r) - if err != nil { - level.Warn(c.logger).Log("msg", "could not guess file size, using large object to avoid issues if the file is larger than the limit", "name", name, "err", err) - // Use anything greater than or equal to the chunk size so the SLO path is used.
- size = c.chunkSize - } - var file io.WriteCloser - if size >= c.chunkSize { - opts := swift.LargeObjectOpts{ - Container: c.name, - ObjectName: name, - ChunkSize: c.chunkSize, - SegmentContainer: c.segmentsContainer, - CheckHash: true, - } - if c.useDynamicLargeObjects { - if file, err = c.connection.DynamicLargeObjectCreateFile(&opts); err != nil { - return errors.Wrap(err, "create DLO file") - } - } else { - if file, err = c.connection.StaticLargeObjectCreateFile(&opts); err != nil { - return errors.Wrap(err, "create SLO file") - } - } - } else { - if file, err = c.connection.ObjectCreate(c.name, name, true, "", "", swift.Headers{}); err != nil { - return errors.Wrap(err, "create file") - } - } - defer runutil.CloseWithErrCapture(&err, file, "upload object close") - if _, err := io.Copy(file, r); err != nil { - return errors.Wrap(err, "uploading object") - } - return nil -} - -// Delete removes the object with the given name. -func (c *Container) Delete(_ context.Context, name string) error { - return errors.Wrap(c.connection.LargeObjectDelete(c.name, name), "delete object") -} - -func (*Container) Close() error { - // Nothing to close. - return nil -} - -// NewTestContainer creates a test objstore client, creating a temporary container before returning. -// In its close function it empties and deletes the container. -func NewTestContainer(t testing.TB) (objstore.Bucket, func(), error) { - config, err := configFromEnv() - if err != nil { - return nil, nil, errors.Wrap(err, "loading config from ENV") - } - if config.ContainerName != "" { - if os.Getenv("THANOS_ALLOW_EXISTING_BUCKET_USE") == "" { - return nil, nil, errors.New("OS_CONTAINER_NAME is defined. Normally these tests will create a temporary container " + - "and delete it after the test. Unset the OS_CONTAINER_NAME env variable to use the default logic. If you really want to run " + - "tests against the provided (NOT USED!) container, set THANOS_ALLOW_EXISTING_BUCKET_USE=true. WARNING: That container " + - "needs to be manually cleared. This means that it is only useful to run one test at a time. This is due " + - "to safety (accidentally pointing a prod container at tests) as well as Swift not being fully strongly consistent.") - } - c, err := NewContainerFromConfig(log.NewNopLogger(), config, false) - if err != nil { - return nil, nil, errors.Wrap(err, "initializing new container") - } - if err := c.Iter(context.Background(), "", func(f string) error { - return errors.Errorf("container %s is not empty", c.Name()) - }); err != nil { - return nil, nil, errors.Wrapf(err, "check container %s", c.Name()) - } - t.Log("WARNING. Reusing", c.Name(), "container for Swift tests.
Manual cleanup afterwards is required") - return c, func() {}, nil - } - config.ContainerName = objstore.CreateTemporaryTestBucketName(t) - config.SegmentContainerName = config.ContainerName - c, err := NewContainerFromConfig(log.NewNopLogger(), config, true) - if err != nil { - return nil, nil, errors.Wrap(err, "initializing new container") - } - t.Log("created temporary container for swift tests with name", c.Name()) - - return c, func() { - objstore.EmptyBucket(t, context.Background(), c) - if err := c.connection.ContainerDelete(c.name); err != nil { - t.Logf("deleting container %s failed: %s", c.Name(), err) - } - if err := c.connection.ContainerDelete(c.segmentsContainer); err != nil { - t.Logf("deleting segments container %s failed: %s", c.segmentsContainer, err) - } - }, nil -} diff --git a/pkg/objstore/swift/swift_test.go b/pkg/objstore/swift/swift_test.go deleted file mode 100644 index c4aac41860..0000000000 --- a/pkg/objstore/swift/swift_test.go +++ /dev/null @@ -1,36 +0,0 @@ -// Copyright (c) The Thanos Authors. -// Licensed under the Apache License 2.0. - -package swift - -import ( - "testing" - - "github.com/thanos-io/thanos/pkg/testutil" -) - -func TestParseConfig(t *testing.T) { - input := []byte(`auth_url: http://identity.something.com/v3 -username: thanos -user_domain_name: userDomain -project_name: thanosProject -project_domain_name: projectDomain`) - - cfg, err := parseConfig(input) - testutil.Ok(t, err) - - testutil.Equals(t, "http://identity.something.com/v3", cfg.AuthUrl) - testutil.Equals(t, "thanos", cfg.Username) - testutil.Equals(t, "userDomain", cfg.UserDomainName) - testutil.Equals(t, "thanosProject", cfg.ProjectName) - testutil.Equals(t, "projectDomain", cfg.ProjectDomainName) -} - -func TestParseConfigFail(t *testing.T) { - input := []byte(`auth_url: http://identity.something.com/v3 -tenant_name: something`) - - _, err := parseConfig(input) - // Must result in an unmarshal error as there's no `tenant_name` in SwiftConfig. - testutil.NotOk(t, err) -} diff --git a/pkg/objstore/testing.go b/pkg/objstore/testing.go deleted file mode 100644 index d3dfcde959..0000000000 --- a/pkg/objstore/testing.go +++ /dev/null @@ -1,311 +0,0 @@ -// Copyright (c) The Thanos Authors. -// Licensed under the Apache License 2.0. - -package objstore - -import ( - "bytes" - "context" - "fmt" - "io" - "io/ioutil" - "math/rand" - "sort" - "strings" - "sync" - "testing" - "time" - - "github.com/thanos-io/thanos/pkg/testutil" -) - -func CreateTemporaryTestBucketName(t testing.TB) string { - src := rand.NewSource(time.Now().UnixNano()) - - // Bucket names need to conform to https://docs.aws.amazon.com/awscloudtrail/latest/userguide/cloudtrail-s3-bucket-naming-requirements.html. - name := strings.ReplaceAll(strings.Replace(fmt.Sprintf("test_%x_%s", src.Int63(), strings.ToLower(t.Name())), "_", "-", -1), "/", "-") - if len(name) >= 63 { - name = name[:63] - } - return name -} - -// EmptyBucket deletes all objects from the bucket. This operation is required to properly delete the bucket as a whole. -// It is used for testing only. -// TODO(bplotka): Add retries.
-func EmptyBucket(t testing.TB, ctx context.Context, bkt Bucket) { - var wg sync.WaitGroup - - queue := []string{""} - for len(queue) > 0 { - elem := queue[0] - queue = queue[1:] - - err := bkt.Iter(ctx, elem, func(p string) error { - if strings.HasSuffix(p, DirDelim) { - queue = append(queue, p) - return nil - } - - wg.Add(1) - go func() { - if err := bkt.Delete(ctx, p); err != nil { - t.Logf("deleting object %s failed: %s", p, err) - } - wg.Done() - }() - return nil - }) - if err != nil { - t.Logf("iterating over bucket objects failed: %s", err) - wg.Wait() - return - } - } - wg.Wait() -} - -func WithNoopInstr(bkt Bucket) InstrumentedBucket { - return noopInstrumentedBucket{Bucket: bkt} -} - -type noopInstrumentedBucket struct { - Bucket -} - -func (b noopInstrumentedBucket) WithExpectedErrs(IsOpFailureExpectedFunc) Bucket { - return b -} - -func (b noopInstrumentedBucket) ReaderWithExpectedErrs(IsOpFailureExpectedFunc) BucketReader { - return b -} - -func AcceptanceTest(t *testing.T, bkt Bucket) { - ctx := context.Background() - - _, err := bkt.Get(ctx, "") - testutil.NotOk(t, err) - testutil.Assert(t, !bkt.IsObjNotFoundErr(err), "expected user error, got not found %s", err) - - _, err = bkt.Get(ctx, "id1/obj_1.some") - testutil.NotOk(t, err) - testutil.Assert(t, bkt.IsObjNotFoundErr(err), "expected not found error, got %s", err) - - ok, err := bkt.Exists(ctx, "id1/obj_1.some") - testutil.Ok(t, err) - testutil.Assert(t, !ok, "expected not exists") - - _, err = bkt.Attributes(ctx, "id1/obj_1.some") - testutil.NotOk(t, err) - testutil.Assert(t, bkt.IsObjNotFoundErr(err), "expected not found error but got %s", err) - - // Upload first object. - testutil.Ok(t, bkt.Upload(ctx, "id1/obj_1.some", strings.NewReader("@test-data@"))) - - // Double check we can immediately read it. - rc1, err := bkt.Get(ctx, "id1/obj_1.some") - testutil.Ok(t, err) - defer func() { testutil.Ok(t, rc1.Close()) }() - content, err := ioutil.ReadAll(rc1) - testutil.Ok(t, err) - testutil.Equals(t, "@test-data@", string(content)) - - // Check if we can get the correct size. - attrs, err := bkt.Attributes(ctx, "id1/obj_1.some") - testutil.Ok(t, err) - testutil.Assert(t, attrs.Size == 11, "expected size to be equal to 11") - - rc2, err := bkt.GetRange(ctx, "id1/obj_1.some", 1, 3) - testutil.Ok(t, err) - defer func() { testutil.Ok(t, rc2.Close()) }() - content, err = ioutil.ReadAll(rc2) - testutil.Ok(t, err) - testutil.Equals(t, "tes", string(content)) - - // Unspecified range with offset. - rcUnspecifiedLen, err := bkt.GetRange(ctx, "id1/obj_1.some", 1, -1) - testutil.Ok(t, err) - defer func() { testutil.Ok(t, rcUnspecifiedLen.Close()) }() - content, err = ioutil.ReadAll(rcUnspecifiedLen) - testutil.Ok(t, err) - testutil.Equals(t, "test-data@", string(content)) - - // Out-of-range offset. Do not rely on the outcome. - // NOTE: For various providers we have different outcomes. - // * GCS is giving 416 status code - // * S3 errors immediately with an invalid range error. - // * inmem and filesystem are returning 0 bytes. - //rcOffset, err := bkt.GetRange(ctx, "id1/obj_1.some", 124141, 3) - - // Out-of-range length. We expect to read the file fully. - rcLength, err := bkt.GetRange(ctx, "id1/obj_1.some", 3, 9999) - testutil.Ok(t, err) - defer func() { testutil.Ok(t, rcLength.Close()) }() - content, err = ioutil.ReadAll(rcLength) - testutil.Ok(t, err) - testutil.Equals(t, "st-data@", string(content)) - - ok, err = bkt.Exists(ctx, "id1/obj_1.some") - testutil.Ok(t, err) - testutil.Assert(t, ok, "expected exists") - - // Upload other objects.
- testutil.Ok(t, bkt.Upload(ctx, "id1/obj_2.some", strings.NewReader("@test-data2@"))) - // Upload should be idempotent. - testutil.Ok(t, bkt.Upload(ctx, "id1/obj_2.some", strings.NewReader("@test-data2@"))) - testutil.Ok(t, bkt.Upload(ctx, "id1/obj_3.some", strings.NewReader("@test-data3@"))) - testutil.Ok(t, bkt.Upload(ctx, "id1/sub/subobj_1.some", strings.NewReader("@test-data4@"))) - testutil.Ok(t, bkt.Upload(ctx, "id1/sub/subobj_2.some", strings.NewReader("@test-data5@"))) - testutil.Ok(t, bkt.Upload(ctx, "id2/obj_4.some", strings.NewReader("@test-data6@"))) - testutil.Ok(t, bkt.Upload(ctx, "obj_5.some", strings.NewReader("@test-data7@"))) - - // Can we iter over items from top dir? - var seen []string - testutil.Ok(t, bkt.Iter(ctx, "", func(fn string) error { - seen = append(seen, fn) - return nil - })) - expected := []string{"obj_5.some", "id1/", "id2/"} - sort.Strings(expected) - sort.Strings(seen) - testutil.Equals(t, expected, seen) - - // Can we iter over items from top dir recursively? - seen = []string{} - testutil.Ok(t, bkt.Iter(ctx, "", func(fn string) error { - seen = append(seen, fn) - return nil - }, WithRecursiveIter)) - expected = []string{"id1/obj_1.some", "id1/obj_2.some", "id1/obj_3.some", "id1/sub/subobj_1.some", "id1/sub/subobj_2.some", "id2/obj_4.some", "obj_5.some"} - sort.Strings(expected) - sort.Strings(seen) - testutil.Equals(t, expected, seen) - - // Can we iter over items from id1/ dir? - seen = []string{} - testutil.Ok(t, bkt.Iter(ctx, "id1/", func(fn string) error { - seen = append(seen, fn) - return nil - })) - testutil.Equals(t, []string{"id1/obj_1.some", "id1/obj_2.some", "id1/obj_3.some", "id1/sub/"}, seen) - - // Can we iter over items from id1/ dir recursively? - seen = []string{} - testutil.Ok(t, bkt.Iter(ctx, "id1/", func(fn string) error { - seen = append(seen, fn) - return nil - }, WithRecursiveIter)) - testutil.Equals(t, []string{"id1/obj_1.some", "id1/obj_2.some", "id1/obj_3.some", "id1/sub/subobj_1.some", "id1/sub/subobj_2.some"}, seen) - - // Can we iter over items from id1 dir? - seen = []string{} - testutil.Ok(t, bkt.Iter(ctx, "id1", func(fn string) error { - seen = append(seen, fn) - return nil - })) - testutil.Equals(t, []string{"id1/obj_1.some", "id1/obj_2.some", "id1/obj_3.some", "id1/sub/"}, seen) - - // Can we iter over items from id1 dir recursively? - seen = []string{} - testutil.Ok(t, bkt.Iter(ctx, "id1", func(fn string) error { - seen = append(seen, fn) - return nil - }, WithRecursiveIter)) - testutil.Equals(t, []string{"id1/obj_1.some", "id1/obj_2.some", "id1/obj_3.some", "id1/sub/subobj_1.some", "id1/sub/subobj_2.some"}, seen) - - // Can we iter over items from a non-existing dir? - testutil.Ok(t, bkt.Iter(ctx, "id0", func(fn string) error { - t.Error("Not expected to loop through a non-existing directory") - t.FailNow() - - return nil - })) - - testutil.Ok(t, bkt.Delete(ctx, "id1/obj_2.some")) - - // Delete is expected to fail on a non-existing object. - // NOTE: Don't rely on this. S3 does not comply with this the way GCS does. - // testutil.NotOk(t, bkt.Delete(ctx, "id1/obj_2.some")) - - // Can we iter over items from id1/ dir and see obj2 being deleted?
- seen = []string{} - testutil.Ok(t, bkt.Iter(ctx, "id1/", func(fn string) error { - seen = append(seen, fn) - return nil - })) - testutil.Equals(t, []string{"id1/obj_1.some", "id1/obj_3.some", "id1/sub/"}, seen) - - testutil.Ok(t, bkt.Delete(ctx, "id2/obj_4.some")) - - seen = []string{} - testutil.Ok(t, bkt.Iter(ctx, "", func(fn string) error { - seen = append(seen, fn) - return nil - })) - expected = []string{"obj_5.some", "id1/"} - sort.Strings(expected) - sort.Strings(seen) - testutil.Equals(t, expected, seen) - - testutil.Ok(t, bkt.Upload(ctx, "obj_6.som", bytes.NewReader(make([]byte, 1024*1024*200)))) - testutil.Ok(t, bkt.Delete(ctx, "obj_6.som")) -} - -type delayingBucket struct { - bkt Bucket - delay time.Duration -} - -func WithDelay(bkt Bucket, delay time.Duration) Bucket { - return &delayingBucket{bkt: bkt, delay: delay} -} - -func (d *delayingBucket) Get(ctx context.Context, name string) (io.ReadCloser, error) { - time.Sleep(d.delay) - return d.bkt.Get(ctx, name) -} - -func (d *delayingBucket) Attributes(ctx context.Context, name string) (ObjectAttributes, error) { - time.Sleep(d.delay) - return d.bkt.Attributes(ctx, name) -} - -func (d *delayingBucket) Iter(ctx context.Context, dir string, f func(string) error, options ...IterOption) error { - time.Sleep(d.delay) - return d.bkt.Iter(ctx, dir, f, options...) -} - -func (d *delayingBucket) GetRange(ctx context.Context, name string, off, length int64) (io.ReadCloser, error) { - time.Sleep(d.delay) - return d.bkt.GetRange(ctx, name, off, length) -} - -func (d *delayingBucket) Exists(ctx context.Context, name string) (bool, error) { - time.Sleep(d.delay) - return d.bkt.Exists(ctx, name) -} - -func (d *delayingBucket) Upload(ctx context.Context, name string, r io.Reader) error { - time.Sleep(d.delay) - return d.bkt.Upload(ctx, name, r) -} - -func (d *delayingBucket) Delete(ctx context.Context, name string) error { - time.Sleep(d.delay) - return d.bkt.Delete(ctx, name) -} - -func (d *delayingBucket) Name() string { - time.Sleep(d.delay) - return d.bkt.Name() -} - -func (d *delayingBucket) Close() error { - // No delay for a local operation. - return d.bkt.Close() -} -func (d *delayingBucket) IsObjNotFoundErr(err error) bool { - // No delay for a local operation. - return d.bkt.IsObjNotFoundErr(err) -} diff --git a/pkg/objstore/tlsconfig.go b/pkg/objstore/tlsconfig.go deleted file mode 100644 index f705fa31b6..0000000000 --- a/pkg/objstore/tlsconfig.go +++ /dev/null @@ -1,87 +0,0 @@ -// Copyright (c) The Thanos Authors. -// Licensed under the Apache License 2.0. - -package objstore - -import ( - "crypto/tls" - "crypto/x509" - "fmt" - "io/ioutil" -) - -// NewTLSConfig creates a new tls.Config from the given TLSConfig. -func NewTLSConfig(cfg *TLSConfig) (*tls.Config, error) { - tlsConfig := &tls.Config{InsecureSkipVerify: cfg.InsecureSkipVerify} - - // If a CA cert is provided then let's read it in. - if len(cfg.CAFile) > 0 { - b, err := readCAFile(cfg.CAFile) - if err != nil { - return nil, err - } - if !updateRootCA(tlsConfig, b) { - return nil, fmt.Errorf("unable to use specified CA cert %s", cfg.CAFile) - } - } - - if len(cfg.ServerName) > 0 { - tlsConfig.ServerName = cfg.ServerName - } - // If a client cert & key is provided then configure TLS config accordingly. 
- if len(cfg.CertFile) > 0 && len(cfg.KeyFile) == 0 { - return nil, fmt.Errorf("client cert file %q specified without client key file", cfg.CertFile) - } else if len(cfg.KeyFile) > 0 && len(cfg.CertFile) == 0 { - return nil, fmt.Errorf("client key file %q specified without client cert file", cfg.KeyFile) - } else if len(cfg.CertFile) > 0 && len(cfg.KeyFile) > 0 { - // Verify that client cert and key are valid. - if _, err := cfg.getClientCertificate(nil); err != nil { - return nil, err - } - tlsConfig.GetClientCertificate = cfg.getClientCertificate - } - - return tlsConfig, nil -} - -// readCAFile reads the CA cert file from disk. -func readCAFile(f string) ([]byte, error) { - data, err := ioutil.ReadFile(f) - if err != nil { - return nil, fmt.Errorf("unable to load specified CA cert %s: %s", f, err) - } - return data, nil -} - -// updateRootCA parses the given byte slice as a series of PEM encoded certificates and updates tls.Config.RootCAs. -func updateRootCA(cfg *tls.Config, b []byte) bool { - caCertPool := x509.NewCertPool() - if !caCertPool.AppendCertsFromPEM(b) { - return false - } - cfg.RootCAs = caCertPool - return true -} - -// getClientCertificate reads the pair of client cert and key from disk and returns a tls.Certificate. -func (c *TLSConfig) getClientCertificate(*tls.CertificateRequestInfo) (*tls.Certificate, error) { - cert, err := tls.LoadX509KeyPair(c.CertFile, c.KeyFile) - if err != nil { - return nil, fmt.Errorf("unable to use specified client cert (%s) & key (%s): %s", c.CertFile, c.KeyFile, err) - } - return &cert, nil -} - -// TLSConfig configures the options for TLS connections. -type TLSConfig struct { - // The CA cert to use for the targets. - CAFile string `yaml:"ca_file"` - // The client cert file for the targets. - CertFile string `yaml:"cert_file"` - // The client key file for the targets. - KeyFile string `yaml:"key_file"` - // Used to verify the hostname for the targets. - ServerName string `yaml:"server_name"` - // Disable target certificate validation. - InsecureSkipVerify bool `yaml:"insecure_skip_verify"` -} diff --git a/pkg/objstore/tracing.go b/pkg/objstore/tracing.go deleted file mode 100644 index 0dccab724e..0000000000 --- a/pkg/objstore/tracing.go +++ /dev/null @@ -1,159 +0,0 @@ -// Copyright (c) The Thanos Authors. -// Licensed under the Apache License 2.0. - -package objstore - -import ( - "context" - "io" - - "github.com/opentracing/opentracing-go" - - "github.com/thanos-io/thanos/pkg/tracing" -) - -// TracingBucket includes bucket operations in the traces. -type TracingBucket struct { - bkt Bucket -} - -func NewTracingBucket(bkt Bucket) InstrumentedBucket { - return TracingBucket{bkt: bkt} -} - -func (t TracingBucket) Iter(ctx context.Context, dir string, f func(string) error, options ...IterOption) (err error) { - tracing.DoWithSpan(ctx, "bucket_iter", func(spanCtx context.Context, span opentracing.Span) { - span.LogKV("dir", dir) - err = t.bkt.Iter(spanCtx, dir, f, options...) 
- }) - return -} - -func (t TracingBucket) Get(ctx context.Context, name string) (io.ReadCloser, error) { - span, spanCtx := tracing.StartSpan(ctx, "bucket_get") - span.LogKV("name", name) - - r, err := t.bkt.Get(spanCtx, name) - if err != nil { - span.LogKV("err", err) - span.Finish() - return nil, err - } - - return newTracingReadCloser(r, span), nil -} - -func (t TracingBucket) GetRange(ctx context.Context, name string, off, length int64) (io.ReadCloser, error) { - span, spanCtx := tracing.StartSpan(ctx, "bucket_getrange") - span.LogKV("name", name, "offset", off, "length", length) - - r, err := t.bkt.GetRange(spanCtx, name, off, length) - if err != nil { - span.LogKV("err", err) - span.Finish() - return nil, err - } - - return newTracingReadCloser(r, span), nil -} - -func (t TracingBucket) Exists(ctx context.Context, name string) (exists bool, err error) { - tracing.DoWithSpan(ctx, "bucket_exists", func(spanCtx context.Context, span opentracing.Span) { - span.LogKV("name", name) - exists, err = t.bkt.Exists(spanCtx, name) - }) - return -} - -func (t TracingBucket) Attributes(ctx context.Context, name string) (attrs ObjectAttributes, err error) { - tracing.DoWithSpan(ctx, "bucket_attributes", func(spanCtx context.Context, span opentracing.Span) { - span.LogKV("name", name) - attrs, err = t.bkt.Attributes(spanCtx, name) - }) - return -} - -func (t TracingBucket) Upload(ctx context.Context, name string, r io.Reader) (err error) { - tracing.DoWithSpan(ctx, "bucket_upload", func(spanCtx context.Context, span opentracing.Span) { - span.LogKV("name", name) - err = t.bkt.Upload(spanCtx, name, r) - }) - return -} - -func (t TracingBucket) Delete(ctx context.Context, name string) (err error) { - tracing.DoWithSpan(ctx, "bucket_delete", func(spanCtx context.Context, span opentracing.Span) { - span.LogKV("name", name) - err = t.bkt.Delete(spanCtx, name) - }) - return -} - -func (t TracingBucket) Name() string { - return "tracing: " + t.bkt.Name() -} - -func (t TracingBucket) Close() error { - return t.bkt.Close() -} - -func (t TracingBucket) IsObjNotFoundErr(err error) bool { - return t.bkt.IsObjNotFoundErr(err) -} - -func (t TracingBucket) WithExpectedErrs(expectedFunc IsOpFailureExpectedFunc) Bucket { - if ib, ok := t.bkt.(InstrumentedBucket); ok { - return TracingBucket{bkt: ib.WithExpectedErrs(expectedFunc)} - } - return t -} - -func (t TracingBucket) ReaderWithExpectedErrs(expectedFunc IsOpFailureExpectedFunc) BucketReader { - return t.WithExpectedErrs(expectedFunc) -} - -type tracingReadCloser struct { - r io.ReadCloser - s opentracing.Span - - objSize int64 - objSizeErr error - - read int -} - -func newTracingReadCloser(r io.ReadCloser, span opentracing.Span) io.ReadCloser { - // Since TryToGetSize can only reliably return size before doing any read calls, - // we call during "construction" and remember the results. 
- objSize, objSizeErr := TryToGetSize(r) - - return &tracingReadCloser{r: r, s: span, objSize: objSize, objSizeErr: objSizeErr} -} - -func (t *tracingReadCloser) ObjectSize() (int64, error) { - return t.objSize, t.objSizeErr -} - -func (t *tracingReadCloser) Read(p []byte) (int, error) { - n, err := t.r.Read(p) - if n > 0 { - t.read += n - } - if err != nil && err != io.EOF && t.s != nil { - t.s.LogKV("err", err) - } - return n, err -} - -func (t *tracingReadCloser) Close() error { - err := t.r.Close() - if t.s != nil { - t.s.LogKV("read", t.read) - if err != nil { - t.s.LogKV("close err", err) - } - t.s.Finish() - t.s = nil - } - return err -} diff --git a/pkg/objstoreutil/objstoreutil.go b/pkg/objstoreutil/objstoreutil.go new file mode 100644 index 0000000000..ad71752024 --- /dev/null +++ b/pkg/objstoreutil/objstoreutil.go @@ -0,0 +1,19 @@ +package objstoreutil + +import ( + "github.com/go-kit/log" + + "github.com/prometheus/client_golang/prometheus" + "github.com/thanos-io/objstore" + "github.com/thanos-io/objstore/client" +) + +// NewBucket creates a bucket client from the given YAML config and wraps it with metrics and tracing instrumentation. +func NewBucket(logger log.Logger, confContentYaml []byte, reg prometheus.Registerer, component string) (objstore.InstrumentedBucket, error) { + bucket, err := client.NewBucket(logger, confContentYaml, component) + if err != nil { + return nil, err + } + + return objstore.NewTracingBucket(objstore.BucketWithMetrics(bucket.Name(), bucket, reg)), nil +} diff --git a/pkg/receive/multitsdb.go b/pkg/receive/multitsdb.go index 9e158776c0..27ab48103b 100644 --- a/pkg/receive/multitsdb.go +++ b/pkg/receive/multitsdb.go @@ -25,7 +25,7 @@ import ( "github.com/thanos-io/thanos/pkg/component" "github.com/thanos-io/thanos/pkg/errutil" "github.com/thanos-io/thanos/pkg/exemplars" - "github.com/thanos-io/thanos/pkg/objstore" + "github.com/thanos-io/objstore" "github.com/thanos-io/thanos/pkg/shipper" "github.com/thanos-io/thanos/pkg/store" "github.com/thanos-io/thanos/pkg/store/labelpb" diff --git a/pkg/replicate/replicator.go b/pkg/replicate/replicator.go index db7992c5ea..281814d15d 100644 --- a/pkg/replicate/replicator.go +++ b/pkg/replicate/replicator.go @@ -22,13 +22,13 @@ import ( "github.com/prometheus/common/model" "github.com/prometheus/prometheus/model/labels" + "github.com/thanos-io/objstore" thanosblock "github.com/thanos-io/thanos/pkg/block" "github.com/thanos-io/thanos/pkg/compact" "github.com/thanos-io/thanos/pkg/component" "github.com/thanos-io/thanos/pkg/extprom" thanosmodel "github.com/thanos-io/thanos/pkg/model" - "github.com/thanos-io/thanos/pkg/objstore" - "github.com/thanos-io/thanos/pkg/objstore/client" + "github.com/thanos-io/thanos/pkg/objstoreutil" "github.com/thanos-io/thanos/pkg/prober" "github.com/thanos-io/thanos/pkg/runutil" "github.com/thanos-io/thanos/pkg/server/http" @@ -126,7 +126,7 @@ func RunReplicate( return errors.New("No supported bucket was configured to replicate from") } - fromBkt, err := client.NewBucket( + fromBkt, err := objstoreutil.NewBucket( logger, fromConfContentYaml, prometheus.WrapRegistererWith(prometheus.Labels{"replicate": "from"}, reg), @@ -145,7 +145,7 @@ func RunReplicate( return errors.New("No supported bucket was configured to replicate to") } - toBkt, err := client.NewBucket( + toBkt, err := objstoreutil.NewBucket( logger, toConfContentYaml, prometheus.WrapRegistererWith(prometheus.Labels{"replicate": "to"}, reg), diff --git a/pkg/replicate/scheme.go b/pkg/replicate/scheme.go index f9b1067b8e..d0b1530993 100--- --- a/pkg/replicate/scheme.go +++ b/pkg/replicate/scheme.go @@ -23,7 +23,7 @@ import ( thanosblock
"github.com/thanos-io/thanos/pkg/block" "github.com/thanos-io/thanos/pkg/block/metadata" "github.com/thanos-io/thanos/pkg/compact" - "github.com/thanos-io/thanos/pkg/objstore" + "github.com/thanos-io/objstore" "github.com/thanos-io/thanos/pkg/runutil" ) diff --git a/pkg/replicate/scheme_test.go b/pkg/replicate/scheme_test.go index 4d6c38c5a1..16cb807ec1 100644 --- a/pkg/replicate/scheme_test.go +++ b/pkg/replicate/scheme_test.go @@ -23,7 +23,7 @@ import ( "github.com/thanos-io/thanos/pkg/block/metadata" "github.com/thanos-io/thanos/pkg/compact" "github.com/thanos-io/thanos/pkg/model" - "github.com/thanos-io/thanos/pkg/objstore" + "github.com/thanos-io/objstore" "github.com/thanos-io/thanos/pkg/testutil" ) diff --git a/pkg/shipper/shipper.go b/pkg/shipper/shipper.go index a1184a9421..a79a7481fa 100644 --- a/pkg/shipper/shipper.go +++ b/pkg/shipper/shipper.go @@ -27,7 +27,7 @@ import ( "github.com/thanos-io/thanos/pkg/block" "github.com/thanos-io/thanos/pkg/block/metadata" - "github.com/thanos-io/thanos/pkg/objstore" + "github.com/thanos-io/objstore" "github.com/thanos-io/thanos/pkg/runutil" ) diff --git a/pkg/shipper/shipper_e2e_test.go b/pkg/shipper/shipper_e2e_test.go index 641c1bf9ff..7f7344464b 100644 --- a/pkg/shipper/shipper_e2e_test.go +++ b/pkg/shipper/shipper_e2e_test.go @@ -26,8 +26,8 @@ import ( "github.com/thanos-io/thanos/pkg/block" "github.com/thanos-io/thanos/pkg/block/metadata" - "github.com/thanos-io/thanos/pkg/objstore" - "github.com/thanos-io/thanos/pkg/objstore/objtesting" + "github.com/thanos-io/objstore" + "github.com/thanos-io/objstore/objtesting" "github.com/thanos-io/thanos/pkg/testutil" "github.com/thanos-io/thanos/pkg/testutil/e2eutil" ) diff --git a/pkg/shipper/shipper_test.go b/pkg/shipper/shipper_test.go index 8844d57f4a..a7cdb26ec6 100644 --- a/pkg/shipper/shipper_test.go +++ b/pkg/shipper/shipper_test.go @@ -21,7 +21,7 @@ import ( "github.com/thanos-io/thanos/pkg/block" "github.com/thanos-io/thanos/pkg/block/metadata" - "github.com/thanos-io/thanos/pkg/objstore" + "github.com/thanos-io/objstore" "github.com/thanos-io/thanos/pkg/testutil" ) diff --git a/pkg/store/bucket.go b/pkg/store/bucket.go index 679c245fe6..4967cc731d 100644 --- a/pkg/store/bucket.go +++ b/pkg/store/bucket.go @@ -46,7 +46,7 @@ import ( "github.com/thanos-io/thanos/pkg/extprom" "github.com/thanos-io/thanos/pkg/gate" "github.com/thanos-io/thanos/pkg/model" - "github.com/thanos-io/thanos/pkg/objstore" + "github.com/thanos-io/objstore" "github.com/thanos-io/thanos/pkg/pool" "github.com/thanos-io/thanos/pkg/runutil" storecache "github.com/thanos-io/thanos/pkg/store/cache" diff --git a/pkg/store/bucket_e2e_test.go b/pkg/store/bucket_e2e_test.go index 3f0a2ed81a..612871c9eb 100644 --- a/pkg/store/bucket_e2e_test.go +++ b/pkg/store/bucket_e2e_test.go @@ -28,8 +28,8 @@ import ( "github.com/thanos-io/thanos/pkg/block" "github.com/thanos-io/thanos/pkg/block/metadata" "github.com/thanos-io/thanos/pkg/model" - "github.com/thanos-io/thanos/pkg/objstore" - "github.com/thanos-io/thanos/pkg/objstore/objtesting" + "github.com/thanos-io/objstore" + "github.com/thanos-io/objstore/objtesting" storecache "github.com/thanos-io/thanos/pkg/store/cache" "github.com/thanos-io/thanos/pkg/store/labelpb" "github.com/thanos-io/thanos/pkg/store/storepb" diff --git a/pkg/store/bucket_test.go b/pkg/store/bucket_test.go index 381b0f5053..28137c7b26 100644 --- a/pkg/store/bucket_test.go +++ b/pkg/store/bucket_test.go @@ -44,8 +44,8 @@ import ( "github.com/thanos-io/thanos/pkg/compact" 
"github.com/thanos-io/thanos/pkg/compact/downsample" "github.com/thanos-io/thanos/pkg/gate" - "github.com/thanos-io/thanos/pkg/objstore" - "github.com/thanos-io/thanos/pkg/objstore/filesystem" + "github.com/thanos-io/objstore" + "github.com/thanos-io/objstore/filesystem" "github.com/thanos-io/thanos/pkg/pool" storecache "github.com/thanos-io/thanos/pkg/store/cache" "github.com/thanos-io/thanos/pkg/store/hintspb" diff --git a/pkg/store/cache/caching_bucket.go b/pkg/store/cache/caching_bucket.go index a8fbde25b2..808072dd7a 100644 --- a/pkg/store/cache/caching_bucket.go +++ b/pkg/store/cache/caching_bucket.go @@ -21,7 +21,7 @@ import ( "golang.org/x/sync/errgroup" "github.com/thanos-io/thanos/pkg/cache" - "github.com/thanos-io/thanos/pkg/objstore" + "github.com/thanos-io/objstore" "github.com/thanos-io/thanos/pkg/runutil" "github.com/thanos-io/thanos/pkg/store/cache/cachekey" ) diff --git a/pkg/store/cache/caching_bucket_factory.go b/pkg/store/cache/caching_bucket_factory.go index c92b072090..36c0194196 100644 --- a/pkg/store/cache/caching_bucket_factory.go +++ b/pkg/store/cache/caching_bucket_factory.go @@ -19,7 +19,7 @@ import ( cache "github.com/thanos-io/thanos/pkg/cache" "github.com/thanos-io/thanos/pkg/cacheutil" "github.com/thanos-io/thanos/pkg/model" - "github.com/thanos-io/thanos/pkg/objstore" + "github.com/thanos-io/objstore" ) // BucketCacheProvider is a type used to evaluate all bucket cache providers. diff --git a/pkg/store/cache/caching_bucket_test.go b/pkg/store/cache/caching_bucket_test.go index 549afa5710..de5a27cd0e 100644 --- a/pkg/store/cache/caching_bucket_test.go +++ b/pkg/store/cache/caching_bucket_test.go @@ -20,7 +20,7 @@ import ( promtest "github.com/prometheus/client_golang/prometheus/testutil" thanoscache "github.com/thanos-io/thanos/pkg/cache" - "github.com/thanos-io/thanos/pkg/objstore" + "github.com/thanos-io/objstore" "github.com/thanos-io/thanos/pkg/runutil" "github.com/thanos-io/thanos/pkg/store/cache/cachekey" "github.com/thanos-io/thanos/pkg/testutil" diff --git a/pkg/verifier/index_issue.go b/pkg/verifier/index_issue.go index 61e84293c1..6938bd5438 100644 --- a/pkg/verifier/index_issue.go +++ b/pkg/verifier/index_issue.go @@ -17,7 +17,7 @@ import ( "github.com/pkg/errors" "github.com/thanos-io/thanos/pkg/block" - "github.com/thanos-io/thanos/pkg/objstore" + "github.com/thanos-io/objstore" ) // IndexKnownIssues verifies any known index issue. 
diff --git a/pkg/verifier/safe_delete.go b/pkg/verifier/safe_delete.go index 26eb0be2fa..5c4b39617f 100644 --- a/pkg/verifier/safe_delete.go +++ b/pkg/verifier/safe_delete.go @@ -17,7 +17,7 @@ import ( "github.com/thanos-io/thanos/pkg/block" "github.com/thanos-io/thanos/pkg/block/metadata" - "github.com/thanos-io/thanos/pkg/objstore" + "github.com/thanos-io/objstore" ) // TSDBBlockExistsInBucket checks to see if a given TSDB block ID exists in a diff --git a/pkg/verifier/verify.go b/pkg/verifier/verify.go index 12fb4dcc21..dd2fb2769f 100644 --- a/pkg/verifier/verify.go +++ b/pkg/verifier/verify.go @@ -17,7 +17,7 @@ import ( "github.com/prometheus/client_golang/prometheus" "github.com/prometheus/client_golang/prometheus/promauto" - "github.com/thanos-io/thanos/pkg/objstore" + "github.com/thanos-io/objstore" ) type Verifier interface { diff --git a/scripts/cfggen/main.go b/scripts/cfggen/main.go index 9571f159f1..fa7147a6ca 100644 --- a/scripts/cfggen/main.go +++ b/scripts/cfggen/main.go @@ -23,15 +23,15 @@ import ( "github.com/thanos-io/thanos/pkg/alert" "github.com/thanos-io/thanos/pkg/cacheutil" "github.com/thanos-io/thanos/pkg/logging" - "github.com/thanos-io/thanos/pkg/objstore/azure" - "github.com/thanos-io/thanos/pkg/objstore/bos" - "github.com/thanos-io/thanos/pkg/objstore/client" - "github.com/thanos-io/thanos/pkg/objstore/cos" - "github.com/thanos-io/thanos/pkg/objstore/filesystem" - "github.com/thanos-io/thanos/pkg/objstore/gcs" - "github.com/thanos-io/thanos/pkg/objstore/oss" - "github.com/thanos-io/thanos/pkg/objstore/s3" - "github.com/thanos-io/thanos/pkg/objstore/swift" + "github.com/thanos-io/objstore/azure" + "github.com/thanos-io/objstore/bos" + "github.com/thanos-io/objstore/client" + "github.com/thanos-io/objstore/cos" + "github.com/thanos-io/objstore/filesystem" + "github.com/thanos-io/objstore/gcs" + "github.com/thanos-io/objstore/oss" + "github.com/thanos-io/objstore/s3" + "github.com/thanos-io/objstore/swift" "github.com/thanos-io/thanos/pkg/queryfrontend" storecache "github.com/thanos-io/thanos/pkg/store/cache" trclient "github.com/thanos-io/thanos/pkg/tracing/client" diff --git a/test/e2e/compact_test.go b/test/e2e/compact_test.go index cd05cd5e42..7f833d571d 100644 --- a/test/e2e/compact_test.go +++ b/test/e2e/compact_test.go @@ -28,9 +28,9 @@ import ( "github.com/thanos-io/thanos/pkg/block" "github.com/thanos-io/thanos/pkg/block/metadata" - "github.com/thanos-io/thanos/pkg/objstore" - "github.com/thanos-io/thanos/pkg/objstore/client" - "github.com/thanos-io/thanos/pkg/objstore/s3" + "github.com/thanos-io/objstore" + "github.com/thanos-io/objstore/client" + "github.com/thanos-io/objstore/s3" "github.com/thanos-io/thanos/pkg/promclient" "github.com/thanos-io/thanos/pkg/runutil" "github.com/thanos-io/thanos/pkg/testutil" diff --git a/test/e2e/e2ethanos/services.go b/test/e2e/e2ethanos/services.go index 7a63d23ef1..81e2359d85 100644 --- a/test/e2e/e2ethanos/services.go +++ b/test/e2e/e2ethanos/services.go @@ -31,9 +31,9 @@ import ( "github.com/thanos-io/thanos/pkg/alert" "github.com/thanos-io/thanos/pkg/httpconfig" - "github.com/thanos-io/thanos/pkg/objstore" - "github.com/thanos-io/thanos/pkg/objstore/client" - "github.com/thanos-io/thanos/pkg/objstore/s3" + "github.com/thanos-io/objstore" + "github.com/thanos-io/objstore/client" + "github.com/thanos-io/objstore/s3" "github.com/thanos-io/thanos/pkg/queryfrontend" "github.com/thanos-io/thanos/pkg/receive" ) diff --git a/test/e2e/info_api_test.go b/test/e2e/info_api_test.go index 6122d9ea6b..dfa839dc72 100644 --- 
a/test/e2e/info_api_test.go +++ b/test/e2e/info_api_test.go @@ -16,7 +16,7 @@ import ( "github.com/efficientgo/e2e" "github.com/prometheus/prometheus/model/labels" - "github.com/thanos-io/thanos/pkg/objstore/client" + "github.com/thanos-io/objstore/client" "github.com/thanos-io/thanos/pkg/query" "github.com/thanos-io/thanos/pkg/runutil" "github.com/thanos-io/thanos/pkg/testutil" diff --git a/test/e2e/query_test.go b/test/e2e/query_test.go index 280b2cd9d1..ab13cadcab 100644 --- a/test/e2e/query_test.go +++ b/test/e2e/query_test.go @@ -37,9 +37,9 @@ import ( "github.com/thanos-io/thanos/pkg/block/metadata" "github.com/thanos-io/thanos/pkg/exemplars/exemplarspb" "github.com/thanos-io/thanos/pkg/metadata/metadatapb" - "github.com/thanos-io/thanos/pkg/objstore" - "github.com/thanos-io/thanos/pkg/objstore/client" - "github.com/thanos-io/thanos/pkg/objstore/s3" + "github.com/thanos-io/objstore" + "github.com/thanos-io/objstore/client" + "github.com/thanos-io/objstore/s3" "github.com/thanos-io/thanos/pkg/promclient" "github.com/thanos-io/thanos/pkg/rules/rulespb" "github.com/thanos-io/thanos/pkg/runutil" diff --git a/test/e2e/store_gateway_test.go b/test/e2e/store_gateway_test.go index 8c034b7a2f..acdc85e8fd 100644 --- a/test/e2e/store_gateway_test.go +++ b/test/e2e/store_gateway_test.go @@ -23,9 +23,9 @@ import ( "github.com/thanos-io/thanos/pkg/block" "github.com/thanos-io/thanos/pkg/block/metadata" - "github.com/thanos-io/thanos/pkg/objstore" - "github.com/thanos-io/thanos/pkg/objstore/client" - "github.com/thanos-io/thanos/pkg/objstore/s3" + "github.com/thanos-io/objstore" + "github.com/thanos-io/objstore/client" + "github.com/thanos-io/objstore/s3" "github.com/thanos-io/thanos/pkg/promclient" "github.com/thanos-io/thanos/pkg/testutil" "github.com/thanos-io/thanos/pkg/testutil/e2eutil" diff --git a/test/e2e/tools_bucket_web_test.go b/test/e2e/tools_bucket_web_test.go index 5ce4621484..cd401043f9 100644 --- a/test/e2e/tools_bucket_web_test.go +++ b/test/e2e/tools_bucket_web_test.go @@ -21,9 +21,9 @@ import ( "github.com/prometheus/prometheus/model/timestamp" v1 "github.com/thanos-io/thanos/pkg/api/blocks" - "github.com/thanos-io/thanos/pkg/objstore" - "github.com/thanos-io/thanos/pkg/objstore/client" - "github.com/thanos-io/thanos/pkg/objstore/s3" + "github.com/thanos-io/objstore" + "github.com/thanos-io/objstore/client" + "github.com/thanos-io/objstore/s3" "github.com/thanos-io/thanos/pkg/runutil" "github.com/thanos-io/thanos/pkg/testutil" "github.com/thanos-io/thanos/test/e2e/e2ethanos"
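As a usage sketch of the wrapper introduced above (not taken from this patch; the filesystem config, registry, and component name are illustrative assumptions), a caller obtains an instrumented bucket like this:

package main

import (
	"github.com/go-kit/log"
	"github.com/prometheus/client_golang/prometheus"

	"github.com/thanos-io/thanos/pkg/objstoreutil"
)

func main() {
	logger := log.NewNopLogger()
	reg := prometheus.NewRegistry()
	// Hypothetical filesystem bucket config; any provider supported by
	// github.com/thanos-io/objstore/client works here.
	confContentYaml := []byte("type: FILESYSTEM\nconfig:\n  directory: /tmp/thanos-bucket")

	// NewBucket parses the YAML, builds the provider-specific client, and
	// layers metrics and tracing on top, matching what runCompact, runRule,
	// and RunReplicate now do through objstoreutil.
	bkt, err := objstoreutil.NewBucket(logger, confContentYaml, reg, "example")
	if err != nil {
		panic(err)
	}
	defer bkt.Close()
}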