This repository has been archived by the owner on Aug 13, 2019. It is now read-only.

Open db in Read only mode #588

Merged

Commits (36 total; changes shown from 29)
0997a15
Added db read only open mode and use it for the tsdb cli.
Apr 24, 2019
cd08753
add the DBReadOnly struct
Apr 24, 2019
8128c53
fix wal closing
Apr 24, 2019
83d3683
refactored to update blocks on demand
Apr 25, 2019
a704541
rename to DBView
Apr 25, 2019
a16cd17
revert renaming
Apr 25, 2019
ce25efc
remove metrics
Apr 25, 2019
825d6c0
detach db from openBlocks()
May 6, 2019
ca72a4c
Merge remote-tracking branch 'upstream/master' into read-only-alterna…
May 6, 2019
93af048
Merge remote-tracking branch 'upstream/master' into read-only-alterna…
May 29, 2019
c2d809b
refactor and add tests
May 30, 2019
1b68f0d
added db.Close to close all open blocks for windows
May 30, 2019
4778d73
add a read only interface for a block.
May 31, 2019
32d5aae
nits
Jun 3, 2019
b722198
Merge remote-tracking branch 'upstream/master' into read-only-alterna…
Jun 6, 2019
add6f1e
nits
krasi-georgiev Jun 11, 2019
a2726b4
Merge remote-tracking branch 'upstream/master' into read-only-alterna…
krasi-georgiev Jun 11, 2019
6092063
simplified
krasi-georgiev Jun 12, 2019
fbea5e8
refactored to use the BlockReader API.
krasi-georgiev Jun 13, 2019
3b76b2f
non blocking head closing and use dir hash to ensure read only.
krasi-georgiev Jun 14, 2019
95ba508
Merge branch 'master' into read-only-alternative
krasi-georgiev Jun 24, 2019
764d307
fix wal corruption metrics and head test
krasi-georgiev Jun 25, 2019
bd79d07
nits
krasi-georgiev Jun 25, 2019
106d0e9
nit
krasi-georgiev Jun 25, 2019
2b4ddbc
nits
krasi-georgiev Jul 2, 2019
32ae42e
Merge branch 'master' into read-only-alternative
krasi-georgiev Jul 4, 2019
fb75682
refactor error handling for DirHash and DirSize
krasi-georgiev Jul 9, 2019
1cf409f
NumSeries in Meta() of Head
codesome Jul 16, 2019
0a645ab
Merge pull request #5 from codesome/num-series-read-only
krasi-georgiev Jul 16, 2019
af10877
nits
krasi-georgiev Jul 17, 2019
b8c4781
Merge branch 'read-only-alternative' of github.com:krasi-georgiev/tsd…
krasi-georgiev Jul 17, 2019
df4a374
use channel for the db closing and remove mutex
krasi-georgiev Jul 18, 2019
b3f7774
add ErrClosed and add a test for it.
krasi-georgiev Jul 18, 2019
1a97569
handle multi errors.
krasi-georgiev Jul 19, 2019
03b33b1
nits
krasi-georgiev Jul 19, 2019
beeaefa
head meta comment
krasi-georgiev Jul 22, 2019
7 changes: 6 additions & 1 deletion CHANGELOG.md
@@ -1,4 +1,9 @@
-## Master / unreleased
+## master / unreleased
 
+- [FEATURE] Added `DBReadOnly` to allow opening a database in read only mode.
+- `DBReadOnly.Blocks()` exposes a slice of `BlockReader`s; for this, the
+  interface was refactored to return the full block meta instead of just
+  MinTime/MaxTime, which is required for reading the ULID of a block.
+
 ## 0.9.1
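For orientation, here is how the new entry point fits together; a minimal sketch assuming the `github.com/prometheus/tsdb` import path and a hypothetical local `data/` directory, using only calls that appear in this diff (`OpenDBReadOnly`, `Blocks`, `Close`, `Meta`):

```go
package main

import (
	"fmt"
	"log"

	"github.com/prometheus/tsdb"
)

func main() {
	// Open an existing TSDB directory for reading only; nil logger as in the CLI below.
	db, err := tsdb.OpenDBReadOnly("data/", nil)
	if err != nil {
		log.Fatal(err)
	}
	// Close all blocks opened by Blocks(); per commit 1b68f0d this matters on Windows.
	defer func() {
		if err := db.Close(); err != nil {
			log.Fatal(err)
		}
	}()

	blocks, err := db.Blocks() // []tsdb.BlockReader
	if err != nil {
		log.Fatal(err)
	}
	for _, b := range blocks {
		meta := b.Meta()
		fmt.Println(meta.ULID, meta.MinTime, meta.MaxTime, meta.Stats.NumSeries)
	}
}
```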
7 changes: 2 additions & 5 deletions block.go
@@ -138,11 +138,8 @@ type BlockReader interface {
 	// Tombstones returns a TombstoneReader over the block's deleted data.
 	Tombstones() (TombstoneReader, error)
 
-	// MinTime returns the min time of the block.
-	MinTime() int64
-
-	// MaxTime returns the max time of the block.
-	MaxTime() int64
+	// Meta provides meta information about the block reader.
+	Meta() BlockMeta
 }
 
 // Appendable defines an entity to which data can be appended.
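Since `Meta()` now stands in for the removed getters, it helps to recall the shape of `BlockMeta`. The excerpt below is limited to the fields this diff exercises and assumes the struct itself is unchanged by the PR; the real definition in `block.go` carries more (version, compaction history):

```go
// Excerpt of BlockMeta, reduced to the fields used by this PR's call sites.
// The ULID type comes from github.com/oklog/ulid.
type BlockMeta struct {
	// ULID uniquely identifies the block on disk; surfacing it through
	// BlockReader is the stated motivation for replacing MinTime()/MaxTime().
	ULID ulid.ULID

	// MinTime and MaxTime bound the time range of the block's samples,
	// taking over from the removed interface methods.
	MinTime int64
	MaxTime int64

	// Stats aggregates counts such as NumSeries, which the analyze
	// command prints below.
	Stats BlockStats
}
```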
8 changes: 3 additions & 5 deletions block_test.go
@@ -175,8 +175,7 @@ func TestBlockSize(t *testing.T) {
 		testutil.Ok(t, blockInit.Close())
 	}()
 	expSizeInit = blockInit.Size()
-	actSizeInit, err := testutil.DirSize(blockInit.Dir())
-	testutil.Ok(t, err)
+	actSizeInit := testutil.DirSize(t, blockInit.Dir())
 	testutil.Equals(t, expSizeInit, actSizeInit)
 }

@@ -185,7 +184,7 @@ func TestBlockSize(t *testing.T) {
 	testutil.Ok(t, blockInit.Delete(1, 10, labels.NewMustRegexpMatcher("", ".*")))
 	expAfterDelete := blockInit.Size()
 	testutil.Assert(t, expAfterDelete > expSizeInit, "after a delete the block size should be bigger as the tombstone file should grow %v > %v", expAfterDelete, expSizeInit)
-	actAfterDelete, err := testutil.DirSize(blockDirInit)
-	testutil.Ok(t, err)
+	actAfterDelete := testutil.DirSize(t, blockDirInit)
 	testutil.Equals(t, expAfterDelete, actAfterDelete, "after a delete reported block size doesn't match actual disk size")
@@ -199,8 +198,7 @@ func TestBlockSize(t *testing.T) {
 		testutil.Ok(t, blockAfterCompact.Close())
 	}()
 	expAfterCompact := blockAfterCompact.Size()
-	actAfterCompact, err := testutil.DirSize(blockAfterCompact.Dir())
-	testutil.Ok(t, err)
+	actAfterCompact := testutil.DirSize(t, blockAfterCompact.Dir())
 	testutil.Assert(t, actAfterDelete > actAfterCompact, "after a delete and compaction the block size should be smaller %v,%v", actAfterDelete, actAfterCompact)
 	testutil.Equals(t, expAfterCompact, actAfterCompact, "after a delete and compaction reported block size doesn't match actual disk size")
 }
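All three hunks switch from `DirSize(path) (int64, error)` to `DirSize(t, path) int64`, folding the error check into the helper (see commit fb75682, "refactor error handling for DirHash and DirSize"). The helper itself is outside this excerpt; a plausible sketch of its new shape:

```go
package testutil

import (
	"os"
	"path/filepath"
	"testing"
)

// DirSize returns the combined size in bytes of all files below path.
// Instead of returning an error it fails the calling test, which is why
// the call sites above no longer need their own testutil.Ok(t, err) line.
func DirSize(t *testing.T, path string) int64 {
	var size int64
	err := filepath.Walk(path, func(_ string, info os.FileInfo, err error) error {
		if err != nil {
			return err
		}
		if !info.IsDir() {
			size += info.Size()
		}
		return nil
	})
	if err != nil {
		t.Fatalf("walking %s: %v", path, err)
	}
	return size
}
```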
50 changes: 37 additions & 13 deletions cmd/tsdb/main.go
@@ -61,9 +61,6 @@ func main() {
 		dumpMaxTime = dumpCmd.Flag("max-time", "maximum timestamp to dump").Default(strconv.FormatInt(math.MaxInt64, 10)).Int64()
 	)
 
-	safeDBOptions := *tsdb.DefaultOptions
-	safeDBOptions.RetentionDuration = 0
-
 	switch kingpin.MustParse(cli.Parse(os.Args[1:])) {
 	case benchWriteCmd.FullCommand():
 		wb := &writeBenchmark{
@@ -73,18 +70,35 @@
 		}
 		wb.run()
 	case listCmd.FullCommand():
-		db, err := tsdb.Open(*listPath, nil, nil, &safeDBOptions)
+		db, err := tsdb.OpenDBReadOnly(*listPath, nil)
 		if err != nil {
 			exitWithError(err)
 		}
+		defer func() {
+			if err := db.Close(); err != nil {
+				exitWithError(err)
+			}
+		}()
+		blocks, err := db.Blocks()
+		if err != nil {
+			exitWithError(err)
+		}
-		printBlocks(db.Blocks(), listCmdHumanReadable)
+		printBlocks(blocks, listCmdHumanReadable)
 	case analyzeCmd.FullCommand():
-		db, err := tsdb.Open(*analyzePath, nil, nil, &safeDBOptions)
+		db, err := tsdb.OpenDBReadOnly(*analyzePath, nil)
 		if err != nil {
 			exitWithError(err)
 		}
-		blocks := db.Blocks()
-		var block *tsdb.Block
+		defer func() {
+			if err := db.Close(); err != nil {
+				exitWithError(err)
+			}
+		}()
+		blocks, err := db.Blocks()
+		if err != nil {
+			exitWithError(err)
+		}
+		var block tsdb.BlockReader
 		if *analyzeBlockID != "" {
 			for _, b := range blocks {
 				if b.Meta().ULID.String() == *analyzeBlockID {
@@ -100,10 +114,15 @@
 		}
 		analyzeBlock(block, *analyzeLimit)
 	case dumpCmd.FullCommand():
-		db, err := tsdb.Open(*dumpPath, nil, nil, &safeDBOptions)
+		db, err := tsdb.OpenDBReadOnly(*dumpPath, nil)
 		if err != nil {
 			exitWithError(err)
 		}
+		defer func() {
+			if err := db.Close(); err != nil {
+				exitWithError(err)
+			}
+		}()
 		dumpSamples(db, *dumpMinTime, *dumpMaxTime)
 	}
 }
@@ -390,7 +409,7 @@ func exitWithError(err error) {
 	os.Exit(1)
 }
 
-func printBlocks(blocks []*tsdb.Block, humanReadable *bool) {
+func printBlocks(blocks []tsdb.BlockReader, humanReadable *bool) {
 	tw := tabwriter.NewWriter(os.Stdout, 0, 0, 2, ' ', 0)
 	defer tw.Flush()
@@ -417,9 +436,9 @@ func getFormatedTime(timestamp int64, humanReadable *bool) string {
 	return strconv.FormatInt(timestamp, 10)
 }
 
-func analyzeBlock(b *tsdb.Block, limit int) {
-	fmt.Printf("Block path: %s\n", b.Dir())
+func analyzeBlock(b tsdb.BlockReader, limit int) {
 	meta := b.Meta()
+	fmt.Printf("Block ID: %s\n", meta.ULID)
 	// Presume 1ms resolution that Prometheus uses.
 	fmt.Printf("Duration: %s\n", (time.Duration(meta.MaxTime-meta.MinTime) * 1e6).String())
 	fmt.Printf("Series: %d\n", meta.Stats.NumSeries)
@@ -570,11 +589,16 @@ func analyzeBlock(b tsdb.BlockReader, limit int) {
 	printInfo(postingInfos)
 }
 
-func dumpSamples(db *tsdb.DB, mint, maxt int64) {
+func dumpSamples(db *tsdb.DBReadOnly, mint, maxt int64) {
 	q, err := db.Querier(mint, maxt)
 	if err != nil {
 		exitWithError(err)
 	}
+	defer func() {
+		if err := q.Close(); err != nil {
+			exitWithError(err)
+		}
+	}()
 
 	ss, err := q.Select(labels.NewMustRegexpMatcher("", ".*"))
 	if err != nil {
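The hunk is cut off before the dump loop; presumably it continues over the returned series set roughly as follows (a sketch against tsdb's SeriesSet/SeriesIterator API of this era, not necessarily the PR's exact lines):

```go
	for ss.Next() {
		series := ss.At()
		lbs := series.Labels()
		it := series.Iterator()
		for it.Next() {
			ts, val := it.At()
			fmt.Printf("%s %g %d\n", lbs, val, ts)
		}
		if it.Err() != nil {
			exitWithError(it.Err())
		}
	}
	if ss.Err() != nil {
		exitWithError(ss.Err())
	}
```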
8 changes: 4 additions & 4 deletions compact.go
@@ -662,7 +662,7 @@ func (c *LeveledCompactor) populateBlock(blocks []BlockReader, meta *BlockMeta,
 	}()
 	c.metrics.populatingBlocks.Set(1)
 
-	globalMaxt := blocks[0].MaxTime()
+	globalMaxt := blocks[0].Meta().MaxTime
 	for i, b := range blocks {
 		select {
 		case <-c.ctx.Done():
@@ -671,13 +671,13 @@
 		}
 
 		if !overlapping {
-			if i > 0 && b.MinTime() < globalMaxt {
+			if i > 0 && b.Meta().MinTime < globalMaxt {
 				c.metrics.overlappingBlocks.Inc()
 				overlapping = true
 				level.Warn(c.logger).Log("msg", "found overlapping blocks during compaction", "ulid", meta.ULID)
 			}
-			if b.MaxTime() > globalMaxt {
-				globalMaxt = b.MaxTime()
+			if b.Meta().MaxTime > globalMaxt {
+				globalMaxt = b.Meta().MaxTime
 			}
 		}
3 changes: 1 addition & 2 deletions compact_test.go
@@ -458,8 +458,7 @@ type erringBReader struct{}
 func (erringBReader) Index() (IndexReader, error)          { return nil, errors.New("index") }
 func (erringBReader) Chunks() (ChunkReader, error)         { return nil, errors.New("chunks") }
 func (erringBReader) Tombstones() (TombstoneReader, error) { return nil, errors.New("tombstones") }
-func (erringBReader) MinTime() int64                       { return 0 }
-func (erringBReader) MaxTime() int64                       { return 0 }
+func (erringBReader) Meta() BlockMeta                      { return BlockMeta{} }
 
 type nopChunkWriter struct{}
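Beyond test stubs like the one above, every real `BlockReader` has to supply `Meta()` too; commit 1cf409f adds `NumSeries` to the Head's meta so the in-memory block reports sensible stats. A hedged sketch of what that adapter plausibly looks like (head.go is not part of this excerpt, and the zero ULID and the NumSeries signature are assumptions):

```go
// Meta implements BlockReader for the in-memory head block. The head is
// dynamic, so the returned meta reflects its current state.
func (h *Head) Meta() BlockMeta {
	return BlockMeta{
		MinTime: h.MinTime(),
		MaxTime: h.MaxTime(),
		// The head has no on-disk ULID yet; a zero value stands in (assumption).
		ULID: ulid.ULID{},
		Stats: BlockStats{
			NumSeries: h.NumSeries(), // added by commit 1cf409f (assumed signature)
		},
	}
}
```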