// Copyright 2021 The Swarm Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package localstore

import (
	"encoding/hex"
	"errors"
	"fmt"
	"time"

	"github.com/ethersphere/bee/pkg/shed"
	"github.com/ethersphere/bee/pkg/swarm"
	"github.com/syndtr/goleveldb/leveldb"
)

// EvictBatch evicts all chunks associated with the given postage batch from
// the reserve. It is used by the batch store to handle batch expirations.
func (db *DB) EvictBatch(id []byte) error {
	db.metrics.BatchEvictCounter.Inc()
	defer func(start time.Time) {
		totalTimeMetric(db.metrics.TotalTimeBatchEvict, start)
	}(time.Now())

	// EvictBatch affects the reserve as well as the GC indexes.
	db.lock.Lock(lockKeyGC)
	defer db.lock.Unlock(lockKeyGC)

	db.stopSamplingIfRunning()

	evicted, err := db.unreserveBatch(id, swarm.MaxBins)
	if err != nil {
		db.metrics.BatchEvictErrorCounter.Inc()
		return fmt.Errorf("failed evict batch: %w", err)
	}
	db.metrics.BatchEvictCollectedCounter.Add(float64(evicted))
	db.logger.Debug("evict batch", "batch_id", swarm.NewAddress(id), "evicted_count", evicted)
	return nil
}
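
// A minimal usage sketch (hypothetical caller, not part of this file): a
// batch store expiration hook holding a *DB handle might call
//
//	if err := db.EvictBatch(expiredBatchID); err != nil {
//		return fmt.Errorf("evict expired batch: %w", err)
//	}
//
// where expiredBatchID is the raw postage batch ID as a byte slice.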

// unreserveBatch atomically unpins the chunks of a batch for all proximity
// order bins below the given radius. Unpinning drops a chunk's pin counter;
// chunks whose counter reaches zero are put in the gc index, so a chunk that
// was pinned only by the reserve becomes gc-able once unreserved.
func (db *DB) unreserveBatch(id []byte, radius uint8) (evicted uint64, err error) {
	var (
		item = shed.Item{
			BatchID: id,
		}
		reserveSizeChange uint64
	)

	// a radius of swarm.MaxBins means the whole batch is being evicted
	evictBatch := radius == swarm.MaxBins
	if evictBatch {
		if err := db.postageRadiusIndex.Delete(item); err != nil {
			return 0, err
		}
	}

	// iterate over the chunks of the batch, bin by bin
	for bin := uint8(0); bin < radius; bin++ {
		rSizeChange, err := db.unpinBatchChunks(id, bin)
		if err != nil {
			db.logger.Debug("unreserve batch", "batch", hex.EncodeToString(id), "bin", bin, "error", err)
			return 0, err
		}
		reserveSizeChange += rSizeChange

		item.Radius = bin
		if !evictBatch {
			if err := db.postageRadiusIndex.Put(item); err != nil {
				return 0, err
			}
		}
	}

	if !evictBatch {
		item.Radius = radius
		if err := db.postageRadiusIndex.Put(item); err != nil {
			return 0, err
		}
	}

	gcSize, err := db.gcSize.Get()
	if err != nil && !errors.Is(err, leveldb.ErrNotFound) {
		return 0, err
	}

	// trigger garbage collection if we have reached the cache capacity
	if gcSize >= db.cacheCapacity {
		db.triggerGarbageCollection()
	}

	return reserveSizeChange, nil
}
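
// Worked example (illustrative): with radius = 3, bins 0, 1 and 2 are
// unpinned, so the batch's chunks whose proximity order to the node is below
// 3 become gc-able, and postageRadiusIndex records Radius = 3 for the batch.
// With radius = swarm.MaxBins the batch is evicted entirely and its radius
// entry is deleted instead of updated.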

// unpinBatchSize limits how many chunks are unpinned within a single
// leveldb write batch.
var unpinBatchSize = 10000

// unpinBatchChunks unpins all chunks of the batch that fall into the given
// proximity order bin. To bound memory use it works in slices of roughly
// unpinBatchSize chunks, flushing one leveldb write batch per slice and
// resuming the index iteration from the last seen item. It returns the
// resulting change in reserve size.
func (db *DB) unpinBatchChunks(id []byte, bin uint8) (uint64, error) {
	loggerV1 := db.logger.V(1).Register()

	var (
		batch             = new(leveldb.Batch)
		gcSizeChange      int64 // number to add or subtract from gcSize and reserveSize
		totalGCSizeChange int64
	)

	unpin := func(item shed.Item) (stop bool, err error) {
		addr := swarm.NewAddress(item.Address)
		c, err := db.setUnpin(batch, addr)
		if err != nil {
			if !errors.Is(err, leveldb.ErrNotFound) {
				return false, fmt.Errorf("unpin: %w", err)
			}
			// this is possible when we are resyncing chain data after
			// a dirty shutdown
			loggerV1.Debug("unreserve set unpin chunk failed", "chunk", addr, "error", err)
		}
		gcSizeChange += c
		return false, nil
	}

	var startItem *shed.Item
	for {
		currentBatchSize := 0
		more := false
		err := db.postageChunksIndex.Iterate(func(item shed.Item) (bool, error) {
			if currentBatchSize > unpinBatchSize {
				// remember where to resume and stop this round
				startItem = &item
				more = true
				return true, nil
			}
			currentBatchSize++
			return unpin(item)
		}, &shed.IterateOptions{
			Prefix:    append(id, bin),
			StartFrom: startItem,
		})
		if err != nil {
			return 0, err
		}

		// adjust gcSize
		if gcSizeChange > 0 {
			if err := db.incGCSizeInBatch(batch, gcSizeChange); err != nil {
				return 0, err
			}
		}

		if err := db.shed.WriteBatch(batch); err != nil {
			return 0, err
		}

		batch = new(leveldb.Batch)
		totalGCSizeChange += gcSizeChange
		gcSizeChange = 0

		if !more {
			break
		}
	}

	return uint64(totalGCSizeChange), nil
}
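
// The slice-and-resume pattern above, in isolation (illustrative sketch;
// "index", "limit" and "handle" are hypothetical stand-ins):
//
//	var start *shed.Item
//	for {
//		seen, more := 0, false
//		err := index.Iterate(func(it shed.Item) (bool, error) {
//			if seen > limit {
//				start, more = &it, true
//				return true, nil // stop; resume from it next round
//			}
//			seen++
//			return handle(it)
//		}, &shed.IterateOptions{StartFrom: start})
//		if err != nil || !more {
//			break // (error handling elided in this sketch)
//		}
//	}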

// withinRadius reports whether the chunk in item falls within the storage
// radius recorded for its batch, i.e. whether its proximity order to the
// node's base address is at least item.Radius.
func withinRadius(db *DB, item shed.Item) bool {
	po := db.po(swarm.NewAddress(item.Address))
	return po >= item.Radius
}
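
// Worked example (illustrative): if the node's base address and a chunk
// address share their first 4 bits, db.po returns 4; with item.Radius = 2
// the chunk is within radius (4 >= 2) and belongs in the reserve, while
// with item.Radius = 5 it is not.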

// ReserveCapacity returns the configured capacity of the reserve.
func (db *DB) ReserveCapacity() uint64 {
	return db.reserveCapacity
}

// ComputeReserveSize iterates over the pull index and counts all chunks
// starting at the given proximity order. An address generated in bin startPO
// serves as the starting prefix for the iteration. The computed count is
// persisted as the new reserve size.
func (db *DB) ComputeReserveSize(startPO uint8) (uint64, error) {
	var count uint64

	err := db.pullIndex.Iterate(func(item shed.Item) (stop bool, err error) {
		count++
		return false, nil
	}, &shed.IterateOptions{
		StartFrom: &shed.Item{
			Address: db.addressInBin(startPO).Bytes(),
		},
	})
	if err == nil {
		err = db.setReserveSize(count)
		if err != nil {
			return 0, fmt.Errorf("failed setting reserve size: %w", err)
		}
		db.metrics.ReserveSize.Set(float64(count))
	}

	return count, err
}
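
// A minimal usage sketch (hypothetical caller, not part of this file): the
// depth monitor mentioned below might refresh the reserve size for the
// current storage depth like so:
//
//	size, err := db.ComputeReserveSize(storageDepth)
//	if err != nil {
//		return fmt.Errorf("compute reserve size at depth %d: %w", storageDepth, err)
//	}
//	_ = size // e.g. report it or compare against db.ReserveCapacity()
//
// where storageDepth is a hypothetical uint8 tracked by the caller.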

// setReserveSize updates the persisted localstore reserve size, as
// calculated by the depth monitor from the current storage depth, and
// triggers reserve eviction if the new size exceeds the configured capacity.
func (db *DB) setReserveSize(size uint64) error {
	err := db.reserveSize.Put(size)
	if err != nil {
		return fmt.Errorf("failed updating reserve size: %w", err)
	}

	if size > db.reserveCapacity {
		db.triggerReserveEviction()
	}

	return nil
}