-
Notifications
You must be signed in to change notification settings - Fork 4
/
options.go
418 lines (363 loc) · 13.1 KB
/
options.go
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281
282
283
284
285
286
287
288
289
290
291
292
293
294
295
296
297
298
299
300
301
302
303
304
305
306
307
308
309
310
311
312
313
314
315
316
317
318
319
320
321
322
323
324
325
326
327
328
329
330
331
332
333
334
335
336
337
338
339
340
341
342
343
344
345
346
347
348
349
350
351
352
353
354
355
356
357
358
359
360
361
362
363
364
365
366
367
368
369
370
371
372
373
374
375
376
377
378
379
380
381
382
383
384
385
386
387
388
389
390
391
392
393
394
395
396
397
398
399
400
401
402
403
404
405
406
407
408
409
410
411
412
413
414
415
416
417
418
package leveldb
import (
"unsafe"
"github.com/kezhuw/leveldb/internal/compaction"
"github.com/kezhuw/leveldb/internal/compress"
"github.com/kezhuw/leveldb/internal/file"
"github.com/kezhuw/leveldb/internal/filter"
"github.com/kezhuw/leveldb/internal/keys"
"github.com/kezhuw/leveldb/internal/logger"
"github.com/kezhuw/leveldb/internal/options"
)
const (
	// MaxCompactionConcurrency requests as much compaction concurrency as possible.
	// Caution: compaction is a disk-drive-sensitive task, so maximum compaction
	// concurrency usually doesn't mean good performance.
	MaxCompactionConcurrency = compaction.MaxCompactionConcurrency
)
// CompressionType defines compression methods used to compress a table block
// before it is written to a table file.
type CompressionType int
const (
	// DefaultCompression defaults to SnappyCompression for now.
	DefaultCompression CompressionType = iota
	// NoCompression means no compression for table block.
	NoCompression
	// SnappyCompression uses snappy compression to compress a table block
	// before storing it to file.
	SnappyCompression
)
// Options contains options controlling various parts of the db instance.
type Options struct {
	// Comparator defines the total order over keys in the database.
	//
	// The default comparator is BytewiseComparator, which uses the same ordering
	// as bytes.Compare.
	Comparator Comparator

	// Compression type used to compress blocks.
	//
	// The default value points to SnappyCompression.
	Compression CompressionType

	// MaxFileSize enforces size limitation for files created by LevelDB.
	//
	// Defaults to 2MiB.
	MaxFileSize int64

	// MaxGrandparentOverlapBytes limits number of overlap bytes with level+2 a
	// compacted file produced in compaction level => level+1 can have.
	//
	// Defaults to 10 * MaxFileSize.
	MaxGrandparentOverlapBytes int64

	// MaxExpandedCompactionBytes limits number of bytes of input files when expanding
	// input files in compacting level.
	//
	// Defaults to 25 * MaxFileSize.
	MaxExpandedCompactionBytes int64

	// BlockSize specifies the minimum uncompressed size in bytes for a table block.
	//
	// The default value is 4KiB.
	BlockSize int

	// BlockRestartInterval specifies the number of keys between restart points
	// for delta encoding of keys in a block.
	//
	// The default value is 16.
	BlockRestartInterval int

	// BlockCompressionRatio specifies a minimal compression ratio for table blocks.
	// An uncompressed block will be written to the table file if its compression
	// ratio is less than this value.
	//
	// The default value is 8.0/7.0, which means that uncompressed block will be written
	// to table file unless at least 1/8 of raw data were compressed out.
	BlockCompressionRatio float64

	// WriteBufferSize is the amount of data to build up in memory (backed by
	// an unsorted log on disk) before converting to a sorted on-disk file.
	//
	// Larger values increase performance, especially during bulk loads. Up to two
	// write buffers may be held in memory at the same time, so you may wish to
	// adjust this parameter to control memory usage. Also, a larger write buffer
	// will result in a longer recovery time the next time the database is opened.
	//
	// The default value is 4MiB.
	WriteBufferSize int

	// MaxOpenFiles is the number of open files that can be used by this db instance.
	// You may need to increase this if your database has a large number of files.
	//
	// The default value is 1000.
	MaxOpenFiles int

	// BlockCacheCapacity specifies the capacity in bytes for block cache.
	//
	// The default value is 8MiB.
	BlockCacheCapacity int

	// CompactionConcurrency specifies max allowed concurrent compactions.
	//
	// The default value is 1; use MaxCompactionConcurrency to maximize compaction
	// concurrency as possible.
	CompactionConcurrency int

	// CompactionBytesPerSeek states that one seek cost approximately equal time
	// to compact specified number of data.
	//
	// We decide to compact a file after a certain number of overlap seeks, this
	// way for keys in range we reduce potential seeks by one after compaction.
	// We use CompactionBytesPerSeek and MinimalAllowedOverlapSeeks to calculate
	// the number of allowed overlap seeks for a file.
	//
	// The default value is 16KiB, which means that one seek cost approximately
	// equal time to compact 16KiB data.
	CompactionBytesPerSeek int

	// MinimalAllowedOverlapSeeks specifies minimal allowed overlap seeks per table file.
	//
	// The default value is 100.
	MinimalAllowedOverlapSeeks int

	// IterationBytesPerSampleSeek specifies average iteration bytes for one sample
	// seek to detect overlap file.
	//
	// The default value is 1MiB.
	IterationBytesPerSampleSeek int

	// Level0CompactionFiles specifies that a compaction for level-0 is triggered if
	// there are more than this number of files in level-0.
	//
	// The default value is 4.
	Level0CompactionFiles int

	// Level0SlowdownWriteFiles specifies that writes will be slowed down if there are
	// more than this number of files in level-0.
	//
	// The default value is Level0CompactionFiles + 4.
	Level0SlowdownWriteFiles int

	// Level0StopWriteFiles specifies that writes will be stopped if there are more
	// than this number of files in level-0.
	//
	// The default value is Level0SlowdownWriteFiles + 4.
	Level0StopWriteFiles int

	// Filter specifies a Filter to filter out unnecessary disk reads when looking for
	// a specific key. The filter is also used to generate filter data when building
	// table files.
	//
	// The default value is nil.
	Filter Filter

	// Logger specifies a place that all internal progress/error information generated
	// by this db instance will be written to.
	//
	// The default value is a file named "LOG" stored under this db directory. You can
	// suppress logging by using DiscardLogger.
	Logger Logger

	// FileSystem defines a hierarchical file storage interface.
	//
	// The default file system is built around os package.
	FileSystem FileSystem

	// CreateIfMissing specifies whether to create one if the database does not exist.
	//
	// The default value is false.
	CreateIfMissing bool

	// ErrorIfExists specifies whether to report an error if the database already exists.
	//
	// The default value is false.
	ErrorIfExists bool
}
// getLogger wraps the user-supplied Logger in a no-op closer, or returns
// nil when no logger was configured.
func (opts *Options) getLogger() logger.LogCloser {
	if l := opts.Logger; l != nil {
		return logger.NopCloser(l)
	}
	return nil
}
// getFilter converts the public Filter to its internal representation:
// nil stays nil, an internalFilter is unwrapped, anything else is wrapped.
func (opts *Options) getFilter() filter.Filter {
	switch f := opts.Filter.(type) {
	case nil:
		return nil
	case internalFilter:
		return f.Filter
	default:
		return wrappedFilter{f}
	}
}
// getFileSystem converts the public FileSystem to its internal representation,
// defaulting to file.DefaultFileSystem when unset.
func (opts *Options) getFileSystem() file.FileSystem {
	switch fs := opts.FileSystem.(type) {
	case nil:
		return file.DefaultFileSystem
	case internalFileSystem:
		return fs.FileSystem
	default:
		return wrappedFileSystem{fs}
	}
}
// getComparator builds the internal comparator for the configured user-key
// comparator; the shared default is reused for nil or BytewiseComparator.
func (opts *Options) getComparator() *keys.InternalComparator {
	cmp := opts.Comparator
	if cmp == nil || cmp == keys.BytewiseComparator {
		return &options.DefaultInternalComparator
	}
	return &keys.InternalComparator{UserKeyComparator: cmp}
}
// getCompression maps the public CompressionType to the internal compress.Type.
// DefaultCompression (and any unrecognized value) maps to the internal default.
func (opts *Options) getCompression() compress.Type {
	switch opts.Compression {
	case NoCompression:
		return compress.NoCompression
	case SnappyCompression:
		return compress.SnappyCompression
	default:
		return options.DefaultCompression
	}
}
// getMaxFileSize returns the configured MaxFileSize, falling back to the
// internal default for zero or negative values.
func (opts *Options) getMaxFileSize() int64 {
	if size := opts.MaxFileSize; size > 0 {
		return size
	}
	return options.DefaultMaxFileSize
}
// getMaxGrandparentOverlapBytes returns the configured limit, deriving the
// default from the effective max file size when unset or non-positive.
func (opts *Options) getMaxGrandparentOverlapBytes() int64 {
	if n := opts.MaxGrandparentOverlapBytes; n > 0 {
		return n
	}
	return options.MaxGrandparentOverlapBytes(opts.getMaxFileSize())
}
// getMaxExpandedCompactionBytes returns the configured limit, deriving the
// default from the effective max file size when unset or non-positive.
func (opts *Options) getMaxExpandedCompactionBytes() int64 {
	if n := opts.MaxExpandedCompactionBytes; n > 0 {
		return n
	}
	return options.MaxExpandedCompactionBytes(opts.getMaxFileSize())
}
// getBlockSize returns the configured BlockSize, falling back to the
// internal default for zero or negative values.
func (opts *Options) getBlockSize() int {
	if size := opts.BlockSize; size > 0 {
		return size
	}
	return options.DefaultBlockSize
}
// getBlockRestartInterval returns the configured restart interval, falling
// back to the internal default for zero or negative values.
func (opts *Options) getBlockRestartInterval() int {
	if interval := opts.BlockRestartInterval; interval > 0 {
		return interval
	}
	return options.DefaultBlockRestartInterval
}
// getBlockCompressionRatio returns the configured ratio; values at or below
// 1 are meaningless (no compression gain), so they fall back to the default.
func (opts *Options) getBlockCompressionRatio() float64 {
	if ratio := opts.BlockCompressionRatio; ratio > 1 {
		return ratio
	}
	return options.DefaultBlockCompressionRatio
}
// getWriteBufferSize returns the configured WriteBufferSize, falling back
// to the internal default for zero or negative values.
func (opts *Options) getWriteBufferSize() int {
	if size := opts.WriteBufferSize; size > 0 {
		return size
	}
	return options.DefaultWriteBufferSize
}
// getMaxOpenFiles returns the configured MaxOpenFiles, falling back to the
// internal default for zero or negative values.
func (opts *Options) getMaxOpenFiles() int {
	if n := opts.MaxOpenFiles; n > 0 {
		return n
	}
	return options.DefaultMaxOpenFiles
}
// getBlockCacheCapacity returns the configured cache capacity, falling back
// to the internal default for zero or negative values.
func (opts *Options) getBlockCacheCapacity() int {
	if capacity := opts.BlockCacheCapacity; capacity > 0 {
		return capacity
	}
	return options.DefaultBlockCacheCapacity
}
// getCompactionConcurrency interprets the CompactionConcurrency field:
// positive values are used as-is, zero means the internal default, and
// negative values request maximum concurrency.
func (opts *Options) getCompactionConcurrency() int {
	n := opts.CompactionConcurrency
	if n > 0 {
		return n
	}
	if n == 0 {
		return options.DefaultCompactionConcurrency
	}
	return compaction.MaxCompactionConcurrency
}
// getCompactionBytesPerSeek returns the configured value, falling back to
// the internal default for zero or negative values.
func (opts *Options) getCompactionBytesPerSeek() int {
	if n := opts.CompactionBytesPerSeek; n > 0 {
		return n
	}
	return options.DefaultCompactionBytesPerSeek
}
// getMinimalAllowedOverlapSeeks returns the configured value, falling back
// to the internal default for zero or negative values.
func (opts *Options) getMinimalAllowedOverlapSeeks() int {
	if seeks := opts.MinimalAllowedOverlapSeeks; seeks > 0 {
		return seeks
	}
	return options.DefaultMinimalAllowedOverlapSeeks
}
// getIterationBytesPerSampleSeek returns the configured value, falling back
// to the internal default for zero or negative values.
func (opts *Options) getIterationBytesPerSampleSeek() int {
	if n := opts.IterationBytesPerSampleSeek; n > 0 {
		return n
	}
	return options.DefaultIterationBytesPerSampleSeek
}
// getLevel0CompactionFiles returns the configured level-0 compaction trigger,
// falling back to the internal default for zero or negative values.
func (opts *Options) getLevel0CompactionFiles() int {
	if files := opts.Level0CompactionFiles; files > 0 {
		return files
	}
	return options.DefaultLevel0CompactionFiles
}
// getLevel0SlowdownWriteFiles returns the slowdown threshold; any configured
// value not strictly above the compaction trigger is replaced by the trigger
// plus the default throttle step, keeping the thresholds properly ordered.
func (opts *Options) getLevel0SlowdownWriteFiles() int {
	compactionFiles := opts.getLevel0CompactionFiles()
	if opts.Level0SlowdownWriteFiles > compactionFiles {
		return opts.Level0SlowdownWriteFiles
	}
	return compactionFiles + options.DefaultLevel0ThrottleStepFiles
}
// getLevel0StopWriteFiles returns the stop threshold; any configured value
// not strictly above the slowdown threshold is replaced by the slowdown
// threshold plus the default throttle step, keeping the thresholds ordered.
func (opts *Options) getLevel0StopWriteFiles() int {
	slowdownFiles := opts.getLevel0SlowdownWriteFiles()
	if opts.Level0StopWriteFiles > slowdownFiles {
		return opts.Level0StopWriteFiles
	}
	return slowdownFiles + options.DefaultLevel0ThrottleStepFiles
}
// convertOptions translates public Options into the internal representation,
// resolving every field to its default when unset. A nil opts yields the
// shared internal default options.
func convertOptions(opts *Options) *options.Options {
	if opts == nil {
		return &options.DefaultOptions
	}
	return &options.Options{
		Comparator:                  opts.getComparator(),
		Compression:                 opts.getCompression(),
		MaxFileSize:                 opts.getMaxFileSize(),
		MaxGrandparentOverlapBytes:  opts.getMaxGrandparentOverlapBytes(),
		MaxExpandedCompactionBytes:  opts.getMaxExpandedCompactionBytes(),
		BlockSize:                   opts.getBlockSize(),
		BlockRestartInterval:        opts.getBlockRestartInterval(),
		BlockCompressionRatio:       opts.getBlockCompressionRatio(),
		WriteBufferSize:             opts.getWriteBufferSize(),
		MaxOpenFiles:                opts.getMaxOpenFiles(),
		BlockCacheCapacity:          opts.getBlockCacheCapacity(),
		CompactionConcurrency:       opts.getCompactionConcurrency(),
		CompactionBytesPerSeek:      opts.getCompactionBytesPerSeek(),
		MinimalAllowedOverlapSeeks:  opts.getMinimalAllowedOverlapSeeks(),
		IterationBytesPerSampleSeek: opts.getIterationBytesPerSampleSeek(),
		Level0CompactionFiles:       opts.getLevel0CompactionFiles(),
		Level0SlowdownWriteFiles:    opts.getLevel0SlowdownWriteFiles(),
		Level0StopWriteFiles:        opts.getLevel0StopWriteFiles(),
		Filter:                      opts.getFilter(),
		Logger:                      opts.getLogger(),
		FileSystem:                  opts.getFileSystem(),
		CreateIfMissing:             opts.CreateIfMissing,
		ErrorIfExists:               opts.ErrorIfExists,
	}
}
// ReadOptions contains options controlling behaviours of read operations.
//
// NOTE(review): convertReadOptions casts *ReadOptions to the internal
// options.ReadOptions via unsafe.Pointer, so this struct's field layout
// must stay identical to the internal type's.
type ReadOptions struct {
	// DontFillCache specifies whether data read in this operation
	// should be cached in memory. If true, data read from underlying
	// storage will not be cached in memory for later reading, but
	// if the data is already cached in memory, it will be used by
	// this operation.
	DontFillCache bool

	// VerifyChecksums specifies whether data read from underlying
	// storage should be verified against saved checksums. Note that
	// it never verifies data cached in memory.
	VerifyChecksums bool
}
// convertReadOptions translates public ReadOptions into the internal
// representation. A nil opts yields the shared internal defaults; otherwise
// the pointer is reinterpreted directly via unsafe.Pointer, which is only
// valid because ReadOptions and options.ReadOptions share the same layout.
func convertReadOptions(opts *ReadOptions) *options.ReadOptions {
	if opts == nil {
		return &options.DefaultReadOptions
	}
	return (*options.ReadOptions)(unsafe.Pointer(opts))
}
// WriteOptions contains options controlling write operations: Put, Delete,
// and Write.
//
// NOTE(review): convertWriteOptions casts *WriteOptions to the internal
// options.WriteOptions via unsafe.Pointer, so this struct's field layout
// must stay identical to the internal type's.
type WriteOptions struct {
	// Sync specifies whether to synchronize the write from OS cache to
	// underlying storage before the write is considered complete.
	// Setting Sync to true may result in slower writes.
	//
	// If Sync is false, and the machine crashes, some recent writes may
	// be lost. Note that if it is just the process crashes, no writes will
	// be lost.
	//
	// In other words, a write with false Sync has similar crash semantics
	// as the "write()" system call. A write with true Sync has similar crash
	// semantics to a "write()" system call followed by "fsync()".
	Sync bool
}
// convertWriteOptions translates public WriteOptions into the internal
// representation. A nil opts yields the shared internal defaults; otherwise
// the pointer is reinterpreted directly via unsafe.Pointer, which is only
// valid because WriteOptions and options.WriteOptions share the same layout.
func convertWriteOptions(opts *WriteOptions) *options.WriteOptions {
	if opts == nil {
		return &options.DefaultWriteOptions
	}
	return (*options.WriteOptions)(unsafe.Pointer(opts))
}