flate: Improve level 7-9 (again)
github-june-2days-2019.json 899480979 -> 898541970
nyc-taxi-data-10M.csv 709158534 -> 706632036
enwik9 324078757 -> 322251720
github-ranks-backup.bin 435530626 -> 435809465
gob-stream 279987188 -> 278406897
silesia.tar 68817867 -> 68772170
klauspost committed Jan 17, 2022
1 parent f877063 commit ba0c049
Showing 2 changed files with 32 additions and 18 deletions.
30 changes: 20 additions & 10 deletions flate/deflate.go
@@ -10,9 +10,6 @@ import (
 	"fmt"
 	"io"
 	"math"
-	"math/bits"
-
-	comp "github.com/klauspost/compress"
 )
 
 const (
@@ -110,6 +107,7 @@ type advancedState struct {
 type compressor struct {
 	compressionLevel
 
+	h *huffmanEncoder
 	w *huffmanBitWriter
 
 	// compression algorithm
@@ -299,12 +297,13 @@ func (d *compressor) findMatch(pos int, prevHead int, lookahead, bpb int) (length, offset int, ok bool) {
 
 	// Base is 4 bytes at with an additional cost.
 	// Matches must be better than this.
-	cGain := minMatchLength*bpb - 12
+	cGain := 0
 	for i := prevHead; tries > 0; tries-- {
 		if wEnd == win[i+length] {
 			n := matchLen(win[i:i+minMatchLook], wPos)
 			if n > length {
-				newGain := n*bpb - bits.Len32(uint32(pos-i))
+				// Calculate gain. Estimate
+				newGain := d.h.bitLengthRaw(wPos[:n]) - int(offsetExtraBits[offsetCode(uint32(pos-i))]) - bpb - int(lengthExtraBits[lengthCodes[(n-3)&255]])
 				if newGain > cGain {
 					length = n
 					offset = pos - i
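For context, the new estimate replaces the old flat bits-per-byte guess with a cost comparison: a candidate match of length n at distance pos-i is only worth taking if emitting those n bytes as literals (d.h.bitLengthRaw) would cost more than the match token itself, i.e. its distance extra bits, a flat per-match charge (the bpb argument), and its length extra bits. A rough stand-alone sketch of that comparison, where litBits, distExtraBits, and lenExtraBits are simplified stand-ins for d.h.bitLengthRaw, offsetExtraBits, and lengthExtraBits (not the package's code):

package main

import (
	"fmt"
	"math/bits"
)

// litBits stands in for d.h.bitLengthRaw(wPos[:n]): the cost, in bits, of
// emitting the matched bytes as literals under the current literal table.
func litBits(p []byte, codeLen [256]uint8) int {
	total := 0
	for _, b := range p {
		total += int(codeLen[b])
	}
	return total
}

// distExtraBits approximates offsetExtraBits[offsetCode(d)]: the extra bits
// a DEFLATE distance code carries for distance d.
func distExtraBits(d uint32) int {
	if d <= 4 {
		return 0
	}
	return bits.Len32(d-1) - 2
}

// lenExtraBits approximates lengthExtraBits[lengthCodes[(n-3)&255]]: the
// extra bits a DEFLATE length code carries for match length n (3..258).
func lenExtraBits(n int) int {
	if n <= 10 || n == 258 {
		return 0
	}
	e := bits.Len32(uint32(n-3)) - 3
	if e > 5 {
		e = 5
	}
	return e
}

func main() {
	// Assume every literal currently costs about 6 bits under the block's table.
	var codeLen [256]uint8
	for i := range codeLen {
		codeLen[i] = 6
	}
	n, dist, bpb := 6, uint32(1000), 8 // hypothetical candidate match

	gain := litBits(make([]byte, n), codeLen) - distExtraBits(dist) - bpb - lenExtraBits(n)
	fmt.Println("estimated gain:", gain, "bits") // 36 - 8 - 8 - 0 = 20
}

A gain larger than the best cGain seen so far (which now starts at 0) makes the candidate the new best match.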
@@ -389,10 +388,16 @@ func (d *compressor) deflateLazy() {
 	if d.windowEnd-s.index < minMatchLength+maxMatchLength && !d.sync {
 		return
 	}
-	s.estBitsPerByte = 8
-	if !d.sync {
-		s.estBitsPerByte = comp.ShannonEntropyBits(d.window[s.index:d.windowEnd])
-		s.estBitsPerByte = int(1 + float64(s.estBitsPerByte)/float64(d.windowEnd-s.index))
+	if d.windowEnd != s.index {
+		// Get literal huffman coder.
+		if d.h == nil {
+			d.h = newHuffmanEncoder(maxFlateBlockTokens)
+		}
+		var tmp [256]uint16
+		for _, v := range d.window[s.index:d.windowEnd] {
+			tmp[v]++
+		}
+		d.h.generate(tmp[:], 15)
 	}
 
 	s.maxInsertIndex = d.windowEnd - (minMatchLength - 1)
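The table driving that estimate is rebuilt here from a byte histogram of the pending window, with d.h.generate(tmp[:], 15) producing per-byte code lengths bounded at 15 bits. As a rough illustration only, the same histogram could be turned into an approximate per-byte cost with Shannon lengths (the real length-limited Huffman lengths differ somewhat):

package main

import (
	"fmt"
	"math"
)

// approxCodeLens derives an approximate per-byte bit cost from a histogram,
// standing in for the code lengths a generated Huffman table would assign.
func approxCodeLens(hist [256]uint16) [256]uint8 {
	total := 0
	for _, v := range hist {
		total += int(v)
	}
	var lens [256]uint8
	for i, v := range hist {
		if v == 0 {
			lens[i] = 15 // unseen bytes: assume the maximum length
			continue
		}
		l := math.Ceil(-math.Log2(float64(v) / float64(total)))
		if l < 1 {
			l = 1
		} else if l > 15 {
			l = 15
		}
		lens[i] = uint8(l)
	}
	return lens
}

func main() {
	var hist [256]uint16
	for _, b := range []byte("aaaaaaaabbbbccd") {
		hist[b]++
	}
	lens := approxCodeLens(hist)
	fmt.Printf("'a' ~%d bits, 'd' ~%d bits\n", lens['a'], lens['d']) // frequent bytes are cheaper
}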
@@ -446,7 +451,12 @@
 		}
 
 		if s.chainHead-s.hashOffset >= minIndex && lookahead > prevLength && prevLength < d.lazy {
-			if newLength, newOffset, ok := d.findMatch(s.index, s.chainHead-s.hashOffset, lookahead, s.estBitsPerByte); ok {
+			e := 8
+			if d.tokens.nLits*4 > d.tokens.nMatch {
+				// We have many literals, emit any size.
+				e = -100
+			}
+			if newLength, newOffset, ok := d.findMatch(s.index, s.chainHead-s.hashOffset, lookahead, e); ok {
 				s.length = newLength
 				s.offset = newOffset
 			}
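The value passed as findMatch's last argument now acts as a flat per-match charge rather than an entropy estimate: with the default of 8, a short or distant match must still save a few bits, while dropping it to -100 when literal tokens heavily outnumber bytes covered by matches (nLits*4 > nMatch) lets almost any candidate clear the initial cGain of 0. A toy comparison with assumed costs:

package main

import "fmt"

func main() {
	// Assumed costs for one candidate match: 12 bits to emit the matched
	// bytes as literals, 7 distance extra bits, 0 length extra bits.
	litCost, distExtra, lenExtra := 12, 7, 0

	for _, e := range []int{8, -100} {
		gain := litCost - distExtra - e - lenExtra
		fmt.Printf("e=%4d  gain=%4d  beats cGain=0: %v\n", e, gain, gain > 0)
	}
}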
20 changes: 12 additions & 8 deletions flate/token.go
@@ -129,11 +129,13 @@ var offsetCodes14 = [256]uint32{
 type token uint32
 
 type tokens struct {
-	nLits int
 	extraHist [32]uint16 // codes 256->maxnumlit
 	offHist [32]uint16 // offset codes
 	litHist [256]uint16 // codes 0->255
-	n uint16 // Must be able to contain maxStoreBlockSize
+	nFilled int
+	nLits int
+	nMatch int
+	n uint16 // Must be able to contain maxStoreBlockSize
 	tokens [maxStoreBlockSize + 1]token
 }

@@ -142,7 +144,9 @@ func (t *tokens) Reset() {
 		return
 	}
 	t.n = 0
+	t.nFilled = 0
 	t.nLits = 0
+	t.nMatch = 0
 	for i := range t.litHist[:] {
 		t.litHist[i] = 0
 	}
@@ -161,12 +165,12 @@ func (t *tokens) Fill() {
 	for i, v := range t.litHist[:] {
 		if v == 0 {
 			t.litHist[i] = 1
-			t.nLits++
+			t.nFilled++
 		}
 	}
 	for i, v := range t.extraHist[:literalCount-256] {
 		if v == 0 {
-			t.nLits++
+			t.nFilled++
 			t.extraHist[i] = 1
 		}
 	}
@@ -230,8 +234,9 @@ func (t *tokens) EstimatedBits() int {
 	shannon := float32(0)
 	bits := int(0)
 	nMatches := 0
-	if t.nLits > 0 {
-		invTotal := 1.0 / float32(t.nLits)
+	total := int(t.n) + t.nFilled
+	if total > 0 {
+		invTotal := 1.0 / float32(total)
 		for _, v := range t.litHist[:] {
 			if v > 0 {
 				n := float32(v)
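EstimatedBits now normalizes its Shannon estimate by the number of emitted tokens plus the histogram slots Fill() bumped to one (t.n + t.nFilled) instead of the old nLits counter. A minimal sketch of that estimate over a toy histogram, using the standard library's math.Log2 rather than the package's float32 arithmetic:

package main

import (
	"fmt"
	"math"
)

// shannonBits estimates the coded size of a histogram: each symbol seen v
// times costs -log2(v/total) bits per occurrence.
func shannonBits(hist []uint16, total int) float64 {
	invTotal := 1.0 / float64(total)
	bits := 0.0
	for _, v := range hist {
		if v > 0 {
			n := float64(v)
			bits += -math.Log2(n*invTotal) * n
		}
	}
	return bits
}

func main() {
	litHist := []uint16{8, 4, 2, 2} // toy literal histogram
	total := 16                     // here standing in for t.n + t.nFilled
	fmt.Printf("~%.0f bits\n", shannonBits(litHist, total)) // ~28 bits
}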
@@ -275,9 +280,9 @@ func (t *tokens) AddMatch(xlength uint32, xoffset uint32) {
 	}
 	oCode := offsetCode(xoffset)
 	xoffset |= oCode << 16
-	t.nLits++
 
 	t.extraHist[lengthCodes1[uint8(xlength)]]++
+	t.nMatch += int(xlength) + baseMatchLength
 	t.offHist[oCode]++
 	t.tokens[t.n] = token(matchType | xlength<<lengthShift | xoffset)
 	t.n++
@@ -301,7 +306,6 @@ func (t *tokens) AddMatchLong(xlength int32, xoffset uint32) {
 		}
 		xlength -= xl
 		xl -= baseMatchLength
-		t.nLits++
 		t.extraHist[lengthCodes1[uint8(xl)]]++
 		t.offHist[oc]++
 		t.tokens[t.n] = token(matchType | uint32(xl)<<lengthShift | xoffset)