Skip to content

Commit

Permalink
Merge pull request #74 from caesarxuchao/limit-total-copy-size
Browse files Browse the repository at this point in the history
Limiting the total copy size
  • Loading branch information
evanphx committed Feb 2, 2019
2 parents d402050 + 767c9e7 commit 498b574
Show file tree
Hide file tree
Showing 4 changed files with 109 additions and 17 deletions.
4 changes: 4 additions & 0 deletions README.md
Original file line number Diff line number Diff line change
Expand Up @@ -42,6 +42,10 @@ go get -u github.com/evanphx/json-patch
which limits the increase of array length caused by each operation. It
defaults to 0, which means there is no limit.

* There is a global configuration variable `jsonpatch.AccumulatedCopySizeLimit`,
which limits the total size increase in bytes caused by "copy" operations in a
patch. It defaults to 0, which means there is no limit.

## Create and apply a merge patch
Given both an original JSON document and a modified JSON document, you can create
a [Merge Patch](https://tools.ietf.org/html/rfc7396) document.
Expand Down
38 changes: 38 additions & 0 deletions errors.go
Original file line number Diff line number Diff line change
@@ -0,0 +1,38 @@
package jsonpatch

import "fmt"

// AccumulatedCopySizeError reports that the total byte-size growth produced
// by "copy" operations while applying a patch went over the configured limit.
type AccumulatedCopySizeError struct {
	limit       int64
	accumulated int64
}

// NewAccumulatedCopySizeError returns an AccumulatedCopySizeError.
func NewAccumulatedCopySizeError(l, a int64) *AccumulatedCopySizeError {
	return &AccumulatedCopySizeError{
		limit:       l,
		accumulated: a,
	}
}

// Error implements the error interface.
func (e *AccumulatedCopySizeError) Error() string {
	return fmt.Sprintf("Unable to complete the copy, the accumulated size increase of copy is %d, exceeding the limit %d", e.accumulated, e.limit)
}

// ArraySizeError reports that a patch operation tried to create an array
// larger than the configured size limit.
type ArraySizeError struct {
	limit int
	size  int
}

// NewArraySizeError returns an ArraySizeError.
func NewArraySizeError(l, s int) *ArraySizeError {
	return &ArraySizeError{
		limit: l,
		size:  s,
	}
}

// Error implements the error interface.
func (e *ArraySizeError) Error() string {
	return fmt.Sprintf("Unable to create array of size %d, limit is %d", e.size, e.limit)
}
39 changes: 27 additions & 12 deletions patch.go
Original file line number Diff line number Diff line change
Expand Up @@ -14,9 +14,17 @@ const (
eAry
)

var (
	// SupportNegativeIndices decides whether to support non-standard practice of
	// allowing negative indices to mean indices starting at the end of an array.
	// Defaults to true.
	SupportNegativeIndices bool = true
	// ArraySizeLimit caps the size of an array that a patch operation may
	// create. 0 (the default) means no limit.
	ArraySizeLimit int = 0
	// ArraySizeAdditionLimit caps the increase of array length caused by each
	// operation. 0 (the default) means no limit.
	ArraySizeAdditionLimit int = 0
	// AccumulatedCopySizeLimit limits the total size increase in bytes caused by
	// "copy" operations in a patch. 0 (the default) means no limit.
	AccumulatedCopySizeLimit int64 = 0
)

type lazyNode struct {
raw *json.RawMessage
Expand Down Expand Up @@ -65,17 +73,18 @@ func (n *lazyNode) UnmarshalJSON(data []byte) error {
return nil
}

func deepCopy(src *lazyNode) (*lazyNode, error) {
func deepCopy(src *lazyNode) (*lazyNode, int, error) {
if src == nil {
return nil, nil
return nil, 0, nil
}
a, err := src.MarshalJSON()
if err != nil {
return nil, err
return nil, 0, err
}
ra := make(json.RawMessage, len(a))
sz := len(a)
ra := make(json.RawMessage, sz)
copy(ra, a)
return newLazyNode(&ra), nil
return newLazyNode(&ra), sz, nil
}

func (n *lazyNode) intoDoc() (*partialDoc, error) {
Expand Down Expand Up @@ -381,7 +390,7 @@ func (d *partialArray) set(key string, val *lazyNode) error {
}

if ArraySizeLimit > 0 && sz > ArraySizeLimit {
return fmt.Errorf("Unable to create array of size %d, limit is %d", sz, ArraySizeLimit)
return NewArraySizeError(ArraySizeLimit, sz)
}

ary := make([]*lazyNode, sz)
Expand Down Expand Up @@ -590,7 +599,7 @@ func (p Patch) test(doc *container, op operation) error {
return fmt.Errorf("Testing value %s failed", path)
}

func (p Patch) copy(doc *container, op operation) error {
func (p Patch) copy(doc *container, op operation, accumulatedCopySize *int64) error {
from := op.from()

con, key := findObject(doc, from)
Expand All @@ -612,10 +621,14 @@ func (p Patch) copy(doc *container, op operation) error {
return fmt.Errorf("jsonpatch copy operation does not apply: doc is missing destination path: %s", path)
}

valCopy, err := deepCopy(val)
valCopy, sz, err := deepCopy(val)
if err != nil {
return err
}
(*accumulatedCopySize) += int64(sz)
if AccumulatedCopySizeLimit > 0 && *accumulatedCopySize > AccumulatedCopySizeLimit {
return NewAccumulatedCopySizeError(AccumulatedCopySizeLimit, *accumulatedCopySize)
}

return con.add(key, valCopy)
}
Expand Down Expand Up @@ -670,6 +683,8 @@ func (p Patch) ApplyIndent(doc []byte, indent string) ([]byte, error) {

err = nil

var accumulatedCopySize int64

for _, op := range p {
switch op.kind() {
case "add":
Expand All @@ -683,7 +698,7 @@ func (p Patch) ApplyIndent(doc []byte, indent string) ([]byte, error) {
case "test":
err = p.test(&pd, op)
case "copy":
err = p.copy(&pd, op)
err = p.copy(&pd, op, &accumulatedCopySize)
default:
err = fmt.Errorf("Unexpected kind: %s", op.kind())
}
Expand Down
45 changes: 40 additions & 5 deletions patch_test.go
Original file line number Diff line number Diff line change
Expand Up @@ -8,11 +8,6 @@ import (
"testing"
)

func init() {
ArraySizeLimit = 1000
ArraySizeAdditionLimit = 10
}

func reformatJSON(j string) string {
buf := new(bytes.Buffer)

Expand Down Expand Up @@ -52,6 +47,14 @@ type Case struct {
doc, patch, result string
}

// repeatedA returns a string consisting of r copies of the letter "A".
func repeatedA(r int) string {
	buf := make([]byte, r)
	for i := range buf {
		buf[i] = 'A'
	}
	return string(buf)
}

var Cases = []Case{
{
`{ "foo": "bar"}`,
Expand Down Expand Up @@ -208,6 +211,14 @@ var Cases = []Case{
`[ { "op": "replace", "path": "/bar/0", "value": null } ]`,
`{ "bar": [null]}`,
},
{
fmt.Sprintf(`{ "foo": ["A", %q] }`, repeatedA(48)),
// The wrapping quotes around 'A's are included in the copy
// size, so each copy operation increases the size by 50 bytes.
`[ { "op": "copy", "path": "/foo/-", "from": "/foo/1" },
{ "op": "copy", "path": "/foo/-", "from": "/foo/1" }]`,
fmt.Sprintf(`{ "foo": ["A", %q, %q, %q] }`, repeatedA(48), repeatedA(48), repeatedA(48)),
},
}

type BadCase struct {
Expand Down Expand Up @@ -303,9 +314,33 @@ var BadCases = []BadCase{
`{ "foo": ["bar"]}`,
`[{"op": "copy", "path": "/foo/2", "from": "/foo/0"}]`,
},
// Accumulated copy size cannot exceed AccumulatedCopySizeLimit.
{
fmt.Sprintf(`{ "foo": ["A", %q] }`, repeatedA(49)),
// The wrapping quotes around 'A's are included in the copy
// size, so each copy operation increases the size by 51 bytes.
`[ { "op": "copy", "path": "/foo/-", "from": "/foo/1" },
{ "op": "copy", "path": "/foo/-", "from": "/foo/1" }]`,
},
}

// configureGlobals overrides the package-level limit variables for a test and
// returns a function that restores the previous values. It mutates shared
// globals, so it is not thread safe and patch tests cannot run in parallel.
func configureGlobals(arraySizeLimit, arraySizeAdditionLimit int, accumulatedCopySizeLimit int64) func() {
	prevSize, prevAddition, prevCopy := ArraySizeLimit, ArraySizeAdditionLimit, AccumulatedCopySizeLimit
	ArraySizeLimit = arraySizeLimit
	ArraySizeAdditionLimit = arraySizeAdditionLimit
	AccumulatedCopySizeLimit = accumulatedCopySizeLimit
	return func() {
		ArraySizeLimit, ArraySizeAdditionLimit, AccumulatedCopySizeLimit = prevSize, prevAddition, prevCopy
	}
}

func TestAllCases(t *testing.T) {
defer configureGlobals(1000, 10, int64(100))()
for _, c := range Cases {
out, err := applyPatch(c.doc, c.patch)

Expand Down

0 comments on commit 498b574

Please sign in to comment.