From c971ab617d57f614c1d84ad44efdf662cee00758 Mon Sep 17 00:00:00 2001 From: ligi Date: Wed, 20 Jun 2018 10:28:10 +0200 Subject: [PATCH 01/36] travis: use NDK 17b for Android archives (#17029) --- .travis.yml | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/.travis.yml b/.travis.yml index bf252156020bf..afa9ab503f21b 100644 --- a/.travis.yml +++ b/.travis.yml @@ -152,10 +152,10 @@ matrix: - export GOPATH=$HOME/go script: # Build the Android archive and upload it to Maven Central and Azure - - curl https://dl.google.com/android/repository/android-ndk-r16b-linux-x86_64.zip -o android-ndk-r16b.zip - - unzip -q android-ndk-r16b.zip && rm android-ndk-r16b.zip - - mv android-ndk-r16b $HOME - - export ANDROID_NDK=$HOME/android-ndk-r16b + - curl https://dl.google.com/android/repository/android-ndk-r17b-linux-x86_64.zip -o android-ndk-r17b.zip + - unzip -q android-ndk-r17b.zip && rm android-ndk-r17b.zip + - mv android-ndk-r17b $HOME + - export ANDROID_NDK=$HOME/android-ndk-r17b - mkdir -p $GOPATH/src/github.com/ethereum - ln -s `pwd` $GOPATH/src/github.com/ethereum From 4210dd150074f527af2e8c3abb400e771260ca68 Mon Sep 17 00:00:00 2001 From: Martin Holst Swende Date: Tue, 12 Jun 2018 10:32:29 +0200 Subject: [PATCH 02/36] tracers: fix err in 4byte, add some opcode analysis tools --- eth/tracers/internal/tracers/4byte_tracer.js | 2 +- eth/tracers/internal/tracers/assets.go | 76 ++++++++++++++++++- eth/tracers/internal/tracers/bigram_tracer.js | 47 ++++++++++++ .../internal/tracers/trigram_tracer.js | 49 ++++++++++++ .../internal/tracers/unigram_tracer.js | 43 +++++++++++ 5 files changed, 214 insertions(+), 3 deletions(-) create mode 100644 eth/tracers/internal/tracers/bigram_tracer.js create mode 100644 eth/tracers/internal/tracers/trigram_tracer.js create mode 100644 eth/tracers/internal/tracers/unigram_tracer.js diff --git a/eth/tracers/internal/tracers/4byte_tracer.js b/eth/tracers/internal/tracers/4byte_tracer.js index 2629aba3cf0d2..462b4ad4cb550 100644 --- a/eth/tracers/internal/tracers/4byte_tracer.js +++ b/eth/tracers/internal/tracers/4byte_tracer.js @@ -60,7 +60,7 @@ return; } // Skip any pre-compile invocations, those are just fancy opcodes - if (isPrecompiled(toAddress(log.stack.peek(1)))) { + if (isPrecompiled(toAddress(log.stack.peek(1).toString(16)))) { return; } // Gather internal call details diff --git a/eth/tracers/internal/tracers/assets.go b/eth/tracers/internal/tracers/assets.go index 2fdf5a646d37d..a3963b53b81ac 100644 --- a/eth/tracers/internal/tracers/assets.go +++ b/eth/tracers/internal/tracers/assets.go @@ -1,11 +1,14 @@ // Code generated by go-bindata. DO NOT EDIT. 
// sources: // 4byte_tracer.js +// bigram_tracer.js // call_tracer.js // evmdis_tracer.js // noop_tracer.js // opcount_tracer.js // prestate_tracer.js +// trigram_tracer.js +// unigram_tracer.js package tracers @@ -74,7 +77,7 @@ func (fi bindataFileInfo) Sys() interface{} { return nil } -var __4byte_tracerJs = []byte("\x1f\x8b\x08\x00\x00\x00\x00\x00\x00\xff\x94\x56\x5b\x6f\xdb\x4a\x0e\x7e\xb6\x7f\x05\xd7\x2f\xb5\x51\x59\x8e\x2f\x89\x2f\xd9\x16\xf0\xe6\xa4\x6d\x80\x9c\x24\x88\xdd\x3d\x28\x16\xfb\x30\x9e\xa1\xac\xd9\xc8\x33\xc2\x0c\xe5\x4b\x73\xf2\xdf\x17\x1c\x49\x89\x93\xd3\x62\xbb\x4f\x96\x47\xc3\x8f\x1f\xc9\x8f\xa4\x7a\x3d\xb8\xb0\xf9\xc1\xe9\x75\x4a\x30\x38\xe9\x8f\x61\x99\x22\xac\x6d\x17\x29\x45\x87\xc5\x06\xe6\x05\xa5\xd6\xf9\x66\xaf\x07\xcb\x54\x7b\x48\x74\x86\xa0\x3d\xe4\xc2\x11\xd8\x04\xe8\xcd\xfd\x4c\xaf\x9c\x70\x87\xb8\xd9\xeb\x95\x36\x3f\x7c\xcd\x08\x89\x43\x04\x6f\x13\xda\x09\x87\x33\x38\xd8\x02\xa4\x30\xe0\x50\x69\x4f\x4e\xaf\x0a\x42\xd0\x04\xc2\xa8\x9e\x75\xb0\xb1\x4a\x27\x07\x86\xd4\x04\x85\x51\xe8\x82\x6b\x42\xb7\xf1\x35\x8f\xcf\x37\x5f\xe1\x1a\xbd\x47\x07\x9f\xd1\xa0\x13\x19\xdc\x15\xab\x4c\x4b\xb8\xd6\x12\x8d\x47\x10\x1e\x72\x3e\xf1\x29\x2a\x58\x05\x38\x36\xfc\xc4\x54\x16\x15\x15\xf8\x64\x0b\xa3\x04\x69\x6b\x22\x40\xcd\xcc\x61\x8b\xce\x6b\x6b\x60\x58\xbb\xaa\x00\x23\xb0\x8e\x41\xda\x82\x38\x00\x07\x36\x67\xbb\x0e\x08\x73\x80\x4c\xd0\x8b\xe9\x2f\x24\xe4\x25\x6e\x05\xda\x04\x37\xa9\xcd\x11\x28\x15\xc4\x51\xef\x74\x96\xc1\x0a\xa1\xf0\x98\x14\x59\xc4\x68\xab\x82\xe0\x8f\xab\xe5\x97\xdb\xaf\x4b\x98\xdf\x7c\x83\x3f\xe6\xf7\xf7\xf3\x9b\xe5\xb7\x73\xd8\x69\x4a\x6d\x41\x80\x5b\x2c\xa1\xf4\x26\xcf\x34\x2a\xd8\x09\xe7\x84\xa1\x03\xd8\x84\x11\x7e\xbf\xbc\xbf\xf8\x32\xbf\x59\xce\xff\x71\x75\x7d\xb5\xfc\x06\xd6\xc1\xa7\xab\xe5\xcd\xe5\x62\x01\x9f\x6e\xef\x61\x0e\x77\xf3\xfb\xe5\xd5\xc5\xd7\xeb\xf9\x3d\xdc\x7d\xbd\xbf\xbb\x5d\x5c\xc6\xb0\x40\x66\x85\x6c\xff\xbf\x73\x9e\x84\xea\x39\x04\x85\x24\x74\xe6\xeb\x4c\x7c\xb3\x05\xf8\xd4\x16\x99\x82\x54\x6c\x11\x1c\x4a\xd4\x5b\x54\x20\x40\xda\xfc\xf0\xcb\x45\x65\x2c\x91\x59\xb3\x0e\x31\xff\x54\x90\x70\x95\x80\xb1\x14\x81\x47\x84\xbf\xa7\x44\xf9\xac\xd7\xdb\xed\x76\xf1\xda\x14\xb1\x75\xeb\x5e\x56\xc2\xf9\xde\xc7\xb8\xc9\x98\xa3\xd5\x81\x70\xe9\x84\x44\x07\x1e\x85\x93\x29\xfa\x10\x4c\x78\xd1\xd5\x0a\x0d\xe9\x44\xa3\xf3\x11\x8b\x14\xa4\xcd\x32\x94\xe4\x99\xc1\x26\x5c\xcc\xad\xa7\x6e\xee\xac\x44\xef\xb5\x59\x73\xe0\x70\x45\xaf\x2e\xc2\x06\x29\xb5\xca\xc3\x11\xdc\xdb\x68\xbc\xfe\x8e\x75\x36\x7c\x91\x97\x65\x54\x82\x44\x04\xde\x86\xe8\xc1\x21\xcb\x0c\x15\x78\xbd\x36\x82\x0a\x87\xa1\x97\x56\x08\x1b\x41\x92\xc5\x2e\xd6\x42\x1b\x4f\x7f\x01\x64\x9c\xba\x22\x97\x7b\xb1\xc9\x33\x9c\xf1\x33\xc0\x47\x50\xb8\x2a\xd6\x31\x71\x0a\x96\x4e\x18\x2f\x24\x8b\xbb\x0d\xad\x93\xfd\xa0\x3f\xc2\xd3\xe9\x18\x87\xa7\x4a\x9c\x4c\x86\x67\xd3\x41\x72\x3a\x9c\x9c\xf5\x47\x7d\x3c\x9b\x26\xa3\x31\x4e\xc7\xc3\xd5\x40\x9e\x9e\xe1\x58\x4c\x4e\xc6\xc3\x55\x1f\xc5\xc9\x24\x51\xe3\xd3\x71\x1f\xa7\x0a\x5b\x11\x3c\x06\x60\x37\x83\xd6\x51\xa6\x5b\x4f\x9d\xd2\xfb\x63\xf9\x03\x70\xb2\x1f\x8c\x95\x1c\x4c\xc7\xd8\xed\x0f\x26\x33\xe8\x47\x2f\x6f\x86\x13\x29\x47\x93\x61\xbf\x7b\x32\x83\xc1\xd1\xf9\xe9\x60\x94\x0c\x27\x93\x69\x77\x7a\xf6\xda\x40\xa8\xe4\x74\x9a\x4c\xa7\xdd\xc1\xe4\x0d\x94\x1c\x4c\xfa\xaa\x3f\x45\x86\xea\x97\xc7\x4f\xcd\xc7\x66\x83\x07\x8e\xf2\x20\xd6\x6b\x87\x6b\x41\x58\x56\x2d\x30\x0e\x2f\x12\x1e\x16\x71\xb3\xc1\xcf\x33\x78\x7c\x8a\x9a\xc1\x46\x8a\x2c\x5b\x1e\x72\x56\x35\x15\xce\x78\x78\x97\x88\xcc\xe3\xbb\xa0\x0b\x63\x4d\x97\x2f\x78\x1e\x1f\x01\x2f\x47\x7c\xe8\x6a\xa3\x70\x1f\x2
e\xf0\x51\xa2\x9d\x27\x1e\xb3\x62\x13\x10\x45\xc2\xd3\xe4\xdd\x56\x64\x05\xbe\x8b\x40\xc7\x18\xc3\x06\x37\x5c\x54\xe1\x28\x6e\x36\x6a\x97\x33\x48\x0a\x53\x56\xca\xe6\x9e\x5c\xe7\xb1\xd9\x68\xf8\x9d\x26\x99\x1e\x1d\x48\xe1\x11\x5a\x17\xf3\xeb\xeb\xd6\x0c\x5e\xfe\x5c\xdc\xfe\x76\xd9\x9a\x35\x1b\x0d\x76\xb9\x16\x2c\x6d\xa5\x5c\x04\x5b\x91\x45\xa5\xbb\xea\xc7\x7f\x0f\x0f\xb6\xa0\xfa\xd7\x7f\x67\xb3\x32\x5e\x18\x9e\x43\xaf\x07\x9e\x84\x7c\x80\x9c\x1c\x90\x2d\xcd\x9a\xcf\xae\x7f\xbb\xbc\xbe\xfc\x3c\x5f\x5e\xbe\xa2\xb0\x58\xce\x97\x57\x17\xe5\xd1\x5f\x49\xfc\x1f\xfe\x07\x3f\xf3\xdf\x68\x3c\x35\x9f\x6f\x85\x9a\x9c\x37\x1b\x75\xd5\x3c\xf1\x9c\xf2\x3c\x8d\xc2\x18\xd1\x3c\x3c\xb9\x2c\x55\x6b\x86\x3e\xe7\x8e\xe1\x0e\x8a\x9b\x8d\x70\xff\x28\xdf\x5a\x45\xa1\xb9\x42\x86\xb7\xc2\xc1\x03\x1e\xe0\x03\xb4\x5a\xf0\x1e\xc8\x7e\xc1\x7d\x5b\xab\x0e\xbc\x87\x56\x97\x4f\xf8\xe6\x79\xb3\xd1\xa0\x54\xfb\x58\x2b\xff\xaf\x07\x3c\xfc\x1b\x3e\xc0\xeb\xff\xef\xa1\x0f\x7f\xfe\x09\xfd\x57\x34\x31\xe7\x85\xa1\xcd\xd6\x3e\xa0\x0a\x92\xe1\x01\x70\x00\x9b\x4b\xab\xaa\x8d\xc1\x11\xfc\xf3\x77\xc0\x3d\xca\x82\xd0\x07\xba\x98\x1f\xb1\xcd\xec\x3a\x02\xb5\xea\x00\xb3\xed\xf5\x60\xf1\xa0\xf3\xb0\xb8\x4a\x14\x5f\xc2\xf0\x46\x34\x96\x40\x1b\x42\x67\x44\x16\xa4\xed\xab\xf8\x24\xd5\x7c\x6b\xf5\x31\x6a\x6c\xf3\x98\xec\x82\x9c\x36\xeb\x76\xa7\xc3\x31\xea\x04\xda\x7f\x93\x54\xfa\xaa\xd2\x7f\x5e\x15\xe3\xd8\x75\xee\xb0\x2b\xed\x26\x0f\x5f\x19\x66\x6b\x65\xd8\xc3\x3e\x02\x4a\x2d\xef\x6f\x87\xf0\x9f\xc2\x13\x24\xc2\xc8\x67\xa2\x15\xbe\xf6\x77\x0e\x2b\x63\xd5\x26\x3b\x57\xca\xa1\xf7\x81\x51\x50\x42\xcc\x6d\xd6\xee\x77\x3a\x9d\x9f\xf1\xf8\x2c\xc2\xba\x7f\x15\x6b\xbd\xb7\xaa\x90\xb5\x59\x7c\x87\x0f\xf0\x06\x54\x12\x17\xaa\x13\x87\xf6\xbc\x4d\xda\xcf\x41\x87\xeb\x1f\x3f\xc0\xa8\x72\x59\x42\xdc\x26\xc9\x8f\x30\xde\xd8\x97\xca\x08\x22\x0b\x41\xb0\xce\xdd\x21\xf6\xbc\xa9\xda\x01\x24\xaa\xb0\xde\xc3\xa8\x13\x05\x6a\xdd\x51\xa7\x8a\xa7\x56\x4b\x22\x8a\x8c\x8e\xe5\xb2\x4b\xab\x4f\x02\x21\xa9\x10\x59\xa5\x10\xfe\xbc\xb1\x09\x08\x53\x8b\x28\x29\x97\x75\x23\xd8\xff\x50\x36\x50\xbb\x70\xe8\x7f\xe4\x83\x93\xc7\x7e\x6a\x3d\x85\x35\xbf\x42\xee\x29\x42\x27\xf8\x3b\xc7\x6e\xab\xae\xaa\xe6\x64\x80\x2b\xc7\x1f\xe7\xbf\x02\xae\x76\x15\x2f\x8c\xb0\x47\x1b\xe5\xf9\x11\x29\x49\xfb\x17\x1d\xd7\xfd\x6b\x0b\x1e\x99\x5c\x43\xee\x59\x10\x99\xb7\x55\x55\x24\xed\x63\x6d\xf2\x82\xe2\x0c\xcd\x9a\xd2\xe3\x0a\x1d\x25\xbd\xcc\xf4\xf3\xe5\x08\x4e\xa2\x90\xe8\xb7\xe6\xdd\x51\xe7\xf5\x60\xa9\x5b\xb8\x6c\xda\xa7\xe6\x7f\x03\x00\x00\xff\xff\x8b\x90\x53\x6e\x68\x0b\x00\x00") +var __4byte_tracerJs = 
[]byte("\x1f\x8b\x08\x00\x00\x00\x00\x00\x00\xff\x94\x56\x5b\x6f\xdb\x4a\x0e\x7e\xb6\x7f\x05\xd7\x2f\xb5\x51\x59\x8e\x2f\x89\x2f\xd9\x16\xf0\xe6\xa4\x6d\x80\x9c\x24\x88\xdd\x3d\x28\x16\xfb\x30\x9e\xa1\xac\xd9\xc8\x33\xc2\x0c\xe5\x4b\x73\xf2\xdf\x17\x1c\x49\x89\x93\xd3\x62\xbb\x4f\x96\x47\xc3\x8f\x1f\xc9\x8f\xa4\x7a\x3d\xb8\xb0\xf9\xc1\xe9\x75\x4a\x30\x38\xe9\x8f\x61\x99\x22\xac\x6d\x17\x29\x45\x87\xc5\x06\xe6\x05\xa5\xd6\xf9\x66\xaf\x07\xcb\x54\x7b\x48\x74\x86\xa0\x3d\xe4\xc2\x11\xd8\x04\xe8\xcd\xfd\x4c\xaf\x9c\x70\x87\xb8\xd9\xeb\x95\x36\x3f\x7c\xcd\x08\x89\x43\x04\x6f\x13\xda\x09\x87\x33\x38\xd8\x02\xa4\x30\xe0\x50\x69\x4f\x4e\xaf\x0a\x42\xd0\x04\xc2\xa8\x9e\x75\xb0\xb1\x4a\x27\x07\x86\xd4\x04\x85\x51\xe8\x82\x6b\x42\xb7\xf1\x35\x8f\xcf\x37\x5f\xe1\x1a\xbd\x47\x07\x9f\xd1\xa0\x13\x19\xdc\x15\xab\x4c\x4b\xb8\xd6\x12\x8d\x47\x10\x1e\x72\x3e\xf1\x29\x2a\x58\x05\x38\x36\xfc\xc4\x54\x16\x15\x15\xf8\x64\x0b\xa3\x04\x69\x6b\x22\x40\xcd\xcc\x61\x8b\xce\x6b\x6b\x60\x58\xbb\xaa\x00\x23\xb0\x8e\x41\xda\x82\x38\x00\x07\x36\x67\xbb\x0e\x08\x73\x80\x4c\xd0\x8b\xe9\x2f\x24\xe4\x25\x6e\x05\xda\x04\x37\xa9\xcd\x11\x28\x15\xc4\x51\xef\x74\x96\xc1\x0a\xa1\xf0\x98\x14\x59\xc4\x68\xab\x82\xe0\x8f\xab\xe5\x97\xdb\xaf\x4b\x98\xdf\x7c\x83\x3f\xe6\xf7\xf7\xf3\x9b\xe5\xb7\x73\xd8\x69\x4a\x6d\x41\x80\x5b\x2c\xa1\xf4\x26\xcf\x34\x2a\xd8\x09\xe7\x84\xa1\x03\xd8\x84\x11\x7e\xbf\xbc\xbf\xf8\x32\xbf\x59\xce\xff\x71\x75\x7d\xb5\xfc\x06\xd6\xc1\xa7\xab\xe5\xcd\xe5\x62\x01\x9f\x6e\xef\x61\x0e\x77\xf3\xfb\xe5\xd5\xc5\xd7\xeb\xf9\x3d\xdc\x7d\xbd\xbf\xbb\x5d\x5c\xc6\xb0\x40\x66\x85\x6c\xff\xbf\x73\x9e\x84\xea\x39\x04\x85\x24\x74\xe6\xeb\x4c\x7c\xb3\x05\xf8\xd4\x16\x99\x82\x54\x6c\x11\x1c\x4a\xd4\x5b\x54\x20\x40\xda\xfc\xf0\xcb\x45\x65\x2c\x91\x59\xb3\x0e\x31\xff\x54\x90\x70\x95\x80\xb1\x14\x81\x47\x84\xbf\xa7\x44\xf9\xac\xd7\xdb\xed\x76\xf1\xda\x14\xb1\x75\xeb\x5e\x56\xc2\xf9\xde\xc7\xb8\xc9\x98\xa3\xd5\x81\x70\xe9\x84\x44\x07\x1e\x85\x93\x29\xfa\x10\x4c\x78\xd1\xd5\x0a\x0d\xe9\x44\xa3\xf3\x11\x8b\x14\xa4\xcd\x32\x94\xe4\x99\xc1\x26\x5c\xcc\xad\xa7\x6e\xee\xac\x44\xef\xb5\x59\x73\xe0\x70\x45\xaf\x2e\xc2\x06\x29\xb5\xca\xc3\x11\xdc\xdb\x68\xbc\xfe\x8e\x75\x36\x7c\x91\x97\x65\x54\x82\x44\x04\xde\x86\xe8\xc1\x21\xcb\x0c\x15\x78\xbd\x36\x82\x0a\x87\xa1\x97\x56\x08\x1b\x41\x92\xc5\x2e\xd6\x42\x1b\x4f\x7f\x01\x64\x9c\xba\x22\x97\x7b\xb1\xc9\x33\x9c\xf1\x33\xc0\x47\x50\xb8\x2a\xd6\x31\x71\x0a\x96\x4e\x18\x2f\x24\x8b\xbb\x0d\xad\x93\xfd\xa0\x3f\xc2\xd3\xe9\x18\x87\xa7\x4a\x9c\x4c\x86\x67\xd3\x41\x72\x3a\x9c\x9c\xf5\x47\x7d\x3c\x9b\x26\xa3\x31\x4e\xc7\xc3\xd5\x40\x9e\x9e\xe1\x58\x4c\x4e\xc6\xc3\x55\x1f\xc5\xc9\x24\x51\xe3\xd3\x71\x1f\xa7\x0a\x5b\x11\x3c\x06\x60\x37\x83\xd6\x51\xa6\x5b\x4f\x9d\xd2\xfb\x63\xf9\x03\x70\xb2\x1f\x8c\x95\x1c\x4c\xc7\xd8\xed\x0f\x26\x33\xe8\x47\x2f\x6f\x86\x13\x29\x47\x93\x61\xbf\x7b\x32\x83\xc1\xd1\xf9\xe9\x60\x94\x0c\x27\x93\x69\x77\x7a\xf6\xda\x40\xa8\xe4\x74\x9a\x4c\xa7\xdd\xc1\xe4\x0d\x94\x1c\x4c\xfa\xaa\x3f\x45\x86\xea\x97\xc7\x4f\xcd\xc7\x66\x83\x07\x8e\xf2\x20\xd6\x6b\x87\x6b\x41\x58\x56\x2d\x30\x0e\x2f\x12\x1e\x16\x71\xb3\xc1\xcf\x33\x78\x7c\x8a\x9a\xc1\x46\x8a\x2c\x5b\x1e\x72\x56\x35\x15\xce\x78\x78\x97\x88\xcc\xe3\xbb\xa0\x0b\x63\x4d\x97\x2f\x78\x1e\x1f\x01\x2f\x47\x7c\xe8\x6a\xa3\x70\x1f\x2e\xf0\x51\xa2\x9d\x27\x1e\xb3\x62\x13\x10\x45\xc2\xd3\xe4\xdd\x56\x64\x05\xbe\x8b\x40\xc7\x18\xc3\x06\x37\x5c\x54\xe1\x28\x6e\x36\x6a\x97\x33\x48\x0a\x53\x56\xca\xe6\x9e\x5c\xe7\xb1\xd9\x68\xf8\x9d\x26\x99\x1e\x1d\x48\xe1\x11\x5a\x17\xf3\xeb\xeb\xd6\x0c\x5e\xfe\x5c\xdc\xfe\x76\xd9\x9a\x35\x1b\x0d\x76\xb9\x16\x2
c\x6d\xa5\x5c\x04\x5b\x91\x45\xa5\xbb\xea\xc7\x7f\x0f\x0f\xb6\xa0\xfa\xd7\x7f\x67\xb3\x32\x5e\x18\x9e\x43\xaf\x07\x9e\x84\x7c\x80\x9c\x1c\x90\x2d\xcd\x9a\xcf\xae\x7f\xbb\xbc\xbe\xfc\x3c\x5f\x5e\xbe\xa2\xb0\x58\xce\x97\x57\x17\xe5\xd1\x5f\x49\xfc\x1f\xfe\x07\x3f\xf3\xdf\x68\x3c\x35\x9f\x6f\x85\x9a\x9c\x37\x1b\x75\xd5\x3c\xf1\x9c\xf2\x3c\x8d\xc2\x18\xd1\x3c\x3c\xb9\x2c\x55\x6b\x86\x3e\xe7\x8e\xe1\x0e\x8a\x9b\x8d\x70\xff\x28\xdf\x5a\x45\xa1\xb9\x42\x86\xb7\xc2\xc1\x03\x1e\xe0\x03\xb4\x5a\xf0\x1e\xc8\x7e\xc1\x7d\x5b\xab\x0e\xbc\x87\x56\x97\x4f\xf8\xe6\x79\xb3\xd1\xa0\x54\xfb\x58\x2b\xff\xaf\x07\x3c\xfc\x1b\x3e\xc0\xeb\xff\xef\xa1\x0f\x7f\xfe\x09\xfd\x57\x34\x31\xe7\x85\xa1\xcd\xd6\x3e\xa0\x0a\x92\xe1\x01\x70\x00\x9b\x4b\xab\xaa\x8d\xc1\x11\xfc\xf3\x77\xc0\x3d\xca\x82\xd0\x07\xba\x98\x1f\xb1\xcd\xec\x3a\x02\xb5\xea\x00\xb3\xed\xf5\x60\xf1\xa0\xf3\xb0\xb8\x4a\x14\x5f\xc2\xf0\x46\x34\x96\x40\x1b\x42\x67\x44\x16\xa4\xed\xab\xf8\x24\xd5\x7c\x6b\xf5\x31\x6a\x6c\xf3\x98\xec\x82\x9c\x36\xeb\x76\xa7\xc3\x31\xea\x04\xda\x7f\x93\x54\xfa\xaa\xd2\x7f\x5e\x15\xe3\xd8\x75\xee\xb0\x2b\xed\x26\x0f\x5f\x19\x66\x6b\x65\xd8\xc3\x3e\x02\x4a\x2d\xef\x6f\x87\xf0\x9f\xc2\x13\x24\xc2\xc8\x67\xa2\x15\xbe\xf6\x77\x0e\x2b\x63\xd5\x26\x3b\x57\xca\xa1\xf7\x81\x51\x50\x42\xcc\x6d\xd6\xee\x77\x5e\xc8\xf5\xcf\x3a\x9d\xce\xcf\x48\x7d\x16\x61\xf7\xbf\x0a\xbc\x5e\x62\x55\xfc\xda\x2c\xbe\xc3\x07\x78\xe3\x41\x12\x57\xad\x13\x87\x5e\xbd\x4d\xda\xcf\x19\x08\xd7\x3f\x7e\x80\x51\xe5\xb2\x84\xb8\x4d\x92\x1f\x61\xbc\xb1\x2f\x65\x12\x14\x17\x22\x62\xd1\xbb\x43\xec\x79\x6d\xb5\x03\x48\x54\x61\xbd\x87\x51\x27\x0a\xd4\xba\xa3\x4e\x15\x4f\x2d\x9d\x44\x14\x19\x1d\x6b\x67\x97\x56\xdf\x07\x42\x52\x21\xb2\x4a\x2e\xfc\xad\x63\x13\x10\xa6\x56\x54\x52\x6e\xee\x46\xb0\xff\xa1\x86\xa0\x76\xe1\xd0\xff\xc8\x07\x27\x8f\xfd\xd4\xe2\x0a\x3b\x7f\x85\xdc\x60\x84\x4e\xf0\x47\x8f\xdd\x56\x2d\x56\x0d\xcd\x00\x57\xce\x42\xce\x7f\x05\x5c\x2d\x2e\xde\x1e\x61\xa9\x36\xca\xf3\x23\x52\x92\xf6\x2f\xa2\xae\x9b\xd9\x16\x3c\x3f\xb9\x86\xdc\xc0\x20\x32\x6f\xab\xaa\x48\xda\xc7\xda\xe4\x05\xc5\x19\x9a\x35\xa5\xc7\x15\x3a\x4a\x7a\x99\xe9\xe7\xcb\x11\x9c\x44\x21\xd1\x6f\xcd\xbb\xa3\xce\xeb\x29\x53\xf7\x73\xd9\xc1\x4f\xcd\xff\x06\x00\x00\xff\xff\x8e\xc8\x27\x72\x75\x0b\x00\x00") func _4byte_tracerJsBytes() ([]byte, error) { return bindataRead( @@ -90,7 +93,27 @@ func _4byte_tracerJs() (*asset, error) { } info := bindataFileInfo{name: "4byte_tracer.js", size: 0, mode: os.FileMode(0), modTime: time.Unix(0, 0)} - a := &asset{bytes: bytes, info: info, digest: [32]uint8{0xdc, 0x44, 0x40, 0x64, 0xa7, 0xa2, 0x19, 0xea, 0x36, 0x7, 0xf8, 0x62, 0x5, 0x90, 0xda, 0x9c, 0xc1, 0x71, 0xab, 0xc6, 0x14, 0x63, 0xe5, 0x52, 0x34, 0xb9, 0x53, 0x9b, 0x89, 0x2, 0x5b, 0xa4}} + a := &asset{bytes: bytes, info: info, digest: [32]uint8{0xb4, 0xc5, 0x48, 0x2d, 0xd9, 0x43, 0x95, 0x93, 0x3b, 0x93, 0x2c, 0x47, 0x8c, 0x84, 0x32, 0x3c, 0x8b, 0x2e, 0xf3, 0x72, 0xc4, 0x57, 0xe6, 0x3a, 0xb3, 0xdf, 0x1d, 0xbf, 0x45, 0x3, 0xfc, 0xa}} + return a, nil +} + +var _bigram_tracerJs = 
[]byte("\x1f\x8b\x08\x00\x00\x00\x00\x00\x00\xff\x8c\x54\x5b\x6f\xdb\x36\x14\x7e\xf7\xaf\xf8\xde\x92\x20\xae\xd4\x6e\x2f\x83\x33\x0f\xd0\xb2\xa4\x35\x90\xda\x81\xad\xac\x30\x86\x3d\x50\xd2\x91\x44\x84\x26\x05\xf2\xd0\xae\x50\xe4\xbf\x17\x94\x2c\x5f\x8a\x14\x8d\x9e\x64\xf3\xbb\x9d\x0b\x15\xc7\xb8\x35\x4d\x6b\x65\x55\x33\x7e\x7b\xff\xe1\x0f\xa4\x35\xa1\x32\xef\x88\x6b\xb2\xe4\x37\x48\x3c\xd7\xc6\xba\x51\x1c\x23\xad\xa5\x43\x29\x15\x41\x3a\x34\xc2\x32\x4c\x09\xfe\x01\xaf\x64\x66\x85\x6d\xa3\x51\x1c\xf7\x9c\x57\x8f\x83\x42\x69\x89\xe0\x4c\xc9\x3b\x61\x69\x82\xd6\x78\xe4\x42\xc3\x52\x21\x1d\x5b\x99\x79\x26\x48\x86\xd0\x45\x6c\x2c\x36\xa6\x90\x65\x1b\x24\x25\xc3\xeb\x82\x6c\x67\xcd\x64\x37\x6e\xc8\xf1\x71\xfe\x84\x07\x72\x8e\x2c\x3e\x92\x26\x2b\x14\x1e\x7d\xa6\x64\x8e\x07\x99\x93\x76\x04\xe1\xd0\x84\x7f\x5c\x4d\x05\xb2\x4e\x2e\x10\xef\x43\x94\xd5\x3e\x0a\xee\x8d\xd7\x85\x60\x69\xf4\x18\x24\x43\x72\x6c\xc9\x3a\x69\x34\x7e\x1f\xac\xf6\x82\x63\x18\x1b\x44\x2e\x05\x87\x02\x2c\x4c\x13\x78\x57\x10\xba\x85\x12\x7c\xa4\xbe\xa1\x21\xc7\xba\x0b\x48\xdd\xd9\xd4\xa6\x21\x70\x2d\x38\x54\xbd\x93\x4a\x21\x23\x78\x47\xa5\x57\xe3\xa0\x96\x79\xc6\x97\x59\xfa\x69\xf1\x94\x22\x99\xaf\xf1\x25\x59\x2e\x93\x79\xba\xbe\xc1\x4e\x72\x6d\x3c\x83\xb6\xd4\x4b\xc9\x4d\xa3\x24\x15\xd8\x09\x6b\x85\xe6\x16\xa6\x0c\x0a\x9f\xef\x96\xb7\x9f\x92\x79\x9a\xfc\x3d\x7b\x98\xa5\x6b\x18\x8b\xfb\x59\x3a\xbf\x5b\xad\x70\xbf\x58\x22\xc1\x63\xb2\x4c\x67\xb7\x4f\x0f\xc9\x12\x8f\x4f\xcb\xc7\xc5\xea\x2e\xc2\x8a\x42\x2a\x0a\xfc\x5f\xf7\xbc\xec\xa6\x67\x09\x05\xb1\x90\xca\x0d\x9d\x58\x1b\x0f\x57\x1b\xaf\x0a\xd4\x62\x4b\xb0\x94\x93\xdc\x52\x01\x81\xdc\x34\xed\x9b\x87\x1a\xb4\x84\x32\xba\xea\x6a\xfe\xe9\x42\x62\x56\x42\x1b\x1e\xc3\x11\xe1\xcf\x9a\xb9\x99\xc4\xf1\x6e\xb7\x8b\x2a\xed\x23\x63\xab\x58\xf5\x72\x2e\xfe\x2b\x1a\x8d\xbe\x8d\x00\x20\x8e\x51\x4b\xc7\x61\x38\x41\x36\x37\x5e\x33\xd9\x6e\xdf\x4c\x93\x9b\x82\x90\xc9\xca\x8a\x8d\xeb\xd0\x01\x3a\xc1\xb7\x97\xf1\xc0\x55\xc2\xf1\xa2\x09\xec\xf0\x06\xd3\x90\xed\xd6\xaa\x3b\xef\x0f\x27\xb8\xb8\x38\xe0\xe9\x2b\xe5\x3e\x00\x50\x50\xc3\x75\xb0\xd9\x13\x0f\x8c\x7f\xc2\xc1\x04\xef\x0f\x1c\xc7\xd4\x39\x48\xbd\x35\xcf\x54\x74\xdd\xa6\x2d\xd9\x76\x48\xd8\x6d\x4f\x48\xff\xef\xe7\xbd\x01\xb9\xa8\x63\x07\xea\x04\xa5\xd7\x79\xf0\xbc\x54\xa6\x1a\xa3\xc8\xae\xd0\xd7\x1e\x9e\xad\x08\x1b\x8d\x29\x94\xa9\x22\xd3\x44\x6c\x56\x6c\xa5\xae\x2e\xaf\x6e\xce\x30\x7d\xdc\x1e\x56\x51\x1f\xf2\x14\x23\x4b\x5c\xee\x31\x53\x70\x2d\x5d\x74\xa8\xe5\xea\xe8\x36\xa8\x3d\x53\x8b\x13\xd8\xa2\xb9\xbe\x78\x77\x71\x6d\x9a\x9b\x33\x64\xd0\xec\x30\xa1\xed\xff\x3d\x53\xfb\xff\x0f\x52\xe1\x39\x07\x5c\x5f\x9f\x4b\xbc\x9c\xfd\x22\xe5\x08\xbf\x92\xc0\x14\x1f\x7e\x26\x72\x7c\x3b\xc9\x8e\x29\x4e\x93\x9f\x17\x8f\x69\xdf\xba\xfe\xfc\xb8\x38\xa5\xf0\x8a\x4f\xa7\xba\xab\xf7\xb7\x58\xe4\xec\x85\x3a\xd9\x14\x53\x42\xe8\x61\xd6\x65\x7f\xbf\x82\x4a\x27\xf1\xea\x74\x8f\x36\x96\xdc\x6b\x3e\x42\xa9\xce\xab\x17\x75\xfd\xed\xcc\x88\x34\x24\x87\x0d\xa6\x02\x66\x4b\x36\x7c\x99\x61\x89\xbd\xd5\x6e\x50\x0c\xb4\x52\x6a\xa1\x06\xed\xfd\x25\x66\x2b\x72\xa9\xab\x3e\x5a\x7f\x74\x92\x2d\xe7\xaf\xa7\x5b\xd7\x6b\x1e\x1b\x7f\xe8\xce\xcb\xe8\x7b\x00\x00\x00\xff\xff\x83\xb5\xcb\x27\xb0\x06\x00\x00") + +func bigram_tracerJsBytes() ([]byte, error) { + return bindataRead( + _bigram_tracerJs, + "bigram_tracer.js", + ) +} + +func bigram_tracerJs() (*asset, error) { + bytes, err := bigram_tracerJsBytes() + if err != nil { + return nil, err + } + + info := bindataFileInfo{name: "bigram_tracer.js", size: 0, mode: os.FileMode(0), modTime: time.Unix(0, 0)} + a := &asset{bytes: bytes, 
info: info, digest: [32]uint8{0x77, 0x6c, 0xd, 0x24, 0xf2, 0x49, 0xbd, 0x58, 0x8b, 0xb5, 0xd1, 0xc9, 0xcd, 0xcf, 0x5b, 0x3e, 0x5c, 0xfb, 0x14, 0x50, 0xe7, 0xe3, 0xb9, 0xd1, 0x54, 0x69, 0xe6, 0x5e, 0x45, 0xa6, 0x2c, 0x6c}} return a, nil } @@ -194,6 +217,46 @@ func prestate_tracerJs() (*asset, error) { return a, nil } +var _trigram_tracerJs = []byte("\x1f\x8b\x08\x00\x00\x00\x00\x00\x00\xff\x8c\x94\x4f\x6f\xe3\x36\x10\xc5\xef\xfe\x14\xaf\x27\x27\x88\xd7\x4a\xda\x4b\xe1\xd4\x05\xdc\x6c\xb2\x6b\x20\x6b\x07\xb6\xd2\x45\x10\xe4\x40\x4b\x23\x89\x08\x4d\x0a\xe4\xd0\x5e\x21\xc8\x77\x2f\xa8\x3f\xfe\x13\xb8\xed\xfa\x64\x70\xe6\xfd\xe6\xcd\x70\xc4\x28\xc2\x8d\x29\x2b\x2b\xf3\x82\xf1\xeb\xe5\xd5\xef\x88\x0b\x42\x6e\x3e\x11\x17\x64\xc9\xaf\x31\xf1\x5c\x18\xeb\x7a\x51\x84\xb8\x90\x0e\x99\x54\x04\xe9\x50\x0a\xcb\x30\x19\xf8\x43\xbe\x92\x2b\x2b\x6c\x35\xec\x45\x51\xa3\x39\x19\x0e\x84\xcc\x12\xc1\x99\x8c\xb7\xc2\xd2\x08\x95\xf1\x48\x84\x86\xa5\x54\x3a\xb6\x72\xe5\x99\x20\x19\x42\xa7\x91\xb1\x58\x9b\x54\x66\x55\x40\x4a\x86\xd7\x29\xd9\xba\x34\x93\x5d\xbb\xce\xc7\x97\xd9\x23\xee\xc9\x39\xb2\xf8\x42\x9a\xac\x50\x78\xf0\x2b\x25\x13\xdc\xcb\x84\xb4\x23\x08\x87\x32\x9c\xb8\x82\x52\xac\x6a\x5c\x10\xde\x05\x2b\xcb\xd6\x0a\xee\x8c\xd7\xa9\x60\x69\xf4\x00\x24\x83\x73\x6c\xc8\x3a\x69\x34\x7e\xeb\x4a\xb5\xc0\x01\x8c\x0d\x90\x33\xc1\xa1\x01\x0b\x53\x06\xdd\x39\x84\xae\xa0\x04\xef\xa5\x3f\x31\x90\x7d\xdf\x29\xa4\xae\xcb\x14\xa6\x24\x70\x21\x38\x74\xbd\x95\x4a\x61\x45\xf0\x8e\x32\xaf\x06\x81\xb6\xf2\x8c\xef\xd3\xf8\xeb\xfc\x31\xc6\x64\xf6\x84\xef\x93\xc5\x62\x32\x8b\x9f\xae\xb1\x95\x5c\x18\xcf\xa0\x0d\x35\x28\xb9\x2e\x95\xa4\x14\x5b\x61\xad\xd0\x5c\xc1\x64\x81\xf0\xed\x76\x71\xf3\x75\x32\x8b\x27\x7f\x4d\xef\xa7\xf1\x13\x8c\xc5\xdd\x34\x9e\xdd\x2e\x97\xb8\x9b\x2f\x30\xc1\xc3\x64\x11\x4f\x6f\x1e\xef\x27\x0b\x3c\x3c\x2e\x1e\xe6\xcb\xdb\x21\x96\x14\x5c\x51\xd0\xff\xff\xcc\xb3\xfa\xf6\x2c\x21\x25\x16\x52\xb9\x6e\x12\x4f\xc6\xc3\x15\xc6\xab\x14\x85\xd8\x10\x2c\x25\x24\x37\x94\x42\x20\x31\x65\xf5\xd3\x97\x1a\x58\x42\x19\x9d\xd7\x3d\xff\xeb\x42\x62\x9a\x41\x1b\x1e\xc0\x11\xe1\x8f\x82\xb9\x1c\x45\xd1\x76\xbb\x1d\xe6\xda\x0f\x8d\xcd\x23\xd5\xe0\x5c\xf4\xe7\xb0\xd7\x7b\xeb\x01\x40\x14\xa1\x90\x8e\xc3\xe5\x04\xec\x5a\x94\xb5\x2b\x2b\x73\x2b\xd6\x48\x8c\xd7\x4c\xd6\xd5\xa9\x21\x6f\x84\xb7\xf7\x41\x27\x54\xc2\xf1\xbc\x0c\xd2\xf0\x0f\xa6\x24\x5b\xef\x54\x1d\x6f\x82\x6e\x84\xe7\x7e\x7f\xd0\xef\xbf\x0c\x76\xa7\x9f\xa9\xe4\x62\x84\xcb\xe6\xa4\x65\x39\xa6\x9a\x24\xf5\xc6\xbc\x52\x5a\x8f\x94\x36\x64\x2b\x98\x32\x31\x69\xbb\x22\xc1\xe2\xdf\xdf\x40\x3f\x28\xf1\x4c\x6e\x58\x13\x82\x74\x84\xcc\xeb\x24\x14\x3f\x53\x26\x1f\x20\x5d\x9d\xe3\x6d\xc7\xdf\x08\x8b\x34\x54\xc5\x18\xca\xe4\xc3\x9c\x1a\x13\x67\xe7\xd7\xbb\x1c\x99\xe1\xac\xc9\xf9\x65\x0c\x2e\xa4\x1b\xee\xbc\x9e\xef\x49\xe1\xb7\x0b\xce\x4b\x87\x71\xd7\xdf\xf5\xe9\x9c\xcf\x6d\xd9\x1a\x7d\x9c\x63\x89\xbd\xd5\xfb\xb3\xf7\x23\xbf\xa6\x6c\xcd\x9a\x72\xc8\x66\xc9\x56\xea\xfc\xd0\x6f\xc8\x79\xa5\x0a\xe3\x23\x3f\xcf\x97\x2f\x17\xfd\x4f\xfd\x8b\xa3\xb3\xab\xe6\xcc\x94\xc7\xdd\xd6\x39\xe1\x52\x9f\x5f\xa9\x7a\x39\xd5\xe4\x2e\x78\x71\x71\xca\x26\x29\x47\xf8\x2f\x19\xc6\xb8\x3a\x25\xfc\xe0\xf8\x63\x0f\x57\x07\xc3\xfc\x10\xc0\x18\x5d\x1b\xfb\x3d\xcc\x84\x57\x7c\xb8\x3c\xdb\xa2\x7d\x11\x44\xc2\x5e\xa8\x76\x5f\xc2\xeb\x66\x32\x08\xdd\xad\x54\xd6\x7c\xab\x81\x52\x23\x4e\x2e\xd1\xbe\x8c\x25\x77\xaa\x8e\x50\xaa\xae\xd5\x40\x5d\xf3\xa5\xaf\x88\x34\x24\x87\x0f\x82\x52\x98\x0d\xd9\xf0\xca\xb7\x57\xee\x3a\x62\x90\x65\x52\x0b\xd5\xb1\xdb\x07\x81\xad\x48\xa4\xce\x1b\x6b\x4d\xe8\xc0\x5b\xc2\x3f\x0e\x97\xbb\x61\xee\x27
\xbf\x9b\xce\x7b\xef\x9f\x00\x00\x00\xff\xff\xb3\x93\x16\xd5\xfc\x06\x00\x00") + +func trigram_tracerJsBytes() ([]byte, error) { + return bindataRead( + _trigram_tracerJs, + "trigram_tracer.js", + ) +} + +func trigram_tracerJs() (*asset, error) { + bytes, err := trigram_tracerJsBytes() + if err != nil { + return nil, err + } + + info := bindataFileInfo{name: "trigram_tracer.js", size: 0, mode: os.FileMode(0), modTime: time.Unix(0, 0)} + a := &asset{bytes: bytes, info: info, digest: [32]uint8{0x40, 0x63, 0xe1, 0x42, 0x60, 0x7, 0x1b, 0x79, 0x47, 0x1, 0xa1, 0xbf, 0xc4, 0x66, 0x19, 0x9b, 0x2b, 0x5a, 0x1f, 0x82, 0x3d, 0xcf, 0xee, 0xe7, 0x60, 0x25, 0x2c, 0x4f, 0x13, 0x97, 0xc7, 0x18}} + return a, nil +} + +var _unigram_tracerJs = []byte("\x1f\x8b\x08\x00\x00\x00\x00\x00\x00\xff\x8c\x54\x4d\x6f\xdb\x46\x10\xbd\xeb\x57\xbc\xa3\x8c\xa8\xa4\xd3\x5e\x0a\xa5\x09\xc0\x1a\x76\x22\xc0\x91\x0d\x89\x6e\x60\x14\x3d\x2c\xc9\x21\xb9\xe8\x6a\x87\xd8\x9d\x95\x42\x04\xfa\xef\xc5\x92\xa2\xe5\x1a\x6e\x13\x9e\x04\xcd\xbc\x8f\x79\x33\x64\x9a\xe2\x8a\xbb\xde\xe9\xa6\x15\xfc\x7c\xf9\xf6\x57\xe4\x2d\xa1\xe1\x9f\x48\x5a\x72\x14\x76\xc8\x82\xb4\xec\xfc\x2c\x4d\x91\xb7\xda\xa3\xd6\x86\xa0\x3d\x3a\xe5\x04\x5c\x43\x5e\xf4\x1b\x5d\x38\xe5\xfa\x64\x96\xa6\x23\xe6\xd5\x72\x64\xa8\x1d\x11\x3c\xd7\x72\x50\x8e\x96\xe8\x39\xa0\x54\x16\x8e\x2a\xed\xc5\xe9\x22\x08\x41\x0b\x94\xad\x52\x76\xd8\x71\xa5\xeb\x3e\x52\x6a\x41\xb0\x15\xb9\x41\x5a\xc8\xed\xfc\xe4\xe3\xe3\xfa\x01\xb7\xe4\x3d\x39\x7c\x24\x4b\x4e\x19\xdc\x87\xc2\xe8\x12\xb7\xba\x24\xeb\x09\xca\xa3\x8b\xff\xf8\x96\x2a\x14\x03\x5d\x04\xde\x44\x2b\xdb\x93\x15\xdc\x70\xb0\x95\x12\xcd\x76\x01\xd2\xd1\x39\xf6\xe4\xbc\x66\x8b\x5f\x26\xa9\x13\xe1\x02\xec\x22\xc9\x5c\x49\x1c\xc0\x81\xbb\x88\xbb\x80\xb2\x3d\x8c\x92\x33\xf4\x07\x02\x39\xcf\x5d\x41\xdb\x41\xa6\xe5\x8e\x20\xad\x92\x38\xf5\x41\x1b\x83\x82\x10\x3c\xd5\xc1\x2c\x22\x5b\x11\x04\x5f\x56\xf9\xa7\xbb\x87\x1c\xd9\xfa\x11\x5f\xb2\xcd\x26\x5b\xe7\x8f\xef\x70\xd0\xd2\x72\x10\xd0\x9e\x46\x2a\xbd\xeb\x8c\xa6\x0a\x07\xe5\x9c\xb2\xd2\x83\xeb\xc8\xf0\xf9\x7a\x73\xf5\x29\x5b\xe7\xd9\xef\xab\xdb\x55\xfe\x08\x76\xb8\x59\xe5\xeb\xeb\xed\x16\x37\x77\x1b\x64\xb8\xcf\x36\xf9\xea\xea\xe1\x36\xdb\xe0\xfe\x61\x73\x7f\xb7\xbd\x4e\xb0\xa5\xe8\x8a\x22\xfe\xfb\x99\xd7\xc3\xf6\x1c\xa1\x22\x51\xda\xf8\x29\x89\x47\x0e\xf0\x2d\x07\x53\xa1\x55\x7b\x82\xa3\x92\xf4\x9e\x2a\x28\x94\xdc\xf5\x3f\xbc\xd4\xc8\xa5\x0c\xdb\x66\x98\xf9\x3f\x0f\x12\xab\x1a\x96\x65\x01\x4f\x84\xdf\x5a\x91\x6e\x99\xa6\x87\xc3\x21\x69\x6c\x48\xd8\x35\xa9\x19\xe9\x7c\xfa\x21\x99\xcd\xbe\xcd\x00\x20\x4d\xd1\x6a\x2f\x71\x39\x91\x76\xa7\xba\xe8\x8a\xbb\x92\x2b\xf2\x10\x46\xc9\xc1\x0a\x39\x3f\x74\xc7\xd6\x25\xbe\x1d\x17\x13\xd6\x72\xe7\xc7\x16\x0f\x1b\x76\x05\xb9\x11\x3e\xb6\xc7\xea\x12\x97\x4f\xdd\x5e\xa8\x8b\x4a\xda\xee\xf9\x6f\xaa\x86\xdc\x68\x4f\xae\x3f\x09\x8e\x77\x10\x7d\xfc\xf1\x19\xf4\x95\xca\x20\xe4\x93\x01\x1d\xa1\x4b\xd4\xc1\x96\xf1\xfa\xe6\x86\x9b\x05\xaa\xe2\x02\xe3\x14\xf1\xd9\xab\x78\x9b\x78\x0f\xc3\x4d\xc2\x5d\x22\xbc\x15\xa7\x6d\x33\xbf\x78\xf7\xd4\xa3\x6b\xcc\xa5\xd5\x3e\x89\x83\xfc\xc9\xdd\x5f\x17\x67\x7c\x7c\xfe\x55\x7b\xf3\xe6\x0c\x3c\x3e\xfd\x22\xe3\x09\xff\x83\xc2\x7b\xbc\x7d\x0d\x37\x34\xc5\x40\x26\xda\x73\x88\xb5\x0a\x46\x9e\xe7\x72\x68\x4f\x17\xad\x4a\x09\xca\x9c\xa2\x88\x6f\x27\xd7\x50\x76\x4a\xab\x1e\x6f\x2d\xb2\x0c\x14\xaf\xe6\x73\x5c\xcc\x26\x1d\x47\xfe\x35\x21\x65\xcc\x20\x36\x2d\x7d\x38\xd5\x82\xc8\x42\x0b\x39\x15\xdf\x55\xde\x93\x8b\x9f\x29\x38\x92\xe0\xac\x9f\x18\x23\xac\xd6\x56\x99\x89\xfb\x74\xd1\xe2\x54\xa9\x6d\x33\x7a\x1b\x4b\xcf\xcc\x95\xf2\xf5\xf9\xe2\x74\
x3d\x7f\x0a\x07\x1f\x70\xf9\x62\x27\xa3\xe4\x39\xe4\x97\xe1\x1e\x17\xb3\xe3\xec\x9f\x00\x00\x00\xff\xff\x8d\xba\x8d\xa8\xe6\x05\x00\x00") + +func unigram_tracerJsBytes() ([]byte, error) { + return bindataRead( + _unigram_tracerJs, + "unigram_tracer.js", + ) +} + +func unigram_tracerJs() (*asset, error) { + bytes, err := unigram_tracerJsBytes() + if err != nil { + return nil, err + } + + info := bindataFileInfo{name: "unigram_tracer.js", size: 0, mode: os.FileMode(0), modTime: time.Unix(0, 0)} + a := &asset{bytes: bytes, info: info, digest: [32]uint8{0x2f, 0x36, 0x14, 0xc2, 0xf6, 0xc3, 0x80, 0x2b, 0x4a, 0x11, 0x7d, 0xd5, 0x3e, 0xef, 0x23, 0xb5, 0xd6, 0xe6, 0xe6, 0x5, 0x41, 0xf6, 0x14, 0x7a, 0x39, 0xf7, 0xf8, 0xac, 0x89, 0x8e, 0x43, 0xe6}} + return a, nil +} + // Asset loads and returns the asset for the given name. // It returns an error if the asset could not be found or // could not be loaded. @@ -287,6 +350,8 @@ func AssetNames() []string { var _bindata = map[string]func() (*asset, error){ "4byte_tracer.js": _4byte_tracerJs, + "bigram_tracer.js": bigram_tracerJs, + "call_tracer.js": call_tracerJs, "evmdis_tracer.js": evmdis_tracerJs, @@ -296,6 +361,10 @@ var _bindata = map[string]func() (*asset, error){ "opcount_tracer.js": opcount_tracerJs, "prestate_tracer.js": prestate_tracerJs, + + "trigram_tracer.js": trigram_tracerJs, + + "unigram_tracer.js": unigram_tracerJs, } // AssetDir returns the file names below a certain @@ -340,11 +409,14 @@ type bintree struct { var _bintree = &bintree{nil, map[string]*bintree{ "4byte_tracer.js": {_4byte_tracerJs, map[string]*bintree{}}, + "bigram_tracer.js": {bigram_tracerJs, map[string]*bintree{}}, "call_tracer.js": {call_tracerJs, map[string]*bintree{}}, "evmdis_tracer.js": {evmdis_tracerJs, map[string]*bintree{}}, "noop_tracer.js": {noop_tracerJs, map[string]*bintree{}}, "opcount_tracer.js": {opcount_tracerJs, map[string]*bintree{}}, "prestate_tracer.js": {prestate_tracerJs, map[string]*bintree{}}, + "trigram_tracer.js": {trigram_tracerJs, map[string]*bintree{}}, + "unigram_tracer.js": {unigram_tracerJs, map[string]*bintree{}}, }} // RestoreAsset restores an asset under the given directory. diff --git a/eth/tracers/internal/tracers/bigram_tracer.js b/eth/tracers/internal/tracers/bigram_tracer.js new file mode 100644 index 0000000000000..421c360af98e4 --- /dev/null +++ b/eth/tracers/internal/tracers/bigram_tracer.js @@ -0,0 +1,47 @@ +// Copyright 2018 The go-ethereum Authors +// This file is part of the go-ethereum library. +// +// The go-ethereum library is free software: you can redistribute it and/or modify +// it under the terms of the GNU Lesser General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. +// +// The go-ethereum library is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU Lesser General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public License +// along with the go-ethereum library. If not, see . + +{ + // hist is the counters of opcode bigrams + hist: {}, + // lastOp is last operation + lastOp: '', + // execution depth of last op + lastDepth: 0, + // step is invoked for every opcode that the VM executes. 
+ step: function(log, db) { + var op = log.op.toString(); + var depth = log.getDepth(); + if (depth == this.lastDepth){ + var key = this.lastOp+'-'+op; + if (this.hist[key]){ + this.hist[key]++; + } + else { + this.hist[key] = 1; + } + } + this.lastOp = op; + this.lastDepth = depth; + }, + // fault is invoked when the actual execution of an opcode fails. + fault: function(log, db) {}, + // result is invoked when all the opcodes have been iterated over and returns + // the final result of the tracing. + result: function(ctx) { + return this.hist; + }, +} diff --git a/eth/tracers/internal/tracers/trigram_tracer.js b/eth/tracers/internal/tracers/trigram_tracer.js new file mode 100644 index 0000000000000..8756490dfc14b --- /dev/null +++ b/eth/tracers/internal/tracers/trigram_tracer.js @@ -0,0 +1,49 @@ +// Copyright 2018 The go-ethereum Authors +// This file is part of the go-ethereum library. +// +// The go-ethereum library is free software: you can redistribute it and/or modify +// it under the terms of the GNU Lesser General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. +// +// The go-ethereum library is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU Lesser General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public License +// along with the go-ethereum library. If not, see . + +{ + // hist is the map of trigram counters + hist: {}, + // lastOp is last operation + lastOps: ['',''], + lastDepth: 0, + // step is invoked for every opcode that the VM executes. + step: function(log, db) { + var depth = log.getDepth(); + if (depth != this.lastDepth){ + this.lastOps = ['','']; + this.lastDepth = depth; + return; + } + var op = log.op.toString(); + var key = this.lastOps[0]+'-'+this.lastOps[1]+'-'+op; + if (this.hist[key]){ + this.hist[key]++; + } + else { + this.hist[key] = 1; + } + this.lastOps[0] = this.lastOps[1]; + this.lastOps[1] = op; + }, + // fault is invoked when the actual execution of an opcode fails. + fault: function(log, db) {}, + // result is invoked when all the opcodes have been iterated over and returns + // the final result of the tracing. + result: function(ctx) { + return this.hist; + }, +} diff --git a/eth/tracers/internal/tracers/unigram_tracer.js b/eth/tracers/internal/tracers/unigram_tracer.js new file mode 100644 index 0000000000000..000fb13b1e9aa --- /dev/null +++ b/eth/tracers/internal/tracers/unigram_tracer.js @@ -0,0 +1,43 @@ +// Copyright 2018 The go-ethereum Authors +// This file is part of the go-ethereum library. +// +// The go-ethereum library is free software: you can redistribute it and/or modify +// it under the terms of the GNU Lesser General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. +// +// The go-ethereum library is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU Lesser General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public License +// along with the go-ethereum library. If not, see . 
+ +{ + // hist is the map of opcodes to counters + hist: {}, + // nops counts number of ops + nops: 0, + // step is invoked for every opcode that the VM executes. + step: function(log, db) { + var op = log.op.toString(); + if (this.hist[op]){ + this.hist[op]++; + } + else { + this.hist[op] = 1; + } + this.nops++; + }, + // fault is invoked when the actual execution of an opcode fails. + fault: function(log, db) {}, + + // result is invoked when all the opcodes have been iterated over and returns + // the final result of the tracing. + result: function(ctx) { + if(this.nops > 0){ + return this.hist; + } + }, +} From 61a5976368684faaba7468d253fb1b8d194f9e27 Mon Sep 17 00:00:00 2001 From: Wenbiao Zheng Date: Wed, 20 Jun 2018 17:46:29 +0800 Subject: [PATCH 03/36] accounts: remove deadcode isSigned (#16990) --- accounts/abi/numbers.go | 15 --------------- accounts/abi/numbers_test.go | 11 ----------- 2 files changed, 26 deletions(-) diff --git a/accounts/abi/numbers.go b/accounts/abi/numbers.go index 0cd97cc66f360..4d706846dacda 100644 --- a/accounts/abi/numbers.go +++ b/accounts/abi/numbers.go @@ -31,29 +31,14 @@ var ( uint16T = reflect.TypeOf(uint16(0)) uint32T = reflect.TypeOf(uint32(0)) uint64T = reflect.TypeOf(uint64(0)) - intT = reflect.TypeOf(int(0)) int8T = reflect.TypeOf(int8(0)) int16T = reflect.TypeOf(int16(0)) int32T = reflect.TypeOf(int32(0)) int64T = reflect.TypeOf(int64(0)) addressT = reflect.TypeOf(common.Address{}) - intTS = reflect.TypeOf([]int(nil)) - int8TS = reflect.TypeOf([]int8(nil)) - int16TS = reflect.TypeOf([]int16(nil)) - int32TS = reflect.TypeOf([]int32(nil)) - int64TS = reflect.TypeOf([]int64(nil)) ) // U256 converts a big Int into a 256bit EVM number. func U256(n *big.Int) []byte { return math.PaddedBigBytes(math.U256(n), 32) } - -// checks whether the given reflect value is signed. This also works for slices with a number type -func isSigned(v reflect.Value) bool { - switch v.Type() { - case intTS, int8TS, int16TS, int32TS, int64TS, intT, int8T, int16T, int32T, int64T: - return true - } - return false -} diff --git a/accounts/abi/numbers_test.go b/accounts/abi/numbers_test.go index b9ff5aef17d32..d25a5abcb5163 100644 --- a/accounts/abi/numbers_test.go +++ b/accounts/abi/numbers_test.go @@ -19,7 +19,6 @@ package abi import ( "bytes" "math/big" - "reflect" "testing" ) @@ -32,13 +31,3 @@ func TestNumberTypes(t *testing.T) { t.Errorf("expected %x got %x", ubytes, unsigned) } } - -func TestSigned(t *testing.T) { - if isSigned(reflect.ValueOf(uint(10))) { - t.Error("signed") - } - - if !isSigned(reflect.ValueOf(int(10))) { - t.Error("not signed") - } -} From 1a7033873496ad65ab740deab82c67d5a7b2f554 Mon Sep 17 00:00:00 2001 From: Husam Ibrahim <39692071+HusamIbrahim@users.noreply.github.com> Date: Thu, 21 Jun 2018 10:35:35 +0300 Subject: [PATCH 04/36] mobile: correct comment typo in ethereum.go (#17040) --- mobile/ethereum.go | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/mobile/ethereum.go b/mobile/ethereum.go index 0eb1d90552c37..35a43d274dc0a 100644 --- a/mobile/ethereum.go +++ b/mobile/ethereum.go @@ -125,12 +125,12 @@ func (t *Topics) Append(topics *Hashes) { t.topics = append(t.topics, topics.hashes) } -// FilterQuery contains options for contact log filtering. +// FilterQuery contains options for contract log filtering. type FilterQuery struct { query ethereum.FilterQuery } -// NewFilterQuery creates an empty filter query for contact log filtering. +// NewFilterQuery creates an empty filter query for contract log filtering. 
func NewFilterQuery() *FilterQuery {
	return new(FilterQuery)
}

From 8db8d074e2fff547e9d85169018e03f89b5975a1 Mon Sep 17 00:00:00 2001
From: nobody
Date: Thu, 21 Jun 2018 15:44:39 +0800
Subject: [PATCH 05/36] cmd/geth: remove the tail "," from genesis config (#17028)

Remove the trailing "," from the genesis config, which would otherwise
cause a genesis config parse error.
---
 cmd/geth/genesis_test.go | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/cmd/geth/genesis_test.go b/cmd/geth/genesis_test.go
index a00ae00c19db4..e75b542cbbf64 100644
--- a/cmd/geth/genesis_test.go
+++ b/cmd/geth/genesis_test.go
@@ -77,7 +77,7 @@ var customGenesisTests = []struct {
 			"homesteadBlock" : 314,
 			"daoForkBlock"   : 141,
 			"daoForkSupport" : true
-		},
+		}
 	}`,
 	query:  "eth.getBlock(0).nonce",
 	result: "0x0000000000000042",

From d926bf2c7e3182d694c15829a37a0ca7331cd03c Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?P=C3=A9ter=20Szil=C3=A1gyi?=
Date: Thu, 21 Jun 2018 12:28:05 +0300
Subject: [PATCH 06/36] trie: cache collapsed tries node, not rlp blobs (#16876)

The current trie memory database/cache that we do pruning on stores trie
nodes as binary rlp encoded blobs, and also stores the node
relationships/references for GC purposes. However, most trie nodes
(everything apart from a value node) are in essence just collections of
references.

This PR switches out the RLP encoded trie blobs with the
collapsed-but-not-serialized trie nodes. This permits most of the
references to be recovered from within the node data structure itself,
avoiding the need to track them a second time (expensive memory-wise).
---
 core/blockchain.go      |   4 +-
 core/blockchain_test.go |   4 +-
 core/state/statedb.go   |   2 +-
 eth/api_tracer.go       |   6 +-
 trie/database.go        | 274 ++++++++++++++++++++++++++++++++++------
 trie/hasher.go          |  30 +----
 trie/node.go            |  15 ++-
 trie/trie.go            |   8 +-
 8 files changed, 268 insertions(+), 75 deletions(-)

diff --git a/core/blockchain.go b/core/blockchain.go
index 34832252a7a6f..0b50e3f37772d 100644
--- a/core/blockchain.go
+++ b/core/blockchain.go
@@ -672,7 +672,7 @@ func (bc *BlockChain) Stop() {
 		}
 	}
 	for !bc.triegc.Empty() {
-		triedb.Dereference(bc.triegc.PopItem().(common.Hash), common.Hash{})
+		triedb.Dereference(bc.triegc.PopItem().(common.Hash))
 	}
 	if size, _ := triedb.Size(); size != 0 {
 		log.Error("Dangling trie nodes after full cleanup")
@@ -947,7 +947,7 @@ func (bc *BlockChain) WriteBlockWithState(block *types.Block, receipts []*types.
bc.triegc.Push(root, number) break } - triedb.Dereference(root.(common.Hash), common.Hash{}) + triedb.Dereference(root.(common.Hash)) } } } diff --git a/core/blockchain_test.go b/core/blockchain_test.go index f409bb7b08a90..687209bfae4ae 100644 --- a/core/blockchain_test.go +++ b/core/blockchain_test.go @@ -1313,8 +1313,8 @@ func TestTrieForkGC(t *testing.T) { } // Dereference all the recent tries and ensure no past trie is left in for i := 0; i < triesInMemory; i++ { - chain.stateCache.TrieDB().Dereference(blocks[len(blocks)-1-i].Root(), common.Hash{}) - chain.stateCache.TrieDB().Dereference(forks[len(blocks)-1-i].Root(), common.Hash{}) + chain.stateCache.TrieDB().Dereference(blocks[len(blocks)-1-i].Root()) + chain.stateCache.TrieDB().Dereference(forks[len(blocks)-1-i].Root()) } if len(chain.stateCache.TrieDB().Nodes()) > 0 { t.Fatalf("stale tries still alive after garbase collection") diff --git a/core/state/statedb.go b/core/state/statedb.go index ffea761d9f31d..92d394ae328f2 100644 --- a/core/state/statedb.go +++ b/core/state/statedb.go @@ -596,7 +596,7 @@ func (s *StateDB) Commit(deleteEmptyObjects bool) (root common.Hash, err error) case isDirty: // Write any contract code associated with the state object if stateObject.code != nil && stateObject.dirtyCode { - s.db.TrieDB().Insert(common.BytesToHash(stateObject.CodeHash()), stateObject.code) + s.db.TrieDB().InsertBlob(common.BytesToHash(stateObject.CodeHash()), stateObject.code) stateObject.dirtyCode = false } // Write any storage changes in the state object to its storage trie. diff --git a/eth/api_tracer.go b/eth/api_tracer.go index 61f5c71d6460c..623e5ed1bd4f6 100644 --- a/eth/api_tracer.go +++ b/eth/api_tracer.go @@ -297,7 +297,7 @@ func (api *PrivateDebugAPI) traceChain(ctx context.Context, start, end *types.Bl database.TrieDB().Reference(root, common.Hash{}) } // Dereference all past tries we ourselves are done working with - database.TrieDB().Dereference(proot, common.Hash{}) + database.TrieDB().Dereference(proot) proot = root // TODO(karalabe): Do we need the preimages? Won't they accumulate too much? @@ -320,7 +320,7 @@ func (api *PrivateDebugAPI) traceChain(ctx context.Context, start, end *types.Bl done[uint64(result.Block)] = result // Dereference any paret tries held in memory by this task - database.TrieDB().Dereference(res.rootref, common.Hash{}) + database.TrieDB().Dereference(res.rootref) // Stream completed traces to the user, aborting on the first error for result, ok := done[next]; ok; result, ok = done[next] { @@ -526,7 +526,7 @@ func (api *PrivateDebugAPI) computeStateDB(block *types.Block, reexec uint64) (* return nil, err } database.TrieDB().Reference(root, common.Hash{}) - database.TrieDB().Dereference(proot, common.Hash{}) + database.TrieDB().Dereference(proot) proot = root } nodes, imgs := database.TrieDB().Size() diff --git a/trie/database.go b/trie/database.go index 468c139df4c6d..88c6e9cd61cec 100644 --- a/trie/database.go +++ b/trie/database.go @@ -17,6 +17,8 @@ package trie import ( + "fmt" + "io" "sync" "time" @@ -24,6 +26,7 @@ import ( "github.com/ethereum/go-ethereum/ethdb" "github.com/ethereum/go-ethereum/log" "github.com/ethereum/go-ethereum/metrics" + "github.com/ethereum/go-ethereum/rlp" ) var ( @@ -82,25 +85,188 @@ type Database struct { lock sync.RWMutex } +// rawNode is a simple binary blob used to differentiate between collapsed trie +// nodes and already encoded RLP binary blobs (while at the same time store them +// in the same cache fields). 
+type rawNode []byte + +func (n rawNode) canUnload(uint16, uint16) bool { panic("this should never end up in a live trie") } +func (n rawNode) cache() (hashNode, bool) { panic("this should never end up in a live trie") } +func (n rawNode) fstring(ind string) string { panic("this should never end up in a live trie") } + +// rawFullNode represents only the useful data content of a full node, with the +// caches and flags stripped out to minimize its data storage. This type honors +// the same RLP encoding as the original parent. +type rawFullNode [17]node + +func (n rawFullNode) canUnload(uint16, uint16) bool { panic("this should never end up in a live trie") } +func (n rawFullNode) cache() (hashNode, bool) { panic("this should never end up in a live trie") } +func (n rawFullNode) fstring(ind string) string { panic("this should never end up in a live trie") } + +func (n rawFullNode) EncodeRLP(w io.Writer) error { + var nodes [17]node + + for i, child := range n { + if child != nil { + nodes[i] = child + } else { + nodes[i] = nilValueNode + } + } + return rlp.Encode(w, nodes) +} + +// rawShortNode represents only the useful data content of a short node, with the +// caches and flags stripped out to minimize its data storage. This type honors +// the same RLP encoding as the original parent. +type rawShortNode struct { + Key []byte + Val node +} + +func (n rawShortNode) canUnload(uint16, uint16) bool { panic("this should never end up in a live trie") } +func (n rawShortNode) cache() (hashNode, bool) { panic("this should never end up in a live trie") } +func (n rawShortNode) fstring(ind string) string { panic("this should never end up in a live trie") } + // cachedNode is all the information we know about a single cached node in the // memory database write layer. type cachedNode struct { - blob []byte // Cached data block of the trie node - parents int // Number of live nodes referencing this one - children map[common.Hash]int // Children referenced by this nodes + node node // Cached collapsed trie node, or raw rlp data + size uint16 // Byte size of the useful cached data + + parents uint16 // Number of live nodes referencing this one + children map[common.Hash]uint16 // External children referenced by this node flushPrev common.Hash // Previous node in the flush-list flushNext common.Hash // Next node in the flush-list } +// rlp returns the raw rlp encoded blob of the cached node, either directly from +// the cache, or by regenerating it from the collapsed node. +func (n *cachedNode) rlp() []byte { + if node, ok := n.node.(rawNode); ok { + return node + } + blob, err := rlp.EncodeToBytes(n.node) + if err != nil { + panic(err) + } + return blob +} + +// obj returns the decoded and expanded trie node, either directly from the cache, +// or by regenerating it from the rlp encoded blob. +func (n *cachedNode) obj(hash common.Hash, cachegen uint16) node { + if node, ok := n.node.(rawNode); ok { + return mustDecodeNode(hash[:], node, cachegen) + } + return expandNode(hash[:], n.node, cachegen) +} + +// childs returns all the tracked children of this node, both the implicit ones +// from inside the node as well as the explicit ones from outside the node. 
+func (n *cachedNode) childs() []common.Hash { + children := make([]common.Hash, 0, 16) + for child := range n.children { + children = append(children, child) + } + if _, ok := n.node.(rawNode); !ok { + gatherChildren(n.node, &children) + } + return children +} + +// gatherChildren traverses the node hierarchy of a collapsed storage node and +// retrieves all the hashnode children. +func gatherChildren(n node, children *[]common.Hash) { + switch n := n.(type) { + case *rawShortNode: + gatherChildren(n.Val, children) + + case rawFullNode: + for i := 0; i < 16; i++ { + gatherChildren(n[i], children) + } + case hashNode: + *children = append(*children, common.BytesToHash(n)) + + case valueNode, nil: + + default: + panic(fmt.Sprintf("unknown node type: %T", n)) + } +} + +// simplifyNode traverses the hierarchy of an expanded memory node and discards +// all the internal caches, returning a node that only contains the raw data. +func simplifyNode(n node) node { + switch n := n.(type) { + case *shortNode: + // Short nodes discard the flags and cascade + return &rawShortNode{Key: n.Key, Val: simplifyNode(n.Val)} + + case *fullNode: + // Full nodes discard the flags and cascade + node := rawFullNode(n.Children) + for i := 0; i < len(node); i++ { + if node[i] != nil { + node[i] = simplifyNode(node[i]) + } + } + return node + + case valueNode, hashNode, rawNode: + return n + + default: + panic(fmt.Sprintf("unknown node type: %T", n)) + } +} + +// expandNode traverses the node hierarchy of a collapsed storage node and converts +// all fields and keys into expanded memory form. +func expandNode(hash hashNode, n node, cachegen uint16) node { + switch n := n.(type) { + case *rawShortNode: + // Short nodes need key and child expansion + return &shortNode{ + Key: compactToHex(n.Key), + Val: expandNode(nil, n.Val, cachegen), + flags: nodeFlag{ + hash: hash, + gen: cachegen, + }, + } + + case rawFullNode: + // Full nodes need child expansion + node := &fullNode{ + flags: nodeFlag{ + hash: hash, + gen: cachegen, + }, + } + for i := 0; i < len(node.Children); i++ { + if n[i] != nil { + node.Children[i] = expandNode(nil, n[i], cachegen) + } + } + return node + + case valueNode, hashNode: + return n + + default: + panic(fmt.Sprintf("unknown node type: %T", n)) + } +} + // NewDatabase creates a new trie database to store ephemeral trie content before // its written out to disk or garbage collected. func NewDatabase(diskdb ethdb.Database) *Database { return &Database{ - diskdb: diskdb, - nodes: map[common.Hash]*cachedNode{ - {}: {children: make(map[common.Hash]int)}, - }, + diskdb: diskdb, + nodes: map[common.Hash]*cachedNode{{}: {}}, preimages: make(map[common.Hash][]byte), } } @@ -110,33 +276,46 @@ func (db *Database) DiskDB() DatabaseReader { return db.diskdb } -// Insert writes a new trie node to the memory database if it's yet unknown. The -// method will make a copy of the slice. -func (db *Database) Insert(hash common.Hash, blob []byte) { +// InsertBlob writes a new reference tracked blob to the memory database if it's +// yet unknown. This method should only be used for non-trie nodes that require +// reference counting, since trie nodes are garbage collected directly through +// their embedded children. +func (db *Database) InsertBlob(hash common.Hash, blob []byte) { db.lock.Lock() defer db.lock.Unlock() - db.insert(hash, blob) + db.insert(hash, blob, rawNode(blob)) } -// insert is the private locked version of Insert. 
-func (db *Database) insert(hash common.Hash, blob []byte) { +// insert inserts a collapsed trie node into the memory database. This method is +// a more generic version of InsertBlob, supporting both raw blob insertions as +// well ex trie node insertions. The blob must always be specified to allow proper +// size tracking. +func (db *Database) insert(hash common.Hash, blob []byte, node node) { // If the node's already cached, skip if _, ok := db.nodes[hash]; ok { return } - db.nodes[hash] = &cachedNode{ - blob: common.CopyBytes(blob), - children: make(map[common.Hash]int), + // Create the cached entry for this node + entry := &cachedNode{ + node: simplifyNode(node), + size: uint16(len(blob)), flushPrev: db.newest, } + for _, child := range entry.childs() { + if c := db.nodes[child]; c != nil { + c.parents++ + } + } + db.nodes[hash] = entry + // Update the flush-list endpoints if db.oldest == (common.Hash{}) { db.oldest, db.newest = hash, hash } else { db.nodes[db.newest].flushNext, db.newest = hash, hash } - db.nodesSize += common.StorageSize(common.HashLength + len(blob)) + db.nodesSize += common.StorageSize(common.HashLength + entry.size) } // insertPreimage writes a new trie node pre-image to the memory database if it's @@ -151,8 +330,27 @@ func (db *Database) insertPreimage(hash common.Hash, preimage []byte) { db.preimagesSize += common.StorageSize(common.HashLength + len(preimage)) } -// Node retrieves a cached trie node from memory. If it cannot be found cached, -// the method queries the persistent database for the content. +// node retrieves a cached trie node from memory, or returns nil if none can be +// found in the memory cache. +func (db *Database) node(hash common.Hash, cachegen uint16) node { + // Retrieve the node from cache if available + db.lock.RLock() + node := db.nodes[hash] + db.lock.RUnlock() + + if node != nil { + return node.obj(hash, cachegen) + } + // Content unavailable in memory, attempt to retrieve from disk + enc, err := db.diskdb.Get(hash[:]) + if err != nil || enc == nil { + return nil + } + return mustDecodeNode(hash[:], enc, cachegen) +} + +// Node retrieves an encoded cached trie node from memory. If it cannot be found +// cached, the method queries the persistent database for the content. func (db *Database) Node(hash common.Hash) ([]byte, error) { // Retrieve the node from cache if available db.lock.RLock() @@ -160,7 +358,7 @@ func (db *Database) Node(hash common.Hash) ([]byte, error) { db.lock.RUnlock() if node != nil { - return node.blob, nil + return node.rlp(), nil } // Content unavailable in memory, attempt to retrieve from disk return db.diskdb.Get(hash[:]) @@ -222,20 +420,22 @@ func (db *Database) reference(child common.Hash, parent common.Hash) { return } // If the reference already exists, only duplicate for roots - if _, ok = db.nodes[parent].children[child]; ok && parent != (common.Hash{}) { + if db.nodes[parent].children == nil { + db.nodes[parent].children = make(map[common.Hash]uint16) + } else if _, ok = db.nodes[parent].children[child]; ok && parent != (common.Hash{}) { return } node.parents++ db.nodes[parent].children[child]++ } -// Dereference removes an existing reference from a parent node to a child node. -func (db *Database) Dereference(child common.Hash, parent common.Hash) { +// Dereference removes an existing reference from a root node. 
+func (db *Database) Dereference(root common.Hash) { db.lock.Lock() defer db.lock.Unlock() nodes, storage, start := len(db.nodes), db.nodesSize, time.Now() - db.dereference(child, parent) + db.dereference(root, common.Hash{}) db.gcnodes += uint64(nodes - len(db.nodes)) db.gcsize += storage - db.nodesSize @@ -254,9 +454,11 @@ func (db *Database) dereference(child common.Hash, parent common.Hash) { // Dereference the parent-child node := db.nodes[parent] - node.children[child]-- - if node.children[child] == 0 { - delete(node.children, child) + if node.children != nil && node.children[child] > 0 { + node.children[child]-- + if node.children[child] == 0 { + delete(node.children, child) + } } // If the child does not exist, it's a previously committed node. node, ok := db.nodes[child] @@ -274,11 +476,11 @@ func (db *Database) dereference(child common.Hash, parent common.Hash) { db.nodes[node.flushNext].flushPrev = node.flushPrev } // Dereference all children and delete the node - for hash := range node.children { + for _, hash := range node.childs() { db.dereference(hash, child) } delete(db.nodes, child) - db.nodesSize -= common.StorageSize(common.HashLength + len(node.blob)) + db.nodesSize -= common.StorageSize(common.HashLength + int(node.size)) } } @@ -323,7 +525,7 @@ func (db *Database) Cap(limit common.StorageSize) error { for size > limit && oldest != (common.Hash{}) { // Fetch the oldest referenced node and push into the batch node := db.nodes[oldest] - if err := batch.Put(oldest[:], node.blob); err != nil { + if err := batch.Put(oldest[:], node.rlp()); err != nil { db.lock.RUnlock() return err } @@ -340,7 +542,7 @@ func (db *Database) Cap(limit common.StorageSize) error { // is the total size, including both the useful cached data (hash -> blob), as // well as the flushlist metadata (2*hash). When flushing items from the cache, // we need to reduce both. 
- size -= common.StorageSize(3*common.HashLength + len(node.blob)) + size -= common.StorageSize(3*common.HashLength + int(node.size)) oldest = node.flushNext } // Flush out any remainder data from the last batch @@ -364,7 +566,7 @@ func (db *Database) Cap(limit common.StorageSize) error { delete(db.nodes, db.oldest) db.oldest = node.flushNext - db.nodesSize -= common.StorageSize(common.HashLength + len(node.blob)) + db.nodesSize -= common.StorageSize(common.HashLength + int(node.size)) } if db.oldest != (common.Hash{}) { db.nodes[db.oldest].flushPrev = common.Hash{} @@ -460,12 +662,12 @@ func (db *Database) commit(hash common.Hash, batch ethdb.Batch) error { if !ok { return nil } - for child := range node.children { + for _, child := range node.childs() { if err := db.commit(child, batch); err != nil { return err } } - if err := batch.Put(hash[:], node.blob); err != nil { + if err := batch.Put(hash[:], node.rlp()); err != nil { return err } // If we've reached an optimal batch size, commit and start over @@ -496,11 +698,11 @@ func (db *Database) uncache(hash common.Hash) { db.nodes[node.flushNext].flushPrev = node.flushPrev } // Uncache the node's subtries and remove the node itself too - for child := range node.children { + for _, child := range node.childs() { db.uncache(child) } delete(db.nodes, hash) - db.nodesSize -= common.StorageSize(common.HashLength + len(node.blob)) + db.nodesSize -= common.StorageSize(common.HashLength + int(node.size)) } // Size returns the current storage size of the memory cache in front of the diff --git a/trie/hasher.go b/trie/hasher.go index 47c6dd8f9d095..7b1d7793fa65d 100644 --- a/trie/hasher.go +++ b/trie/hasher.go @@ -137,9 +137,6 @@ func (h *hasher) hashChildren(original node, db *Database) (node, node, error) { return original, original, err } } - if collapsed.Val == nil { - collapsed.Val = valueNode(nil) // Ensure that nil children are encoded as empty strings. - } return collapsed, cached, nil case *fullNode: @@ -152,14 +149,9 @@ func (h *hasher) hashChildren(original node, db *Database) (node, node, error) { if err != nil { return original, original, err } - } else { - collapsed.Children[i] = valueNode(nil) // Ensure that nil children are encoded as empty strings. 
} } cached.Children[16] = n.Children[16] - if collapsed.Children[16] == nil { - collapsed.Children[16] = valueNode(nil) - } return collapsed, cached, nil default: @@ -192,34 +184,22 @@ func (h *hasher) store(n node, db *Database, force bool) (node, error) { if db != nil { // We are pooling the trie nodes into an intermediate memory cache - db.lock.Lock() hash := common.BytesToHash(hash) - db.insert(hash, h.tmp) - // Track all direct parent->child node references - switch n := n.(type) { - case *shortNode: - if child, ok := n.Val.(hashNode); ok { - db.reference(common.BytesToHash(child), hash) - } - case *fullNode: - for i := 0; i < 16; i++ { - if child, ok := n.Children[i].(hashNode); ok { - db.reference(common.BytesToHash(child), hash) - } - } - } + + db.lock.Lock() + db.insert(hash, h.tmp, n) db.lock.Unlock() // Track external references from account->storage trie if h.onleaf != nil { switch n := n.(type) { case *shortNode: - if child, ok := n.Val.(valueNode); ok && child != nil { + if child, ok := n.Val.(valueNode); ok { h.onleaf(child, hash) } case *fullNode: for i := 0; i < 16; i++ { - if child, ok := n.Children[i].(valueNode); ok && child != nil { + if child, ok := n.Children[i].(valueNode); ok { h.onleaf(child, hash) } } diff --git a/trie/node.go b/trie/node.go index 02815042c6daa..a06f1b3898f3a 100644 --- a/trie/node.go +++ b/trie/node.go @@ -47,9 +47,22 @@ type ( valueNode []byte ) +// nilValueNode is used when collapsing internal trie nodes for hashing, since +// unset children need to serialize correctly. +var nilValueNode = valueNode(nil) + // EncodeRLP encodes a full node into the consensus RLP format. func (n *fullNode) EncodeRLP(w io.Writer) error { - return rlp.Encode(w, n.Children) + var nodes [17]node + + for i, child := range n.Children { + if child != nil { + nodes[i] = child + } else { + nodes[i] = nilValueNode + } + } + return rlp.Encode(w, nodes) } func (n *fullNode) copy() *fullNode { copy := *n; return © } diff --git a/trie/trie.go b/trie/trie.go index 30543c5496bf9..4284e30ad40e3 100644 --- a/trie/trie.go +++ b/trie/trie.go @@ -433,12 +433,10 @@ func (t *Trie) resolveHash(n hashNode, prefix []byte) (node, error) { cacheMissCounter.Inc(1) hash := common.BytesToHash(n) - - enc, err := t.db.Node(hash) - if err != nil || enc == nil { - return nil, &MissingNodeError{NodeHash: hash, Path: prefix} + if node := t.db.node(hash, t.cachegen); node != nil { + return node, nil } - return mustDecodeNode(n, enc, t.cachegen), nil + return nil, &MissingNodeError{NodeHash: hash, Path: prefix} } // Root returns the root hash of the trie. 
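
What follows is a minimal, self-contained Go sketch of the reference-recovery
idea from the trie patch above. The hash, node and cache types are hypothetical
stand-ins for geth's cachedNode and Database, not the actual implementation;
the point it illustrates is that a collapsed node carries its child hashes, so
reference counts can be maintained and garbage collection cascaded from the
node structure itself, as trie/database.go's insert and dereference do.

package main

import "fmt"

type hash string

// node is either a leaf value or a collection of child hashes, mirroring how
// a collapsed trie node is "mostly just references".
type node struct {
	value    []byte // set for value nodes
	children []hash // set for short/full nodes; references recoverable from the node itself
}

type cache struct {
	nodes   map[hash]*node
	parents map[hash]int // reference counts, analogous to cachedNode.parents
}

// insert stores a collapsed node and bumps the reference count of every child
// it embeds; no separate child-tracking map is needed.
func (c *cache) insert(h hash, n *node) {
	if _, ok := c.nodes[h]; ok {
		return
	}
	c.nodes[h] = n
	for _, child := range n.children {
		c.parents[child]++
	}
}

// dereference drops one reference; nodes whose count reaches zero are deleted
// and their children dereferenced in turn (the GC cascade in the patch).
func (c *cache) dereference(h hash) {
	if c.parents[h] > 0 {
		c.parents[h]--
	}
	if c.parents[h] > 0 {
		return
	}
	n, ok := c.nodes[h]
	if !ok {
		return // previously flushed to disk, nothing to collect
	}
	delete(c.nodes, h)
	delete(c.parents, h)
	for _, child := range n.children {
		c.dereference(child)
	}
}

func main() {
	c := &cache{nodes: map[hash]*node{}, parents: map[hash]int{}}
	c.insert("leaf", &node{value: []byte{42}})
	c.insert("root", &node{children: []hash{"leaf"}})
	c.parents["root"]++ // pin the root, as BlockChain does via Reference
	c.dereference("root")
	fmt.Println(len(c.nodes)) // 0: root and leaf were garbage collected
}
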
From e187711c6545487d4cac3701f0f506bb536234e2 Mon Sep 17 00:00:00 2001 From: ethersphere Date: Wed, 20 Jun 2018 14:06:27 +0200 Subject: [PATCH 07/36] swarm: network rewrite merge --- .github/CODEOWNERS | 38 +- bmt/bmt.go | 560 --- bmt/bmt_r.go | 85 - bmt/bmt_test.go | 481 --- cmd/p2psim/main.go | 5 +- cmd/swarm/config.go | 109 +- cmd/swarm/config_test.go | 129 +- cmd/swarm/db.go | 28 +- cmd/swarm/download.go | 85 + cmd/swarm/export_test.go | 139 + cmd/swarm/fs.go | 127 + cmd/swarm/fs_test.go | 234 ++ cmd/swarm/hash.go | 6 +- cmd/swarm/main.go | 321 +- cmd/swarm/manifest.go | 14 +- cmd/swarm/run_test.go | 132 +- cmd/swarm/swarm-smoke/main.go | 101 + cmd/swarm/swarm-smoke/upload_and_sync.go | 184 + cmd/swarm/upload.go | 9 +- cmd/swarm/upload_test.go | 243 +- p2p/metrics.go | 10 +- p2p/peer.go | 7 +- p2p/protocols/protocol.go | 7 + p2p/simulations/network.go | 67 +- p2p/testing/protocolsession.go | 2 + swarm/AUTHORS | 35 + swarm/OWNERS | 26 + swarm/api/api.go | 446 ++- swarm/api/api_test.go | 58 +- swarm/api/client/client.go | 141 +- swarm/api/client/client_test.go | 51 +- swarm/api/config.go | 119 +- swarm/api/config_test.go | 20 +- swarm/api/filesystem.go | 43 +- swarm/api/filesystem_test.go | 53 +- swarm/api/http/error.go | 54 +- swarm/api/http/error_templates.go | 15 +- swarm/api/http/error_test.go | 12 +- swarm/api/http/roundtripper.go | 2 +- swarm/api/http/server.go | 675 +++- swarm/api/http/server_test.go | 419 ++- swarm/api/http/templates.go | 3 +- swarm/api/manifest.go | 185 +- swarm/api/manifest_test.go | 35 +- swarm/api/storage.go | 32 +- swarm/api/storage_test.go | 14 +- swarm/api/testapi.go | 32 +- swarm/api/uri.go | 46 +- swarm/api/uri_test.go | 47 +- swarm/bmt/bmt.go | 543 +++ swarm/bmt/bmt_r.go | 85 + swarm/bmt/bmt_test.go | 390 ++ swarm/fuse/fuse_dir.go | 15 +- swarm/fuse/fuse_file.go | 43 +- swarm/fuse/swarmfs.go | 4 +- swarm/fuse/swarmfs_test.go | 1695 ++++++--- swarm/fuse/swarmfs_unix.go | 156 +- swarm/fuse/swarmfs_util.go | 18 +- swarm/grafana_dashboards/ldbstore.json | 2278 ++++++++++++ swarm/grafana_dashboards/swarm.json | 3198 +++++++++++++++++ swarm/log/log.go | 48 + swarm/metrics/flags.go | 2 +- swarm/multihash/multihash.go | 92 + swarm/multihash/multihash_test.go | 53 + swarm/network/README.md | 152 + swarm/network/bitvector/bitvector.go | 66 + swarm/network/bitvector/bitvector_test.go | 104 + swarm/network/common.go | 30 + swarm/network/depo.go | 232 -- swarm/network/discovery.go | 210 ++ swarm/network/discovery_test.go | 57 + swarm/network/forwarding.go | 150 - swarm/network/hive.go | 514 +-- swarm/network/hive_test.go | 108 + swarm/network/kademlia.go | 765 ++++ swarm/network/kademlia/address.go | 173 - swarm/network/kademlia/address_test.go | 96 - swarm/network/kademlia/kaddb.go | 350 -- swarm/network/kademlia/kademlia.go | 454 --- swarm/network/kademlia/kademlia_test.go | 392 -- swarm/network/kademlia_test.go | 623 ++++ swarm/network/messages.go | 308 -- swarm/network/priorityqueue/priorityqueue.go | 111 + .../priorityqueue/priorityqueue_test.go | 97 + swarm/network/protocol.go | 759 ++-- swarm/network/protocol_test.go | 225 +- .../simulations/discovery/discovery.go | 17 + .../simulations/discovery/discovery_test.go | 586 +++ .../simulations/discovery/jsonsnapshot.txt | 1 + swarm/network/simulations/overlay.go | 144 + swarm/network/simulations/overlay_test.go | 195 + swarm/network/stream/common_test.go | 449 +++ swarm/network/stream/delivery.go | 272 ++ swarm/network/stream/delivery_test.go | 699 ++++ .../network/stream/intervals/dbstore_test.go | 42 + 
swarm/network/stream/intervals/intervals.go | 206 ++ .../stream/intervals/intervals_test.go | 395 ++ swarm/network/stream/intervals/store_test.go | 80 + swarm/network/stream/intervals_test.go | 313 ++ swarm/network/stream/messages.go | 370 ++ swarm/network/stream/peer.go | 328 ++ .../network/stream/snapshot_retrieval_test.go | 791 ++++ swarm/network/stream/snapshot_sync_test.go | 719 ++++ swarm/network/stream/stream.go | 739 ++++ swarm/network/stream/streamer_test.go | 684 ++++ swarm/network/stream/syncer.go | 297 ++ swarm/network/stream/syncer_test.go | 264 ++ .../network/stream/testing/snapshot_128.json | 1 + swarm/network/stream/testing/snapshot_16.json | 1 + .../network/stream/testing/snapshot_256.json | 1 + swarm/network/stream/testing/snapshot_32.json | 1 + swarm/network/stream/testing/snapshot_64.json | 1 + swarm/network/stream/testing/testing.go | 293 ++ swarm/network/syncdb.go | 389 -- swarm/network/syncdb_test.go | 222 -- swarm/network/syncer.go | 781 ---- swarm/network_test.go | 656 ++++ swarm/pot/address.go | 252 ++ swarm/pot/doc.go | 83 + swarm/pot/pot.go | 807 +++++ swarm/pot/pot_test.go | 685 ++++ swarm/pss/ARCHITECTURE.md | 144 + swarm/pss/README.md | 318 ++ swarm/pss/api.go | 169 + swarm/pss/client/client.go | 354 ++ swarm/pss/client/client_test.go | 302 ++ swarm/pss/client/doc.go | 96 + swarm/pss/doc.go | 61 + swarm/pss/handshake.go | 568 +++ swarm/pss/handshake_none.go | 27 + swarm/pss/handshake_test.go | 266 ++ swarm/pss/ping.go | 96 + swarm/pss/protocol.go | 279 ++ swarm/pss/protocol_none.go | 23 + swarm/pss/protocol_test.go | 158 + swarm/pss/pss.go | 951 +++++ swarm/pss/pss_test.go | 1683 +++++++++ .../testdata/addpsstodiscoverytestsnapshot.pl | 28 + .../testdata/addpsstodiscoverytestsnapshot.sh | 3 + swarm/pss/testdata/snapshot_128.json | 1 + swarm/pss/testdata/snapshot_16.json | 1 + swarm/pss/testdata/snapshot_2.json | 67 + swarm/pss/testdata/snapshot_256.json | 1 + swarm/pss/testdata/snapshot_3.json | 100 + swarm/pss/testdata/snapshot_32.json | 1 + swarm/pss/testdata/snapshot_4.json | 133 + swarm/pss/testdata/snapshot_64.json | 1 + swarm/pss/testdata/snapshot_8.json | 1 + swarm/pss/types.go | 191 + swarm/pss/writeup.md | 125 + swarm/services/swap/swap.go | 143 +- swarm/services/swap/swap/swap.go | 180 +- swarm/services/swap/swap/swap_test.go | 42 +- {p2p/simulations/adapters => swarm}/state.go | 20 +- swarm/state/dbstore.go | 96 + swarm/state/dbstore_test.go | 122 + swarm/state/inmemorystore.go | 94 + swarm/state/store.go | 26 + swarm/storage/chunker.go | 490 +-- swarm/storage/chunker_test.go | 451 +-- swarm/storage/chunkstore.go | 66 + swarm/storage/common.go | 43 + swarm/storage/common_test.go | 204 +- swarm/storage/database.go | 37 +- swarm/storage/dbapi.go | 52 + swarm/storage/dbstore.go | 600 ---- swarm/storage/dbstore_test.go | 191 - swarm/storage/dpa.go | 241 -- swarm/storage/encryption/encryption.go | 116 + swarm/storage/encryption/encryption_test.go | 149 + swarm/storage/error.go | 45 + swarm/storage/filestore.go | 97 + .../{dpa_test.go => filestore_test.go} | 105 +- swarm/storage/hasherstore.go | 229 ++ swarm/storage/hasherstore_test.go | 118 + swarm/storage/ldbstore.go | 771 ++++ swarm/storage/ldbstore_test.go | 522 +++ swarm/storage/localstore.go | 202 +- swarm/storage/localstore_test.go | 118 + swarm/storage/memstore.go | 361 +- swarm/storage/memstore_test.go | 228 +- swarm/storage/mock/db/db.go | 236 ++ swarm/storage/mock/db/db_test.go | 75 + swarm/storage/mock/mem/mem.go | 175 + swarm/storage/mock/mem/mem_test.go | 36 + swarm/storage/mock/mock.go | 
111 + swarm/storage/mock/rpc/rpc.go | 84 + swarm/storage/mock/rpc/rpc_test.go | 41 + swarm/storage/mock/test/test.go | 186 + swarm/storage/mru/error.go | 32 + swarm/storage/mru/resource.go | 1066 ++++++ swarm/storage/mru/resource_sign.go | 42 + swarm/storage/mru/resource_test.go | 766 ++++ swarm/storage/netstore.go | 229 +- swarm/storage/netstore_test.go | 122 + swarm/storage/pyramid.go | 520 +-- swarm/storage/swarmhasher.go | 11 +- swarm/storage/types.go | 338 +- swarm/swarm.go | 340 +- swarm/swarm_test.go | 245 ++ swarm/testutil/http.go | 86 +- 201 files changed, 39389 insertions(+), 9705 deletions(-) delete mode 100644 bmt/bmt.go delete mode 100644 bmt/bmt_r.go delete mode 100644 bmt/bmt_test.go create mode 100644 cmd/swarm/download.go create mode 100644 cmd/swarm/export_test.go create mode 100644 cmd/swarm/fs.go create mode 100644 cmd/swarm/fs_test.go create mode 100644 cmd/swarm/swarm-smoke/main.go create mode 100644 cmd/swarm/swarm-smoke/upload_and_sync.go create mode 100644 swarm/AUTHORS create mode 100644 swarm/OWNERS create mode 100644 swarm/bmt/bmt.go create mode 100644 swarm/bmt/bmt_r.go create mode 100644 swarm/bmt/bmt_test.go create mode 100644 swarm/grafana_dashboards/ldbstore.json create mode 100644 swarm/grafana_dashboards/swarm.json create mode 100644 swarm/log/log.go create mode 100644 swarm/multihash/multihash.go create mode 100644 swarm/multihash/multihash_test.go create mode 100644 swarm/network/README.md create mode 100644 swarm/network/bitvector/bitvector.go create mode 100644 swarm/network/bitvector/bitvector_test.go create mode 100644 swarm/network/common.go delete mode 100644 swarm/network/depo.go create mode 100644 swarm/network/discovery.go create mode 100644 swarm/network/discovery_test.go delete mode 100644 swarm/network/forwarding.go create mode 100644 swarm/network/hive_test.go create mode 100644 swarm/network/kademlia.go delete mode 100644 swarm/network/kademlia/address.go delete mode 100644 swarm/network/kademlia/address_test.go delete mode 100644 swarm/network/kademlia/kaddb.go delete mode 100644 swarm/network/kademlia/kademlia.go delete mode 100644 swarm/network/kademlia/kademlia_test.go create mode 100644 swarm/network/kademlia_test.go delete mode 100644 swarm/network/messages.go create mode 100644 swarm/network/priorityqueue/priorityqueue.go create mode 100644 swarm/network/priorityqueue/priorityqueue_test.go create mode 100644 swarm/network/simulations/discovery/discovery.go create mode 100644 swarm/network/simulations/discovery/discovery_test.go create mode 100755 swarm/network/simulations/discovery/jsonsnapshot.txt create mode 100644 swarm/network/simulations/overlay.go create mode 100644 swarm/network/simulations/overlay_test.go create mode 100644 swarm/network/stream/common_test.go create mode 100644 swarm/network/stream/delivery.go create mode 100644 swarm/network/stream/delivery_test.go create mode 100644 swarm/network/stream/intervals/dbstore_test.go create mode 100644 swarm/network/stream/intervals/intervals.go create mode 100644 swarm/network/stream/intervals/intervals_test.go create mode 100644 swarm/network/stream/intervals/store_test.go create mode 100644 swarm/network/stream/intervals_test.go create mode 100644 swarm/network/stream/messages.go create mode 100644 swarm/network/stream/peer.go create mode 100644 swarm/network/stream/snapshot_retrieval_test.go create mode 100644 swarm/network/stream/snapshot_sync_test.go create mode 100644 swarm/network/stream/stream.go create mode 100644 swarm/network/stream/streamer_test.go create mode 
100644 swarm/network/stream/syncer.go create mode 100644 swarm/network/stream/syncer_test.go create mode 100644 swarm/network/stream/testing/snapshot_128.json create mode 100644 swarm/network/stream/testing/snapshot_16.json create mode 100644 swarm/network/stream/testing/snapshot_256.json create mode 100644 swarm/network/stream/testing/snapshot_32.json create mode 100644 swarm/network/stream/testing/snapshot_64.json create mode 100644 swarm/network/stream/testing/testing.go delete mode 100644 swarm/network/syncdb.go delete mode 100644 swarm/network/syncdb_test.go delete mode 100644 swarm/network/syncer.go create mode 100644 swarm/network_test.go create mode 100644 swarm/pot/address.go create mode 100644 swarm/pot/doc.go create mode 100644 swarm/pot/pot.go create mode 100644 swarm/pot/pot_test.go create mode 100644 swarm/pss/ARCHITECTURE.md create mode 100644 swarm/pss/README.md create mode 100644 swarm/pss/api.go create mode 100644 swarm/pss/client/client.go create mode 100644 swarm/pss/client/client_test.go create mode 100644 swarm/pss/client/doc.go create mode 100644 swarm/pss/doc.go create mode 100644 swarm/pss/handshake.go create mode 100644 swarm/pss/handshake_none.go create mode 100644 swarm/pss/handshake_test.go create mode 100644 swarm/pss/ping.go create mode 100644 swarm/pss/protocol.go create mode 100644 swarm/pss/protocol_none.go create mode 100644 swarm/pss/protocol_test.go create mode 100644 swarm/pss/pss.go create mode 100644 swarm/pss/pss_test.go create mode 100644 swarm/pss/testdata/addpsstodiscoverytestsnapshot.pl create mode 100644 swarm/pss/testdata/addpsstodiscoverytestsnapshot.sh create mode 100644 swarm/pss/testdata/snapshot_128.json create mode 100644 swarm/pss/testdata/snapshot_16.json create mode 100644 swarm/pss/testdata/snapshot_2.json create mode 100644 swarm/pss/testdata/snapshot_256.json create mode 100644 swarm/pss/testdata/snapshot_3.json create mode 100644 swarm/pss/testdata/snapshot_32.json create mode 100644 swarm/pss/testdata/snapshot_4.json create mode 100644 swarm/pss/testdata/snapshot_64.json create mode 100644 swarm/pss/testdata/snapshot_8.json create mode 100644 swarm/pss/types.go create mode 100644 swarm/pss/writeup.md rename {p2p/simulations/adapters => swarm}/state.go (68%) create mode 100644 swarm/state/dbstore.go create mode 100644 swarm/state/dbstore_test.go create mode 100644 swarm/state/inmemorystore.go create mode 100644 swarm/state/store.go create mode 100644 swarm/storage/chunkstore.go create mode 100644 swarm/storage/common.go create mode 100644 swarm/storage/dbapi.go delete mode 100644 swarm/storage/dbstore.go delete mode 100644 swarm/storage/dbstore_test.go delete mode 100644 swarm/storage/dpa.go create mode 100644 swarm/storage/encryption/encryption.go create mode 100644 swarm/storage/encryption/encryption_test.go create mode 100644 swarm/storage/error.go create mode 100644 swarm/storage/filestore.go rename swarm/storage/{dpa_test.go => filestore_test.go} (58%) create mode 100644 swarm/storage/hasherstore.go create mode 100644 swarm/storage/hasherstore_test.go create mode 100644 swarm/storage/ldbstore.go create mode 100644 swarm/storage/ldbstore_test.go create mode 100644 swarm/storage/localstore_test.go create mode 100644 swarm/storage/mock/db/db.go create mode 100644 swarm/storage/mock/db/db_test.go create mode 100644 swarm/storage/mock/mem/mem.go create mode 100644 swarm/storage/mock/mem/mem_test.go create mode 100644 swarm/storage/mock/mock.go create mode 100644 swarm/storage/mock/rpc/rpc.go create mode 100644 
swarm/storage/mock/rpc/rpc_test.go create mode 100644 swarm/storage/mock/test/test.go create mode 100644 swarm/storage/mru/error.go create mode 100644 swarm/storage/mru/resource.go create mode 100644 swarm/storage/mru/resource_sign.go create mode 100644 swarm/storage/mru/resource_test.go create mode 100644 swarm/storage/netstore_test.go diff --git a/.github/CODEOWNERS b/.github/CODEOWNERS index 79c7a53014533..0a09baef7d242 100644 --- a/.github/CODEOWNERS +++ b/.github/CODEOWNERS @@ -1,12 +1,32 @@ # Lines starting with '#' are comments. # Each line is a file pattern followed by one or more owners. -accounts/usbwallet @karalabe -consensus @karalabe -core/ @karalabe @holiman -eth/ @karalabe -les/ @zsfelfoldi -light/ @zsfelfoldi -mobile/ @karalabe -p2p/ @fjl @zsfelfoldi -whisper/ @gballet @gluk256 +accounts/usbwallet @karalabe +consensus @karalabe +core/ @karalabe @holiman +eth/ @karalabe +les/ @zsfelfoldi +light/ @zsfelfoldi +mobile/ @karalabe +p2p/ @fjl @zsfelfoldi +swarm/bmt @zelig +swarm/dev @lmars +swarm/fuse @jmozah @holisticode +swarm/grafana_dashboards @nonsense +swarm/metrics @nonsense @holisticode +swarm/multihash @nolash +swarm/network/bitvector @zelig @janos @gbalint +swarm/network/priorityqueue @zelig @janos @gbalint +swarm/network/simulations @zelig +swarm/network/stream @janos @zelig @gbalint @holisticode @justelad +swarm/network/stream/intervals @janos +swarm/network/stream/testing @zelig +swarm/pot @zelig +swarm/pss @nolash @zelig @nonsense +swarm/services @zelig +swarm/state @justelad +swarm/storage/encryption @gbalint @zelig @nagydani +swarm/storage/mock @janos +swarm/storage/mru @nolash +swarm/testutil @lmars +whisper/ @gballet @gluk256 diff --git a/bmt/bmt.go b/bmt/bmt.go deleted file mode 100644 index c290223452fac..0000000000000 --- a/bmt/bmt.go +++ /dev/null @@ -1,560 +0,0 @@ -// Copyright 2017 The go-ethereum Authors -// This file is part of the go-ethereum library. -// -// The go-ethereum library is free software: you can redistribute it and/or modify -// it under the terms of the GNU Lesser General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. -// -// The go-ethereum library is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU Lesser General Public License for more details. -// -// You should have received a copy of the GNU Lesser General Public License -// along with the go-ethereum library. If not, see . - -// Package bmt provides a binary merkle tree implementation -package bmt - -import ( - "fmt" - "hash" - "io" - "strings" - "sync" - "sync/atomic" -) - -/* -Binary Merkle Tree Hash is a hash function over arbitrary datachunks of limited size -It is defined as the root hash of the binary merkle tree built over fixed size segments -of the underlying chunk using any base hash function (e.g keccak 256 SHA3) - -It is used as the chunk hash function in swarm which in turn is the basis for the -128 branching swarm hash http://swarm-guide.readthedocs.io/en/latest/architecture.html#swarm-hash - -The BMT is optimal for providing compact inclusion proofs, i.e. 
prove that a -segment is a substring of a chunk starting at a particular offset -The size of the underlying segments is fixed at 32 bytes (called the resolution -of the BMT hash), the EVM word size to optimize for on-chain BMT verification -as well as the hash size optimal for inclusion proofs in the merkle tree of the swarm hash. - -Two implementations are provided: - -* RefHasher is optimized for code simplicity and meant as a reference implementation -* Hasher is optimized for speed taking advantage of concurrency with minimalistic - control structure to coordinate the concurrent routines - It implements the ChunkHash interface as well as the go standard hash.Hash interface - -*/ - -const ( - // DefaultSegmentCount is the maximum number of segments of the underlying chunk - DefaultSegmentCount = 128 // Should be equal to storage.DefaultBranches - // DefaultPoolSize is the maximum number of bmt trees used by the hashers, i.e, - // the maximum number of concurrent BMT hashing operations performed by the same hasher - DefaultPoolSize = 8 -) - -// BaseHasher is a hash.Hash constructor function used for the base hash of the BMT. -type BaseHasher func() hash.Hash - -// Hasher a reusable hasher for fixed maximum size chunks representing a BMT -// implements the hash.Hash interface -// reuse pool of Tree-s for amortised memory allocation and resource control -// supports order-agnostic concurrent segment writes -// as well as sequential read and write -// can not be called concurrently on more than one chunk -// can be further appended after Sum -// Reset gives back the Tree to the pool and guaranteed to leave -// the tree and itself in a state reusable for hashing a new chunk -type Hasher struct { - pool *TreePool // BMT resource pool - bmt *Tree // prebuilt BMT resource for flowcontrol and proofs - blocksize int // segment size (size of hash) also for hash.Hash - count int // segment count - size int // for hash.Hash same as hashsize - cur int // cursor position for rightmost currently open chunk - segment []byte // the rightmost open segment (not complete) - depth int // index of last level - result chan []byte // result channel - hash []byte // to record the result - max int32 // max segments for SegmentWriter interface - blockLength []byte // The block length that needes to be added in Sum -} - -// New creates a reusable Hasher -// implements the hash.Hash interface -// pulls a new Tree from a resource pool for hashing each chunk -func New(p *TreePool) *Hasher { - return &Hasher{ - pool: p, - depth: depth(p.SegmentCount), - size: p.SegmentSize, - blocksize: p.SegmentSize, - count: p.SegmentCount, - result: make(chan []byte), - } -} - -// Node is a reuseable segment hasher representing a node in a BMT -// it allows for continued writes after a Sum -// and is left in completely reusable state after Reset -type Node struct { - level, index int // position of node for information/logging only - initial bool // first and last node - root bool // whether the node is root to a smaller BMT - isLeft bool // whether it is left side of the parent double segment - unbalanced bool // indicates if a node has only the left segment - parent *Node // BMT connections - state int32 // atomic increment impl concurrent boolean toggle - left, right []byte -} - -// NewNode constructor for segment hasher nodes in the BMT -func NewNode(level, index int, parent *Node) *Node { - return &Node{ - parent: parent, - level: level, - index: index, - initial: index == 0, - isLeft: index%2 == 0, - } -} - -// TreePool 
provides a pool of Trees used as resources by Hasher -// a Tree popped from the pool is guaranteed to have clean state -// for hashing a new chunk -// Hasher Reset releases the Tree to the pool -type TreePool struct { - lock sync.Mutex - c chan *Tree - hasher BaseHasher - SegmentSize int - SegmentCount int - Capacity int - count int -} - -// NewTreePool creates a Tree pool with hasher, segment size, segment count and capacity -// on GetTree it reuses free Trees or creates a new one if size is not reached -func NewTreePool(hasher BaseHasher, segmentCount, capacity int) *TreePool { - return &TreePool{ - c: make(chan *Tree, capacity), - hasher: hasher, - SegmentSize: hasher().Size(), - SegmentCount: segmentCount, - Capacity: capacity, - } -} - -// Drain drains the pool until it has no more than n resources -func (p *TreePool) Drain(n int) { - p.lock.Lock() - defer p.lock.Unlock() - for len(p.c) > n { - <-p.c - p.count-- - } -} - -// Reserve is blocking until it returns an available Tree -// it reuses free Trees or creates a new one if size is not reached -func (p *TreePool) Reserve() *Tree { - p.lock.Lock() - defer p.lock.Unlock() - var t *Tree - if p.count == p.Capacity { - return <-p.c - } - select { - case t = <-p.c: - default: - t = NewTree(p.hasher, p.SegmentSize, p.SegmentCount) - p.count++ - } - return t -} - -// Release gives back a Tree to the pool. -// This Tree is guaranteed to be in reusable state -// does not need locking -func (p *TreePool) Release(t *Tree) { - p.c <- t // can never fail but... -} - -// Tree is a reusable control structure representing a BMT -// organised in a binary tree -// Hasher uses a TreePool to pick one for each chunk hash -// the Tree is 'locked' while not in the pool -type Tree struct { - leaves []*Node -} - -// Draw draws the BMT (badly) -func (t *Tree) Draw(hash []byte, d int) string { - var left, right []string - var anc []*Node - for i, n := range t.leaves { - left = append(left, fmt.Sprintf("%v", hashstr(n.left))) - if i%2 == 0 { - anc = append(anc, n.parent) - } - right = append(right, fmt.Sprintf("%v", hashstr(n.right))) - } - anc = t.leaves - var hashes [][]string - for l := 0; len(anc) > 0; l++ { - var nodes []*Node - hash := []string{""} - for i, n := range anc { - hash = append(hash, fmt.Sprintf("%v|%v", hashstr(n.left), hashstr(n.right))) - if i%2 == 0 && n.parent != nil { - nodes = append(nodes, n.parent) - } - } - hash = append(hash, "") - hashes = append(hashes, hash) - anc = nodes - } - hashes = append(hashes, []string{"", fmt.Sprintf("%v", hashstr(hash)), ""}) - total := 60 - del := " " - var rows []string - for i := len(hashes) - 1; i >= 0; i-- { - var textlen int - hash := hashes[i] - for _, s := range hash { - textlen += len(s) - } - if total < textlen { - total = textlen + len(hash) - } - delsize := (total - textlen) / (len(hash) - 1) - if delsize > len(del) { - delsize = len(del) - } - row := fmt.Sprintf("%v: %v", len(hashes)-i-1, strings.Join(hash, del[:delsize])) - rows = append(rows, row) - - } - rows = append(rows, strings.Join(left, " ")) - rows = append(rows, strings.Join(right, " ")) - return strings.Join(rows, "\n") + "\n" -} - -// NewTree initialises the Tree by building up the nodes of a BMT -// segment size is stipulated to be the size of the hash -// segmentCount needs to be positive integer and does not need to be -// a power of two and can even be an odd number -// segmentSize * segmentCount determines the maximum chunk size -// hashed using the tree -func NewTree(hasher BaseHasher, segmentSize, segmentCount int) 
*Tree { - n := NewNode(0, 0, nil) - n.root = true - prevlevel := []*Node{n} - // iterate over levels and creates 2^level nodes - level := 1 - count := 2 - for d := 1; d <= depth(segmentCount); d++ { - nodes := make([]*Node, count) - for i := 0; i < len(nodes); i++ { - parent := prevlevel[i/2] - t := NewNode(level, i, parent) - nodes[i] = t - } - prevlevel = nodes - level++ - count *= 2 - } - // the datanode level is the nodes on the last level where - return &Tree{ - leaves: prevlevel, - } -} - -// methods needed by hash.Hash - -// Size returns the size -func (h *Hasher) Size() int { - return h.size -} - -// BlockSize returns the block size -func (h *Hasher) BlockSize() int { - return h.blocksize -} - -// Sum returns the hash of the buffer -// hash.Hash interface Sum method appends the byte slice to the underlying -// data before it calculates and returns the hash of the chunk -func (h *Hasher) Sum(b []byte) (r []byte) { - t := h.bmt - i := h.cur - n := t.leaves[i] - j := i - // must run strictly before all nodes calculate - // datanodes are guaranteed to have a parent - if len(h.segment) > h.size && i > 0 && n.parent != nil { - n = n.parent - } else { - i *= 2 - } - d := h.finalise(n, i) - h.writeSegment(j, h.segment, d) - c := <-h.result - h.releaseTree() - - // sha3(length + BMT(pure_chunk)) - if h.blockLength == nil { - return c - } - res := h.pool.hasher() - res.Reset() - res.Write(h.blockLength) - res.Write(c) - return res.Sum(nil) -} - -// Hasher implements the SwarmHash interface - -// Hash waits for the hasher result and returns it -// caller must call this on a BMT Hasher being written to -func (h *Hasher) Hash() []byte { - return <-h.result -} - -// Hasher implements the io.Writer interface - -// Write fills the buffer to hash -// with every full segment complete launches a hasher go routine -// that shoots up the BMT -func (h *Hasher) Write(b []byte) (int, error) { - l := len(b) - if l <= 0 { - return 0, nil - } - s := h.segment - i := h.cur - count := (h.count + 1) / 2 - need := h.count*h.size - h.cur*2*h.size - size := h.size - if need > size { - size *= 2 - } - if l < need { - need = l - } - // calculate missing bit to complete current open segment - rest := size - len(s) - if need < rest { - rest = need - } - s = append(s, b[:rest]...) 
- need -= rest - // read full segments and the last possibly partial segment - for need > 0 && i < count-1 { - // push all finished chunks we read - h.writeSegment(i, s, h.depth) - need -= size - if need < 0 { - size += need - } - s = b[rest : rest+size] - rest += size - i++ - } - h.segment = s - h.cur = i - // otherwise, we can assume len(s) == 0, so all buffer is read and chunk is not yet full - return l, nil -} - -// Hasher implements the io.ReaderFrom interface - -// ReadFrom reads from io.Reader and appends to the data to hash using Write -// it reads so that chunk to hash is maximum length or reader reaches EOF -// caller must Reset the hasher prior to call -func (h *Hasher) ReadFrom(r io.Reader) (m int64, err error) { - bufsize := h.size*h.count - h.size*h.cur - len(h.segment) - buf := make([]byte, bufsize) - var read int - for { - var n int - n, err = r.Read(buf) - read += n - if err == io.EOF || read == len(buf) { - hash := h.Sum(buf[:n]) - if read == len(buf) { - err = NewEOC(hash) - } - break - } - if err != nil { - break - } - n, err = h.Write(buf[:n]) - if err != nil { - break - } - } - return int64(read), err -} - -// Reset needs to be called before writing to the hasher -func (h *Hasher) Reset() { - h.getTree() - h.blockLength = nil -} - -// Hasher implements the SwarmHash interface - -// ResetWithLength needs to be called before writing to the hasher -// the argument is supposed to be the byte slice binary representation of -// the length of the data subsumed under the hash -func (h *Hasher) ResetWithLength(l []byte) { - h.Reset() - h.blockLength = l -} - -// Release gives back the Tree to the pool whereby it unlocks -// it resets tree, segment and index -func (h *Hasher) releaseTree() { - if h.bmt != nil { - n := h.bmt.leaves[h.cur] - for ; n != nil; n = n.parent { - n.unbalanced = false - if n.parent != nil { - n.root = false - } - } - h.pool.Release(h.bmt) - h.bmt = nil - - } - h.cur = 0 - h.segment = nil -} - -func (h *Hasher) writeSegment(i int, s []byte, d int) { - hash := h.pool.hasher() - n := h.bmt.leaves[i] - - if len(s) > h.size && n.parent != nil { - go func() { - hash.Reset() - hash.Write(s) - s = hash.Sum(nil) - - if n.root { - h.result <- s - return - } - h.run(n.parent, hash, d, n.index, s) - }() - return - } - go h.run(n, hash, d, i*2, s) -} - -func (h *Hasher) run(n *Node, hash hash.Hash, d int, i int, s []byte) { - isLeft := i%2 == 0 - for { - if isLeft { - n.left = s - } else { - n.right = s - } - if !n.unbalanced && n.toggle() { - return - } - if !n.unbalanced || !isLeft || i == 0 && d == 0 { - hash.Reset() - hash.Write(n.left) - hash.Write(n.right) - s = hash.Sum(nil) - - } else { - s = append(n.left, n.right...) 
- } - - h.hash = s - if n.root { - h.result <- s - return - } - - isLeft = n.isLeft - n = n.parent - i++ - } -} - -// getTree obtains a BMT resource by reserving one from the pool -func (h *Hasher) getTree() *Tree { - if h.bmt != nil { - return h.bmt - } - t := h.pool.Reserve() - h.bmt = t - return t -} - -// atomic bool toggle implementing a concurrent reusable 2-state object -// atomic addint with %2 implements atomic bool toggle -// it returns true if the toggler just put it in the active/waiting state -func (n *Node) toggle() bool { - return atomic.AddInt32(&n.state, 1)%2 == 1 -} - -func hashstr(b []byte) string { - end := len(b) - if end > 4 { - end = 4 - } - return fmt.Sprintf("%x", b[:end]) -} - -func depth(n int) (d int) { - for l := (n - 1) / 2; l > 0; l /= 2 { - d++ - } - return d -} - -// finalise is following the zigzags on the tree belonging -// to the final datasegment -func (h *Hasher) finalise(n *Node, i int) (d int) { - isLeft := i%2 == 0 - for { - // when the final segment's path is going via left segments - // the incoming data is pushed to the parent upon pulling the left - // we do not need toggle the state since this condition is - // detectable - n.unbalanced = isLeft - n.right = nil - if n.initial { - n.root = true - return d - } - isLeft = n.isLeft - n = n.parent - d++ - } -} - -// EOC (end of chunk) implements the error interface -type EOC struct { - Hash []byte // read the hash of the chunk off the error -} - -// Error returns the error string -func (e *EOC) Error() string { - return fmt.Sprintf("hasher limit reached, chunk hash: %x", e.Hash) -} - -// NewEOC creates new end of chunk error with the hash -func NewEOC(hash []byte) *EOC { - return &EOC{hash} -} diff --git a/bmt/bmt_r.go b/bmt/bmt_r.go deleted file mode 100644 index 3cb337ab94bde..0000000000000 --- a/bmt/bmt_r.go +++ /dev/null @@ -1,85 +0,0 @@ -// Copyright 2017 The go-ethereum Authors -// This file is part of the go-ethereum library. -// -// The go-ethereum library is free software: you can redistribute it and/or modify -// it under the terms of the GNU Lesser General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. -// -// The go-ethereum library is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU Lesser General Public License for more details. -// -// You should have received a copy of the GNU Lesser General Public License -// along with the go-ethereum library. If not, see . - -// Package bmt is a simple nonconcurrent reference implementation for hashsize segment based -// Binary Merkle tree hash on arbitrary but fixed maximum chunksize -// -// This implementation does not take advantage of any paralellisms and uses -// far more memory than necessary, but it is easy to see that it is correct. -// It can be used for generating test cases for optimized implementations. 
-// see testBMTHasherCorrectness function in bmt_test.go -package bmt - -import ( - "hash" -) - -// RefHasher is the non-optimized easy to read reference implementation of BMT -type RefHasher struct { - span int - section int - cap int - h hash.Hash -} - -// NewRefHasher returns a new RefHasher -func NewRefHasher(hasher BaseHasher, count int) *RefHasher { - h := hasher() - hashsize := h.Size() - maxsize := hashsize * count - c := 2 - for ; c < count; c *= 2 { - } - if c > 2 { - c /= 2 - } - return &RefHasher{ - section: 2 * hashsize, - span: c * hashsize, - cap: maxsize, - h: h, - } -} - -// Hash returns the BMT hash of the byte slice -// implements the SwarmHash interface -func (rh *RefHasher) Hash(d []byte) []byte { - if len(d) > rh.cap { - d = d[:rh.cap] - } - - return rh.hash(d, rh.span) -} - -func (rh *RefHasher) hash(d []byte, s int) []byte { - l := len(d) - left := d - var right []byte - if l > rh.section { - for ; s >= l; s /= 2 { - } - left = rh.hash(d[:s], s) - right = d[s:] - if l-s > rh.section/2 { - right = rh.hash(right, s) - } - } - defer rh.h.Reset() - rh.h.Write(left) - rh.h.Write(right) - h := rh.h.Sum(nil) - return h -} diff --git a/bmt/bmt_test.go b/bmt/bmt_test.go deleted file mode 100644 index 57df83060ab01..0000000000000 --- a/bmt/bmt_test.go +++ /dev/null @@ -1,481 +0,0 @@ -// Copyright 2017 The go-ethereum Authors -// This file is part of the go-ethereum library. -// -// The go-ethereum library is free software: you can redistribute it and/or modify -// it under the terms of the GNU Lesser General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. -// -// The go-ethereum library is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU Lesser General Public License for more details. -// -// You should have received a copy of the GNU Lesser General Public License -// along with the go-ethereum library. If not, see . 
- -package bmt - -import ( - "bytes" - crand "crypto/rand" - "fmt" - "hash" - "io" - "math/rand" - "sync" - "sync/atomic" - "testing" - "time" - - "github.com/ethereum/go-ethereum/crypto/sha3" -) - -const ( - maxproccnt = 8 -) - -// TestRefHasher tests that the RefHasher computes the expected BMT hash for -// all data lengths between 0 and 256 bytes -func TestRefHasher(t *testing.T) { - hashFunc := sha3.NewKeccak256 - - sha3 := func(data ...[]byte) []byte { - h := hashFunc() - for _, v := range data { - h.Write(v) - } - return h.Sum(nil) - } - - // the test struct is used to specify the expected BMT hash for data - // lengths between "from" and "to" - type test struct { - from int64 - to int64 - expected func([]byte) []byte - } - - var tests []*test - - // all lengths in [0,64] should be: - // - // sha3(data) - // - tests = append(tests, &test{ - from: 0, - to: 64, - expected: func(data []byte) []byte { - return sha3(data) - }, - }) - - // all lengths in [65,96] should be: - // - // sha3( - // sha3(data[:64]) - // data[64:] - // ) - // - tests = append(tests, &test{ - from: 65, - to: 96, - expected: func(data []byte) []byte { - return sha3(sha3(data[:64]), data[64:]) - }, - }) - - // all lengths in [97,128] should be: - // - // sha3( - // sha3(data[:64]) - // sha3(data[64:]) - // ) - // - tests = append(tests, &test{ - from: 97, - to: 128, - expected: func(data []byte) []byte { - return sha3(sha3(data[:64]), sha3(data[64:])) - }, - }) - - // all lengths in [129,160] should be: - // - // sha3( - // sha3( - // sha3(data[:64]) - // sha3(data[64:128]) - // ) - // data[128:] - // ) - // - tests = append(tests, &test{ - from: 129, - to: 160, - expected: func(data []byte) []byte { - return sha3(sha3(sha3(data[:64]), sha3(data[64:128])), data[128:]) - }, - }) - - // all lengths in [161,192] should be: - // - // sha3( - // sha3( - // sha3(data[:64]) - // sha3(data[64:128]) - // ) - // sha3(data[128:]) - // ) - // - tests = append(tests, &test{ - from: 161, - to: 192, - expected: func(data []byte) []byte { - return sha3(sha3(sha3(data[:64]), sha3(data[64:128])), sha3(data[128:])) - }, - }) - - // all lengths in [193,224] should be: - // - // sha3( - // sha3( - // sha3(data[:64]) - // sha3(data[64:128]) - // ) - // sha3( - // sha3(data[128:192]) - // data[192:] - // ) - // ) - // - tests = append(tests, &test{ - from: 193, - to: 224, - expected: func(data []byte) []byte { - return sha3(sha3(sha3(data[:64]), sha3(data[64:128])), sha3(sha3(data[128:192]), data[192:])) - }, - }) - - // all lengths in [225,256] should be: - // - // sha3( - // sha3( - // sha3(data[:64]) - // sha3(data[64:128]) - // ) - // sha3( - // sha3(data[128:192]) - // sha3(data[192:]) - // ) - // ) - // - tests = append(tests, &test{ - from: 225, - to: 256, - expected: func(data []byte) []byte { - return sha3(sha3(sha3(data[:64]), sha3(data[64:128])), sha3(sha3(data[128:192]), sha3(data[192:]))) - }, - }) - - // run the tests - for _, x := range tests { - for length := x.from; length <= x.to; length++ { - t.Run(fmt.Sprintf("%d_bytes", length), func(t *testing.T) { - data := make([]byte, length) - if _, err := io.ReadFull(crand.Reader, data); err != nil && err != io.EOF { - t.Fatal(err) - } - expected := x.expected(data) - actual := NewRefHasher(hashFunc, 128).Hash(data) - if !bytes.Equal(actual, expected) { - t.Fatalf("expected %x, got %x", expected, actual) - } - }) - } - } -} - -func testDataReader(l int) (r io.Reader) { - return io.LimitReader(crand.Reader, int64(l)) -} - -func TestHasherCorrectness(t *testing.T) { - err := 
testHasher(testBaseHasher) - if err != nil { - t.Fatal(err) - } -} - -func testHasher(f func(BaseHasher, []byte, int, int) error) error { - tdata := testDataReader(4128) - data := make([]byte, 4128) - tdata.Read(data) - hasher := sha3.NewKeccak256 - size := hasher().Size() - counts := []int{1, 2, 3, 4, 5, 8, 16, 32, 64, 128} - - var err error - for _, count := range counts { - max := count * size - incr := 1 - for n := 0; n <= max+incr; n += incr { - err = f(hasher, data, n, count) - if err != nil { - return err - } - } - } - return nil -} - -func TestHasherReuseWithoutRelease(t *testing.T) { - testHasherReuse(1, t) -} - -func TestHasherReuseWithRelease(t *testing.T) { - testHasherReuse(maxproccnt, t) -} - -func testHasherReuse(i int, t *testing.T) { - hasher := sha3.NewKeccak256 - pool := NewTreePool(hasher, 128, i) - defer pool.Drain(0) - bmt := New(pool) - - for i := 0; i < 500; i++ { - n := rand.Intn(4096) - tdata := testDataReader(n) - data := make([]byte, n) - tdata.Read(data) - - err := testHasherCorrectness(bmt, hasher, data, n, 128) - if err != nil { - t.Fatal(err) - } - } -} - -func TestHasherConcurrency(t *testing.T) { - hasher := sha3.NewKeccak256 - pool := NewTreePool(hasher, 128, maxproccnt) - defer pool.Drain(0) - wg := sync.WaitGroup{} - cycles := 100 - wg.Add(maxproccnt * cycles) - errc := make(chan error) - - for p := 0; p < maxproccnt; p++ { - for i := 0; i < cycles; i++ { - go func() { - bmt := New(pool) - n := rand.Intn(4096) - tdata := testDataReader(n) - data := make([]byte, n) - tdata.Read(data) - err := testHasherCorrectness(bmt, hasher, data, n, 128) - wg.Done() - if err != nil { - errc <- err - } - }() - } - } - go func() { - wg.Wait() - close(errc) - }() - var err error - select { - case <-time.NewTimer(5 * time.Second).C: - err = fmt.Errorf("timed out") - case err = <-errc: - } - if err != nil { - t.Fatal(err) - } -} - -func testBaseHasher(hasher BaseHasher, d []byte, n, count int) error { - pool := NewTreePool(hasher, count, 1) - defer pool.Drain(0) - bmt := New(pool) - return testHasherCorrectness(bmt, hasher, d, n, count) -} - -func testHasherCorrectness(bmt hash.Hash, hasher BaseHasher, d []byte, n, count int) (err error) { - data := d[:n] - rbmt := NewRefHasher(hasher, count) - exp := rbmt.Hash(data) - timeout := time.NewTimer(time.Second) - c := make(chan error) - - go func() { - bmt.Reset() - bmt.Write(data) - got := bmt.Sum(nil) - if !bytes.Equal(got, exp) { - c <- fmt.Errorf("wrong hash: expected %x, got %x", exp, got) - } - close(c) - }() - select { - case <-timeout.C: - err = fmt.Errorf("BMT hash calculation timed out") - case err = <-c: - } - return err -} - -func BenchmarkSHA3_4k(t *testing.B) { benchmarkSHA3(4096, t) } -func BenchmarkSHA3_2k(t *testing.B) { benchmarkSHA3(4096/2, t) } -func BenchmarkSHA3_1k(t *testing.B) { benchmarkSHA3(4096/4, t) } -func BenchmarkSHA3_512b(t *testing.B) { benchmarkSHA3(4096/8, t) } -func BenchmarkSHA3_256b(t *testing.B) { benchmarkSHA3(4096/16, t) } -func BenchmarkSHA3_128b(t *testing.B) { benchmarkSHA3(4096/32, t) } - -func BenchmarkBMTBaseline_4k(t *testing.B) { benchmarkBMTBaseline(4096, t) } -func BenchmarkBMTBaseline_2k(t *testing.B) { benchmarkBMTBaseline(4096/2, t) } -func BenchmarkBMTBaseline_1k(t *testing.B) { benchmarkBMTBaseline(4096/4, t) } -func BenchmarkBMTBaseline_512b(t *testing.B) { benchmarkBMTBaseline(4096/8, t) } -func BenchmarkBMTBaseline_256b(t *testing.B) { benchmarkBMTBaseline(4096/16, t) } -func BenchmarkBMTBaseline_128b(t *testing.B) { benchmarkBMTBaseline(4096/32, t) } - -func 
BenchmarkRefHasher_4k(t *testing.B) { benchmarkRefHasher(4096, t) } -func BenchmarkRefHasher_2k(t *testing.B) { benchmarkRefHasher(4096/2, t) } -func BenchmarkRefHasher_1k(t *testing.B) { benchmarkRefHasher(4096/4, t) } -func BenchmarkRefHasher_512b(t *testing.B) { benchmarkRefHasher(4096/8, t) } -func BenchmarkRefHasher_256b(t *testing.B) { benchmarkRefHasher(4096/16, t) } -func BenchmarkRefHasher_128b(t *testing.B) { benchmarkRefHasher(4096/32, t) } - -func BenchmarkHasher_4k(t *testing.B) { benchmarkHasher(4096, t) } -func BenchmarkHasher_2k(t *testing.B) { benchmarkHasher(4096/2, t) } -func BenchmarkHasher_1k(t *testing.B) { benchmarkHasher(4096/4, t) } -func BenchmarkHasher_512b(t *testing.B) { benchmarkHasher(4096/8, t) } -func BenchmarkHasher_256b(t *testing.B) { benchmarkHasher(4096/16, t) } -func BenchmarkHasher_128b(t *testing.B) { benchmarkHasher(4096/32, t) } - -func BenchmarkHasherNoReuse_4k(t *testing.B) { benchmarkHasherReuse(1, 4096, t) } -func BenchmarkHasherNoReuse_2k(t *testing.B) { benchmarkHasherReuse(1, 4096/2, t) } -func BenchmarkHasherNoReuse_1k(t *testing.B) { benchmarkHasherReuse(1, 4096/4, t) } -func BenchmarkHasherNoReuse_512b(t *testing.B) { benchmarkHasherReuse(1, 4096/8, t) } -func BenchmarkHasherNoReuse_256b(t *testing.B) { benchmarkHasherReuse(1, 4096/16, t) } -func BenchmarkHasherNoReuse_128b(t *testing.B) { benchmarkHasherReuse(1, 4096/32, t) } - -func BenchmarkHasherReuse_4k(t *testing.B) { benchmarkHasherReuse(16, 4096, t) } -func BenchmarkHasherReuse_2k(t *testing.B) { benchmarkHasherReuse(16, 4096/2, t) } -func BenchmarkHasherReuse_1k(t *testing.B) { benchmarkHasherReuse(16, 4096/4, t) } -func BenchmarkHasherReuse_512b(t *testing.B) { benchmarkHasherReuse(16, 4096/8, t) } -func BenchmarkHasherReuse_256b(t *testing.B) { benchmarkHasherReuse(16, 4096/16, t) } -func BenchmarkHasherReuse_128b(t *testing.B) { benchmarkHasherReuse(16, 4096/32, t) } - -// benchmarks the minimum hashing time for a balanced (for simplicity) BMT -// by doing count/segmentsize parallel hashings of 2*segmentsize bytes -// doing it on n maxproccnt each reusing the base hasher -// the premise is that this is the minimum computation needed for a BMT -// therefore this serves as a theoretical optimum for concurrent implementations -func benchmarkBMTBaseline(n int, t *testing.B) { - tdata := testDataReader(64) - data := make([]byte, 64) - tdata.Read(data) - hasher := sha3.NewKeccak256 - - t.ReportAllocs() - t.ResetTimer() - for i := 0; i < t.N; i++ { - count := int32((n-1)/hasher().Size() + 1) - wg := sync.WaitGroup{} - wg.Add(maxproccnt) - var i int32 - for j := 0; j < maxproccnt; j++ { - go func() { - defer wg.Done() - h := hasher() - for atomic.AddInt32(&i, 1) < count { - h.Reset() - h.Write(data) - h.Sum(nil) - } - }() - } - wg.Wait() - } -} - -func benchmarkHasher(n int, t *testing.B) { - tdata := testDataReader(n) - data := make([]byte, n) - tdata.Read(data) - - size := 1 - hasher := sha3.NewKeccak256 - segmentCount := 128 - pool := NewTreePool(hasher, segmentCount, size) - bmt := New(pool) - - t.ReportAllocs() - t.ResetTimer() - for i := 0; i < t.N; i++ { - bmt.Reset() - bmt.Write(data) - bmt.Sum(nil) - } -} - -func benchmarkHasherReuse(poolsize, n int, t *testing.B) { - tdata := testDataReader(n) - data := make([]byte, n) - tdata.Read(data) - - hasher := sha3.NewKeccak256 - segmentCount := 128 - pool := NewTreePool(hasher, segmentCount, poolsize) - cycles := 200 - - t.ReportAllocs() - t.ResetTimer() - for i := 0; i < t.N; i++ { - wg := sync.WaitGroup{} - wg.Add(cycles) - for j 
:= 0; j < cycles; j++ { - bmt := New(pool) - go func() { - defer wg.Done() - bmt.Reset() - bmt.Write(data) - bmt.Sum(nil) - }() - } - wg.Wait() - } -} - -func benchmarkSHA3(n int, t *testing.B) { - data := make([]byte, n) - tdata := testDataReader(n) - tdata.Read(data) - hasher := sha3.NewKeccak256 - h := hasher() - - t.ReportAllocs() - t.ResetTimer() - for i := 0; i < t.N; i++ { - h.Reset() - h.Write(data) - h.Sum(nil) - } -} - -func benchmarkRefHasher(n int, t *testing.B) { - data := make([]byte, n) - tdata := testDataReader(n) - tdata.Read(data) - hasher := sha3.NewKeccak256 - rbmt := NewRefHasher(hasher, 128) - - t.ReportAllocs() - t.ResetTimer() - for i := 0; i < t.N; i++ { - rbmt.Hash(data) - } -} diff --git a/cmd/p2psim/main.go b/cmd/p2psim/main.go index 0c8ed038d5b6b..d32c298631490 100644 --- a/cmd/p2psim/main.go +++ b/cmd/p2psim/main.go @@ -275,9 +275,8 @@ func createNode(ctx *cli.Context) error { if len(ctx.Args()) != 0 { return cli.ShowCommandHelp(ctx, ctx.Command.Name) } - config := &adapters.NodeConfig{ - Name: ctx.String("name"), - } + config := adapters.RandomNodeConfig() + config.Name = ctx.String("name") if key := ctx.String("key"); key != "" { privKey, err := crypto.HexToECDSA(key) if err != nil { diff --git a/cmd/swarm/config.go b/cmd/swarm/config.go index adac772babce8..64c37a0b5ef9b 100644 --- a/cmd/swarm/config.go +++ b/cmd/swarm/config.go @@ -24,6 +24,7 @@ import ( "reflect" "strconv" "strings" + "time" "unicode" cli "gopkg.in/urfave/cli.v1" @@ -37,6 +38,8 @@ import ( bzzapi "github.com/ethereum/go-ethereum/swarm/api" ) +const SWARM_VERSION = "0.3" + var ( //flag definition for the dumpconfig command DumpConfigCommand = cli.Command{ @@ -58,19 +61,25 @@ var ( //constants for environment variables const ( - SWARM_ENV_CHEQUEBOOK_ADDR = "SWARM_CHEQUEBOOK_ADDR" - SWARM_ENV_ACCOUNT = "SWARM_ACCOUNT" - SWARM_ENV_LISTEN_ADDR = "SWARM_LISTEN_ADDR" - SWARM_ENV_PORT = "SWARM_PORT" - SWARM_ENV_NETWORK_ID = "SWARM_NETWORK_ID" - SWARM_ENV_SWAP_ENABLE = "SWARM_SWAP_ENABLE" - SWARM_ENV_SWAP_API = "SWARM_SWAP_API" - SWARM_ENV_SYNC_ENABLE = "SWARM_SYNC_ENABLE" - SWARM_ENV_ENS_API = "SWARM_ENS_API" - SWARM_ENV_ENS_ADDR = "SWARM_ENS_ADDR" - SWARM_ENV_CORS = "SWARM_CORS" - SWARM_ENV_BOOTNODES = "SWARM_BOOTNODES" - GETH_ENV_DATADIR = "GETH_DATADIR" + SWARM_ENV_CHEQUEBOOK_ADDR = "SWARM_CHEQUEBOOK_ADDR" + SWARM_ENV_ACCOUNT = "SWARM_ACCOUNT" + SWARM_ENV_LISTEN_ADDR = "SWARM_LISTEN_ADDR" + SWARM_ENV_PORT = "SWARM_PORT" + SWARM_ENV_NETWORK_ID = "SWARM_NETWORK_ID" + SWARM_ENV_SWAP_ENABLE = "SWARM_SWAP_ENABLE" + SWARM_ENV_SWAP_API = "SWARM_SWAP_API" + SWARM_ENV_SYNC_DISABLE = "SWARM_SYNC_DISABLE" + SWARM_ENV_SYNC_UPDATE_DELAY = "SWARM_ENV_SYNC_UPDATE_DELAY" + SWARM_ENV_DELIVERY_SKIP_CHECK = "SWARM_DELIVERY_SKIP_CHECK" + SWARM_ENV_ENS_API = "SWARM_ENS_API" + SWARM_ENV_ENS_ADDR = "SWARM_ENS_ADDR" + SWARM_ENV_CORS = "SWARM_CORS" + SWARM_ENV_BOOTNODES = "SWARM_BOOTNODES" + SWARM_ENV_PSS_ENABLE = "SWARM_PSS_ENABLE" + SWARM_ENV_STORE_PATH = "SWARM_STORE_PATH" + SWARM_ENV_STORE_CAPACITY = "SWARM_STORE_CAPACITY" + SWARM_ENV_STORE_CACHE_CAPACITY = "SWARM_STORE_CACHE_CAPACITY" + GETH_ENV_DATADIR = "GETH_DATADIR" ) // These settings ensure that TOML keys use the same names as Go struct fields. 
@@ -92,10 +101,8 @@ var tomlSettings = toml.Config{ //before booting the swarm node, build the configuration func buildConfig(ctx *cli.Context) (config *bzzapi.Config, err error) { - //check for deprecated flags - checkDeprecated(ctx) //start by creating a default config - config = bzzapi.NewDefaultConfig() + config = bzzapi.NewConfig() //first load settings from config file (if provided) config, err = configFileOverride(config, ctx) if err != nil { @@ -168,7 +175,7 @@ func cmdLineOverride(currentConfig *bzzapi.Config, ctx *cli.Context) *bzzapi.Con if networkid := ctx.GlobalString(SwarmNetworkIdFlag.Name); networkid != "" { if id, _ := strconv.Atoi(networkid); id != 0 { - currentConfig.NetworkId = uint64(id) + currentConfig.NetworkID = uint64(id) } } @@ -191,12 +198,20 @@ func cmdLineOverride(currentConfig *bzzapi.Config, ctx *cli.Context) *bzzapi.Con currentConfig.SwapEnabled = true } - if ctx.GlobalIsSet(SwarmSyncEnabledFlag.Name) { - currentConfig.SyncEnabled = true + if ctx.GlobalIsSet(SwarmSyncDisabledFlag.Name) { + currentConfig.SyncEnabled = false + } + + if d := ctx.GlobalDuration(SwarmSyncUpdateDelay.Name); d > 0 { + currentConfig.SyncUpdateDelay = d } - currentConfig.SwapApi = ctx.GlobalString(SwarmSwapAPIFlag.Name) - if currentConfig.SwapEnabled && currentConfig.SwapApi == "" { + if ctx.GlobalIsSet(SwarmDeliverySkipCheckFlag.Name) { + currentConfig.DeliverySkipCheck = true + } + + currentConfig.SwapAPI = ctx.GlobalString(SwarmSwapAPIFlag.Name) + if currentConfig.SwapEnabled && currentConfig.SwapAPI == "" { utils.Fatalf(SWARM_ERR_SWAP_SET_NO_API) } @@ -209,10 +224,6 @@ func cmdLineOverride(currentConfig *bzzapi.Config, ctx *cli.Context) *bzzapi.Con currentConfig.EnsAPIs = ensAPIs } - if ensaddr := ctx.GlobalString(DeprecatedEnsAddrFlag.Name); ensaddr != "" { - currentConfig.EnsRoot = common.HexToAddress(ensaddr) - } - if cors := ctx.GlobalString(CorsStringFlag.Name); cors != "" { currentConfig.Cors = cors } @@ -221,6 +232,18 @@ func cmdLineOverride(currentConfig *bzzapi.Config, ctx *cli.Context) *bzzapi.Con currentConfig.BootNodes = ctx.GlobalString(utils.BootnodesFlag.Name) } + if storePath := ctx.GlobalString(SwarmStorePath.Name); storePath != "" { + currentConfig.LocalStoreParams.ChunkDbPath = storePath + } + + if storeCapacity := ctx.GlobalUint64(SwarmStoreCapacity.Name); storeCapacity != 0 { + currentConfig.LocalStoreParams.DbCapacity = storeCapacity + } + + if storeCacheCapacity := ctx.GlobalUint(SwarmStoreCacheCapacity.Name); storeCacheCapacity != 0 { + currentConfig.LocalStoreParams.CacheCapacity = storeCacheCapacity + } + return currentConfig } @@ -239,7 +262,7 @@ func envVarsOverride(currentConfig *bzzapi.Config) (config *bzzapi.Config) { if networkid := os.Getenv(SWARM_ENV_NETWORK_ID); networkid != "" { if id, _ := strconv.Atoi(networkid); id != 0 { - currentConfig.NetworkId = uint64(id) + currentConfig.NetworkID = uint64(id) } } @@ -262,17 +285,29 @@ func envVarsOverride(currentConfig *bzzapi.Config) (config *bzzapi.Config) { } } - if syncenable := os.Getenv(SWARM_ENV_SYNC_ENABLE); syncenable != "" { - if sync, err := strconv.ParseBool(syncenable); err != nil { - currentConfig.SyncEnabled = sync + if syncdisable := os.Getenv(SWARM_ENV_SYNC_DISABLE); syncdisable != "" { + if sync, err := strconv.ParseBool(syncdisable); err != nil { + currentConfig.SyncEnabled = !sync + } + } + + if v := os.Getenv(SWARM_ENV_DELIVERY_SKIP_CHECK); v != "" { + if skipCheck, err := strconv.ParseBool(v); err != nil { + currentConfig.DeliverySkipCheck = skipCheck + } + } + + if v := 
os.Getenv(SWARM_ENV_SYNC_UPDATE_DELAY); v != "" { + if d, err := time.ParseDuration(v); err != nil { + currentConfig.SyncUpdateDelay = d } } if swapapi := os.Getenv(SWARM_ENV_SWAP_API); swapapi != "" { - currentConfig.SwapApi = swapapi + currentConfig.SwapAPI = swapapi } - if currentConfig.SwapEnabled && currentConfig.SwapApi == "" { + if currentConfig.SwapEnabled && currentConfig.SwapAPI == "" { utils.Fatalf(SWARM_ERR_SWAP_SET_NO_API) } @@ -312,18 +347,6 @@ func dumpConfig(ctx *cli.Context) error { return nil } -//deprecated flags checked here -func checkDeprecated(ctx *cli.Context) { - // exit if the deprecated --ethapi flag is set - if ctx.GlobalString(DeprecatedEthAPIFlag.Name) != "" { - utils.Fatalf("--ethapi is no longer a valid command line flag, please use --ens-api and/or --swap-api.") - } - // warn if --ens-api flag is set - if ctx.GlobalString(DeprecatedEnsAddrFlag.Name) != "" { - log.Warn("--ens-addr is no longer a valid command line flag, please use --ens-api to specify contract address.") - } -} - //validate configuration parameters func validateConfig(cfg *bzzapi.Config) (err error) { for _, ensAPI := range cfg.EnsAPIs { diff --git a/cmd/swarm/config_test.go b/cmd/swarm/config_test.go index 9bf584f50c8b9..d5011e3a70693 100644 --- a/cmd/swarm/config_test.go +++ b/cmd/swarm/config_test.go @@ -34,7 +34,7 @@ import ( func TestDumpConfig(t *testing.T) { swarm := runSwarm(t, "dumpconfig") - defaultConf := api.NewDefaultConfig() + defaultConf := api.NewConfig() out, err := tomlSettings.Marshal(&defaultConf) if err != nil { t.Fatal(err) @@ -43,7 +43,7 @@ func TestDumpConfig(t *testing.T) { swarm.ExpectExit() } -func TestFailsSwapEnabledNoSwapApi(t *testing.T) { +func TestConfigFailsSwapEnabledNoSwapApi(t *testing.T) { flags := []string{ fmt.Sprintf("--%s", SwarmNetworkIdFlag.Name), "42", fmt.Sprintf("--%s", SwarmPortFlag.Name), "54545", @@ -55,7 +55,7 @@ func TestFailsSwapEnabledNoSwapApi(t *testing.T) { swarm.ExpectExit() } -func TestFailsNoBzzAccount(t *testing.T) { +func TestConfigFailsNoBzzAccount(t *testing.T) { flags := []string{ fmt.Sprintf("--%s", SwarmNetworkIdFlag.Name), "42", fmt.Sprintf("--%s", SwarmPortFlag.Name), "54545", @@ -66,7 +66,7 @@ func TestFailsNoBzzAccount(t *testing.T) { swarm.ExpectExit() } -func TestCmdLineOverrides(t *testing.T) { +func TestConfigCmdLineOverrides(t *testing.T) { dir, err := ioutil.TempDir("", "bzztest") if err != nil { t.Fatal(err) @@ -85,9 +85,10 @@ func TestCmdLineOverrides(t *testing.T) { flags := []string{ fmt.Sprintf("--%s", SwarmNetworkIdFlag.Name), "42", fmt.Sprintf("--%s", SwarmPortFlag.Name), httpPort, - fmt.Sprintf("--%s", SwarmSyncEnabledFlag.Name), + fmt.Sprintf("--%s", SwarmSyncDisabledFlag.Name), fmt.Sprintf("--%s", CorsStringFlag.Name), "*", fmt.Sprintf("--%s", SwarmAccountFlag.Name), account.Address.String(), + fmt.Sprintf("--%s", SwarmDeliverySkipCheckFlag.Name), fmt.Sprintf("--%s", EnsAPIFlag.Name), "", "--datadir", dir, "--ipcpath", conf.IPCPath, @@ -120,12 +121,16 @@ func TestCmdLineOverrides(t *testing.T) { t.Fatalf("Expected port to be %s, got %s", httpPort, info.Port) } - if info.NetworkId != 42 { - t.Fatalf("Expected network ID to be %d, got %d", 42, info.NetworkId) + if info.NetworkID != 42 { + t.Fatalf("Expected network ID to be %d, got %d", 42, info.NetworkID) } - if !info.SyncEnabled { - t.Fatal("Expected Sync to be enabled, but is false") + if info.SyncEnabled { + t.Fatal("Expected Sync to be disabled, but is true") + } + + if !info.DeliverySkipCheck { + t.Fatal("Expected DeliverySkipCheck to be enabled, but 
it is not") } if info.Cors != "*" { @@ -135,7 +140,7 @@ func TestCmdLineOverrides(t *testing.T) { node.Shutdown() } -func TestFileOverrides(t *testing.T) { +func TestConfigFileOverrides(t *testing.T) { // assign ports httpPort, err := assignTCPPort() @@ -145,16 +150,16 @@ func TestFileOverrides(t *testing.T) { //create a config file //first, create a default conf - defaultConf := api.NewDefaultConfig() + defaultConf := api.NewConfig() //change some values in order to test if they have been loaded - defaultConf.SyncEnabled = true - defaultConf.NetworkId = 54 + defaultConf.SyncEnabled = false + defaultConf.DeliverySkipCheck = true + defaultConf.NetworkID = 54 defaultConf.Port = httpPort - defaultConf.StoreParams.DbCapacity = 9000000 - defaultConf.ChunkerParams.Branches = 64 - defaultConf.HiveParams.CallInterval = 6000000000 + defaultConf.DbCapacity = 9000000 + defaultConf.HiveParams.KeepAliveInterval = 6000000000 defaultConf.Swap.Params.Strategy.AutoCashInterval = 600 * time.Second - defaultConf.SyncParams.KeyBufferSize = 512 + //defaultConf.SyncParams.KeyBufferSize = 512 //create a TOML string out, err := tomlSettings.Marshal(&defaultConf) if err != nil { @@ -215,38 +220,38 @@ func TestFileOverrides(t *testing.T) { t.Fatalf("Expected port to be %s, got %s", httpPort, info.Port) } - if info.NetworkId != 54 { - t.Fatalf("Expected network ID to be %d, got %d", 54, info.NetworkId) + if info.NetworkID != 54 { + t.Fatalf("Expected network ID to be %d, got %d", 54, info.NetworkID) } - if !info.SyncEnabled { - t.Fatal("Expected Sync to be enabled, but is false") + if info.SyncEnabled { + t.Fatal("Expected Sync to be disabled, but is true") } - if info.StoreParams.DbCapacity != 9000000 { - t.Fatalf("Expected network ID to be %d, got %d", 54, info.NetworkId) + if !info.DeliverySkipCheck { + t.Fatal("Expected DeliverySkipCheck to be enabled, but it is not") } - if info.ChunkerParams.Branches != 64 { - t.Fatalf("Expected chunker params branches to be %d, got %d", 64, info.ChunkerParams.Branches) + if info.DbCapacity != 9000000 { + t.Fatalf("Expected network ID to be %d, got %d", 54, info.NetworkID) } - if info.HiveParams.CallInterval != 6000000000 { - t.Fatalf("Expected HiveParams CallInterval to be %d, got %d", uint64(6000000000), uint64(info.HiveParams.CallInterval)) + if info.HiveParams.KeepAliveInterval != 6000000000 { + t.Fatalf("Expected HiveParams KeepAliveInterval to be %d, got %d", uint64(6000000000), uint64(info.HiveParams.KeepAliveInterval)) } if info.Swap.Params.Strategy.AutoCashInterval != 600*time.Second { t.Fatalf("Expected SwapParams AutoCashInterval to be %ds, got %d", 600, info.Swap.Params.Strategy.AutoCashInterval) } - if info.SyncParams.KeyBufferSize != 512 { - t.Fatalf("Expected info.SyncParams.KeyBufferSize to be %d, got %d", 512, info.SyncParams.KeyBufferSize) - } + // if info.SyncParams.KeyBufferSize != 512 { + // t.Fatalf("Expected info.SyncParams.KeyBufferSize to be %d, got %d", 512, info.SyncParams.KeyBufferSize) + // } node.Shutdown() } -func TestEnvVars(t *testing.T) { +func TestConfigEnvVars(t *testing.T) { // assign ports httpPort, err := assignTCPPort() if err != nil { @@ -257,7 +262,8 @@ func TestEnvVars(t *testing.T) { envVars = append(envVars, fmt.Sprintf("%s=%s", SwarmPortFlag.EnvVar, httpPort)) envVars = append(envVars, fmt.Sprintf("%s=%s", SwarmNetworkIdFlag.EnvVar, "999")) envVars = append(envVars, fmt.Sprintf("%s=%s", CorsStringFlag.EnvVar, "*")) - envVars = append(envVars, fmt.Sprintf("%s=%s", SwarmSyncEnabledFlag.EnvVar, "true")) + envVars = append(envVars, 
fmt.Sprintf("%s=%s", SwarmSyncDisabledFlag.EnvVar, "true")) + envVars = append(envVars, fmt.Sprintf("%s=%s", SwarmDeliverySkipCheckFlag.EnvVar, "true")) dir, err := ioutil.TempDir("", "bzztest") if err != nil { @@ -326,23 +332,27 @@ func TestEnvVars(t *testing.T) { t.Fatalf("Expected port to be %s, got %s", httpPort, info.Port) } - if info.NetworkId != 999 { - t.Fatalf("Expected network ID to be %d, got %d", 999, info.NetworkId) + if info.NetworkID != 999 { + t.Fatalf("Expected network ID to be %d, got %d", 999, info.NetworkID) } if info.Cors != "*" { t.Fatalf("Expected Cors flag to be set to %s, got %s", "*", info.Cors) } - if !info.SyncEnabled { - t.Fatal("Expected Sync to be enabled, but is false") + if info.SyncEnabled { + t.Fatal("Expected Sync to be disabled, but is true") + } + + if !info.DeliverySkipCheck { + t.Fatal("Expected DeliverySkipCheck to be enabled, but it is not") } node.Shutdown() cmd.Process.Kill() } -func TestCmdLineOverridesFile(t *testing.T) { +func TestConfigCmdLineOverridesFile(t *testing.T) { // assign ports httpPort, err := assignTCPPort() @@ -352,26 +362,27 @@ func TestCmdLineOverridesFile(t *testing.T) { //create a config file //first, create a default conf - defaultConf := api.NewDefaultConfig() + defaultConf := api.NewConfig() //change some values in order to test if they have been loaded - defaultConf.SyncEnabled = false - defaultConf.NetworkId = 54 + defaultConf.SyncEnabled = true + defaultConf.NetworkID = 54 defaultConf.Port = "8588" - defaultConf.StoreParams.DbCapacity = 9000000 - defaultConf.ChunkerParams.Branches = 64 - defaultConf.HiveParams.CallInterval = 6000000000 + defaultConf.DbCapacity = 9000000 + defaultConf.HiveParams.KeepAliveInterval = 6000000000 defaultConf.Swap.Params.Strategy.AutoCashInterval = 600 * time.Second - defaultConf.SyncParams.KeyBufferSize = 512 + //defaultConf.SyncParams.KeyBufferSize = 512 //create a TOML file out, err := tomlSettings.Marshal(&defaultConf) if err != nil { t.Fatalf("Error creating TOML file in TestFileOverride: %v", err) } //write file - f, err := ioutil.TempFile("", "testconfig.toml") + fname := "testconfig.toml" + f, err := ioutil.TempFile("", fname) if err != nil { t.Fatalf("Error writing TOML file in TestFileOverride: %v", err) } + defer os.Remove(fname) //write file _, err = f.WriteString(string(out)) if err != nil { @@ -392,7 +403,7 @@ func TestCmdLineOverridesFile(t *testing.T) { flags := []string{ fmt.Sprintf("--%s", SwarmNetworkIdFlag.Name), "77", fmt.Sprintf("--%s", SwarmPortFlag.Name), httpPort, - fmt.Sprintf("--%s", SwarmSyncEnabledFlag.Name), + fmt.Sprintf("--%s", SwarmSyncDisabledFlag.Name), fmt.Sprintf("--%s", SwarmTomlConfigPathFlag.Name), f.Name(), fmt.Sprintf("--%s", SwarmAccountFlag.Name), account.Address.String(), "--ens-api", "", @@ -427,33 +438,29 @@ func TestCmdLineOverridesFile(t *testing.T) { t.Fatalf("Expected port to be %s, got %s", httpPort, info.Port) } - if info.NetworkId != expectNetworkId { - t.Fatalf("Expected network ID to be %d, got %d", expectNetworkId, info.NetworkId) + if info.NetworkID != expectNetworkId { + t.Fatalf("Expected network ID to be %d, got %d", expectNetworkId, info.NetworkID) } - if !info.SyncEnabled { - t.Fatal("Expected Sync to be enabled, but is false") + if info.SyncEnabled { + t.Fatal("Expected Sync to be disabled, but is true") } - if info.StoreParams.DbCapacity != 9000000 { - t.Fatalf("Expected network ID to be %d, got %d", 54, info.NetworkId) + if info.LocalStoreParams.DbCapacity != 9000000 { + t.Fatalf("Expected Capacity to be %d, got %d", 9000000, 
info.LocalStoreParams.DbCapacity) } - if info.ChunkerParams.Branches != 64 { - t.Fatalf("Expected chunker params branches to be %d, got %d", 64, info.ChunkerParams.Branches) - } - - if info.HiveParams.CallInterval != 6000000000 { - t.Fatalf("Expected HiveParams CallInterval to be %d, got %d", uint64(6000000000), uint64(info.HiveParams.CallInterval)) + if info.HiveParams.KeepAliveInterval != 6000000000 { + t.Fatalf("Expected HiveParams KeepAliveInterval to be %d, got %d", uint64(6000000000), uint64(info.HiveParams.KeepAliveInterval)) } if info.Swap.Params.Strategy.AutoCashInterval != 600*time.Second { t.Fatalf("Expected SwapParams AutoCashInterval to be %ds, got %d", 600, info.Swap.Params.Strategy.AutoCashInterval) } - if info.SyncParams.KeyBufferSize != 512 { - t.Fatalf("Expected info.SyncParams.KeyBufferSize to be %d, got %d", 512, info.SyncParams.KeyBufferSize) - } + // if info.SyncParams.KeyBufferSize != 512 { + // t.Fatalf("Expected info.SyncParams.KeyBufferSize to be %d, got %d", 512, info.SyncParams.KeyBufferSize) + // } node.Shutdown() } diff --git a/cmd/swarm/db.go b/cmd/swarm/db.go index dfd2d069b9754..fe03f2d160a68 100644 --- a/cmd/swarm/db.go +++ b/cmd/swarm/db.go @@ -23,6 +23,7 @@ import ( "path/filepath" "github.com/ethereum/go-ethereum/cmd/utils" + "github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/log" "github.com/ethereum/go-ethereum/swarm/storage" "gopkg.in/urfave/cli.v1" @@ -30,11 +31,11 @@ import ( func dbExport(ctx *cli.Context) { args := ctx.Args() - if len(args) != 2 { - utils.Fatalf("invalid arguments, please specify both (path to a local chunk database) and (path to write the tar archive to, - for stdout)") + if len(args) != 3 { + utils.Fatalf("invalid arguments, please specify both (path to a local chunk database), (path to write the tar archive to, - for stdout) and the base key") } - store, err := openDbStore(args[0]) + store, err := openLDBStore(args[0], common.Hex2Bytes(args[2])) if err != nil { utils.Fatalf("error opening local chunk database: %s", err) } @@ -62,11 +63,11 @@ func dbExport(ctx *cli.Context) { func dbImport(ctx *cli.Context) { args := ctx.Args() - if len(args) != 2 { - utils.Fatalf("invalid arguments, please specify both (path to a local chunk database) and (path to read the tar archive from, - for stdin)") + if len(args) != 3 { + utils.Fatalf("invalid arguments, please specify both (path to a local chunk database), (path to read the tar archive from, - for stdin) and the base key") } - store, err := openDbStore(args[0]) + store, err := openLDBStore(args[0], common.Hex2Bytes(args[2])) if err != nil { utils.Fatalf("error opening local chunk database: %s", err) } @@ -94,11 +95,11 @@ func dbImport(ctx *cli.Context) { func dbClean(ctx *cli.Context) { args := ctx.Args() - if len(args) != 1 { - utils.Fatalf("invalid arguments, please specify (path to a local chunk database)") + if len(args) != 2 { + utils.Fatalf("invalid arguments, please specify (path to a local chunk database) and the base key") } - store, err := openDbStore(args[0]) + store, err := openLDBStore(args[0], common.Hex2Bytes(args[1])) if err != nil { utils.Fatalf("error opening local chunk database: %s", err) } @@ -107,10 +108,13 @@ func dbClean(ctx *cli.Context) { store.Cleanup() } -func openDbStore(path string) (*storage.DbStore, error) { +func openLDBStore(path string, basekey []byte) (*storage.LDBStore, error) { if _, err := os.Stat(filepath.Join(path, "CURRENT")); err != nil { return nil, fmt.Errorf("invalid chunkdb path: %s", err) } - hash := 
storage.MakeHashFunc("SHA3")
- return storage.NewDbStore(path, hash, 10000000, 0)
+
+ storeparams := storage.NewDefaultStoreParams()
+ ldbparams := storage.NewLDBStoreParams(storeparams, path)
+ ldbparams.BaseKey = basekey
+ return storage.NewLDBStore(ldbparams)
}
diff --git a/cmd/swarm/download.go b/cmd/swarm/download.go
new file mode 100644
index 0000000000000..c2418f744c848
--- /dev/null
+++ b/cmd/swarm/download.go
@@ -0,0 +1,85 @@
+// Copyright 2018 The go-ethereum Authors
+// This file is part of go-ethereum.
+//
+// go-ethereum is free software: you can redistribute it and/or modify
+// it under the terms of the GNU General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+//
+// go-ethereum is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU General Public License for more details.
+//
+// You should have received a copy of the GNU General Public License
+// along with go-ethereum. If not, see <http://www.gnu.org/licenses/>.
+package main
+
+import (
+ "fmt"
+ "os"
+ "path/filepath"
+ "strings"
+
+ "github.com/ethereum/go-ethereum/cmd/utils"
+ "github.com/ethereum/go-ethereum/log"
+ "github.com/ethereum/go-ethereum/swarm/api"
+ swarm "github.com/ethereum/go-ethereum/swarm/api/client"
+ "gopkg.in/urfave/cli.v1"
+)
+
+func download(ctx *cli.Context) {
+ log.Debug("downloading content using swarm down")
+ args := ctx.Args()
+ dest := "."
+
+ switch len(args) {
+ case 0:
+ utils.Fatalf("Usage: swarm down [options] <bzz locator> [<destination path>]")
+ case 1:
+ log.Trace(fmt.Sprintf("swarm down: no destination path - assuming working dir"))
+ default:
+ log.Trace(fmt.Sprintf("destination path arg: %s", args[1]))
+ if absDest, err := filepath.Abs(args[1]); err == nil {
+ dest = absDest
+ } else {
+ utils.Fatalf("could not get download path: %v", err)
+ }
+ }
+
+ var (
+ bzzapi = strings.TrimRight(ctx.GlobalString(SwarmApiFlag.Name), "/")
+ isRecursive = ctx.Bool(SwarmRecursiveFlag.Name)
+ client = swarm.NewClient(bzzapi)
+ )
+
+ if fi, err := os.Stat(dest); err == nil {
+ if isRecursive && !fi.Mode().IsDir() {
+ utils.Fatalf("destination path is not a directory!")
+ }
+ } else {
+ if !os.IsNotExist(err) {
+ utils.Fatalf("could not stat path: %v", err)
+ }
+ }
+
+ uri, err := api.Parse(args[0])
+ if err != nil {
+ utils.Fatalf("could not parse uri argument: %v", err)
+ }
+
+ // assume behaviour according to --recursive switch
+ if isRecursive {
+ if err := client.DownloadDirectory(uri.Addr, uri.Path, dest); err != nil {
+ utils.Fatalf("encountered an error while downloading directory: %v", err)
+ }
+ } else {
+ // we are downloading a file
+ log.Debug(fmt.Sprintf("downloading file/path from a manifest. hash: %s, path:%s", uri.Addr, uri.Path))
+
+ err := client.DownloadFile(uri.Addr, uri.Path, dest)
+ if err != nil {
+ utils.Fatalf("could not download %s from given address: %s. error: %v", uri.Path, uri.Addr, err)
+ }
+ }
+}
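
The download command above is a thin wrapper over the swarm HTTP client API. A minimal sketch of fetching a single manifest entry the same way, outside the CLI; the endpoint, hash and destination below are placeholders, not values from this patch:

    package main

    import (
        "log"

        "github.com/ethereum/go-ethereum/swarm/api"
        swarm "github.com/ethereum/go-ethereum/swarm/api/client"
    )

    func main() {
        // placeholder endpoint and locator; any bzz:/ URI with a 64-hex-char hash works here
        client := swarm.NewClient("http://127.0.0.1:8500")
        uri, err := api.Parse("bzz:/0000000000000000000000000000000000000000000000000000000000000000/index.html")
        if err != nil {
            log.Fatalf("could not parse uri: %v", err)
        }
        // mirrors the non-recursive branch of download(): one file, written into "."
        if err := client.DownloadFile(uri.Addr, uri.Path, "."); err != nil {
            log.Fatalf("download failed: %v", err)
        }
    }

For directory manifests the recursive branch calls client.DownloadDirectory with the same three arguments instead.
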
diff --git a/cmd/swarm/export_test.go b/cmd/swarm/export_test.go
new file mode 100644
index 0000000000000..525538ad758f2
--- /dev/null
+++ b/cmd/swarm/export_test.go
@@ -0,0 +1,139 @@
+// Copyright 2018 The go-ethereum Authors
+// This file is part of go-ethereum.
+//
+// go-ethereum is free software: you can redistribute it and/or modify
+// it under the terms of the GNU General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+//
+// go-ethereum is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU General Public License for more details.
+//
+// You should have received a copy of the GNU General Public License
+// along with go-ethereum. If not, see <http://www.gnu.org/licenses/>.
+
+package main
+
+import (
+ "bytes"
+ "crypto/md5"
+ "crypto/rand"
+ "io"
+ "io/ioutil"
+ "net/http"
+ "os"
+ "strings"
+ "testing"
+
+ "github.com/ethereum/go-ethereum/swarm"
+)
+
+// TestCLISwarmExportImport performs the following test:
+// 1. runs swarm node
+// 2. uploads a random file
+// 3. runs an export of the local datastore
+// 4. runs a second swarm node
+// 5. imports the exported datastore
+// 6. fetches the uploaded random file from the second node
+func TestCLISwarmExportImport(t *testing.T) {
+ cluster := newTestCluster(t, 1)
+
+ // generate random 10mb file
+ f, cleanup := generateRandomFile(t, 10000000)
+ defer cleanup()
+
+ // upload the file with 'swarm up' and expect a hash
+ up := runSwarm(t, "--bzzapi", cluster.Nodes[0].URL, "up", f.Name())
+ _, matches := up.ExpectRegexp(`[a-f\d]{64}`)
+ up.ExpectExit()
+ hash := matches[0]
+
+ var info swarm.Info
+ if err := cluster.Nodes[0].Client.Call(&info, "bzz_info"); err != nil {
+ t.Fatal(err)
+ }
+
+ cluster.Stop()
+ defer cluster.Cleanup()
+
+ // generate an export.tar
+ exportCmd := runSwarm(t, "db", "export", info.Path+"/chunks", info.Path+"/export.tar", strings.TrimPrefix(info.BzzKey, "0x"))
+ exportCmd.ExpectExit()
+
+ // start second cluster
+ cluster2 := newTestCluster(t, 1)
+
+ var info2 swarm.Info
+ if err := cluster2.Nodes[0].Client.Call(&info2, "bzz_info"); err != nil {
+ t.Fatal(err)
+ }
+
+ // stop second cluster, so that we close LevelDB
+ cluster2.Stop()
+ defer cluster2.Cleanup()
+
+ // import the export.tar
+ importCmd := runSwarm(t, "db", "import", info2.Path+"/chunks", info.Path+"/export.tar", strings.TrimPrefix(info2.BzzKey, "0x"))
+ importCmd.ExpectExit()
+
+ // spin second cluster back up
+ cluster2.StartExistingNodes(t, 1, strings.TrimPrefix(info2.BzzAccount, "0x"))
+
+ // try to fetch imported file
+ res, err := http.Get(cluster2.Nodes[0].URL + "/bzz:/" + hash)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ if res.StatusCode != 200 {
+ t.Fatalf("expected HTTP status %d, got %s", 200, res.Status)
+ }
+
+ // compare downloaded file with the generated random file
+ mustEqualFiles(t, f, res.Body)
+}
+
+func mustEqualFiles(t *testing.T, up io.Reader, down io.Reader) {
+ h := md5.New()
+ upLen, err := io.Copy(h, up)
+ if err != nil {
+ t.Fatal(err)
+ }
+ upHash := h.Sum(nil)
+ h.Reset()
+ downLen, err := io.Copy(h, down)
+ if err != nil {
+ t.Fatal(err)
+ }
+ downHash := h.Sum(nil)
+
+ if !bytes.Equal(upHash, downHash) || upLen != downLen {
+ t.Fatalf("downloaded imported file md5=%x (length %v) is not the same as the generated one md5=%x (length %v)", downHash, downLen, upHash, upLen)
+ }
+}
+
+func generateRandomFile(t *testing.T, size int) (f *os.File, teardown func()) {
+ // create a tmp file
+ tmp, err := ioutil.TempFile("", "swarm-test")
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ // callback for tmp file cleanup
+ teardown = func() {
+ tmp.Close()
+ os.Remove(tmp.Name())
+ }
+
+ // write random data to file
+ buf := make([]byte, size)
+ _, err = rand.Read(buf)
+ if err != nil {
+ t.Fatal(err)
+ }
+ if err := ioutil.WriteFile(tmp.Name(), buf, 0755); err != nil {
+ t.Fatal(err)
+ }
+
+ return tmp, teardown
+}
diff --git a/cmd/swarm/fs.go b/cmd/swarm/fs.go
new file mode 100644
index 0000000000000..0124586cfe19b
--- /dev/null
+++ b/cmd/swarm/fs.go
@@ -0,0 +1,127 @@
+// Copyright 2018 The go-ethereum Authors
+// This file is part of go-ethereum.
+//
+// go-ethereum is free software: you can redistribute it and/or modify
+// it under the terms of the GNU General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+//
+// go-ethereum is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU General Public License for more details.
+//
+// You should have received a copy of the GNU General Public License
+// along with go-ethereum. If not, see <http://www.gnu.org/licenses/>.
+
+package main
+
+import (
+ "context"
+ "fmt"
+ "path/filepath"
+ "strings"
+ "time"
+
+ "github.com/ethereum/go-ethereum/cmd/utils"
+ "github.com/ethereum/go-ethereum/node"
+ "github.com/ethereum/go-ethereum/rpc"
+ "github.com/ethereum/go-ethereum/swarm/fuse"
+ "gopkg.in/urfave/cli.v1"
+)
+
+func mount(cliContext *cli.Context) {
+ args := cliContext.Args()
+ if len(args) < 2 {
+ utils.Fatalf("Usage: swarm fs mount --ipcpath <path to bzzd.ipc> <manifest hash> <mount point>")
+ }
+
+ client, err := dialRPC(cliContext)
+ if err != nil {
+ utils.Fatalf("had an error dialing to RPC endpoint: %v", err)
+ }
+ defer client.Close()
+
+ ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second)
+ defer cancel()
+
+ mf := &fuse.MountInfo{}
+ mountPoint, err := filepath.Abs(filepath.Clean(args[1]))
+ if err != nil {
+ utils.Fatalf("error expanding path for mount point: %v", err)
+ }
+ err = client.CallContext(ctx, mf, "swarmfs_mount", args[0], mountPoint)
+ if err != nil {
+ utils.Fatalf("had an error calling the RPC endpoint while mounting: %v", err)
+ }
+}
+
+func unmount(cliContext *cli.Context) {
+ args := cliContext.Args()
+
+ if len(args) < 1 {
+ utils.Fatalf("Usage: swarm fs unmount --ipcpath <path to bzzd.ipc> <mount point>")
+ }
+ client, err := dialRPC(cliContext)
+ if err != nil {
+ utils.Fatalf("had an error dialing to RPC endpoint: %v", err)
+ }
+ defer client.Close()
+
+ ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second)
+ defer cancel()
+
+ mf := fuse.MountInfo{}
+ err = client.CallContext(ctx, &mf, "swarmfs_unmount", args[0])
+ if err != nil {
+ utils.Fatalf("encountered an error calling the RPC endpoint while unmounting: %v", err)
+ }
+ fmt.Printf("%s\n", mf.LatestManifest) //print the latest manifest hash for user reference
+}
+
+func listMounts(cliContext *cli.Context) {
+ client, err := dialRPC(cliContext)
+ if err != nil {
+ utils.Fatalf("had an error dialing to RPC endpoint: %v", err)
+ }
+ defer client.Close()
+
+ ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second)
+ defer cancel()
+
+ mf := []fuse.MountInfo{}
+ err = client.CallContext(ctx, &mf, "swarmfs_listmounts")
+ if err != nil {
+ utils.Fatalf("encountered an error calling the RPC endpoint while listing mounts: %v", err)
+ }
+ if len(mf) == 0 {
+ fmt.Print("Could not find any swarmfs mounts. 
Please make sure you've specified the correct RPC endpoint\n") + } else { + fmt.Printf("Found %d swarmfs mount(s):\n", len(mf)) + for i, mountInfo := range mf { + fmt.Printf("%d:\n", i) + fmt.Printf("\tMount point: %s\n", mountInfo.MountPoint) + fmt.Printf("\tLatest Manifest: %s\n", mountInfo.LatestManifest) + fmt.Printf("\tStart Manifest: %s\n", mountInfo.StartManifest) + } + } +} + +func dialRPC(ctx *cli.Context) (*rpc.Client, error) { + var endpoint string + + if ctx.IsSet(utils.IPCPathFlag.Name) { + endpoint = ctx.String(utils.IPCPathFlag.Name) + } else { + utils.Fatalf("swarm ipc endpoint not specified") + } + + if endpoint == "" { + endpoint = node.DefaultIPCEndpoint(clientIdentifier) + } else if strings.HasPrefix(endpoint, "rpc:") || strings.HasPrefix(endpoint, "ipc:") { + // Backwards compatibility with geth < 1.5 which required + // these prefixes. + endpoint = endpoint[4:] + } + return rpc.Dial(endpoint) +} diff --git a/cmd/swarm/fs_test.go b/cmd/swarm/fs_test.go new file mode 100644 index 0000000000000..25705c0a49f96 --- /dev/null +++ b/cmd/swarm/fs_test.go @@ -0,0 +1,234 @@ +// Copyright 2018 The go-ethereum Authors +// This file is part of go-ethereum. +// +// go-ethereum is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. +// +// go-ethereum is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. +// +// You should have received a copy of the GNU General Public License +// along with go-ethereum. If not, see . + +package main + +import ( + "bytes" + "io" + "io/ioutil" + "os" + "path/filepath" + "strings" + "testing" + "time" + + "github.com/ethereum/go-ethereum/log" + colorable "github.com/mattn/go-colorable" +) + +func init() { + log.PrintOrigins(true) + log.Root().SetHandler(log.LvlFilterHandler(log.Lvl(*loglevel), log.StreamHandler(colorable.NewColorableStderr(), log.TerminalFormat(true)))) +} + +type testFile struct { + filePath string + content string +} + +// TestCLISwarmFs is a high-level test of swarmfs +func TestCLISwarmFs(t *testing.T) { + cluster := newTestCluster(t, 3) + defer cluster.Shutdown() + + // create a tmp dir + mountPoint, err := ioutil.TempDir("", "swarm-test") + log.Debug("swarmfs cli test", "1st mount", mountPoint) + if err != nil { + t.Fatal(err) + } + defer os.RemoveAll(mountPoint) + + handlingNode := cluster.Nodes[0] + mhash := doUploadEmptyDir(t, handlingNode) + log.Debug("swarmfs cli test: mounting first run", "ipc path", filepath.Join(handlingNode.Dir, handlingNode.IpcPath)) + + mount := runSwarm(t, []string{ + "fs", + "mount", + "--ipcpath", filepath.Join(handlingNode.Dir, handlingNode.IpcPath), + mhash, + mountPoint, + }...) 
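
Note that dialRPC above keeps backwards compatibility with the endpoint strings accepted by geth < 1.5. A small sketch of just that normalization step; normalizeEndpoint is a hypothetical helper distilled from the function, not part of this patch (assumes "strings" is imported):

    // strip the legacy "rpc:" / "ipc:" prefixes that geth < 1.5 required
    func normalizeEndpoint(endpoint string) string {
        if strings.HasPrefix(endpoint, "rpc:") || strings.HasPrefix(endpoint, "ipc:") {
            return endpoint[4:]
        }
        return endpoint
    }

So normalizeEndpoint("ipc:/var/run/bzzd.ipc") yields "/var/run/bzzd.ipc", which is what ultimately gets passed to rpc.Dial.
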
+ mount.ExpectExit() + + filesToAssert := []*testFile{} + + dirPath, err := createDirInDir(mountPoint, "testSubDir") + if err != nil { + t.Fatal(err) + } + dirPath2, err := createDirInDir(dirPath, "AnotherTestSubDir") + + dummyContent := "somerandomtestcontentthatshouldbeasserted" + dirs := []string{ + mountPoint, + dirPath, + dirPath2, + } + files := []string{"f1.tmp", "f2.tmp"} + for _, d := range dirs { + for _, entry := range files { + tFile, err := createTestFileInPath(d, entry, dummyContent) + if err != nil { + t.Fatal(err) + } + filesToAssert = append(filesToAssert, tFile) + } + } + if len(filesToAssert) != len(dirs)*len(files) { + t.Fatalf("should have %d files to assert now, got %d", len(dirs)*len(files), len(filesToAssert)) + } + hashRegexp := `[a-f\d]{64}` + log.Debug("swarmfs cli test: unmounting first run...", "ipc path", filepath.Join(handlingNode.Dir, handlingNode.IpcPath)) + + unmount := runSwarm(t, []string{ + "fs", + "unmount", + "--ipcpath", filepath.Join(handlingNode.Dir, handlingNode.IpcPath), + mountPoint, + }...) + _, matches := unmount.ExpectRegexp(hashRegexp) + unmount.ExpectExit() + + hash := matches[0] + if hash == mhash { + t.Fatal("this should not be equal") + } + log.Debug("swarmfs cli test: asserting no files in mount point") + + //check that there's nothing in the mount folder + filesInDir, err := ioutil.ReadDir(mountPoint) + if err != nil { + t.Fatalf("had an error reading the directory: %v", err) + } + + if len(filesInDir) != 0 { + t.Fatal("there shouldn't be anything here") + } + + secondMountPoint, err := ioutil.TempDir("", "swarm-test") + log.Debug("swarmfs cli test", "2nd mount point at", secondMountPoint) + if err != nil { + t.Fatal(err) + } + defer os.RemoveAll(secondMountPoint) + + log.Debug("swarmfs cli test: remounting at second mount point", "ipc path", filepath.Join(handlingNode.Dir, handlingNode.IpcPath)) + + //remount, check files + newMount := runSwarm(t, []string{ + "fs", + "mount", + "--ipcpath", filepath.Join(handlingNode.Dir, handlingNode.IpcPath), + hash, // the latest hash + secondMountPoint, + }...) + + newMount.ExpectExit() + time.Sleep(1 * time.Second) + + filesInDir, err = ioutil.ReadDir(secondMountPoint) + if err != nil { + t.Fatal(err) + } + + if len(filesInDir) == 0 { + t.Fatal("there should be something here") + } + + log.Debug("swarmfs cli test: traversing file tree to see it matches previous mount") + + for _, file := range filesToAssert { + file.filePath = strings.Replace(file.filePath, mountPoint, secondMountPoint, -1) + fileBytes, err := ioutil.ReadFile(file.filePath) + + if err != nil { + t.Fatal(err) + } + if !bytes.Equal(fileBytes, bytes.NewBufferString(file.content).Bytes()) { + t.Fatal("this should be equal") + } + } + + log.Debug("swarmfs cli test: unmounting second run", "ipc path", filepath.Join(handlingNode.Dir, handlingNode.IpcPath)) + + unmountSec := runSwarm(t, []string{ + "fs", + "unmount", + "--ipcpath", filepath.Join(handlingNode.Dir, handlingNode.IpcPath), + secondMountPoint, + }...) 
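
For reference, the CLI commands exercised by this test map onto the swarmfs_* RPC methods shown in fs.go earlier. A hedged sketch of issuing the mount call directly over IPC; the IPC path, hash and mount point are placeholders:

    package main

    import (
        "context"
        "log"
        "time"

        "github.com/ethereum/go-ethereum/rpc"
        "github.com/ethereum/go-ethereum/swarm/fuse"
    )

    func main() {
        client, err := rpc.Dial("/path/to/bzzd.ipc") // placeholder IPC path
        if err != nil {
            log.Fatal(err)
        }
        defer client.Close()

        ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second)
        defer cancel()

        mi := &fuse.MountInfo{}
        // the same call mount() issues: manifest hash first, absolute mount point second
        if err := client.CallContext(ctx, mi, "swarmfs_mount",
            "0000000000000000000000000000000000000000000000000000000000000000", // placeholder hash
            "/mnt/swarm"); err != nil {
            log.Fatal(err)
        }
        log.Printf("mounted, latest manifest: %s", mi.LatestManifest)
    }
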
+ + _, matches = unmountSec.ExpectRegexp(hashRegexp) + unmountSec.ExpectExit() + + if matches[0] != hash { + t.Fatal("these should be equal - no changes made") + } +} + +func doUploadEmptyDir(t *testing.T, node *testNode) string { + // create a tmp dir + tmpDir, err := ioutil.TempDir("", "swarm-test") + if err != nil { + t.Fatal(err) + } + defer os.RemoveAll(tmpDir) + + hashRegexp := `[a-f\d]{64}` + + flags := []string{ + "--bzzapi", node.URL, + "--recursive", + "up", + tmpDir} + + log.Info("swarmfs cli test: uploading dir with 'swarm up'") + up := runSwarm(t, flags...) + _, matches := up.ExpectRegexp(hashRegexp) + up.ExpectExit() + hash := matches[0] + log.Info("swarmfs cli test: dir uploaded", "hash", hash) + return hash +} + +func createDirInDir(createInDir string, dirToCreate string) (string, error) { + fullpath := filepath.Join(createInDir, dirToCreate) + err := os.MkdirAll(fullpath, 0777) + if err != nil { + return "", err + } + return fullpath, nil +} + +func createTestFileInPath(dir, filename, content string) (*testFile, error) { + tFile := &testFile{} + filePath := filepath.Join(dir, filename) + if file, err := os.Create(filePath); err == nil { + tFile.content = content + tFile.filePath = filePath + + _, err = io.WriteString(file, content) + if err != nil { + return nil, err + } + file.Close() + } + + return tFile, nil +} diff --git a/cmd/swarm/hash.go b/cmd/swarm/hash.go index 792e8d0d7afb4..c82456b3cddae 100644 --- a/cmd/swarm/hash.go +++ b/cmd/swarm/hash.go @@ -38,11 +38,11 @@ func hash(ctx *cli.Context) { defer f.Close() stat, _ := f.Stat() - chunker := storage.NewTreeChunker(storage.NewChunkerParams()) - key, err := chunker.Split(f, stat.Size(), nil, nil, nil) + fileStore := storage.NewFileStore(storage.NewMapChunkStore(), storage.NewFileStoreParams()) + addr, _, err := fileStore.Store(f, stat.Size(), false) if err != nil { utils.Fatalf("%v\n", err) } else { - fmt.Printf("%v\n", key) + fmt.Printf("%v\n", addr) } } diff --git a/cmd/swarm/main.go b/cmd/swarm/main.go index 360020b77b8c9..9877e9150d470 100644 --- a/cmd/swarm/main.go +++ b/cmd/swarm/main.go @@ -34,7 +34,6 @@ import ( "github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/console" "github.com/ethereum/go-ethereum/crypto" - "github.com/ethereum/go-ethereum/ethclient" "github.com/ethereum/go-ethereum/internal/debug" "github.com/ethereum/go-ethereum/log" "github.com/ethereum/go-ethereum/node" @@ -49,6 +48,22 @@ import ( ) const clientIdentifier = "swarm" +const helpTemplate = `NAME: +{{.HelpName}} - {{.Usage}} + +USAGE: +{{if .UsageText}}{{.UsageText}}{{else}}{{.HelpName}}{{if .VisibleFlags}} [command options]{{end}} {{if .ArgsUsage}}{{.ArgsUsage}}{{else}}[arguments...]{{end}}{{end}}{{if .Category}} + +CATEGORY: +{{.Category}}{{end}}{{if .Description}} + +DESCRIPTION: +{{.Description}}{{end}}{{if .VisibleFlags}} + +OPTIONS: +{{range .VisibleFlags}}{{.}} +{{end}}{{end}} +` var ( gitCommit string // Git SHA1 commit hash of the release (set via linker flags) @@ -87,10 +102,6 @@ var ( Usage: "Network identifier (integer, default 3=swarm testnet)", EnvVar: SWARM_ENV_NETWORK_ID, } - SwarmConfigPathFlag = cli.StringFlag{ - Name: "bzzconfig", - Usage: "DEPRECATED: please use --config path/to/TOML-file", - } SwarmSwapEnabledFlag = cli.BoolFlag{ Name: "swap", Usage: "Swarm SWAP enabled (default false)", @@ -101,10 +112,20 @@ var ( Usage: "URL of the Ethereum API provider to use to settle SWAP payments", EnvVar: SWARM_ENV_SWAP_API, } - SwarmSyncEnabledFlag = cli.BoolTFlag{ - Name: "sync", - Usage: "Swarm 
Syncing enabled (default true)", - EnvVar: SWARM_ENV_SYNC_ENABLE, + SwarmSyncDisabledFlag = cli.BoolTFlag{ + Name: "nosync", + Usage: "Disable swarm syncing", + EnvVar: SWARM_ENV_SYNC_DISABLE, + } + SwarmSyncUpdateDelay = cli.DurationFlag{ + Name: "sync-update-delay", + Usage: "Duration for sync subscriptions update after no new peers are added (default 15s)", + EnvVar: SWARM_ENV_SYNC_UPDATE_DELAY, + } + SwarmDeliverySkipCheckFlag = cli.BoolFlag{ + Name: "delivery-skip-check", + Usage: "Skip chunk delivery check (default false)", + EnvVar: SWARM_ENV_DELIVERY_SKIP_CHECK, } EnsAPIFlag = cli.StringSliceFlag{ Name: "ens-api", @@ -116,7 +137,7 @@ var ( Usage: "Swarm HTTP endpoint", Value: "http://127.0.0.1:8500", } - SwarmRecursiveUploadFlag = cli.BoolFlag{ + SwarmRecursiveFlag = cli.BoolFlag{ Name: "recursive", Usage: "Upload directories recursively", } @@ -136,20 +157,29 @@ var ( Name: "mime", Usage: "force mime type", } + SwarmEncryptedFlag = cli.BoolFlag{ + Name: "encrypt", + Usage: "use encrypted upload", + } CorsStringFlag = cli.StringFlag{ Name: "corsdomain", Usage: "Domain on which to send Access-Control-Allow-Origin header (multiple domains can be supplied separated by a ',')", EnvVar: SWARM_ENV_CORS, } - - // the following flags are deprecated and should be removed in the future - DeprecatedEthAPIFlag = cli.StringFlag{ - Name: "ethapi", - Usage: "DEPRECATED: please use --ens-api and --swap-api", + SwarmStorePath = cli.StringFlag{ + Name: "store.path", + Usage: "Path to leveldb chunk DB (default <$GETH_ENV_DIR>/swarm/bzz-<$BZZ_KEY>/chunks)", + EnvVar: SWARM_ENV_STORE_PATH, } - DeprecatedEnsAddrFlag = cli.StringFlag{ - Name: "ens-addr", - Usage: "DEPRECATED: ENS contract address, please use --ens-api with contract address according to its format", + SwarmStoreCapacity = cli.Uint64Flag{ + Name: "store.size", + Usage: "Number of chunks (5M is roughly 20-25GB) (default 5000000)", + EnvVar: SWARM_ENV_STORE_CAPACITY, + } + SwarmStoreCacheCapacity = cli.UintFlag{ + Name: "store.cache.size", + Usage: "Number of recent chunks cached in memory (default 5000)", + EnvVar: SWARM_ENV_STORE_CACHE_CAPACITY, } ) @@ -180,91 +210,130 @@ func init() { app.Copyright = "Copyright 2013-2016 The go-ethereum Authors" app.Commands = []cli.Command{ { - Action: version, - Name: "version", - Usage: "Print version numbers", - ArgsUsage: " ", - Description: ` -The output of this command is supposed to be machine-readable. -`, + Action: version, + CustomHelpTemplate: helpTemplate, + Name: "version", + Usage: "Print version numbers", + Description: "The output of this command is supposed to be machine-readable", }, { - Action: upload, - Name: "up", - Usage: "upload a file or directory to swarm using the HTTP API", - ArgsUsage: " ", - Description: ` -"upload a file or directory to swarm using the HTTP API and prints the root hash", -`, + Action: upload, + CustomHelpTemplate: helpTemplate, + Name: "up", + Usage: "uploads a file or directory to swarm using the HTTP API", + ArgsUsage: "", + Flags: []cli.Flag{SwarmEncryptedFlag}, + Description: "uploads a file or directory to swarm using the HTTP API and prints the root hash", }, { - Action: list, - Name: "ls", - Usage: "list files and directories contained in a manifest", - ArgsUsage: " []", - Description: ` -Lists files and directories contained in a manifest. 
-`, + Action: list, + CustomHelpTemplate: helpTemplate, + Name: "ls", + Usage: "list files and directories contained in a manifest", + ArgsUsage: " []", + Description: "Lists files and directories contained in a manifest", }, { - Action: hash, - Name: "hash", - Usage: "print the swarm hash of a file or directory", - ArgsUsage: " ", - Description: ` -Prints the swarm hash of file or directory. -`, + Action: hash, + CustomHelpTemplate: helpTemplate, + Name: "hash", + Usage: "print the swarm hash of a file or directory", + ArgsUsage: "", + Description: "Prints the swarm hash of file or directory", }, { - Name: "manifest", - Usage: "update a MANIFEST", - ArgsUsage: "manifest COMMAND", + Action: download, + Name: "down", + Flags: []cli.Flag{SwarmRecursiveFlag}, + Usage: "downloads a swarm manifest or a file inside a manifest", + ArgsUsage: " []", Description: ` -Updates a MANIFEST by adding/removing/updating the hash of a path. +Downloads a swarm bzz uri to the given dir. When no dir is provided, working directory is assumed. --recursive flag is expected when downloading a manifest with multiple entries. `, + }, + + { + Name: "manifest", + CustomHelpTemplate: helpTemplate, + Usage: "perform operations on swarm manifests", + ArgsUsage: "COMMAND", + Description: "Updates a MANIFEST by adding/removing/updating the hash of a path.\nCOMMAND could be: add, update, remove", Subcommands: []cli.Command{ { - Action: add, - Name: "add", - Usage: "add a new path to the manifest", - ArgsUsage: " []", - Description: ` -Adds a new path to the manifest -`, + Action: add, + CustomHelpTemplate: helpTemplate, + Name: "add", + Usage: "add a new path to the manifest", + ArgsUsage: " []", + Description: "Adds a new path to the manifest", }, { - Action: update, - Name: "update", - Usage: "update the hash for an already existing path in the manifest", - ArgsUsage: " []", - Description: ` -Update the hash for an already existing path in the manifest -`, + Action: update, + CustomHelpTemplate: helpTemplate, + Name: "update", + Usage: "update the hash for an already existing path in the manifest", + ArgsUsage: " []", + Description: "Update the hash for an already existing path in the manifest", }, { - Action: remove, - Name: "remove", - Usage: "removes a path from the manifest", - ArgsUsage: " ", - Description: ` -Removes a path from the manifest -`, + Action: remove, + CustomHelpTemplate: helpTemplate, + Name: "remove", + Usage: "removes a path from the manifest", + ArgsUsage: " ", + Description: "Removes a path from the manifest", }, }, }, { - Name: "db", - Usage: "manage the local chunk database", - ArgsUsage: "db COMMAND", - Description: ` -Manage the local chunk database. -`, + Name: "fs", + CustomHelpTemplate: helpTemplate, + Usage: "perform FUSE operations", + ArgsUsage: "fs COMMAND", + Description: "Performs FUSE operations by mounting/unmounting/listing mount points. This assumes you already have a Swarm node running locally. For all operation you must reference the correct path to bzzd.ipc in order to communicate with the node", Subcommands: []cli.Command{ { - Action: dbExport, - Name: "export", - Usage: "export a local chunk database as a tar archive (use - to send to stdout)", - ArgsUsage: " ", + Action: mount, + CustomHelpTemplate: helpTemplate, + Name: "mount", + Flags: []cli.Flag{utils.IPCPathFlag}, + Usage: "mount a swarm hash to a mount point", + ArgsUsage: "swarm fs mount --ipcpath ", + Description: "Mounts a Swarm manifest hash to a given mount point. 
This assumes you already have a Swarm node running locally. You must reference the correct path to your bzzd.ipc file", + }, + { + Action: unmount, + CustomHelpTemplate: helpTemplate, + Name: "unmount", + Flags: []cli.Flag{utils.IPCPathFlag}, + Usage: "unmount a swarmfs mount", + ArgsUsage: "swarm fs unmount --ipcpath ", + Description: "Unmounts a swarmfs mount residing at . This assumes you already have a Swarm node running locally. You must reference the correct path to your bzzd.ipc file", + }, + { + Action: listMounts, + CustomHelpTemplate: helpTemplate, + Name: "list", + Flags: []cli.Flag{utils.IPCPathFlag}, + Usage: "list swarmfs mounts", + ArgsUsage: "swarm fs list --ipcpath ", + Description: "Lists all mounted swarmfs volumes. This assumes you already have a Swarm node running locally. You must reference the correct path to your bzzd.ipc file", + }, + }, + }, + { + Name: "db", + CustomHelpTemplate: helpTemplate, + Usage: "manage the local chunk database", + ArgsUsage: "db COMMAND", + Description: "Manage the local chunk database", + Subcommands: []cli.Command{ + { + Action: dbExport, + CustomHelpTemplate: helpTemplate, + Name: "export", + Usage: "export a local chunk database as a tar archive (use - to send to stdout)", + ArgsUsage: " ", Description: ` Export a local chunk database as a tar archive (use - to send to stdout). @@ -277,10 +346,11 @@ pv(1) tool to get a progress bar: `, }, { - Action: dbImport, - Name: "import", - Usage: "import chunks from a tar archive into a local chunk database (use - to read from stdin)", - ArgsUsage: " ", + Action: dbImport, + CustomHelpTemplate: helpTemplate, + Name: "import", + Usage: "import chunks from a tar archive into a local chunk database (use - to read from stdin)", + ArgsUsage: " ", Description: ` Import chunks from a tar archive into a local chunk database (use - to read from stdin). @@ -293,27 +363,16 @@ pv(1) tool to get a progress bar: `, }, { - Action: dbClean, - Name: "clean", - Usage: "remove corrupt entries from a local chunk database", - ArgsUsage: "", - Description: ` -Remove corrupt entries from a local chunk database. -`, + Action: dbClean, + CustomHelpTemplate: helpTemplate, + Name: "clean", + Usage: "remove corrupt entries from a local chunk database", + ArgsUsage: "", + Description: "Remove corrupt entries from a local chunk database", }, }, }, - { - Action: func(ctx *cli.Context) { - utils.Fatalf("ERROR: 'swarm cleandb' has been removed, please use 'swarm db clean'.") - }, - Name: "cleandb", - Usage: "DEPRECATED: use 'swarm db clean'", - ArgsUsage: " ", - Description: ` -DEPRECATED: use 'swarm db clean'. -`, - }, + // See config.go DumpConfigCommand, } @@ -339,10 +398,11 @@ DEPRECATED: use 'swarm db clean'. CorsStringFlag, EnsAPIFlag, SwarmTomlConfigPathFlag, - SwarmConfigPathFlag, SwarmSwapEnabledFlag, SwarmSwapAPIFlag, - SwarmSyncEnabledFlag, + SwarmSyncDisabledFlag, + SwarmSyncUpdateDelay, + SwarmDeliverySkipCheckFlag, SwarmListenAddrFlag, SwarmPortFlag, SwarmAccountFlag, @@ -350,15 +410,24 @@ DEPRECATED: use 'swarm db clean'. 
ChequebookAddrFlag, // upload flags SwarmApiFlag, - SwarmRecursiveUploadFlag, + SwarmRecursiveFlag, SwarmWantManifestFlag, SwarmUploadDefaultPath, SwarmUpFromStdinFlag, SwarmUploadMimeType, - //deprecated flags - DeprecatedEthAPIFlag, - DeprecatedEnsAddrFlag, - } + // storage flags + SwarmStorePath, + SwarmStoreCapacity, + SwarmStoreCacheCapacity, + } + rpcFlags := []cli.Flag{ + utils.WSEnabledFlag, + utils.WSListenAddrFlag, + utils.WSPortFlag, + utils.WSApiFlag, + utils.WSAllowedOriginsFlag, + } + app.Flags = append(app.Flags, rpcFlags...) app.Flags = append(app.Flags, debug.Flags...) app.Flags = append(app.Flags, swarmmetrics.Flags...) app.Before = func(ctx *cli.Context) error { @@ -383,16 +452,12 @@ func main() { } func version(ctx *cli.Context) error { - fmt.Println(strings.Title(clientIdentifier)) - fmt.Println("Version:", params.Version) + fmt.Println("Version:", SWARM_VERSION) if gitCommit != "" { fmt.Println("Git Commit:", gitCommit) } - fmt.Println("Network Id:", ctx.GlobalInt(utils.NetworkIdFlag.Name)) fmt.Println("Go Version:", runtime.Version()) fmt.Println("OS:", runtime.GOOS) - fmt.Printf("GOPATH=%s\n", os.Getenv("GOPATH")) - fmt.Printf("GOROOT=%s\n", runtime.GOROOT()) return nil } @@ -405,6 +470,10 @@ func bzzd(ctx *cli.Context) error { } cfg := defaultNodeConfig + + //pss operates on ws + cfg.WSModules = append(cfg.WSModules, "pss") + //geth only supports --datadir via command line //in order to be consistent within swarm, if we pass --datadir via environment variable //or via config file, we get the same directory for geth and swarm @@ -421,7 +490,7 @@ func bzzd(ctx *cli.Context) error { //due to overriding behavior initSwarmNode(bzzconfig, stack, ctx) //register BZZ as node.Service in the ethereum node - registerBzzService(bzzconfig, ctx, stack) + registerBzzService(bzzconfig, stack) //start the node utils.StartNode(stack) @@ -439,7 +508,7 @@ func bzzd(ctx *cli.Context) error { bootnodes := strings.Split(bzzconfig.BootNodes, ",") injectBootnodes(stack.Server(), bootnodes) } else { - if bzzconfig.NetworkId == 3 { + if bzzconfig.NetworkID == 3 { injectBootnodes(stack.Server(), testbetBootNodes) } } @@ -448,21 +517,11 @@ func bzzd(ctx *cli.Context) error { return nil } -func registerBzzService(bzzconfig *bzzapi.Config, ctx *cli.Context, stack *node.Node) { - +func registerBzzService(bzzconfig *bzzapi.Config, stack *node.Node) { //define the swarm service boot function - boot := func(ctx *node.ServiceContext) (node.Service, error) { - var swapClient *ethclient.Client - var err error - if bzzconfig.SwapApi != "" { - log.Info("connecting to SWAP API", "url", bzzconfig.SwapApi) - swapClient, err = ethclient.Dial(bzzconfig.SwapApi) - if err != nil { - return nil, fmt.Errorf("error connecting to SWAP API %s: %s", bzzconfig.SwapApi, err) - } - } - - return swarm.NewSwarm(ctx, swapClient, bzzconfig) + boot := func(_ *node.ServiceContext) (node.Service, error) { + // In production, mockStore must be always nil. 
+ return swarm.NewSwarm(bzzconfig, nil) } //register within the ethereum node if err := stack.Register(boot); err != nil { diff --git a/cmd/swarm/manifest.go b/cmd/swarm/manifest.go index 41a69a5d05f36..82166edf6c808 100644 --- a/cmd/swarm/manifest.go +++ b/cmd/swarm/manifest.go @@ -131,13 +131,13 @@ func addEntryToManifest(ctx *cli.Context, mhash, path, hash, ctype string) strin longestPathEntry = api.ManifestEntry{} ) - mroot, err := client.DownloadManifest(mhash) + mroot, isEncrypted, err := client.DownloadManifest(mhash) if err != nil { utils.Fatalf("Manifest download failed: %v", err) } //TODO: check if the "hash" to add is valid and present in swarm - _, err = client.DownloadManifest(hash) + _, _, err = client.DownloadManifest(hash) if err != nil { utils.Fatalf("Hash to add is not present: %v", err) } @@ -180,7 +180,7 @@ func addEntryToManifest(ctx *cli.Context, mhash, path, hash, ctype string) strin mroot.Entries = append(mroot.Entries, newEntry) } - newManifestHash, err := client.UploadManifest(mroot) + newManifestHash, err := client.UploadManifest(mroot, isEncrypted) if err != nil { utils.Fatalf("Manifest upload failed: %v", err) } @@ -197,7 +197,7 @@ func updateEntryInManifest(ctx *cli.Context, mhash, path, hash, ctype string) st longestPathEntry = api.ManifestEntry{} ) - mroot, err := client.DownloadManifest(mhash) + mroot, isEncrypted, err := client.DownloadManifest(mhash) if err != nil { utils.Fatalf("Manifest download failed: %v", err) } @@ -257,7 +257,7 @@ func updateEntryInManifest(ctx *cli.Context, mhash, path, hash, ctype string) st mroot = newMRoot } - newManifestHash, err := client.UploadManifest(mroot) + newManifestHash, err := client.UploadManifest(mroot, isEncrypted) if err != nil { utils.Fatalf("Manifest upload failed: %v", err) } @@ -273,7 +273,7 @@ func removeEntryFromManifest(ctx *cli.Context, mhash, path string) string { longestPathEntry = api.ManifestEntry{} ) - mroot, err := client.DownloadManifest(mhash) + mroot, isEncrypted, err := client.DownloadManifest(mhash) if err != nil { utils.Fatalf("Manifest download failed: %v", err) } @@ -323,7 +323,7 @@ func removeEntryFromManifest(ctx *cli.Context, mhash, path string) string { mroot = newMRoot } - newManifestHash, err := client.UploadManifest(mroot) + newManifestHash, err := client.UploadManifest(mroot, isEncrypted) if err != nil { utils.Fatalf("Manifest upload failed: %v", err) } diff --git a/cmd/swarm/run_test.go b/cmd/swarm/run_test.go index 594cfa55cb486..a70c4686dd515 100644 --- a/cmd/swarm/run_test.go +++ b/cmd/swarm/run_test.go @@ -81,6 +81,7 @@ type testCluster struct { // // When starting more than one node, they are connected together using the // admin SetPeer RPC method. 
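
The manifest.go changes above thread a new isEncrypted flag from DownloadManifest through to UploadManifest, so that edited manifests are re-uploaded with the same encryption mode they were stored with. The pattern all three edit helpers (add, update, remove) now share, in isolation; mhash is a placeholder and client/utils come from the surrounding file:

    // download, mutate, re-upload - preserving the manifest's encryption mode
    mroot, isEncrypted, err := client.DownloadManifest(mhash)
    if err != nil {
        utils.Fatalf("Manifest download failed: %v", err)
    }
    // ... add/update/remove entries on mroot here ...
    newManifestHash, err := client.UploadManifest(mroot, isEncrypted)
    if err != nil {
        utils.Fatalf("Manifest upload failed: %v", err)
    }
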
+ func newTestCluster(t *testing.T, size int) *testCluster { cluster := &testCluster{} defer func() { @@ -96,18 +97,7 @@ func newTestCluster(t *testing.T, size int) *testCluster { cluster.TmpDir = tmpdir // start the nodes - cluster.Nodes = make([]*testNode, 0, size) - for i := 0; i < size; i++ { - dir := filepath.Join(cluster.TmpDir, fmt.Sprintf("swarm%02d", i)) - if err := os.Mkdir(dir, 0700); err != nil { - t.Fatal(err) - } - - node := newTestNode(t, dir) - node.Name = fmt.Sprintf("swarm%02d", i) - - cluster.Nodes = append(cluster.Nodes, node) - } + cluster.StartNewNodes(t, size) if size == 1 { return cluster @@ -145,14 +135,51 @@ func (c *testCluster) Shutdown() { os.RemoveAll(c.TmpDir) } +func (c *testCluster) Stop() { + for _, node := range c.Nodes { + node.Shutdown() + } +} + +func (c *testCluster) StartNewNodes(t *testing.T, size int) { + c.Nodes = make([]*testNode, 0, size) + for i := 0; i < size; i++ { + dir := filepath.Join(c.TmpDir, fmt.Sprintf("swarm%02d", i)) + if err := os.Mkdir(dir, 0700); err != nil { + t.Fatal(err) + } + + node := newTestNode(t, dir) + node.Name = fmt.Sprintf("swarm%02d", i) + + c.Nodes = append(c.Nodes, node) + } +} + +func (c *testCluster) StartExistingNodes(t *testing.T, size int, bzzaccount string) { + c.Nodes = make([]*testNode, 0, size) + for i := 0; i < size; i++ { + dir := filepath.Join(c.TmpDir, fmt.Sprintf("swarm%02d", i)) + node := existingTestNode(t, dir, bzzaccount) + node.Name = fmt.Sprintf("swarm%02d", i) + + c.Nodes = append(c.Nodes, node) + } +} + +func (c *testCluster) Cleanup() { + os.RemoveAll(c.TmpDir) +} + type testNode struct { - Name string - Addr string - URL string - Enode string - Dir string - Client *rpc.Client - Cmd *cmdtest.TestCmd + Name string + Addr string + URL string + Enode string + Dir string + IpcPath string + Client *rpc.Client + Cmd *cmdtest.TestCmd } const testPassphrase = "swarm-test-passphrase" @@ -181,6 +208,72 @@ func getTestAccount(t *testing.T, dir string) (conf *node.Config, account accoun return conf, account } +func existingTestNode(t *testing.T, dir string, bzzaccount string) *testNode { + conf, _ := getTestAccount(t, dir) + node := &testNode{Dir: dir} + + // use a unique IPCPath when running tests on Windows + if runtime.GOOS == "windows" { + conf.IPCPath = fmt.Sprintf("bzzd-%s.ipc", bzzaccount) + } + + // assign ports + httpPort, err := assignTCPPort() + if err != nil { + t.Fatal(err) + } + p2pPort, err := assignTCPPort() + if err != nil { + t.Fatal(err) + } + + // start the node + node.Cmd = runSwarm(t, + "--port", p2pPort, + "--nodiscover", + "--datadir", dir, + "--ipcpath", conf.IPCPath, + "--ens-api", "", + "--bzzaccount", bzzaccount, + "--bzznetworkid", "321", + "--bzzport", httpPort, + "--verbosity", "6", + ) + node.Cmd.InputLine(testPassphrase) + defer func() { + if t.Failed() { + node.Shutdown() + } + }() + + // wait for the node to start + for start := time.Now(); time.Since(start) < 10*time.Second; time.Sleep(50 * time.Millisecond) { + node.Client, err = rpc.Dial(conf.IPCEndpoint()) + if err == nil { + break + } + } + if node.Client == nil { + t.Fatal(err) + } + + // load info + var info swarm.Info + if err := node.Client.Call(&info, "bzz_info"); err != nil { + t.Fatal(err) + } + node.Addr = net.JoinHostPort("127.0.0.1", info.Port) + node.URL = "http://" + node.Addr + + var nodeInfo p2p.NodeInfo + if err := node.Client.Call(&nodeInfo, "admin_nodeInfo"); err != nil { + t.Fatal(err) + } + node.Enode = fmt.Sprintf("enode://%s@127.0.0.1:%s", nodeInfo.ID, p2pPort) + + return node +} + func 
newTestNode(t *testing.T, dir string) *testNode { conf, account := getTestAccount(t, dir) @@ -239,6 +332,7 @@ func newTestNode(t *testing.T, dir string) *testNode { t.Fatal(err) } node.Enode = fmt.Sprintf("enode://%s@127.0.0.1:%s", nodeInfo.ID, p2pPort) + node.IpcPath = conf.IPCPath return node } diff --git a/cmd/swarm/swarm-smoke/main.go b/cmd/swarm/swarm-smoke/main.go new file mode 100644 index 0000000000000..87bc39816d6e0 --- /dev/null +++ b/cmd/swarm/swarm-smoke/main.go @@ -0,0 +1,101 @@ +// Copyright 2018 The go-ethereum Authors +// This file is part of go-ethereum. +// +// go-ethereum is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. +// +// go-ethereum is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. +// +// You should have received a copy of the GNU General Public License +// along with go-ethereum. If not, see . + +package main + +import ( + "os" + "sort" + + "github.com/ethereum/go-ethereum/log" + colorable "github.com/mattn/go-colorable" + + cli "gopkg.in/urfave/cli.v1" +) + +var ( + endpoints []string + includeLocalhost bool + cluster string + scheme string + filesize int + from int + to int +) + +func main() { + log.PrintOrigins(true) + log.Root().SetHandler(log.LvlFilterHandler(log.LvlTrace, log.StreamHandler(colorable.NewColorableStderr(), log.TerminalFormat(true)))) + + app := cli.NewApp() + app.Name = "smoke-test" + app.Usage = "" + + app.Flags = []cli.Flag{ + cli.StringFlag{ + Name: "cluster-endpoint", + Value: "testing", + Usage: "cluster to point to (open, or testing)", + Destination: &cluster, + }, + cli.IntFlag{ + Name: "cluster-from", + Value: 8501, + Usage: "swarm node (from)", + Destination: &from, + }, + cli.IntFlag{ + Name: "cluster-to", + Value: 8512, + Usage: "swarm node (to)", + Destination: &to, + }, + cli.StringFlag{ + Name: "cluster-scheme", + Value: "http", + Usage: "http or https", + Destination: &scheme, + }, + cli.BoolFlag{ + Name: "include-localhost", + Usage: "whether to include localhost:8500 as an endpoint", + Destination: &includeLocalhost, + }, + cli.IntFlag{ + Name: "filesize", + Value: 1, + Usage: "file size for generated random file in MB", + Destination: &filesize, + }, + } + + app.Commands = []cli.Command{ + { + Name: "upload_and_sync", + Aliases: []string{"c"}, + Usage: "upload and sync", + Action: cliUploadAndSync, + }, + } + + sort.Sort(cli.FlagsByName(app.Flags)) + sort.Sort(cli.CommandsByName(app.Commands)) + + err := app.Run(os.Args) + if err != nil { + log.Error(err.Error()) + } +} diff --git a/cmd/swarm/swarm-smoke/upload_and_sync.go b/cmd/swarm/swarm-smoke/upload_and_sync.go new file mode 100644 index 0000000000000..7f9051e7fecbc --- /dev/null +++ b/cmd/swarm/swarm-smoke/upload_and_sync.go @@ -0,0 +1,184 @@ +// Copyright 2018 The go-ethereum Authors +// This file is part of go-ethereum. +// +// go-ethereum is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. 
+// +// go-ethereum is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. +// +// You should have received a copy of the GNU General Public License +// along with go-ethereum. If not, see . + +package main + +import ( + "bytes" + "crypto/md5" + "crypto/rand" + "fmt" + "io" + "io/ioutil" + "net/http" + "os" + "os/exec" + "strings" + "sync" + "time" + + "github.com/ethereum/go-ethereum/log" + "github.com/pborman/uuid" + + cli "gopkg.in/urfave/cli.v1" +) + +func generateEndpoints(scheme string, cluster string, from int, to int) { + for port := from; port <= to; port++ { + endpoints = append(endpoints, fmt.Sprintf("%s://%v.%s.swarm-gateways.net", scheme, port, cluster)) + } + + if includeLocalhost { + endpoints = append(endpoints, "http://localhost:8500") + } +} + +func cliUploadAndSync(c *cli.Context) error { + defer func(now time.Time) { log.Info("total time", "time", time.Since(now), "size", filesize) }(time.Now()) + + generateEndpoints(scheme, cluster, from, to) + + log.Info("uploading to " + endpoints[0] + " and syncing") + + f, cleanup := generateRandomFile(filesize * 1000000) + defer cleanup() + + hash, err := upload(f, endpoints[0]) + if err != nil { + log.Error(err.Error()) + return err + } + + fhash, err := digest(f) + if err != nil { + log.Error(err.Error()) + return err + } + + log.Info("uploaded successfully", "hash", hash, "digest", fmt.Sprintf("%x", fhash)) + + if filesize < 10 { + time.Sleep(15 * time.Second) + } else { + time.Sleep(2 * time.Duration(filesize) * time.Second) + } + + wg := sync.WaitGroup{} + for _, endpoint := range endpoints { + endpoint := endpoint + ruid := uuid.New()[:8] + wg.Add(1) + go func(endpoint string, ruid string) { + for { + err := fetch(hash, endpoint, fhash, ruid) + if err != nil { + continue + } + + wg.Done() + return + } + }(endpoint, ruid) + } + wg.Wait() + log.Info("all endpoints synced random file successfully") + + return nil +} + +// fetch is getting the requested `hash` from the `endpoint` and compares it with the `original` file +func fetch(hash string, endpoint string, original []byte, ruid string) error { + log.Trace("sleeping", "ruid", ruid) + time.Sleep(1 * time.Second) + + log.Trace("http get request", "ruid", ruid, "api", endpoint, "hash", hash) + res, err := http.Get(endpoint + "/bzz:/" + hash + "/") + if err != nil { + log.Warn(err.Error(), "ruid", ruid) + return err + } + log.Trace("http get response", "ruid", ruid, "api", endpoint, "hash", hash, "code", res.StatusCode, "len", res.ContentLength) + + if res.StatusCode != 200 { + err := fmt.Errorf("expected status code %d, got %v", 200, res.StatusCode) + log.Warn(err.Error(), "ruid", ruid) + return err + } + + defer res.Body.Close() + + rdigest, err := digest(res.Body) + if err != nil { + log.Warn(err.Error(), "ruid", ruid) + return err + } + + if !bytes.Equal(rdigest, original) { + err := fmt.Errorf("downloaded imported file md5=%x is not the same as the generated one=%x", rdigest, original) + log.Warn(err.Error(), "ruid", ruid) + return err + } + + log.Trace("downloaded file matches random file", "ruid", ruid, "len", res.ContentLength) + + return nil +} + +// upload is uploading a file `f` to `endpoint` via the `swarm up` cmd +func upload(f *os.File, endpoint string) (string, error) { + var out bytes.Buffer + cmd := exec.Command("swarm", "--bzzapi", endpoint, "up", f.Name()) + cmd.Stdout = &out + err := 
cmd.Run() + if err != nil { + return "", err + } + hash := strings.TrimRight(out.String(), "\r\n") + return hash, nil +} + +func digest(r io.Reader) ([]byte, error) { + h := md5.New() + _, err := io.Copy(h, r) + if err != nil { + return nil, err + } + return h.Sum(nil), nil +} + +// generateRandomFile is creating a temporary file with the requested byte size +func generateRandomFile(size int) (f *os.File, teardown func()) { + // create a tmp file + tmp, err := ioutil.TempFile("", "swarm-test") + if err != nil { + panic(err) + } + + // callback for tmp file cleanup + teardown = func() { + tmp.Close() + os.Remove(tmp.Name()) + } + + buf := make([]byte, size) + _, err = rand.Read(buf) + if err != nil { + panic(err) + } + ioutil.WriteFile(tmp.Name(), buf, 0755) + + return tmp, teardown +} diff --git a/cmd/swarm/upload.go b/cmd/swarm/upload.go index 9f4c525bb92ac..8ba0e7c5f0c02 100644 --- a/cmd/swarm/upload.go +++ b/cmd/swarm/upload.go @@ -40,12 +40,13 @@ func upload(ctx *cli.Context) { args := ctx.Args() var ( bzzapi = strings.TrimRight(ctx.GlobalString(SwarmApiFlag.Name), "/") - recursive = ctx.GlobalBool(SwarmRecursiveUploadFlag.Name) + recursive = ctx.GlobalBool(SwarmRecursiveFlag.Name) wantManifest = ctx.GlobalBoolT(SwarmWantManifestFlag.Name) defaultPath = ctx.GlobalString(SwarmUploadDefaultPath.Name) fromStdin = ctx.GlobalBool(SwarmUpFromStdinFlag.Name) mimeType = ctx.GlobalString(SwarmUploadMimeType.Name) client = swarm.NewClient(bzzapi) + toEncrypt = ctx.Bool(SwarmEncryptedFlag.Name) file string ) @@ -76,7 +77,7 @@ func upload(ctx *cli.Context) { utils.Fatalf("Error opening file: %s", err) } defer f.Close() - hash, err := client.UploadRaw(f, f.Size) + hash, err := client.UploadRaw(f, f.Size, toEncrypt) if err != nil { utils.Fatalf("Upload failed: %s", err) } @@ -97,7 +98,7 @@ func upload(ctx *cli.Context) { if !recursive { return "", errors.New("Argument is a directory and recursive upload is disabled") } - return client.UploadDirectory(file, defaultPath, "") + return client.UploadDirectory(file, defaultPath, "", toEncrypt) } } else { doUpload = func() (string, error) { @@ -110,7 +111,7 @@ func upload(ctx *cli.Context) { mimeType = detectMimeType(file) } f.ContentType = mimeType - return client.Upload(f, "") + return client.Upload(f, "", toEncrypt) } } hash, err := doUpload() diff --git a/cmd/swarm/upload_test.go b/cmd/swarm/upload_test.go index df7fc216af141..2afc9b3a1186e 100644 --- a/cmd/swarm/upload_test.go +++ b/cmd/swarm/upload_test.go @@ -17,60 +17,259 @@ package main import ( + "bytes" + "flag" + "fmt" "io" "io/ioutil" "net/http" "os" + "path" + "path/filepath" + "strings" "testing" + "time" + + "github.com/ethereum/go-ethereum/log" + swarm "github.com/ethereum/go-ethereum/swarm/api/client" + colorable "github.com/mattn/go-colorable" ) +var loglevel = flag.Int("loglevel", 3, "verbosity of logs") + +func init() { + log.PrintOrigins(true) + log.Root().SetHandler(log.LvlFilterHandler(log.Lvl(*loglevel), log.StreamHandler(colorable.NewColorableStderr(), log.TerminalFormat(true)))) +} + // TestCLISwarmUp tests that running 'swarm up' makes the resulting file // available from all nodes via the HTTP API func TestCLISwarmUp(t *testing.T) { - // start 3 node cluster - t.Log("starting 3 node cluster") + testCLISwarmUp(false, t) +} +func TestCLISwarmUpRecursive(t *testing.T) { + testCLISwarmUpRecursive(false, t) +} + +// TestCLISwarmUpEncrypted tests that running 'swarm encrypted-up' makes the resulting file +// available from all nodes via the HTTP API +func TestCLISwarmUpEncrypted(t 
*testing.T) { + testCLISwarmUp(true, t) +} +func TestCLISwarmUpEncryptedRecursive(t *testing.T) { + testCLISwarmUpRecursive(true, t) +} + +func testCLISwarmUp(toEncrypt bool, t *testing.T) { + log.Info("starting 3 node cluster") cluster := newTestCluster(t, 3) defer cluster.Shutdown() // create a tmp file tmp, err := ioutil.TempFile("", "swarm-test") - assertNil(t, err) + if err != nil { + t.Fatal(err) + } defer tmp.Close() defer os.Remove(tmp.Name()) - _, err = io.WriteString(tmp, "data") - assertNil(t, err) + // write data to file + data := "notsorandomdata" + _, err = io.WriteString(tmp, data) + if err != nil { + t.Fatal(err) + } + + hashRegexp := `[a-f\d]{64}` + flags := []string{ + "--bzzapi", cluster.Nodes[0].URL, + "up", + tmp.Name()} + if toEncrypt { + hashRegexp = `[a-f\d]{128}` + flags = []string{ + "--bzzapi", cluster.Nodes[0].URL, + "up", + "--encrypt", + tmp.Name()} + } // upload the file with 'swarm up' and expect a hash - t.Log("uploading file with 'swarm up'") - up := runSwarm(t, "--bzzapi", cluster.Nodes[0].URL, "up", tmp.Name()) - _, matches := up.ExpectRegexp(`[a-f\d]{64}`) + log.Info(fmt.Sprintf("uploading file with 'swarm up'")) + up := runSwarm(t, flags...) + _, matches := up.ExpectRegexp(hashRegexp) up.ExpectExit() hash := matches[0] - t.Logf("file uploaded with hash %s", hash) + log.Info("file uploaded", "hash", hash) // get the file from the HTTP API of each node for _, node := range cluster.Nodes { - t.Logf("getting file from %s", node.Name) + log.Info("getting file from node", "node", node.Name) + res, err := http.Get(node.URL + "/bzz:/" + hash) - assertNil(t, err) - assertHTTPResponse(t, res, http.StatusOK, "data") + if err != nil { + t.Fatal(err) + } + defer res.Body.Close() + + reply, err := ioutil.ReadAll(res.Body) + if err != nil { + t.Fatal(err) + } + if res.StatusCode != 200 { + t.Fatalf("expected HTTP status 200, got %s", res.Status) + } + if string(reply) != data { + t.Fatalf("expected HTTP body %q, got %q", data, reply) + } + log.Debug("verifying uploaded file using `swarm down`") + //try to get the content with `swarm down` + tmpDownload, err := ioutil.TempDir("", "swarm-test") + tmpDownload = path.Join(tmpDownload, "tmpfile.tmp") + if err != nil { + t.Fatal(err) + } + defer os.RemoveAll(tmpDownload) + + bzzLocator := "bzz:/" + hash + flags = []string{ + "--bzzapi", cluster.Nodes[0].URL, + "down", + bzzLocator, + tmpDownload, + } + + down := runSwarm(t, flags...) 
+ down.ExpectExit() + + fi, err := os.Stat(tmpDownload) + if err != nil { + t.Fatalf("could not stat path: %v", err) + } + + switch mode := fi.Mode(); { + case mode.IsRegular(): + downloadedBytes, err := ioutil.ReadFile(tmpDownload) + if err != nil { + t.Fatalf("had an error reading the downloaded file: %v", err) + } + if !bytes.Equal(downloadedBytes, bytes.NewBufferString(data).Bytes()) { + t.Fatalf("retrieved data and posted data not equal!") + } + + default: + t.Fatalf("expected to download regular file, got %s", fi.Mode()) + } + } + + timeout := time.Duration(2 * time.Second) + httpClient := http.Client{ + Timeout: timeout, + } + + // try to squeeze a timeout by getting an non-existent hash from each node + for _, node := range cluster.Nodes { + _, err := httpClient.Get(node.URL + "/bzz:/1023e8bae0f70be7d7b5f74343088ba408a218254391490c85ae16278e230340") + // we're speeding up the timeout here since netstore has a 60 seconds timeout on a request + if err != nil && !strings.Contains(err.Error(), "Client.Timeout exceeded while awaiting headers") { + t.Fatal(err) + } + // this is disabled since it takes 60s due to netstore timeout + // if res.StatusCode != 404 { + // t.Fatalf("expected HTTP status 404, got %s", res.Status) + // } } } -func assertNil(t *testing.T, err error) { +func testCLISwarmUpRecursive(toEncrypt bool, t *testing.T) { + fmt.Println("starting 3 node cluster") + cluster := newTestCluster(t, 3) + defer cluster.Shutdown() + + tmpUploadDir, err := ioutil.TempDir("", "swarm-test") if err != nil { t.Fatal(err) } -} + defer os.RemoveAll(tmpUploadDir) + // create tmp files + data := "notsorandomdata" + for _, path := range []string{"tmp1", "tmp2"} { + if err := ioutil.WriteFile(filepath.Join(tmpUploadDir, path), bytes.NewBufferString(data).Bytes(), 0644); err != nil { + t.Fatal(err) + } + } -func assertHTTPResponse(t *testing.T, res *http.Response, expectedStatus int, expectedBody string) { - defer res.Body.Close() - if res.StatusCode != expectedStatus { - t.Fatalf("expected HTTP status %d, got %s", expectedStatus, res.Status) + hashRegexp := `[a-f\d]{64}` + flags := []string{ + "--bzzapi", cluster.Nodes[0].URL, + "--recursive", + "up", + tmpUploadDir} + if toEncrypt { + hashRegexp = `[a-f\d]{128}` + flags = []string{ + "--bzzapi", cluster.Nodes[0].URL, + "--recursive", + "up", + "--encrypt", + tmpUploadDir} } - data, err := ioutil.ReadAll(res.Body) - assertNil(t, err) - if string(data) != expectedBody { - t.Fatalf("expected HTTP body %q, got %q", expectedBody, data) + // upload the file with 'swarm up' and expect a hash + log.Info(fmt.Sprintf("uploading file with 'swarm up'")) + up := runSwarm(t, flags...) + _, matches := up.ExpectRegexp(hashRegexp) + up.ExpectExit() + hash := matches[0] + log.Info("dir uploaded", "hash", hash) + + // get the file from the HTTP API of each node + for _, node := range cluster.Nodes { + log.Info("getting file from node", "node", node.Name) + //try to get the content with `swarm down` + tmpDownload, err := ioutil.TempDir("", "swarm-test") + if err != nil { + t.Fatal(err) + } + defer os.RemoveAll(tmpDownload) + bzzLocator := "bzz:/" + hash + flagss := []string{} + flagss = []string{ + "--bzzapi", cluster.Nodes[0].URL, + "down", + "--recursive", + bzzLocator, + tmpDownload, + } + + fmt.Println("downloading from swarm with recursive") + down := runSwarm(t, flagss...) 
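+		// As in the single-file test, wait for the recursive `swarm down` to exit
+		// before listing tmpDownload and comparing each downloaded file against
+		// the uploaded data.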
+ down.ExpectExit() + + files, err := ioutil.ReadDir(tmpDownload) + for _, v := range files { + fi, err := os.Stat(path.Join(tmpDownload, v.Name())) + if err != nil { + t.Fatalf("got an error: %v", err) + } + + switch mode := fi.Mode(); { + case mode.IsRegular(): + if file, err := swarm.Open(path.Join(tmpDownload, v.Name())); err != nil { + t.Fatalf("encountered an error opening the file returned from the CLI: %v", err) + } else { + ff := make([]byte, len(data)) + io.ReadFull(file, ff) + buf := bytes.NewBufferString(data) + + if !bytes.Equal(ff, buf.Bytes()) { + t.Fatalf("retrieved data and posted data not equal!") + } + } + default: + t.Fatalf("this shouldnt happen") + } + } + if err != nil { + t.Fatalf("could not list files at: %v", files) + } } } diff --git a/p2p/metrics.go b/p2p/metrics.go index 4cbff90aca67b..2d52fd1fd1d0e 100644 --- a/p2p/metrics.go +++ b/p2p/metrics.go @@ -31,10 +31,10 @@ var ( egressTrafficMeter = metrics.NewRegisteredMeter("p2p/OutboundTraffic", nil) ) -// meteredConn is a wrapper around a network TCP connection that meters both the +// meteredConn is a wrapper around a net.Conn that meters both the // inbound and outbound network traffic. type meteredConn struct { - *net.TCPConn // Network connection to wrap with metering + net.Conn // Network connection to wrap with metering } // newMeteredConn creates a new metered connection, also bumping the ingress or @@ -51,13 +51,13 @@ func newMeteredConn(conn net.Conn, ingress bool) net.Conn { } else { egressConnectMeter.Mark(1) } - return &meteredConn{conn.(*net.TCPConn)} + return &meteredConn{Conn: conn} } // Read delegates a network read to the underlying connection, bumping the ingress // traffic meter along the way. func (c *meteredConn) Read(b []byte) (n int, err error) { - n, err = c.TCPConn.Read(b) + n, err = c.Conn.Read(b) ingressTrafficMeter.Mark(int64(n)) return } @@ -65,7 +65,7 @@ func (c *meteredConn) Read(b []byte) (n int, err error) { // Write delegates a network write to the underlying connection, bumping the // egress traffic meter along the way. func (c *meteredConn) Write(b []byte) (n int, err error) { - n, err = c.TCPConn.Write(b) + n, err = c.Conn.Write(b) egressTrafficMeter.Mark(int64(n)) return } diff --git a/p2p/peer.go b/p2p/peer.go index c3907349fc5ae..eb2d34441c233 100644 --- a/p2p/peer.go +++ b/p2p/peer.go @@ -17,6 +17,7 @@ package p2p import ( + "errors" "fmt" "io" "net" @@ -31,6 +32,10 @@ import ( "github.com/ethereum/go-ethereum/rlp" ) +var ( + ErrShuttingDown = errors.New("shutting down") +) + const ( baseProtocolVersion = 5 baseProtocolLength = uint64(16) @@ -393,7 +398,7 @@ func (rw *protoRW) WriteMsg(msg Msg) (err error) { // as well but we don't want to rely on that. 
rw.werr <- err case <-rw.closed: - err = fmt.Errorf("shutting down") + err = ErrShuttingDown } return err } diff --git a/p2p/protocols/protocol.go b/p2p/protocols/protocol.go index 849a7ef3995e6..d5c0375ac7083 100644 --- a/p2p/protocols/protocol.go +++ b/p2p/protocols/protocol.go @@ -31,10 +31,12 @@ package protocols import ( "context" "fmt" + "io" "reflect" "sync" "time" + "github.com/ethereum/go-ethereum/log" "github.com/ethereum/go-ethereum/metrics" "github.com/ethereum/go-ethereum/p2p" ) @@ -202,6 +204,11 @@ func NewPeer(p *p2p.Peer, rw p2p.MsgReadWriter, spec *Spec) *Peer { func (p *Peer) Run(handler func(msg interface{}) error) error { for { if err := p.handleIncoming(handler); err != nil { + if err != io.EOF { + metrics.GetOrRegisterCounter("peer.handleincoming.error", nil).Inc(1) + log.Error("peer.handleIncoming", "err", err) + } + return err } } diff --git a/p2p/simulations/network.go b/p2p/simulations/network.go index a8a46cd874878..0fb7485ad0946 100644 --- a/p2p/simulations/network.go +++ b/p2p/simulations/network.go @@ -31,7 +31,7 @@ import ( "github.com/ethereum/go-ethereum/p2p/simulations/adapters" ) -var dialBanTimeout = 200 * time.Millisecond +var DialBanTimeout = 200 * time.Millisecond // NetworkConfig defines configuration options for starting a Network type NetworkConfig struct { @@ -78,41 +78,25 @@ func (net *Network) Events() *event.Feed { return &net.events } -// NewNode adds a new node to the network with a random ID -func (net *Network) NewNode() (*Node, error) { - conf := adapters.RandomNodeConfig() - conf.Services = []string{net.DefaultService} - return net.NewNodeWithConfig(conf) -} - // NewNodeWithConfig adds a new node to the network with the given config, // returning an error if a node with the same ID or name already exists func (net *Network) NewNodeWithConfig(conf *adapters.NodeConfig) (*Node, error) { net.lock.Lock() defer net.lock.Unlock() - // create a random ID and PrivateKey if not set - if conf.ID == (discover.NodeID{}) { - c := adapters.RandomNodeConfig() - conf.ID = c.ID - conf.PrivateKey = c.PrivateKey - } - id := conf.ID if conf.Reachable == nil { conf.Reachable = func(otherID discover.NodeID) bool { _, err := net.InitConn(conf.ID, otherID) - return err == nil + if err != nil && bytes.Compare(conf.ID.Bytes(), otherID.Bytes()) < 0 { + return false + } + return true } } - // assign a name to the node if not set - if conf.Name == "" { - conf.Name = fmt.Sprintf("node%02d", len(net.Nodes)+1) - } - // check the node doesn't already exist - if node := net.getNode(id); node != nil { - return nil, fmt.Errorf("node with ID %q already exists", id) + if node := net.getNode(conf.ID); node != nil { + return nil, fmt.Errorf("node with ID %q already exists", conf.ID) } if node := net.getNodeByName(conf.Name); node != nil { return nil, fmt.Errorf("node with name %q already exists", conf.Name) @@ -132,8 +116,8 @@ func (net *Network) NewNodeWithConfig(conf *adapters.NodeConfig) (*Node, error) Node: adapterNode, Config: conf, } - log.Trace(fmt.Sprintf("node %v created", id)) - net.nodeMap[id] = len(net.Nodes) + log.Trace(fmt.Sprintf("node %v created", conf.ID)) + net.nodeMap[conf.ID] = len(net.Nodes) net.Nodes = append(net.Nodes, node) // emit a "control" event @@ -181,7 +165,9 @@ func (net *Network) Start(id discover.NodeID) error { // startWithSnapshots starts the node with the given ID using the give // snapshots func (net *Network) startWithSnapshots(id discover.NodeID, snapshots map[string][]byte) error { - node := net.GetNode(id) + net.lock.Lock() + defer 
net.lock.Unlock() + node := net.getNode(id) if node == nil { return fmt.Errorf("node %v does not exist", id) } @@ -220,9 +206,13 @@ func (net *Network) watchPeerEvents(id discover.NodeID, events chan *p2p.PeerEve // assume the node is now down net.lock.Lock() + defer net.lock.Unlock() node := net.getNode(id) + if node == nil { + log.Error("Can not find node for id", "id", id) + return + } node.Up = false - net.lock.Unlock() net.events.Send(NewEvent(node)) }() for { @@ -259,7 +249,9 @@ func (net *Network) watchPeerEvents(id discover.NodeID, events chan *p2p.PeerEve // Stop stops the node with the given ID func (net *Network) Stop(id discover.NodeID) error { - node := net.GetNode(id) + net.lock.Lock() + defer net.lock.Unlock() + node := net.getNode(id) if node == nil { return fmt.Errorf("node %v does not exist", id) } @@ -312,7 +304,9 @@ func (net *Network) Disconnect(oneID, otherID discover.NodeID) error { // DidConnect tracks the fact that the "one" node connected to the "other" node func (net *Network) DidConnect(one, other discover.NodeID) error { - conn, err := net.GetOrCreateConn(one, other) + net.lock.Lock() + defer net.lock.Unlock() + conn, err := net.getOrCreateConn(one, other) if err != nil { return fmt.Errorf("connection between %v and %v does not exist", one, other) } @@ -327,7 +321,9 @@ func (net *Network) DidConnect(one, other discover.NodeID) error { // DidDisconnect tracks the fact that the "one" node disconnected from the // "other" node func (net *Network) DidDisconnect(one, other discover.NodeID) error { - conn := net.GetConn(one, other) + net.lock.Lock() + defer net.lock.Unlock() + conn := net.getConn(one, other) if conn == nil { return fmt.Errorf("connection between %v and %v does not exist", one, other) } @@ -335,7 +331,7 @@ func (net *Network) DidDisconnect(one, other discover.NodeID) error { return fmt.Errorf("%v and %v already disconnected", one, other) } conn.Up = false - conn.initiated = time.Now().Add(-dialBanTimeout) + conn.initiated = time.Now().Add(-DialBanTimeout) net.events.Send(NewEvent(conn)) return nil } @@ -476,16 +472,19 @@ func (net *Network) InitConn(oneID, otherID discover.NodeID) (*Conn, error) { if err != nil { return nil, err } - if time.Since(conn.initiated) < dialBanTimeout { - return nil, fmt.Errorf("connection between %v and %v recently attempted", oneID, otherID) - } if conn.Up { return nil, fmt.Errorf("%v and %v already connected", oneID, otherID) } + if time.Since(conn.initiated) < DialBanTimeout { + return nil, fmt.Errorf("connection between %v and %v recently attempted", oneID, otherID) + } + err = conn.nodesUp() if err != nil { + log.Trace(fmt.Sprintf("nodes not up: %v", err)) return nil, fmt.Errorf("nodes not up: %v", err) } + log.Debug("InitConn - connection initiated") conn.initiated = time.Now() return conn, nil } diff --git a/p2p/testing/protocolsession.go b/p2p/testing/protocolsession.go index 8f73bfa03eb48..e3ec41ad67c73 100644 --- a/p2p/testing/protocolsession.go +++ b/p2p/testing/protocolsession.go @@ -91,7 +91,9 @@ func (s *ProtocolSession) trigger(trig Trigger) error { errc := make(chan error) go func() { + log.Trace(fmt.Sprintf("trigger %v (%v)....", trig.Msg, trig.Code)) errc <- mockNode.Trigger(&trig) + log.Trace(fmt.Sprintf("triggered %v (%v)", trig.Msg, trig.Code)) }() t := trig.Timeout diff --git a/swarm/AUTHORS b/swarm/AUTHORS new file mode 100644 index 0000000000000..f7232f07ce74c --- /dev/null +++ b/swarm/AUTHORS @@ -0,0 +1,35 @@ +# Core team members + +Viktor Trón - @zelig +Louis Holbrook - @nolash +Lewis Marshall - 
@lmars +Anton Evangelatov - @nonsense +Janoš Guljaš - @janos +Balint Gabor - @gbalint +Elad Nachmias - @justelad +Daniel A. Nagy - @nagydani +Aron Fischer - @homotopycolimit +Fabio Barone - @holisticode +Zahoor Mohamed - @jmozah +Zsolt Felföldi - @zsfelfoldi + +# External contributors + +Kiel Barry +Gary Rong +Jared Wasinger +Leon Stanko +Javier Peletier [epiclabs.io] +Bartek Borkowski [tungsten-labs.com] +Shane Howley [mainframe.com] +Doug Leonard [mainframe.com] +Ivan Daniluk [status.im] +Felix Lange [EF] +Martin Holst Swende [EF] +Guillaume Ballet [EF] +ligi [EF] +Christopher Dro [blick-labs.com] +Sergii Bomko [ledgerleopard.com] +Domino Valdano +Rafael Matias +Coogan Brennan \ No newline at end of file diff --git a/swarm/OWNERS b/swarm/OWNERS new file mode 100644 index 0000000000000..774cd7db9de50 --- /dev/null +++ b/swarm/OWNERS @@ -0,0 +1,26 @@ +# Ownership by go packages + +swarm +├── api ─────────────────── ethersphere +├── bmt ─────────────────── @zelig +├── dev ─────────────────── @lmars +├── fuse ────────────────── @jmozah, @holisticode +├── grafana_dashboards ──── @nonsense +├── metrics ─────────────── @nonsense, @holisticode +├── multihash ───────────── @nolash +├── network ─────────────── ethersphere +│ ├── bitvector ───────── @zelig, @janos, @gbalint +│ ├── priorityqueue ───── @zelig, @janos, @gbalint +│ ├── simulations ─────── @zelig +│ └── stream ──────────── @janos, @zelig, @gbalint, @holisticode, @justelad +│ ├── intervals ───── @janos +│ └── testing ─────── @zelig +├── pot ─────────────────── @zelig +├── pss ─────────────────── @nolash, @zelig, @nonsense +├── services ────────────── @zelig +├── state ───────────────── @justelad +├── storage ─────────────── ethersphere +│ ├── encryption ──────── @gbalint, @zelig, @nagydani +│ ├── mock ────────────── @janos +│ └── mru ─────────────── @nolash +└── testutil ────────────── @lmars \ No newline at end of file diff --git a/swarm/api/api.go b/swarm/api/api.go index 0cf12fdbed6f6..36f19998af340 100644 --- a/swarm/api/api.go +++ b/swarm/api/api.go @@ -17,13 +17,13 @@ package api import ( + "context" "fmt" "io" + "math/big" "net/http" "path" - "regexp" "strings" - "sync" "bytes" "mime" @@ -31,14 +31,15 @@ import ( "time" "github.com/ethereum/go-ethereum/common" - "github.com/ethereum/go-ethereum/log" + "github.com/ethereum/go-ethereum/contracts/ens" + "github.com/ethereum/go-ethereum/core/types" "github.com/ethereum/go-ethereum/metrics" + "github.com/ethereum/go-ethereum/swarm/log" + "github.com/ethereum/go-ethereum/swarm/multihash" "github.com/ethereum/go-ethereum/swarm/storage" + "github.com/ethereum/go-ethereum/swarm/storage/mru" ) -var hashMatcher = regexp.MustCompile("^[0-9A-Fa-f]{64}") - -//setup metrics var ( apiResolveCount = metrics.NewRegisteredCounter("api.resolve.count", nil) apiResolveFail = metrics.NewRegisteredCounter("api.resolve.fail", nil) @@ -46,7 +47,7 @@ var ( apiPutFail = metrics.NewRegisteredCounter("api.put.fail", nil) apiGetCount = metrics.NewRegisteredCounter("api.get.count", nil) apiGetNotFound = metrics.NewRegisteredCounter("api.get.notfound", nil) - apiGetHttp300 = metrics.NewRegisteredCounter("api.get.http.300", nil) + apiGetHTTP300 = metrics.NewRegisteredCounter("api.get.http.300", nil) apiModifyCount = metrics.NewRegisteredCounter("api.modify.count", nil) apiModifyFail = metrics.NewRegisteredCounter("api.modify.fail", nil) apiAddFileCount = metrics.NewRegisteredCounter("api.addfile.count", nil) @@ -55,22 +56,33 @@ var ( apiRmFileFail = metrics.NewRegisteredCounter("api.removefile.fail", nil) 
apiAppendFileCount = metrics.NewRegisteredCounter("api.appendfile.count", nil) apiAppendFileFail = metrics.NewRegisteredCounter("api.appendfile.fail", nil) + apiGetInvalid = metrics.NewRegisteredCounter("api.get.invalid", nil) ) +// Resolver interface resolve a domain name to a hash using ENS type Resolver interface { Resolve(string) (common.Hash, error) } +// ResolveValidator is used to validate the contained Resolver +type ResolveValidator interface { + Resolver + Owner(node [32]byte) (common.Address, error) + HeaderByNumber(context.Context, *big.Int) (*types.Header, error) +} + // NoResolverError is returned by MultiResolver.Resolve if no resolver // can be found for the address. type NoResolverError struct { TLD string } +// NewNoResolverError creates a NoResolverError for the given top level domain func NewNoResolverError(tld string) *NoResolverError { return &NoResolverError{TLD: tld} } +// Error NoResolverError implements error func (e *NoResolverError) Error() string { if e.TLD == "" { return "no ENS resolver" @@ -82,7 +94,8 @@ func (e *NoResolverError) Error() string { // Each TLD can have multiple resolvers, and the resoluton from the // first one in the sequence will be returned. type MultiResolver struct { - resolvers map[string][]Resolver + resolvers map[string][]ResolveValidator + nameHash func(string) common.Hash } // MultiResolverOption sets options for MultiResolver and is used as @@ -93,16 +106,24 @@ type MultiResolverOption func(*MultiResolver) // for a specific TLD. If TLD is an empty string, the resolver will be added // to the list of default resolver, the ones that will be used for resolution // of addresses which do not have their TLD resolver specified. -func MultiResolverOptionWithResolver(r Resolver, tld string) MultiResolverOption { +func MultiResolverOptionWithResolver(r ResolveValidator, tld string) MultiResolverOption { return func(m *MultiResolver) { m.resolvers[tld] = append(m.resolvers[tld], r) } } +// MultiResolverOptionWithNameHash is unused at the time of this writing +func MultiResolverOptionWithNameHash(nameHash func(string) common.Hash) MultiResolverOption { + return func(m *MultiResolver) { + m.nameHash = nameHash + } +} + // NewMultiResolver creates a new instance of MultiResolver. func NewMultiResolver(opts ...MultiResolverOption) (m *MultiResolver) { m = &MultiResolver{ - resolvers: make(map[string][]Resolver), + resolvers: make(map[string][]ResolveValidator), + nameHash: ens.EnsNode, } for _, o := range opts { o(m) @@ -114,18 +135,10 @@ func NewMultiResolver(opts ...MultiResolverOption) (m *MultiResolver) { // If there are more default Resolvers, or for a specific TLD, // the Hash from the the first one which does not return error // will be returned. 
-func (m MultiResolver) Resolve(addr string) (h common.Hash, err error) { - rs := m.resolvers[""] - tld := path.Ext(addr) - if tld != "" { - tld = tld[1:] - rstld, ok := m.resolvers[tld] - if ok { - rs = rstld - } - } - if rs == nil { - return h, NewNoResolverError(tld) +func (m *MultiResolver) Resolve(addr string) (h common.Hash, err error) { + rs, err := m.getResolveValidator(addr) + if err != nil { + return h, err } for _, r := range rs { h, err = r.Resolve(addr) @@ -136,104 +149,171 @@ func (m MultiResolver) Resolve(addr string) (h common.Hash, err error) { return } +// ValidateOwner checks the ENS to validate that the owner of the given domain is the given eth address +func (m *MultiResolver) ValidateOwner(name string, address common.Address) (bool, error) { + rs, err := m.getResolveValidator(name) + if err != nil { + return false, err + } + var addr common.Address + for _, r := range rs { + addr, err = r.Owner(m.nameHash(name)) + // we hide the error if it is not for the last resolver we check + if err == nil { + return addr == address, nil + } + } + return false, err +} + +// HeaderByNumber uses the validator of the given domainname and retrieves the header for the given block number +func (m *MultiResolver) HeaderByNumber(ctx context.Context, name string, blockNr *big.Int) (*types.Header, error) { + rs, err := m.getResolveValidator(name) + if err != nil { + return nil, err + } + for _, r := range rs { + var header *types.Header + header, err = r.HeaderByNumber(ctx, blockNr) + // we hide the error if it is not for the last resolver we check + if err == nil { + return header, nil + } + } + return nil, err +} + +// getResolveValidator uses the hostname to retrieve the resolver associated with the top level domain +func (m *MultiResolver) getResolveValidator(name string) ([]ResolveValidator, error) { + rs := m.resolvers[""] + tld := path.Ext(name) + if tld != "" { + tld = tld[1:] + rstld, ok := m.resolvers[tld] + if ok { + return rstld, nil + } + } + if len(rs) == 0 { + return rs, NewNoResolverError(tld) + } + return rs, nil +} + +// SetNameHash sets the hasher function that hashes the domain into a name hash that ENS uses +func (m *MultiResolver) SetNameHash(nameHash func(string) common.Hash) { + m.nameHash = nameHash +} + /* -Api implements webserver/file system related content storage and retrieval -on top of the dpa -it is the public interface of the dpa which is included in the ethereum stack +API implements webserver/file system related content storage and retrieval +on top of the FileStore +it is the public interface of the FileStore which is included in the ethereum stack */ -type Api struct { - dpa *storage.DPA - dns Resolver +type API struct { + resource *mru.Handler + fileStore *storage.FileStore + dns Resolver } -//the api constructor initialises -func NewApi(dpa *storage.DPA, dns Resolver) (self *Api) { - self = &Api{ - dpa: dpa, - dns: dns, +// NewAPI the api constructor initialises a new API instance. 
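+// Both dns and resourceHandler may be nil; the tests in api_test.go construct
+// the API as:
+//
+//	api := NewAPI(fileStore, nil, nil)
+//
+// in which case Resolve only accepts raw content hashes.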
+func NewAPI(fileStore *storage.FileStore, dns Resolver, resourceHandler *mru.Handler) (self *API) { + self = &API{ + fileStore: fileStore, + dns: dns, + resource: resourceHandler, } return } -// to be used only in TEST -func (self *Api) Upload(uploadDir, index string) (hash string, err error) { - fs := NewFileSystem(self) - hash, err = fs.Upload(uploadDir, index) +// Upload to be used only in TEST +func (a *API) Upload(uploadDir, index string, toEncrypt bool) (hash string, err error) { + fs := NewFileSystem(a) + hash, err = fs.Upload(uploadDir, index, toEncrypt) return hash, err } -// DPA reader API -func (self *Api) Retrieve(key storage.Key) storage.LazySectionReader { - return self.dpa.Retrieve(key) +// Retrieve FileStore reader API +func (a *API) Retrieve(addr storage.Address) (reader storage.LazySectionReader, isEncrypted bool) { + return a.fileStore.Retrieve(addr) } -func (self *Api) Store(data io.Reader, size int64, wg *sync.WaitGroup) (key storage.Key, err error) { - return self.dpa.Store(data, size, wg, nil) +// Store wraps the Store API call of the embedded FileStore +func (a *API) Store(data io.Reader, size int64, toEncrypt bool) (addr storage.Address, wait func(), err error) { + log.Debug("api.store", "size", size) + return a.fileStore.Store(data, size, toEncrypt) } +// ErrResolve is returned when an URI cannot be resolved from ENS. type ErrResolve error -// DNS Resolver -func (self *Api) Resolve(uri *URI) (storage.Key, error) { +// Resolve resolves a URI to an Address using the MultiResolver. +func (a *API) Resolve(uri *URI) (storage.Address, error) { apiResolveCount.Inc(1) - log.Trace(fmt.Sprintf("Resolving : %v", uri.Addr)) + log.Trace("resolving", "uri", uri.Addr) - // if the URI is immutable, check if the address is a hash - isHash := hashMatcher.MatchString(uri.Addr) - if uri.Immutable() || uri.DeprecatedImmutable() { - if !isHash { + // if the URI is immutable, check if the address looks like a hash + if uri.Immutable() { + key := uri.Address() + if key == nil { return nil, fmt.Errorf("immutable address not a content hash: %q", uri.Addr) } - return common.Hex2Bytes(uri.Addr), nil + return key, nil } // if DNS is not configured, check if the address is a hash - if self.dns == nil { - if !isHash { + if a.dns == nil { + key := uri.Address() + if key == nil { apiResolveFail.Inc(1) return nil, fmt.Errorf("no DNS to resolve name: %q", uri.Addr) } - return common.Hex2Bytes(uri.Addr), nil + return key, nil } // try and resolve the address - resolved, err := self.dns.Resolve(uri.Addr) + resolved, err := a.dns.Resolve(uri.Addr) if err == nil { return resolved[:], nil - } else if !isHash { + } + + key := uri.Address() + if key == nil { apiResolveFail.Inc(1) return nil, err } - return common.Hex2Bytes(uri.Addr), nil + return key, nil } -// Put provides singleton manifest creation on top of dpa store -func (self *Api) Put(content, contentType string) (storage.Key, error) { +// Put provides singleton manifest creation on top of FileStore store +func (a *API) Put(content, contentType string, toEncrypt bool) (k storage.Address, wait func(), err error) { apiPutCount.Inc(1) r := strings.NewReader(content) - wg := &sync.WaitGroup{} - key, err := self.dpa.Store(r, int64(len(content)), wg, nil) + key, waitContent, err := a.fileStore.Store(r, int64(len(content)), toEncrypt) if err != nil { apiPutFail.Inc(1) - return nil, err + return nil, nil, err } manifest := fmt.Sprintf(`{"entries":[{"hash":"%v","contentType":"%s"}]}`, key, contentType) r = strings.NewReader(manifest) - key, err = 
self.dpa.Store(r, int64(len(manifest)), wg, nil) + key, waitManifest, err := a.fileStore.Store(r, int64(len(manifest)), toEncrypt) if err != nil { apiPutFail.Inc(1) - return nil, err + return nil, nil, err } - wg.Wait() - return key, nil + return key, func() { + waitContent() + waitManifest() + }, nil } // Get uses iterative manifest retrieval and prefix matching -// to resolve basePath to content using dpa retrieve -// it returns a section reader, mimeType, status and an error -func (self *Api) Get(key storage.Key, path string) (reader storage.LazySectionReader, mimeType string, status int, err error) { +// to resolve basePath to content using FileStore retrieve +// it returns a section reader, mimeType, status, the key of the actual content and an error +func (a *API) Get(manifestAddr storage.Address, path string) (reader storage.LazySectionReader, mimeType string, status int, contentAddr storage.Address, err error) { + log.Debug("api.get", "key", manifestAddr, "path", path) apiGetCount.Inc(1) - trie, err := loadManifest(self.dpa, key, nil) + trie, err := loadManifest(a.fileStore, manifestAddr, nil) if err != nil { apiGetNotFound.Inc(1) status = http.StatusNotFound @@ -241,34 +321,111 @@ func (self *Api) Get(key storage.Key, path string) (reader storage.LazySectionRe return } - log.Trace(fmt.Sprintf("getEntry(%s)", path)) - + log.Debug("trie getting entry", "key", manifestAddr, "path", path) entry, _ := trie.getEntry(path) if entry != nil { - key = common.Hex2Bytes(entry.Hash) + log.Debug("trie got entry", "key", manifestAddr, "path", path, "entry.Hash", entry.Hash) + // we need to do some extra work if this is a mutable resource manifest + if entry.ContentType == ResourceContentType { + + // get the resource root chunk key + log.Trace("resource type", "key", manifestAddr, "hash", entry.Hash) + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + rsrc, err := a.resource.Load(storage.Address(common.FromHex(entry.Hash))) + if err != nil { + apiGetNotFound.Inc(1) + status = http.StatusNotFound + log.Debug(fmt.Sprintf("get resource content error: %v", err)) + return reader, mimeType, status, nil, err + } + + // use this key to retrieve the latest update + rsrc, err = a.resource.LookupLatest(ctx, rsrc.NameHash(), true, &mru.LookupParams{}) + if err != nil { + apiGetNotFound.Inc(1) + status = http.StatusNotFound + log.Debug(fmt.Sprintf("get resource content error: %v", err)) + return reader, mimeType, status, nil, err + } + + // if it's multihash, we will transparently serve the content this multihash points to + // \TODO this resolve is rather expensive all in all, review to see if it can be achieved cheaper + if rsrc.Multihash { + + // get the data of the update + _, rsrcData, err := a.resource.GetContent(rsrc.NameHash().Hex()) + if err != nil { + apiGetNotFound.Inc(1) + status = http.StatusNotFound + log.Warn(fmt.Sprintf("get resource content error: %v", err)) + return reader, mimeType, status, nil, err + } + + // validate that data as multihash + decodedMultihash, err := multihash.FromMultihash(rsrcData) + if err != nil { + apiGetInvalid.Inc(1) + status = http.StatusUnprocessableEntity + log.Warn("invalid resource multihash", "err", err) + return reader, mimeType, status, nil, err + } + manifestAddr = storage.Address(decodedMultihash) + log.Trace("resource is multihash", "key", manifestAddr) + + // get the manifest the multihash digest points to + trie, err := loadManifest(a.fileStore, manifestAddr, nil) + if err != nil { + apiGetNotFound.Inc(1) + status = 
http.StatusNotFound + log.Warn(fmt.Sprintf("loadManifestTrie (resource multihash) error: %v", err)) + return reader, mimeType, status, nil, err + } + + // finally, get the manifest entry + // it will always be the entry on path "" + entry, _ = trie.getEntry(path) + if entry == nil { + status = http.StatusNotFound + apiGetNotFound.Inc(1) + err = fmt.Errorf("manifest (resource multihash) entry for '%s' not found", path) + log.Trace("manifest (resource multihash) entry not found", "key", manifestAddr, "path", path) + return reader, mimeType, status, nil, err + } + + } else { + // data is returned verbatim since it's not a multihash + return rsrc, "application/octet-stream", http.StatusOK, nil, nil + } + } + + // regardless of resource update manifests or normal manifests we will converge at this point + // get the key the manifest entry points to and serve it if it's unambiguous + contentAddr = common.Hex2Bytes(entry.Hash) status = entry.Status if status == http.StatusMultipleChoices { - apiGetHttp300.Inc(1) - return - } else { - mimeType = entry.ContentType - log.Trace(fmt.Sprintf("content lookup key: '%v' (%v)", key, mimeType)) - reader = self.dpa.Retrieve(key) + apiGetHTTP300.Inc(1) + return nil, entry.ContentType, status, contentAddr, err } + mimeType = entry.ContentType + log.Debug("content lookup key", "key", contentAddr, "mimetype", mimeType) + reader, _ = a.fileStore.Retrieve(contentAddr) } else { + // no entry found status = http.StatusNotFound apiGetNotFound.Inc(1) err = fmt.Errorf("manifest entry for '%s' not found", path) - log.Warn(fmt.Sprintf("%v", err)) + log.Trace("manifest entry not found", "key", contentAddr, "path", path) } return } -func (self *Api) Modify(key storage.Key, path, contentHash, contentType string) (storage.Key, error) { +// Modify loads manifest and checks the content hash before recalculating and storing the manifest. +func (a *API) Modify(addr storage.Address, path, contentHash, contentType string) (storage.Address, error) { apiModifyCount.Inc(1) quitC := make(chan bool) - trie, err := loadManifest(self.dpa, key, quitC) + trie, err := loadManifest(a.fileStore, addr, quitC) if err != nil { apiModifyFail.Inc(1) return nil, err @@ -288,10 +445,11 @@ func (self *Api) Modify(key storage.Key, path, contentHash, contentType string) apiModifyFail.Inc(1) return nil, err } - return trie.hash, nil + return trie.ref, nil } -func (self *Api) AddFile(mhash, path, fname string, content []byte, nameresolver bool) (storage.Key, string, error) { +// AddFile creates a new manifest entry, adds it to swarm, then adds a file to swarm. 
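+// mhash is parsed as "bzz:/"+mhash and resolved via a.Resolve, so it may be an
+// ENS name as well as a raw manifest hash; path and fname locate the new entry
+// inside that manifest.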
+func (a *API) AddFile(mhash, path, fname string, content []byte, nameresolver bool) (storage.Address, string, error) { apiAddFileCount.Inc(1) uri, err := Parse("bzz:/" + mhash) @@ -299,7 +457,7 @@ func (self *Api) AddFile(mhash, path, fname string, content []byte, nameresolver apiAddFileFail.Inc(1) return nil, "", err } - mkey, err := self.Resolve(uri) + mkey, err := a.Resolve(uri) if err != nil { apiAddFileFail.Inc(1) return nil, "", err @@ -318,7 +476,7 @@ func (self *Api) AddFile(mhash, path, fname string, content []byte, nameresolver ModTime: time.Now(), } - mw, err := self.NewManifestWriter(mkey, nil) + mw, err := a.NewManifestWriter(mkey, nil) if err != nil { apiAddFileFail.Inc(1) return nil, "", err @@ -341,7 +499,8 @@ func (self *Api) AddFile(mhash, path, fname string, content []byte, nameresolver } -func (self *Api) RemoveFile(mhash, path, fname string, nameresolver bool) (string, error) { +// RemoveFile removes a file entry in a manifest. +func (a *API) RemoveFile(mhash, path, fname string, nameresolver bool) (string, error) { apiRmFileCount.Inc(1) uri, err := Parse("bzz:/" + mhash) @@ -349,7 +508,7 @@ func (self *Api) RemoveFile(mhash, path, fname string, nameresolver bool) (strin apiRmFileFail.Inc(1) return "", err } - mkey, err := self.Resolve(uri) + mkey, err := a.Resolve(uri) if err != nil { apiRmFileFail.Inc(1) return "", err @@ -360,7 +519,7 @@ func (self *Api) RemoveFile(mhash, path, fname string, nameresolver bool) (strin path = path[1:] } - mw, err := self.NewManifestWriter(mkey, nil) + mw, err := a.NewManifestWriter(mkey, nil) if err != nil { apiRmFileFail.Inc(1) return "", err @@ -382,7 +541,8 @@ func (self *Api) RemoveFile(mhash, path, fname string, nameresolver bool) (strin return newMkey.String(), nil } -func (self *Api) AppendFile(mhash, path, fname string, existingSize int64, content []byte, oldKey storage.Key, offset int64, addSize int64, nameresolver bool) (storage.Key, string, error) { +// AppendFile removes old manifest, appends file entry to new manifest and adds it to Swarm. 
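+// The existing content is buffered in memory (offset+addSize bytes) before the
+// new bytes are appended, so this is only practical for small files until the
+// pyramid chunker append (see the TODO below) is ready.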
+func (a *API) AppendFile(mhash, path, fname string, existingSize int64, content []byte, oldAddr storage.Address, offset int64, addSize int64, nameresolver bool) (storage.Address, string, error) { apiAppendFileCount.Inc(1) buffSize := offset + addSize @@ -392,7 +552,7 @@ func (self *Api) AppendFile(mhash, path, fname string, existingSize int64, conte buf := make([]byte, buffSize) - oldReader := self.Retrieve(oldKey) + oldReader, _ := a.Retrieve(oldAddr) io.ReadAtLeast(oldReader, buf, int(offset)) newReader := bytes.NewReader(content) @@ -406,7 +566,7 @@ func (self *Api) AppendFile(mhash, path, fname string, existingSize int64, conte totalSize := int64(len(buf)) // TODO(jmozah): to append using pyramid chunker when it is ready - //oldReader := self.Retrieve(oldKey) + //oldReader := a.Retrieve(oldKey) //newReader := bytes.NewReader(content) //combinedReader := io.MultiReader(oldReader, newReader) @@ -415,7 +575,7 @@ func (self *Api) AppendFile(mhash, path, fname string, existingSize int64, conte apiAppendFileFail.Inc(1) return nil, "", err } - mkey, err := self.Resolve(uri) + mkey, err := a.Resolve(uri) if err != nil { apiAppendFileFail.Inc(1) return nil, "", err @@ -426,7 +586,7 @@ func (self *Api) AppendFile(mhash, path, fname string, existingSize int64, conte path = path[1:] } - mw, err := self.NewManifestWriter(mkey, nil) + mw, err := a.NewManifestWriter(mkey, nil) if err != nil { apiAppendFileFail.Inc(1) return nil, "", err @@ -463,21 +623,22 @@ func (self *Api) AppendFile(mhash, path, fname string, existingSize int64, conte } -func (self *Api) BuildDirectoryTree(mhash string, nameresolver bool) (key storage.Key, manifestEntryMap map[string]*manifestTrieEntry, err error) { +// BuildDirectoryTree used by swarmfs_unix +func (a *API) BuildDirectoryTree(mhash string, nameresolver bool) (addr storage.Address, manifestEntryMap map[string]*manifestTrieEntry, err error) { uri, err := Parse("bzz:/" + mhash) if err != nil { return nil, nil, err } - key, err = self.Resolve(uri) + addr, err = a.Resolve(uri) if err != nil { return nil, nil, err } quitC := make(chan bool) - rootTrie, err := loadManifest(self.dpa, key, quitC) + rootTrie, err := loadManifest(a.fileStore, addr, quitC) if err != nil { - return nil, nil, fmt.Errorf("can't load manifest %v: %v", key.String(), err) + return nil, nil, fmt.Errorf("can't load manifest %v: %v", addr.String(), err) } manifestEntryMap = map[string]*manifestTrieEntry{} @@ -486,7 +647,94 @@ func (self *Api) BuildDirectoryTree(mhash string, nameresolver bool) (key storag }) if err != nil { - return nil, nil, fmt.Errorf("list with prefix failed %v: %v", key.String(), err) + return nil, nil, fmt.Errorf("list with prefix failed %v: %v", addr.String(), err) + } + return addr, manifestEntryMap, nil +} + +// ResourceLookup Looks up mutable resource updates at specific periods and versions +func (a *API) ResourceLookup(ctx context.Context, addr storage.Address, period uint32, version uint32, maxLookup *mru.LookupParams) (string, []byte, error) { + var err error + rsrc, err := a.resource.Load(addr) + if err != nil { + return "", nil, err + } + if version != 0 { + if period == 0 { + return "", nil, mru.NewError(mru.ErrInvalidValue, "Period can't be 0") + } + _, err = a.resource.LookupVersion(ctx, rsrc.NameHash(), period, version, true, maxLookup) + } else if period != 0 { + _, err = a.resource.LookupHistorical(ctx, rsrc.NameHash(), period, true, maxLookup) + } else { + _, err = a.resource.LookupLatest(ctx, rsrc.NameHash(), true, maxLookup) + } + if err != nil { + return "", 
nil, err + } + var data []byte + _, data, err = a.resource.GetContent(rsrc.NameHash().Hex()) + if err != nil { + return "", nil, err + } + return rsrc.Name(), data, nil +} + +// ResourceCreate creates Resource and returns its key +func (a *API) ResourceCreate(ctx context.Context, name string, frequency uint64) (storage.Address, error) { + key, _, err := a.resource.New(ctx, name, frequency) + if err != nil { + return nil, err + } + return key, nil +} + +// ResourceUpdateMultihash updates a Mutable Resource and marks the update's content to be of multihash type, which will be recognized upon retrieval. +// It will fail if the data is not a valid multihash. +func (a *API) ResourceUpdateMultihash(ctx context.Context, name string, data []byte) (storage.Address, uint32, uint32, error) { + return a.resourceUpdate(ctx, name, data, true) +} + +// ResourceUpdate updates a Mutable Resource with arbitrary data. +// Upon retrieval the update will be retrieved verbatim as bytes. +func (a *API) ResourceUpdate(ctx context.Context, name string, data []byte) (storage.Address, uint32, uint32, error) { + return a.resourceUpdate(ctx, name, data, false) +} + +func (a *API) resourceUpdate(ctx context.Context, name string, data []byte, multihash bool) (storage.Address, uint32, uint32, error) { + var addr storage.Address + var err error + if multihash { + addr, err = a.resource.UpdateMultihash(ctx, name, data) + } else { + addr, err = a.resource.Update(ctx, name, data) } - return key, manifestEntryMap, nil + period, _ := a.resource.GetLastPeriod(name) + version, _ := a.resource.GetVersion(name) + return addr, period, version, err +} + +// ResourceHashSize returned the size of the digest produced by the Mutable Resource hashing function +func (a *API) ResourceHashSize() int { + return a.resource.HashSize +} + +// ResourceIsValidated checks if the Mutable Resource has an active content validator. +func (a *API) ResourceIsValidated() bool { + return a.resource.IsValidated() +} + +// ResolveResourceManifest retrieves the Mutable Resource manifest for the given address, and returns the address of the metadata chunk. 
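+// It returns an error if the manifest cannot be loaded or if its root entry is
+// not of ResourceContentType.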
+func (a *API) ResolveResourceManifest(addr storage.Address) (storage.Address, error) { + trie, err := loadManifest(a.fileStore, addr, nil) + if err != nil { + return nil, fmt.Errorf("cannot load resource manifest: %v", err) + } + + entry, _ := trie.getEntry("") + if entry.ContentType != ResourceContentType { + return nil, fmt.Errorf("not a resource manifest: %s", addr) + } + + return storage.Address(common.FromHex(entry.Hash)), nil } diff --git a/swarm/api/api_test.go b/swarm/api/api_test.go index 4ee26bd8ade25..e607dd4fc301c 100644 --- a/swarm/api/api_test.go +++ b/swarm/api/api_test.go @@ -17,33 +17,34 @@ package api import ( + "context" "errors" "fmt" "io" "io/ioutil" + "math/big" "os" "testing" "github.com/ethereum/go-ethereum/common" - "github.com/ethereum/go-ethereum/log" + "github.com/ethereum/go-ethereum/core/types" + "github.com/ethereum/go-ethereum/swarm/log" "github.com/ethereum/go-ethereum/swarm/storage" ) -func testApi(t *testing.T, f func(*Api)) { +func testAPI(t *testing.T, f func(*API, bool)) { datadir, err := ioutil.TempDir("", "bzz-test") if err != nil { t.Fatalf("unable to create temp dir: %v", err) } - os.RemoveAll(datadir) defer os.RemoveAll(datadir) - dpa, err := storage.NewLocalDPA(datadir) + fileStore, err := storage.NewLocalFileStore(datadir, make([]byte, 32)) if err != nil { return } - api := NewApi(dpa, nil) - dpa.Start() - f(api) - dpa.Stop() + api := NewAPI(fileStore, nil, nil) + f(api, false) + f(api, true) } type testResponse struct { @@ -82,10 +83,9 @@ func expResponse(content string, mimeType string, status int) *Response { return &Response{mimeType, status, int64(len(content)), content} } -// func testGet(t *testing.T, api *Api, bzzhash string) *testResponse { -func testGet(t *testing.T, api *Api, bzzhash, path string) *testResponse { - key := storage.Key(common.Hex2Bytes(bzzhash)) - reader, mimeType, status, err := api.Get(key, path) +func testGet(t *testing.T, api *API, bzzhash, path string) *testResponse { + addr := storage.Address(common.Hex2Bytes(bzzhash)) + reader, mimeType, status, _, err := api.Get(addr, path) if err != nil { t.Fatalf("unexpected error: %v", err) } @@ -106,27 +106,28 @@ func testGet(t *testing.T, api *Api, bzzhash, path string) *testResponse { } func TestApiPut(t *testing.T) { - testApi(t, func(api *Api) { + testAPI(t, func(api *API, toEncrypt bool) { content := "hello" exp := expResponse(content, "text/plain", 0) // exp := expResponse([]byte(content), "text/plain", 0) - key, err := api.Put(content, exp.MimeType) + addr, wait, err := api.Put(content, exp.MimeType, toEncrypt) if err != nil { t.Fatalf("unexpected error: %v", err) } - resp := testGet(t, api, key.String(), "") + wait() + resp := testGet(t, api, addr.Hex(), "") checkResponse(t, resp, exp) }) } // testResolver implements the Resolver interface and either returns the given // hash if it is set, or returns a "name not found" error -type testResolver struct { +type testResolveValidator struct { hash *common.Hash } -func newTestResolver(addr string) *testResolver { - r := &testResolver{} +func newTestResolveValidator(addr string) *testResolveValidator { + r := &testResolveValidator{} if addr != "" { hash := common.HexToHash(addr) r.hash = &hash @@ -134,21 +135,28 @@ func newTestResolver(addr string) *testResolver { return r } -func (t *testResolver) Resolve(addr string) (common.Hash, error) { +func (t *testResolveValidator) Resolve(addr string) (common.Hash, error) { if t.hash == nil { return common.Hash{}, fmt.Errorf("DNS name not found: %q", addr) } return *t.hash, nil } 
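+// Owner and HeaderByNumber are no-op stubs whose only purpose is to make
+// testResolveValidator satisfy the new ResolveValidator interface; the resolver
+// tests here only exercise Resolve.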
+func (t *testResolveValidator) Owner(node [32]byte) (addr common.Address, err error) { + return +} +func (t *testResolveValidator) HeaderByNumber(context.Context, *big.Int) (header *types.Header, err error) { + return +} + // TestAPIResolve tests resolving URIs which can either contain content hashes // or ENS names func TestAPIResolve(t *testing.T) { ensAddr := "swarm.eth" hashAddr := "1111111111111111111111111111111111111111111111111111111111111111" resolvedAddr := "2222222222222222222222222222222222222222222222222222222222222222" - doesResolve := newTestResolver(resolvedAddr) - doesntResolve := newTestResolver("") + doesResolve := newTestResolveValidator(resolvedAddr) + doesntResolve := newTestResolveValidator("") type test struct { desc string @@ -213,7 +221,7 @@ func TestAPIResolve(t *testing.T) { } for _, x := range tests { t.Run(x.desc, func(t *testing.T) { - api := &Api{dns: x.dns} + api := &API{dns: x.dns} uri := &URI{Addr: x.addr, Scheme: "bzz"} if x.immutable { uri.Scheme = "bzz-immutable" @@ -239,15 +247,15 @@ func TestAPIResolve(t *testing.T) { } func TestMultiResolver(t *testing.T) { - doesntResolve := newTestResolver("") + doesntResolve := newTestResolveValidator("") ethAddr := "swarm.eth" ethHash := "0x2222222222222222222222222222222222222222222222222222222222222222" - ethResolve := newTestResolver(ethHash) + ethResolve := newTestResolveValidator(ethHash) testAddr := "swarm.test" testHash := "0x1111111111111111111111111111111111111111111111111111111111111111" - testResolve := newTestResolver(testHash) + testResolve := newTestResolveValidator(testHash) tests := []struct { desc string diff --git a/swarm/api/client/client.go b/swarm/api/client/client.go index 8165d52d7ebf1..ef6222435fd47 100644 --- a/swarm/api/client/client.go +++ b/swarm/api/client/client.go @@ -30,6 +30,7 @@ import ( "net/textproto" "os" "path/filepath" + "regexp" "strconv" "strings" @@ -52,12 +53,17 @@ type Client struct { Gateway string } -// UploadRaw uploads raw data to swarm and returns the resulting hash -func (c *Client) UploadRaw(r io.Reader, size int64) (string, error) { +// UploadRaw uploads raw data to swarm and returns the resulting hash. 
If toEncrypt is true it +// uploads encrypted data +func (c *Client) UploadRaw(r io.Reader, size int64, toEncrypt bool) (string, error) { if size <= 0 { return "", errors.New("data size must be greater than zero") } - req, err := http.NewRequest("POST", c.Gateway+"/bzz-raw:/", r) + addr := "" + if toEncrypt { + addr = "encrypt" + } + req, err := http.NewRequest("POST", c.Gateway+"/bzz-raw:/"+addr, r) if err != nil { return "", err } @@ -77,18 +83,20 @@ func (c *Client) UploadRaw(r io.Reader, size int64) (string, error) { return string(data), nil } -// DownloadRaw downloads raw data from swarm -func (c *Client) DownloadRaw(hash string) (io.ReadCloser, error) { +// DownloadRaw downloads raw data from swarm and it returns a ReadCloser and a bool whether the +// content was encrypted +func (c *Client) DownloadRaw(hash string) (io.ReadCloser, bool, error) { uri := c.Gateway + "/bzz-raw:/" + hash res, err := http.DefaultClient.Get(uri) if err != nil { - return nil, err + return nil, false, err } if res.StatusCode != http.StatusOK { res.Body.Close() - return nil, fmt.Errorf("unexpected HTTP status: %s", res.Status) + return nil, false, fmt.Errorf("unexpected HTTP status: %s", res.Status) } - return res.Body, nil + isEncrypted := (res.Header.Get("X-Decrypted") == "true") + return res.Body, isEncrypted, nil } // File represents a file in a swarm manifest and is used for uploading and @@ -125,11 +133,11 @@ func Open(path string) (*File, error) { // (if the manifest argument is non-empty) or creates a new manifest containing // the file, returning the resulting manifest hash (the file will then be // available at bzz://) -func (c *Client) Upload(file *File, manifest string) (string, error) { +func (c *Client) Upload(file *File, manifest string, toEncrypt bool) (string, error) { if file.Size <= 0 { return "", errors.New("file size must be greater than zero") } - return c.TarUpload(manifest, &FileUploader{file}) + return c.TarUpload(manifest, &FileUploader{file}, toEncrypt) } // Download downloads a file with the given path from the swarm manifest with @@ -159,14 +167,14 @@ func (c *Client) Download(hash, path string) (*File, error) { // directory will then be available at bzz://path/to/file), with // the file specified in defaultPath being uploaded to the root of the manifest // (i.e. 
bzz://) -func (c *Client) UploadDirectory(dir, defaultPath, manifest string) (string, error) { +func (c *Client) UploadDirectory(dir, defaultPath, manifest string, toEncrypt bool) (string, error) { stat, err := os.Stat(dir) if err != nil { return "", err } else if !stat.IsDir() { return "", fmt.Errorf("not a directory: %s", dir) } - return c.TarUpload(manifest, &DirectoryUploader{dir, defaultPath}) + return c.TarUpload(manifest, &DirectoryUploader{dir, defaultPath}, toEncrypt) } // DownloadDirectory downloads the files contained in a swarm manifest under @@ -228,27 +236,109 @@ func (c *Client) DownloadDirectory(hash, path, destDir string) error { } } +// DownloadFile downloads a single file into the destination directory +// if the manifest entry does not specify a file name - it will fallback +// to the hash of the file as a filename +func (c *Client) DownloadFile(hash, path, dest string) error { + hasDestinationFilename := false + if stat, err := os.Stat(dest); err == nil { + hasDestinationFilename = !stat.IsDir() + } else { + if os.IsNotExist(err) { + // does not exist - should be created + hasDestinationFilename = true + } else { + return fmt.Errorf("could not stat path: %v", err) + } + } + + manifestList, err := c.List(hash, path) + if err != nil { + return fmt.Errorf("could not list manifest: %v", err) + } + + switch len(manifestList.Entries) { + case 0: + return fmt.Errorf("could not find path requested at manifest address. make sure the path you've specified is correct") + case 1: + //continue + default: + return fmt.Errorf("got too many matches for this path") + } + + uri := c.Gateway + "/bzz:/" + hash + "/" + path + req, err := http.NewRequest("GET", uri, nil) + if err != nil { + return err + } + res, err := http.DefaultClient.Do(req) + if err != nil { + return err + } + defer res.Body.Close() + + if res.StatusCode != http.StatusOK { + return fmt.Errorf("unexpected HTTP status: expected 200 OK, got %d", res.StatusCode) + } + filename := "" + if hasDestinationFilename { + filename = dest + } else { + // try to assert + re := regexp.MustCompile("[^/]+$") //everything after last slash + + if results := re.FindAllString(path, -1); len(results) > 0 { + filename = results[len(results)-1] + } else { + if entry := manifestList.Entries[0]; entry.Path != "" && entry.Path != "/" { + filename = entry.Path + } else { + // assume hash as name if there's nothing from the command line + filename = hash + } + } + filename = filepath.Join(dest, filename) + } + filePath, err := filepath.Abs(filename) + if err != nil { + return err + } + + if err := os.MkdirAll(filepath.Dir(filePath), 0777); err != nil { + return err + } + + dst, err := os.Create(filename) + if err != nil { + return err + } + defer dst.Close() + + _, err = io.Copy(dst, res.Body) + return err +} + // UploadManifest uploads the given manifest to swarm -func (c *Client) UploadManifest(m *api.Manifest) (string, error) { +func (c *Client) UploadManifest(m *api.Manifest, toEncrypt bool) (string, error) { data, err := json.Marshal(m) if err != nil { return "", err } - return c.UploadRaw(bytes.NewReader(data), int64(len(data))) + return c.UploadRaw(bytes.NewReader(data), int64(len(data)), toEncrypt) } // DownloadManifest downloads a swarm manifest -func (c *Client) DownloadManifest(hash string) (*api.Manifest, error) { - res, err := c.DownloadRaw(hash) +func (c *Client) DownloadManifest(hash string) (*api.Manifest, bool, error) { + res, isEncrypted, err := c.DownloadRaw(hash) if err != nil { - return nil, err + return nil, isEncrypted, err 
} defer res.Close() var manifest api.Manifest if err := json.NewDecoder(res).Decode(&manifest); err != nil { - return nil, err + return nil, isEncrypted, err } - return &manifest, nil + return &manifest, isEncrypted, nil } // List list files in a swarm manifest which have the given prefix, grouping @@ -350,10 +440,19 @@ type UploadFn func(file *File) error // TarUpload uses the given Uploader to upload files to swarm as a tar stream, // returning the resulting manifest hash -func (c *Client) TarUpload(hash string, uploader Uploader) (string, error) { +func (c *Client) TarUpload(hash string, uploader Uploader, toEncrypt bool) (string, error) { reqR, reqW := io.Pipe() defer reqR.Close() - req, err := http.NewRequest("POST", c.Gateway+"/bzz:/"+hash, reqR) + addr := hash + + // If there is a hash already (a manifest), then that manifest will determine if the upload has + // to be encrypted or not. If there is no manifest then the toEncrypt parameter decides if + // there is encryption or not. + if hash == "" && toEncrypt { + // This is the built-in address for the encrypted upload endpoint + addr = "encrypt" + } + req, err := http.NewRequest("POST", c.Gateway+"/bzz:/"+addr, reqR) if err != nil { return "", err } diff --git a/swarm/api/client/client_test.go b/swarm/api/client/client_test.go index c1d144e370e11..a878bff174bdd 100644 --- a/swarm/api/client/client_test.go +++ b/swarm/api/client/client_test.go @@ -26,28 +26,43 @@ import ( "testing" "github.com/ethereum/go-ethereum/swarm/api" + swarmhttp "github.com/ethereum/go-ethereum/swarm/api/http" "github.com/ethereum/go-ethereum/swarm/testutil" ) +func serverFunc(api *api.API) testutil.TestServer { + return swarmhttp.NewServer(api) +} + // TestClientUploadDownloadRaw test uploading and downloading raw data to swarm func TestClientUploadDownloadRaw(t *testing.T) { - srv := testutil.NewTestSwarmServer(t) + testClientUploadDownloadRaw(false, t) +} +func TestClientUploadDownloadRawEncrypted(t *testing.T) { + testClientUploadDownloadRaw(true, t) +} + +func testClientUploadDownloadRaw(toEncrypt bool, t *testing.T) { + srv := testutil.NewTestSwarmServer(t, serverFunc) defer srv.Close() client := NewClient(srv.URL) // upload some raw data data := []byte("foo123") - hash, err := client.UploadRaw(bytes.NewReader(data), int64(len(data))) + hash, err := client.UploadRaw(bytes.NewReader(data), int64(len(data)), toEncrypt) if err != nil { t.Fatal(err) } // check we can download the same data - res, err := client.DownloadRaw(hash) + res, isEncrypted, err := client.DownloadRaw(hash) if err != nil { t.Fatal(err) } + if isEncrypted != toEncrypt { + t.Fatalf("Expected encyption status %v got %v", toEncrypt, isEncrypted) + } defer res.Close() gotData, err := ioutil.ReadAll(res) if err != nil { @@ -61,7 +76,15 @@ func TestClientUploadDownloadRaw(t *testing.T) { // TestClientUploadDownloadFiles test uploading and downloading files to swarm // manifests func TestClientUploadDownloadFiles(t *testing.T) { - srv := testutil.NewTestSwarmServer(t) + testClientUploadDownloadFiles(false, t) +} + +func TestClientUploadDownloadFilesEncrypted(t *testing.T) { + testClientUploadDownloadFiles(true, t) +} + +func testClientUploadDownloadFiles(toEncrypt bool, t *testing.T) { + srv := testutil.NewTestSwarmServer(t, serverFunc) defer srv.Close() client := NewClient(srv.URL) @@ -74,7 +97,7 @@ func TestClientUploadDownloadFiles(t *testing.T) { Size: int64(len(data)), }, } - hash, err := client.Upload(file, manifest) + hash, err := client.Upload(file, manifest, toEncrypt) if err != nil 
{ t.Fatal(err) } @@ -159,7 +182,7 @@ func newTestDirectory(t *testing.T) string { // TestClientUploadDownloadDirectory tests uploading and downloading a // directory of files to a swarm manifest func TestClientUploadDownloadDirectory(t *testing.T) { - srv := testutil.NewTestSwarmServer(t) + srv := testutil.NewTestSwarmServer(t, serverFunc) defer srv.Close() dir := newTestDirectory(t) @@ -168,7 +191,7 @@ func TestClientUploadDownloadDirectory(t *testing.T) { // upload the directory client := NewClient(srv.URL) defaultPath := filepath.Join(dir, testDirFiles[0]) - hash, err := client.UploadDirectory(dir, defaultPath, "") + hash, err := client.UploadDirectory(dir, defaultPath, "", false) if err != nil { t.Fatalf("error uploading directory: %s", err) } @@ -217,14 +240,22 @@ func TestClientUploadDownloadDirectory(t *testing.T) { // TestClientFileList tests listing files in a swarm manifest func TestClientFileList(t *testing.T) { - srv := testutil.NewTestSwarmServer(t) + testClientFileList(false, t) +} + +func TestClientFileListEncrypted(t *testing.T) { + testClientFileList(true, t) +} + +func testClientFileList(toEncrypt bool, t *testing.T) { + srv := testutil.NewTestSwarmServer(t, serverFunc) defer srv.Close() dir := newTestDirectory(t) defer os.RemoveAll(dir) client := NewClient(srv.URL) - hash, err := client.UploadDirectory(dir, "", "") + hash, err := client.UploadDirectory(dir, "", "", toEncrypt) if err != nil { t.Fatalf("error uploading directory: %s", err) } @@ -275,7 +306,7 @@ func TestClientFileList(t *testing.T) { // TestClientMultipartUpload tests uploading files to swarm using a multipart // upload func TestClientMultipartUpload(t *testing.T) { - srv := testutil.NewTestSwarmServer(t) + srv := testutil.NewTestSwarmServer(t, serverFunc) defer srv.Close() // define an uploader which uploads testDirFiles with some data diff --git a/swarm/api/config.go b/swarm/api/config.go index 6b224140a4673..939285e09cf3d 100644 --- a/swarm/api/config.go +++ b/swarm/api/config.go @@ -21,13 +21,16 @@ import ( "fmt" "os" "path/filepath" + "time" "github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/contracts/ens" "github.com/ethereum/go-ethereum/crypto" - "github.com/ethereum/go-ethereum/log" "github.com/ethereum/go-ethereum/node" + "github.com/ethereum/go-ethereum/p2p/discover" + "github.com/ethereum/go-ethereum/swarm/log" "github.com/ethereum/go-ethereum/swarm/network" + "github.com/ethereum/go-ethereum/swarm/pss" "github.com/ethereum/go-ethereum/swarm/services/swap" "github.com/ethereum/go-ethereum/swarm/storage" ) @@ -41,47 +44,55 @@ const ( // allow several bzz nodes running in parallel type Config struct { // serialised/persisted fields - *storage.StoreParams - *storage.ChunkerParams + *storage.FileStoreParams + *storage.LocalStoreParams *network.HiveParams - Swap *swap.SwapParams - *network.SyncParams - Contract common.Address - EnsRoot common.Address - EnsAPIs []string - Path string - ListenAddr string - Port string - PublicKey string - BzzKey string - NetworkId uint64 - SwapEnabled bool - SyncEnabled bool - SwapApi string - Cors string - BzzAccount string - BootNodes string + Swap *swap.LocalProfile + Pss *pss.PssParams + //*network.SyncParams + Contract common.Address + EnsRoot common.Address + EnsAPIs []string + Path string + ListenAddr string + Port string + PublicKey string + BzzKey string + NodeID string + NetworkID uint64 + SwapEnabled bool + SyncEnabled bool + DeliverySkipCheck bool + SyncUpdateDelay time.Duration + SwapAPI string + Cors string + BzzAccount string + 
BootNodes string + privateKey *ecdsa.PrivateKey } //create a default config with all parameters to set to defaults -func NewDefaultConfig() (self *Config) { - - self = &Config{ - StoreParams: storage.NewDefaultStoreParams(), - ChunkerParams: storage.NewChunkerParams(), - HiveParams: network.NewDefaultHiveParams(), - SyncParams: network.NewDefaultSyncParams(), - Swap: swap.NewDefaultSwapParams(), - ListenAddr: DefaultHTTPListenAddr, - Port: DefaultHTTPPort, - Path: node.DefaultDataDir(), - EnsAPIs: nil, - EnsRoot: ens.TestNetAddress, - NetworkId: network.NetworkId, - SwapEnabled: false, - SyncEnabled: true, - SwapApi: "", - BootNodes: "", +func NewConfig() (c *Config) { + + c = &Config{ + LocalStoreParams: storage.NewDefaultLocalStoreParams(), + FileStoreParams: storage.NewFileStoreParams(), + HiveParams: network.NewHiveParams(), + //SyncParams: network.NewDefaultSyncParams(), + Swap: swap.NewDefaultSwapParams(), + Pss: pss.NewPssParams(), + ListenAddr: DefaultHTTPListenAddr, + Port: DefaultHTTPPort, + Path: node.DefaultDataDir(), + EnsAPIs: nil, + EnsRoot: ens.TestNetAddress, + NetworkID: network.DefaultNetworkID, + SwapEnabled: false, + SyncEnabled: true, + DeliverySkipCheck: false, + SyncUpdateDelay: 15 * time.Second, + SwapAPI: "", + BootNodes: "", } return @@ -89,11 +100,11 @@ func NewDefaultConfig() (self *Config) { //some config params need to be initialized after the complete //config building phase is completed (e.g. due to overriding flags) -func (self *Config) Init(prvKey *ecdsa.PrivateKey) { +func (c *Config) Init(prvKey *ecdsa.PrivateKey) { address := crypto.PubkeyToAddress(prvKey.PublicKey) - self.Path = filepath.Join(self.Path, "bzz-"+common.Bytes2Hex(address.Bytes())) - err := os.MkdirAll(self.Path, os.ModePerm) + c.Path = filepath.Join(c.Path, "bzz-"+common.Bytes2Hex(address.Bytes())) + err := os.MkdirAll(c.Path, os.ModePerm) if err != nil { log.Error(fmt.Sprintf("Error creating root swarm data directory: %v", err)) return @@ -103,11 +114,25 @@ func (self *Config) Init(prvKey *ecdsa.PrivateKey) { pubkeyhex := common.ToHex(pubkey) keyhex := crypto.Keccak256Hash(pubkey).Hex() - self.PublicKey = pubkeyhex - self.BzzKey = keyhex + c.PublicKey = pubkeyhex + c.BzzKey = keyhex + c.NodeID = discover.PubkeyID(&prvKey.PublicKey).String() + + if c.SwapEnabled { + c.Swap.Init(c.Contract, prvKey) + } + + c.privateKey = prvKey + c.LocalStoreParams.Init(c.Path) + c.LocalStoreParams.BaseKey = common.FromHex(keyhex) - self.Swap.Init(self.Contract, prvKey) - self.SyncParams.Init(self.Path) - self.HiveParams.Init(self.Path) - self.StoreParams.Init(self.Path) + c.Pss = c.Pss.WithPrivateKey(c.privateKey) +} + +func (c *Config) ShiftPrivateKey() (privKey *ecdsa.PrivateKey) { + if c.privateKey != nil { + privKey = c.privateKey + c.privateKey = nil + } + return privKey } diff --git a/swarm/api/config_test.go b/swarm/api/config_test.go index 5636b6dafba36..bd7e1d870500d 100644 --- a/swarm/api/config_test.go +++ b/swarm/api/config_test.go @@ -33,9 +33,10 @@ func TestConfig(t *testing.T) { t.Fatalf("failed to load private key: %v", err) } - one := NewDefaultConfig() - two := NewDefaultConfig() + one := NewConfig() + two := NewConfig() + one.LocalStoreParams = two.LocalStoreParams if equal := reflect.DeepEqual(one, two); !equal { t.Fatal("Two default configs are not equal") } @@ -49,21 +50,10 @@ func TestConfig(t *testing.T) { if one.PublicKey == "" { t.Fatal("Expected PublicKey to be set") } - - //the Init function should append subdirs to the given path - if one.Swap.PayProfile.Beneficiary == 
(common.Address{}) { + if one.Swap.PayProfile.Beneficiary == (common.Address{}) && one.SwapEnabled { t.Fatal("Failed to correctly initialize SwapParams") } - - if one.SyncParams.RequestDbPath == one.Path { - t.Fatal("Failed to correctly initialize SyncParams") - } - - if one.HiveParams.KadDbPath == one.Path { - t.Fatal("Failed to correctly initialize HiveParams") - } - - if one.StoreParams.ChunkDbPath == one.Path { + if one.ChunkDbPath == one.Path { t.Fatal("Failed to correctly initialize StoreParams") } } diff --git a/swarm/api/filesystem.go b/swarm/api/filesystem.go index f5dc90e2e5b50..297cbec79f422 100644 --- a/swarm/api/filesystem.go +++ b/swarm/api/filesystem.go @@ -27,26 +27,27 @@ import ( "sync" "github.com/ethereum/go-ethereum/common" - "github.com/ethereum/go-ethereum/log" + "github.com/ethereum/go-ethereum/swarm/log" "github.com/ethereum/go-ethereum/swarm/storage" ) const maxParallelFiles = 5 type FileSystem struct { - api *Api + api *API } -func NewFileSystem(api *Api) *FileSystem { +func NewFileSystem(api *API) *FileSystem { return &FileSystem{api} } // Upload replicates a local directory as a manifest file and uploads it -// using dpa store +// using FileStore store +// This function waits for the chunks to be stored. // TODO: localpath should point to a manifest // // DEPRECATED: Use the HTTP API instead -func (self *FileSystem) Upload(lpath, index string) (string, error) { +func (fs *FileSystem) Upload(lpath, index string, toEncrypt bool) (string, error) { var list []*manifestTrieEntry localpath, err := filepath.Abs(filepath.Clean(lpath)) if err != nil { @@ -111,13 +112,13 @@ func (self *FileSystem) Upload(lpath, index string) (string, error) { f, err := os.Open(entry.Path) if err == nil { stat, _ := f.Stat() - var hash storage.Key - wg := &sync.WaitGroup{} - hash, err = self.api.dpa.Store(f, stat.Size(), wg, nil) + var hash storage.Address + var wait func() + hash, wait, err = fs.api.fileStore.Store(f, stat.Size(), toEncrypt) if hash != nil { - list[i].Hash = hash.String() + list[i].Hash = hash.Hex() } - wg.Wait() + wait() awg.Done() if err == nil { first512 := make([]byte, 512) @@ -142,7 +143,7 @@ func (self *FileSystem) Upload(lpath, index string) (string, error) { } trie := &manifestTrie{ - dpa: self.api.dpa, + fileStore: fs.api.fileStore, } quitC := make(chan bool) for i, entry := range list { @@ -163,7 +164,7 @@ func (self *FileSystem) Upload(lpath, index string) (string, error) { err2 := trie.recalcAndStore() var hs string if err2 == nil { - hs = trie.hash.String() + hs = trie.ref.Hex() } awg.Wait() return hs, err2 @@ -173,7 +174,7 @@ func (self *FileSystem) Upload(lpath, index string) (string, error) { // under localpath // // DEPRECATED: Use the HTTP API instead -func (self *FileSystem) Download(bzzpath, localpath string) error { +func (fs *FileSystem) Download(bzzpath, localpath string) error { lpath, err := filepath.Abs(filepath.Clean(localpath)) if err != nil { return err @@ -188,7 +189,7 @@ func (self *FileSystem) Download(bzzpath, localpath string) error { if err != nil { return err } - key, err := self.api.Resolve(uri) + addr, err := fs.api.Resolve(uri) if err != nil { return err } @@ -199,14 +200,14 @@ func (self *FileSystem) Download(bzzpath, localpath string) error { quitC := make(chan bool) - trie, err := loadManifest(self.api.dpa, key, quitC) + trie, err := loadManifest(fs.api.fileStore, addr, quitC) if err != nil { log.Warn(fmt.Sprintf("fs.Download: loadManifestTrie error: %v", err)) return err } type downloadListEntry struct { - key storage.Key + addr
storage.Address path string } @@ -217,7 +218,7 @@ func (self *FileSystem) Download(bzzpath, localpath string) error { err = trie.listWithPrefix(path, quitC, func(entry *manifestTrieEntry, suffix string) { log.Trace(fmt.Sprintf("fs.Download: %#v", entry)) - key = common.Hex2Bytes(entry.Hash) + addr = common.Hex2Bytes(entry.Hash) path := lpath + "/" + suffix dir := filepath.Dir(path) if dir != prevPath { @@ -225,7 +226,7 @@ func (self *FileSystem) Download(bzzpath, localpath string) error { prevPath = dir } if (mde == nil) && (path != dir+"/") { - list = append(list, &downloadListEntry{key: key, path: path}) + list = append(list, &downloadListEntry{addr: addr, path: path}) } }) if err != nil { @@ -244,7 +245,7 @@ func (self *FileSystem) Download(bzzpath, localpath string) error { } go func(i int, entry *downloadListEntry) { defer wg.Done() - err := retrieveToFile(quitC, self.api.dpa, entry.key, entry.path) + err := retrieveToFile(quitC, fs.api.fileStore, entry.addr, entry.path) if err != nil { select { case errC <- err: @@ -267,12 +268,12 @@ func (self *FileSystem) Download(bzzpath, localpath string) error { } } -func retrieveToFile(quitC chan bool, dpa *storage.DPA, key storage.Key, path string) error { +func retrieveToFile(quitC chan bool, fileStore *storage.FileStore, addr storage.Address, path string) error { f, err := os.Create(path) // TODO: basePath separators if err != nil { return err } - reader := dpa.Retrieve(key) + reader, _ := fileStore.Retrieve(addr) writer := bufio.NewWriter(f) size, err := reader.Size(quitC) if err != nil { diff --git a/swarm/api/filesystem_test.go b/swarm/api/filesystem_test.go index 8a15e735dcb00..915dc4e0b9e5a 100644 --- a/swarm/api/filesystem_test.go +++ b/swarm/api/filesystem_test.go @@ -21,7 +21,6 @@ import ( "io/ioutil" "os" "path/filepath" - "sync" "testing" "github.com/ethereum/go-ethereum/common" @@ -30,9 +29,9 @@ import ( var testDownloadDir, _ = ioutil.TempDir(os.TempDir(), "bzz-test") -func testFileSystem(t *testing.T, f func(*FileSystem)) { - testApi(t, func(api *Api) { - f(NewFileSystem(api)) +func testFileSystem(t *testing.T, f func(*FileSystem, bool)) { + testAPI(t, func(api *API, toEncrypt bool) { + f(NewFileSystem(api), toEncrypt) }) } @@ -47,9 +46,9 @@ func readPath(t *testing.T, parts ...string) string { } func TestApiDirUpload0(t *testing.T) { - testFileSystem(t, func(fs *FileSystem) { + testFileSystem(t, func(fs *FileSystem, toEncrypt bool) { api := fs.api - bzzhash, err := fs.Upload(filepath.Join("testdata", "test0"), "") + bzzhash, err := fs.Upload(filepath.Join("testdata", "test0"), "", toEncrypt) if err != nil { t.Fatalf("unexpected error: %v", err) } @@ -63,8 +62,8 @@ func TestApiDirUpload0(t *testing.T) { exp = expResponse(content, "text/css", 0) checkResponse(t, resp, exp) - key := storage.Key(common.Hex2Bytes(bzzhash)) - _, _, _, err = api.Get(key, "") + addr := storage.Address(common.Hex2Bytes(bzzhash)) + _, _, _, _, err = api.Get(addr, "") if err == nil { t.Fatalf("expected error: %v", err) } @@ -75,27 +74,28 @@ func TestApiDirUpload0(t *testing.T) { if err != nil { t.Fatalf("unexpected error: %v", err) } - newbzzhash, err := fs.Upload(downloadDir, "") + newbzzhash, err := fs.Upload(downloadDir, "", toEncrypt) if err != nil { t.Fatalf("unexpected error: %v", err) } - if bzzhash != newbzzhash { + // TODO: currently the hash is not deterministic in the encrypted case + if !toEncrypt && bzzhash != newbzzhash { t.Fatalf("download %v reuploaded has incorrect hash, expected %v, got %v", downloadDir, bzzhash, newbzzhash) } }) } func 
TestApiDirUploadModify(t *testing.T) { - testFileSystem(t, func(fs *FileSystem) { + testFileSystem(t, func(fs *FileSystem, toEncrypt bool) { api := fs.api - bzzhash, err := fs.Upload(filepath.Join("testdata", "test0"), "") + bzzhash, err := fs.Upload(filepath.Join("testdata", "test0"), "", toEncrypt) if err != nil { t.Errorf("unexpected error: %v", err) return } - key := storage.Key(common.Hex2Bytes(bzzhash)) - key, err = api.Modify(key, "index.html", "", "") + addr := storage.Address(common.Hex2Bytes(bzzhash)) + addr, err = api.Modify(addr, "index.html", "", "") if err != nil { t.Errorf("unexpected error: %v", err) return @@ -105,24 +105,23 @@ func TestApiDirUploadModify(t *testing.T) { t.Errorf("unexpected error: %v", err) return } - wg := &sync.WaitGroup{} - hash, err := api.Store(bytes.NewReader(index), int64(len(index)), wg) - wg.Wait() + hash, wait, err := api.Store(bytes.NewReader(index), int64(len(index)), toEncrypt) + wait() if err != nil { t.Errorf("unexpected error: %v", err) return } - key, err = api.Modify(key, "index2.html", hash.Hex(), "text/html; charset=utf-8") + addr, err = api.Modify(addr, "index2.html", hash.Hex(), "text/html; charset=utf-8") if err != nil { t.Errorf("unexpected error: %v", err) return } - key, err = api.Modify(key, "img/logo.png", hash.Hex(), "text/html; charset=utf-8") + addr, err = api.Modify(addr, "img/logo.png", hash.Hex(), "text/html; charset=utf-8") if err != nil { t.Errorf("unexpected error: %v", err) return } - bzzhash = key.String() + bzzhash = addr.Hex() content := readPath(t, "testdata", "test0", "index.html") resp := testGet(t, api, bzzhash, "index2.html") @@ -138,7 +137,7 @@ func TestApiDirUploadModify(t *testing.T) { exp = expResponse(content, "text/css", 0) checkResponse(t, resp, exp) - _, _, _, err = api.Get(key, "") + _, _, _, _, err = api.Get(addr, "") if err == nil { t.Errorf("expected error: %v", err) } @@ -146,9 +145,9 @@ func TestApiDirUploadModify(t *testing.T) { } func TestApiDirUploadWithRootFile(t *testing.T) { - testFileSystem(t, func(fs *FileSystem) { + testFileSystem(t, func(fs *FileSystem, toEncrypt bool) { api := fs.api - bzzhash, err := fs.Upload(filepath.Join("testdata", "test0"), "index.html") + bzzhash, err := fs.Upload(filepath.Join("testdata", "test0"), "index.html", toEncrypt) if err != nil { t.Errorf("unexpected error: %v", err) return @@ -162,9 +161,9 @@ func TestApiDirUploadWithRootFile(t *testing.T) { } func TestApiFileUpload(t *testing.T) { - testFileSystem(t, func(fs *FileSystem) { + testFileSystem(t, func(fs *FileSystem, toEncrypt bool) { api := fs.api - bzzhash, err := fs.Upload(filepath.Join("testdata", "test0", "index.html"), "") + bzzhash, err := fs.Upload(filepath.Join("testdata", "test0", "index.html"), "", toEncrypt) if err != nil { t.Errorf("unexpected error: %v", err) return @@ -178,9 +177,9 @@ func TestApiFileUpload(t *testing.T) { } func TestApiFileUploadWithRootFile(t *testing.T) { - testFileSystem(t, func(fs *FileSystem) { + testFileSystem(t, func(fs *FileSystem, toEncrypt bool) { api := fs.api - bzzhash, err := fs.Upload(filepath.Join("testdata", "test0", "index.html"), "index.html") + bzzhash, err := fs.Upload(filepath.Join("testdata", "test0", "index.html"), "index.html", toEncrypt) if err != nil { t.Errorf("unexpected error: %v", err) return diff --git a/swarm/api/http/error.go b/swarm/api/http/error.go index 9a65412cf9971..5fff7575e8a50 100644 --- a/swarm/api/http/error.go +++ b/swarm/api/http/error.go @@ -31,6 +31,7 @@ import ( "github.com/ethereum/go-ethereum/log" 
"github.com/ethereum/go-ethereum/metrics" "github.com/ethereum/go-ethereum/swarm/api" + l "github.com/ethereum/go-ethereum/swarm/log" ) //templateMap holds a mapping of an HTTP error code to a template @@ -44,7 +45,7 @@ var ( ) //parameters needed for formatting the correct HTML page -type ErrorParams struct { +type ResponseParams struct { Msg string Code int Timestamp string @@ -113,45 +114,49 @@ func ValidateCaseErrors(r *Request) string { //For example, if the user requests bzz://read and that manifest contains entries //"readme.md" and "readinglist.txt", a HTML page is returned with this two links. //This only applies if the manifest has no default entry -func ShowMultipleChoices(w http.ResponseWriter, r *Request, list api.ManifestList) { +func ShowMultipleChoices(w http.ResponseWriter, req *Request, list api.ManifestList) { msg := "" if list.Entries == nil { - ShowError(w, r, "Could not resolve", http.StatusInternalServerError) + Respond(w, req, "Could not resolve", http.StatusInternalServerError) return } //make links relative //requestURI comes with the prefix of the ambiguous path, e.g. "read" for "readme.md" and "readinglist.txt" //to get clickable links, need to remove the ambiguous path, i.e. "read" - idx := strings.LastIndex(r.RequestURI, "/") + idx := strings.LastIndex(req.RequestURI, "/") if idx == -1 { - ShowError(w, r, "Internal Server Error", http.StatusInternalServerError) + Respond(w, req, "Internal Server Error", http.StatusInternalServerError) return } //remove ambiguous part - base := r.RequestURI[:idx+1] + base := req.RequestURI[:idx+1] for _, e := range list.Entries { //create clickable link for each entry msg += "" + e.Path + "
" } - respond(w, &r.Request, &ErrorParams{ - Code: http.StatusMultipleChoices, - Details: template.HTML(msg), - Timestamp: time.Now().Format(time.RFC1123), - template: getTemplate(http.StatusMultipleChoices), - }) + Respond(w, req, msg, http.StatusMultipleChoices) } -//ShowError is used to show an HTML error page to a client. +//Respond is used to show an HTML page to a client. //If there is an `Accept` header of `application/json`, JSON will be returned instead //The function just takes a string message which will be displayed in the error page. //The code is used to evaluate which template will be displayed //(and return the correct HTTP status code) -func ShowError(w http.ResponseWriter, r *Request, msg string, code int) { - additionalMessage := ValidateCaseErrors(r) - if code == http.StatusInternalServerError { - log.Error(msg) +func Respond(w http.ResponseWriter, req *Request, msg string, code int) { + additionalMessage := ValidateCaseErrors(req) + switch code { + case http.StatusInternalServerError: + log.Output(msg, log.LvlError, l.CallDepth, "ruid", req.ruid, "code", code) + default: + log.Output(msg, log.LvlDebug, l.CallDepth, "ruid", req.ruid, "code", code) + } + + if code >= 400 { + w.Header().Del("Cache-Control") //avoid sending cache headers for errors! + w.Header().Del("ETag") } - respond(w, &r.Request, &ErrorParams{ + + respond(w, &req.Request, &ResponseParams{ Code: code, Msg: msg, Details: template.HTML(additionalMessage), @@ -161,17 +166,17 @@ func ShowError(w http.ResponseWriter, r *Request, msg string, code int) { } //evaluate if client accepts html or json response -func respond(w http.ResponseWriter, r *http.Request, params *ErrorParams) { +func respond(w http.ResponseWriter, r *http.Request, params *ResponseParams) { w.WriteHeader(params.Code) if r.Header.Get("Accept") == "application/json" { - respondJson(w, params) + respondJSON(w, params) } else { - respondHtml(w, params) + respondHTML(w, params) } } //return a HTML page -func respondHtml(w http.ResponseWriter, params *ErrorParams) { +func respondHTML(w http.ResponseWriter, params *ResponseParams) { htmlCounter.Inc(1) err := params.template.Execute(w, params) if err != nil { @@ -180,7 +185,7 @@ func respondHtml(w http.ResponseWriter, params *ErrorParams) { } //return JSON -func respondJson(w http.ResponseWriter, params *ErrorParams) { +func respondJSON(w http.ResponseWriter, params *ResponseParams) { jsonCounter.Inc(1) w.Header().Set("Content-Type", "application/json") json.NewEncoder(w).Encode(params) @@ -190,7 +195,6 @@ func respondJson(w http.ResponseWriter, params *ErrorParams) { func getTemplate(code int) *template.Template { if val, tmpl := templateMap[code]; tmpl { return val - } else { - return templateMap[0] } + return templateMap[0] } diff --git a/swarm/api/http/error_templates.go b/swarm/api/http/error_templates.go index cc9b996ba4668..f3c643c90d39b 100644 --- a/swarm/api/http/error_templates.go +++ b/swarm/api/http/error_templates.go @@ -36,7 +36,6 @@ func GetGenericErrorPage() string { - Swarm::HTTP Disambiguation Page @@ -494,7 +505,7 @@ func GetMultipleChoicesErrorPage() string { -
+
[table markup of the two hunks below was lost in extraction; the recoverable template text is:]
@@ -513,21 +524,10 @@ func GetMultipleChoicesErrorPage() string {
-	Your request yields ambiguous results!
+	Your request may refer to {{ .Details}}.
-	Your request may refer to:
-	{{ .Details}}
 	Error code:
@@ -543,16 +543,14 @@ func GetMultipleChoicesErrorPage() string {
-	Swarm: Serverless Hosting Incentivised Peer-To-Peer Storage And Content Distribution
-	Swarm
+	Swarm: Serverless Hosting Incentivised peer-to-peer Storage and Content Distribution
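The error.go hunks above fold ShowError and ShowMultipleChoices into a single Respond helper. A minimal standalone sketch of the response pattern it implements — cache headers dropped for error codes, JSON or HTML chosen from the Accept header; responseParams and errorTmpl are stand-in names here, not the patch's identifiers:

package sketch

import (
	"encoding/json"
	"html/template"
	"net/http"
)

type responseParams struct {
	Msg  string
	Code int
}

var errorTmpl = template.Must(template.New("error").Parse(
	"<h1>{{.Code}}</h1><p>{{.Msg}}</p>"))

// respond mirrors the negotiation in error.go: error responses must not be
// cached, API clients get JSON, browsers get the HTML error template.
func respond(w http.ResponseWriter, r *http.Request, msg string, code int) {
	if code >= 400 {
		w.Header().Del("Cache-Control") // avoid sending cache headers for errors
		w.Header().Del("ETag")
	}
	params := &responseParams{Msg: msg, Code: code}
	if r.Header.Get("Accept") == "application/json" {
		w.Header().Set("Content-Type", "application/json")
		w.WriteHeader(code)
		json.NewEncoder(w).Encode(params)
		return
	}
	w.WriteHeader(code)
	errorTmpl.Execute(w, params)
}

Note the ordering: net/http ignores headers set after WriteHeader, so the cache headers are deleted and the content type set before the status line is written.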
diff --git a/swarm/api/http/server.go b/swarm/api/http/server.go index ba8b2b7ba9151..5897a1cb9df31 100644 --- a/swarm/api/http/server.go +++ b/swarm/api/http/server.go @@ -23,6 +23,7 @@ import ( "archive/tar" "bufio" "bytes" + "context" "encoding/json" "errors" "fmt" @@ -120,7 +121,7 @@ type Request struct { // HandlePostRaw handles a POST request to a raw bzz-raw:/ URI, stores the request // body in swarm and returns the resulting storage address as a text/plain response -func (s *Server) HandlePostRaw(w http.ResponseWriter, r *Request) { +func (s *Server) HandlePostRaw(ctx context.Context, w http.ResponseWriter, r *Request) { log.Debug("handle.post.raw", "ruid", r.ruid) postRawCount.Inc(1) @@ -147,7 +148,7 @@ func (s *Server) HandlePostRaw(w http.ResponseWriter, r *Request) { Respond(w, r, "missing Content-Length header in request", http.StatusBadRequest) return } - addr, _, err := s.api.Store(r.Body, r.ContentLength, toEncrypt) + addr, _, err := s.api.Store(ctx, r.Body, r.ContentLength, toEncrypt) if err != nil { postRawFail.Inc(1) Respond(w, r, err.Error(), http.StatusInternalServerError) @@ -166,7 +167,7 @@ func (s *Server) HandlePostRaw(w http.ResponseWriter, r *Request) { // (either a tar archive or multipart form), adds those files either to an // existing manifest or to a new manifest under and returns the // resulting manifest hash as a text/plain response -func (s *Server) HandlePostFiles(w http.ResponseWriter, r *Request) { +func (s *Server) HandlePostFiles(ctx context.Context, w http.ResponseWriter, r *Request) { log.Debug("handle.post.files", "ruid", r.ruid) postFilesCount.Inc(1) @@ -184,7 +185,7 @@ func (s *Server) HandlePostFiles(w http.ResponseWriter, r *Request) { var addr storage.Address if r.uri.Addr != "" && r.uri.Addr != "encrypt" { - addr, err = s.api.Resolve(r.uri) + addr, err = s.api.Resolve(ctx, r.uri) if err != nil { postFilesFail.Inc(1) Respond(w, r, fmt.Sprintf("cannot resolve %s: %s", r.uri.Addr, err), http.StatusInternalServerError) @@ -192,7 +193,7 @@ func (s *Server) HandlePostFiles(w http.ResponseWriter, r *Request) { } log.Debug("resolved key", "ruid", r.ruid, "key", addr) } else { - addr, err = s.api.NewManifest(toEncrypt) + addr, err = s.api.NewManifest(ctx, toEncrypt) if err != nil { postFilesFail.Inc(1) Respond(w, r, err.Error(), http.StatusInternalServerError) @@ -201,17 +202,17 @@ func (s *Server) HandlePostFiles(w http.ResponseWriter, r *Request) { log.Debug("new manifest", "ruid", r.ruid, "key", addr) } - newAddr, err := s.updateManifest(addr, func(mw *api.ManifestWriter) error { + newAddr, err := s.updateManifest(ctx, addr, func(mw *api.ManifestWriter) error { switch contentType { case "application/x-tar": - return s.handleTarUpload(r, mw) + return s.handleTarUpload(ctx, r, mw) case "multipart/form-data": - return s.handleMultipartUpload(r, params["boundary"], mw) + return s.handleMultipartUpload(ctx, r, params["boundary"], mw) default: - return s.handleDirectUpload(r, mw) + return s.handleDirectUpload(ctx, r, mw) } }) if err != nil { @@ -227,7 +228,7 @@ func (s *Server) HandlePostFiles(w http.ResponseWriter, r *Request) { fmt.Fprint(w, newAddr) } -func (s *Server) handleTarUpload(req *Request, mw *api.ManifestWriter) error { +func (s *Server) handleTarUpload(ctx context.Context, req *Request, mw *api.ManifestWriter) error { log.Debug("handle.tar.upload", "ruid", req.ruid) tr := tar.NewReader(req.Body) for { @@ -253,7 +254,7 @@ func (s *Server) handleTarUpload(req *Request, mw *api.ManifestWriter) error { ModTime: hdr.ModTime, } 
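// [sketch, not part of the patch] The recurring change in this file: every
// handler gains a leading context.Context and threads it into the swarm/api
// calls. ServeHTTP (further down in this diff) currently seeds the chain with
// context.TODO(), a placeholder until request-scoped contexts are wired in:
//
//	func (s *Server) ServeHTTP(rw http.ResponseWriter, r *http.Request) {
//		ctx := context.TODO() // placeholder request context
//		// ... routing ...
//		s.HandlePostFiles(ctx, w, req) // ctx reaches s.api.Store(ctx, ...) and friends
//	}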
log.Debug("adding path to new manifest", "ruid", req.ruid, "bytes", entry.Size, "path", entry.Path) - contentKey, err := mw.AddEntry(tr, entry) + contentKey, err := mw.AddEntry(ctx, tr, entry) if err != nil { return fmt.Errorf("error adding manifest entry from tar stream: %s", err) } @@ -261,7 +262,7 @@ func (s *Server) handleTarUpload(req *Request, mw *api.ManifestWriter) error { } } -func (s *Server) handleMultipartUpload(req *Request, boundary string, mw *api.ManifestWriter) error { +func (s *Server) handleMultipartUpload(ctx context.Context, req *Request, boundary string, mw *api.ManifestWriter) error { log.Debug("handle.multipart.upload", "ruid", req.ruid) mr := multipart.NewReader(req.Body, boundary) for { @@ -311,7 +312,7 @@ func (s *Server) handleMultipartUpload(req *Request, boundary string, mw *api.Ma ModTime: time.Now(), } log.Debug("adding path to new manifest", "ruid", req.ruid, "bytes", entry.Size, "path", entry.Path) - contentKey, err := mw.AddEntry(reader, entry) + contentKey, err := mw.AddEntry(ctx, reader, entry) if err != nil { return fmt.Errorf("error adding manifest entry from multipart form: %s", err) } @@ -319,9 +320,9 @@ func (s *Server) handleMultipartUpload(req *Request, boundary string, mw *api.Ma } } -func (s *Server) handleDirectUpload(req *Request, mw *api.ManifestWriter) error { +func (s *Server) handleDirectUpload(ctx context.Context, req *Request, mw *api.ManifestWriter) error { log.Debug("handle.direct.upload", "ruid", req.ruid) - key, err := mw.AddEntry(req.Body, &api.ManifestEntry{ + key, err := mw.AddEntry(ctx, req.Body, &api.ManifestEntry{ Path: req.uri.Path, ContentType: req.Header.Get("Content-Type"), Mode: 0644, @@ -338,18 +339,18 @@ func (s *Server) handleDirectUpload(req *Request, mw *api.ManifestWriter) error // HandleDelete handles a DELETE request to bzz://, removes // from and returns the resulting manifest hash as a // text/plain response -func (s *Server) HandleDelete(w http.ResponseWriter, r *Request) { +func (s *Server) HandleDelete(ctx context.Context, w http.ResponseWriter, r *Request) { log.Debug("handle.delete", "ruid", r.ruid) deleteCount.Inc(1) - key, err := s.api.Resolve(r.uri) + key, err := s.api.Resolve(ctx, r.uri) if err != nil { deleteFail.Inc(1) Respond(w, r, fmt.Sprintf("cannot resolve %s: %s", r.uri.Addr, err), http.StatusInternalServerError) return } - newKey, err := s.updateManifest(key, func(mw *api.ManifestWriter) error { + newKey, err := s.updateManifest(ctx, key, func(mw *api.ManifestWriter) error { log.Debug(fmt.Sprintf("removing %s from manifest %s", r.uri.Path, key.Log()), "ruid", r.ruid) return mw.RemoveEntry(r.uri.Path) }) @@ -399,7 +400,7 @@ func resourcePostMode(path string) (isRaw bool, frequency uint64, err error) { // The resource name will be verbatim what is passed as the address part of the url. 
// For example, if a POST is made to /bzz-resource:/foo.eth/raw/13 a new resource with frequency 13 // and name "foo.eth" will be created -func (s *Server) HandlePostResource(w http.ResponseWriter, r *Request) { +func (s *Server) HandlePostResource(ctx context.Context, w http.ResponseWriter, r *Request) { log.Debug("handle.post.resource", "ruid", r.ruid) var err error var addr storage.Address @@ -428,7 +429,7 @@ func (s *Server) HandlePostResource(w http.ResponseWriter, r *Request) { // we create a manifest so we can retrieve the resource with bzz:// later // this manifest has a special "resource type" manifest, and its hash is the key of the mutable resource // root chunk - m, err := s.api.NewResourceManifest(addr.Hex()) + m, err := s.api.NewResourceManifest(ctx, addr.Hex()) if err != nil { Respond(w, r, fmt.Sprintf("failed to create resource manifest: %v", err), http.StatusInternalServerError) return @@ -448,7 +449,7 @@ func (s *Server) HandlePostResource(w http.ResponseWriter, r *Request) { // that means that we retrieve the manifest and inspect its Hash member. manifestAddr := r.uri.Address() if manifestAddr == nil { - manifestAddr, err = s.api.Resolve(r.uri) + manifestAddr, err = s.api.Resolve(ctx, r.uri) if err != nil { getFail.Inc(1) Respond(w, r, fmt.Sprintf("cannot resolve %s: %s", r.uri.Addr, err), http.StatusNotFound) @@ -459,7 +460,7 @@ func (s *Server) HandlePostResource(w http.ResponseWriter, r *Request) { } // get the root chunk key from the manifest - addr, err = s.api.ResolveResourceManifest(manifestAddr) + addr, err = s.api.ResolveResourceManifest(ctx, manifestAddr) if err != nil { getFail.Inc(1) Respond(w, r, fmt.Sprintf("error resolving resource root chunk for %s: %s", r.uri.Addr, err), http.StatusNotFound) @@ -518,19 +519,19 @@ func (s *Server) HandlePostResource(w http.ResponseWriter, r *Request) { // bzz-resource:/// - get latest update on period n // bzz-resource://// - get update version m of period n // = ens name or hash -func (s *Server) HandleGetResource(w http.ResponseWriter, r *Request) { - s.handleGetResource(w, r) +func (s *Server) HandleGetResource(ctx context.Context, w http.ResponseWriter, r *Request) { + s.handleGetResource(ctx, w, r) } // TODO: Enable pass maxPeriod parameter -func (s *Server) handleGetResource(w http.ResponseWriter, r *Request) { +func (s *Server) handleGetResource(ctx context.Context, w http.ResponseWriter, r *Request) { log.Debug("handle.get.resource", "ruid", r.ruid) var err error // resolve the content key. 
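// [sketch] Resolution of a mutable resource is two-step: the URI (ENS name or
// raw hash) is first resolved to a manifest address, then the special resource
// manifest is dereferenced to the resource's root chunk. A hypothetical helper
// capturing the pattern this handler shares with HandlePostResource — the
// helper name is invented, the calls are the ones used below:
//
//	func resolveResource(ctx context.Context, a *api.API, uri *api.URI) (storage.Address, error) {
//		addr := uri.Address()
//		if addr == nil {
//			var err error
//			if addr, err = a.Resolve(ctx, uri); err != nil {
//				return nil, err
//			}
//		}
//		return a.ResolveResourceManifest(ctx, addr) // manifest -> root chunk address
//	}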
manifestAddr := r.uri.Address() if manifestAddr == nil { - manifestAddr, err = s.api.Resolve(r.uri) + manifestAddr, err = s.api.Resolve(ctx, r.uri) if err != nil { getFail.Inc(1) Respond(w, r, fmt.Sprintf("cannot resolve %s: %s", r.uri.Addr, err), http.StatusNotFound) @@ -541,7 +542,7 @@ func (s *Server) handleGetResource(w http.ResponseWriter, r *Request) { } // get the root chunk key from the manifest - key, err := s.api.ResolveResourceManifest(manifestAddr) + key, err := s.api.ResolveResourceManifest(ctx, manifestAddr) if err != nil { getFail.Inc(1) Respond(w, r, fmt.Sprintf("error resolving resource root chunk for %s: %s", r.uri.Addr, err), http.StatusNotFound) @@ -623,13 +624,13 @@ func (s *Server) translateResourceError(w http.ResponseWriter, r *Request, supEr // given storage key // - bzz-hash:// and responds with the hash of the content stored // at the given storage key as a text/plain response -func (s *Server) HandleGet(w http.ResponseWriter, r *Request) { +func (s *Server) HandleGet(ctx context.Context, w http.ResponseWriter, r *Request) { log.Debug("handle.get", "ruid", r.ruid, "uri", r.uri) getCount.Inc(1) var err error addr := r.uri.Address() if addr == nil { - addr, err = s.api.Resolve(r.uri) + addr, err = s.api.Resolve(ctx, r.uri) if err != nil { getFail.Inc(1) Respond(w, r, fmt.Sprintf("cannot resolve %s: %s", r.uri.Addr, err), http.StatusNotFound) @@ -644,7 +645,7 @@ func (s *Server) HandleGet(w http.ResponseWriter, r *Request) { // if path is set, interpret as a manifest and return the // raw entry at the given path if r.uri.Path != "" { - walker, err := s.api.NewManifestWalker(addr, nil) + walker, err := s.api.NewManifestWalker(ctx, addr, nil) if err != nil { getFail.Inc(1) Respond(w, r, fmt.Sprintf("%s is not a manifest", addr), http.StatusBadRequest) @@ -692,7 +693,7 @@ func (s *Server) HandleGet(w http.ResponseWriter, r *Request) { } // check the root chunk exists by retrieving the file's size - reader, isEncrypted := s.api.Retrieve(addr) + reader, isEncrypted := s.api.Retrieve(ctx, addr) if _, err := reader.Size(nil); err != nil { getFail.Inc(1) Respond(w, r, fmt.Sprintf("root chunk not found %s: %s", addr, err), http.StatusNotFound) @@ -721,7 +722,7 @@ func (s *Server) HandleGet(w http.ResponseWriter, r *Request) { // HandleGetFiles handles a GET request to bzz:/ with an Accept // header of "application/x-tar" and returns a tar stream of all files // contained in the manifest -func (s *Server) HandleGetFiles(w http.ResponseWriter, r *Request) { +func (s *Server) HandleGetFiles(ctx context.Context, w http.ResponseWriter, r *Request) { log.Debug("handle.get.files", "ruid", r.ruid, "uri", r.uri) getFilesCount.Inc(1) if r.uri.Path != "" { @@ -730,7 +731,7 @@ func (s *Server) HandleGetFiles(w http.ResponseWriter, r *Request) { return } - addr, err := s.api.Resolve(r.uri) + addr, err := s.api.Resolve(ctx, r.uri) if err != nil { getFilesFail.Inc(1) Respond(w, r, fmt.Sprintf("cannot resolve %s: %s", r.uri.Addr, err), http.StatusNotFound) @@ -738,7 +739,7 @@ func (s *Server) HandleGetFiles(w http.ResponseWriter, r *Request) { } log.Debug("handle.get.files: resolved", "ruid", r.ruid, "key", addr) - walker, err := s.api.NewManifestWalker(addr, nil) + walker, err := s.api.NewManifestWalker(ctx, addr, nil) if err != nil { getFilesFail.Inc(1) Respond(w, r, err.Error(), http.StatusInternalServerError) @@ -757,7 +758,7 @@ func (s *Server) HandleGetFiles(w http.ResponseWriter, r *Request) { } // retrieve the entry's key and size - reader, isEncrypted := 
s.api.Retrieve(storage.Address(common.Hex2Bytes(entry.Hash))) + reader, isEncrypted := s.api.Retrieve(ctx, storage.Address(common.Hex2Bytes(entry.Hash))) size, err := reader.Size(nil) if err != nil { return err @@ -797,7 +798,7 @@ func (s *Server) HandleGetFiles(w http.ResponseWriter, r *Request) { // HandleGetList handles a GET request to bzz-list:// and returns // a list of all files contained in under grouped into // common prefixes using "/" as a delimiter -func (s *Server) HandleGetList(w http.ResponseWriter, r *Request) { +func (s *Server) HandleGetList(ctx context.Context, w http.ResponseWriter, r *Request) { log.Debug("handle.get.list", "ruid", r.ruid, "uri", r.uri) getListCount.Inc(1) // ensure the root path has a trailing slash so that relative URLs work @@ -806,7 +807,7 @@ func (s *Server) HandleGetList(w http.ResponseWriter, r *Request) { return } - addr, err := s.api.Resolve(r.uri) + addr, err := s.api.Resolve(ctx, r.uri) if err != nil { getListFail.Inc(1) Respond(w, r, fmt.Sprintf("cannot resolve %s: %s", r.uri.Addr, err), http.StatusNotFound) @@ -814,7 +815,7 @@ func (s *Server) HandleGetList(w http.ResponseWriter, r *Request) { } log.Debug("handle.get.list: resolved", "ruid", r.ruid, "key", addr) - list, err := s.getManifestList(addr, r.uri.Path) + list, err := s.getManifestList(ctx, addr, r.uri.Path) if err != nil { getListFail.Inc(1) @@ -845,8 +846,8 @@ func (s *Server) HandleGetList(w http.ResponseWriter, r *Request) { json.NewEncoder(w).Encode(&list) } -func (s *Server) getManifestList(addr storage.Address, prefix string) (list api.ManifestList, err error) { - walker, err := s.api.NewManifestWalker(addr, nil) +func (s *Server) getManifestList(ctx context.Context, addr storage.Address, prefix string) (list api.ManifestList, err error) { + walker, err := s.api.NewManifestWalker(ctx, addr, nil) if err != nil { return } @@ -903,7 +904,7 @@ func (s *Server) getManifestList(addr storage.Address, prefix string) (list api. // HandleGetFile handles a GET request to bzz:/// and responds // with the content of the file at from the given -func (s *Server) HandleGetFile(w http.ResponseWriter, r *Request) { +func (s *Server) HandleGetFile(ctx context.Context, w http.ResponseWriter, r *Request) { log.Debug("handle.get.file", "ruid", r.ruid) getFileCount.Inc(1) // ensure the root path has a trailing slash so that relative URLs work @@ -915,7 +916,7 @@ func (s *Server) HandleGetFile(w http.ResponseWriter, r *Request) { manifestAddr := r.uri.Address() if manifestAddr == nil { - manifestAddr, err = s.api.Resolve(r.uri) + manifestAddr, err = s.api.Resolve(ctx, r.uri) if err != nil { getFileFail.Inc(1) Respond(w, r, fmt.Sprintf("cannot resolve %s: %s", r.uri.Addr, err), http.StatusNotFound) @@ -927,7 +928,7 @@ func (s *Server) HandleGetFile(w http.ResponseWriter, r *Request) { log.Debug("handle.get.file: resolved", "ruid", r.ruid, "key", manifestAddr) - reader, contentType, status, contentKey, err := s.api.Get(manifestAddr, r.uri.Path) + reader, contentType, status, contentKey, err := s.api.Get(ctx, manifestAddr, r.uri.Path) etag := common.Bytes2Hex(contentKey) noneMatchEtag := r.Header.Get("If-None-Match") @@ -954,7 +955,7 @@ func (s *Server) HandleGetFile(w http.ResponseWriter, r *Request) { //the request results in ambiguous files //e.g. 
/read with readme.md and readinglist.txt available in manifest if status == http.StatusMultipleChoices { - list, err := s.getManifestList(manifestAddr, r.uri.Path) + list, err := s.getManifestList(ctx, manifestAddr, r.uri.Path) if err != nil { getFileFail.Inc(1) @@ -1011,6 +1012,8 @@ func (b bufferedReadSeeker) Seek(offset int64, whence int) (int64, error) { } func (s *Server) ServeHTTP(rw http.ResponseWriter, r *http.Request) { + ctx := context.TODO() + defer metrics.GetOrRegisterResettingTimer(fmt.Sprintf("http.request.%s.time", r.Method), nil).UpdateSince(time.Now()) req := &Request{Request: *r, ruid: uuid.New()[:8]} metrics.GetOrRegisterCounter(fmt.Sprintf("http.request.%s", r.Method), nil).Inc(1) @@ -1055,16 +1058,16 @@ func (s *Server) ServeHTTP(rw http.ResponseWriter, r *http.Request) { case "POST": if uri.Raw() { log.Debug("handlePostRaw") - s.HandlePostRaw(w, req) + s.HandlePostRaw(ctx, w, req) } else if uri.Resource() { log.Debug("handlePostResource") - s.HandlePostResource(w, req) + s.HandlePostResource(ctx, w, req) } else if uri.Immutable() || uri.List() || uri.Hash() { log.Debug("POST not allowed on immutable, list or hash") Respond(w, req, fmt.Sprintf("POST method on scheme %s not allowed", uri.Scheme), http.StatusMethodNotAllowed) } else { log.Debug("handlePostFiles") - s.HandlePostFiles(w, req) + s.HandlePostFiles(ctx, w, req) } case "PUT": @@ -1076,31 +1079,31 @@ func (s *Server) ServeHTTP(rw http.ResponseWriter, r *http.Request) { Respond(w, req, fmt.Sprintf("DELETE method to %s not allowed", uri), http.StatusBadRequest) return } - s.HandleDelete(w, req) + s.HandleDelete(ctx, w, req) case "GET": if uri.Resource() { - s.HandleGetResource(w, req) + s.HandleGetResource(ctx, w, req) return } if uri.Raw() || uri.Hash() { - s.HandleGet(w, req) + s.HandleGet(ctx, w, req) return } if uri.List() { - s.HandleGetList(w, req) + s.HandleGetList(ctx, w, req) return } if r.Header.Get("Accept") == "application/x-tar" { - s.HandleGetFiles(w, req) + s.HandleGetFiles(ctx, w, req) return } - s.HandleGetFile(w, req) + s.HandleGetFile(ctx, w, req) default: Respond(w, req, fmt.Sprintf("%s method is not supported", r.Method), http.StatusMethodNotAllowed) @@ -1109,8 +1112,8 @@ func (s *Server) ServeHTTP(rw http.ResponseWriter, r *http.Request) { log.Info("served response", "ruid", req.ruid, "code", w.statusCode) } -func (s *Server) updateManifest(addr storage.Address, update func(mw *api.ManifestWriter) error) (storage.Address, error) { - mw, err := s.api.NewManifestWriter(addr, nil) +func (s *Server) updateManifest(ctx context.Context, addr storage.Address, update func(mw *api.ManifestWriter) error) (storage.Address, error) { + mw, err := s.api.NewManifestWriter(ctx, addr, nil) if err != nil { return nil, err } diff --git a/swarm/api/http/server_test.go b/swarm/api/http/server_test.go index 9fb21f7a357a0..bfbc0a79dbe76 100644 --- a/swarm/api/http/server_test.go +++ b/swarm/api/http/server_test.go @@ -18,6 +18,7 @@ package http import ( "bytes" + "context" "crypto/rand" "encoding/json" "errors" @@ -382,15 +383,19 @@ func testBzzGetPath(encrypted bool, t *testing.T) { for i, mf := range testmanifest { reader[i] = bytes.NewReader([]byte(mf)) - var wait func() - addr[i], wait, err = srv.FileStore.Store(reader[i], int64(len(mf)), encrypted) + var wait func(context.Context) error + ctx := context.TODO() + addr[i], wait, err = srv.FileStore.Store(ctx, reader[i], int64(len(mf)), encrypted) for j := i + 1; j < len(testmanifest); j++ { testmanifest[j] = strings.Replace(testmanifest[j], fmt.Sprintf("", i), 
addr[i].Hex(), -1) } if err != nil { t.Fatal(err) } - wait() + err = wait(ctx) + if err != nil { + t.Fatal(err) + } } rootRef := addr[2].Hex() diff --git a/swarm/api/http/templates.go b/swarm/api/http/templates.go index ffd8164930740..8897b96946049 100644 --- a/swarm/api/http/templates.go +++ b/swarm/api/http/templates.go @@ -79,20 +79,25 @@ var landingPageTemplate = template.Must(template.New("landingPage").Parse(` Swarm :: Welcome to Swarm - - - -
[landing-page HTML markup was lost in extraction; the template is deleted and re-added with reworked markup, its recoverable text being:]
-	Welcome to Swarm
-	Enter the hash or ENS of a Swarm-hosted file below:
-	Swarm: Serverless Hosting Incentivised Peer-To-Peer Storage And Content Distribution
-	Swarm
+	Welcome to Swarm
+	Enter the hash or ENS of a Swarm-hosted file below:
+	Swarm: Serverless Hosting Incentivised peer-to-peer Storage and Content Distribution
+ `[1:])) diff --git a/swarm/api/manifest.go b/swarm/api/manifest.go index 28597636eed67..78d1418bc27d5 100644 --- a/swarm/api/manifest.go +++ b/swarm/api/manifest.go @@ -18,6 +18,7 @@ package api import ( "bytes" + "context" "encoding/json" "errors" "fmt" @@ -61,20 +62,20 @@ type ManifestList struct { } // NewManifest creates and stores a new, empty manifest -func (a *API) NewManifest(toEncrypt bool) (storage.Address, error) { +func (a *API) NewManifest(ctx context.Context, toEncrypt bool) (storage.Address, error) { var manifest Manifest data, err := json.Marshal(&manifest) if err != nil { return nil, err } - key, wait, err := a.Store(bytes.NewReader(data), int64(len(data)), toEncrypt) - wait() + key, wait, err := a.Store(ctx, bytes.NewReader(data), int64(len(data)), toEncrypt) + wait(ctx) return key, err } // Manifest hack for supporting Mutable Resource Updates from the bzz: scheme // see swarm/api/api.go:API.Get() for more information -func (a *API) NewResourceManifest(resourceAddr string) (storage.Address, error) { +func (a *API) NewResourceManifest(ctx context.Context, resourceAddr string) (storage.Address, error) { var manifest Manifest entry := ManifestEntry{ Hash: resourceAddr, @@ -85,7 +86,7 @@ func (a *API) NewResourceManifest(resourceAddr string) (storage.Address, error) if err != nil { return nil, err } - key, _, err := a.Store(bytes.NewReader(data), int64(len(data)), false) + key, _, err := a.Store(ctx, bytes.NewReader(data), int64(len(data)), false) return key, err } @@ -96,8 +97,8 @@ type ManifestWriter struct { quitC chan bool } -func (a *API) NewManifestWriter(addr storage.Address, quitC chan bool) (*ManifestWriter, error) { - trie, err := loadManifest(a.fileStore, addr, quitC) +func (a *API) NewManifestWriter(ctx context.Context, addr storage.Address, quitC chan bool) (*ManifestWriter, error) { + trie, err := loadManifest(ctx, a.fileStore, addr, quitC) if err != nil { return nil, fmt.Errorf("error loading manifest %s: %s", addr, err) } @@ -105,9 +106,8 @@ func (a *API) NewManifestWriter(addr storage.Address, quitC chan bool) (*Manifes } // AddEntry stores the given data and adds the resulting key to the manifest -func (m *ManifestWriter) AddEntry(data io.Reader, e *ManifestEntry) (storage.Address, error) { - - key, _, err := m.api.Store(data, e.Size, m.trie.encrypted) +func (m *ManifestWriter) AddEntry(ctx context.Context, data io.Reader, e *ManifestEntry) (storage.Address, error) { + key, _, err := m.api.Store(ctx, data, e.Size, m.trie.encrypted) if err != nil { return nil, err } @@ -136,8 +136,8 @@ type ManifestWalker struct { quitC chan bool } -func (a *API) NewManifestWalker(addr storage.Address, quitC chan bool) (*ManifestWalker, error) { - trie, err := loadManifest(a.fileStore, addr, quitC) +func (a *API) NewManifestWalker(ctx context.Context, addr storage.Address, quitC chan bool) (*ManifestWalker, error) { + trie, err := loadManifest(ctx, a.fileStore, addr, quitC) if err != nil { return nil, fmt.Errorf("error loading manifest %s: %s", addr, err) } @@ -204,10 +204,10 @@ type manifestTrieEntry struct { subtrie *manifestTrie } -func loadManifest(fileStore *storage.FileStore, hash storage.Address, quitC chan bool) (trie *manifestTrie, err error) { // non-recursive, subtrees are downloaded on-demand +func loadManifest(ctx context.Context, fileStore *storage.FileStore, hash storage.Address, quitC chan bool) (trie *manifestTrie, err error) { // non-recursive, subtrees are downloaded on-demand log.Trace("manifest lookup", "key", hash) // retrieve manifest via FileStore - 
manifestReader, isEncrypted := fileStore.Retrieve(hash) + manifestReader, isEncrypted := fileStore.Retrieve(ctx, hash) log.Trace("reader retrieved", "key", hash) return readManifest(manifestReader, hash, fileStore, isEncrypted, quitC) } @@ -382,8 +382,12 @@ func (mt *manifestTrie) recalcAndStore() error { } sr := bytes.NewReader(manifest) - key, wait, err2 := mt.fileStore.Store(sr, int64(len(manifest)), mt.encrypted) - wait() + ctx := context.TODO() + key, wait, err2 := mt.fileStore.Store(ctx, sr, int64(len(manifest)), mt.encrypted) + if err2 != nil { + return err2 + } + err2 = wait(ctx) mt.ref = key return err2 } @@ -391,7 +395,7 @@ func (mt *manifestTrie) recalcAndStore() error { func (mt *manifestTrie) loadSubTrie(entry *manifestTrieEntry, quitC chan bool) (err error) { if entry.subtrie == nil { hash := common.Hex2Bytes(entry.Hash) - entry.subtrie, err = loadManifest(mt.fileStore, hash, quitC) + entry.subtrie, err = loadManifest(context.TODO(), mt.fileStore, hash, quitC) entry.Hash = "" // might not match, should be recalculated } return diff --git a/swarm/api/storage.go b/swarm/api/storage.go index 6ab4af6c4b200..8646dc41f841c 100644 --- a/swarm/api/storage.go +++ b/swarm/api/storage.go @@ -17,6 +17,7 @@ package api import ( + "context" "path" "github.com/ethereum/go-ethereum/swarm/storage" @@ -45,8 +46,8 @@ func NewStorage(api *API) *Storage { // its content type // // DEPRECATED: Use the HTTP API instead -func (s *Storage) Put(content, contentType string, toEncrypt bool) (storage.Address, func(), error) { - return s.api.Put(content, contentType, toEncrypt) +func (s *Storage) Put(ctx context.Context, content string, contentType string, toEncrypt bool) (storage.Address, func(context.Context) error, error) { + return s.api.Put(ctx, content, contentType, toEncrypt) } // Get retrieves the content from bzzpath and reads the response in full @@ -57,16 +58,16 @@ func (s *Storage) Put(content, contentType string, toEncrypt bool) (storage.Addr // size is resp.Size // // DEPRECATED: Use the HTTP API instead -func (s *Storage) Get(bzzpath string) (*Response, error) { +func (s *Storage) Get(ctx context.Context, bzzpath string) (*Response, error) { uri, err := Parse(path.Join("bzz:/", bzzpath)) if err != nil { return nil, err } - addr, err := s.api.Resolve(uri) + addr, err := s.api.Resolve(ctx, uri) if err != nil { return nil, err } - reader, mimeType, status, _, err := s.api.Get(addr, uri.Path) + reader, mimeType, status, _, err := s.api.Get(ctx, addr, uri.Path) if err != nil { return nil, err } @@ -87,16 +88,16 @@ func (s *Storage) Get(bzzpath string) (*Response, error) { // and merge on to it. 
creating an entry with contentType (mime) // // DEPRECATED: Use the HTTP API instead -func (s *Storage) Modify(rootHash, path, contentHash, contentType string) (newRootHash string, err error) { +func (s *Storage) Modify(ctx context.Context, rootHash, path, contentHash, contentType string) (newRootHash string, err error) { uri, err := Parse("bzz:/" + rootHash) if err != nil { return "", err } - addr, err := s.api.Resolve(uri) + addr, err := s.api.Resolve(ctx, uri) if err != nil { return "", err } - addr, err = s.api.Modify(addr, path, contentHash, contentType) + addr, err = s.api.Modify(ctx, addr, path, contentHash, contentType) if err != nil { return "", err } diff --git a/swarm/api/storage_test.go b/swarm/api/storage_test.go index 9d23e8f136993..ef96972b68a61 100644 --- a/swarm/api/storage_test.go +++ b/swarm/api/storage_test.go @@ -17,6 +17,7 @@ package api import ( + "context" "testing" ) @@ -31,18 +32,22 @@ func TestStoragePutGet(t *testing.T) { content := "hello" exp := expResponse(content, "text/plain", 0) // exp := expResponse([]byte(content), "text/plain", 0) - bzzkey, wait, err := api.Put(content, exp.MimeType, toEncrypt) + ctx := context.TODO() + bzzkey, wait, err := api.Put(ctx, content, exp.MimeType, toEncrypt) + if err != nil { + t.Fatalf("unexpected error: %v", err) + } + err = wait(ctx) if err != nil { t.Fatalf("unexpected error: %v", err) } - wait() bzzhash := bzzkey.Hex() // to check put against the API#Get resp0 := testGet(t, api.api, bzzhash, "") checkResponse(t, resp0, exp) // check storage#Get - resp, err := api.Get(bzzhash) + resp, err := api.Get(context.TODO(), bzzhash) if err != nil { t.Fatalf("unexpected error: %v", err) } diff --git a/swarm/bmt/bmt.go b/swarm/bmt/bmt.go index 71aee24955aa3..835587020c393 100644 --- a/swarm/bmt/bmt.go +++ b/swarm/bmt/bmt.go @@ -117,10 +117,7 @@ func NewTreePool(hasher BaseHasherFunc, segmentCount, capacity int) *TreePool { zerohashes[0] = zeros h := hasher() for i := 1; i < depth; i++ { - h.Reset() - h.Write(zeros) - h.Write(zeros) - zeros = h.Sum(nil) + zeros = doHash(h, nil, zeros, zeros) zerohashes[i] = zeros } return &TreePool{ @@ -318,41 +315,19 @@ func (h *Hasher) Sum(b []byte) (r []byte) { // * if sequential write is used (can read sections) func (h *Hasher) sum(b []byte, release, section bool) (r []byte) { t := h.bmt - h.finalise(section) - if t.offset > 0 { // get the last node (double segment) - - // padding the segment with zero - copy(t.segment[t.offset:], h.pool.zerohashes[0]) - } - if section { - if t.cur%2 == 1 { - // if just finished current segment, copy it to the right half of the chunk - copy(t.section[h.pool.SegmentSize:], t.segment) - } else { - // copy segment to front of section, zero pad the right half - copy(t.section, t.segment) - copy(t.section[h.pool.SegmentSize:], h.pool.zerohashes[0]) - } - h.writeSection(t.cur, t.section) - } else { - // TODO: h.writeSegment(t.cur, t.segment) - panic("SegmentWriter not implemented") - } + bh := h.pool.hasher() + go h.writeSection(t.cur, t.section, true) bmtHash := <-t.result span := t.span - + // fmt.Println(t.draw(bmtHash)) if release { h.releaseTree() } - // sha3(span + BMT(pure_chunk)) + // b + sha3(span + BMT(pure_chunk)) if span == nil { - return bmtHash + return append(b, bmtHash...)
} - bh := h.pool.hasher() - bh.Reset() - bh.Write(span) - bh.Write(bmtHash) - return bh.Sum(b) + return doHash(bh, b, span, bmtHash) } // Hasher implements the SwarmHash interface @@ -367,37 +342,41 @@ func (h *Hasher) Write(b []byte) (int, error) { return 0, nil } t := h.bmt - need := (h.pool.SegmentCount - t.cur) * h.pool.SegmentSize - if l < need { - need = l - } - // calculate missing bit to complete current open segment - rest := h.pool.SegmentSize - t.offset - if need < rest { - rest = need - } - copy(t.segment[t.offset:], b[:rest]) - need -= rest - size := (t.offset + rest) % h.pool.SegmentSize - // read full segments and the last possibly partial segment - for need > 0 { - // push all finished chunks we read - if t.cur%2 == 0 { - copy(t.section, t.segment) - } else { - copy(t.section[h.pool.SegmentSize:], t.segment) - h.writeSection(t.cur, t.section) + secsize := 2 * h.pool.SegmentSize + // calculate length of missing bit to complete current open section + smax := secsize - t.offset + // if at the beginning of chunk or middle of the section + if t.offset < secsize { + // fill up current segment from buffer + copy(t.section[t.offset:], b) + // if input buffer consumed and open section not complete, then + // advance offset and return + if smax == 0 { + smax = secsize + } + if l <= smax { + t.offset += l + return l, nil } - size = h.pool.SegmentSize - if need < size { - size = need + } else { + if t.cur == h.pool.SegmentCount*2 { + return 0, nil } - copy(t.segment, b[rest:rest+size]) - need -= size - rest += size + } + // read full segments and the last possibly partial segment from the input buffer + for smax < l { + // section complete; push to tree asynchronously + go h.writeSection(t.cur, t.section, false) + // reset section + t.section = make([]byte, secsize) + // copy from input buffer at smax to right half of section + copy(t.section, b[smax:]) + // advance cursor t.cur++ + // smax here represents successive offsets in the input buffer + smax += secsize } - t.offset = size % h.pool.SegmentSize + t.offset = l - smax + secsize return l, nil } @@ -426,6 +405,8 @@ func (h *Hasher) releaseTree() { t.span = nil t.hash = nil h.bmt = nil + t.section = make([]byte, h.pool.SegmentSize*2) + t.segment = make([]byte, h.pool.SegmentSize) h.pool.release(t) } } @@ -435,29 +416,37 @@ // go h.run(h.bmt.leaves[i/2], h.pool.hasher(), i%2 == 0, s) // } -// writeSection writes the hash of i/2-th segction into right level 1 node of the BMT tree -func (h *Hasher) writeSection(i int, section []byte) { - n := h.bmt.leaves[i/2] +// writeSection writes the hash of i-th section into level 1 node of the BMT tree +func (h *Hasher) writeSection(i int, section []byte, final bool) { + // select the leaf node for the section + n := h.bmt.leaves[i] isLeft := n.isLeft n = n.parent bh := h.pool.hasher() - bh.Write(section) - go func() { - sum := bh.Sum(nil) - if n == nil { - h.bmt.result <- sum - return - } - h.run(n, bh, isLeft, sum) - }() + // hash the section + s := doHash(bh, nil, section) + // write hash into parent node + if final { + // for the last segment use writeFinalNode + h.writeFinalNode(1, n, bh, isLeft, s) + } else { + h.writeNode(n, bh, isLeft, s) + } } -// run pushes the data to the node +// writeNode pushes the data to the node // if it is the first of 2 sisters written the routine returns // if it is the second, it calculates the hash and writes it // to the parent node recursively -func (h
*Hasher) writeNode(n *node, bh hash.Hash, isLeft bool, s []byte) { + level := 1 for { + // at the root of the bmt just write the result to the result channel + if n == nil { + h.bmt.result <- s + return + } + // otherwise assign child hash to branch if isLeft { n.left = s } else { @@ -467,44 +456,68 @@ func (h *Hasher) writeNode(n *node, bh hash.Hash, isLeft bool, s []byte) { if n.toggle() { return } - // the second thread now can be sure both left and right children are written - // it calculates the hash of left|right and take it to the next level - bh.Reset() - bh.Write(n.left) - bh.Write(n.right) - s = bh.Sum(nil) - - // at the root of the bmt just write the result to the result channel - if n.parent == nil { - h.bmt.result <- s - return - } - - // otherwise iterate on parent + // the thread coming later now can be sure both left and right children are written + // it calculates the hash of left|right and pushes it to the parent + s = doHash(bh, nil, n.left, n.right) isLeft = n.isLeft n = n.parent + level++ } } -// finalise is following the path starting from the final datasegment to the +// writeFinalNode is following the path starting from the final datasegment to the // BMT root via parents // for unbalanced trees it fills in the missing right sister nodes using // the pool's lookup table for BMT subtree root hashes for all-zero sections -func (h *Hasher) finalise(skip bool) { - t := h.bmt - isLeft := t.cur%2 == 0 - n := t.leaves[t.cur/2] - for level := 0; n != nil; level++ { - // when the final segment's path is going via left child node - // we include an all-zero subtree hash for the right level and toggle the node. - // when the path is going through right child node, nothing to do - if isLeft && !skip { +// otherwise behaves like `writeNode` +func (h *Hasher) writeFinalNode(level int, n *node, bh hash.Hash, isLeft bool, s []byte) { + + for { + // at the root of the bmt just write the result to the result channel + if n == nil { + if s != nil { + h.bmt.result <- s + } + return + } + var noHash bool + if isLeft { + // coming from left sister branch + // when the final section's path is going via left child node + // we include an all-zero subtree hash for the right level and toggle the node.
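// [sketch] The all-zero subtree table consulted here is precomputed in
// NewTreePool (see the bmt.go hunk above): level 0 is an all-zero segment and
// each higher level hashes two copies of the level below, i.e.
//
//	zeros := make([]byte, segmentSize) // segmentSize: assumed name for the pool's segment size
//	zerohashes[0] = zeros
//	for i := 1; i < depth; i++ {
//		zeros = doHash(h, nil, zeros, zeros) // H(zeros || zeros); doHash as defined in this patch
//		zerohashes[i] = zeros
//	}
//
// so a missing right sister at any level can be filled in constant time.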
+ // when the path is going through right child node, nothing to do n.right = h.pool.zerohashes[level] - n.toggle() + if s != nil { + n.left = s + // if a left final node carries a hash, it must be the first (and only thread) + // so the toggle is already in passive state and there is no need to call it + // yet the thread needs to carry on pushing the hash to the parent + } else { + // if again the first thread, then propagate nil and calculate no hash + noHash = n.toggle() + } + } else { + // right sister branch + // if s is nil, then thread arrived first at previous node and here there will be two, + // so no need to do anything + if s != nil { + n.right = s + noHash = n.toggle() + } else { + noHash = true + } + } + // the child-thread first arriving will just continue resetting s to nil + // the second thread now can be sure both left and right children are written + // it calculates the hash of left|right and pushes it to the parent + if noHash { + s = nil + } else { + s = doHash(bh, nil, n.left, n.right) } - skip = false isLeft = n.isLeft n = n.parent + level++ } } @@ -525,6 +538,15 @@ func (n *node) toggle() bool { return atomic.AddInt32(&n.state, 1)%2 == 1 } +// calculates the hash of the data using hash.Hash +func doHash(h hash.Hash, b []byte, data ...[]byte) []byte { + h.Reset() + for _, v := range data { + h.Write(v) + } + return h.Sum(b) +} + func hashstr(b []byte) string { end := len(b) if end > 4 { diff --git a/swarm/bmt/bmt_r.go b/swarm/bmt/bmt_r.go index c61d2dc73212f..0cb6c146f5d72 100644 --- a/swarm/bmt/bmt_r.go +++ b/swarm/bmt/bmt_r.go @@ -80,6 +80,5 @@ func (rh *RefHasher) hash(data []byte, length int) []byte { } rh.hasher.Reset() rh.hasher.Write(section) - s := rh.hasher.Sum(nil) - return s + return rh.hasher.Sum(nil) } diff --git a/swarm/bmt/bmt_test.go b/swarm/bmt/bmt_test.go index e074d90e73dd8..ae40eadab8a05 100644 --- a/swarm/bmt/bmt_test.go +++ b/swarm/bmt/bmt_test.go @@ -34,12 +34,12 @@ import ( // the actual data length generated (could be longer than max datalength of the BMT) const BufferSize = 4128 +var counts = []int{1, 2, 3, 4, 5, 8, 9, 15, 16, 17, 32, 37, 42, 53, 63, 64, 65, 111, 127, 128} + +// calculates the Keccak256 SHA3 hash of the data func sha3hash(data ...[]byte) []byte { h := sha3.NewKeccak256() - for _, v := range data { - h.Write(v) - } - return h.Sum(nil) + return doHash(h, nil, data...) } // TestRefHasher tests that the RefHasher computes the expected BMT hash for @@ -129,31 +129,48 @@ func TestRefHasher(t *testing.T) { } } -func TestHasherCorrectness(t *testing.T) { - err := testHasher(testBaseHasher) - if err != nil { - t.Fatal(err) +// tests if hasher responds with correct hash +func TestHasherEmptyData(t *testing.T) { + hasher := sha3.NewKeccak256 + var data []byte + for _, count := range counts { + t.Run(fmt.Sprintf("%d_segments", count), func(t *testing.T) { + pool := NewTreePool(hasher, count, PoolSize) + defer pool.Drain(0) + bmt := New(pool) + rbmt := NewRefHasher(hasher, count) + refHash := rbmt.Hash(data) + expHash := Hash(bmt, nil, data) + if !bytes.Equal(expHash, refHash) { + t.Fatalf("hash mismatch with reference.
-func testHasher(f func(BaseHasherFunc, []byte, int, int) error) error {
+func TestHasherCorrectness(t *testing.T) {
 	data := newData(BufferSize)
 	hasher := sha3.NewKeccak256
 	size := hasher().Size()
-	counts := []int{1, 2, 3, 4, 5, 8, 16, 32, 64, 128}
 
 	var err error
 	for _, count := range counts {
-		max := count * size
-		incr := 1
-		for n := 1; n <= max; n += incr {
-			err = f(hasher, data, n, count)
-			if err != nil {
-				return err
+		t.Run(fmt.Sprintf("segments_%v", count), func(t *testing.T) {
+			max := count * size
+			incr := 1
+			capacity := 1
+			pool := NewTreePool(hasher, count, capacity)
+			defer pool.Drain(0)
+			for n := 0; n <= max; n += incr {
+				incr = 1 + rand.Intn(5)
+				bmt := New(pool)
+				err = testHasherCorrectness(bmt, hasher, data, n, count)
+				if err != nil {
+					t.Fatal(err)
+				}
 			}
-		}
+		})
 	}
-	return nil
 }
 
 // Tests that the BMT hasher can be synchronously reused with poolsizes 1 and PoolSize
@@ -215,12 +232,69 @@ LOOP:
 	}
 }
 
-// helper function that creates a tree pool
-func testBaseHasher(hasher BaseHasherFunc, d []byte, n, count int) error {
-	pool := NewTreePool(hasher, count, 1)
-	defer pool.Drain(0)
-	bmt := New(pool)
-	return testHasherCorrectness(bmt, hasher, d, n, count)
+// Tests that the BMT Hasher's io.Writer interface works correctly
+// even with multiple short random write buffers
+func TestBMTHasherWriterBuffers(t *testing.T) {
+	hasher := sha3.NewKeccak256
+
+	for _, count := range counts {
+		t.Run(fmt.Sprintf("%d_segments", count), func(t *testing.T) {
+			errc := make(chan error)
+			pool := NewTreePool(hasher, count, PoolSize)
+			defer pool.Drain(0)
+			n := count * 32
+			bmt := New(pool)
+			data := newData(n)
+			rbmt := NewRefHasher(hasher, count)
+			refHash := rbmt.Hash(data)
+			expHash := Hash(bmt, nil, data)
+			if !bytes.Equal(expHash, refHash) {
+				t.Fatalf("hash mismatch with reference. expected %x, got %x", refHash, expHash)
+			}
+			attempts := 10
+			f := func() error {
+				bmt := New(pool)
+				bmt.Reset()
+				var buflen int
+				for offset := 0; offset < n; offset += buflen {
+					buflen = rand.Intn(n-offset) + 1
+					read, err := bmt.Write(data[offset : offset+buflen])
+					if err != nil {
+						return err
+					}
+					if read != buflen {
+						return fmt.Errorf("incorrect read. expected %v bytes, got %v", buflen, read)
+					}
+				}
+				hash := bmt.Sum(nil)
+				if !bytes.Equal(hash, expHash) {
+					return fmt.Errorf("hash mismatch. 
expected %x, got %x", hash, expHash) + } + return nil + } + + for j := 0; j < attempts; j++ { + go func() { + errc <- f() + }() + } + timeout := time.NewTimer(2 * time.Second) + for { + select { + case err := <-errc: + if err != nil { + t.Fatal(err) + } + attempts-- + if attempts == 0 { + return + } + case <-timeout.C: + t.Fatalf("timeout") + } + } + }) + } } // helper function that compares reference and optimised implementations on diff --git a/swarm/fuse/fuse_file.go b/swarm/fuse/fuse_file.go index 80c26fe05fcdd..be3b01c8c4766 100644 --- a/swarm/fuse/fuse_file.go +++ b/swarm/fuse/fuse_file.go @@ -84,7 +84,7 @@ func (sf *SwarmFile) Attr(ctx context.Context, a *fuse.Attr) error { a.Gid = uint32(os.Getegid()) if sf.fileSize == -1 { - reader, _ := sf.mountInfo.swarmApi.Retrieve(sf.addr) + reader, _ := sf.mountInfo.swarmApi.Retrieve(ctx, sf.addr) quitC := make(chan bool) size, err := reader.Size(quitC) if err != nil { @@ -104,7 +104,7 @@ func (sf *SwarmFile) Read(ctx context.Context, req *fuse.ReadRequest, resp *fuse sf.lock.RLock() defer sf.lock.RUnlock() if sf.reader == nil { - sf.reader, _ = sf.mountInfo.swarmApi.Retrieve(sf.addr) + sf.reader, _ = sf.mountInfo.swarmApi.Retrieve(ctx, sf.addr) } buf := make([]byte, req.Size) n, err := sf.reader.ReadAt(buf, req.Offset) diff --git a/swarm/fuse/swarmfs_test.go b/swarm/fuse/swarmfs_test.go index ed2021c4e0ee6..d579d15a02b83 100644 --- a/swarm/fuse/swarmfs_test.go +++ b/swarm/fuse/swarmfs_test.go @@ -20,6 +20,7 @@ package fuse import ( "bytes" + "context" "crypto/rand" "flag" "fmt" @@ -110,7 +111,7 @@ func createTestFilesAndUploadToSwarm(t *testing.T, api *api.API, files map[strin } //upload directory to swarm and return hash - bzzhash, err := api.Upload(uploadDir, "", toEncrypt) + bzzhash, err := api.Upload(context.TODO(), uploadDir, "", toEncrypt) if err != nil { t.Fatalf("Error uploading directory %v: %vm encryption: %v", uploadDir, err, toEncrypt) } diff --git a/swarm/fuse/swarmfs_unix.go b/swarm/fuse/swarmfs_unix.go index 74dd84a90351d..7a913b0dee7d4 100644 --- a/swarm/fuse/swarmfs_unix.go +++ b/swarm/fuse/swarmfs_unix.go @@ -19,6 +19,7 @@ package fuse import ( + "context" "errors" "fmt" "os" @@ -104,7 +105,7 @@ func (swarmfs *SwarmFS) Mount(mhash, mountpoint string) (*MountInfo, error) { } log.Trace("swarmfs mount: getting manifest tree") - _, manifestEntryMap, err := swarmfs.swarmApi.BuildDirectoryTree(mhash, true) + _, manifestEntryMap, err := swarmfs.swarmApi.BuildDirectoryTree(context.TODO(), mhash, true) if err != nil { return nil, err } diff --git a/swarm/fuse/swarmfs_util.go b/swarm/fuse/swarmfs_util.go index 9bbb0f6ac0e7d..4f2e1416b6151 100644 --- a/swarm/fuse/swarmfs_util.go +++ b/swarm/fuse/swarmfs_util.go @@ -47,7 +47,7 @@ func externalUnmount(mountPoint string) error { } func addFileToSwarm(sf *SwarmFile, content []byte, size int) error { - fkey, mhash, err := sf.mountInfo.swarmApi.AddFile(sf.mountInfo.LatestManifest, sf.path, sf.name, content, true) + fkey, mhash, err := sf.mountInfo.swarmApi.AddFile(context.TODO(), sf.mountInfo.LatestManifest, sf.path, sf.name, content, true) if err != nil { return err } @@ -66,7 +66,7 @@ func addFileToSwarm(sf *SwarmFile, content []byte, size int) error { } func removeFileFromSwarm(sf *SwarmFile) error { - mkey, err := sf.mountInfo.swarmApi.RemoveFile(sf.mountInfo.LatestManifest, sf.path, sf.name, true) + mkey, err := sf.mountInfo.swarmApi.RemoveFile(context.TODO(), sf.mountInfo.LatestManifest, sf.path, sf.name, true) if err != nil { return err } @@ -102,7 +102,7 @@ func 
removeDirectoryFromSwarm(sd *SwarmDir) error {
 }
 
 func appendToExistingFileInSwarm(sf *SwarmFile, content []byte, offset int64, length int64) error {
-	fkey, mhash, err := sf.mountInfo.swarmApi.AppendFile(sf.mountInfo.LatestManifest, sf.path, sf.name, sf.fileSize, content, sf.addr, offset, length, true)
+	fkey, mhash, err := sf.mountInfo.swarmApi.AppendFile(context.TODO(), sf.mountInfo.LatestManifest, sf.path, sf.name, sf.fileSize, content, sf.addr, offset, length, true)
 	if err != nil {
 		return err
 	}
diff --git a/swarm/metrics/flags.go b/swarm/metrics/flags.go
index 795fc402ff088..79490fd360123 100644
--- a/swarm/metrics/flags.go
+++ b/swarm/metrics/flags.go
@@ -81,6 +81,9 @@ func Setup(ctx *cli.Context) {
 			hosttag  = ctx.GlobalString(metricsInfluxDBHostTagFlag.Name)
 		)
 
+		// Start system runtime metrics collection
+		go gethmetrics.CollectProcessMetrics(2 * time.Second)
+
 		if enableExport {
 			log.Info("Enabling swarm metrics export to InfluxDB")
 			go influxdb.InfluxDBWithTags(gethmetrics.DefaultRegistry, 10*time.Second, endpoint, database, username, password, "swarm.", map[string]string{
diff --git a/swarm/network/networkid_test.go b/swarm/network/networkid_test.go
new file mode 100644
index 0000000000000..05134b083b167
--- /dev/null
+++ b/swarm/network/networkid_test.go
@@ -0,0 +1,266 @@
+// Copyright 2018 The go-ethereum Authors
+// This file is part of the go-ethereum library.
+//
+// The go-ethereum library is free software: you can redistribute it and/or modify
+// it under the terms of the GNU Lesser General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+//
+// The go-ethereum library is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU Lesser General Public License for more details.
+//
+// You should have received a copy of the GNU Lesser General Public License
+// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
+
+package network
+
+import (
+	"bytes"
+	"context"
+	"flag"
+	"fmt"
+	"math/rand"
+	"strings"
+	"testing"
+	"time"
+
+	"github.com/ethereum/go-ethereum/log"
+	"github.com/ethereum/go-ethereum/node"
+	"github.com/ethereum/go-ethereum/p2p"
+	"github.com/ethereum/go-ethereum/p2p/discover"
+	"github.com/ethereum/go-ethereum/p2p/simulations"
+	"github.com/ethereum/go-ethereum/p2p/simulations/adapters"
+	"github.com/ethereum/go-ethereum/rpc"
+)
+
+var (
+	currentNetworkID int
+	cnt              int
+	nodeMap          map[int][]discover.NodeID
+	kademlias        map[discover.NodeID]*Kademlia
+)
+
+const (
+	NumberOfNets = 4
+	MaxTimeout   = 6
+)
+
+func init() {
+	flag.Parse()
+	rand.Seed(time.Now().Unix())
+}
+
+/*
+Run the network ID test.
+The test creates one simulations.Network instance,
+a number of nodes, then connects nodes with each other in this network.
+
+Each node gets a network ID assigned according to the number of networks.
+Having more network IDs is just arbitrary in order to exclude
+false positives.
+
+Nodes should only connect with other nodes with the same network ID.
+After the setup phase, the test checks on each node if it has the
+expected node connections (excluding those not sharing the network ID).
+*/
+func TestNetworkID(t *testing.T) {
+	log.Debug("Start test")
+	//arbitrarily set the number of nodes. It could be any number
+	numNodes := 24
+	//the nodeMap maps all nodes (slice value) with the same network ID (key)
+	nodeMap = make(map[int][]discover.NodeID)
+	//set up the network and connect nodes
+	net, err := setupNetwork(numNodes)
+	if err != nil {
+		t.Fatalf("Error setting up network: %v", err)
+	}
+	defer func() {
+		//shut down the network
+		log.Trace("Shutting down network")
+		net.Shutdown()
+	}()
+	//let's sleep to ensure all nodes are connected
+	time.Sleep(1 * time.Second)
+	//for each group sharing the same network ID...
+	for _, netIDGroup := range nodeMap {
+		log.Trace("netIDGroup size", "size", len(netIDGroup))
+		//...check that the size of each node's kademlia is as expected
+		//the assumption is that it should be the size of the group minus 1 (the node itself)
+		for _, node := range netIDGroup {
+			if kademlias[node].addrs.Size() != len(netIDGroup)-1 {
+				t.Fatalf("Kademlia size does not match expected peer count. Kademlia size: %d, expected size: %d", kademlias[node].addrs.Size(), len(netIDGroup)-1)
+			}
+			kademlias[node].EachAddr(nil, 0, func(addr OverlayAddr, _ int, _ bool) bool {
+				found := false
+				for _, nd := range netIDGroup {
+					p := ToOverlayAddr(nd.Bytes())
+					if bytes.Equal(p, addr.Address()) {
+						found = true
+					}
+				}
+				if !found {
+					t.Fatalf("Expected node not found for node %s", node.String())
+				}
+				return true
+			})
+		}
+	}
+	log.Info("Test terminated successfully")
+}
+
+// setupNetwork creates a simulated network with the bzz/discovery service
+// and connects each new node to all previously created nodes
+func setupNetwork(numnodes int) (net *simulations.Network, err error) {
+	log.Debug("Setting up network")
+	quitC := make(chan struct{})
+	errc := make(chan error)
+	nodes := make([]*simulations.Node, numnodes)
+	if numnodes < 16 {
+		return nil, fmt.Errorf("Minimum sixteen nodes in network")
+	}
+	adapter := adapters.NewSimAdapter(newServices())
+	//create the network
+	net = simulations.NewNetwork(adapter, &simulations.NetworkConfig{
+		ID:             "NetworkIdTestNet",
+		DefaultService: "bzz",
+	})
+	log.Debug("Creating networks and nodes")
+
+	var connCount int
+
+	//create nodes and connect them to each other
+	for i := 0; i < numnodes; i++ {
+		log.Trace("iteration: ", "i", i)
+		nodeconf := adapters.RandomNodeConfig()
+		nodes[i], err = net.NewNodeWithConfig(nodeconf)
+		if err != nil {
+			return nil, fmt.Errorf("error creating node %d: %v", i, err)
+		}
+		err = net.Start(nodes[i].ID())
+		if err != nil {
+			return nil, fmt.Errorf("error starting node %d: %v", i, err)
+		}
+		client, err := nodes[i].Client()
+		if err != nil {
+			return nil, fmt.Errorf("create node %d rpc client fail: %v", i, err)
+		}
+		//now setup and start event watching in order to know when we can upload
+		ctx, watchCancel := context.WithTimeout(context.Background(), MaxTimeout*time.Second)
+		defer watchCancel()
+		watchSubscriptionEvents(ctx, nodes[i].ID(), client, errc, quitC)
+		//on every iteration we connect to all previous ones
+		for k := i - 1; k >= 0; k-- {
+			connCount++
+			log.Debug(fmt.Sprintf("Connecting node %d with node %d; connection count is %d", i, k, connCount))
+			err = net.Connect(nodes[i].ID(), nodes[k].ID())
+			if err != nil {
+				if !strings.Contains(err.Error(), "already connected") {
+					return nil, fmt.Errorf("error connecting nodes: %v", err)
+				}
+			}
+		}
+	}
+	//now wait until all expected subscription events have been received
+	//`watchSubscriptionEvents` will write a `nil` value to errc for each of them
+	for err := range errc {
+		if err != nil {
+			
return nil, err + } + //`nil` received, decrement count + connCount-- + log.Trace("count down", "cnt", connCount) + //all subscriptions received + if connCount == 0 { + close(quitC) + break + } + } + log.Debug("Network setup phase terminated") + return net, nil +} + +func newServices() adapters.Services { + kademlias = make(map[discover.NodeID]*Kademlia) + kademlia := func(id discover.NodeID) *Kademlia { + if k, ok := kademlias[id]; ok { + return k + } + addr := NewAddrFromNodeID(id) + params := NewKadParams() + params.MinProxBinSize = 2 + params.MaxBinSize = 3 + params.MinBinSize = 1 + params.MaxRetries = 1000 + params.RetryExponent = 2 + params.RetryInterval = 1000000 + kademlias[id] = NewKademlia(addr.Over(), params) + return kademlias[id] + } + return adapters.Services{ + "bzz": func(ctx *adapters.ServiceContext) (node.Service, error) { + addr := NewAddrFromNodeID(ctx.Config.ID) + hp := NewHiveParams() + hp.Discovery = false + cnt++ + //assign the network ID + currentNetworkID = cnt % NumberOfNets + if ok := nodeMap[currentNetworkID]; ok == nil { + nodeMap[currentNetworkID] = make([]discover.NodeID, 0) + } + //add this node to the group sharing the same network ID + nodeMap[currentNetworkID] = append(nodeMap[currentNetworkID], ctx.Config.ID) + log.Debug("current network ID:", "id", currentNetworkID) + config := &BzzConfig{ + OverlayAddr: addr.Over(), + UnderlayAddr: addr.Under(), + HiveParams: hp, + NetworkID: uint64(currentNetworkID), + } + return NewBzz(config, kademlia(ctx.Config.ID), nil, nil, nil), nil + }, + } +} + +func watchSubscriptionEvents(ctx context.Context, id discover.NodeID, client *rpc.Client, errc chan error, quitC chan struct{}) { + events := make(chan *p2p.PeerEvent) + sub, err := client.Subscribe(context.Background(), "admin", events, "peerEvents") + if err != nil { + log.Error(err.Error()) + errc <- fmt.Errorf("error getting peer events for node %v: %s", id, err) + return + } + go func() { + defer func() { + sub.Unsubscribe() + log.Trace("watch subscription events: unsubscribe", "id", id) + }() + + for { + select { + case <-quitC: + return + case <-ctx.Done(): + select { + case errc <- ctx.Err(): + case <-quitC: + } + return + case e := <-events: + if e.Type == p2p.PeerEventTypeAdd { + errc <- nil + } + case err := <-sub.Err(): + if err != nil { + select { + case errc <- fmt.Errorf("error getting peer events for node %v: %v", id, err): + case <-quitC: + } + return + } + } + } + }() +} diff --git a/swarm/network/stream/common_test.go b/swarm/network/stream/common_test.go index 9d1f997f29fbd..6a2c27401f86a 100644 --- a/swarm/network/stream/common_test.go +++ b/swarm/network/stream/common_test.go @@ -250,7 +250,7 @@ func (r *TestRegistry) APIs() []rpc.API { } func readAll(fileStore *storage.FileStore, hash []byte) (int64, error) { - r, _ := fileStore.Retrieve(hash) + r, _ := fileStore.Retrieve(context.TODO(), hash) buf := make([]byte, 1024) var n int var total int64 diff --git a/swarm/network/stream/delivery_test.go b/swarm/network/stream/delivery_test.go index b03028c888167..cd87557b184de 100644 --- a/swarm/network/stream/delivery_test.go +++ b/swarm/network/stream/delivery_test.go @@ -345,9 +345,13 @@ func testDeliveryFromNodes(t *testing.T, nodes, conns, chunkCount int, skipCheck // here we distribute chunks of a random file into Stores of nodes 1 to nodes rrFileStore := storage.NewFileStore(newRoundRobinStore(sim.Stores[1:]...), storage.NewFileStoreParams()) size := chunkCount * chunkSize - fileHash, wait, err := rrFileStore.Store(io.LimitReader(crand.Reader, 
int64(size)), int64(size), false) + ctx := context.TODO() + fileHash, wait, err := rrFileStore.Store(ctx, io.LimitReader(crand.Reader, int64(size)), int64(size), false) // wait until all chunks stored - wait() + if err != nil { + t.Fatal(err.Error()) + } + err = wait(ctx) if err != nil { t.Fatal(err.Error()) } @@ -627,9 +631,13 @@ Loop: hashes := make([]storage.Address, chunkCount) for i := 0; i < chunkCount; i++ { // create actual size real chunks - hash, wait, err := remoteFileStore.Store(io.LimitReader(crand.Reader, int64(chunkSize)), int64(chunkSize), false) + ctx := context.TODO() + hash, wait, err := remoteFileStore.Store(ctx, io.LimitReader(crand.Reader, int64(chunkSize)), int64(chunkSize), false) + if err != nil { + b.Fatalf("expected no error. got %v", err) + } // wait until all chunks stored - wait() + err = wait(ctx) if err != nil { b.Fatalf("expected no error. got %v", err) } diff --git a/swarm/network/stream/intervals_test.go b/swarm/network/stream/intervals_test.go index 4e2721cb0faad..d996cdc7e5330 100644 --- a/swarm/network/stream/intervals_test.go +++ b/swarm/network/stream/intervals_test.go @@ -117,8 +117,12 @@ func testIntervals(t *testing.T, live bool, history *Range, skipCheck bool) { fileStore := storage.NewFileStore(sim.Stores[0], storage.NewFileStoreParams()) size := chunkCount * chunkSize - _, wait, err := fileStore.Store(io.LimitReader(crand.Reader, int64(size)), int64(size), false) - wait() + ctx := context.TODO() + _, wait, err := fileStore.Store(ctx, io.LimitReader(crand.Reader, int64(size)), int64(size), false) + if err != nil { + t.Fatal(err) + } + err = wait(ctx) if err != nil { t.Fatal(err) } diff --git a/swarm/network/stream/snapshot_retrieval_test.go b/swarm/network/stream/snapshot_retrieval_test.go index 59c776c3027ca..da5253e8af10c 100644 --- a/swarm/network/stream/snapshot_retrieval_test.go +++ b/swarm/network/stream/snapshot_retrieval_test.go @@ -410,7 +410,7 @@ func runFileRetrievalTest(nodeCount int) error { fileStore := registries[id].fileStore //check all chunks for i, hash := range conf.hashes { - reader, _ := fileStore.Retrieve(hash) + reader, _ := fileStore.Retrieve(context.TODO(), hash) //check that we can read the file size and that it corresponds to the generated file size if s, err := reader.Size(nil); err != nil || s != int64(len(randomFiles[i])) { allSuccess = false @@ -697,7 +697,7 @@ func runRetrievalTest(chunkCount int, nodeCount int) error { fileStore := registries[id].fileStore //check all chunks for _, chnk := range conf.hashes { - reader, _ := fileStore.Retrieve(chnk) + reader, _ := fileStore.Retrieve(context.TODO(), chnk) //assuming that reading the Size of the chunk is enough to know we found it if s, err := reader.Size(nil); err != nil || s != chunkSize { allSuccess = false @@ -765,9 +765,13 @@ func uploadFilesToNodes(nodes []*simulations.Node) ([]storage.Address, []string, return nil, nil, err } //store it (upload it) on the FileStore - rk, wait, err := fileStore.Store(strings.NewReader(rfiles[i]), int64(len(rfiles[i])), false) + ctx := context.TODO() + rk, wait, err := fileStore.Store(ctx, strings.NewReader(rfiles[i]), int64(len(rfiles[i])), false) log.Debug("Uploaded random string file to node") - wait() + if err != nil { + return nil, nil, err + } + err = wait(ctx) if err != nil { return nil, nil, err } diff --git a/swarm/network/stream/snapshot_sync_test.go b/swarm/network/stream/snapshot_sync_test.go index ff1c39319d174..fd8863d435e48 100644 --- a/swarm/network/stream/snapshot_sync_test.go +++ 
b/swarm/network/stream/snapshot_sync_test.go @@ -581,8 +581,12 @@ func uploadFileToSingleNodeStore(id discover.NodeID, chunkCount int) ([]storage. fileStore := storage.NewFileStore(lstore, storage.NewFileStoreParams()) var rootAddrs []storage.Address for i := 0; i < chunkCount; i++ { - rk, wait, err := fileStore.Store(io.LimitReader(crand.Reader, int64(size)), int64(size), false) - wait() + ctx := context.TODO() + rk, wait, err := fileStore.Store(ctx, io.LimitReader(crand.Reader, int64(size)), int64(size), false) + if err != nil { + return nil, err + } + err = wait(ctx) if err != nil { return nil, err } diff --git a/swarm/network/stream/syncer_test.go b/swarm/network/stream/syncer_test.go index 68e20841dfb82..5fea7befe3ce6 100644 --- a/swarm/network/stream/syncer_test.go +++ b/swarm/network/stream/syncer_test.go @@ -202,9 +202,12 @@ func testSyncBetweenNodes(t *testing.T, nodes, conns, chunkCount int, skipCheck // here we distribute chunks of a random file into stores 1...nodes rrFileStore := storage.NewFileStore(newRoundRobinStore(sim.Stores[1:]...), storage.NewFileStoreParams()) size := chunkCount * chunkSize - _, wait, err := rrFileStore.Store(io.LimitReader(crand.Reader, int64(size)), int64(size), false) + _, wait, err := rrFileStore.Store(ctx, io.LimitReader(crand.Reader, int64(size)), int64(size), false) + if err != nil { + t.Fatal(err.Error()) + } // need to wait cos we then immediately collect the relevant bin content - wait() + wait(ctx) if err != nil { t.Fatal(err.Error()) } diff --git a/swarm/network_test.go b/swarm/network_test.go index c291fce3b60a2..606a83be22132 100644 --- a/swarm/network_test.go +++ b/swarm/network_test.go @@ -508,14 +508,15 @@ func uploadFile(swarm *Swarm) (storage.Address, string, error) { // File data is very short, but it is ensured that its // uniqueness is very certain. 
data := fmt.Sprintf("test content %s %x", time.Now().Round(0), b)
-	k, wait, err := swarm.api.Put(data, "text/plain", false)
+	ctx := context.TODO()
+	k, wait, err := swarm.api.Put(ctx, data, "text/plain", false)
 	if err != nil {
 		return nil, "", err
 	}
 	if wait != nil {
-		wait()
+		err = wait(ctx)
 	}
-	return k, data, nil
+	return k, data, err
 }
 
 // retrieve is the function that is used for checking the availability of
@@ -570,7 +571,7 @@ func retrieve(
 		log.Debug("api get: check file", "node", id.String(), "key", f.addr.String(), "total files found", atomic.LoadUint64(totalFoundCount))
 
-		r, _, _, _, err := swarm.api.Get(f.addr, "/")
+		r, _, _, _, err := swarm.api.Get(context.TODO(), f.addr, "/")
 		if err != nil {
 			errc <- fmt.Errorf("api get: node %s, key %s, kademlia %s: %v", id, f.addr, swarm.bzz.Hive, err)
 			return
diff --git a/swarm/pss/handshake.go b/swarm/pss/handshake.go
index 3b44847ecc1c6..e3ead77d04925 100644
--- a/swarm/pss/handshake.go
+++ b/swarm/pss/handshake.go
@@ -385,7 +385,7 @@ func (ctl *HandshakeController) sendKey(pubkeyid string, topic *Topic, keycount
 	// generate new keys to send
 	for i := 0; i < len(recvkeyids); i++ {
 		var err error
-		recvkeyids[i], err = ctl.pss.generateSymmetricKey(*topic, to, true)
+		recvkeyids[i], err = ctl.pss.GenerateSymmetricKey(*topic, to, true)
 		if err != nil {
 			return []string{}, fmt.Errorf("set receive symkey fail (pubkey %x topic %x): %v", pubkeyid, topic, err)
 		}
diff --git a/swarm/pss/notify/notify.go b/swarm/pss/notify/notify.go
new file mode 100644
index 0000000000000..723092c32d2bd
--- /dev/null
+++ b/swarm/pss/notify/notify.go
@@ -0,0 +1,394 @@
+package notify
+
+import (
+	"crypto/ecdsa"
+	"fmt"
+	"sync"
+
+	"github.com/ethereum/go-ethereum/common/hexutil"
+	"github.com/ethereum/go-ethereum/crypto"
+	"github.com/ethereum/go-ethereum/p2p"
+	"github.com/ethereum/go-ethereum/rlp"
+	"github.com/ethereum/go-ethereum/swarm/log"
+	"github.com/ethereum/go-ethereum/swarm/pss"
+)
+
+const (
+	// sent from requester to updater to request start of notifications
+	MsgCodeStart = iota
+
+	// sent from updater to requester, contains a notification plus a new symkey to replace the old
+	MsgCodeNotifyWithKey
+
+	// sent from updater to requester, contains a notification
+	MsgCodeNotify
+
+	// sent from requester to updater to request stop of notifications (currently unused)
+	MsgCodeStop
+	MsgCodeMax
+)
+
+const (
+	DefaultAddressLength = 1
+	symKeyLength         = 32 // this should be obtained from the source
+)
+
+var (
+	// control topic is used before symmetric key issuance completes
+	controlTopic = pss.Topic{0x00, 0x00, 0x00, 0x01}
+)
+
+// when code is MsgCodeStart, Payload is address
+// when code is MsgCodeNotifyWithKey, Payload is notification | symkey
+// when code is MsgCodeNotify, Payload is notification
+// when code is MsgCodeStop, Payload is address
+type Msg struct {
+	Code       byte
+	Name       []byte
+	Payload    []byte
+	namestring string
+}
+
+// NewMsg creates a new notification message object
+func NewMsg(code byte, name string, payload []byte) *Msg {
+	return &Msg{
+		Code:       code,
+		Name:       []byte(name),
+		Payload:    payload,
+		namestring: name,
+	}
+}
+
+// NewMsgFromPayload decodes a serialized message payload into a new notification message object
+func NewMsgFromPayload(payload []byte) (*Msg, error) {
+	msg := &Msg{}
+	err := rlp.DecodeBytes(payload, msg)
+	if err != nil {
+		return nil, err
+	}
+	msg.namestring = string(msg.Name)
+	return msg, nil
+}
+
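The wire format here is plain RLP over the exported fields; `namestring` is unexported, skipped by RLP, and rebuilt on decode. A hedged round-trip sketch using the two constructors above (`addr` is a hypothetical subscriber address, not something defined in this file):

```go
// Round-tripping a notification message through its RLP wire form,
// using NewMsg / NewMsgFromPayload from this file.
func exampleRoundTrip(addr []byte) error {
	msg := NewMsg(MsgCodeStart, "foo.eth", addr)
	wire, err := rlp.EncodeToBytes(msg) // encodes Code, Name, Payload only
	if err != nil {
		return err
	}
	decoded, err := NewMsgFromPayload(wire) // restores namestring from Name
	if err != nil {
		return err
	}
	if decoded.namestring != "foo.eth" {
		return fmt.Errorf("unexpected name %q", decoded.namestring)
	}
	return nil
}
```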
+// a notifier has one sendBin entry for each address space it sends messages to
+type sendBin struct {
+	address  pss.PssAddress
+	symKeyId string
+	count    int
+}
+
+// represents a single notification service
+// only subscription address bins that match the address of a notification client have entries.
+type notifier struct {
+	bins      map[string]*sendBin
+	topic     pss.Topic // identifies the resource for pss receiver
+	threshold int       // amount of address bytes used in bins
+	updateC   <-chan []byte
+	quitC     chan struct{}
+}
+
+func (n *notifier) removeSubscription() {
+	n.quitC <- struct{}{}
+}
+
+// represents an individual subscription made by a public key at a specific address/neighborhood
+type subscription struct {
+	pubkeyId string
+	address  pss.PssAddress
+	handler  func(string, []byte) error
+}
+
+// Controller is the interface to control, add and remove notification services and subscriptions
+type Controller struct {
+	pss           *pss.Pss
+	notifiers     map[string]*notifier
+	subscriptions map[string]*subscription
+	mu            sync.Mutex
+}
+
+// NewController creates a new Controller object
+func NewController(ps *pss.Pss) *Controller {
+	ctrl := &Controller{
+		pss:           ps,
+		notifiers:     make(map[string]*notifier),
+		subscriptions: make(map[string]*subscription),
+	}
+	ctrl.pss.Register(&controlTopic, ctrl.Handler)
+	return ctrl
+}
+
+// IsActive is used to check if a notification service exists for a specified id string
+// Returns true if exists, false if not
+func (c *Controller) IsActive(name string) bool {
+	c.mu.Lock()
+	defer c.mu.Unlock()
+	return c.isActive(name)
+}
+
+func (c *Controller) isActive(name string) bool {
+	_, ok := c.notifiers[name]
+	return ok
+}
+
+// Subscribe is used by a client to request notifications from a notification service provider
+// It will create a MsgCodeStart message and send it asymmetrically to the provider using its public key and routing address
+// The handler function is a callback that will be called when notifications are received
+// Fails if the pss request cannot be sent or if the update message could not be serialized
+func (c *Controller) Subscribe(name string, pubkey *ecdsa.PublicKey, address pss.PssAddress, handler func(string, []byte) error) error {
+	c.mu.Lock()
+	defer c.mu.Unlock()
+	msg := NewMsg(MsgCodeStart, name, c.pss.BaseAddr())
+	c.pss.SetPeerPublicKey(pubkey, controlTopic, &address)
+	pubkeyId := hexutil.Encode(crypto.FromECDSAPub(pubkey))
+	smsg, err := rlp.EncodeToBytes(msg)
+	if err != nil {
+		return err
+	}
+	err = c.pss.SendAsym(pubkeyId, controlTopic, smsg)
+	if err != nil {
+		return err
+	}
+	c.subscriptions[name] = &subscription{
+		pubkeyId: pubkeyId,
+		address:  address,
+		handler:  handler,
+	}
+	return nil
+}
+
+// Unsubscribe, perhaps unsurprisingly, undoes the effects of Subscribe
+// Fails if the subscription does not exist, if the pss request cannot be sent or if the update message could not be serialized
+func (c *Controller) Unsubscribe(name string) error {
+	c.mu.Lock()
+	defer c.mu.Unlock()
+	sub, ok := c.subscriptions[name]
+	if !ok {
+		return fmt.Errorf("Unknown subscription '%s'", name)
+	}
+	msg := NewMsg(MsgCodeStop, name, sub.address)
+	smsg, err := rlp.EncodeToBytes(msg)
+	if err != nil {
+		return err
+	}
+	err = c.pss.SendAsym(sub.pubkeyId, controlTopic, smsg)
+	if err != nil {
+		return err
+	}
+	delete(c.subscriptions, name)
+	return nil
+}
+
+// NewNotifier is used by a notification service provider to create a new notification service
+// It takes a name as identifier for the resource, a threshold indicating the granularity of the subscription address bin
+// It then starts an event loop which listens to the supplied update 
channel and executes notifications on channel receives +// Fails if a notifier already is registered on the name +//func (c *Controller) NewNotifier(name string, threshold int, contentFunc func(string) ([]byte, error)) error { +func (c *Controller) NewNotifier(name string, threshold int, updateC <-chan []byte) (func(), error) { + c.mu.Lock() + if c.isActive(name) { + c.mu.Unlock() + return nil, fmt.Errorf("Notification service %s already exists in controller", name) + } + quitC := make(chan struct{}) + c.notifiers[name] = ¬ifier{ + bins: make(map[string]*sendBin), + topic: pss.BytesToTopic([]byte(name)), + threshold: threshold, + updateC: updateC, + quitC: quitC, + //contentFunc: contentFunc, + } + c.mu.Unlock() + go func() { + for { + select { + case <-quitC: + return + case data := <-updateC: + c.notify(name, data) + } + } + }() + + return c.notifiers[name].removeSubscription, nil +} + +// RemoveNotifier is used to stop a notification service. +// It cancels the event loop listening to the notification provider's update channel +func (c *Controller) RemoveNotifier(name string) error { + c.mu.Lock() + defer c.mu.Unlock() + currentNotifier, ok := c.notifiers[name] + if !ok { + return fmt.Errorf("Unknown notification service %s", name) + } + currentNotifier.removeSubscription() + delete(c.notifiers, name) + return nil +} + +// Notify is called by a notification service provider to issue a new notification +// It takes the name of the notification service and the data to be sent. +// It fails if a notifier with this name does not exist or if data could not be serialized +// Note that it does NOT fail on failure to send a message +func (c *Controller) notify(name string, data []byte) error { + c.mu.Lock() + defer c.mu.Unlock() + if !c.isActive(name) { + return fmt.Errorf("Notification service %s doesn't exist", name) + } + msg := NewMsg(MsgCodeNotify, name, data) + smsg, err := rlp.EncodeToBytes(msg) + if err != nil { + return err + } + for _, m := range c.notifiers[name].bins { + log.Debug("sending pss notify", "name", name, "addr", fmt.Sprintf("%x", m.address), "topic", fmt.Sprintf("%x", c.notifiers[name].topic), "data", data) + go func(m *sendBin) { + err = c.pss.SendSym(m.symKeyId, c.notifiers[name].topic, smsg) + if err != nil { + log.Warn("Failed to send notify to addr %x: %v", m.address, err) + } + }(m) + } + return nil +} + +// check if we already have the bin +// if we do, retrieve the symkey from it and increment the count +// if we dont make a new symkey and a new bin entry +func (c *Controller) addToBin(ntfr *notifier, address []byte) (symKeyId string, pssAddress pss.PssAddress, err error) { + + // parse the address from the message and truncate if longer than our bins threshold + if len(address) > ntfr.threshold { + address = address[:ntfr.threshold] + } + + pssAddress = pss.PssAddress(address) + hexAddress := fmt.Sprintf("%x", address) + currentBin, ok := ntfr.bins[hexAddress] + if ok { + currentBin.count++ + symKeyId = currentBin.symKeyId + } else { + symKeyId, err = c.pss.GenerateSymmetricKey(ntfr.topic, &pssAddress, false) + if err != nil { + return "", nil, err + } + ntfr.bins[hexAddress] = &sendBin{ + address: address, + symKeyId: symKeyId, + count: 1, + } + } + return symKeyId, pssAddress, nil +} + +func (c *Controller) handleStartMsg(msg *Msg, keyid string) (err error) { + + keyidbytes, err := hexutil.Decode(keyid) + if err != nil { + return err + } + pubkey, err := crypto.UnmarshalPubkey(keyidbytes) + if err != nil { + return err + } + + // if name is not registered for 
notifications we will not react + currentNotifier, ok := c.notifiers[msg.namestring] + if !ok { + return fmt.Errorf("Subscribe attempted on unknown resource '%s'", msg.namestring) + } + + // add to or open new bin + symKeyId, pssAddress, err := c.addToBin(currentNotifier, msg.Payload) + if err != nil { + return err + } + + // add to address book for send initial notify + symkey, err := c.pss.GetSymmetricKey(symKeyId) + if err != nil { + return err + } + err = c.pss.SetPeerPublicKey(pubkey, controlTopic, &pssAddress) + if err != nil { + return err + } + + // TODO this is set to zero-length byte pending decision on protocol for initial message, whether it should include message or not, and how to trigger the initial message so that current state of MRU is sent upon subscription + notify := []byte{} + replyMsg := NewMsg(MsgCodeNotifyWithKey, msg.namestring, make([]byte, len(notify)+symKeyLength)) + copy(replyMsg.Payload, notify) + copy(replyMsg.Payload[len(notify):], symkey) + sReplyMsg, err := rlp.EncodeToBytes(replyMsg) + if err != nil { + return err + } + return c.pss.SendAsym(keyid, controlTopic, sReplyMsg) +} + +func (c *Controller) handleNotifyWithKeyMsg(msg *Msg) error { + symkey := msg.Payload[len(msg.Payload)-symKeyLength:] + topic := pss.BytesToTopic(msg.Name) + + // \TODO keep track of and add actual address + updaterAddr := pss.PssAddress([]byte{}) + c.pss.SetSymmetricKey(symkey, topic, &updaterAddr, true) + c.pss.Register(&topic, c.Handler) + return c.subscriptions[msg.namestring].handler(msg.namestring, msg.Payload[:len(msg.Payload)-symKeyLength]) +} + +func (c *Controller) handleStopMsg(msg *Msg) error { + // if name is not registered for notifications we will not react + currentNotifier, ok := c.notifiers[msg.namestring] + if !ok { + return fmt.Errorf("Unsubscribe attempted on unknown resource '%s'", msg.namestring) + } + + // parse the address from the message and truncate if longer than our bins' address length threshold + address := msg.Payload + if len(msg.Payload) > currentNotifier.threshold { + address = address[:currentNotifier.threshold] + } + + // remove the entry from the bin if it exists, and remove the bin if it's the last remaining one + hexAddress := fmt.Sprintf("%x", address) + currentBin, ok := currentNotifier.bins[hexAddress] + if !ok { + return fmt.Errorf("found no active bin for address %s", hexAddress) + } + currentBin.count-- + if currentBin.count == 0 { // if no more clients in this bin, remove it + delete(currentNotifier.bins, hexAddress) + } + return nil +} + +// Handler is the pss topic handler to be used to process notification service messages +// It should be registered in the pss of both to any notification service provides and clients using the service +func (c *Controller) Handler(smsg []byte, p *p2p.Peer, asymmetric bool, keyid string) error { + c.mu.Lock() + defer c.mu.Unlock() + log.Debug("notify controller handler", "keyid", keyid) + + // see if the message is valid + msg, err := NewMsgFromPayload(smsg) + if err != nil { + return err + } + + switch msg.Code { + case MsgCodeStart: + return c.handleStartMsg(msg, keyid) + case MsgCodeNotifyWithKey: + return c.handleNotifyWithKeyMsg(msg) + case MsgCodeNotify: + return c.subscriptions[msg.namestring].handler(msg.namestring, msg.Payload) + case MsgCodeStop: + return c.handleStopMsg(msg) + } + + return fmt.Errorf("Invalid message code: %d", msg.Code) +} diff --git a/swarm/pss/notify/notify_test.go b/swarm/pss/notify/notify_test.go new file mode 100644 index 0000000000000..3c655f215ccb9 --- /dev/null 
+++ b/swarm/pss/notify/notify_test.go @@ -0,0 +1,252 @@ +package notify + +import ( + "bytes" + "context" + "flag" + "fmt" + "os" + "testing" + "time" + + "github.com/ethereum/go-ethereum/common/hexutil" + "github.com/ethereum/go-ethereum/crypto" + "github.com/ethereum/go-ethereum/log" + "github.com/ethereum/go-ethereum/node" + "github.com/ethereum/go-ethereum/p2p/discover" + "github.com/ethereum/go-ethereum/p2p/simulations" + "github.com/ethereum/go-ethereum/p2p/simulations/adapters" + "github.com/ethereum/go-ethereum/swarm/network" + "github.com/ethereum/go-ethereum/swarm/pss" + "github.com/ethereum/go-ethereum/swarm/state" + whisper "github.com/ethereum/go-ethereum/whisper/whisperv5" +) + +var ( + loglevel = flag.Int("l", 3, "loglevel") + psses map[string]*pss.Pss + w *whisper.Whisper + wapi *whisper.PublicWhisperAPI +) + +func init() { + flag.Parse() + hs := log.StreamHandler(os.Stderr, log.TerminalFormat(true)) + hf := log.LvlFilterHandler(log.Lvl(*loglevel), hs) + h := log.CallerFileHandler(hf) + log.Root().SetHandler(h) + + w = whisper.New(&whisper.DefaultConfig) + wapi = whisper.NewPublicWhisperAPI(w) + psses = make(map[string]*pss.Pss) +} + +// Creates a client node and notifier node +// Client sends pss notifications requests +// notifier sends initial notification with symmetric key, and +// second notification symmetrically encrypted +func TestStart(t *testing.T) { + adapter := adapters.NewSimAdapter(newServices(false)) + net := simulations.NewNetwork(adapter, &simulations.NetworkConfig{ + ID: "0", + DefaultService: "bzz", + }) + leftNodeConf := adapters.RandomNodeConfig() + leftNodeConf.Services = []string{"bzz", "pss"} + leftNode, err := net.NewNodeWithConfig(leftNodeConf) + if err != nil { + t.Fatal(err) + } + err = net.Start(leftNode.ID()) + if err != nil { + t.Fatal(err) + } + + rightNodeConf := adapters.RandomNodeConfig() + rightNodeConf.Services = []string{"bzz", "pss"} + rightNode, err := net.NewNodeWithConfig(rightNodeConf) + if err != nil { + t.Fatal(err) + } + err = net.Start(rightNode.ID()) + if err != nil { + t.Fatal(err) + } + + err = net.Connect(rightNode.ID(), leftNode.ID()) + if err != nil { + t.Fatal(err) + } + + leftRpc, err := leftNode.Client() + if err != nil { + t.Fatal(err) + } + + rightRpc, err := rightNode.Client() + if err != nil { + t.Fatal(err) + } + + var leftAddr string + err = leftRpc.Call(&leftAddr, "pss_baseAddr") + if err != nil { + t.Fatal(err) + } + + var rightAddr string + err = rightRpc.Call(&rightAddr, "pss_baseAddr") + if err != nil { + t.Fatal(err) + } + + var leftPub string + err = leftRpc.Call(&leftPub, "pss_getPublicKey") + if err != nil { + t.Fatal(err) + } + + var rightPub string + err = rightRpc.Call(&rightPub, "pss_getPublicKey") + if err != nil { + t.Fatal(err) + } + + rsrcName := "foo.eth" + rsrcTopic := pss.BytesToTopic([]byte(rsrcName)) + + // wait for kademlia table to populate + time.Sleep(time.Second) + + ctx, cancel := context.WithTimeout(context.Background(), time.Second*2) + defer cancel() + rmsgC := make(chan *pss.APIMsg) + rightSub, err := rightRpc.Subscribe(ctx, "pss", rmsgC, "receive", controlTopic) + if err != nil { + t.Fatal(err) + } + defer rightSub.Unsubscribe() + + updateC := make(chan []byte) + updateMsg := []byte{} + ctrlClient := NewController(psses[rightPub]) + ctrlNotifier := NewController(psses[leftPub]) + ctrlNotifier.NewNotifier("foo.eth", 2, updateC) + + pubkeybytes, err := hexutil.Decode(leftPub) + if err != nil { + t.Fatal(err) + } + pubkey, err := crypto.UnmarshalPubkey(pubkeybytes) + if err != nil { 
+ t.Fatal(err) + } + addrbytes, err := hexutil.Decode(leftAddr) + if err != nil { + t.Fatal(err) + } + ctrlClient.Subscribe(rsrcName, pubkey, addrbytes, func(s string, b []byte) error { + if s != "foo.eth" || !bytes.Equal(updateMsg, b) { + t.Fatalf("unexpected result in client handler: '%s':'%x'", s, b) + } + log.Info("client handler receive", "s", s, "b", b) + return nil + }) + + var inMsg *pss.APIMsg + select { + case inMsg = <-rmsgC: + case <-ctx.Done(): + t.Fatal(ctx.Err()) + } + + dMsg, err := NewMsgFromPayload(inMsg.Msg) + if err != nil { + t.Fatal(err) + } + if dMsg.namestring != rsrcName { + t.Fatalf("expected name '%s', got '%s'", rsrcName, dMsg.namestring) + } + if !bytes.Equal(dMsg.Payload[:len(updateMsg)], updateMsg) { + t.Fatalf("expected payload first %d bytes '%x', got '%x'", len(updateMsg), updateMsg, dMsg.Payload[:len(updateMsg)]) + } + if len(updateMsg)+symKeyLength != len(dMsg.Payload) { + t.Fatalf("expected payload length %d, have %d", len(updateMsg)+symKeyLength, len(dMsg.Payload)) + } + + rightSubUpdate, err := rightRpc.Subscribe(ctx, "pss", rmsgC, "receive", rsrcTopic) + if err != nil { + t.Fatal(err) + } + defer rightSubUpdate.Unsubscribe() + + updateMsg = []byte("plugh") + updateC <- updateMsg + select { + case inMsg = <-rmsgC: + case <-ctx.Done(): + log.Error("timed out waiting for msg", "topic", fmt.Sprintf("%x", rsrcTopic)) + t.Fatal(ctx.Err()) + } + dMsg, err = NewMsgFromPayload(inMsg.Msg) + if err != nil { + t.Fatal(err) + } + if dMsg.namestring != rsrcName { + t.Fatalf("expected name %s, got %s", rsrcName, dMsg.namestring) + } + if !bytes.Equal(dMsg.Payload, updateMsg) { + t.Fatalf("expected payload '%x', got '%x'", updateMsg, dMsg.Payload) + } + +} + +func newServices(allowRaw bool) adapters.Services { + stateStore := state.NewInmemoryStore() + kademlias := make(map[discover.NodeID]*network.Kademlia) + kademlia := func(id discover.NodeID) *network.Kademlia { + if k, ok := kademlias[id]; ok { + return k + } + addr := network.NewAddrFromNodeID(id) + params := network.NewKadParams() + params.MinProxBinSize = 2 + params.MaxBinSize = 3 + params.MinBinSize = 1 + params.MaxRetries = 1000 + params.RetryExponent = 2 + params.RetryInterval = 1000000 + kademlias[id] = network.NewKademlia(addr.Over(), params) + return kademlias[id] + } + return adapters.Services{ + "pss": func(ctx *adapters.ServiceContext) (node.Service, error) { + ctxlocal, cancel := context.WithTimeout(context.Background(), time.Second) + defer cancel() + keys, err := wapi.NewKeyPair(ctxlocal) + privkey, err := w.GetPrivateKey(keys) + pssp := pss.NewPssParams().WithPrivateKey(privkey) + pssp.MsgTTL = time.Second * 30 + pssp.AllowRaw = allowRaw + pskad := kademlia(ctx.Config.ID) + ps, err := pss.NewPss(pskad, pssp) + if err != nil { + return nil, err + } + //psses[common.ToHex(crypto.FromECDSAPub(&privkey.PublicKey))] = ps + psses[hexutil.Encode(crypto.FromECDSAPub(&privkey.PublicKey))] = ps + return ps, nil + }, + "bzz": func(ctx *adapters.ServiceContext) (node.Service, error) { + addr := network.NewAddrFromNodeID(ctx.Config.ID) + hp := network.NewHiveParams() + hp.Discovery = false + config := &network.BzzConfig{ + OverlayAddr: addr.Over(), + UnderlayAddr: addr.Under(), + HiveParams: hp, + } + return network.NewBzz(config, kademlia(ctx.Config.ID), stateStore, nil, nil), nil + }, + } +} diff --git a/swarm/pss/protocol.go b/swarm/pss/protocol.go index bf23e49dafad8..5fcae090efb98 100644 --- a/swarm/pss/protocol.go +++ b/swarm/pss/protocol.go @@ -172,6 +172,8 @@ func (p *Protocol) Handle(msg []byte, 
peer *p2p.Peer, asymmetric bool, keyid str rw, err := p.AddPeer(peer, *p.topic, asymmetric, keyid) if err != nil { return err + } else if rw == nil { + return fmt.Errorf("handle called on nil MsgReadWriter for new key " + keyid) } vrw = rw.(*PssReadWriter) } @@ -181,8 +183,14 @@ func (p *Protocol) Handle(msg []byte, peer *p2p.Peer, asymmetric bool, keyid str return fmt.Errorf("could not decode pssmsg") } if asymmetric { + if p.pubKeyRWPool[keyid] == nil { + return fmt.Errorf("handle called on nil MsgReadWriter for key " + keyid) + } vrw = p.pubKeyRWPool[keyid].(*PssReadWriter) } else { + if p.symKeyRWPool[keyid] == nil { + return fmt.Errorf("handle called on nil MsgReadWriter for key " + keyid) + } vrw = p.symKeyRWPool[keyid].(*PssReadWriter) } vrw.injectMsg(pmsg) diff --git a/swarm/pss/pss.go b/swarm/pss/pss.go index 77191b25a0129..dd081e93a5691 100644 --- a/swarm/pss/pss.go +++ b/swarm/pss/pss.go @@ -41,7 +41,7 @@ import ( const ( defaultPaddingByteSize = 16 - defaultMsgTTL = time.Second * 120 + DefaultMsgTTL = time.Second * 120 defaultDigestCacheTTL = time.Second * 10 defaultSymKeyCacheCapacity = 512 digestLength = 32 // byte length of digest used for pss cache (currently same as swarm chunk hash) @@ -94,7 +94,7 @@ type PssParams struct { // Sane defaults for Pss func NewPssParams() *PssParams { return &PssParams{ - MsgTTL: defaultMsgTTL, + MsgTTL: DefaultMsgTTL, CacheTTL: defaultDigestCacheTTL, SymKeyCacheCapacity: defaultSymKeyCacheCapacity, } @@ -354,11 +354,11 @@ func (p *Pss) handlePssMsg(msg interface{}) error { } if int64(pssmsg.Expire) < time.Now().Unix() { metrics.GetOrRegisterCounter("pss.expire", nil).Inc(1) - log.Warn("pss filtered expired message", "from", fmt.Sprintf("%x", p.Overlay.BaseAddr()), "to", fmt.Sprintf("%x", common.ToHex(pssmsg.To))) + log.Warn("pss filtered expired message", "from", common.ToHex(p.Overlay.BaseAddr()), "to", common.ToHex(pssmsg.To)) return nil } if p.checkFwdCache(pssmsg) { - log.Trace(fmt.Sprintf("pss relay block-cache match (process): FROM %x TO %x", p.Overlay.BaseAddr(), common.ToHex(pssmsg.To))) + log.Trace("pss relay block-cache match (process)", "from", common.ToHex(p.Overlay.BaseAddr()), "to", (common.ToHex(pssmsg.To))) return nil } p.addFwdCache(pssmsg) @@ -480,7 +480,7 @@ func (p *Pss) SetPeerPublicKey(pubkey *ecdsa.PublicKey, topic Topic, address *Ps } // Automatically generate a new symkey for a topic and address hint -func (p *Pss) generateSymmetricKey(topic Topic, address *PssAddress, addToCache bool) (string, error) { +func (p *Pss) GenerateSymmetricKey(topic Topic, address *PssAddress, addToCache bool) (string, error) { keyid, err := p.w.GenerateSymKey() if err != nil { return "", err diff --git a/swarm/pss/pss_test.go b/swarm/pss/pss_test.go index a59a5e4270a4f..c738247f1f4c2 100644 --- a/swarm/pss/pss_test.go +++ b/swarm/pss/pss_test.go @@ -470,7 +470,7 @@ func TestKeys(t *testing.T) { } // make a symmetric key that we will send to peer for encrypting messages to us - inkeyid, err := ps.generateSymmetricKey(topicobj, &addr, true) + inkeyid, err := ps.GenerateSymmetricKey(topicobj, &addr, true) if err != nil { t.Fatalf("failed to set 'our' incoming symmetric key") } @@ -1296,7 +1296,7 @@ func benchmarkSymKeySend(b *testing.B) { topic := BytesToTopic([]byte("foo")) to := make(PssAddress, 32) copy(to[:], network.RandomAddr().Over()) - symkeyid, err := ps.generateSymmetricKey(topic, &to, true) + symkeyid, err := ps.GenerateSymmetricKey(topic, &to, true) if err != nil { b.Fatalf("could not generate symkey: %v", err) } @@ -1389,7 
+1389,7 @@ func benchmarkSymkeyBruteforceChangeaddr(b *testing.B) { for i := 0; i < int(keycount); i++ { to := make(PssAddress, 32) copy(to[:], network.RandomAddr().Over()) - keyid, err = ps.generateSymmetricKey(topic, &to, true) + keyid, err = ps.GenerateSymmetricKey(topic, &to, true) if err != nil { b.Fatalf("cant generate symkey #%d: %v", i, err) } @@ -1471,7 +1471,7 @@ func benchmarkSymkeyBruteforceSameaddr(b *testing.B) { topic := BytesToTopic([]byte("foo")) for i := 0; i < int(keycount); i++ { copy(addr[i], network.RandomAddr().Over()) - keyid, err = ps.generateSymmetricKey(topic, &addr[i], true) + keyid, err = ps.GenerateSymmetricKey(topic, &addr[i], true) if err != nil { b.Fatalf("cant generate symkey #%d: %v", i, err) } diff --git a/swarm/storage/chunker.go b/swarm/storage/chunker.go index 5780742e38a4a..2d197fefa9304 100644 --- a/swarm/storage/chunker.go +++ b/swarm/storage/chunker.go @@ -16,6 +16,7 @@ package storage import ( + "context" "encoding/binary" "errors" "fmt" @@ -126,7 +127,7 @@ type TreeChunker struct { The chunks are not meant to be validated by the chunker when joining. This is because it is left to the DPA to decide which sources are trusted. */ -func TreeJoin(addr Address, getter Getter, depth int) *LazyChunkReader { +func TreeJoin(ctx context.Context, addr Address, getter Getter, depth int) *LazyChunkReader { jp := &JoinerParams{ ChunkerParams: ChunkerParams{ chunkSize: DefaultChunkSize, @@ -137,14 +138,14 @@ func TreeJoin(addr Address, getter Getter, depth int) *LazyChunkReader { depth: depth, } - return NewTreeJoiner(jp).Join() + return NewTreeJoiner(jp).Join(ctx) } /* When splitting, data is given as a SectionReader, and the key is a hashSize long byte slice (Key), the root hash of the entire content will fill this once processing finishes. New chunks to store are store using the putter which the caller provides. 
*/ -func TreeSplit(data io.Reader, size int64, putter Putter) (k Address, wait func(), err error) { +func TreeSplit(ctx context.Context, data io.Reader, size int64, putter Putter) (k Address, wait func(context.Context) error, err error) { tsp := &TreeSplitterParams{ SplitterParams: SplitterParams{ ChunkerParams: ChunkerParams{ @@ -156,7 +157,7 @@ func TreeSplit(data io.Reader, size int64, putter Putter) (k Address, wait func( }, size: size, } - return NewTreeSplitter(tsp).Split() + return NewTreeSplitter(tsp).Split(ctx) } func NewTreeJoiner(params *JoinerParams) *TreeChunker { @@ -224,7 +225,7 @@ func (tc *TreeChunker) decrementWorkerCount() { tc.workerCount -= 1 } -func (tc *TreeChunker) Split() (k Address, wait func(), err error) { +func (tc *TreeChunker) Split(ctx context.Context) (k Address, wait func(context.Context) error, err error) { if tc.chunkSize <= 0 { panic("chunker must be initialised") } @@ -380,7 +381,7 @@ type LazyChunkReader struct { getter Getter } -func (tc *TreeChunker) Join() *LazyChunkReader { +func (tc *TreeChunker) Join(ctx context.Context) *LazyChunkReader { return &LazyChunkReader{ key: tc.addr, chunkSize: tc.chunkSize, diff --git a/swarm/storage/chunker_test.go b/swarm/storage/chunker_test.go index d8be13ef6bb2f..69c388b39eba6 100644 --- a/swarm/storage/chunker_test.go +++ b/swarm/storage/chunker_test.go @@ -18,6 +18,7 @@ package storage import ( "bytes" + "context" "crypto/rand" "encoding/binary" "errors" @@ -81,7 +82,7 @@ func testRandomBrokenData(n int, tester *chunkerTester) { putGetter := newTestHasherStore(NewMapChunkStore(), SHA3Hash) expectedError := fmt.Errorf("Broken reader") - addr, _, err := TreeSplit(brokendata, int64(n), putGetter) + addr, _, err := TreeSplit(context.TODO(), brokendata, int64(n), putGetter) if err == nil || err.Error() != expectedError.Error() { tester.t.Fatalf("Not receiving the correct error! 
Expected %v, received %v", expectedError, err) } @@ -104,20 +105,24 @@ func testRandomData(usePyramid bool, hash string, n int, tester *chunkerTester) putGetter := newTestHasherStore(NewMapChunkStore(), hash) var addr Address - var wait func() + var wait func(context.Context) error var err error + ctx := context.TODO() if usePyramid { - addr, wait, err = PyramidSplit(data, putGetter, putGetter) + addr, wait, err = PyramidSplit(ctx, data, putGetter, putGetter) } else { - addr, wait, err = TreeSplit(data, int64(n), putGetter) + addr, wait, err = TreeSplit(ctx, data, int64(n), putGetter) } if err != nil { tester.t.Fatalf(err.Error()) } tester.t.Logf(" Key = %v\n", addr) - wait() + err = wait(ctx) + if err != nil { + tester.t.Fatalf(err.Error()) + } - reader := TreeJoin(addr, putGetter, 0) + reader := TreeJoin(context.TODO(), addr, putGetter, 0) output := make([]byte, n) r, err := reader.Read(output) if r != n || err != io.EOF { @@ -200,11 +205,15 @@ func TestDataAppend(t *testing.T) { chunkStore := NewMapChunkStore() putGetter := newTestHasherStore(chunkStore, SHA3Hash) - addr, wait, err := PyramidSplit(data, putGetter, putGetter) + ctx := context.TODO() + addr, wait, err := PyramidSplit(ctx, data, putGetter, putGetter) + if err != nil { + tester.t.Fatalf(err.Error()) + } + err = wait(ctx) if err != nil { tester.t.Fatalf(err.Error()) } - wait() //create a append data stream appendInput, found := tester.inputs[uint64(m)] @@ -217,13 +226,16 @@ func TestDataAppend(t *testing.T) { } putGetter = newTestHasherStore(chunkStore, SHA3Hash) - newAddr, wait, err := PyramidAppend(addr, appendData, putGetter, putGetter) + newAddr, wait, err := PyramidAppend(ctx, addr, appendData, putGetter, putGetter) + if err != nil { + tester.t.Fatalf(err.Error()) + } + err = wait(ctx) if err != nil { tester.t.Fatalf(err.Error()) } - wait() - reader := TreeJoin(newAddr, putGetter, 0) + reader := TreeJoin(ctx, newAddr, putGetter, 0) newOutput := make([]byte, n+m) r, err := reader.Read(newOutput) if r != (n + m) { @@ -282,12 +294,16 @@ func benchmarkSplitJoin(n int, t *testing.B) { data := testDataReader(n) putGetter := newTestHasherStore(NewMapChunkStore(), SHA3Hash) - key, wait, err := PyramidSplit(data, putGetter, putGetter) + ctx := context.TODO() + key, wait, err := PyramidSplit(ctx, data, putGetter, putGetter) if err != nil { t.Fatalf(err.Error()) } - wait() - reader := TreeJoin(key, putGetter, 0) + err = wait(ctx) + if err != nil { + t.Fatalf(err.Error()) + } + reader := TreeJoin(ctx, key, putGetter, 0) benchReadAll(reader) } } @@ -298,7 +314,7 @@ func benchmarkSplitTreeSHA3(n int, t *testing.B) { data := testDataReader(n) putGetter := newTestHasherStore(&fakeChunkStore{}, SHA3Hash) - _, _, err := TreeSplit(data, int64(n), putGetter) + _, _, err := TreeSplit(context.TODO(), data, int64(n), putGetter) if err != nil { t.Fatalf(err.Error()) } @@ -311,7 +327,7 @@ func benchmarkSplitTreeBMT(n int, t *testing.B) { data := testDataReader(n) putGetter := newTestHasherStore(&fakeChunkStore{}, BMTHash) - _, _, err := TreeSplit(data, int64(n), putGetter) + _, _, err := TreeSplit(context.TODO(), data, int64(n), putGetter) if err != nil { t.Fatalf(err.Error()) } @@ -324,7 +340,7 @@ func benchmarkSplitPyramidSHA3(n int, t *testing.B) { data := testDataReader(n) putGetter := newTestHasherStore(&fakeChunkStore{}, SHA3Hash) - _, _, err := PyramidSplit(data, putGetter, putGetter) + _, _, err := PyramidSplit(context.TODO(), data, putGetter, putGetter) if err != nil { t.Fatalf(err.Error()) } @@ -338,7 +354,7 @@ func 
benchmarkSplitPyramidBMT(n int, t *testing.B) { data := testDataReader(n) putGetter := newTestHasherStore(&fakeChunkStore{}, BMTHash) - _, _, err := PyramidSplit(data, putGetter, putGetter) + _, _, err := PyramidSplit(context.TODO(), data, putGetter, putGetter) if err != nil { t.Fatalf(err.Error()) } @@ -354,18 +370,25 @@ func benchmarkSplitAppendPyramid(n, m int, t *testing.B) { chunkStore := NewMapChunkStore() putGetter := newTestHasherStore(chunkStore, SHA3Hash) - key, wait, err := PyramidSplit(data, putGetter, putGetter) + ctx := context.TODO() + key, wait, err := PyramidSplit(ctx, data, putGetter, putGetter) + if err != nil { + t.Fatalf(err.Error()) + } + err = wait(ctx) if err != nil { t.Fatalf(err.Error()) } - wait() putGetter = newTestHasherStore(chunkStore, SHA3Hash) - _, wait, err = PyramidAppend(key, data1, putGetter, putGetter) + _, wait, err = PyramidAppend(ctx, key, data1, putGetter, putGetter) + if err != nil { + t.Fatalf(err.Error()) + } + err = wait(ctx) if err != nil { t.Fatalf(err.Error()) } - wait() } } diff --git a/swarm/storage/filestore.go b/swarm/storage/filestore.go index c0b463debdd16..2d8d82d95a503 100644 --- a/swarm/storage/filestore.go +++ b/swarm/storage/filestore.go @@ -17,6 +17,7 @@ package storage import ( + "context" "io" ) @@ -78,18 +79,18 @@ func NewFileStore(store ChunkStore, params *FileStoreParams) *FileStore { // Chunk retrieval blocks on netStore requests with a timeout so reader will // report error if retrieval of chunks within requested range time out. // It returns a reader with the chunk data and whether the content was encrypted -func (f *FileStore) Retrieve(addr Address) (reader *LazyChunkReader, isEncrypted bool) { +func (f *FileStore) Retrieve(ctx context.Context, addr Address) (reader *LazyChunkReader, isEncrypted bool) { isEncrypted = len(addr) > f.hashFunc().Size() getter := NewHasherStore(f.ChunkStore, f.hashFunc, isEncrypted) - reader = TreeJoin(addr, getter, 0) + reader = TreeJoin(ctx, addr, getter, 0) return } // Public API. Main entry point for document storage directly. 
Used by the
 // FS-aware API and httpaccess
-func (f *FileStore) Store(data io.Reader, size int64, toEncrypt bool) (addr Address, wait func(), err error) {
+func (f *FileStore) Store(ctx context.Context, data io.Reader, size int64, toEncrypt bool) (addr Address, wait func(context.Context) error, err error) {
 	putter := NewHasherStore(f.ChunkStore, f.hashFunc, toEncrypt)
-	return PyramidSplit(data, putter, putter)
+	return PyramidSplit(ctx, data, putter, putter)
 }
 
 func (f *FileStore) HashSize() int {
diff --git a/swarm/storage/filestore_test.go b/swarm/storage/filestore_test.go
index 1aaec5e5cc4b4..f3f5972558842 100644
--- a/swarm/storage/filestore_test.go
+++ b/swarm/storage/filestore_test.go
@@ -18,6 +18,7 @@ package storage
 
 import (
 	"bytes"
+	"context"
 	"io"
 	"io/ioutil"
 	"os"
@@ -49,12 +50,16 @@ func testFileStoreRandom(toEncrypt bool, t *testing.T) {
 	defer os.RemoveAll("/tmp/bzz")
 
 	reader, slice := generateRandomData(testDataSize)
-	key, wait, err := fileStore.Store(reader, testDataSize, toEncrypt)
+	ctx := context.TODO()
+	key, wait, err := fileStore.Store(ctx, reader, testDataSize, toEncrypt)
 	if err != nil {
 		t.Errorf("Store error: %v", err)
 	}
-	wait()
-	resultReader, isEncrypted := fileStore.Retrieve(key)
+	err = wait(ctx)
+	if err != nil {
+		t.Fatalf("Store wait error: %v", err.Error())
+	}
+	resultReader, isEncrypted := fileStore.Retrieve(context.TODO(), key)
 	if isEncrypted != toEncrypt {
 		t.Fatalf("isEncrypted expected %v got %v", toEncrypt, isEncrypted)
 	}
@@ -72,7 +77,7 @@ func testFileStoreRandom(toEncrypt bool, t *testing.T) {
 	ioutil.WriteFile("/tmp/slice.bzz.16M", slice, 0666)
 	ioutil.WriteFile("/tmp/result.bzz.16M", resultSlice, 0666)
 	localStore.memStore = NewMemStore(NewDefaultStoreParams(), db)
-	resultReader, isEncrypted = fileStore.Retrieve(key)
+	resultReader, isEncrypted = fileStore.Retrieve(context.TODO(), key)
 	if isEncrypted != toEncrypt {
 		t.Fatalf("isEncrypted expected %v got %v", toEncrypt, isEncrypted)
 	}
@@ -110,12 +115,16 @@ func testFileStoreCapacity(toEncrypt bool, t *testing.T) {
 	}
 	fileStore := NewFileStore(localStore, NewFileStoreParams())
 	reader, slice := generateRandomData(testDataSize)
-	key, wait, err := fileStore.Store(reader, testDataSize, toEncrypt)
+	ctx := context.TODO()
+	key, wait, err := fileStore.Store(ctx, reader, testDataSize, toEncrypt)
+	if err != nil {
+		t.Errorf("Store error: %v", err)
+	}
+	err = wait(ctx)
 	if err != nil {
 		t.Errorf("Store error: %v", err)
 	}
-	wait()
-	resultReader, isEncrypted := fileStore.Retrieve(key)
+	resultReader, isEncrypted := fileStore.Retrieve(context.TODO(), key)
 	if isEncrypted != toEncrypt {
 		t.Fatalf("isEncrypted expected %v got %v", toEncrypt, isEncrypted)
 	}
@@ -134,7 +143,7 @@ func testFileStoreCapacity(toEncrypt bool, t *testing.T) {
 	memStore.setCapacity(0)
 	// check whether it is, indeed, empty
 	fileStore.ChunkStore = memStore
-	resultReader, isEncrypted = fileStore.Retrieve(key)
+	resultReader, isEncrypted = fileStore.Retrieve(context.TODO(), key)
 	if isEncrypted != toEncrypt {
 		t.Fatalf("isEncrypted expected %v got %v", toEncrypt, isEncrypted)
 	}
@@ -144,7 +153,7 @@ func testFileStoreCapacity(toEncrypt bool, t *testing.T) {
 	// check how it works with localStore
 	fileStore.ChunkStore = localStore
 	// localStore.dbStore.setCapacity(0)
-	resultReader, isEncrypted = fileStore.Retrieve(key)
+	resultReader, isEncrypted = fileStore.Retrieve(context.TODO(), key)
 	if isEncrypted != toEncrypt {
 		t.Fatalf("isEncrypted expected %v got %v", toEncrypt, isEncrypted)
 	}
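The tests above keep using `context.TODO()`; a hedged sketch of the same store-then-wait convention with a real deadline, as it might look from outside the package (function name and scaffolding are illustrative; the `Store`/`Retrieve` signatures are taken from the diff):

```go
// Store data, then block until every chunk is actually persisted.
func storeWithDeadline(fileStore *storage.FileStore, reader io.Reader, size int64) (storage.Address, error) {
	ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second)
	defer cancel()

	addr, wait, err := fileStore.Store(ctx, reader, size, false /* toEncrypt */)
	if err != nil {
		return nil, err
	}
	// wait blocks until all chunks have been stored (or ctx is done)
	if err := wait(ctx); err != nil {
		return nil, err
	}
	return addr, nil
}
```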
index e659c3681ef64..e18b66ddcb9fa 100644
--- a/swarm/storage/hasherstore.go
+++ b/swarm/storage/hasherstore.go
@@ -17,6 +17,7 @@ package storage

 import (
+	"context"
 	"fmt"
 	"sync"
@@ -126,9 +127,10 @@ func (h *hasherStore) Close() {
 // Wait returns when
 // 1) the Close() function has been called and
 // 2) all the chunks which have been Put have been stored
-func (h *hasherStore) Wait() {
+func (h *hasherStore) Wait(ctx context.Context) error {
 	<-h.closed
 	h.wg.Wait()
+	return nil
 }

 func (h *hasherStore) createHash(chunkData ChunkData) Address {
diff --git a/swarm/storage/hasherstore_test.go b/swarm/storage/hasherstore_test.go
index ccb37524a01ba..cf7b0dcc343cf 100644
--- a/swarm/storage/hasherstore_test.go
+++ b/swarm/storage/hasherstore_test.go
@@ -18,6 +18,7 @@ package storage

 import (
 	"bytes"
+	"context"
 	"testing"

 	"github.com/ethereum/go-ethereum/swarm/storage/encryption"
@@ -60,7 +61,10 @@ func TestHasherStore(t *testing.T) {
 	hasherStore.Close()

 	// Wait until chunks are really stored
-	hasherStore.Wait()
+	err = hasherStore.Wait(context.TODO())
+	if err != nil {
+		t.Fatalf("Expected no error got \"%v\"", err)
+	}

 	// Get the first chunk
 	retrievedChunkData1, err := hasherStore.Get(key1)
diff --git a/swarm/storage/ldbstore_test.go b/swarm/storage/ldbstore_test.go
index 2c706a75bd316..2453d2f30b958 100644
--- a/swarm/storage/ldbstore_test.go
+++ b/swarm/storage/ldbstore_test.go
@@ -59,12 +59,12 @@ func newTestDbStore(mock bool, trusted bool) (*testDbStore, func(), error) {
 	}

 	cleanup := func() {
-		if err != nil {
+		if db != nil {
 			db.Close()
 		}
 		err = os.RemoveAll(dir)
 		if err != nil {
-			panic("db cleanup failed")
+			panic(fmt.Sprintf("db cleanup failed: %v", err))
 		}
 	}
diff --git a/swarm/storage/pyramid.go b/swarm/storage/pyramid.go
index 01172cb77a47c..6643e989a1c68 100644
--- a/swarm/storage/pyramid.go
+++ b/swarm/storage/pyramid.go
@@ -17,6 +17,7 @@ package storage

 import (
+	"context"
 	"encoding/binary"
 	"errors"
 	"io"
@@ -99,12 +100,12 @@ func NewPyramidSplitterParams(addr Address, reader io.Reader, putter Putter, get
 	When splitting, data is given as a SectionReader, and the key is a hashSize long byte slice (Key); the root hash of the entire content will fill this once processing finishes.
 	New chunks to store are stored using the putter which the caller provides.
 */
-func PyramidSplit(reader io.Reader, putter Putter, getter Getter) (Address, func(), error) {
-	return NewPyramidSplitter(NewPyramidSplitterParams(nil, reader, putter, getter, DefaultChunkSize)).Split()
+func PyramidSplit(ctx context.Context, reader io.Reader, putter Putter, getter Getter) (Address, func(context.Context) error, error) {
+	return NewPyramidSplitter(NewPyramidSplitterParams(nil, reader, putter, getter, DefaultChunkSize)).Split(ctx)
 }

-func PyramidAppend(addr Address, reader io.Reader, putter Putter, getter Getter) (Address, func(), error) {
-	return NewPyramidSplitter(NewPyramidSplitterParams(addr, reader, putter, getter, DefaultChunkSize)).Append()
+func PyramidAppend(ctx context.Context, addr Address, reader io.Reader, putter Putter, getter Getter) (Address, func(context.Context) error, error) {
+	return NewPyramidSplitter(NewPyramidSplitterParams(addr, reader, putter, getter, DefaultChunkSize)).Append(ctx)
 }

 // Entry to create a tree node
@@ -203,7 +204,7 @@ func (pc *PyramidChunker) decrementWorkerCount() {
 	pc.workerCount -= 1
 }

-func (pc *PyramidChunker) Split() (k Address, wait func(), err error) {
+func (pc *PyramidChunker) Split(ctx context.Context) (k Address, wait func(context.Context) error, err error) {
 	log.Debug("pyramid.chunker: Split()")

 	pc.wg.Add(1)
@@ -235,7 +236,7 @@
 }

-func (pc *PyramidChunker) Append() (k Address, wait func(), err error) {
+func (pc *PyramidChunker) Append(ctx context.Context) (k Address, wait func(context.Context) error, err error) {
 	log.Debug("pyramid.chunker: Append()")
 	// Load the right most unfinished tree chunks in every level
 	pc.loadTree()
diff --git a/swarm/storage/types.go b/swarm/storage/types.go
index b75f64205ff11..32880ead760f7 100644
--- a/swarm/storage/types.go
+++ b/swarm/storage/types.go
@@ -18,6 +18,7 @@ package storage

 import (
 	"bytes"
+	"context"
 	"crypto"
 	"crypto/rand"
 	"encoding/binary"
@@ -303,7 +304,7 @@ type Putter interface {
 	// Close is to indicate that no more chunk data will be Put on this Putter
 	Close()
 	// Wait returns once all data has been stored and Close() has been called.
-	Wait()
+	Wait(context.Context) error
 }

 // Getter is an interface to retrieve a chunk's data by its reference
diff --git a/swarm/swarm_test.go b/swarm/swarm_test.go
index f82a9c6fac04f..0827748ae2005 100644
--- a/swarm/swarm_test.go
+++ b/swarm/swarm_test.go
@@ -17,10 +17,13 @@ package swarm

 import (
+	"context"
+	"encoding/hex"
 	"io/ioutil"
 	"math/rand"
 	"os"
 	"path"
+	"runtime"
 	"strings"
 	"testing"
 	"time"
@@ -42,6 +45,13 @@ func TestNewSwarm(t *testing.T) {
 	// a simple rpc endpoint for testing dialing
 	ipcEndpoint := path.Join(dir, "TestSwarm.ipc")

+	// Windows named pipes are not on the filesystem but on NPFS
+	if runtime.GOOS == "windows" {
+		b := make([]byte, 8)
+		rand.Read(b)
+		ipcEndpoint = `\\.\pipe\TestSwarm-` + hex.EncodeToString(b)
+	}
+
 	_, server, err := rpc.StartIPCEndpoint(ipcEndpoint, nil)
 	if err != nil {
 		t.Error(err)
 	}
@@ -338,15 +348,19 @@ func testLocalStoreAndRetrieve(t *testing.T, swarm *Swarm, n int, randomData boo
 	}
 	dataPut := string(slice)

-	k, wait, err := swarm.api.Store(strings.NewReader(dataPut), int64(len(dataPut)), false)
+	ctx := context.TODO()
+	k, wait, err := swarm.api.Store(ctx, strings.NewReader(dataPut), int64(len(dataPut)), false)
 	if err != nil {
 		t.Fatal(err)
 	}
 	if wait != nil {
-		wait()
+		err = wait(ctx)
+		if err != nil {
+			t.Fatal(err)
+		}
 	}

-	r, _ := swarm.api.Retrieve(k)
+	r, _ := swarm.api.Retrieve(context.TODO(), k)
 	d, err := ioutil.ReadAll(r)
 	if err != nil {

From 2eedbe799f5eb8766e4808d8a1810cc1c90c4b93 Mon Sep 17 00:00:00 2001
From: Wenbiao Zheng
Date: Mon, 9 Jul 2018 22:34:59 +0800
Subject: [PATCH 36/36] cmd: typo fixed, isntance -> instance (#17149)

---
 cmd/utils/flags.go | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/cmd/utils/flags.go b/cmd/utils/flags.go
index a191e4430660d..46ea7b96bf862 100644
--- a/cmd/utils/flags.go
+++ b/cmd/utils/flags.go
@@ -998,7 +998,7 @@ func setEthash(ctx *cli.Context, cfg *eth.Config) {
 	}
 }

-// checkExclusive verifies that only a single isntance of the provided flags was
+// checkExclusive verifies that only a single instance of the provided flags was
 // set by the user. Each flag might optionally be followed by a string type to
 // specialize it further.
 func checkExclusive(ctx *cli.Context, args ...interface{}) {
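
A note on the API shape the storage patches above converge on: Store, Retrieve, PyramidSplit and
PyramidAppend now take a context.Context, and the returned wait function becomes
func(context.Context) error instead of a bare func(). The sketch below is not part of the patch
series; it illustrates the post-patch calling convention, assuming the package is imported as
github.com/ethereum/go-ethereum/swarm/storage and that NewMapChunkStore (the in-memory ChunkStore
used by the benchmarks above) is available to callers.

package main

import (
	"context"
	"fmt"
	"strings"

	"github.com/ethereum/go-ethereum/swarm/storage"
)

func main() {
	// Assumption: NewMapChunkStore as a simple in-memory ChunkStore, mirroring
	// its use in benchmarkSplitAppendPyramid above.
	fileStore := storage.NewFileStore(storage.NewMapChunkStore(), storage.NewFileStoreParams())

	data := "hello swarm"
	ctx := context.TODO()

	// Store now takes a context and returns wait as func(context.Context) error.
	addr, wait, err := fileStore.Store(ctx, strings.NewReader(data), int64(len(data)), false)
	if err != nil {
		panic(err)
	}

	// wait blocks until all chunks have been flushed to the store; taking a
	// context and returning an error gives callers a way to abort instead of
	// hanging forever.
	if err := wait(ctx); err != nil {
		panic(err)
	}

	// Retrieve threads the same context down to TreeJoin.
	reader, isEncrypted := fileStore.Retrieve(ctx, addr)
	_ = reader // a *LazyChunkReader; reads block on chunk retrieval
	fmt.Printf("addr=%x encrypted=%v\n", addr, isEncrypted)
}

Note that hasherStore.Wait above still returns a nil error unconditionally; the context parameter
is plumbed through now so that cancellation and timeouts can be honoured by later changes without
touching callers again.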