diff --git a/cmd/devp2p/internal/ethtest/chain.go b/cmd/devp2p/internal/ethtest/chain.go index 27ee9493b..90b5720f1 100644 --- a/cmd/devp2p/internal/ethtest/chain.go +++ b/cmd/devp2p/internal/ethtest/chain.go @@ -26,6 +26,7 @@ import ( "os" "strings" + "github.com/scroll-tech/go-ethereum/common" "github.com/scroll-tech/go-ethereum/core" "github.com/scroll-tech/go-ethereum/core/forkid" "github.com/scroll-tech/go-ethereum/core/types" @@ -67,6 +68,13 @@ func (c *Chain) TotalDifficultyAt(height int) *big.Int { return sum } +func (c *Chain) RootAt(height int) common.Hash { + if height < c.Len() { + return c.blocks[height].Root() + } + return common.Hash{} +} + // ForkID gets the fork id of the chain. func (c *Chain) ForkID() forkid.ID { return forkid.NewID(c.chainConfig, c.blocks[0].Hash(), uint64(c.Len())) diff --git a/cmd/devp2p/internal/ethtest/helpers.go b/cmd/devp2p/internal/ethtest/helpers.go index 70d773f72..9320fe94e 100644 --- a/cmd/devp2p/internal/ethtest/helpers.go +++ b/cmd/devp2p/internal/ethtest/helpers.go @@ -97,6 +97,19 @@ func (s *Suite) dial66() (*Conn, error) { return conn, nil } +// dialSnap attempts to dial the given node and perform a handshake, +// returning the created Conn with additional snap/1 capabilities if +// successful. +func (s *Suite) dialSnap() (*Conn, error) { + conn, err := s.dial66() + if err != nil { + return nil, fmt.Errorf("dial failed: %v", err) + } + conn.caps = append(conn.caps, p2p.Cap{Name: "snap", Version: 1}) + conn.ourHighestSnapProtoVersion = 1 + return conn, nil +} + // peer performs both the protocol handshake and the status message // exchange with the node in order to peer with it. func (c *Conn) peer(chain *Chain, status *Status) error { @@ -132,7 +145,11 @@ func (c *Conn) handshake() error { } c.negotiateEthProtocol(msg.Caps) if c.negotiatedProtoVersion == 0 { - return fmt.Errorf("could not negotiate protocol (remote caps: %v, local eth version: %v)", msg.Caps, c.ourHighestProtoVersion) + return fmt.Errorf("could not negotiate eth protocol (remote caps: %v, local eth version: %v)", msg.Caps, c.ourHighestProtoVersion) + } + // If we require snap, verify that it was negotiated + if c.ourHighestSnapProtoVersion != c.negotiatedSnapProtoVersion { + return fmt.Errorf("could not negotiate snap protocol (remote caps: %v, local snap version: %v)", msg.Caps, c.ourHighestSnapProtoVersion) } return nil default: @@ -144,15 +161,21 @@ // advertised capability from peer. func (c *Conn) negotiateEthProtocol(caps []p2p.Cap) { var highestEthVersion uint + var highestSnapVersion uint for _, capability := range caps { - if capability.Name != "eth" { - continue - } - if capability.Version > highestEthVersion && capability.Version <= c.ourHighestProtoVersion { - highestEthVersion = capability.Version + switch capability.Name { + case "eth": + if capability.Version > highestEthVersion && capability.Version <= c.ourHighestProtoVersion { + highestEthVersion = capability.Version + } + case "snap": + if capability.Version > highestSnapVersion && capability.Version <= c.ourHighestSnapProtoVersion { + highestSnapVersion = capability.Version + } } } c.negotiatedProtoVersion = highestEthVersion + c.negotiatedSnapProtoVersion = highestSnapVersion } // statusExchange performs a `Status` message exchange with the given node.
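For orientation, the negotiation rule above is the same for both protocol families: take the highest version the remote advertises that we also support, and record 0 when there is no overlap, which `handshake` then treats as failure for a protocol we required. A minimal standalone sketch of that selection rule (the local `Cap` struct stands in for `p2p.Cap`; the version numbers in `main` are illustrative):

```go
package main

import "fmt"

// Cap mirrors the name/version pairs advertised in the devp2p hello message.
type Cap struct {
	Name    string
	Version uint
}

// highestSupported returns the highest version of the named protocol that the
// remote advertises and that does not exceed our own highest version, or 0 if
// there is no overlap.
func highestSupported(remote []Cap, name string, ourHighest uint) uint {
	var best uint
	for _, c := range remote {
		if c.Name == name && c.Version > best && c.Version <= ourHighest {
			best = c.Version
		}
	}
	return best
}

func main() {
	remote := []Cap{{"eth", 65}, {"eth", 66}, {"eth", 67}, {"snap", 1}}
	fmt.Println(highestSupported(remote, "eth", 66)) // 66: eth/67 is newer than we support
	fmt.Println(highestSupported(remote, "snap", 1)) // 1
	fmt.Println(highestSupported(remote, "les", 4))  // 0: never advertised
}
```

Tracking the eth and snap versions separately matters because snap/1 only rides alongside an eth connection, and a missing snap capability must fail the handshake only when the dialer actually asked for it (which is why the check compares `ourHighestSnapProtoVersion` against the negotiated value rather than testing for zero).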
@@ -326,6 +349,15 @@ func (c *Conn) headersRequest(request *GetBlockHeaders, chain *Chain, isEth66 bo } } +func (c *Conn) snapRequest(msg Message, id uint64, chain *Chain) (Message, error) { + defer c.SetReadDeadline(time.Time{}) + c.SetReadDeadline(time.Now().Add(5 * time.Second)) + if err := c.Write(msg); err != nil { + return nil, fmt.Errorf("could not write to connection: %v", err) + } + return c.ReadSnap(id) +} + // getBlockHeaders66 executes the given `GetBlockHeaders` request over the eth66 protocol. func getBlockHeaders66(chain *Chain, conn *Conn, request *GetBlockHeaders, id uint64) (BlockHeaders, error) { // write request diff --git a/cmd/devp2p/internal/ethtest/snap.go b/cmd/devp2p/internal/ethtest/snap.go new file mode 100644 index 000000000..8fc6c3369 --- /dev/null +++ b/cmd/devp2p/internal/ethtest/snap.go @@ -0,0 +1,675 @@ +// Copyright 2014 The go-ethereum Authors +// This file is part of the go-ethereum library. +// +// The go-ethereum library is free software: you can redistribute it and/or modify +// it under the terms of the GNU Lesser General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. +// +// The go-ethereum library is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU Lesser General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public License +// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>. + +package ethtest + +import ( + "bytes" + "errors" + "fmt" + "math/rand" + + "github.com/scroll-tech/go-ethereum/common" + "github.com/scroll-tech/go-ethereum/crypto" + "github.com/scroll-tech/go-ethereum/eth/protocols/snap" + "github.com/scroll-tech/go-ethereum/internal/utesting" + "github.com/scroll-tech/go-ethereum/light" + "github.com/scroll-tech/go-ethereum/trie" + "golang.org/x/crypto/sha3" +) + +func (s *Suite) TestSnapStatus(t *utesting.T) { + conn, err := s.dialSnap() + if err != nil { + t.Fatalf("dial failed: %v", err) + } + defer conn.Close() + if err := conn.peer(s.chain, nil); err != nil { + t.Fatalf("peering failed: %v", err) + } +} + +type accRangeTest struct { + nBytes uint64 + root common.Hash + origin common.Hash + limit common.Hash + + expAccounts int + expFirst common.Hash + expLast common.Hash +}
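The table-driven test below uses positional `accRangeTest` literals, which are compact but easy to misread. Here is the first entry of that table written out with field names; the field meanings are inferred from the struct above and the assertions in `snapGetAccountRange` later in this file (the `exampleCase` helper is purely illustrative, not part of the patch):

```go
package ethtest

import "github.com/scroll-tech/go-ethereum/common"

// exampleCase is the first entry of the table in TestSnapGetAccountRange,
// spelled out with field names (values copied from that table).
func exampleCase(root common.Hash) accRangeTest {
	return accRangeTest{
		nBytes:      4000,          // soft cap on the response size, in bytes
		root:        root,          // state root the server should serve from
		origin:      common.Hash{}, // start of the requested account-hash range
		limit:       common.HexToHash("0xffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff"), // end of the range
		expAccounts: 76,            // accounts the server is expected to return
		expFirst:    common.HexToHash("0x00bf49f440a1cd0527e4d06e2765654c0f56452257516d793a9b8d604dcfdf2a"),
		expLast:     common.HexToHash("0xd2669dcf3858e7f1eecb8b5fedbf22fbea3e9433848a75035f79d68422c2dcda"),
	}
}
```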
+ +// TestSnapGetAccountRange various forms of GetAccountRange requests. +func (s *Suite) TestSnapGetAccountRange(t *utesting.T) { + var ( + root = s.chain.RootAt(999) + ffHash = common.HexToHash("0xffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff") + zero = common.Hash{} + firstKeyMinus1 = common.HexToHash("0x00bf49f440a1cd0527e4d06e2765654c0f56452257516d793a9b8d604dcfdf29") + firstKey = common.HexToHash("0x00bf49f440a1cd0527e4d06e2765654c0f56452257516d793a9b8d604dcfdf2a") + firstKeyPlus1 = common.HexToHash("0x00bf49f440a1cd0527e4d06e2765654c0f56452257516d793a9b8d604dcfdf2b") + secondKey = common.HexToHash("0x09e47cd5056a689e708f22fe1f932709a320518e444f5f7d8d46a3da523d6606") + storageRoot = common.HexToHash("0xbe3d75a1729be157e79c3b77f00206db4d54e3ea14375a015451c88ec067c790") + ) + for i, tc := range []accRangeTest{ + // Tests decreasing the number of bytes + {4000, root, zero, ffHash, 76, firstKey, common.HexToHash("0xd2669dcf3858e7f1eecb8b5fedbf22fbea3e9433848a75035f79d68422c2dcda")}, + {3000, root, zero, ffHash, 57, firstKey, common.HexToHash("0x9b63fa753ece5cb90657d02ecb15df4dc1508d8c1d187af1bf7f1a05e747d3c7")}, + {2000, root, zero, ffHash, 38, firstKey, common.HexToHash("0x5e6140ecae4354a9e8f47559a8c6209c1e0e69cb077b067b528556c11698b91f")}, + {1, root, zero, ffHash, 1, firstKey, firstKey}, + + // Tests variations of the range + // + // [00b to firstkey]: should return [firstkey, secondkey], where secondkey is out of bounds + {4000, root, common.HexToHash("0x00bf000000000000000000000000000000000000000000000000000000000000"), common.HexToHash("0x00bf49f440a1cd0527e4d06e2765654c0f56452257516d793a9b8d604dcfdf2b"), 2, firstKey, secondKey}, + // [00b0 to 0bf0]: where both are before firstkey. Should return firstKey (even though it's out of bounds) + {4000, root, common.HexToHash("0x00b0000000000000000000000000000000000000000000000000000000000000"), common.HexToHash("0x00bf100000000000000000000000000000000000000000000000000000000000"), 1, firstKey, firstKey}, + {4000, root, zero, zero, 1, firstKey, firstKey}, + {4000, root, firstKey, ffHash, 76, firstKey, common.HexToHash("0xd2669dcf3858e7f1eecb8b5fedbf22fbea3e9433848a75035f79d68422c2dcda")}, + {4000, root, firstKeyPlus1, ffHash, 76, secondKey, common.HexToHash("0xd28f55d3b994f16389f36944ad685b48e0fc3f8fbe86c3ca92ebecadf16a783f")}, + + // Test different root hashes + // + // A stateroot that does not exist + {4000, common.Hash{0x13, 37}, zero, ffHash, 0, zero, zero}, + // The genesis stateroot (we expect it to not be served) + {4000, s.chain.RootAt(0), zero, ffHash, 0, zero, zero}, + // A 127 block old stateroot, expected to be served + {4000, s.chain.RootAt(999 - 127), zero, ffHash, 77, firstKey, common.HexToHash("0xe4c6fdef5dd4e789a2612390806ee840b8ec0fe52548f8b4efe41abb20c37aac")}, + // A root which is not actually an account root, but a storage root + {4000, storageRoot, zero, ffHash, 0, zero, zero}, + + // And some nonsensical requests + // + // range from [0xFF to 0x00], wrong order. Expect not to be serviced + {4000, root, ffHash, zero, 0, zero, zero}, + // range from [firstkey, firstkey-1], wrong order. Expect to get first key. + {4000, root, firstKey, firstKeyMinus1, 1, firstKey, firstKey}, + // range from [firstkey, 0], wrong order. Expect to get first key. + {4000, root, firstKey, zero, 1, firstKey, firstKey}, + // Max bytes: 0. Expect to deliver one account.
+ {0, root, zero, ffHash, 1, firstKey, firstKey}, + } { + if err := s.snapGetAccountRange(t, &tc); err != nil { + t.Errorf("test %d \n root: %x\n range: %#x - %#x\n bytes: %d\nfailed: %v", i, tc.root, tc.origin, tc.limit, tc.nBytes, err) + } + } +} + +type stRangesTest struct { + root common.Hash + accounts []common.Hash + origin []byte + limit []byte + nBytes uint64 + + expSlots int +} + +// TestSnapGetStorageRanges various forms of GetStorageRanges requests. +func (s *Suite) TestSnapGetStorageRanges(t *utesting.T) { + var ( + ffHash = common.HexToHash("0xffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff") + zero = common.Hash{} + firstKey = common.HexToHash("0x00bf49f440a1cd0527e4d06e2765654c0f56452257516d793a9b8d604dcfdf2a") + secondKey = common.HexToHash("0x09e47cd5056a689e708f22fe1f932709a320518e444f5f7d8d46a3da523d6606") + ) + for i, tc := range []stRangesTest{ + { + root: s.chain.RootAt(999), + accounts: []common.Hash{secondKey, firstKey}, + origin: zero[:], + limit: ffHash[:], + nBytes: 500, + expSlots: 0, + }, + + /* + Some tests against this account: + { + "balance": "0", + "nonce": 1, + "root": "0xbe3d75a1729be157e79c3b77f00206db4d54e3ea14375a015451c88ec067c790", + "codeHash": "0xc5d2460186f7233c927e7db2dcc703c0e500b653ca82273b7bfad8045d85a470", + "storage": { + "0x405787fa12a823e0f2b7631cc41b3ba8828b3321ca811111fa75cd3aa3bb5ace": "02", + "0xb10e2d527612073b26eecdfd717e6a320cf44b4afac2b0732d9fcbe2b7fa0cf6": "01", + "0xc2575a0e9e593c00f959f8c92f12db2869c3395a3b0502d05e2516446f71f85b": "03" + }, + "key": "0xf493f79c43bd747129a226ad42529885a4b108aba6046b2d12071695a6627844" + } + */ + { // [:] -> [slot1, slot2, slot3] + root: s.chain.RootAt(999), + accounts: []common.Hash{common.HexToHash("0xf493f79c43bd747129a226ad42529885a4b108aba6046b2d12071695a6627844")}, + origin: zero[:], + limit: ffHash[:], + nBytes: 500, + expSlots: 3, + }, + { // [slot1:] -> [slot1, slot2, slot3] + root: s.chain.RootAt(999), + accounts: []common.Hash{common.HexToHash("0xf493f79c43bd747129a226ad42529885a4b108aba6046b2d12071695a6627844")}, + origin: common.FromHex("0x405787fa12a823e0f2b7631cc41b3ba8828b3321ca811111fa75cd3aa3bb5ace"), + limit: ffHash[:], + nBytes: 500, + expSlots: 3, + }, + { // [slot1+ :] -> [slot2, slot3] + root: s.chain.RootAt(999), + accounts: []common.Hash{common.HexToHash("0xf493f79c43bd747129a226ad42529885a4b108aba6046b2d12071695a6627844")}, + origin: common.FromHex("0x405787fa12a823e0f2b7631cc41b3ba8828b3321ca811111fa75cd3aa3bb5acf"), + limit: ffHash[:], + nBytes: 500, + expSlots: 2, + }, + { // [slot1:slot2] -> [slot1, slot2] + root: s.chain.RootAt(999), + accounts: []common.Hash{common.HexToHash("0xf493f79c43bd747129a226ad42529885a4b108aba6046b2d12071695a6627844")}, + origin: common.FromHex("0x405787fa12a823e0f2b7631cc41b3ba8828b3321ca811111fa75cd3aa3bb5ace"), + limit: common.FromHex("0xb10e2d527612073b26eecdfd717e6a320cf44b4afac2b0732d9fcbe2b7fa0cf6"), + nBytes: 500, + expSlots: 2, + }, + { // [slot1+:slot2+] -> [slot2, slot3] + root: s.chain.RootAt(999), + accounts: []common.Hash{common.HexToHash("0xf493f79c43bd747129a226ad42529885a4b108aba6046b2d12071695a6627844")}, + origin: common.FromHex("0x4fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff"), + limit: common.FromHex("0xb10e2d527612073b26eecdfd717e6a320cf44b4afac2b0732d9fcbe2b7fa0cf7"), + nBytes: 500, + expSlots: 2, + }, + } { + if err := s.snapGetStorageRanges(t, &tc); err != nil { + t.Errorf("test %d \n root: %x\n range: %#x - %#x\n bytes: %d\n #accounts: %d\nfailed: %v", + i, tc.root, tc.origin,
tc.limit, tc.nBytes, len(tc.accounts), err) + } + } +} + +type byteCodesTest struct { + nBytes uint64 + hashes []common.Hash + + expHashes int +} + +var ( + // emptyRoot is the known root hash of an empty trie. + emptyRoot = common.HexToHash("56e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421") + // emptyCode is the known hash of the empty EVM bytecode. + emptyCode = common.HexToHash("c5d2460186f7233c927e7db2dcc703c0e500b653ca82273b7bfad8045d85a470") +) + +// TestSnapGetByteCodes various forms of GetByteCodes requests. +func (s *Suite) TestSnapGetByteCodes(t *utesting.T) { + // The halfchain import should yield these bytecodes + var hcBytecodes []common.Hash + for _, s := range []string{ + "0x200c90460d8b0063210d5f5b9918e053c8f2c024485e0f1b48be8b1fc71b1317", + "0x20ba67ed4ac6aff626e0d1d4db623e2fada9593daeefc4a6eb4b70e6cff986f3", + "0x24b5b4902cb3d897c1cee9f16be8e897d8fa277c04c6dc8214f18295fca5de44", + "0x320b9d0a2be39b8a1c858f9f8cb96b1df0983071681de07ded3a7c0d05db5fd6", + "0x48cb0d5275936a24632babc7408339f9f7b051274809de565b8b0db76e97e03c", + "0x67c7a6f5cdaa43b4baa0e15b2be63346d1b9ce9f2c3d7e5804e0cacd44ee3b04", + "0x6d8418059bdc8c3fabf445e6bfc662af3b6a4ae45999b953996e42c7ead2ab49", + "0x7043422e5795d03f17ee0463a37235258e609fdd542247754895d72695e3e142", + "0x727f9e6f0c4bac1ff8d72c2972122d9c8d37ccb37e04edde2339e8da193546f1", + "0x86ccd5e23c78568a8334e0cebaf3e9f48c998307b0bfb1c378cee83b4bfb29cb", + "0x8fc89b00d6deafd4c4279531e743365626dbfa28845ec697919d305c2674302d", + "0x92cfc353bcb9746bb6f9996b6b9df779c88af2e9e0eeac44879ca19887c9b732", + "0x941b4872104f0995a4898fcf0f615ea6bf46bfbdfcf63ea8f2fd45b3f3286b77", + "0xa02fe8f41159bb39d2b704c633c3d6389cf4bfcb61a2539a9155f60786cf815f", + "0xa4b94e0afdffcb0af599677709dac067d3145489ea7aede57672bee43e3b7373", + "0xaf4e64edd3234c1205b725e42963becd1085f013590bd7ed93f8d711c5eb65fb", + "0xb69a18fa855b742031420081999086f6fb56c3930ae8840944e8b8ae9931c51e", + "0xc246c217bc73ce6666c93a93a94faa5250564f50a3fdc27ea74c231c07fe2ca6", + "0xcd6e4ab2c3034df2a8a1dfaaeb1c4baecd162a93d22de35e854ee2945cbe0c35", + "0xe24b692d09d6fc2f3d1a6028c400a27c37d7cbb11511907c013946d6ce263d3b", + "0xe440c5f0e8603fd1ed25976eee261ccee8038cf79d6a4c0eb31b2bf883be737f", + "0xe6eacbc509203d21ac814b350e72934fde686b7f673c19be8cf956b0c70078ce", + "0xe8530de4371467b5be7ea0e69e675ab36832c426d6c1ce9513817c0f0ae1486b", + "0xe85d487abbbc83bf3423cf9731360cf4f5a37220e18e5add54e72ee20861196a", + "0xf195ea389a5eea28db0be93660014275b158963dec44af1dfa7d4743019a9a49", + } { + hcBytecodes = append(hcBytecodes, common.HexToHash(s)) + } + + for i, tc := range []byteCodesTest{ + // A few stateroots + { + nBytes: 10000, hashes: []common.Hash{s.chain.RootAt(0), s.chain.RootAt(999)}, + expHashes: 0, + }, + { + nBytes: 10000, hashes: []common.Hash{s.chain.RootAt(0), s.chain.RootAt(0)}, + expHashes: 0, + }, + // Empties + { + nBytes: 10000, hashes: []common.Hash{emptyRoot}, + expHashes: 0, + }, + { + nBytes: 10000, hashes: []common.Hash{emptyCode}, + expHashes: 1, + }, + { + nBytes: 10000, hashes: []common.Hash{emptyCode, emptyCode, emptyCode}, + expHashes: 3, + }, + // The existing bytecodes + { + nBytes: 10000, hashes: hcBytecodes, + expHashes: len(hcBytecodes), + }, + // The existing, with limited byte arg + { + nBytes: 1, hashes: hcBytecodes, + expHashes: 1, + }, + { + nBytes: 0, hashes: hcBytecodes, + expHashes: 1, + }, + { + nBytes: 1000, hashes: []common.Hash{hcBytecodes[0], hcBytecodes[0], hcBytecodes[0], hcBytecodes[0]}, + expHashes: 4, + }, + } { + if err := s.snapGetByteCodes(t, &tc); err 
!= nil { + t.Errorf("test %d \n bytes: %d\n #hashes: %d\nfailed: %v", i, tc.nBytes, len(tc.hashes), err) + } + } +} + +type trieNodesTest struct { + root common.Hash + paths []snap.TrieNodePathSet + nBytes uint64 + + expHashes []common.Hash + expReject bool +} + +func decodeNibbles(nibbles []byte, bytes []byte) { + for bi, ni := 0, 0; ni < len(nibbles); bi, ni = bi+1, ni+2 { + bytes[bi] = nibbles[ni]<<4 | nibbles[ni+1] + } +} + +// hasTerm returns whether a hex key has the terminator flag. +func hasTerm(s []byte) bool { + return len(s) > 0 && s[len(s)-1] == 16 +} + +func keybytesToHex(str []byte) []byte { + l := len(str)*2 + 1 + var nibbles = make([]byte, l) + for i, b := range str { + nibbles[i*2] = b / 16 + nibbles[i*2+1] = b % 16 + } + nibbles[l-1] = 16 + return nibbles +} + +func hexToCompact(hex []byte) []byte { + terminator := byte(0) + if hasTerm(hex) { + terminator = 1 + hex = hex[:len(hex)-1] + } + buf := make([]byte, len(hex)/2+1) + buf[0] = terminator << 5 // the flag byte + if len(hex)&1 == 1 { + buf[0] |= 1 << 4 // odd flag + buf[0] |= hex[0] // first nibble is contained in the first byte + hex = hex[1:] + } + decodeNibbles(hex, buf[1:]) + return buf +}
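The compact ("hex-prefix") encoding implemented by `hexToCompact` above is easy to get lost in, and `TestSnapTrieNodes` below leans on it to build trie paths from key prefixes. A standalone worked example, copying the helpers verbatim and walking a short, arbitrary key through both encodings:

```go
package main

import "fmt"

// The helpers below are copied from the test file.

func decodeNibbles(nibbles []byte, bytes []byte) {
	for bi, ni := 0, 0; ni < len(nibbles); bi, ni = bi+1, ni+2 {
		bytes[bi] = nibbles[ni]<<4 | nibbles[ni+1]
	}
}

func hasTerm(s []byte) bool {
	return len(s) > 0 && s[len(s)-1] == 16
}

func keybytesToHex(str []byte) []byte {
	l := len(str)*2 + 1
	nibbles := make([]byte, l)
	for i, b := range str {
		nibbles[i*2] = b / 16
		nibbles[i*2+1] = b % 16
	}
	nibbles[l-1] = 16 // terminator nibble
	return nibbles
}

func hexToCompact(hex []byte) []byte {
	terminator := byte(0)
	if hasTerm(hex) {
		terminator = 1
		hex = hex[:len(hex)-1]
	}
	buf := make([]byte, len(hex)/2+1)
	buf[0] = terminator << 5 // bit 5: terminated (leaf) path
	if len(hex)&1 == 1 {
		buf[0] |= 1 << 4 // bit 4: odd number of nibbles
		buf[0] |= hex[0] // first nibble rides in the flag byte
		hex = hex[1:]
	}
	decodeNibbles(hex, buf[1:])
	return buf
}

func main() {
	key := []byte{0x00, 0xbf, 0x49}
	hex := keybytesToHex(key)
	fmt.Println(hex) // [0 0 11 15 4 9 16]: two nibbles per byte, then terminator 16

	// A 3-nibble prefix of the key, compact-encoded: odd length and no
	// terminator, so the flag byte is 0x10 with the first nibble (0) merged in.
	fmt.Printf("%x\n", hexToCompact(hex[:3])) // 100b
}
```

The flag byte therefore carries two bits of metadata: bit 5 marks a terminating (leaf) path and bit 4 marks an odd nibble count, with the first nibble of an odd path stored in the low half of the flag byte.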
+ +// TestSnapTrieNodes various forms of GetTrieNodes requests. +func (s *Suite) TestSnapTrieNodes(t *utesting.T) { + + key := common.FromHex("0x00bf49f440a1cd0527e4d06e2765654c0f56452257516d793a9b8d604dcfdf2a") + // helper function to iterate the key, and generate the compact-encoded + // trie paths along the way. + pathTo := func(length int) snap.TrieNodePathSet { + hex := keybytesToHex(key)[:length] + hex[len(hex)-1] = 0 // remove term flag + hKey := hexToCompact(hex) + return snap.TrieNodePathSet{hKey} + } + var accPaths []snap.TrieNodePathSet + for i := 1; i <= 65; i++ { + accPaths = append(accPaths, pathTo(i)) + } + empty := emptyCode + for i, tc := range []trieNodesTest{ + { + root: s.chain.RootAt(999), + paths: nil, + nBytes: 500, + expHashes: nil, + }, + { + root: s.chain.RootAt(999), + paths: []snap.TrieNodePathSet{ + snap.TrieNodePathSet{}, // zero-length pathset should 'abort' and kick us off + snap.TrieNodePathSet{[]byte{0}}, + }, + nBytes: 5000, + expHashes: []common.Hash{}, + expReject: true, + }, + { + root: s.chain.RootAt(999), + paths: []snap.TrieNodePathSet{ + snap.TrieNodePathSet{[]byte{0}}, + snap.TrieNodePathSet{[]byte{1}, []byte{0}}, + }, + nBytes: 5000, + //0x6b3724a41b8c38b46d4d02fba2bb2074c47a507eb16a9a4b978f91d32e406faf + expHashes: []common.Hash{s.chain.RootAt(999)}, + }, + { // nonsensically long path + root: s.chain.RootAt(999), + paths: []snap.TrieNodePathSet{ + snap.TrieNodePathSet{[]byte{0, 1, 2, 3, 4, 5, 6, 7, 8, 0, 1, 2, 3, 4, 5, 6, 7, 8, 0, 1, 2, 3, 4, 5, 6, 7, 8, + 0, 1, 2, 3, 4, 5, 6, 7, 8, 0, 1, 2, 3, 4, 5, 6, 7, 8, 0, 1, 2, 3, 4, 5, 6, 7, 8}}, + }, + nBytes: 5000, + expHashes: []common.Hash{common.HexToHash("0xc5d2460186f7233c927e7db2dcc703c0e500b653ca82273b7bfad8045d85a470")}, + }, + { + root: s.chain.RootAt(0), + paths: []snap.TrieNodePathSet{ + snap.TrieNodePathSet{[]byte{0}}, + snap.TrieNodePathSet{[]byte{1}, []byte{0}}, + }, + nBytes: 5000, + expHashes: []common.Hash{}, + }, + { + // The leaf is only a couple of levels down, so the continued trie traversal causes lookup failures. + root: s.chain.RootAt(999), + paths: accPaths, + nBytes: 5000, + expHashes: []common.Hash{ + common.HexToHash("0xbcefee69b37cca1f5bf3a48aebe08b35f2ea1864fa958bb0723d909a0e0d28d8"), + common.HexToHash("0x4fb1e4e2391e4b4da471d59641319b8fa25d76c973d4bec594d7b00a69ae5135"), + empty, empty, empty, empty, empty, empty, empty, empty, empty, empty, empty, empty, + empty, empty, empty, empty, empty, empty, empty, empty, empty, empty, empty, empty, + empty, empty, empty, empty, empty, empty, empty, empty, empty, empty, empty, empty, + empty, empty, empty, empty, empty, empty, empty, empty, empty, empty, empty, empty, + empty, empty, empty, empty, empty, empty, empty, empty, empty, empty, empty, empty, + empty, empty, empty}, + }, + { + // Basically the same as above, with different ordering + root: s.chain.RootAt(999), + paths: []snap.TrieNodePathSet{ + accPaths[10], accPaths[1], accPaths[0], + }, + nBytes: 5000, + expHashes: []common.Hash{ + empty, + common.HexToHash("0x4fb1e4e2391e4b4da471d59641319b8fa25d76c973d4bec594d7b00a69ae5135"), + common.HexToHash("0xbcefee69b37cca1f5bf3a48aebe08b35f2ea1864fa958bb0723d909a0e0d28d8"), + }, + }, + } { + if err := s.snapGetTrieNodes(t, &tc); err != nil { + t.Errorf("test %d \n #hashes %x\n root: %#x\n bytes: %d\nfailed: %v", i, len(tc.expHashes), tc.root, tc.nBytes, err) + } + } +} + +func (s *Suite) snapGetAccountRange(t *utesting.T, tc *accRangeTest) error { + conn, err := s.dialSnap() + if err != nil { + t.Fatalf("dial failed: %v", err) + } + defer conn.Close() + if err = conn.peer(s.chain, nil); err != nil { + t.Fatalf("peering failed: %v", err) + } + // write request + req := &GetAccountRange{ + ID: uint64(rand.Int63()), + Root: tc.root, + Origin: tc.origin, + Limit: tc.limit, + Bytes: tc.nBytes, + } + resp, err := conn.snapRequest(req, req.ID, s.chain) + if err != nil { + return fmt.Errorf("account range request failed: %v", err) + } + var res *snap.AccountRangePacket + if r, ok := resp.(*AccountRange); !ok { + return fmt.Errorf("account range response wrong: %T %v", resp, resp) + } else { + res = (*snap.AccountRangePacket)(r) + } + if exp, got := tc.expAccounts, len(res.Accounts); exp != got { + return fmt.Errorf("expected %d accounts, got %d", exp, got) + } + // Check that the encoding order is correct + for i := 1; i < len(res.Accounts); i++ { + if bytes.Compare(res.Accounts[i-1].Hash[:], res.Accounts[i].Hash[:]) >= 0 { + return fmt.Errorf("accounts not monotonically increasing: #%d [%x] vs #%d [%x]", i-1, res.Accounts[i-1].Hash[:], i, res.Accounts[i].Hash[:]) + } + } + var ( + hashes []common.Hash + accounts [][]byte + proof = res.Proof + ) + hashes, accounts, err = res.Unpack() + if err != nil { + return err + } + if len(hashes) == 0 && len(accounts) == 0 && len(proof) == 0 { + return nil + } + if len(hashes) > 0 { + if exp, got := tc.expFirst, res.Accounts[0].Hash; exp != got { + return fmt.Errorf("expected first account 0x%x, got 0x%x", exp, got) + } + if exp, got := tc.expLast, res.Accounts[len(res.Accounts)-1].Hash; exp != got { + return fmt.Errorf("expected last account 0x%x, got 0x%x", exp, got) + } + } + // Reconstruct a partial trie from the response and verify it + keys := make([][]byte, len(hashes)) + for i, key := range hashes { + keys[i] = common.CopyBytes(key[:]) + } + nodes := make(light.NodeList, len(proof)) + for i, node := range proof { + nodes[i] = node + } + proofdb := nodes.NodeSet() + + var end []byte + if len(keys) > 0 { + end = keys[len(keys)-1] + } + _, err = trie.VerifyRangeProof(tc.root, tc.origin[:], end, keys, accounts,
proofdb) + return err +} + +func (s *Suite) snapGetStorageRanges(t *utesting.T, tc *stRangesTest) error { + conn, err := s.dialSnap() + if err != nil { + t.Fatalf("dial failed: %v", err) + } + defer conn.Close() + if err = conn.peer(s.chain, nil); err != nil { + t.Fatalf("peering failed: %v", err) + } + // write request + req := &GetStorageRanges{ + ID: uint64(rand.Int63()), + Root: tc.root, + Accounts: tc.accounts, + Origin: tc.origin, + Limit: tc.limit, + Bytes: tc.nBytes, + } + resp, err := conn.snapRequest(req, req.ID, s.chain) + if err != nil { + return fmt.Errorf("storage ranges request failed: %v", err) + } + var res *snap.StorageRangesPacket + if r, ok := resp.(*StorageRanges); !ok { + return fmt.Errorf("storage ranges response wrong: %T %v", resp, resp) + } else { + res = (*snap.StorageRangesPacket)(r) + } + gotSlots := 0 + // Ensure the ranges are monotonically increasing + for i, slots := range res.Slots { + gotSlots += len(slots) + for j := 1; j < len(slots); j++ { + if bytes.Compare(slots[j-1].Hash[:], slots[j].Hash[:]) >= 0 { + return fmt.Errorf("storage slots not monotonically increasing for account #%d: #%d [%x] vs #%d [%x]", i, j-1, slots[j-1].Hash[:], j, slots[j].Hash[:]) + } + } + } + if exp, got := tc.expSlots, gotSlots; exp != got { + return fmt.Errorf("expected %d slots, got %d", exp, got) + } + return nil +} + +func (s *Suite) snapGetByteCodes(t *utesting.T, tc *byteCodesTest) error { + conn, err := s.dialSnap() + if err != nil { + t.Fatalf("dial failed: %v", err) + } + defer conn.Close() + if err = conn.peer(s.chain, nil); err != nil { + t.Fatalf("peering failed: %v", err) + } + // write request + req := &GetByteCodes{ + ID: uint64(rand.Int63()), + Hashes: tc.hashes, + Bytes: tc.nBytes, + } + resp, err := conn.snapRequest(req, req.ID, s.chain) + if err != nil { + return fmt.Errorf("getBytecodes request failed: %v", err) + } + var res *snap.ByteCodesPacket + if r, ok := resp.(*ByteCodes); !ok { + return fmt.Errorf("bytecodes response wrong: %T %v", resp, resp) + } else { + res = (*snap.ByteCodesPacket)(r) + } + if exp, got := tc.expHashes, len(res.Codes); exp != got { + for i, c := range res.Codes { + fmt.Printf("%d.
%#x\n", i, c) + } + return fmt.Errorf("expected %d bytecodes, got %d", exp, got) + } + // Cross reference the requested bytecodes with the response to find gaps + // that the serving node is missing + var ( + bytecodes = res.Codes + hasher = sha3.NewLegacyKeccak256().(crypto.KeccakState) + hash = make([]byte, 32) + codes = make([][]byte, len(req.Hashes)) + ) + + for i, j := 0, 0; i < len(bytecodes); i++ { + // Find the next hash that we've been served, leaving misses with nils + hasher.Reset() + hasher.Write(bytecodes[i]) + hasher.Read(hash) + + for j < len(req.Hashes) && !bytes.Equal(hash, req.Hashes[j][:]) { + j++ + } + if j < len(req.Hashes) { + codes[j] = bytecodes[i] + j++ + continue + } + // We've either ran out of hashes, or got unrequested data + return errors.New("unexpected bytecode") + } + + return nil +} + +func (s *Suite) snapGetTrieNodes(t *utesting.T, tc *trieNodesTest) error { + conn, err := s.dialSnap() + if err != nil { + t.Fatalf("dial failed: %v", err) + } + defer conn.Close() + if err = conn.peer(s.chain, nil); err != nil { + t.Fatalf("peering failed: %v", err) + } + // write request + req := &GetTrieNodes{ + ID: uint64(rand.Int63()), + Root: tc.root, + Paths: tc.paths, + Bytes: tc.nBytes, + } + resp, err := conn.snapRequest(req, req.ID, s.chain) + if err != nil { + if tc.expReject { + return nil + } + return fmt.Errorf("trienodes request failed: %v", err) + } + var res *snap.TrieNodesPacket + if r, ok := resp.(*TrieNodes); !ok { + return fmt.Errorf("trienodes response wrong: %T %v", resp, resp) + } else { + res = (*snap.TrieNodesPacket)(r) + } + + // Check the correctness + + // Cross reference the requested trienodes with the response to find gaps + // that the serving node is missing + hasher := sha3.NewLegacyKeccak256().(crypto.KeccakState) + hash := make([]byte, 32) + trienodes := res.Nodes + if got, want := len(trienodes), len(tc.expHashes); got != want { + return fmt.Errorf("wrong trienode count, got %d, want %d\n", got, want) + } + for i, trienode := range trienodes { + hasher.Reset() + hasher.Write(trienode) + hasher.Read(hash) + if got, want := hash, tc.expHashes[i]; !bytes.Equal(got, want[:]) { + fmt.Printf("hash %d wrong, got %#x, want %#x\n", i, got, want) + err = fmt.Errorf("hash %d wrong, got %#x, want %#x", i, got, want) + } + } + return err +} diff --git a/cmd/devp2p/internal/ethtest/snapTypes.go b/cmd/devp2p/internal/ethtest/snapTypes.go new file mode 100644 index 000000000..290d06159 --- /dev/null +++ b/cmd/devp2p/internal/ethtest/snapTypes.go @@ -0,0 +1,36 @@ +package ethtest + +import "github.com/scroll-tech/go-ethereum/eth/protocols/snap" + +// GetAccountRange represents an account range query. 
diff --git a/cmd/devp2p/internal/ethtest/snapTypes.go b/cmd/devp2p/internal/ethtest/snapTypes.go new file mode 100644 index 000000000..290d06159 --- /dev/null +++ b/cmd/devp2p/internal/ethtest/snapTypes.go @@ -0,0 +1,36 @@ +package ethtest + +import "github.com/scroll-tech/go-ethereum/eth/protocols/snap" + +// GetAccountRange represents an account range query. +type GetAccountRange snap.GetAccountRangePacket + +func (g GetAccountRange) Code() int { return 33 } + +type AccountRange snap.AccountRangePacket + +func (g AccountRange) Code() int { return 34 } + +type GetStorageRanges snap.GetStorageRangesPacket + +func (g GetStorageRanges) Code() int { return 35 } + +type StorageRanges snap.StorageRangesPacket + +func (g StorageRanges) Code() int { return 36 } + +type GetByteCodes snap.GetByteCodesPacket + +func (g GetByteCodes) Code() int { return 37 } + +type ByteCodes snap.ByteCodesPacket + +func (g ByteCodes) Code() int { return 38 } + +type GetTrieNodes snap.GetTrieNodesPacket + +func (g GetTrieNodes) Code() int { return 39 } + +type TrieNodes snap.TrieNodesPacket + +func (g TrieNodes) Code() int { return 40 } diff --git a/cmd/devp2p/internal/ethtest/suite.go b/cmd/devp2p/internal/ethtest/suite.go index 0e555119e..8049a3339 100644 --- a/cmd/devp2p/internal/ethtest/suite.go +++ b/cmd/devp2p/internal/ethtest/suite.go @@ -125,6 +125,16 @@ func (s *Suite) Eth66Tests() []utesting.Test { } } +func (s *Suite) SnapTests() []utesting.Test { + return []utesting.Test{ + {Name: "TestSnapStatus", Fn: s.TestSnapStatus}, + {Name: "TestSnapGetAccountRange", Fn: s.TestSnapGetAccountRange}, + {Name: "TestSnapGetByteCodes", Fn: s.TestSnapGetByteCodes}, + {Name: "TestSnapGetTrieNodes", Fn: s.TestSnapTrieNodes}, + {Name: "TestSnapGetStorageRanges", Fn: s.TestSnapGetStorageRanges}, + } +} + var ( eth66 = true // indicates whether suite should negotiate eth66 connection eth65 = false // indicates whether suite should negotiate eth65 connection or below. diff --git a/cmd/devp2p/internal/ethtest/suite_test.go b/cmd/devp2p/internal/ethtest/suite_test.go index 42ebb3c69..09c2f527b 100644 --- a/cmd/devp2p/internal/ethtest/suite_test.go +++ b/cmd/devp2p/internal/ethtest/suite_test.go @@ -55,6 +55,27 @@ func TestEthSuite(t *testing.T) { } } +func TestSnapSuite(t *testing.T) { + geth, err := runGeth() + if err != nil { + t.Fatalf("could not run geth: %v", err) + } + defer geth.Close() + + suite, err := NewSuite(geth.Server().Self(), fullchainFile, genesisFile) + if err != nil { + t.Fatalf("could not create new test suite: %v", err) + } + for _, test := range suite.SnapTests() { + t.Run(test.Name, func(t *testing.T) { + result := utesting.RunTAP([]utesting.Test{{Name: test.Name, Fn: test.Fn}}, os.Stdout) + if result[0].Failed { + t.Fatal() + } + }) + } +} + // runGeth creates and starts a geth node func runGeth() (*node.Node, error) { stack, err := node.New(&node.Config{ diff --git a/cmd/devp2p/internal/ethtest/types.go b/cmd/devp2p/internal/ethtest/types.go index 4aaa06b84..1921449eb 100644 --- a/cmd/devp2p/internal/ethtest/types.go +++ b/cmd/devp2p/internal/ethtest/types.go @@ -19,6 +19,7 @@ package ethtest import ( "crypto/ecdsa" "fmt" + "time" "github.com/scroll-tech/go-ethereum/eth/protocols/eth" "github.com/scroll-tech/go-ethereum/p2p" @@ -126,10 +127,12 @@ func (pt PooledTransactions) Code() int { return 26 } // Conn represents an individual connection with a peer type Conn struct { *rlpx.Conn - ourKey *ecdsa.PrivateKey - negotiatedProtoVersion uint - ourHighestProtoVersion uint - caps []p2p.Cap + ourKey *ecdsa.PrivateKey + negotiatedProtoVersion uint + negotiatedSnapProtoVersion uint + ourHighestProtoVersion uint + ourHighestSnapProtoVersion uint + caps []p2p.Cap } // Read reads an eth packet from the connection.
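The wrappers in snapTypes.go above pin the snap/1 messages to codes 33 through 40. Those numbers are not arbitrary: under devp2p's static message-ID multiplexing, the base p2p protocol reserves codes 0x00-0x0f, the negotiated eth/66 protocol occupies the next 17 codes (16-32), and snap/1's eight messages follow immediately after. A sketch of that arithmetic, assuming exactly those protocol lengths:

```go
package main

import "fmt"

const (
	baseProtoLength = 16 // devp2p reserves message codes 0x00-0x0f for the base protocol
	ethProtoLength  = 17 // eth/66 defines 17 message types, occupying codes 16-32
)

// snapCode maps a snap/1 message offset (0 = GetAccountRange ... 7 = TrieNodes)
// onto its multiplexed wire code.
func snapCode(offset int) int {
	return baseProtoLength + ethProtoLength + offset
}

func main() {
	names := []string{
		"GetAccountRange", "AccountRange",
		"GetStorageRanges", "StorageRanges",
		"GetByteCodes", "ByteCodes",
		"GetTrieNodes", "TrieNodes",
	}
	for i, name := range names {
		fmt.Printf("%-16s -> %d\n", name, snapCode(i)) // 33 through 40
	}
}
```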
@@ -259,12 +262,7 @@ func (c *Conn) Read66() (uint64, Message) { // Write writes a eth packet to the connection. func (c *Conn) Write(msg Message) error { - // check if message is eth protocol message - var ( - payload []byte - err error - ) - payload, err = rlp.EncodeToBytes(msg) + payload, err := rlp.EncodeToBytes(msg) if err != nil { return err } @@ -281,3 +279,43 @@ func (c *Conn) Write66(req eth.Packet, code int) error { _, err = c.Conn.Write(uint64(code), payload) return err } + +// ReadSnap reads a snap/1 response with the given id from the connection. +func (c *Conn) ReadSnap(id uint64) (Message, error) { + respId := id + 1 + start := time.Now() + for respId != id && time.Since(start) < timeout { + code, rawData, _, err := c.Conn.Read() + if err != nil { + return nil, fmt.Errorf("could not read from connection: %v", err) + } + var snpMsg interface{} + switch int(code) { + case (GetAccountRange{}).Code(): + snpMsg = new(GetAccountRange) + case (AccountRange{}).Code(): + snpMsg = new(AccountRange) + case (GetStorageRanges{}).Code(): + snpMsg = new(GetStorageRanges) + case (StorageRanges{}).Code(): + snpMsg = new(StorageRanges) + case (GetByteCodes{}).Code(): + snpMsg = new(GetByteCodes) + case (ByteCodes{}).Code(): + snpMsg = new(ByteCodes) + case (GetTrieNodes{}).Code(): + snpMsg = new(GetTrieNodes) + case (TrieNodes{}).Code(): + snpMsg = new(TrieNodes) + default: + //return nil, fmt.Errorf("invalid message code: %d", code) + continue + } + if err := rlp.DecodeBytes(rawData, snpMsg); err != nil { + return nil, fmt.Errorf("could not rlp decode message: %v", err) + } + return snpMsg.(Message), nil + + } + return nil, fmt.Errorf("request timed out") +} diff --git a/cmd/devp2p/rlpxcmd.go b/cmd/devp2p/rlpxcmd.go index 210639c55..0b5e02bd9 100644 --- a/cmd/devp2p/rlpxcmd.go +++ b/cmd/devp2p/rlpxcmd.go @@ -37,6 +37,7 @@ var ( Subcommands: []cli.Command{ rlpxPingCommand, rlpxEthTestCommand, + rlpxSnapTestCommand, }, } rlpxPingCommand = cli.Command{ @@ -54,6 +55,16 @@ var ( testTAPFlag, }, } + rlpxSnapTestCommand = cli.Command{ + Name: "snap-test", + Usage: "Runs snap protocol tests against a node", + ArgsUsage: "<node> <chain file> <genesis file>", + Action: rlpxSnapTest, + Flags: []cli.Flag{ + testPatternFlag, + testTAPFlag, + }, + } ) func rlpxPing(ctx *cli.Context) error { @@ -107,3 +118,15 @@ func rlpxEthTest(ctx *cli.Context) error { } return runTests(ctx, suite.AllEthTests()) } + +// rlpxSnapTest runs the snap protocol test suite. +func rlpxSnapTest(ctx *cli.Context) error { + if ctx.NArg() < 3 { + exit("missing path to chain.rlp as command-line argument") + } + suite, err := ethtest.NewSuite(getNodeArg(ctx), ctx.Args()[1], ctx.Args()[2]) + if err != nil { + exit(err) + } + return runTests(ctx, suite.SnapTests()) +} diff --git a/cmd/utils/flags.go b/cmd/utils/flags.go index 56bfa600b..562b36b4c 100644 --- a/cmd/utils/flags.go +++ b/cmd/utils/flags.go @@ -220,7 +220,7 @@ var ( defaultSyncMode = ethconfig.Defaults.SyncMode SyncModeFlag = TextMarshalerFlag{ Name: "syncmode", - Usage: `Blockchain sync mode ("fast", "full", "snap" or "light")`, + Usage: `Blockchain sync mode ("snap", "full" or "light")`, Value: &defaultSyncMode, } GCModeFlag = cli.StringFlag{
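With `ReadSnap` and the `snap-test` command in place, every test in snap.go follows the same round-trip recipe: `dialSnap` to advertise eth/66 plus snap/1, `peer` to complete the status exchange, then `snapRequest` to write one snap message and wait for its reply. A condensed sketch of that flow using the types defined in this patch (`exampleAccountRange` itself is illustrative, not part of the change); on the command line, the equivalent entry point would be `devp2p rlpx snap-test <node> <chain.rlp> <genesis.json>`:

```go
package ethtest

import (
	"fmt"
	"math/rand"

	"github.com/scroll-tech/go-ethereum/common"
)

// exampleAccountRange demonstrates the round trip the snap tests are built
// on: dial with snap/1 advertised, peer, then issue a single GetAccountRange
// and type-assert the reply.
func (s *Suite) exampleAccountRange(root, origin, limit common.Hash) (*AccountRange, error) {
	conn, err := s.dialSnap() // eth/66 handshake with snap/1 appended to our caps
	if err != nil {
		return nil, fmt.Errorf("dial failed: %v", err)
	}
	defer conn.Close()
	if err := conn.peer(s.chain, nil); err != nil { // protocol + status handshake
		return nil, fmt.Errorf("peering failed: %v", err)
	}
	req := &GetAccountRange{
		ID:     uint64(rand.Int63()), // request id echoed by the response
		Root:   root,                 // state root to serve the range from
		Origin: origin,               // first account hash of interest
		Limit:  limit,                // last account hash of interest
		Bytes:  4000,                 // soft cap on the response size
	}
	resp, err := conn.snapRequest(req, req.ID, s.chain)
	if err != nil {
		return nil, fmt.Errorf("account range request failed: %v", err)
	}
	res, ok := resp.(*AccountRange)
	if !ok {
		return nil, fmt.Errorf("unexpected response: %T", resp)
	}
	return res, nil
}
```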
diff --git a/core/blockchain.go b/core/blockchain.go index 7fc8711ee..36a5611a6 100644 --- a/core/blockchain.go +++ b/core/blockchain.go @@ -645,9 +645,9 @@ func (bc *BlockChain) setHeadBeyondRoot(head uint64, root common.Hash, repair bo return rootNumber, bc.loadLastState() } -// FastSyncCommitHead sets the current head block to the one defined by the hash +// SnapSyncCommitHead sets the current head block to the one defined by the hash // irrelevant what the chain contents were prior. -func (bc *BlockChain) FastSyncCommitHead(hash common.Hash) error { +func (bc *BlockChain) SnapSyncCommitHead(hash common.Hash) error { // Make sure that both the block as well at its state trie exists block := bc.GetBlockByHash(hash) if block == nil { @@ -752,30 +752,24 @@ func (bc *BlockChain) ExportN(w io.Writer, first uint64, last uint64) error { // // Note, this function assumes that the `mu` mutex is held! func (bc *BlockChain) writeHeadBlock(block *types.Block) { - // If the block is on a side chain or an unknown one, force other heads onto it too - updateHeads := rawdb.ReadCanonicalHash(bc.db, block.NumberU64()) != block.Hash() - // Add the block to the canonical chain number scheme and mark as the head batch := bc.db.NewBatch() + rawdb.WriteHeadHeaderHash(batch, block.Hash()) + rawdb.WriteHeadFastBlockHash(batch, block.Hash()) rawdb.WriteCanonicalHash(batch, block.Hash(), block.NumberU64()) rawdb.WriteTxLookupEntriesByBlock(batch, block) rawdb.WriteHeadBlockHash(batch, block.Hash()) - // If the block is better than our head or is on a different chain, force update heads - if updateHeads { - rawdb.WriteHeadHeaderHash(batch, block.Hash()) - rawdb.WriteHeadFastBlockHash(batch, block.Hash()) - } // Flush the whole batch into the disk, exit the node if failed if err := batch.Write(); err != nil { log.Crit("Failed to update chain indexes and markers", "err", err) } // Update all in-memory chain markers in the last step - if updateHeads { - bc.hc.SetCurrentHeader(block.Header()) - bc.currentFastBlock.Store(block) - headFastBlockGauge.Update(int64(block.NumberU64())) - } + bc.hc.SetCurrentHeader(block.Header()) + + bc.currentFastBlock.Store(block) + headFastBlockGauge.Update(int64(block.NumberU64())) + bc.currentBlock.Store(block) headBlockGauge.Update(int64(block.NumberU64())) } @@ -2191,6 +2185,9 @@ Error: %v // of the header retrieval mechanisms already need to verify nonces, as well as // because nonces can be verified sparsely, not needing to check each. func (bc *BlockChain) InsertHeaderChain(chain []*types.Header, checkFreq int) (int, error) { + if len(chain) == 0 { + return 0, nil + } start := time.Now() if i, err := bc.hc.ValidateHeaderChain(chain, checkFreq); err != nil { return i, err diff --git a/core/blockchain_reader.go b/core/blockchain_reader.go index ffa8db3f0..74dba31bd 100644 --- a/core/blockchain_reader.go +++ b/core/blockchain_reader.go @@ -119,6 +119,9 @@ func (bc *BlockChain) HasBlock(hash common.Hash, number uint64) bool { if bc.blockCache.Contains(hash) { return true } + if !bc.HasHeader(hash, number) { + return false + } return rawdb.HasBody(bc.db, hash, number) } diff --git a/core/blockchain_repair_test.go b/core/blockchain_repair_test.go index 8e846cf00..5ce30a906 100644 --- a/core/blockchain_repair_test.go +++ b/core/blockchain_repair_test.go @@ -79,10 +79,10 @@ func testShortRepair(t *testing.T, snapshots bool) { // already committed, after which the process crashed. In this case we expect the full // chain to be rolled back to the committed block, but the chain data itself left in // the database for replaying.
-func TestShortFastSyncedRepair(t *testing.T) { testShortFastSyncedRepair(t, false) } -func TestShortFastSyncedRepairWithSnapshots(t *testing.T) { testShortFastSyncedRepair(t, true) } +func TestShortSnapSyncedRepair(t *testing.T) { testShortSnapSyncedRepair(t, false) } +func TestShortSnapSyncedRepairWithSnapshots(t *testing.T) { testShortSnapSyncedRepair(t, true) } -func testShortFastSyncedRepair(t *testing.T, snapshots bool) { +func testShortSnapSyncedRepair(t *testing.T, snapshots bool) { // Chain: // G->C1->C2->C3->C4->C5->C6->C7->C8 (HEAD) // @@ -119,10 +119,10 @@ func testShortFastSyncedRepair(t *testing.T, snapshots bool) { // not yet committed, but the process crashed. In this case we expect the chain to // detect that it was fast syncing and not delete anything, since we can just pick // up directly where we left off. -func TestShortFastSyncingRepair(t *testing.T) { testShortFastSyncingRepair(t, false) } -func TestShortFastSyncingRepairWithSnapshots(t *testing.T) { testShortFastSyncingRepair(t, true) } +func TestShortSnapSyncingRepair(t *testing.T) { testShortSnapSyncingRepair(t, false) } +func TestShortSnapSyncingRepairWithSnapshots(t *testing.T) { testShortSnapSyncingRepair(t, true) } -func testShortFastSyncingRepair(t *testing.T, snapshots bool) { +func testShortSnapSyncingRepair(t *testing.T, snapshots bool) { // Chain: // G->C1->C2->C3->C4->C5->C6->C7->C8 (HEAD) // @@ -203,14 +203,14 @@ func testShortOldForkedRepair(t *testing.T, snapshots bool) { // crashed. In this test scenario the side chain is below the committed block. In // this case we expect the canonical chain to be rolled back to the committed block, // but the chain data itself left in the database for replaying. -func TestShortOldForkedFastSyncedRepair(t *testing.T) { - testShortOldForkedFastSyncedRepair(t, false) +func TestShortOldForkedSnapSyncedRepair(t *testing.T) { + testShortOldForkedSnapSyncedRepair(t, false) } -func TestShortOldForkedFastSyncedRepairWithSnapshots(t *testing.T) { - testShortOldForkedFastSyncedRepair(t, true) +func TestShortOldForkedSnapSyncedRepairWithSnapshots(t *testing.T) { + testShortOldForkedSnapSyncedRepair(t, true) } -func testShortOldForkedFastSyncedRepair(t *testing.T, snapshots bool) { +func testShortOldForkedSnapSyncedRepair(t *testing.T, snapshots bool) { // Chain: // G->C1->C2->C3->C4->C5->C6->C7->C8 (HEAD) // └->S1->S2->S3 @@ -250,14 +250,14 @@ func testShortOldForkedFastSyncedRepair(t *testing.T, snapshots bool) { // test scenario the side chain is below the committed block. In this case we expect // the chain to detect that it was fast syncing and not delete anything, since we // can just pick up directly where we left off. -func TestShortOldForkedFastSyncingRepair(t *testing.T) { - testShortOldForkedFastSyncingRepair(t, false) +func TestShortOldForkedSnapSyncingRepair(t *testing.T) { + testShortOldForkedSnapSyncingRepair(t, false) } -func TestShortOldForkedFastSyncingRepairWithSnapshots(t *testing.T) { - testShortOldForkedFastSyncingRepair(t, true) +func TestShortOldForkedSnapSyncingRepairWithSnapshots(t *testing.T) { + testShortOldForkedSnapSyncingRepair(t, true) } -func testShortOldForkedFastSyncingRepair(t *testing.T, snapshots bool) { +func testShortOldForkedSnapSyncingRepair(t *testing.T, snapshots bool) { // Chain: // G->C1->C2->C3->C4->C5->C6->C7->C8 (HEAD) // └->S1->S2->S3 @@ -340,14 +340,14 @@ func testShortNewlyForkedRepair(t *testing.T, snapshots bool) { // crashed. In this test scenario the side chain reaches above the committed block. 
// In this case we expect the canonical chain to be rolled back to the committed // block, but the chain data itself left in the database for replaying. -func TestShortNewlyForkedFastSyncedRepair(t *testing.T) { - testShortNewlyForkedFastSyncedRepair(t, false) +func TestShortNewlyForkedSnapSyncedRepair(t *testing.T) { + testShortNewlyForkedSnapSyncedRepair(t, false) } -func TestShortNewlyForkedFastSyncedRepairWithSnapshots(t *testing.T) { - testShortNewlyForkedFastSyncedRepair(t, true) +func TestShortNewlyForkedSnapSyncedRepairWithSnapshots(t *testing.T) { + testShortNewlyForkedSnapSyncedRepair(t, true) } -func testShortNewlyForkedFastSyncedRepair(t *testing.T, snapshots bool) { +func testShortNewlyForkedSnapSyncedRepair(t *testing.T, snapshots bool) { // Chain: // G->C1->C2->C3->C4->C5->C6->C7->C8 (HEAD) // └->S1->S2->S3->S4->S5->S6 @@ -387,14 +387,14 @@ func testShortNewlyForkedFastSyncedRepair(t *testing.T, snapshots bool) { // this test scenario the side chain reaches above the committed block. In this // case we expect the chain to detect that it was fast syncing and not delete // anything, since we can just pick up directly where we left off. -func TestShortNewlyForkedFastSyncingRepair(t *testing.T) { - testShortNewlyForkedFastSyncingRepair(t, false) +func TestShortNewlyForkedSnapSyncingRepair(t *testing.T) { + testShortNewlyForkedSnapSyncingRepair(t, false) } -func TestShortNewlyForkedFastSyncingRepairWithSnapshots(t *testing.T) { - testShortNewlyForkedFastSyncingRepair(t, true) +func TestShortNewlyForkedSnapSyncingRepairWithSnapshots(t *testing.T) { + testShortNewlyForkedSnapSyncingRepair(t, true) } -func testShortNewlyForkedFastSyncingRepair(t *testing.T, snapshots bool) { +func testShortNewlyForkedSnapSyncingRepair(t *testing.T, snapshots bool) { // Chain: // G->C1->C2->C3->C4->C5->C6->C7->C8 (HEAD) // └->S1->S2->S3->S4->S5->S6 @@ -475,14 +475,14 @@ func testShortReorgedRepair(t *testing.T, snapshots bool) { // the fast sync pivot point was already committed to disk and then the process // crashed. In this case we expect the canonical chain to be rolled back to the // committed block, but the chain data itself left in the database for replaying. -func TestShortReorgedFastSyncedRepair(t *testing.T) { - testShortReorgedFastSyncedRepair(t, false) +func TestShortReorgedSnapSyncedRepair(t *testing.T) { + testShortReorgedSnapSyncedRepair(t, false) } -func TestShortReorgedFastSyncedRepairWithSnapshots(t *testing.T) { - testShortReorgedFastSyncedRepair(t, true) +func TestShortReorgedSnapSyncedRepairWithSnapshots(t *testing.T) { + testShortReorgedSnapSyncedRepair(t, true) } -func testShortReorgedFastSyncedRepair(t *testing.T, snapshots bool) { +func testShortReorgedSnapSyncedRepair(t *testing.T, snapshots bool) { // Chain: // G->C1->C2->C3->C4->C5->C6->C7->C8 (HEAD) // └->S1->S2->S3->S4->S5->S6->S7->S8->S9->S10 @@ -521,14 +521,14 @@ func testShortReorgedFastSyncedRepair(t *testing.T, snapshots bool) { // the fast sync pivot point was not yet committed, but the process crashed. In // this case we expect the chain to detect that it was fast syncing and not delete // anything, since we can just pick up directly where we left off. 
-func TestShortReorgedFastSyncingRepair(t *testing.T) { - testShortReorgedFastSyncingRepair(t, false) +func TestShortReorgedSnapSyncingRepair(t *testing.T) { + testShortReorgedSnapSyncingRepair(t, false) } -func TestShortReorgedFastSyncingRepairWithSnapshots(t *testing.T) { - testShortReorgedFastSyncingRepair(t, true) +func TestShortReorgedSnapSyncingRepairWithSnapshots(t *testing.T) { + testShortReorgedSnapSyncingRepair(t, true) } -func testShortReorgedFastSyncingRepair(t *testing.T, snapshots bool) { +func testShortReorgedSnapSyncingRepair(t *testing.T, snapshots bool) { // Chain: // G->C1->C2->C3->C4->C5->C6->C7->C8 (HEAD) // └->S1->S2->S3->S4->S5->S6->S7->S8->S9->S10 @@ -656,14 +656,14 @@ func testLongDeepRepair(t *testing.T, snapshots bool) { // sync pivot point - newer than the ancient limit - was already committed, after // which the process crashed. In this case we expect the chain to be rolled back // to the committed block, with everything afterwads kept as fast sync data. -func TestLongFastSyncedShallowRepair(t *testing.T) { - testLongFastSyncedShallowRepair(t, false) +func TestLongSnapSyncedShallowRepair(t *testing.T) { + testLongSnapSyncedShallowRepair(t, false) } -func TestLongFastSyncedShallowRepairWithSnapshots(t *testing.T) { - testLongFastSyncedShallowRepair(t, true) +func TestLongSnapSyncedShallowRepairWithSnapshots(t *testing.T) { + testLongSnapSyncedShallowRepair(t, true) } -func testLongFastSyncedShallowRepair(t *testing.T, snapshots bool) { +func testLongSnapSyncedShallowRepair(t *testing.T, snapshots bool) { // Chain: // G->C1->C2->C3->C4->C5->C6->C7->C8->C9->C10->C11->C12->C13->C14->C15->C16->C17->C18 (HEAD) // @@ -705,10 +705,10 @@ func testLongFastSyncedShallowRepair(t *testing.T, snapshots bool) { // sync pivot point - older than the ancient limit - was already committed, after // which the process crashed. In this case we expect the chain to be rolled back // to the committed block, with everything afterwads deleted. -func TestLongFastSyncedDeepRepair(t *testing.T) { testLongFastSyncedDeepRepair(t, false) } -func TestLongFastSyncedDeepRepairWithSnapshots(t *testing.T) { testLongFastSyncedDeepRepair(t, true) } +func TestLongSnapSyncedDeepRepair(t *testing.T) { testLongSnapSyncedDeepRepair(t, false) } +func TestLongSnapSyncedDeepRepairWithSnapshots(t *testing.T) { testLongSnapSyncedDeepRepair(t, true) } -func testLongFastSyncedDeepRepair(t *testing.T, snapshots bool) { +func testLongSnapSyncedDeepRepair(t *testing.T, snapshots bool) { // Chain: // G->C1->C2->C3->C4->C5->C6->C7->C8->C9->C10->C11->C12->C13->C14->C15->C16->C17->C18->C19->C20->C21->C22->C23->C24 (HEAD) // @@ -750,14 +750,14 @@ func testLongFastSyncedDeepRepair(t *testing.T, snapshots bool) { // process crashed. In this case we expect the chain to detect that it was fast // syncing and not delete anything, since we can just pick up directly where we // left off. 
-func TestLongFastSyncingShallowRepair(t *testing.T) { - testLongFastSyncingShallowRepair(t, false) +func TestLongSnapSyncingShallowRepair(t *testing.T) { + testLongSnapSyncingShallowRepair(t, false) } -func TestLongFastSyncingShallowRepairWithSnapshots(t *testing.T) { - testLongFastSyncingShallowRepair(t, true) +func TestLongSnapSyncingShallowRepairWithSnapshots(t *testing.T) { + testLongSnapSyncingShallowRepair(t, true) } -func testLongFastSyncingShallowRepair(t *testing.T, snapshots bool) { +func testLongSnapSyncingShallowRepair(t *testing.T, snapshots bool) { // Chain: // G->C1->C2->C3->C4->C5->C6->C7->C8->C9->C10->C11->C12->C13->C14->C15->C16->C17->C18 (HEAD) // @@ -800,10 +800,10 @@ func testLongFastSyncingShallowRepair(t *testing.T, snapshots bool) { // process crashed. In this case we expect the chain to detect that it was fast // syncing and not delete anything, since we can just pick up directly where we // left off. -func TestLongFastSyncingDeepRepair(t *testing.T) { testLongFastSyncingDeepRepair(t, false) } -func TestLongFastSyncingDeepRepairWithSnapshots(t *testing.T) { testLongFastSyncingDeepRepair(t, true) } +func TestLongSnapSyncingDeepRepair(t *testing.T) { testLongSnapSyncingDeepRepair(t, false) } +func TestLongSnapSyncingDeepRepairWithSnapshots(t *testing.T) { testLongSnapSyncingDeepRepair(t, true) } -func testLongFastSyncingDeepRepair(t *testing.T, snapshots bool) { +func testLongSnapSyncingDeepRepair(t *testing.T, snapshots bool) { // Chain: // G->C1->C2->C3->C4->C5->C6->C7->C8->C9->C10->C11->C12->C13->C14->C15->C16->C17->C18->C19->C20->C21->C22->C23->C24 (HEAD) // @@ -946,14 +946,14 @@ func testLongOldForkedDeepRepair(t *testing.T, snapshots bool) { // the side chain is below the committed block. In this case we expect the chain // to be rolled back to the committed block, with everything afterwads kept as // fast sync data; the side chain completely nuked by the freezer. -func TestLongOldForkedFastSyncedShallowRepair(t *testing.T) { - testLongOldForkedFastSyncedShallowRepair(t, false) +func TestLongOldForkedSnapSyncedShallowRepair(t *testing.T) { + testLongOldForkedSnapSyncedShallowRepair(t, false) } -func TestLongOldForkedFastSyncedShallowRepairWithSnapshots(t *testing.T) { - testLongOldForkedFastSyncedShallowRepair(t, true) +func TestLongOldForkedSnapSyncedShallowRepairWithSnapshots(t *testing.T) { + testLongOldForkedSnapSyncedShallowRepair(t, true) } -func testLongOldForkedFastSyncedShallowRepair(t *testing.T, snapshots bool) { +func testLongOldForkedSnapSyncedShallowRepair(t *testing.T, snapshots bool) { // Chain: // G->C1->C2->C3->C4->C5->C6->C7->C8->C9->C10->C11->C12->C13->C14->C15->C16->C17->C18 (HEAD) // └->S1->S2->S3 @@ -998,14 +998,14 @@ func testLongOldForkedFastSyncedShallowRepair(t *testing.T, snapshots bool) { // the side chain is below the committed block. In this case we expect the canonical // chain to be rolled back to the committed block, with everything afterwads deleted; // the side chain completely nuked by the freezer. 
-func TestLongOldForkedFastSyncedDeepRepair(t *testing.T) { - testLongOldForkedFastSyncedDeepRepair(t, false) +func TestLongOldForkedSnapSyncedDeepRepair(t *testing.T) { + testLongOldForkedSnapSyncedDeepRepair(t, false) } -func TestLongOldForkedFastSyncedDeepRepairWithSnapshots(t *testing.T) { - testLongOldForkedFastSyncedDeepRepair(t, true) +func TestLongOldForkedSnapSyncedDeepRepairWithSnapshots(t *testing.T) { + testLongOldForkedSnapSyncedDeepRepair(t, true) } -func testLongOldForkedFastSyncedDeepRepair(t *testing.T, snapshots bool) { +func testLongOldForkedSnapSyncedDeepRepair(t *testing.T, snapshots bool) { // Chain: // G->C1->C2->C3->C4->C5->C6->C7->C8->C9->C10->C11->C12->C13->C14->C15->C16->C17->C18->C19->C20->C21->C22->C23->C24 (HEAD) // └->S1->S2->S3 @@ -1049,14 +1049,14 @@ func testLongOldForkedFastSyncedDeepRepair(t *testing.T, snapshots bool) { // chain is below the committed block. In this case we expect the chain to detect // that it was fast syncing and not delete anything. The side chain is completely // nuked by the freezer. -func TestLongOldForkedFastSyncingShallowRepair(t *testing.T) { - testLongOldForkedFastSyncingShallowRepair(t, false) +func TestLongOldForkedSnapSyncingShallowRepair(t *testing.T) { + testLongOldForkedSnapSyncingShallowRepair(t, false) } -func TestLongOldForkedFastSyncingShallowRepairWithSnapshots(t *testing.T) { - testLongOldForkedFastSyncingShallowRepair(t, true) +func TestLongOldForkedSnapSyncingShallowRepairWithSnapshots(t *testing.T) { + testLongOldForkedSnapSyncingShallowRepair(t, true) } -func testLongOldForkedFastSyncingShallowRepair(t *testing.T, snapshots bool) { +func testLongOldForkedSnapSyncingShallowRepair(t *testing.T, snapshots bool) { // Chain: // G->C1->C2->C3->C4->C5->C6->C7->C8->C9->C10->C11->C12->C13->C14->C15->C16->C17->C18 (HEAD) // └->S1->S2->S3 @@ -1101,14 +1101,14 @@ func testLongOldForkedFastSyncingShallowRepair(t *testing.T, snapshots bool) { // chain is below the committed block. In this case we expect the chain to detect // that it was fast syncing and not delete anything. The side chain is completely // nuked by the freezer. -func TestLongOldForkedFastSyncingDeepRepair(t *testing.T) { - testLongOldForkedFastSyncingDeepRepair(t, false) +func TestLongOldForkedSnapSyncingDeepRepair(t *testing.T) { + testLongOldForkedSnapSyncingDeepRepair(t, false) } -func TestLongOldForkedFastSyncingDeepRepairWithSnapshots(t *testing.T) { - testLongOldForkedFastSyncingDeepRepair(t, true) +func TestLongOldForkedSnapSyncingDeepRepairWithSnapshots(t *testing.T) { + testLongOldForkedSnapSyncingDeepRepair(t, true) } -func testLongOldForkedFastSyncingDeepRepair(t *testing.T, snapshots bool) { +func testLongOldForkedSnapSyncingDeepRepair(t *testing.T, snapshots bool) { // Chain: // G->C1->C2->C3->C4->C5->C6->C7->C8->C9->C10->C11->C12->C13->C14->C15->C16->C17->C18->C19->C20->C21->C22->C23->C24 (HEAD) // └->S1->S2->S3 @@ -1252,14 +1252,14 @@ func testLongNewerForkedDeepRepair(t *testing.T, snapshots bool) { // the side chain is above the committed block. In this case we expect the chain // to be rolled back to the committed block, with everything afterwads kept as fast // sync data; the side chain completely nuked by the freezer. 
-func TestLongNewerForkedFastSyncedShallowRepair(t *testing.T) { - testLongNewerForkedFastSyncedShallowRepair(t, false) +func TestLongNewerForkedSnapSyncedShallowRepair(t *testing.T) { + testLongNewerForkedSnapSyncedShallowRepair(t, false) } -func TestLongNewerForkedFastSyncedShallowRepairWithSnapshots(t *testing.T) { - testLongNewerForkedFastSyncedShallowRepair(t, true) +func TestLongNewerForkedSnapSyncedShallowRepairWithSnapshots(t *testing.T) { + testLongNewerForkedSnapSyncedShallowRepair(t, true) } -func testLongNewerForkedFastSyncedShallowRepair(t *testing.T, snapshots bool) { +func testLongNewerForkedSnapSyncedShallowRepair(t *testing.T, snapshots bool) { // Chain: // G->C1->C2->C3->C4->C5->C6->C7->C8->C9->C10->C11->C12->C13->C14->C15->C16->C17->C18 (HEAD) // └->S1->S2->S3->S4->S5->S6->S7->S8->S9->S10->S11->S12 @@ -1304,14 +1304,14 @@ func testLongNewerForkedFastSyncedShallowRepair(t *testing.T, snapshots bool) { // the side chain is above the committed block. In this case we expect the canonical // chain to be rolled back to the committed block, with everything afterwads deleted; // the side chain completely nuked by the freezer. -func TestLongNewerForkedFastSyncedDeepRepair(t *testing.T) { - testLongNewerForkedFastSyncedDeepRepair(t, false) +func TestLongNewerForkedSnapSyncedDeepRepair(t *testing.T) { + testLongNewerForkedSnapSyncedDeepRepair(t, false) } -func TestLongNewerForkedFastSyncedDeepRepairWithSnapshots(t *testing.T) { - testLongNewerForkedFastSyncedDeepRepair(t, true) +func TestLongNewerForkedSnapSyncedDeepRepairWithSnapshots(t *testing.T) { + testLongNewerForkedSnapSyncedDeepRepair(t, true) } -func testLongNewerForkedFastSyncedDeepRepair(t *testing.T, snapshots bool) { +func testLongNewerForkedSnapSyncedDeepRepair(t *testing.T, snapshots bool) { // Chain: // G->C1->C2->C3->C4->C5->C6->C7->C8->C9->C10->C11->C12->C13->C14->C15->C16->C17->C18->C19->C20->C21->C22->C23->C24 (HEAD) // └->S1->S2->S3->S4->S5->S6->S7->S8->S9->S10->S11->S12 @@ -1355,14 +1355,14 @@ func testLongNewerForkedFastSyncedDeepRepair(t *testing.T, snapshots bool) { // chain is above the committed block. In this case we expect the chain to detect // that it was fast syncing and not delete anything. The side chain is completely // nuked by the freezer. -func TestLongNewerForkedFastSyncingShallowRepair(t *testing.T) { - testLongNewerForkedFastSyncingShallowRepair(t, false) +func TestLongNewerForkedSnapSyncingShallowRepair(t *testing.T) { + testLongNewerForkedSnapSyncingShallowRepair(t, false) } -func TestLongNewerForkedFastSyncingShallowRepairWithSnapshots(t *testing.T) { - testLongNewerForkedFastSyncingShallowRepair(t, true) +func TestLongNewerForkedSnapSyncingShallowRepairWithSnapshots(t *testing.T) { + testLongNewerForkedSnapSyncingShallowRepair(t, true) } -func testLongNewerForkedFastSyncingShallowRepair(t *testing.T, snapshots bool) { +func testLongNewerForkedSnapSyncingShallowRepair(t *testing.T, snapshots bool) { // Chain: // G->C1->C2->C3->C4->C5->C6->C7->C8->C9->C10->C11->C12->C13->C14->C15->C16->C17->C18 (HEAD) // └->S1->S2->S3->S4->S5->S6->S7->S8->S9->S10->S11->S12 @@ -1407,14 +1407,14 @@ func testLongNewerForkedFastSyncingShallowRepair(t *testing.T, snapshots bool) { // chain is above the committed block. In this case we expect the chain to detect // that it was fast syncing and not delete anything. The side chain is completely // nuked by the freezer. 
-func TestLongNewerForkedFastSyncingDeepRepair(t *testing.T) { - testLongNewerForkedFastSyncingDeepRepair(t, false) +func TestLongNewerForkedSnapSyncingDeepRepair(t *testing.T) { + testLongNewerForkedSnapSyncingDeepRepair(t, false) } -func TestLongNewerForkedFastSyncingDeepRepairWithSnapshots(t *testing.T) { - testLongNewerForkedFastSyncingDeepRepair(t, true) +func TestLongNewerForkedSnapSyncingDeepRepairWithSnapshots(t *testing.T) { + testLongNewerForkedSnapSyncingDeepRepair(t, true) } -func testLongNewerForkedFastSyncingDeepRepair(t *testing.T, snapshots bool) { +func testLongNewerForkedSnapSyncingDeepRepair(t *testing.T, snapshots bool) { // Chain: // G->C1->C2->C3->C4->C5->C6->C7->C8->C9->C10->C11->C12->C13->C14->C15->C16->C17->C18->C19->C20->C21->C22->C23->C24 (HEAD) // └->S1->S2->S3->S4->S5->S6->S7->S8->S9->S10->S11->S12 @@ -1552,14 +1552,14 @@ func testLongReorgedDeepRepair(t *testing.T, snapshots bool) { // expect the chain to be rolled back to the committed block, with everything // afterwads kept as fast sync data. The side chain completely nuked by the // freezer. -func TestLongReorgedFastSyncedShallowRepair(t *testing.T) { - testLongReorgedFastSyncedShallowRepair(t, false) +func TestLongReorgedSnapSyncedShallowRepair(t *testing.T) { + testLongReorgedSnapSyncedShallowRepair(t, false) } -func TestLongReorgedFastSyncedShallowRepairWithSnapshots(t *testing.T) { - testLongReorgedFastSyncedShallowRepair(t, true) +func TestLongReorgedSnapSyncedShallowRepairWithSnapshots(t *testing.T) { + testLongReorgedSnapSyncedShallowRepair(t, true) } -func testLongReorgedFastSyncedShallowRepair(t *testing.T, snapshots bool) { +func testLongReorgedSnapSyncedShallowRepair(t *testing.T, snapshots bool) { // Chain: // G->C1->C2->C3->C4->C5->C6->C7->C8->C9->C10->C11->C12->C13->C14->C15->C16->C17->C18 (HEAD) // └->S1->S2->S3->S4->S5->S6->S7->S8->S9->S10->S11->S12->S13->S14->S15->S16->S17->S18->S19->S20->S21->S22->S23->S24->S25->S26 @@ -1603,14 +1603,14 @@ func testLongReorgedFastSyncedShallowRepair(t *testing.T, snapshots bool) { // was already committed to disk and then the process crashed. In this case we // expect the canonical chains to be rolled back to the committed block, with // everything afterwads deleted. The side chain completely nuked by the freezer. -func TestLongReorgedFastSyncedDeepRepair(t *testing.T) { - testLongReorgedFastSyncedDeepRepair(t, false) +func TestLongReorgedSnapSyncedDeepRepair(t *testing.T) { + testLongReorgedSnapSyncedDeepRepair(t, false) } -func TestLongReorgedFastSyncedDeepRepairWithSnapshots(t *testing.T) { - testLongReorgedFastSyncedDeepRepair(t, true) +func TestLongReorgedSnapSyncedDeepRepairWithSnapshots(t *testing.T) { + testLongReorgedSnapSyncedDeepRepair(t, true) } -func testLongReorgedFastSyncedDeepRepair(t *testing.T, snapshots bool) { +func testLongReorgedSnapSyncedDeepRepair(t *testing.T, snapshots bool) { // Chain: // G->C1->C2->C3->C4->C5->C6->C7->C8->C9->C10->C11->C12->C13->C14->C15->C16->C17->C18->C19->C20->C21->C22->C23->C24 (HEAD) // └->S1->S2->S3->S4->S5->S6->S7->S8->S9->S10->S11->S12->S13->S14->S15->S16->S17->S18->S19->S20->S21->S22->S23->S24->S25->S26 @@ -1653,14 +1653,14 @@ func testLongReorgedFastSyncedDeepRepair(t *testing.T, snapshots bool) { // was not yet committed, but the process crashed. In this case we expect the // chain to detect that it was fast syncing and not delete anything, since we // can just pick up directly where we left off. 
-func TestLongReorgedFastSyncingShallowRepair(t *testing.T) { - testLongReorgedFastSyncingShallowRepair(t, false) +func TestLongReorgedSnapSyncingShallowRepair(t *testing.T) { + testLongReorgedSnapSyncingShallowRepair(t, false) } -func TestLongReorgedFastSyncingShallowRepairWithSnapshots(t *testing.T) { - testLongReorgedFastSyncingShallowRepair(t, true) +func TestLongReorgedSnapSyncingShallowRepairWithSnapshots(t *testing.T) { + testLongReorgedSnapSyncingShallowRepair(t, true) } -func testLongReorgedFastSyncingShallowRepair(t *testing.T, snapshots bool) { +func testLongReorgedSnapSyncingShallowRepair(t *testing.T, snapshots bool) { // Chain: // G->C1->C2->C3->C4->C5->C6->C7->C8->C9->C10->C11->C12->C13->C14->C15->C16->C17->C18 (HEAD) // └->S1->S2->S3->S4->S5->S6->S7->S8->S9->S10->S11->S12->S13->S14->S15->S16->S17->S18->S19->S20->S21->S22->S23->S24->S25->S26 @@ -1704,14 +1704,14 @@ func testLongReorgedFastSyncingShallowRepair(t *testing.T, snapshots bool) { // was not yet committed, but the process crashed. In this case we expect the // chain to detect that it was fast syncing and not delete anything, since we // can just pick up directly where we left off. -func TestLongReorgedFastSyncingDeepRepair(t *testing.T) { - testLongReorgedFastSyncingDeepRepair(t, false) +func TestLongReorgedSnapSyncingDeepRepair(t *testing.T) { + testLongReorgedSnapSyncingDeepRepair(t, false) } -func TestLongReorgedFastSyncingDeepRepairWithSnapshots(t *testing.T) { - testLongReorgedFastSyncingDeepRepair(t, true) +func TestLongReorgedSnapSyncingDeepRepairWithSnapshots(t *testing.T) { + testLongReorgedSnapSyncingDeepRepair(t, true) } -func testLongReorgedFastSyncingDeepRepair(t *testing.T, snapshots bool) { +func testLongReorgedSnapSyncingDeepRepair(t *testing.T, snapshots bool) { // Chain: // G->C1->C2->C3->C4->C5->C6->C7->C8->C9->C10->C11->C12->C13->C14->C15->C16->C17->C18->C19->C20->C21->C22->C23->C24 (HEAD) // └->S1->S2->S3->S4->S5->S6->S7->S8->S9->S10->S11->S12->S13->S14->S15->S16->S17->S18->S19->S20->S21->S22->S23->S24->S25->S26 diff --git a/core/blockchain_sethead_test.go b/core/blockchain_sethead_test.go index 986ddd7c3..df09a6e02 100644 --- a/core/blockchain_sethead_test.go +++ b/core/blockchain_sethead_test.go @@ -194,10 +194,10 @@ func testShortSetHead(t *testing.T, snapshots bool) { // Everything above the sethead point should be deleted. In between the committed // block and the requested head the data can remain as "fast sync" data to avoid // redownloading it. -func TestShortFastSyncedSetHead(t *testing.T) { testShortFastSyncedSetHead(t, false) } -func TestShortFastSyncedSetHeadWithSnapshots(t *testing.T) { testShortFastSyncedSetHead(t, true) } +func TestShortSnapSyncedSetHead(t *testing.T) { testShortSnapSyncedSetHead(t, false) } +func TestShortSnapSyncedSetHeadWithSnapshots(t *testing.T) { testShortSnapSyncedSetHead(t, true) } -func testShortFastSyncedSetHead(t *testing.T, snapshots bool) { +func testShortSnapSyncedSetHead(t *testing.T, snapshots bool) { // Chain: // G->C1->C2->C3->C4->C5->C6->C7->C8 (HEAD) // @@ -236,10 +236,10 @@ func testShortFastSyncedSetHead(t *testing.T, snapshots bool) { // detect that it was fast syncing and delete everything from the new head, since // we can just pick up fast syncing from there. The head full block should be set // to the genesis. 
-func TestShortFastSyncingSetHead(t *testing.T) { testShortFastSyncingSetHead(t, false) } -func TestShortFastSyncingSetHeadWithSnapshots(t *testing.T) { testShortFastSyncingSetHead(t, true) } +func TestShortSnapSyncingSetHead(t *testing.T) { testShortSnapSyncingSetHead(t, false) } +func TestShortSnapSyncingSetHeadWithSnapshots(t *testing.T) { testShortSnapSyncingSetHead(t, true) } -func testShortFastSyncingSetHead(t *testing.T, snapshots bool) { +func testShortSnapSyncingSetHead(t *testing.T, snapshots bool) { // Chain: // G->C1->C2->C3->C4->C5->C6->C7->C8 (HEAD) // @@ -326,14 +326,14 @@ func testShortOldForkedSetHead(t *testing.T, snapshots bool) { // block. Everything above the sethead point should be deleted. In between the // committed block and the requested head the data can remain as "fast sync" data // to avoid redownloading it. The side chain should be left alone as it was shorter. -func TestShortOldForkedFastSyncedSetHead(t *testing.T) { - testShortOldForkedFastSyncedSetHead(t, false) +func TestShortOldForkedSnapSyncedSetHead(t *testing.T) { + testShortOldForkedSnapSyncedSetHead(t, false) } -func TestShortOldForkedFastSyncedSetHeadWithSnapshots(t *testing.T) { - testShortOldForkedFastSyncedSetHead(t, true) +func TestShortOldForkedSnapSyncedSetHeadWithSnapshots(t *testing.T) { + testShortOldForkedSnapSyncedSetHead(t, true) } -func testShortOldForkedFastSyncedSetHead(t *testing.T, snapshots bool) { +func testShortOldForkedSnapSyncedSetHead(t *testing.T, snapshots bool) { // Chain: // G->C1->C2->C3->C4->C5->C6->C7->C8 (HEAD) // └->S1->S2->S3 @@ -375,14 +375,14 @@ func testShortOldForkedFastSyncedSetHead(t *testing.T, snapshots bool) { // the chain to detect that it was fast syncing and delete everything from the new // head, since we can just pick up fast syncing from there. The head full block // should be set to the genesis. -func TestShortOldForkedFastSyncingSetHead(t *testing.T) { - testShortOldForkedFastSyncingSetHead(t, false) +func TestShortOldForkedSnapSyncingSetHead(t *testing.T) { + testShortOldForkedSnapSyncingSetHead(t, false) } -func TestShortOldForkedFastSyncingSetHeadWithSnapshots(t *testing.T) { - testShortOldForkedFastSyncingSetHead(t, true) +func TestShortOldForkedSnapSyncingSetHeadWithSnapshots(t *testing.T) { + testShortOldForkedSnapSyncingSetHead(t, true) } -func testShortOldForkedFastSyncingSetHead(t *testing.T, snapshots bool) { +func testShortOldForkedSnapSyncingSetHead(t *testing.T, snapshots bool) { // Chain: // G->C1->C2->C3->C4->C5->C6->C7->C8 (HEAD) // └->S1->S2->S3 @@ -478,14 +478,14 @@ func testShortNewlyForkedSetHead(t *testing.T, snapshots bool) { // The side chain could be left to be if the fork point was before the new head // we are deleting to, but it would be exceedingly hard to detect that case and // properly handle it, so we'll trade extra work in exchange for simpler code. 
-func TestShortNewlyForkedFastSyncedSetHead(t *testing.T) { - testShortNewlyForkedFastSyncedSetHead(t, false) +func TestShortNewlyForkedSnapSyncedSetHead(t *testing.T) { + testShortNewlyForkedSnapSyncedSetHead(t, false) } -func TestShortNewlyForkedFastSyncedSetHeadWithSnapshots(t *testing.T) { - testShortNewlyForkedFastSyncedSetHead(t, true) +func TestShortNewlyForkedSnapSyncedSetHeadWithSnapshots(t *testing.T) { + testShortNewlyForkedSnapSyncedSetHead(t, true) } -func testShortNewlyForkedFastSyncedSetHead(t *testing.T, snapshots bool) { +func testShortNewlyForkedSnapSyncedSetHead(t *testing.T, snapshots bool) { // Chain: // G->C1->C2->C3->C4->C5->C6->C7->C8->C9->C10 (HEAD) // └->S1->S2->S3->S4->S5->S6->S7->S8 @@ -531,14 +531,14 @@ func testShortNewlyForkedFastSyncedSetHead(t *testing.T, snapshots bool) { // The side chain could be left to be if the fork point was before the new head // we are deleting to, but it would be exceedingly hard to detect that case and // properly handle it, so we'll trade extra work in exchange for simpler code. -func TestShortNewlyForkedFastSyncingSetHead(t *testing.T) { - testShortNewlyForkedFastSyncingSetHead(t, false) +func TestShortNewlyForkedSnapSyncingSetHead(t *testing.T) { + testShortNewlyForkedSnapSyncingSetHead(t, false) } -func TestShortNewlyForkedFastSyncingSetHeadWithSnapshots(t *testing.T) { - testShortNewlyForkedFastSyncingSetHead(t, true) +func TestShortNewlyForkedSnapSyncingSetHeadWithSnapshots(t *testing.T) { + testShortNewlyForkedSnapSyncingSetHead(t, true) } -func testShortNewlyForkedFastSyncingSetHead(t *testing.T, snapshots bool) { +func testShortNewlyForkedSnapSyncingSetHead(t *testing.T, snapshots bool) { // Chain: // G->C1->C2->C3->C4->C5->C6->C7->C8->C9->C10 (HEAD) // └->S1->S2->S3->S4->S5->S6->S7->S8 @@ -634,14 +634,14 @@ func testShortReorgedSetHead(t *testing.T, snapshots bool) { // The side chain could be left to be if the fork point was before the new head // we are deleting to, but it would be exceedingly hard to detect that case and // properly handle it, so we'll trade extra work in exchange for simpler code. -func TestShortReorgedFastSyncedSetHead(t *testing.T) { - testShortReorgedFastSyncedSetHead(t, false) +func TestShortReorgedSnapSyncedSetHead(t *testing.T) { + testShortReorgedSnapSyncedSetHead(t, false) } -func TestShortReorgedFastSyncedSetHeadWithSnapshots(t *testing.T) { - testShortReorgedFastSyncedSetHead(t, true) +func TestShortReorgedSnapSyncedSetHeadWithSnapshots(t *testing.T) { + testShortReorgedSnapSyncedSetHead(t, true) } -func testShortReorgedFastSyncedSetHead(t *testing.T, snapshots bool) { +func testShortReorgedSnapSyncedSetHead(t *testing.T, snapshots bool) { // Chain: // G->C1->C2->C3->C4->C5->C6->C7->C8 (HEAD) // └->S1->S2->S3->S4->S5->S6->S7->S8->S9->S10 @@ -686,14 +686,14 @@ func testShortReorgedFastSyncedSetHead(t *testing.T, snapshots bool) { // The side chain could be left to be if the fork point was before the new head // we are deleting to, but it would be exceedingly hard to detect that case and // properly handle it, so we'll trade extra work in exchange for simpler code. 
-func TestShortReorgedFastSyncingSetHead(t *testing.T) { - testShortReorgedFastSyncingSetHead(t, false) +func TestShortReorgedSnapSyncingSetHead(t *testing.T) { + testShortReorgedSnapSyncingSetHead(t, false) } -func TestShortReorgedFastSyncingSetHeadWithSnapshots(t *testing.T) { - testShortReorgedFastSyncingSetHead(t, true) +func TestShortReorgedSnapSyncingSetHeadWithSnapshots(t *testing.T) { + testShortReorgedSnapSyncingSetHead(t, true) } -func testShortReorgedFastSyncingSetHead(t *testing.T, snapshots bool) { +func testShortReorgedSnapSyncingSetHead(t *testing.T, snapshots bool) { // Chain: // G->C1->C2->C3->C4->C5->C6->C7->C8 (HEAD) // └->S1->S2->S3->S4->S5->S6->S7->S8->S9->S10 @@ -829,14 +829,14 @@ func testLongDeepSetHead(t *testing.T, snapshots bool) { // back to the committed block. Everything above the sethead point should be // deleted. In between the committed block and the requested head the data can // remain as "fast sync" data to avoid redownloading it. -func TestLongFastSyncedShallowSetHead(t *testing.T) { - testLongFastSyncedShallowSetHead(t, false) +func TestLongSnapSyncedShallowSetHead(t *testing.T) { + testLongSnapSyncedShallowSetHead(t, false) } -func TestLongFastSyncedShallowSetHeadWithSnapshots(t *testing.T) { - testLongFastSyncedShallowSetHead(t, true) +func TestLongSnapSyncedShallowSetHeadWithSnapshots(t *testing.T) { + testLongSnapSyncedShallowSetHead(t, true) } -func testLongFastSyncedShallowSetHead(t *testing.T, snapshots bool) { +func testLongSnapSyncedShallowSetHead(t *testing.T, snapshots bool) { // Chain: // G->C1->C2->C3->C4->C5->C6->C7->C8->C9->C10->C11->C12->C13->C14->C15->C16->C17->C18 (HEAD) // @@ -880,10 +880,10 @@ func testLongFastSyncedShallowSetHead(t *testing.T, snapshots bool) { // which sethead was called. In this case we expect the full chain to be rolled // back to the committed block. Since the ancient limit was underflown, everything // needs to be deleted onwards to avoid creating a gap. -func TestLongFastSyncedDeepSetHead(t *testing.T) { testLongFastSyncedDeepSetHead(t, false) } -func TestLongFastSyncedDeepSetHeadWithSnapshots(t *testing.T) { testLongFastSyncedDeepSetHead(t, true) } +func TestLongSnapSyncedDeepSetHead(t *testing.T) { testLongSnapSyncedDeepSetHead(t, false) } +func TestLongSnapSyncedDeepSetHeadWithSnapshots(t *testing.T) { testLongSnapSyncedDeepSetHead(t, true) } -func testLongFastSyncedDeepSetHead(t *testing.T, snapshots bool) { +func testLongSnapSyncedDeepSetHead(t *testing.T, snapshots bool) { // Chain: // G->C1->C2->C3->C4->C5->C6->C7->C8->C9->C10->C11->C12->C13->C14->C15->C16->C17->C18->C19->C20->C21->C22->C23->C24 (HEAD) // @@ -926,14 +926,14 @@ func testLongFastSyncedDeepSetHead(t *testing.T, snapshots bool) { // sethead was called. In this case we expect the chain to detect that it was fast // syncing and delete everything from the new head, since we can just pick up fast // syncing from there. 
-func TestLongFastSyncingShallowSetHead(t *testing.T) { - testLongFastSyncingShallowSetHead(t, false) +func TestLongSnapSyncingShallowSetHead(t *testing.T) { + testLongSnapSyncingShallowSetHead(t, false) } -func TestLongFastSyncingShallowSetHeadWithSnapshots(t *testing.T) { - testLongFastSyncingShallowSetHead(t, true) +func TestLongSnapSyncingShallowSetHeadWithSnapshots(t *testing.T) { + testLongSnapSyncingShallowSetHead(t, true) } -func testLongFastSyncingShallowSetHead(t *testing.T, snapshots bool) { +func testLongSnapSyncingShallowSetHead(t *testing.T, snapshots bool) { // Chain: // G->C1->C2->C3->C4->C5->C6->C7->C8->C9->C10->C11->C12->C13->C14->C15->C16->C17->C18 (HEAD) // @@ -977,14 +977,14 @@ func testLongFastSyncingShallowSetHead(t *testing.T, snapshots bool) { // sethead was called. In this case we expect the chain to detect that it was fast // syncing and delete everything from the new head, since we can just pick up fast // syncing from there. -func TestLongFastSyncingDeepSetHead(t *testing.T) { - testLongFastSyncingDeepSetHead(t, false) +func TestLongSnapSyncingDeepSetHead(t *testing.T) { + testLongSnapSyncingDeepSetHead(t, false) } -func TestLongFastSyncingDeepSetHeadWithSnapshots(t *testing.T) { - testLongFastSyncingDeepSetHead(t, true) +func TestLongSnapSyncingDeepSetHeadWithSnapshots(t *testing.T) { + testLongSnapSyncingDeepSetHead(t, true) } -func testLongFastSyncingDeepSetHead(t *testing.T, snapshots bool) { +func testLongSnapSyncingDeepSetHead(t *testing.T, snapshots bool) { // Chain: // G->C1->C2->C3->C4->C5->C6->C7->C8->C9->C10->C11->C12->C13->C14->C15->C16->C17->C18->C19->C20->C21->C22->C23->C24 (HEAD) // @@ -1132,14 +1132,14 @@ func testLongOldForkedDeepSetHead(t *testing.T, snapshots bool) { // sethead point should be deleted. In between the committed block and the // requested head the data can remain as "fast sync" data to avoid redownloading // it. The side chain is nuked by the freezer. -func TestLongOldForkedFastSyncedShallowSetHead(t *testing.T) { - testLongOldForkedFastSyncedShallowSetHead(t, false) +func TestLongOldForkedSnapSyncedShallowSetHead(t *testing.T) { + testLongOldForkedSnapSyncedShallowSetHead(t, false) } -func TestLongOldForkedFastSyncedShallowSetHeadWithSnapshots(t *testing.T) { - testLongOldForkedFastSyncedShallowSetHead(t, true) +func TestLongOldForkedSnapSyncedShallowSetHeadWithSnapshots(t *testing.T) { + testLongOldForkedSnapSyncedShallowSetHead(t, true) } -func testLongOldForkedFastSyncedShallowSetHead(t *testing.T, snapshots bool) { +func testLongOldForkedSnapSyncedShallowSetHead(t *testing.T, snapshots bool) { // Chain: // G->C1->C2->C3->C4->C5->C6->C7->C8->C9->C10->C11->C12->C13->C14->C15->C16->C17->C18 (HEAD) // └->S1->S2->S3 @@ -1186,14 +1186,14 @@ func testLongOldForkedFastSyncedShallowSetHead(t *testing.T, snapshots bool) { // full chain to be rolled back to the committed block. Since the ancient limit was // underflown, everything needs to be deleted onwards to avoid creating a gap. The // side chain is nuked by the freezer. 
-func TestLongOldForkedFastSyncedDeepSetHead(t *testing.T) { - testLongOldForkedFastSyncedDeepSetHead(t, false) +func TestLongOldForkedSnapSyncedDeepSetHead(t *testing.T) { + testLongOldForkedSnapSyncedDeepSetHead(t, false) } -func TestLongOldForkedFastSyncedDeepSetHeadWithSnapshots(t *testing.T) { - testLongOldForkedFastSyncedDeepSetHead(t, true) +func TestLongOldForkedSnapSyncedDeepSetHeadWithSnapshots(t *testing.T) { + testLongOldForkedSnapSyncedDeepSetHead(t, true) } -func testLongOldForkedFastSyncedDeepSetHead(t *testing.T, snapshots bool) { +func testLongOldForkedSnapSyncedDeepSetHead(t *testing.T, snapshots bool) { // Chain: // G->C1->C2->C3->C4->C5->C6->C7->C8->C9->C10->C11->C12->C13->C14->C15->C16->C17->C18->C19->C20->C21->C22->C23->C24 (HEAD) // └->S1->S2->S3 @@ -1239,14 +1239,14 @@ func testLongOldForkedFastSyncedDeepSetHead(t *testing.T, snapshots bool) { // that it was fast syncing and delete everything from the new head, since we can // just pick up fast syncing from there. The side chain is completely nuked by the // freezer. -func TestLongOldForkedFastSyncingShallowSetHead(t *testing.T) { - testLongOldForkedFastSyncingShallowSetHead(t, false) +func TestLongOldForkedSnapSyncingShallowSetHead(t *testing.T) { + testLongOldForkedSnapSyncingShallowSetHead(t, false) } -func TestLongOldForkedFastSyncingShallowSetHeadWithSnapshots(t *testing.T) { - testLongOldForkedFastSyncingShallowSetHead(t, true) +func TestLongOldForkedSnapSyncingShallowSetHeadWithSnapshots(t *testing.T) { + testLongOldForkedSnapSyncingShallowSetHead(t, true) } -func testLongOldForkedFastSyncingShallowSetHead(t *testing.T, snapshots bool) { +func testLongOldForkedSnapSyncingShallowSetHead(t *testing.T, snapshots bool) { // Chain: // G->C1->C2->C3->C4->C5->C6->C7->C8->C9->C10->C11->C12->C13->C14->C15->C16->C17->C18 (HEAD) // └->S1->S2->S3 @@ -1293,14 +1293,14 @@ func testLongOldForkedFastSyncingShallowSetHead(t *testing.T, snapshots bool) { // that it was fast syncing and delete everything from the new head, since we can // just pick up fast syncing from there. The side chain is completely nuked by the // freezer. -func TestLongOldForkedFastSyncingDeepSetHead(t *testing.T) { - testLongOldForkedFastSyncingDeepSetHead(t, false) +func TestLongOldForkedSnapSyncingDeepSetHead(t *testing.T) { + testLongOldForkedSnapSyncingDeepSetHead(t, false) } -func TestLongOldForkedFastSyncingDeepSetHeadWithSnapshots(t *testing.T) { - testLongOldForkedFastSyncingDeepSetHead(t, true) +func TestLongOldForkedSnapSyncingDeepSetHeadWithSnapshots(t *testing.T) { + testLongOldForkedSnapSyncingDeepSetHead(t, true) } -func testLongOldForkedFastSyncingDeepSetHead(t *testing.T, snapshots bool) { +func testLongOldForkedSnapSyncingDeepSetHead(t *testing.T, snapshots bool) { // Chain: // G->C1->C2->C3->C4->C5->C6->C7->C8->C9->C10->C11->C12->C13->C14->C15->C16->C17->C18->C19->C20->C21->C22->C23->C24 (HEAD) // └->S1->S2->S3 @@ -1446,15 +1446,15 @@ func testLongNewerForkedDeepSetHead(t *testing.T, snapshots bool) { // side chain, where the fast sync pivot point - newer than the ancient limit - // was already committed to disk and then sethead was called. In this test scenario // the side chain is above the committed block. In this case the freezer will delete -// the sidechain since it's dangling, reverting to TestLongFastSyncedShallowSetHead. -func TestLongNewerForkedFastSyncedShallowSetHead(t *testing.T) { - testLongNewerForkedFastSyncedShallowSetHead(t, false) +// the sidechain since it's dangling, reverting to TestLongSnapSyncedShallowSetHead. 
+func TestLongNewerForkedSnapSyncedShallowSetHead(t *testing.T) {
+	testLongNewerForkedSnapSyncedShallowSetHead(t, false)
 }
-func TestLongNewerForkedFastSyncedShallowSetHeadWithSnapshots(t *testing.T) {
-	testLongNewerForkedFastSyncedShallowSetHead(t, true)
+func TestLongNewerForkedSnapSyncedShallowSetHeadWithSnapshots(t *testing.T) {
+	testLongNewerForkedSnapSyncedShallowSetHead(t, true)
 }
-func testLongNewerForkedFastSyncedShallowSetHead(t *testing.T, snapshots bool) {
+func testLongNewerForkedSnapSyncedShallowSetHead(t *testing.T, snapshots bool) {
 	// Chain:
 	//   G->C1->C2->C3->C4->C5->C6->C7->C8->C9->C10->C11->C12->C13->C14->C15->C16->C17->C18 (HEAD)
 	//   └->S1->S2->S3->S4->S5->S6->S7->S8->S9->S10->S11->S12
@@ -1498,15 +1498,15 @@ func testLongNewerForkedFastSyncedShallowSetHead(t *testing.T, snapshots bool) {
 // side chain, where the fast sync pivot point - older than the ancient limit -
 // was already committed to disk and then sethead was called. In this test scenario
 // the side chain is above the committed block. In this case the freezer will delete
-// the sidechain since it's dangling, reverting to TestLongFastSyncedDeepSetHead.
-func TestLongNewerForkedFastSyncedDeepSetHead(t *testing.T) {
-	testLongNewerForkedFastSyncedDeepSetHead(t, false)
+// the sidechain since it's dangling, reverting to TestLongSnapSyncedDeepSetHead.
+func TestLongNewerForkedSnapSyncedDeepSetHead(t *testing.T) {
+	testLongNewerForkedSnapSyncedDeepSetHead(t, false)
 }
-func TestLongNewerForkedFastSyncedDeepSetHeadWithSnapshots(t *testing.T) {
-	testLongNewerForkedFastSyncedDeepSetHead(t, true)
+func TestLongNewerForkedSnapSyncedDeepSetHeadWithSnapshots(t *testing.T) {
+	testLongNewerForkedSnapSyncedDeepSetHead(t, true)
 }
-func testLongNewerForkedFastSyncedDeepSetHead(t *testing.T, snapshots bool) {
+func testLongNewerForkedSnapSyncedDeepSetHead(t *testing.T, snapshots bool) {
 	// Chain:
 	//   G->C1->C2->C3->C4->C5->C6->C7->C8->C9->C10->C11->C12->C13->C14->C15->C16->C17->C18->C19->C20->C21->C22->C23->C24 (HEAD)
 	//   └->S1->S2->S3->S4->S5->S6->S7->S8->S9->S10->S11->S12
@@ -1549,15 +1549,15 @@ func testLongNewerForkedFastSyncedDeepSetHead(t *testing.T, snapshots bool) {
 // side chain, where the fast sync pivot point - newer than the ancient limit -
 // was not yet committed, but sethead was called. In this test scenario the side
 // chain is above the committed block. In this case the freezer will delete the
-// sidechain since it's dangling, reverting to TestLongFastSyncinghallowSetHead.
-func TestLongNewerForkedFastSyncingShallowSetHead(t *testing.T) {
-	testLongNewerForkedFastSyncingShallowSetHead(t, false)
+// sidechain since it's dangling, reverting to TestLongSnapSyncingShallowSetHead.
+func TestLongNewerForkedSnapSyncingShallowSetHead(t *testing.T) { + testLongNewerForkedSnapSyncingShallowSetHead(t, false) } -func TestLongNewerForkedFastSyncingShallowSetHeadWithSnapshots(t *testing.T) { - testLongNewerForkedFastSyncingShallowSetHead(t, true) +func TestLongNewerForkedSnapSyncingShallowSetHeadWithSnapshots(t *testing.T) { + testLongNewerForkedSnapSyncingShallowSetHead(t, true) } -func testLongNewerForkedFastSyncingShallowSetHead(t *testing.T, snapshots bool) { +func testLongNewerForkedSnapSyncingShallowSetHead(t *testing.T, snapshots bool) { // Chain: // G->C1->C2->C3->C4->C5->C6->C7->C8->C9->C10->C11->C12->C13->C14->C15->C16->C17->C18 (HEAD) // └->S1->S2->S3->S4->S5->S6->S7->S8->S9->S10->S11->S12 @@ -1601,15 +1601,15 @@ func testLongNewerForkedFastSyncingShallowSetHead(t *testing.T, snapshots bool) // side chain, where the fast sync pivot point - older than the ancient limit - // was not yet committed, but sethead was called. In this test scenario the side // chain is above the committed block. In this case the freezer will delete the -// sidechain since it's dangling, reverting to TestLongFastSyncingDeepSetHead. -func TestLongNewerForkedFastSyncingDeepSetHead(t *testing.T) { - testLongNewerForkedFastSyncingDeepSetHead(t, false) +// sidechain since it's dangling, reverting to TestLongSnapSyncingDeepSetHead. +func TestLongNewerForkedSnapSyncingDeepSetHead(t *testing.T) { + testLongNewerForkedSnapSyncingDeepSetHead(t, false) } -func TestLongNewerForkedFastSyncingDeepSetHeadWithSnapshots(t *testing.T) { - testLongNewerForkedFastSyncingDeepSetHead(t, true) +func TestLongNewerForkedSnapSyncingDeepSetHeadWithSnapshots(t *testing.T) { + testLongNewerForkedSnapSyncingDeepSetHead(t, true) } -func testLongNewerForkedFastSyncingDeepSetHead(t *testing.T, snapshots bool) { +func testLongNewerForkedSnapSyncingDeepSetHead(t *testing.T, snapshots bool) { // Chain: // G->C1->C2->C3->C4->C5->C6->C7->C8->C9->C10->C11->C12->C13->C14->C15->C16->C17->C18->C19->C20->C21->C22->C23->C24 (HEAD) // └->S1->S2->S3->S4->S5->S6->S7->S8->S9->S10->S11->S12 @@ -1745,15 +1745,15 @@ func testLongReorgedDeepSetHead(t *testing.T, snapshots bool) { // side chain, where the fast sync pivot point - newer than the ancient limit - // was already committed to disk and then sethead was called. In this case the // freezer will delete the sidechain since it's dangling, reverting to -// TestLongFastSyncedShallowSetHead. -func TestLongReorgedFastSyncedShallowSetHead(t *testing.T) { - testLongReorgedFastSyncedShallowSetHead(t, false) +// TestLongSnapSyncedShallowSetHead. 
+func TestLongReorgedSnapSyncedShallowSetHead(t *testing.T) { + testLongReorgedSnapSyncedShallowSetHead(t, false) } -func TestLongReorgedFastSyncedShallowSetHeadWithSnapshots(t *testing.T) { - testLongReorgedFastSyncedShallowSetHead(t, true) +func TestLongReorgedSnapSyncedShallowSetHeadWithSnapshots(t *testing.T) { + testLongReorgedSnapSyncedShallowSetHead(t, true) } -func testLongReorgedFastSyncedShallowSetHead(t *testing.T, snapshots bool) { +func testLongReorgedSnapSyncedShallowSetHead(t *testing.T, snapshots bool) { // Chain: // G->C1->C2->C3->C4->C5->C6->C7->C8->C9->C10->C11->C12->C13->C14->C15->C16->C17->C18 (HEAD) // └->S1->S2->S3->S4->S5->S6->S7->S8->S9->S10->S11->S12->S13->S14->S15->S16->S17->S18->S19->S20->S21->S22->S23->S24->S25->S26 @@ -1797,15 +1797,15 @@ func testLongReorgedFastSyncedShallowSetHead(t *testing.T, snapshots bool) { // side chain, where the fast sync pivot point - older than the ancient limit - // was already committed to disk and then sethead was called. In this case the // freezer will delete the sidechain since it's dangling, reverting to -// TestLongFastSyncedDeepSetHead. -func TestLongReorgedFastSyncedDeepSetHead(t *testing.T) { - testLongReorgedFastSyncedDeepSetHead(t, false) +// TestLongSnapSyncedDeepSetHead. +func TestLongReorgedSnapSyncedDeepSetHead(t *testing.T) { + testLongReorgedSnapSyncedDeepSetHead(t, false) } -func TestLongReorgedFastSyncedDeepSetHeadWithSnapshots(t *testing.T) { - testLongReorgedFastSyncedDeepSetHead(t, true) +func TestLongReorgedSnapSyncedDeepSetHeadWithSnapshots(t *testing.T) { + testLongReorgedSnapSyncedDeepSetHead(t, true) } -func testLongReorgedFastSyncedDeepSetHead(t *testing.T, snapshots bool) { +func testLongReorgedSnapSyncedDeepSetHead(t *testing.T, snapshots bool) { // Chain: // G->C1->C2->C3->C4->C5->C6->C7->C8->C9->C10->C11->C12->C13->C14->C15->C16->C17->C18->C19->C20->C21->C22->C23->C24 (HEAD) // └->S1->S2->S3->S4->S5->S6->S7->S8->S9->S10->S11->S12->S13->S14->S15->S16->S17->S18->S19->S20->S21->S22->S23->S24->S25->S26 @@ -1850,14 +1850,14 @@ func testLongReorgedFastSyncedDeepSetHead(t *testing.T, snapshots bool) { // chain to detect that it was fast syncing and delete everything from the new // head, since we can just pick up fast syncing from there. The side chain is // completely nuked by the freezer. -func TestLongReorgedFastSyncingShallowSetHead(t *testing.T) { - testLongReorgedFastSyncingShallowSetHead(t, false) +func TestLongReorgedSnapSyncingShallowSetHead(t *testing.T) { + testLongReorgedSnapSyncingShallowSetHead(t, false) } -func TestLongReorgedFastSyncingShallowSetHeadWithSnapshots(t *testing.T) { - testLongReorgedFastSyncingShallowSetHead(t, true) +func TestLongReorgedSnapSyncingShallowSetHeadWithSnapshots(t *testing.T) { + testLongReorgedSnapSyncingShallowSetHead(t, true) } -func testLongReorgedFastSyncingShallowSetHead(t *testing.T, snapshots bool) { +func testLongReorgedSnapSyncingShallowSetHead(t *testing.T, snapshots bool) { // Chain: // G->C1->C2->C3->C4->C5->C6->C7->C8->C9->C10->C11->C12->C13->C14->C15->C16->C17->C18 (HEAD) // └->S1->S2->S3->S4->S5->S6->S7->S8->S9->S10->S11->S12->S13->S14->S15->S16->S17->S18->S19->S20->S21->S22->S23->S24->S25->S26 @@ -1903,14 +1903,14 @@ func testLongReorgedFastSyncingShallowSetHead(t *testing.T, snapshots bool) { // chain to detect that it was fast syncing and delete everything from the new // head, since we can just pick up fast syncing from there. The side chain is // completely nuked by the freezer. 
-func TestLongReorgedFastSyncingDeepSetHead(t *testing.T) { - testLongReorgedFastSyncingDeepSetHead(t, false) +func TestLongReorgedSnapSyncingDeepSetHead(t *testing.T) { + testLongReorgedSnapSyncingDeepSetHead(t, false) } -func TestLongReorgedFastSyncingDeepSetHeadWithSnapshots(t *testing.T) { - testLongReorgedFastSyncingDeepSetHead(t, true) +func TestLongReorgedSnapSyncingDeepSetHeadWithSnapshots(t *testing.T) { + testLongReorgedSnapSyncingDeepSetHead(t, true) } -func testLongReorgedFastSyncingDeepSetHead(t *testing.T, snapshots bool) { +func testLongReorgedSnapSyncingDeepSetHead(t *testing.T, snapshots bool) { // Chain: // G->C1->C2->C3->C4->C5->C6->C7->C8->C9->C10->C11->C12->C13->C14->C15->C16->C17->C18->C19->C20->C21->C22->C23->C24 (HEAD) // └->S1->S2->S3->S4->S5->S6->S7->S8->S9->S10->S11->S12->S13->S14->S15->S16->S17->S18->S19->S20->S21->S22->S23->S24->S25->S26 diff --git a/core/blockchain_test.go b/core/blockchain_test.go index 9c7e7ec77..651898864 100644 --- a/core/blockchain_test.go +++ b/core/blockchain_test.go @@ -2272,7 +2272,7 @@ func TestTransactionIndices(t *testing.T) { } } -func TestSkipStaleTxIndicesInFastSync(t *testing.T) { +func TestSkipStaleTxIndicesInSnapSync(t *testing.T) { // Configure and generate a sample block chain var ( gendb = rawdb.NewMemoryDatabase() diff --git a/core/chain_makers.go b/core/chain_makers.go index 829a176c0..d44d45243 100644 --- a/core/chain_makers.go +++ b/core/chain_makers.go @@ -160,6 +160,28 @@ func (b *BlockGen) TxNonce(addr common.Address) uint64 { // AddUncle adds an uncle header to the generated block. func (b *BlockGen) AddUncle(h *types.Header) { + // The uncle will have the same timestamp and auto-generated difficulty + h.Time = b.header.Time + + var parent *types.Header + for i := b.i - 1; i >= 0; i-- { + if b.chain[i].Hash() == h.ParentHash { + parent = b.chain[i].Header() + break + } + } + chainreader := &fakeChainReader{config: b.config} + h.Difficulty = b.engine.CalcDifficulty(chainreader, b.header.Time, parent) + + // The gas limit and price should be derived from the parent + h.GasLimit = parent.GasLimit + if b.config.IsLondon(h.Number) { + h.BaseFee = misc.CalcBaseFee(b.config, parent) + if !b.config.IsLondon(parent.Number) { + parentGasLimit := parent.GasLimit * params.ElasticityMultiplier + h.GasLimit = CalcGasLimit(parentGasLimit, parentGasLimit) + } + } b.uncles = append(b.uncles, h) } diff --git a/core/rawdb/accessors_chain.go b/core/rawdb/accessors_chain.go index a79037e32..d126c5027 100644 --- a/core/rawdb/accessors_chain.go +++ b/core/rawdb/accessors_chain.go @@ -242,24 +242,6 @@ func WriteLastPivotNumber(db ethdb.KeyValueWriter, pivot uint64) { } } -// ReadFastTrieProgress retrieves the number of tries nodes fast synced to allow -// reporting correct numbers across restarts. -func ReadFastTrieProgress(db ethdb.KeyValueReader) uint64 { - data, _ := db.Get(fastTrieProgressKey) - if len(data) == 0 { - return 0 - } - return new(big.Int).SetBytes(data).Uint64() -} - -// WriteFastTrieProgress stores the fast sync trie process counter to support -// retrieving it across restarts. -func WriteFastTrieProgress(db ethdb.KeyValueWriter, count uint64) { - if err := db.Put(fastTrieProgressKey, new(big.Int).SetUint64(count).Bytes()); err != nil { - log.Crit("Failed to store fast sync trie progress", "err", err) - } -} - // ReadTxIndexTail retrieves the number of oldest indexed block // whose transaction indices has been indexed. If the corresponding entry // is non-existent in database it means the indexing has been finished. 
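The reworked AddUncle above stops trusting the caller to hand in a fully-formed header: the uncle now inherits the block's timestamp, gets its difficulty from the consensus engine, and derives its gas limit from its parent, scaling once by the elasticity multiplier when crossing the London boundary. A minimal standalone sketch of that gas-limit rule, assuming upstream behaviour (params.ElasticityMultiplier is 2, and CalcGasLimit(parent, desired) leaves the limit unchanged when desired equals parent):

package main

import "fmt"

// Assumed to mirror params.ElasticityMultiplier upstream.
const elasticityMultiplier = 2

// uncleGasLimit sketches the derivation in the new AddUncle: inherit the
// parent's gas limit, except when the uncle is the first post-London block
// on its branch, in which case the limit is scaled once.
func uncleGasLimit(parentGasLimit uint64, uncleIsLondon, parentIsLondon bool) uint64 {
	if uncleIsLondon && !parentIsLondon {
		return parentGasLimit * elasticityMultiplier
	}
	return parentGasLimit
}

func main() {
	fmt.Println(uncleGasLimit(15_000_000, true, false)) // 30000000 at the fork boundary
	fmt.Println(uncleGasLimit(30_000_000, true, true))  // 30000000 in steady state
}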
diff --git a/core/rawdb/accessors_snapshot.go b/core/rawdb/accessors_snapshot.go
index a573583ec..20ec37166 100644
--- a/core/rawdb/accessors_snapshot.go
+++ b/core/rawdb/accessors_snapshot.go
@@ -208,11 +208,3 @@ func WriteSnapshotSyncStatus(db ethdb.KeyValueWriter, status []byte) {
 		log.Crit("Failed to store snapshot sync status", "err", err)
 	}
 }
-
-// DeleteSnapshotSyncStatus deletes the serialized sync status saved at the last
-// shutdown
-func DeleteSnapshotSyncStatus(db ethdb.KeyValueWriter) {
-	if err := db.Delete(snapshotSyncStatusKey); err != nil {
-		log.Crit("Failed to remove snapshot sync status", "err", err)
-	}
-}
diff --git a/core/rawdb/accessors_sync.go b/core/rawdb/accessors_sync.go
new file mode 100644
index 000000000..7189fc65a
--- /dev/null
+++ b/core/rawdb/accessors_sync.go
@@ -0,0 +1,80 @@
+// Copyright 2021 The go-ethereum Authors
+// This file is part of the go-ethereum library.
+//
+// The go-ethereum library is free software: you can redistribute it and/or modify
+// it under the terms of the GNU Lesser General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+//
+// The go-ethereum library is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU Lesser General Public License for more details.
+//
+// You should have received a copy of the GNU Lesser General Public License
+// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
+
+package rawdb
+
+import (
+	"bytes"
+
+	"github.com/scroll-tech/go-ethereum/core/types"
+	"github.com/scroll-tech/go-ethereum/ethdb"
+	"github.com/scroll-tech/go-ethereum/log"
+	"github.com/scroll-tech/go-ethereum/rlp"
+)
+
+// ReadSkeletonSyncStatus retrieves the serialized sync status saved at shutdown.
+func ReadSkeletonSyncStatus(db ethdb.KeyValueReader) []byte {
+	data, _ := db.Get(skeletonSyncStatusKey)
+	return data
+}
+
+// WriteSkeletonSyncStatus stores the serialized sync status to save at shutdown.
+func WriteSkeletonSyncStatus(db ethdb.KeyValueWriter, status []byte) {
+	if err := db.Put(skeletonSyncStatusKey, status); err != nil {
+		log.Crit("Failed to store skeleton sync status", "err", err)
+	}
+}
+
+// DeleteSkeletonSyncStatus deletes the serialized sync status saved at the last
+// shutdown.
+func DeleteSkeletonSyncStatus(db ethdb.KeyValueWriter) {
+	if err := db.Delete(skeletonSyncStatusKey); err != nil {
+		log.Crit("Failed to remove skeleton sync status", "err", err)
+	}
+}
+
+// ReadSkeletonHeader retrieves a block header from the skeleton sync store.
+func ReadSkeletonHeader(db ethdb.KeyValueReader, number uint64) *types.Header {
+	data, _ := db.Get(skeletonHeaderKey(number))
+	if len(data) == 0 {
+		return nil
+	}
+	header := new(types.Header)
+	if err := rlp.Decode(bytes.NewReader(data), header); err != nil {
+		log.Error("Invalid skeleton header RLP", "number", number, "err", err)
+		return nil
+	}
+	return header
+}
+
+// WriteSkeletonHeader stores a block header into the skeleton sync store.
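+// Skeleton headers are keyed purely by block number (see skeletonHeaderKey in
+// the schema.go hunk below), so the reverse header sync can look them up and
+// prune them by height without knowing their hashes.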
+func WriteSkeletonHeader(db ethdb.KeyValueWriter, header *types.Header) {
+	data, err := rlp.EncodeToBytes(header)
+	if err != nil {
+		log.Crit("Failed to RLP encode header", "err", err)
+	}
+	key := skeletonHeaderKey(header.Number.Uint64())
+	if err := db.Put(key, data); err != nil {
+		log.Crit("Failed to store skeleton header", "err", err)
+	}
+}
+
+// DeleteSkeletonHeader removes all block header data associated with a number.
+func DeleteSkeletonHeader(db ethdb.KeyValueWriter, number uint64) {
+	if err := db.Delete(skeletonHeaderKey(number)); err != nil {
+		log.Crit("Failed to delete skeleton header", "err", err)
+	}
+}
diff --git a/core/rawdb/database.go b/core/rawdb/database.go
index 0b43504f0..5838bae3d 100644
--- a/core/rawdb/database.go
+++ b/core/rawdb/database.go
@@ -322,6 +322,7 @@ func InspectDatabase(db ethdb.Database, keyPrefix, keyStart []byte) error {
 		storageSnaps    stat
 		preimages       stat
 		bloomBits       stat
+		beaconHeaders   stat
 		cliqueSnaps     stat
 
 		// Ancient store statistics
@@ -380,6 +381,8 @@ func InspectDatabase(db ethdb.Database, keyPrefix, keyStart []byte) error {
 			bloomBits.Add(size)
 		case bytes.HasPrefix(key, BloomBitsIndexPrefix):
 			bloomBits.Add(size)
+		case bytes.HasPrefix(key, skeletonHeaderPrefix) && len(key) == (len(skeletonHeaderPrefix)+8):
+			beaconHeaders.Add(size)
 		case bytes.HasPrefix(key, []byte("clique-")) && len(key) == 7+common.HashLength:
 			cliqueSnaps.Add(size)
 		case bytes.HasPrefix(key, []byte("cht-")) ||
@@ -396,7 +399,7 @@ func InspectDatabase(db ethdb.Database, keyPrefix, keyStart []byte) error {
 				databaseVersionKey, headHeaderKey, headBlockKey, headFastBlockKey, lastPivotKey,
 				fastTrieProgressKey, snapshotDisabledKey, SnapshotRootKey, snapshotJournalKey,
 				snapshotGeneratorKey, snapshotRecoveryKey, txIndexTailKey, fastTxLookupLimitKey,
-				uncleanShutdownKey, badBlockKey,
+				uncleanShutdownKey, badBlockKey, skeletonSyncStatusKey,
 			} {
 				if bytes.Equal(key, meta) {
 					metadata.Add(size)
@@ -442,6 +445,7 @@ func InspectDatabase(db ethdb.Database, keyPrefix, keyStart []byte) error {
 		{"Key-Value store", "Trie preimages", preimages.Size(), preimages.Count()},
 		{"Key-Value store", "Account snapshot", accountSnaps.Size(), accountSnaps.Count()},
 		{"Key-Value store", "Storage snapshot", storageSnaps.Size(), storageSnaps.Count()},
+		{"Key-Value store", "Beacon sync headers", beaconHeaders.Size(), beaconHeaders.Count()},
 		{"Key-Value store", "Clique snapshots", cliqueSnaps.Size(), cliqueSnaps.Count()},
 		{"Key-Value store", "Singleton metadata", metadata.Size(), metadata.Count()},
 		{"Ancient store", "Headers", ancientHeadersSize.String(), ancients.String()},
diff --git a/core/rawdb/schema.go b/core/rawdb/schema.go
index 73dc69ea3..2e99b131d 100644
--- a/core/rawdb/schema.go
+++ b/core/rawdb/schema.go
@@ -63,6 +63,9 @@ var (
 	// snapshotSyncStatusKey tracks the snapshot sync status across restarts.
 	snapshotSyncStatusKey = []byte("SnapshotSyncStatus")
 
+	// skeletonSyncStatusKey tracks the skeleton sync status across restarts.
+	skeletonSyncStatusKey = []byte("SkeletonSyncStatus")
+
 	// txIndexTailKey tracks the oldest block whose transactions have been indexed.
txIndexTailKey = []byte("TransactionIndexTail") @@ -89,6 +92,7 @@ var ( SnapshotAccountPrefix = []byte("a") // SnapshotAccountPrefix + account hash -> account trie value SnapshotStoragePrefix = []byte("o") // SnapshotStoragePrefix + account hash + storage hash -> storage trie value CodePrefix = []byte("c") // CodePrefix + code hash -> account code + skeletonHeaderPrefix = []byte("S") // skeletonHeaderPrefix + num (uint64 big endian) -> header PreimagePrefix = []byte("secure-key-") // PreimagePrefix + hash -> preimage configPrefix = []byte("ethereum-config-") // config prefix for the db @@ -207,6 +211,11 @@ func bloomBitsKey(bit uint, section uint64, hash common.Hash) []byte { return key } +// skeletonHeaderKey = skeletonHeaderPrefix + num (uint64 big endian) +func skeletonHeaderKey(number uint64) []byte { + return append(skeletonHeaderPrefix, encodeBlockNumber(number)...) +} + // preimageKey = PreimagePrefix + hash func preimageKey(hash common.Hash) []byte { return append(PreimagePrefix, hash.Bytes()...) diff --git a/core/rawdb/table.go b/core/rawdb/table.go index 893de34a8..a1ab90ed6 100644 --- a/core/rawdb/table.go +++ b/core/rawdb/table.go @@ -172,6 +172,18 @@ func (t *table) NewBatch() ethdb.Batch { return &tableBatch{t.db.NewBatch(), t.prefix} } +// NewBatchWithSize creates a write-only database batch with pre-allocated buffer. +func (t *table) NewBatchWithSize(size int) ethdb.Batch { + return &tableBatch{t.db.NewBatchWithSize(size), t.prefix} +} + +// NewSnapshot creates a database snapshot based on the current state. +// The created snapshot will not be affected by all following mutations +// happened on the database. +func (t *table) NewSnapshot() (ethdb.Snapshot, error) { + return t.db.NewSnapshot() +} + // tableBatch is a wrapper around a database batch that prefixes each key access // with a pre-configured string. type tableBatch struct { diff --git a/core/state/snapshot/difflayer.go b/core/state/snapshot/difflayer.go index bedee8a1f..f2d333735 100644 --- a/core/state/snapshot/difflayer.go +++ b/core/state/snapshot/difflayer.go @@ -471,7 +471,6 @@ func (dl *diffLayer) flatten() snapshot { for storageHash, data := range storage { comboData[storageHash] = data } - parent.storageData[accountHash] = comboData } // Return the combo parent return &diffLayer{ diff --git a/core/state/sync.go b/core/state/sync.go index df80eadd8..15e123d4d 100644 --- a/core/state/sync.go +++ b/core/state/sync.go @@ -27,7 +27,7 @@ import ( ) // NewStateSync create a new state trie download scheduler. -func NewStateSync(root common.Hash, database ethdb.KeyValueReader, bloom *trie.SyncBloom, onLeaf func(paths [][]byte, leaf []byte) error) *trie.Sync { +func NewStateSync(root common.Hash, database ethdb.KeyValueReader, onLeaf func(paths [][]byte, leaf []byte) error) *trie.Sync { // Register the storage slot callback if the external callback is specified. 
var onSlot func(paths [][]byte, hexpath []byte, leaf []byte, parent common.Hash) error if onLeaf != nil { @@ -52,6 +52,6 @@ func NewStateSync(root common.Hash, database ethdb.KeyValueReader, bloom *trie.S syncer.AddCodeEntry(common.BytesToHash(obj.KeccakCodeHash), hexpath, parent) return nil } - syncer = trie.NewSync(root, database, onAccount, bloom) + syncer = trie.NewSync(root, database, onAccount) return syncer } diff --git a/core/state/sync_test.go b/core/state/sync_test.go index 7f2320d9f..ac6424a0d 100644 --- a/core/state/sync_test.go +++ b/core/state/sync_test.go @@ -25,9 +25,7 @@ import ( "github.com/scroll-tech/go-ethereum/core/rawdb" "github.com/scroll-tech/go-ethereum/core/types" "github.com/scroll-tech/go-ethereum/crypto" - "github.com/scroll-tech/go-ethereum/crypto/codehash" "github.com/scroll-tech/go-ethereum/ethdb" - "github.com/scroll-tech/go-ethereum/ethdb/memorydb" "github.com/scroll-tech/go-ethereum/rlp" "github.com/scroll-tech/go-ethereum/trie" ) @@ -64,7 +62,7 @@ func makeTestState() (Database, common.Hash, []*testAccount) { } if i%5 == 0 { for j := byte(0); j < 5; j++ { - hash := codehash.KeccakCodeHash([]byte{i, i, i, i, i, j, j}) + hash := crypto.Keccak256Hash([]byte{i, i, i, i, i, j, j}) obj.SetState(db, hash, hash) } } @@ -135,7 +133,7 @@ func checkStateConsistency(db ethdb.Database, root common.Hash) error { // Tests that an empty state is not scheduled for syncing. func TestEmptyStateSync(t *testing.T) { empty := common.HexToHash("56e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421") - sync := NewStateSync(empty, rawdb.NewMemoryDatabase(), trie.NewSyncBloom(1, memorydb.New()), nil) + sync := NewStateSync(empty, rawdb.NewMemoryDatabase(), nil) if nodes, paths, codes := sync.Missing(1); len(nodes) != 0 || len(paths) != 0 || len(codes) != 0 { t.Errorf(" content requested for empty state: %v, %v, %v", nodes, paths, codes) } @@ -172,7 +170,7 @@ func testIterativeStateSync(t *testing.T, count int, commit bool, bypath bool) { // Create a destination state and sync with the scheduler dstDb := rawdb.NewMemoryDatabase() - sched := NewStateSync(srcRoot, dstDb, trie.NewSyncBloom(1, dstDb), nil) + sched := NewStateSync(srcRoot, dstDb, nil) nodes, paths, codes := sched.Missing(count) var ( @@ -251,7 +249,7 @@ func TestIterativeDelayedStateSync(t *testing.T) { // Create a destination state and sync with the scheduler dstDb := rawdb.NewMemoryDatabase() - sched := NewStateSync(srcRoot, dstDb, trie.NewSyncBloom(1, dstDb), nil) + sched := NewStateSync(srcRoot, dstDb, nil) nodes, _, codes := sched.Missing(0) queue := append(append([]common.Hash{}, nodes...), codes...) 
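These call-site updates all have the same shape: with the trie sync bloom retired, NewStateSync loses its bloom argument and takes just the root, the destination database and an optional leaf callback. A minimal sketch of the new call, reusing the empty-state root from TestEmptyStateSync above (import paths assume the scroll-tech fork keeps the upstream package layout):

package main

import (
	"fmt"

	"github.com/scroll-tech/go-ethereum/common"
	"github.com/scroll-tech/go-ethereum/core/rawdb"
	"github.com/scroll-tech/go-ethereum/core/state"
)

func main() {
	// Root hash of the empty state trie.
	empty := common.HexToHash("56e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421")

	// No bloom filter parameter any more: root, database, leaf callback.
	sched := state.NewStateSync(empty, rawdb.NewMemoryDatabase(), nil)

	// An empty state schedules no trie nodes, paths or code entries.
	nodes, paths, codes := sched.Missing(1)
	fmt.Println(len(nodes), len(paths), len(codes)) // 0 0 0
}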
@@ -299,7 +297,7 @@ func testIterativeRandomStateSync(t *testing.T, count int) { // Create a destination state and sync with the scheduler dstDb := rawdb.NewMemoryDatabase() - sched := NewStateSync(srcRoot, dstDb, trie.NewSyncBloom(1, dstDb), nil) + sched := NewStateSync(srcRoot, dstDb, nil) queue := make(map[common.Hash]struct{}) nodes, _, codes := sched.Missing(count) @@ -349,7 +347,7 @@ func TestIterativeRandomDelayedStateSync(t *testing.T) { // Create a destination state and sync with the scheduler dstDb := rawdb.NewMemoryDatabase() - sched := NewStateSync(srcRoot, dstDb, trie.NewSyncBloom(1, dstDb), nil) + sched := NewStateSync(srcRoot, dstDb, nil) queue := make(map[common.Hash]struct{}) nodes, _, codes := sched.Missing(0) @@ -408,7 +406,7 @@ func TestIncompleteStateSync(t *testing.T) { var isCode = make(map[common.Hash]struct{}) for _, acc := range srcAccounts { if len(acc.code) > 0 { - isCode[codehash.KeccakCodeHash(acc.code)] = struct{}{} + isCode[crypto.Keccak256Hash(acc.code)] = struct{}{} } } isCode[common.BytesToHash(emptyKeccakCodeHash)] = struct{}{} @@ -416,7 +414,7 @@ func TestIncompleteStateSync(t *testing.T) { // Create a destination state and sync with the scheduler dstDb := rawdb.NewMemoryDatabase() - sched := NewStateSync(srcRoot, dstDb, trie.NewSyncBloom(1, dstDb), nil) + sched := NewStateSync(srcRoot, dstDb, nil) var added []common.Hash diff --git a/eth/downloader/beaconsync.go b/eth/downloader/beaconsync.go new file mode 100644 index 000000000..f14df312b --- /dev/null +++ b/eth/downloader/beaconsync.go @@ -0,0 +1,390 @@ +// Copyright 2021 The go-ethereum Authors +// This file is part of the go-ethereum library. +// +// The go-ethereum library is free software: you can redistribute it and/or modify +// it under the terms of the GNU Lesser General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. +// +// The go-ethereum library is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU Lesser General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public License +// along with the go-ethereum library. If not, see . + +package downloader + +import ( + "fmt" + "sync" + "sync/atomic" + "time" + + "github.com/scroll-tech/go-ethereum/common" + "github.com/scroll-tech/go-ethereum/core/rawdb" + "github.com/scroll-tech/go-ethereum/core/types" + "github.com/scroll-tech/go-ethereum/log" +) + +// beaconBackfiller is the chain and state backfilling that can be commenced once +// the skeleton syncer has successfully reverse downloaded all the headers up to +// the genesis block or an existing header in the database. Its operation is fully +// directed by the skeleton sync's head/tail events. +type beaconBackfiller struct { + downloader *Downloader // Downloader to direct via this callback implementation + syncMode SyncMode // Sync mode to use for backfilling the skeleton chains + success func() // Callback to run on successful sync cycle completion + filling bool // Flag whether the downloader is backfilling or not + filled *types.Header // Last header filled by the last terminated sync loop + started chan struct{} // Notification channel whether the downloader inited + lock sync.Mutex // Mutex protecting the sync lock +} + +// newBeaconBackfiller is a helper method to create the backfiller. 
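+// The returned instance starts out idle: the skeleton sync drives it through
+// the suspend/resume callbacks below as reverse header download progresses.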
+func newBeaconBackfiller(dl *Downloader, success func()) backfiller {
+	return &beaconBackfiller{
+		downloader: dl,
+		success:    success,
+	}
+}
+
+// suspend cancels any background downloader threads and returns the last header
+// that has been successfully backfilled.
+func (b *beaconBackfiller) suspend() *types.Header {
+	// If no filling is running, don't waste cycles
+	b.lock.Lock()
+	filling := b.filling
+	filled := b.filled
+	started := b.started
+	b.lock.Unlock()
+
+	if !filling {
+		return filled // Return the filled header on the previous sync completion
+	}
+	// A previous filling should be running, though it may happen that it hasn't
+	// yet started (being done on a new goroutine). Many concurrent beacon head
+	// announcements can lead to sync start/stop thrashing. In that case we need
+	// to wait for initialization before we can safely cancel it. It is safe to
+	// read this channel multiple times; it gets closed on startup.
+	<-started
+
+	// Now that we're sure the downloader successfully started up, we can cancel
+	// it safely without running the risk of data races.
+	b.downloader.Cancel()
+
+	// Sync cycle was just terminated, retrieve and return the last filled header.
+	// Can't use `filled` as that contains a stale value from before cancellation.
+	return b.downloader.blockchain.CurrentFastBlock().Header()
+}
+
+// resume starts the downloader threads for backfilling state and chain data.
+func (b *beaconBackfiller) resume() {
+	b.lock.Lock()
+	if b.filling {
+		// If a previous filling cycle is still running, just ignore this start
+		// request. // TODO(karalabe): We should make this channel driven
+		b.lock.Unlock()
+		return
+	}
+	b.filling = true
+	b.filled = nil
+	b.started = make(chan struct{})
+	mode := b.syncMode
+	b.lock.Unlock()
+
+	// Start the backfilling on its own thread since the downloader does not have
+	// its own lifecycle runloop.
+	go func() {
+		// Set the backfiller to non-filling when download completes
+		defer func() {
+			b.lock.Lock()
+			b.filling = false
+			b.filled = b.downloader.blockchain.CurrentFastBlock().Header()
+			b.lock.Unlock()
+		}()
+		// If the downloader fails, report an error as in beacon chain mode there
+		// should be no errors as long as the chain we're syncing to is valid.
+		if err := b.downloader.synchronise("", common.Hash{}, nil, nil, mode, true, b.started); err != nil {
+			log.Error("Beacon backfilling failed", "err", err)
+			return
+		}
+		// Synchronization succeeded. Since this happens async, notify the outer
+		// context to disable snap syncing and enable transaction propagation.
+		if b.success != nil {
+			b.success()
+		}
+	}()
+}
+
+// setMode updates the sync mode from the current one to the requested one. If
+// there's an active sync in progress, it will be cancelled and restarted.
+func (b *beaconBackfiller) setMode(mode SyncMode) {
+	// Update the old sync mode and track if it was changed
+	b.lock.Lock()
+	oldMode := b.syncMode
+	updated := oldMode != mode
+	filling := b.filling
+	b.syncMode = mode
+	b.lock.Unlock()
+
+	// If the sync mode was changed mid-sync, restart. This should never ever
+	// really happen; we just handle it to detect programming errors.
+	if !updated || !filling {
+		return
+	}
+	log.Error("Downloader sync mode changed mid-run", "old", oldMode.String(), "new", mode.String())
+	b.suspend()
+	b.resume()
+}
+
+// SetBadBlockCallback sets the callback to run when a bad block is hit by the
+// block processor. This method is not thread safe and should be set only once
+// on startup before system events are fired.
+func (d *Downloader) SetBadBlockCallback(onBadBlock badBlockFn) { + d.badBlock = onBadBlock +} + +// BeaconSync is the post-merge version of the chain synchronization, where the +// chain is not downloaded from genesis onward, rather from trusted head announces +// backwards. +// +// Internally backfilling and state sync is done the same way, but the header +// retrieval and scheduling is replaced. +func (d *Downloader) BeaconSync(mode SyncMode, head *types.Header) error { + return d.beaconSync(mode, head, true) +} + +// BeaconExtend is an optimistic version of BeaconSync, where an attempt is made +// to extend the current beacon chain with a new header, but in case of a mismatch, +// the old sync will not be terminated and reorged, rather the new head is dropped. +// +// This is useful if a beacon client is feeding us large chunks of payloads to run, +// but is not setting the head after each. +func (d *Downloader) BeaconExtend(mode SyncMode, head *types.Header) error { + return d.beaconSync(mode, head, false) +} + +// beaconSync is the post-merge version of the chain synchronization, where the +// chain is not downloaded from genesis onward, rather from trusted head announces +// backwards. +// +// Internally backfilling and state sync is done the same way, but the header +// retrieval and scheduling is replaced. +func (d *Downloader) beaconSync(mode SyncMode, head *types.Header, force bool) error { + // When the downloader starts a sync cycle, it needs to be aware of the sync + // mode to use (full, snap). To keep the skeleton chain oblivious, inject the + // mode into the backfiller directly. + // + // Super crazy dangerous type cast. Should be fine (TM), we're only using a + // different backfiller implementation for skeleton tests. + d.skeleton.filler.(*beaconBackfiller).setMode(mode) + + // Signal the skeleton sync to switch to a new head, however it wants + if err := d.skeleton.Sync(head, force); err != nil { + return err + } + return nil +} + +// findBeaconAncestor tries to locate the common ancestor link of the local chain +// and the beacon chain just requested. In the general case when our node was in +// sync and on the correct chain, checking the top N links should already get us +// a match. In the rare scenario when we ended up on a long reorganisation (i.e. +// none of the head links match), we do a binary search to find the ancestor. +func (d *Downloader) findBeaconAncestor() (uint64, error) { + // Figure out the current local head position + var chainHead *types.Header + + switch d.getMode() { + case FullSync: + chainHead = d.blockchain.CurrentBlock().Header() + case SnapSync: + chainHead = d.blockchain.CurrentFastBlock().Header() + default: + chainHead = d.lightchain.CurrentHeader() + } + number := chainHead.Number.Uint64() + + // Retrieve the skeleton bounds and ensure they are linked to the local chain + beaconHead, beaconTail, err := d.skeleton.Bounds() + if err != nil { + // This is a programming error. The chain backfiller was called with an + // invalid beacon sync state. Ideally we would panic here, but erroring + // gives us at least a remote chance to recover. It's still a big fault! 
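+		// (Bounds reports the skeleton's current head and tail headers; the
+		// tail is the oldest header still retained, whose parent must already
+		// be known locally for backfilling to proceed, as checked just below.)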
+ log.Error("Failed to retrieve beacon bounds", "err", err) + return 0, err + } + var linked bool + switch d.getMode() { + case FullSync: + linked = d.blockchain.HasBlock(beaconTail.ParentHash, beaconTail.Number.Uint64()-1) + case SnapSync: + linked = d.blockchain.HasFastBlock(beaconTail.ParentHash, beaconTail.Number.Uint64()-1) + default: + linked = d.blockchain.HasHeader(beaconTail.ParentHash, beaconTail.Number.Uint64()-1) + } + if !linked { + // This is a programming error. The chain backfiller was called with a + // tail that's not linked to the local chain. Whilst this should never + // happen, there might be some weirdnesses if beacon sync backfilling + // races with the user (or beacon client) calling setHead. Whilst panic + // would be the ideal thing to do, it is safer long term to attempt a + // recovery and fix any noticed issue after the fact. + log.Error("Beacon sync linkup unavailable", "number", beaconTail.Number.Uint64()-1, "hash", beaconTail.ParentHash) + return 0, fmt.Errorf("beacon linkup unavailable locally: %d [%x]", beaconTail.Number.Uint64()-1, beaconTail.ParentHash) + } + // Binary search to find the ancestor + start, end := beaconTail.Number.Uint64()-1, number + if number := beaconHead.Number.Uint64(); end > number { + // This shouldn't really happen in a healty network, but if the consensus + // clients feeds us a shorter chain as the canonical, we should not attempt + // to access non-existent skeleton items. + log.Warn("Beacon head lower than local chain", "beacon", number, "local", end) + end = number + } + for start+1 < end { + // Split our chain interval in two, and request the hash to cross check + check := (start + end) / 2 + + h := d.skeleton.Header(check) + n := h.Number.Uint64() + + var known bool + switch d.getMode() { + case FullSync: + known = d.blockchain.HasBlock(h.Hash(), n) + case SnapSync: + known = d.blockchain.HasFastBlock(h.Hash(), n) + default: + known = d.lightchain.HasHeader(h.Hash(), n) + } + if !known { + end = check + continue + } + start = check + } + return start, nil +} + +// fetchBeaconHeaders feeds skeleton headers to the downloader queue for scheduling +// until sync errors or is finished. +func (d *Downloader) fetchBeaconHeaders(from uint64) error { + var head *types.Header + _, tail, err := d.skeleton.Bounds() + if err != nil { + return err + } + // A part of headers are not in the skeleton space, try to resolve + // them from the local chain. Note the range should be very short + // and it should only happen when there are less than 64 post-merge + // blocks in the network. 
+ var localHeaders []*types.Header + if from < tail.Number.Uint64() { + count := tail.Number.Uint64() - from + if count > uint64(fsMinFullBlocks) { + return fmt.Errorf("invalid origin (%d) of beacon sync (%d)", from, tail.Number) + } + localHeaders = d.readHeaderRange(tail, int(count)) + log.Warn("Retrieved beacon headers from local", "from", from, "count", count) + } + for { + // Some beacon headers might have appeared since the last cycle, make + // sure we're always syncing to all available ones + head, _, err = d.skeleton.Bounds() + if err != nil { + return err + } + // If the pivot became stale (older than 2*64-8 (bit of wiggle room)), + // move it ahead to HEAD-64 + d.pivotLock.Lock() + if d.pivotHeader != nil { + if head.Number.Uint64() > d.pivotHeader.Number.Uint64()+2*uint64(fsMinFullBlocks)-8 { + // Retrieve the next pivot header, either from skeleton chain + // or the filled chain + number := head.Number.Uint64() - uint64(fsMinFullBlocks) + + log.Warn("Pivot seemingly stale, moving", "old", d.pivotHeader.Number, "new", number) + if d.pivotHeader = d.skeleton.Header(number); d.pivotHeader == nil { + if number < tail.Number.Uint64() { + dist := tail.Number.Uint64() - number + if len(localHeaders) >= int(dist) { + d.pivotHeader = localHeaders[dist-1] + log.Warn("Retrieved pivot header from local", "number", d.pivotHeader.Number, "hash", d.pivotHeader.Hash(), "latest", head.Number, "oldest", tail.Number) + } + } + } + // Print an error log and return directly in case the pivot header + // is still not found. It means the skeleton chain is not linked + // correctly with the local chain. + if d.pivotHeader == nil { + log.Error("Pivot header is not found", "number", number) + d.pivotLock.Unlock() + return errNoPivotHeader + } + // Write out the pivot into the database so a rollback beyond + // it will reenable snap sync and update the state root that + // the state syncer will be downloading + rawdb.WriteLastPivotNumber(d.stateDB, d.pivotHeader.Number.Uint64()) + } + } + d.pivotLock.Unlock() + + // Retrieve a batch of headers and feed it to the header processor + var ( + headers = make([]*types.Header, 0, maxHeadersProcess) + hashes = make([]common.Hash, 0, maxHeadersProcess) + ) + for i := 0; i < maxHeadersProcess && from <= head.Number.Uint64(); i++ { + header := d.skeleton.Header(from) + + // The header is not found in skeleton space; try to find it in the local chain. + if header == nil && from < tail.Number.Uint64() { + dist := tail.Number.Uint64() - from + if len(localHeaders) >= int(dist) { + header = localHeaders[dist-1] + } + } + // The header is still missing, so the beacon sync is corrupted; bail out + // with an error here.
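For reference, the pivot-staleness rule above compares head and pivot against fixed offsets of fsMinFullBlocks. A standalone sketch of the arithmetic, mirroring the constants used in this diff:

```go
package main

import "fmt"

// fsMinFullBlocks mirrors the downloader constant: the number of blocks
// retrieved fully even in snap sync.
const fsMinFullBlocks = 64

// pivotStale reports whether the pivot fell more than 2*64-8 blocks behind
// the head, the wiggle-room threshold used in the code above.
func pivotStale(head, pivot uint64) bool {
	return head > pivot+2*fsMinFullBlocks-8
}

// nextPivot is where a stale pivot is moved: fsMinFullBlocks behind head.
func nextPivot(head uint64) uint64 {
	return head - fsMinFullBlocks
}

func main() {
	head, pivot := uint64(1000), uint64(850)
	if pivotStale(head, pivot) { // 1000 > 850+120, so the pivot is stale
		fmt.Println("move pivot to", nextPivot(head)) // 936
	}
}
```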
+ if header == nil { + return fmt.Errorf("missing beacon header %d", from) + } + headers = append(headers, header) + hashes = append(hashes, headers[i].Hash()) + from++ + } + if len(headers) > 0 { + log.Trace("Scheduling new beacon headers", "count", len(headers), "from", from-uint64(len(headers))) + select { + case d.headerProcCh <- &headerTask{ + headers: headers, + hashes: hashes, + }: + case <-d.cancelCh: + return errCanceled + } + } + // If we still have headers to import, loop and keep pushing them + if from <= head.Number.Uint64() { + continue + } + // If the pivot block is committed, signal header sync termination + if atomic.LoadInt32(&d.committed) == 1 { + select { + case d.headerProcCh <- nil: + return nil + case <-d.cancelCh: + return errCanceled + } + } + // State sync still going, wait a bit for new headers and retry + log.Trace("Pivot not yet committed, waiting...") + select { + case <-time.After(fsHeaderContCheck): + case <-d.cancelCh: + return errCanceled + } + } +} diff --git a/eth/downloader/downloader.go b/eth/downloader/downloader.go index 839ebe733..6d86ee7e3 100644 --- a/eth/downloader/downloader.go +++ b/eth/downloader/downloader.go @@ -30,14 +30,11 @@ import ( "github.com/scroll-tech/go-ethereum/core/rawdb" "github.com/scroll-tech/go-ethereum/core/state/snapshot" "github.com/scroll-tech/go-ethereum/core/types" - "github.com/scroll-tech/go-ethereum/eth/protocols/eth" "github.com/scroll-tech/go-ethereum/eth/protocols/snap" "github.com/scroll-tech/go-ethereum/ethdb" "github.com/scroll-tech/go-ethereum/event" "github.com/scroll-tech/go-ethereum/log" - "github.com/scroll-tech/go-ethereum/metrics" "github.com/scroll-tech/go-ethereum/params" - "github.com/scroll-tech/go-ethereum/trie" ) var ( @@ -45,7 +42,6 @@ var ( MaxHeaderFetch = 192 // Amount of block headers to be fetched per retrieval request MaxSkeletonSize = 128 // Number of header fetches to need for a skeleton assembly MaxReceiptFetch = 256 // Amount of transaction receipts to allow fetching per request - MaxStateFetch = 384 // Amount of node state values to allow fetching per request maxQueuedHeaders = 32 * 1024 // [eth/62] Maximum number of headers to queue for import (DOS protection) maxHeadersProcess = 2048 // Number of header download results to import at once into the chain @@ -56,11 +52,11 @@ var ( reorgProtThreshold = 48 // Threshold number of recent blocks to disable mini reorg protection reorgProtHeaderDelay = 2 // Number of headers to delay delivering to cover mini reorgs - fsHeaderCheckFrequency = 100 // Verification frequency of the downloaded headers during fast sync + fsHeaderCheckFrequency = 100 // Verification frequency of the downloaded headers during snap sync fsHeaderSafetyNet = 2048 // Number of headers to discard in case a chain violation is detected fsHeaderForceVerify = 24 // Number of headers to verify before and after the pivot to accept it fsHeaderContCheck = 3 * time.Second // Time interval to check for header continuations during state download - fsMinFullBlocks = 64 // Number of blocks to retrieve fully even in fast sync + fsMinFullBlocks = 64 // Number of blocks to retrieve fully even in snap sync ) var ( @@ -80,27 +76,40 @@ var ( errCancelStateFetch = errors.New("state data download canceled (requested)") errCancelContentProcessing = errors.New("content processing canceled (requested)") errCanceled = errors.New("syncing canceled (requested)") - errNoSyncActive = errors.New("no sync active") errTooOld = errors.New("peer's protocol version too old") errNoAncestorFound = 
errors.New("no common ancestor found") + errNoPivotHeader = errors.New("pivot header is not found") + ErrMergeTransition = errors.New("legacy sync reached the merge") ) +// peerDropFn is a callback type for dropping a peer detected as malicious. +type peerDropFn func(id string) + +// badBlockFn is a callback for the async beacon sync to notify the caller that +// the origin header requested to sync to, produced a chain with a bad block. +type badBlockFn func(invalid *types.Header, origin *types.Header) + +// headerTask is a set of downloaded headers to queue along with their precomputed +// hashes to avoid constant rehashing. +type headerTask struct { + headers []*types.Header + hashes []common.Hash +} + type Downloader struct { mode uint32 // Synchronisation mode defining the strategy used (per sync cycle), use d.getMode() to get the SyncMode mux *event.TypeMux // Event multiplexer to announce sync operation events - checkpoint uint64 // Checkpoint block number to enforce head against (e.g. fast sync) + checkpoint uint64 // Checkpoint block number to enforce head against (e.g. snap sync) genesis uint64 // Genesis block number to limit sync to (e.g. light client CHT) queue *queue // Scheduler for selecting the hashes to download peers *peerSet // Set of active peers from which download can proceed - stateDB ethdb.Database // Database to state sync into (and deduplicate via) - stateBloom *trie.SyncBloom // Bloom filter for fast trie node and contract code existence checks + stateDB ethdb.Database // Database to state sync into (and deduplicate via) // Statistics - syncStatsChainOrigin uint64 // Origin block number where syncing started at - syncStatsChainHeight uint64 // Highest block number known when syncing started - syncStatsState stateSyncStats + syncStatsChainOrigin uint64 // Origin block number where syncing started at + syncStatsChainHeight uint64 // Highest block number known when syncing started syncStatsLock sync.RWMutex // Lock protecting the sync stats fields lightchain LightChain @@ -108,6 +117,7 @@ type Downloader struct { // Callbacks dropPeer peerDropFn // Drops a peer for misbehaving + badBlock badBlockFn // Reports a block as rejected by the chain // Status synchroniseMock func(id string, hash common.Hash) error // Replacement for synchronise during testing @@ -117,12 +127,10 @@ type Downloader struct { ancientLimit uint64 // The maximum block number which can be regarded as ancient data. // Channels - headerCh chan dataPack // Channel receiving inbound block headers - bodyCh chan dataPack // Channel receiving inbound block bodies - receiptCh chan dataPack // Channel receiving inbound receipts - bodyWakeCh chan bool // Channel to signal the block body fetcher of new tasks - receiptWakeCh chan bool // Channel to signal the receipt fetcher of new tasks - headerProcCh chan []*types.Header // Channel to feed the header processor new tasks + headerProcCh chan *headerTask // Channel to feed the header processor new tasks + + // Skeleton sync + skeleton *skeleton // Header skeleton to backfill the chain with (eth2 mode) // State sync pivotHeader *types.Header // Pivot block header to dynamically push the syncing state root @@ -131,8 +139,6 @@ type Downloader struct { snapSync bool // Whether to run state sync over the snap protocol SnapSyncer *snap.Syncer // TODO(karalabe): make private! 
hack for now stateSyncStart chan *stateSync - trackStateReq chan *stateReq - stateCh chan dataPack // Channel receiving inbound node state data // Cancellation and termination cancelPeer string // Identifier of the peer currently being used as the master (cancel on drop) @@ -171,14 +177,14 @@ type LightChain interface { SetHead(uint64) error } -// BlockChain encapsulates functions required to sync a (full or fast) blockchain. +// BlockChain encapsulates functions required to sync a (full or snap) blockchain. type BlockChain interface { LightChain // HasBlock verifies a block's presence in the local chain. HasBlock(common.Hash, uint64) bool - // HasFastBlock verifies a fast block's presence in the local chain. + // HasFastBlock verifies a snap block's presence in the local chain. HasFastBlock(common.Hash, uint64) bool // GetBlockByHash retrieves a block from the local chain. @@ -187,11 +193,11 @@ type BlockChain interface { // CurrentBlock retrieves the head block from the local chain. CurrentBlock() *types.Block - // CurrentFastBlock retrieves the head fast block from the local chain. + // CurrentFastBlock retrieves the head snap block from the local chain. CurrentFastBlock() *types.Block - // FastSyncCommitHead directly commits the head block to a certain entity. - FastSyncCommitHead(common.Hash) error + // SnapSyncCommitHead directly commits the head block to a certain entity. + SnapSyncCommitHead(common.Hash) error // InsertChain inserts a batch of blocks into the local chain. InsertChain(types.Blocks) (int, error) @@ -204,13 +210,12 @@ type BlockChain interface { } // New creates a new downloader to fetch hashes and blocks from remote peers. -func New(checkpoint uint64, stateDb ethdb.Database, stateBloom *trie.SyncBloom, mux *event.TypeMux, chain BlockChain, lightchain LightChain, dropPeer peerDropFn) *Downloader { +func New(checkpoint uint64, stateDb ethdb.Database, mux *event.TypeMux, chain BlockChain, lightchain LightChain, dropPeer peerDropFn, success func()) *Downloader { if lightchain == nil { lightchain = chain } dl := &Downloader{ stateDB: stateDb, - stateBloom: stateBloom, mux: mux, checkpoint: checkpoint, queue: newQueue(blockCacheMaxItems, blockCacheInitialItems), @@ -218,21 +223,13 @@ func New(checkpoint uint64, stateDb ethdb.Database, stateBloom *trie.SyncBloom, blockchain: chain, lightchain: lightchain, dropPeer: dropPeer, - headerCh: make(chan dataPack, 1), - bodyCh: make(chan dataPack, 1), - receiptCh: make(chan dataPack, 1), - bodyWakeCh: make(chan bool, 1), - receiptWakeCh: make(chan bool, 1), - headerProcCh: make(chan []*types.Header, 1), + headerProcCh: make(chan *headerTask, 1), quitCh: make(chan struct{}), - stateCh: make(chan dataPack), SnapSyncer: snap.NewSyncer(stateDb), stateSyncStart: make(chan *stateSync), - syncStatsState: stateSyncStats{ - processed: rawdb.ReadFastTrieProgress(stateDb), - }, - trackStateReq: make(chan *stateReq), } + dl.skeleton = newSkeleton(stateDb, dl.peers, dropPeer, newBeaconBackfiller(dl, success)) + go dl.stateFetcher() return dl } @@ -241,7 +238,7 @@ func New(checkpoint uint64, stateDb ethdb.Database, stateBloom *trie.SyncBloom, // block where synchronisation started at (may have failed/suspended); the block // or header sync is currently at; and the latest known block which the sync targets. // -// In addition, during the state download phase of fast synchronisation the number +// In addition, during the state download phase of snap synchronisation the number // of processed and the total number of known states are also returned. 
Otherwise // these are zero. func (d *Downloader) Progress() ethereum.SyncProgress { @@ -254,19 +251,31 @@ func (d *Downloader) Progress() ethereum.SyncProgress { switch { case d.blockchain != nil && mode == FullSync: current = d.blockchain.CurrentBlock().NumberU64() - case d.blockchain != nil && mode == FastSync: + case d.blockchain != nil && mode == SnapSync: current = d.blockchain.CurrentFastBlock().NumberU64() case d.lightchain != nil: current = d.lightchain.CurrentHeader().Number.Uint64() default: log.Error("Unknown downloader chain/mode combo", "light", d.lightchain != nil, "full", d.blockchain != nil, "mode", mode) } + progress, pending := d.SnapSyncer.Progress() + return ethereum.SyncProgress{ - StartingBlock: d.syncStatsChainOrigin, - CurrentBlock: current, - HighestBlock: d.syncStatsChainHeight, - PulledStates: d.syncStatsState.processed, - KnownStates: d.syncStatsState.processed + d.syncStatsState.pending, + StartingBlock: d.syncStatsChainOrigin, + CurrentBlock: current, + HighestBlock: d.syncStatsChainHeight, + SyncedAccounts: progress.AccountSynced, + SyncedAccountBytes: uint64(progress.AccountBytes), + SyncedBytecodes: progress.BytecodeSynced, + SyncedBytecodeBytes: uint64(progress.BytecodeBytes), + SyncedStorage: progress.StorageSynced, + SyncedStorageBytes: uint64(progress.StorageBytes), + HealedTrienodes: progress.TrienodeHealSynced, + HealedTrienodeBytes: uint64(progress.TrienodeHealBytes), + HealedBytecodes: progress.BytecodeHealSynced, + HealedBytecodeBytes: uint64(progress.BytecodeHealBytes), + HealingTrienodes: pending.TrienodeHeal, + HealingBytecode: pending.BytecodeHeal, } } @@ -320,10 +329,10 @@ func (d *Downloader) UnregisterPeer(id string) error { return nil } -// Synchronise tries to sync up our local block chain with a remote peer, both +// LegacySync tries to sync up our local block chain with a remote peer, both // adding various sanity checks as well as wrapping it with various log entries. -func (d *Downloader) Synchronise(id string, head common.Hash, td *big.Int, mode SyncMode) error { - err := d.synchronise(id, head, td, mode) +func (d *Downloader) LegacySync(id string, head common.Hash, td, ttd *big.Int, mode SyncMode) error { + err := d.synchronise(id, head, td, ttd, mode, false, nil) switch err { case nil, errBusy, errCanceled: @@ -342,6 +351,9 @@ func (d *Downloader) Synchronise(id string, head common.Hash, td *big.Int, mode } return err } + if errors.Is(err, ErrMergeTransition) { + return err // This is an expected fault, don't keep printing it in a spin-loop + } log.Warn("Synchronisation failed, retrying", "err", err) return err } @@ -349,7 +361,21 @@ func (d *Downloader) Synchronise(id string, head common.Hash, td *big.Int, mode // synchronise will select the peer and use it for synchronising. If an empty string is given // it will use the best peer possible and synchronize if its TD is higher than our own. If any of the // checks fail an error will be returned. This method is synchronous. -func (d *Downloader) synchronise(id string, hash common.Hash, td *big.Int, mode SyncMode) error { +func (d *Downloader) synchronise(id string, hash common.Hash, td, ttd *big.Int, mode SyncMode, beaconMode bool, beaconPing chan struct{}) error { + // The beacon header syncer is async. It will start this synchronization and + // will continue doing other tasks. However, if synchronization needs to be + // cancelled, the syncer needs to know if we reached the startup point (and + // inited the cancel channel) or not yet.
Make sure that we'll signal even in + // case of a failure. + if beaconPing != nil { + defer func() { + select { + case <-beaconPing: // already notified + default: + close(beaconPing) // weird exit condition, notify that it's safe to cancel (the nothing) + } + }() + } // Mock out the synchronisation if testing if d.synchroniseMock != nil { return d.synchroniseMock(id, hash) @@ -364,47 +390,24 @@ func (d *Downloader) synchronise(id string, hash common.Hash, td *big.Int, mode if atomic.CompareAndSwapInt32(&d.notified, 0, 1) { log.Info("Block synchronisation started") } - // If we are already full syncing, but have a fast-sync bloom filter laying - // around, make sure it doesn't use memory any more. This is a special case - // when the user attempts to fast sync a new empty network. - if mode == FullSync && d.stateBloom != nil { - d.stateBloom.Close() - } - // If snap sync was requested, create the snap scheduler and switch to fast - // sync mode. Long term we could drop fast sync or merge the two together, - // but until snap becomes prevalent, we should support both. TODO(karalabe). if mode == SnapSync { - if !d.snapSync { - // Snap sync uses the snapshot namespace to store potentially flakey data until - // sync completely heals and finishes. Pause snapshot maintenance in the mean - // time to prevent access. - if snapshots := d.blockchain.Snapshots(); snapshots != nil { // Only nil in tests - snapshots.Disable() - } - log.Warn("Enabling snapshot sync prototype") - d.snapSync = true + // Snap sync uses the snapshot namespace to store potentially flakey data until + // sync completely heals and finishes. Pause snapshot maintenance in the mean- + // time to prevent access. + if snapshots := d.blockchain.Snapshots(); snapshots != nil { // Only nil in tests + snapshots.Disable() } - mode = FastSync } // Reset the queue, peer set and wake channels to clean any internal leftover state d.queue.Reset(blockCacheMaxItems, blockCacheInitialItems) d.peers.Reset() - for _, ch := range []chan bool{d.bodyWakeCh, d.receiptWakeCh} { + for _, ch := range []chan bool{d.queue.blockWakeCh, d.queue.receiptWakeCh} { select { case <-ch: default: } } - for _, ch := range []chan dataPack{d.headerCh, d.bodyCh, d.receiptCh} { - for empty := false; !empty; { - select { - case <-ch: - default: - empty = true - } - } - } for empty := false; !empty; { select { case <-d.headerProcCh: @@ -424,11 +427,17 @@ func (d *Downloader) synchronise(id string, hash common.Hash, td *big.Int, mode atomic.StoreUint32(&d.mode, uint32(mode)) // Retrieve the origin peer and initiate the downloading process - p := d.peers.Peer(id) - if p == nil { - return errUnknownPeer + var p *peerConnection + if !beaconMode { // Beacon mode doesn't need a peer to sync from + p = d.peers.Peer(id) + if p == nil { + return errUnknownPeer + } + } + if beaconPing != nil { + close(beaconPing) } - return d.syncWithPeer(p, hash, td) + return d.syncWithPeer(p, hash, td, ttd, beaconMode) } func (d *Downloader) getMode() SyncMode { @@ -437,7 +446,7 @@ func (d *Downloader) getMode() SyncMode { // syncWithPeer starts a block synchronization based on the hash chain from the // specified peer and head hash. 
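The beaconPing handling above relies on a close-once idiom: a deferred select that closes the channel only if the happy path has not already done so, guaranteeing the async caller is signalled even on early failure. A minimal sketch of that pattern (hypothetical names):

```go
package main

import "fmt"

// signalOnce closes done on exit unless it was already closed, so the
// waiter is notified even when the function bails out early with an error.
func signalOnce(done chan struct{}, fail bool) error {
	defer func() {
		select {
		case <-done: // already closed on the happy path
		default:
			close(done) // early exit: still notify the waiter
		}
	}()
	if fail {
		return fmt.Errorf("early failure")
	}
	close(done) // happy-path notification
	return nil
}

func main() {
	done := make(chan struct{})
	_ = signalOnce(done, true)
	<-done // returns immediately either way
	fmt.Println("signaled")
}
```

Receiving from a closed channel succeeds immediately, so the deferred select never double-closes on the happy path.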
-func (d *Downloader) syncWithPeer(p *peerConnection, hash common.Hash, td *big.Int) (err error) { +func (d *Downloader) syncWithPeer(p *peerConnection, hash common.Hash, td, ttd *big.Int, beaconMode bool) (err error) { d.mux.Post(StartEvent{}) defer func() { // reset on error @@ -448,33 +457,78 @@ func (d *Downloader) syncWithPeer(p *peerConnection, hash common.Hash, td *big.I d.mux.Post(DoneEvent{latest}) } }() - if p.version < eth.ETH66 { - return fmt.Errorf("%w: advertized %d < required %d", errTooOld, p.version, eth.ETH66) - } mode := d.getMode() - log.Debug("Synchronising with the network", "peer", p.id, "eth", p.version, "head", hash, "td", td, "mode", mode) + if !beaconMode { + log.Debug("Synchronising with the network", "peer", p.id, "eth", p.version, "head", hash, "td", td, "mode", mode) + } else { + log.Debug("Backfilling with the network", "mode", mode) + } defer func(start time.Time) { log.Debug("Synchronisation terminated", "elapsed", common.PrettyDuration(time.Since(start))) }(time.Now()) // Look up the sync boundaries: the common ancestor and the target block - latest, pivot, err := d.fetchHead(p) - if err != nil { - return err + var latest, pivot *types.Header + if !beaconMode { + // In legacy mode, use the master peer to retrieve the headers from + latest, pivot, err = d.fetchHead(p) + if err != nil { + return err + } + } else { + // In beacon mode, use the skeleton chain to retrieve the headers from + latest, _, err = d.skeleton.Bounds() + if err != nil { + return err + } + if latest.Number.Uint64() > uint64(fsMinFullBlocks) { + number := latest.Number.Uint64() - uint64(fsMinFullBlocks) + + // Retrieve the pivot header from the skeleton chain segment but + // fall back to the local chain if it's not found in skeleton space. + if pivot = d.skeleton.Header(number); pivot == nil { + _, oldest, _ := d.skeleton.Bounds() // error is already checked + if number < oldest.Number.Uint64() { + count := int(oldest.Number.Uint64() - number) // it's capped by fsMinFullBlocks + headers := d.readHeaderRange(oldest, count) + if len(headers) == count { + pivot = headers[len(headers)-1] + log.Warn("Retrieved pivot header from local", "number", pivot.Number, "hash", pivot.Hash(), "latest", latest.Number, "oldest", oldest.Number) + } + } + } + // Print an error log and return directly in case the pivot header + // is still not found. It means the skeleton chain is not linked + // correctly with the local chain. + if pivot == nil { + log.Error("Pivot header is not found", "number", number) + return errNoPivotHeader + } + } } - if mode == FastSync && pivot == nil { - // If no pivot block was returned, the head is below the min full block - // threshold (i.e. new chain). In that case we won't really fast sync - // anyway, but still need a valid pivot block to avoid some code hitting - // nil panics on an access. + // If no pivot block was returned, the head is below the min full block + // threshold (i.e. new chain). In that case we won't really snap sync + // anyway, but still need a valid pivot block to avoid some code hitting + // nil panics on access.
+ if mode == SnapSync && pivot == nil { pivot = d.blockchain.CurrentBlock().Header() } height := latest.Number.Uint64() - origin, err := d.findAncestor(p, latest) - if err != nil { - return err + var origin uint64 + if !beaconMode { + // In legacy mode, reach out to the network and find the ancestor + origin, err = d.findAncestor(p, latest) + if err != nil { + return err + } + } else { + // In beacon mode, use the skeleton chain for the ancestor lookup + origin, err = d.findBeaconAncestor() + if err != nil { + return err + } } d.syncStatsLock.Lock() if d.syncStatsChainHeight <= origin || d.syncStatsChainOrigin > origin { @@ -483,8 +537,8 @@ func (d *Downloader) syncWithPeer(p *peerConnection, hash common.Hash, td *big.I d.syncStatsChainHeight = height d.syncStatsLock.Unlock() - // Ensure our origin point is below any fast sync pivot point - if mode == FastSync { + // Ensure our origin point is below any snap sync pivot point + if mode == SnapSync { if height <= uint64(fsMinFullBlocks) { origin = 0 } else { @@ -493,17 +547,17 @@ func (d *Downloader) syncWithPeer(p *peerConnection, hash common.Hash, td *big.I origin = pivotNumber - 1 } // Write out the pivot into the database so a rollback beyond it will - // reenable fast sync + // reenable snap sync rawdb.WriteLastPivotNumber(d.stateDB, pivotNumber) } } d.committed = 1 - if mode == FastSync && pivot.Number.Uint64() != 0 { + if mode == SnapSync && pivot.Number.Uint64() != 0 { d.committed = 0 } - if mode == FastSync { + if mode == SnapSync { // Set the ancient data limitation. - // If we are running fast sync, all block data older than ancientLimit will be + // If we are running snap sync, all block data older than ancientLimit will be // written to the ancient store. More recent data will be written to the active // database and will wait for the freezer to migrate. 
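The origin clamping above keeps the sync origin strictly below the snap-sync pivot so the pivot state gets re-downloaded, and skips the pivot entirely for chains shorter than fsMinFullBlocks. A compact sketch of that rule, assuming the same constant:

```go
package main

import "fmt"

const fsMinFullBlocks = 64 // mirrors the downloader constant

// clampOrigin applies the rule above: with a chain at most fsMinFullBlocks
// long there is no pivot, so sync from genesis; otherwise make sure the
// origin sits strictly below the pivot.
func clampOrigin(origin, height, pivot uint64) uint64 {
	if height <= fsMinFullBlocks {
		return 0
	}
	if pivot <= origin {
		return pivot - 1
	}
	return origin
}

func main() {
	fmt.Println(clampOrigin(500, 1000, 936)) // 500: already below the pivot
	fmt.Println(clampOrigin(950, 1000, 936)) // 935: clamped below the pivot
	fmt.Println(clampOrigin(30, 50, 0))      // 0: chain too short for a pivot
}
```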
// @@ -545,20 +599,28 @@ func (d *Downloader) syncWithPeer(p *peerConnection, hash common.Hash, td *big.I if d.syncInitHook != nil { d.syncInitHook(origin, height) } + var headerFetcher func() error + if !beaconMode { + // In legacy mode, headers are retrieved from the network + headerFetcher = func() error { return d.fetchHeaders(p, origin+1, latest.Number.Uint64()) } + } else { + // In beacon mode, headers are served by the skeleton syncer + headerFetcher = func() error { return d.fetchBeaconHeaders(origin + 1) } + } fetchers := []func() error{ - func() error { return d.fetchHeaders(p, origin+1) }, // Headers are always retrieved - func() error { return d.fetchBodies(origin + 1) }, // Bodies are retrieved during normal and fast sync - func() error { return d.fetchReceipts(origin + 1) }, // Receipts are retrieved during fast sync - func() error { return d.processHeaders(origin+1, td) }, + headerFetcher, // Headers are always retrieved + func() error { return d.fetchBodies(origin+1, beaconMode) }, // Bodies are retrieved during normal and snap sync + func() error { return d.fetchReceipts(origin+1, beaconMode) }, // Receipts are retrieved during snap sync + func() error { return d.processHeaders(origin+1, td, ttd, beaconMode) }, } - if mode == FastSync { + if mode == SnapSync { d.pivotLock.Lock() d.pivotHeader = pivot d.pivotLock.Unlock() - fetchers = append(fetchers, func() error { return d.processFastSyncContent() }) + fetchers = append(fetchers, func() error { return d.processSnapSyncContent() }) } else if mode == FullSync { - fetchers = append(fetchers, d.processFullSyncContent) + fetchers = append(fetchers, func() error { return d.processFullSyncContent(ttd, beaconMode) }) } return d.spawnSync(fetchers) } @@ -624,9 +686,9 @@ func (d *Downloader) Terminate() { case <-d.quitCh: default: close(d.quitCh) - } - if d.stateBloom != nil { - d.stateBloom.Close() + + // Terminate the internal beacon syncer + d.skeleton.Terminate() } d.quitLock.Unlock() @@ -643,68 +705,48 @@ func (d *Downloader) fetchHead(p *peerConnection) (head *types.Header, pivot *ty // Request the advertised remote head block and wait for the response latest, _ := p.peer.Head() fetch := 1 - if mode == FastSync { + if mode == SnapSync { fetch = 2 // head + pivot headers } - go p.peer.RequestHeadersByHash(latest, fetch, fsMinFullBlocks-1, true) - - ttl := d.peers.rates.TargetTimeout() - timeout := time.After(ttl) - for { - select { - case <-d.cancelCh: - return nil, nil, errCanceled - - case packet := <-d.headerCh: - // Discard anything not from the origin peer - if packet.PeerId() != p.id { - log.Debug("Received headers from incorrect peer", "peer", packet.PeerId()) - break - } - // Make sure the peer gave us at least one and at most the requested headers - headers := packet.(*headerPack).headers - if len(headers) == 0 || len(headers) > fetch { - return nil, nil, fmt.Errorf("%w: returned headers %d != requested %d", errBadPeer, len(headers), fetch) - } - // The first header needs to be the head, validate against the checkpoint - // and request. If only 1 header was returned, make sure there's no pivot - // or there was not one requested. 
- head := headers[0] - if (mode == FastSync || mode == LightSync) && head.Number.Uint64() < d.checkpoint { - return nil, nil, fmt.Errorf("%w: remote head %d below checkpoint %d", errUnsyncedPeer, head.Number, d.checkpoint) - } - if len(headers) == 1 { - if mode == FastSync && head.Number.Uint64() > uint64(fsMinFullBlocks) { - return nil, nil, fmt.Errorf("%w: no pivot included along head header", errBadPeer) - } - p.log.Debug("Remote head identified, no pivot", "number", head.Number, "hash", head.Hash()) - return head, nil, nil - } - // At this point we have 2 headers in total and the first is the - // validated head of the chain. Check the pivot number and return, - pivot := headers[1] - if pivot.Number.Uint64() != head.Number.Uint64()-uint64(fsMinFullBlocks) { - return nil, nil, fmt.Errorf("%w: remote pivot %d != requested %d", errInvalidChain, pivot.Number, head.Number.Uint64()-uint64(fsMinFullBlocks)) - } - return head, pivot, nil - - case <-timeout: - p.log.Debug("Waiting for head header timed out", "elapsed", ttl) - return nil, nil, errTimeout - - case <-d.bodyCh: - case <-d.receiptCh: - // Out of bounds delivery, ignore + headers, hashes, err := d.fetchHeadersByHash(p, latest, fetch, fsMinFullBlocks-1, true) + if err != nil { + return nil, nil, err + } + // Make sure the peer gave us at least one and at most the requested headers + if len(headers) == 0 || len(headers) > fetch { + return nil, nil, fmt.Errorf("%w: returned headers %d != requested %d", errBadPeer, len(headers), fetch) + } + // The first header needs to be the head, validate against the checkpoint + // and request. If only 1 header was returned, make sure there's no pivot + // or there was not one requested. + head = headers[0] + if (mode == SnapSync || mode == LightSync) && head.Number.Uint64() < d.checkpoint { + return nil, nil, fmt.Errorf("%w: remote head %d below checkpoint %d", errUnsyncedPeer, head.Number, d.checkpoint) + } + if len(headers) == 1 { + if mode == SnapSync && head.Number.Uint64() > uint64(fsMinFullBlocks) { + return nil, nil, fmt.Errorf("%w: no pivot included along head header", errBadPeer) } + p.log.Debug("Remote head identified, no pivot", "number", head.Number, "hash", hashes[0]) + return head, nil, nil + } + // At this point we have 2 headers in total and the first is the + // validated head of the chain. Check the pivot number and return. + pivot = headers[1] + if pivot.Number.Uint64() != head.Number.Uint64()-uint64(fsMinFullBlocks) { + return nil, nil, fmt.Errorf("%w: remote pivot %d != requested %d", errInvalidChain, pivot.Number, head.Number.Uint64()-uint64(fsMinFullBlocks)) } + return head, pivot, nil } // calculateRequestSpan calculates what headers to request from a peer when trying to determine the // common ancestor.
// It returns parameters to be used for peer.RequestHeadersByNumber: -// from - starting block number -// count - number of headers to request -// skip - number of headers to skip +// +// from - starting block number +// count - number of headers to request +// skip - number of headers to skip +// // and also returns 'max', the last block which is expected to be returned by the remote peers, // given the (from,count,skip) func calculateRequestSpan(remoteHeight, localHeight uint64) (int64, int, int, uint64) { @@ -767,7 +809,7 @@ func (d *Downloader) findAncestor(p *peerConnection, remoteHeader *types.Header) switch mode { case FullSync: localHeight = d.blockchain.CurrentBlock().NumberU64() - case FastSync: + case SnapSync: localHeight = d.blockchain.CurrentFastBlock().NumberU64() default: localHeight = d.lightchain.CurrentHeader().Number.Uint64() @@ -822,76 +864,52 @@ func (d *Downloader) findAncestor(p *peerConnection, remoteHeader *types.Header) return ancestor, nil } -func (d *Downloader) findAncestorSpanSearch(p *peerConnection, mode SyncMode, remoteHeight, localHeight uint64, floor int64) (commonAncestor uint64, err error) { +func (d *Downloader) findAncestorSpanSearch(p *peerConnection, mode SyncMode, remoteHeight, localHeight uint64, floor int64) (uint64, error) { from, count, skip, max := calculateRequestSpan(remoteHeight, localHeight) p.log.Trace("Span searching for common ancestor", "count", count, "from", from, "skip", skip) - go p.peer.RequestHeadersByNumber(uint64(from), count, skip, false) - + headers, hashes, err := d.fetchHeadersByNumber(p, uint64(from), count, skip, false) + if err != nil { + return 0, err + } // Wait for the remote response to the head fetch number, hash := uint64(0), common.Hash{} - ttl := d.peers.rates.TargetTimeout() - timeout := time.After(ttl) - - for finished := false; !finished; { - select { - case <-d.cancelCh: - return 0, errCanceled - - case packet := <-d.headerCh: - // Discard anything not from the origin peer - if packet.PeerId() != p.id { - log.Debug("Received headers from incorrect peer", "peer", packet.PeerId()) - break - } - // Make sure the peer actually gave something valid - headers := packet.(*headerPack).headers - if len(headers) == 0 { - p.log.Warn("Empty head header set") - return 0, errEmptyHeaderSet - } - // Make sure the peer's reply conforms to the request - for i, header := range headers { - expectNumber := from + int64(i)*int64(skip+1) - if number := header.Number.Int64(); number != expectNumber { - p.log.Warn("Head headers broke chain ordering", "index", i, "requested", expectNumber, "received", number) - return 0, fmt.Errorf("%w: %v", errInvalidChain, errors.New("head headers broke chain ordering")) - } - } - // Check if a common ancestor was found - finished = true - for i := len(headers) - 1; i >= 0; i-- { - // Skip any headers that underflow/overflow our requested set - if headers[i].Number.Int64() < from || headers[i].Number.Uint64() > max { - continue - } - // Otherwise check if we already know the header or not - h := headers[i].Hash() - n := headers[i].Number.Uint64() - - var known bool - switch mode { - case FullSync: - known = d.blockchain.HasBlock(h, n) - case FastSync: - known = d.blockchain.HasFastBlock(h, n) - default: - known = d.lightchain.HasHeader(h, n) - } - if known { - number, hash = n, h - break - } - } - - case <-timeout: - p.log.Debug("Waiting for head header timed out", "elapsed", ttl) - return 0, errTimeout - - case <-d.bodyCh: - case <-d.receiptCh: - // Out of bounds delivery, ignore + // Make sure 
the peer actually gave something valid + if len(headers) == 0 { + p.log.Warn("Empty head header set") + return 0, errEmptyHeaderSet + } + // Make sure the peer's reply conforms to the request + for i, header := range headers { + expectNumber := from + int64(i)*int64(skip+1) + if number := header.Number.Int64(); number != expectNumber { + p.log.Warn("Head headers broke chain ordering", "index", i, "requested", expectNumber, "received", number) + return 0, fmt.Errorf("%w: %v", errInvalidChain, errors.New("head headers broke chain ordering")) + } + } + // Check if a common ancestor was found + for i := len(headers) - 1; i >= 0; i-- { + // Skip any headers that underflow/overflow our requested set + if headers[i].Number.Int64() < from || headers[i].Number.Uint64() > max { + continue + } + // Otherwise check if we already know the header or not + h := hashes[i] + n := headers[i].Number.Uint64() + + var known bool + switch mode { + case FullSync: + known = d.blockchain.HasBlock(h, n) + case SnapSync: + known = d.blockchain.HasFastBlock(h, n) + default: + known = d.lightchain.HasHeader(h, n) + } + if known { + number, hash = n, h + break } } // If the head fetch already found an ancestor, return @@ -906,7 +924,7 @@ func (d *Downloader) findAncestorSpanSearch(p *peerConnection, mode SyncMode, re return 0, errNoAncestorFound } -func (d *Downloader) findAncestorBinarySearch(p *peerConnection, mode SyncMode, remoteHeight uint64, floor int64) (commonAncestor uint64, err error) { +func (d *Downloader) findAncestorBinarySearch(p *peerConnection, mode SyncMode, remoteHeight uint64, floor int64) (uint64, error) { hash := common.Hash{} // Ancestor not found, we need to binary search over our chain @@ -920,65 +938,39 @@ func (d *Downloader) findAncestorBinarySearch(p *peerConnection, mode SyncMode, // Split our chain interval in two, and request the hash to cross check check := (start + end) / 2 - ttl := d.peers.rates.TargetTimeout() - timeout := time.After(ttl) - - go p.peer.RequestHeadersByNumber(check, 1, 0, false) - - // Wait until a reply arrives to this request - for arrived := false; !arrived; { - select { - case <-d.cancelCh: - return 0, errCanceled - - case packet := <-d.headerCh: - // Discard anything not from the origin peer - if packet.PeerId() != p.id { - log.Debug("Received headers from incorrect peer", "peer", packet.PeerId()) - break - } - // Make sure the peer actually gave something valid - headers := packet.(*headerPack).headers - if len(headers) != 1 { - p.log.Warn("Multiple headers for single request", "headers", len(headers)) - return 0, fmt.Errorf("%w: multiple headers (%d) for single request", errBadPeer, len(headers)) - } - arrived = true - - // Modify the search interval based on the response - h := headers[0].Hash() - n := headers[0].Number.Uint64() - - var known bool - switch mode { - case FullSync: - known = d.blockchain.HasBlock(h, n) - case FastSync: - known = d.blockchain.HasFastBlock(h, n) - default: - known = d.lightchain.HasHeader(h, n) - } - if !known { - end = check - break - } - header := d.lightchain.GetHeaderByHash(h) // Independent of sync mode, header surely exists - if header.Number.Uint64() != check { - p.log.Warn("Received non requested header", "number", header.Number, "hash", header.Hash(), "request", check) - return 0, fmt.Errorf("%w: non-requested header (%d)", errBadPeer, header.Number) - } - start = check - hash = h - - case <-timeout: - p.log.Debug("Waiting for search header timed out", "elapsed", ttl) - return 0, errTimeout - - case <-d.bodyCh: - case 
<-d.receiptCh: - // Out of bounds delivery, ignore - } + headers, hashes, err := d.fetchHeadersByNumber(p, check, 1, 0, false) + if err != nil { + return 0, err + } + // Make sure the peer actually gave something valid + if len(headers) != 1 { + p.log.Warn("Multiple headers for single request", "headers", len(headers)) + return 0, fmt.Errorf("%w: multiple headers (%d) for single request", errBadPeer, len(headers)) } + // Modify the search interval based on the response + h := hashes[0] + n := headers[0].Number.Uint64() + + var known bool + switch mode { + case FullSync: + known = d.blockchain.HasBlock(h, n) + case SnapSync: + known = d.blockchain.HasFastBlock(h, n) + default: + known = d.lightchain.HasHeader(h, n) + } + if !known { + end = check + continue + } + header := d.lightchain.GetHeaderByHash(h) // Independent of sync mode, header surely exists + if header.Number.Uint64() != check { + p.log.Warn("Received non requested header", "number", header.Number, "hash", header.Hash(), "request", check) + return 0, fmt.Errorf("%w: non-requested header (%d)", errBadPeer, header.Number) + } + start = check + hash = h } // Ensure valid ancestry and return if int64(start) <= floor { @@ -997,227 +989,216 @@ func (d *Downloader) findAncestorBinarySearch(p *peerConnection, mode SyncMode, // other peers are only accepted if they map cleanly to the skeleton. If no one // can fill in the skeleton - not even the origin peer - it's assumed invalid and // the origin is dropped. -func (d *Downloader) fetchHeaders(p *peerConnection, from uint64) error { +func (d *Downloader) fetchHeaders(p *peerConnection, from uint64, head uint64) error { p.log.Debug("Directing header downloads", "origin", from) defer p.log.Debug("Header download terminated") - // Create a timeout timer, and the associated header fetcher - skeleton := true // Skeleton assembly phase or finishing up - pivoting := false // Whether the next request is pivot verification - request := time.Now() // time of the last skeleton fetch request - timeout := time.NewTimer(0) // timer to dump a non-responsive active peer - <-timeout.C // timeout channel should be initially empty - defer timeout.Stop() - - var ttl time.Duration - getHeaders := func(from uint64) { - request = time.Now() + // Start pulling the header chain skeleton until all is done + var ( + skeleton = true // Skeleton assembly phase or finishing up + pivoting = false // Whether the next request is pivot verification + ancestor = from + mode = d.getMode() + ) + for { + // Pull the next batch of headers, it either: + // - Pivot check to see if the chain moved too far + // - Skeleton retrieval to permit concurrent header fetches + // - Full header retrieval if we're near the chain head + var ( + headers []*types.Header + hashes []common.Hash + err error + ) + switch { + case pivoting: + d.pivotLock.RLock() + pivot := d.pivotHeader.Number.Uint64() + d.pivotLock.RUnlock() - ttl = d.peers.rates.TargetTimeout() - timeout.Reset(ttl) + p.log.Trace("Fetching next pivot header", "number", pivot+uint64(fsMinFullBlocks)) + headers, hashes, err = d.fetchHeadersByNumber(p, pivot+uint64(fsMinFullBlocks), 2, fsMinFullBlocks-9, false) // move +64 when it's 2x64-8 deep - if skeleton { + case skeleton: p.log.Trace("Fetching skeleton headers", "count", MaxHeaderFetch, "from", from) - go p.peer.RequestHeadersByNumber(from+uint64(MaxHeaderFetch)-1, MaxSkeletonSize, MaxHeaderFetch-1, false) - } else { + headers, hashes, err = d.fetchHeadersByNumber(p, from+uint64(MaxHeaderFetch)-1, MaxSkeletonSize, 
MaxHeaderFetch-1, false) + + default: p.log.Trace("Fetching full headers", "count", MaxHeaderFetch, "from", from) - go p.peer.RequestHeadersByNumber(from, MaxHeaderFetch, 0, false) + headers, hashes, err = d.fetchHeadersByNumber(p, from, MaxHeaderFetch, 0, false) } - } - getNextPivot := func() { - pivoting = true - request = time.Now() + switch err { + case nil: + // Headers retrieved, continue with processing - ttl = d.peers.rates.TargetTimeout() - timeout.Reset(ttl) - - d.pivotLock.RLock() - pivot := d.pivotHeader.Number.Uint64() - d.pivotLock.RUnlock() - - p.log.Trace("Fetching next pivot header", "number", pivot+uint64(fsMinFullBlocks)) - go p.peer.RequestHeadersByNumber(pivot+uint64(fsMinFullBlocks), 2, fsMinFullBlocks-9, false) // move +64 when it's 2x64-8 deep - } - // Start pulling the header chain skeleton until all is done - ancestor := from - getHeaders(from) + case errCanceled: + // Sync cancelled, no issue, propagate up + return err - mode := d.getMode() - for { - select { - case <-d.cancelCh: - return errCanceled + default: + // Header retrieval either timed out, or the peer failed in some strange way + // (e.g. disconnect). Consider the master peer bad and drop + d.dropPeer(p.id) - case packet := <-d.headerCh: - // Make sure the active peer is giving us the skeleton headers - if packet.PeerId() != p.id { - log.Debug("Received skeleton from incorrect peer", "peer", packet.PeerId()) - break + // Finish the sync gracefully instead of dumping the gathered data though + for _, ch := range []chan bool{d.queue.blockWakeCh, d.queue.receiptWakeCh} { + select { + case ch <- false: + case <-d.cancelCh: + } } - headerReqTimer.UpdateSince(request) - timeout.Stop() - - // If the pivot is being checked, move if it became stale and run the real retrieval - var pivot uint64 - - d.pivotLock.RLock() - if d.pivotHeader != nil { - pivot = d.pivotHeader.Number.Uint64() + select { + case d.headerProcCh <- nil: + case <-d.cancelCh: } - d.pivotLock.RUnlock() + return fmt.Errorf("%w: header request failed: %v", errBadPeer, err) + } + // If the pivot is being checked, move if it became stale and run the real retrieval + var pivot uint64 - if pivoting { - if packet.Items() == 2 { - // Retrieve the headers and do some sanity checks, just in case - headers := packet.(*headerPack).headers + d.pivotLock.RLock() + if d.pivotHeader != nil { + pivot = d.pivotHeader.Number.Uint64() + } + d.pivotLock.RUnlock() - if have, want := headers[0].Number.Uint64(), pivot+uint64(fsMinFullBlocks); have != want { - log.Warn("Peer sent invalid next pivot", "have", have, "want", want) - return fmt.Errorf("%w: next pivot number %d != requested %d", errInvalidChain, have, want) - } - if have, want := headers[1].Number.Uint64(), pivot+2*uint64(fsMinFullBlocks)-8; have != want { - log.Warn("Peer sent invalid pivot confirmer", "have", have, "want", want) - return fmt.Errorf("%w: next pivot confirmer number %d != requested %d", errInvalidChain, have, want) - } - log.Warn("Pivot seemingly stale, moving", "old", pivot, "new", headers[0].Number) - pivot = headers[0].Number.Uint64() + if pivoting { + if len(headers) == 2 { + if have, want := headers[0].Number.Uint64(), pivot+uint64(fsMinFullBlocks); have != want { + log.Warn("Peer sent invalid next pivot", "have", have, "want", want) + return fmt.Errorf("%w: next pivot number %d != requested %d", errInvalidChain, have, want) + } + if have, want := headers[1].Number.Uint64(), pivot+2*uint64(fsMinFullBlocks)-8; have != want { + log.Warn("Peer sent invalid pivot confirmer", "have", have, 
"want", want) + return fmt.Errorf("%w: next pivot confirmer number %d != requested %d", errInvalidChain, have, want) + } + log.Warn("Pivot seemingly stale, moving", "old", pivot, "new", headers[0].Number) + pivot = headers[0].Number.Uint64() - d.pivotLock.Lock() - d.pivotHeader = headers[0] - d.pivotLock.Unlock() + d.pivotLock.Lock() + d.pivotHeader = headers[0] + d.pivotLock.Unlock() - // Write out the pivot into the database so a rollback beyond - // it will reenable fast sync and update the state root that - // the state syncer will be downloading. - rawdb.WriteLastPivotNumber(d.stateDB, pivot) - } - pivoting = false - getHeaders(from) - continue + // Write out the pivot into the database so a rollback beyond + // it will reenable snap sync and update the state root that + // the state syncer will be downloading. + rawdb.WriteLastPivotNumber(d.stateDB, pivot) } - // If the skeleton's finished, pull any remaining head headers directly from the origin - if skeleton && packet.Items() == 0 { - skeleton = false - getHeaders(from) - continue + // Disable the pivot check and fetch the next batch of headers + pivoting = false + continue + } + // If the skeleton's finished, pull any remaining head headers directly from the origin + if skeleton && len(headers) == 0 { + // A malicious node might withhold advertised headers indefinitely + if from+uint64(MaxHeaderFetch)-1 <= head { + p.log.Warn("Peer withheld skeleton headers", "advertised", head, "withheld", from+uint64(MaxHeaderFetch)-1) + return fmt.Errorf("%w: withheld skeleton headers: advertised %d, withheld #%d", errStallingPeer, head, from+uint64(MaxHeaderFetch)-1) } - // If no more headers are inbound, notify the content fetchers and return - if packet.Items() == 0 { - // Don't abort header fetches while the pivot is downloading - if atomic.LoadInt32(&d.committed) == 0 && pivot <= from { - p.log.Debug("No headers, waiting for pivot commit") - select { - case <-time.After(fsHeaderContCheck): - getHeaders(from) - continue - case <-d.cancelCh: - return errCanceled - } - } - // Pivot done (or not in fast sync) and no more headers, terminate the process - p.log.Debug("No more headers available") + p.log.Debug("No skeleton, fetching headers directly") + skeleton = false + continue + } + // If no more headers are inbound, notify the content fetchers and return + if len(headers) == 0 { + // Don't abort header fetches while the pivot is downloading + if atomic.LoadInt32(&d.committed) == 0 && pivot <= from { + p.log.Debug("No headers, waiting for pivot commit") select { - case d.headerProcCh <- nil: - return nil + case <-time.After(fsHeaderContCheck): + continue case <-d.cancelCh: return errCanceled } } - headers := packet.(*headerPack).headers - - // If we received a skeleton batch, resolve internals concurrently - if skeleton { - filled, proced, err := d.fillHeaderSkeleton(from, headers) - if err != nil { - p.log.Debug("Skeleton chain invalid", "err", err) - return fmt.Errorf("%w: %v", errInvalidChain, err) - } - headers = filled[proced:] - from += uint64(proced) - } else { - // If we're closing in on the chain head, but haven't yet reached it, delay - // the last few headers so mini reorgs on the head don't cause invalid hash - // chain errors. 
- if n := len(headers); n > 0 { - // Retrieve the current head we're at - var head uint64 - if mode == LightSync { - head = d.lightchain.CurrentHeader().Number.Uint64() - } else { - head = d.blockchain.CurrentFastBlock().NumberU64() - if full := d.blockchain.CurrentBlock().NumberU64(); head < full { - head = full - } - } - // If the head is below the common ancestor, we're actually deduplicating - // already existing chain segments, so use the ancestor as the fake head. - // Otherwise we might end up delaying header deliveries pointlessly. - if head < ancestor { - head = ancestor - } - // If the head is way older than this batch, delay the last few headers - if head+uint64(reorgProtThreshold) < headers[n-1].Number.Uint64() { - delay := reorgProtHeaderDelay - if delay > n { - delay = n - } - headers = headers[:n-delay] - } - } + // Pivot done (or not in snap sync) and no more headers, terminate the process + p.log.Debug("No more headers available") + select { + case d.headerProcCh <- nil: + return nil + case <-d.cancelCh: + return errCanceled } - // Insert all the new headers and fetch the next batch - if len(headers) > 0 { - p.log.Trace("Scheduling new headers", "count", len(headers), "from", from) - select { - case d.headerProcCh <- headers: - case <-d.cancelCh: - return errCanceled - } - from += uint64(len(headers)) + } + // If we received a skeleton batch, resolve internals concurrently + var progressed bool + if skeleton { + filled, hashset, proced, err := d.fillHeaderSkeleton(from, headers) + if err != nil { + p.log.Debug("Skeleton chain invalid", "err", err) + return fmt.Errorf("%w: %v", errInvalidChain, err) + } + headers = filled[proced:] + hashes = hashset[proced:] - // If we're still skeleton filling fast sync, check pivot staleness - // before continuing to the next skeleton filling - if skeleton && pivot > 0 { - getNextPivot() + progressed = proced > 0 + from += uint64(proced) + } else { + // A malicious node might withhold advertised headers indefinitely + if n := len(headers); n < MaxHeaderFetch && headers[n-1].Number.Uint64() < head { + p.log.Warn("Peer withheld headers", "advertised", head, "delivered", headers[n-1].Number.Uint64()) + return fmt.Errorf("%w: withheld headers: advertised %d, delivered %d", errStallingPeer, head, headers[n-1].Number.Uint64()) + } + // If we're closing in on the chain head, but haven't yet reached it, delay + // the last few headers so mini reorgs on the head don't cause invalid hash + // chain errors. + if n := len(headers); n > 0 { + // Retrieve the current head we're at + var head uint64 + if mode == LightSync { + head = d.lightchain.CurrentHeader().Number.Uint64() } else { - getHeaders(from) + head = d.blockchain.CurrentFastBlock().NumberU64() + if full := d.blockchain.CurrentBlock().NumberU64(); head < full { + head = full + } } - } else { - // No headers delivered, or all of them being delayed, sleep a bit and retry - p.log.Trace("All headers delayed, waiting") - select { - case <-time.After(fsHeaderContCheck): - getHeaders(from) - continue - case <-d.cancelCh: - return errCanceled + // If the head is below the common ancestor, we're actually deduplicating + // already existing chain segments, so use the ancestor as the fake head. + // Otherwise, we might end up delaying header deliveries pointlessly. + if head < ancestor { + head = ancestor } - } - - case <-timeout.C: - if d.dropPeer == nil { - // The dropPeer method is nil when `--copydb` is used for a local copy. - // Timeouts can occur if e.g. 
compaction hits at the wrong time, and can be ignored - p.log.Warn("Downloader wants to drop peer, but peerdrop-function is not set", "peer", p.id) - break - } - // Header retrieval timed out, consider the peer bad and drop - p.log.Debug("Header request timed out", "elapsed", ttl) - headerTimeoutMeter.Mark(1) - d.dropPeer(p.id) - - // Finish the sync gracefully instead of dumping the gathered data though - for _, ch := range []chan bool{d.bodyWakeCh, d.receiptWakeCh} { - select { - case ch <- false: - case <-d.cancelCh: + // If the head is way older than this batch, delay the last few headers + if head+uint64(reorgProtThreshold) < headers[n-1].Number.Uint64() { + delay := reorgProtHeaderDelay + if delay > n { + delay = n + } + headers = headers[:n-delay] + hashes = hashes[:n-delay] } } + } + // If no headers have been delivered, or all of them have been delayed, + // sleep a bit and retry. Take care with headers already consumed during + // skeleton filling. + if len(headers) == 0 && !progressed { + p.log.Trace("All headers delayed, waiting") select { - case d.headerProcCh <- nil: + case <-time.After(fsHeaderContCheck): + continue case <-d.cancelCh: + return errCanceled } - return fmt.Errorf("%w: header request timed out", errBadPeer) + } + // Insert any remaining new headers and fetch the next batch + if len(headers) > 0 { + p.log.Trace("Scheduling new headers", "count", len(headers), "from", from) + select { + case d.headerProcCh <- &headerTask{ + headers: headers, + hashes: hashes, + }: + case <-d.cancelCh: + return errCanceled + } + from += uint64(len(headers)) + } + // If we're still skeleton filling snap sync, check pivot staleness + // before continuing to the next skeleton filling + if skeleton && pivot > 0 { + pivoting = true } } } @@ -1231,54 +1212,27 @@ func (d *Downloader) fetchHeaders(p *peerConnection, from uint64) error { // // The method returns the entire filled skeleton and also the number of headers // already forwarded for processing.
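Among the stall checks added in the hunks above, a peer that delivers a short, non-empty batch while still advertising more headers is treated as withholding. A standalone sketch of that predicate over plain block numbers (illustrative types, not the downloader's):

```go
package main

import "fmt"

const maxHeaderFetch = 192 // mirrors MaxHeaderFetch above

// withheld reports whether a peer returned a short, non-empty batch whose
// last header is still below the head it advertised: the stall condition
// rejected with errStallingPeer in the new code.
func withheld(delivered []uint64, advertisedHead uint64) bool {
	n := len(delivered)
	return n > 0 && n < maxHeaderFetch && delivered[n-1] < advertisedHead
}

func main() {
	fmt.Println(withheld([]uint64{1, 2, 3}, 100))      // true: short of head 100
	fmt.Println(withheld([]uint64{98, 99, 100}, 100))  // false: reached the head
}
```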
-func (d *Downloader) fillHeaderSkeleton(from uint64, skeleton []*types.Header) ([]*types.Header, int, error) { +func (d *Downloader) fillHeaderSkeleton(from uint64, skeleton []*types.Header) ([]*types.Header, []common.Hash, int, error) { log.Debug("Filling up skeleton", "from", from) d.queue.ScheduleSkeleton(from, skeleton) - var ( - deliver = func(packet dataPack) (int, error) { - pack := packet.(*headerPack) - return d.queue.DeliverHeaders(pack.peerID, pack.headers, d.headerProcCh) - } - expire = func() map[string]int { return d.queue.ExpireHeaders(d.peers.rates.TargetTimeout()) } - reserve = func(p *peerConnection, count int) (*fetchRequest, bool, bool) { - return d.queue.ReserveHeaders(p, count), false, false - } - fetch = func(p *peerConnection, req *fetchRequest) error { return p.FetchHeaders(req.From, MaxHeaderFetch) } - capacity = func(p *peerConnection) int { return p.HeaderCapacity(d.peers.rates.TargetRoundTrip()) } - setIdle = func(p *peerConnection, accepted int, deliveryTime time.Time) { - p.SetHeadersIdle(accepted, deliveryTime) - } - ) - err := d.fetchParts(d.headerCh, deliver, d.queue.headerContCh, expire, - d.queue.PendingHeaders, d.queue.InFlightHeaders, reserve, - nil, fetch, d.queue.CancelHeaders, capacity, d.peers.HeaderIdlePeers, setIdle, "headers") - - log.Debug("Skeleton fill terminated", "err", err) - - filled, proced := d.queue.RetrieveHeaders() - return filled, proced, err + err := d.concurrentFetch((*headerQueue)(d), false) + if err != nil { + log.Debug("Skeleton fill failed", "err", err) + } + filled, hashes, proced := d.queue.RetrieveHeaders() + if err == nil { + log.Debug("Skeleton fill succeeded", "filled", len(filled), "processed", proced) + } + return filled, hashes, proced, err } // fetchBodies iteratively downloads the scheduled block bodies, taking any // available peers, reserving a chunk of blocks for each, waiting for delivery // and also periodically checking for timeouts. -func (d *Downloader) fetchBodies(from uint64) error { +func (d *Downloader) fetchBodies(from uint64, beaconMode bool) error { log.Debug("Downloading block bodies", "origin", from) - - var ( - deliver = func(packet dataPack) (int, error) { - pack := packet.(*bodyPack) - return d.queue.DeliverBodies(pack.peerID, pack.transactions, pack.uncles) - } - expire = func() map[string]int { return d.queue.ExpireBodies(d.peers.rates.TargetTimeout()) } - fetch = func(p *peerConnection, req *fetchRequest) error { return p.FetchBodies(req) } - capacity = func(p *peerConnection) int { return p.BlockCapacity(d.peers.rates.TargetRoundTrip()) } - setIdle = func(p *peerConnection, accepted int, deliveryTime time.Time) { p.SetBodiesIdle(accepted, deliveryTime) } - ) - err := d.fetchParts(d.bodyCh, deliver, d.bodyWakeCh, expire, - d.queue.PendingBlocks, d.queue.InFlightBlocks, d.queue.ReserveBodies, - d.bodyFetchHook, fetch, d.queue.CancelBodies, capacity, d.peers.BodyIdlePeers, setIdle, "bodies") + err := d.concurrentFetch((*bodyQueue)(d), beaconMode) log.Debug("Block body download terminated", "err", err) return err @@ -1287,230 +1241,18 @@ func (d *Downloader) fetchBodies(from uint64) error { // fetchReceipts iteratively downloads the scheduled block receipts, taking any // available peers, reserving a chunk of receipts for each, waiting for delivery // and also periodically checking for timeouts. 
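fetchBodies above and fetchReceipts below now funnel through a single concurrentFetch helper by converting the *Downloader into per-kind defined types, each carrying its own method set. A minimal sketch of that Go pattern, with illustrative names:

```go
package main

import "fmt"

type downloader struct{ name string }

// bodyQueue and receiptQueue are defined types over downloader, so the same
// value can satisfy a fetch interface with kind-specific behaviour.
type bodyQueue downloader
type receiptQueue downloader

type typedQueue interface{ kind() string }

func (q *bodyQueue) kind() string    { return "bodies" }
func (q *receiptQueue) kind() string { return "receipts" }

func concurrentFetch(q typedQueue) { fmt.Println("fetching", q.kind()) }

func main() {
	d := &downloader{name: "dl"}
	concurrentFetch((*bodyQueue)(d))    // fetching bodies
	concurrentFetch((*receiptQueue)(d)) // fetching receipts
}
```

Because bodyQueue and receiptQueue share downloader's underlying type, the pointer conversions are free and one instance serves every queue flavour.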
-func (d *Downloader) fetchReceipts(from uint64) error { - log.Debug("Downloading transaction receipts", "origin", from) - - var ( - deliver = func(packet dataPack) (int, error) { - pack := packet.(*receiptPack) - return d.queue.DeliverReceipts(pack.peerID, pack.receipts) - } - expire = func() map[string]int { return d.queue.ExpireReceipts(d.peers.rates.TargetTimeout()) } - fetch = func(p *peerConnection, req *fetchRequest) error { return p.FetchReceipts(req) } - capacity = func(p *peerConnection) int { return p.ReceiptCapacity(d.peers.rates.TargetRoundTrip()) } - setIdle = func(p *peerConnection, accepted int, deliveryTime time.Time) { - p.SetReceiptsIdle(accepted, deliveryTime) - } - ) - err := d.fetchParts(d.receiptCh, deliver, d.receiptWakeCh, expire, - d.queue.PendingReceipts, d.queue.InFlightReceipts, d.queue.ReserveReceipts, - d.receiptFetchHook, fetch, d.queue.CancelReceipts, capacity, d.peers.ReceiptIdlePeers, setIdle, "receipts") +func (d *Downloader) fetchReceipts(from uint64, beaconMode bool) error { + log.Debug("Downloading receipts", "origin", from) + err := d.concurrentFetch((*receiptQueue)(d), beaconMode) - log.Debug("Transaction receipt download terminated", "err", err) + log.Debug("Receipt download terminated", "err", err) return err } -// fetchParts iteratively downloads scheduled block parts, taking any available -// peers, reserving a chunk of fetch requests for each, waiting for delivery and -// also periodically checking for timeouts. -// -// As the scheduling/timeout logic mostly is the same for all downloaded data -// types, this method is used by each for data gathering and is instrumented with -// various callbacks to handle the slight differences between processing them. -// -// The instrumentation parameters: -// - errCancel: error type to return if the fetch operation is cancelled (mostly makes logging nicer) -// - deliveryCh: channel from which to retrieve downloaded data packets (merged from all concurrent peers) -// - deliver: processing callback to deliver data packets into type specific download queues (usually within `queue`) -// - wakeCh: notification channel for waking the fetcher when new tasks are available (or sync completed) -// - expire: task callback method to abort requests that took too long and return the faulty peers (traffic shaping) -// - pending: task callback for the number of requests still needing download (detect completion/non-completability) -// - inFlight: task callback for the number of in-progress requests (wait for all active downloads to finish) -// - throttle: task callback to check if the processing queue is full and activate throttling (bound memory use) -// - reserve: task callback to reserve new download tasks to a particular peer (also signals partial completions) -// - fetchHook: tester callback to notify of new tasks being initiated (allows testing the scheduling logic) -// - fetch: network callback to actually send a particular download request to a physical remote peer -// - cancel: task callback to abort an in-flight download request and allow rescheduling it (in case of lost peer) -// - capacity: network callback to retrieve the estimated type-specific bandwidth capacity of a peer (traffic shaping) -// - idle: network callback to retrieve the currently (type specific) idle peers that can be assigned tasks -// - setIdle: network callback to set a peer back to idle and update its estimated capacity (traffic shaping) -// - kind: textual label of the type being downloaded to display in log messages -func (d *Downloader) 
fetchParts(deliveryCh chan dataPack, deliver func(dataPack) (int, error), wakeCh chan bool, - expire func() map[string]int, pending func() int, inFlight func() bool, reserve func(*peerConnection, int) (*fetchRequest, bool, bool), - fetchHook func([]*types.Header), fetch func(*peerConnection, *fetchRequest) error, cancel func(*fetchRequest), capacity func(*peerConnection) int, - idle func() ([]*peerConnection, int), setIdle func(*peerConnection, int, time.Time), kind string) error { - - // Create a ticker to detect expired retrieval tasks - ticker := time.NewTicker(100 * time.Millisecond) - defer ticker.Stop() - - update := make(chan struct{}, 1) - - // Prepare the queue and fetch block parts until the block header fetcher's done - finished := false - for { - select { - case <-d.cancelCh: - return errCanceled - - case packet := <-deliveryCh: - deliveryTime := time.Now() - // If the peer was previously banned and failed to deliver its pack - // in a reasonable time frame, ignore its message. - if peer := d.peers.Peer(packet.PeerId()); peer != nil { - // Deliver the received chunk of data and check chain validity - accepted, err := deliver(packet) - if errors.Is(err, errInvalidChain) { - return err - } - // Unless a peer delivered something completely else than requested (usually - // caused by a timed out request which came through in the end), set it to - // idle. If the delivery's stale, the peer should have already been idled. - if !errors.Is(err, errStaleDelivery) { - setIdle(peer, accepted, deliveryTime) - } - // Issue a log to the user to see what's going on - switch { - case err == nil && packet.Items() == 0: - peer.log.Trace("Requested data not delivered", "type", kind) - case err == nil: - peer.log.Trace("Delivered new batch of data", "type", kind, "count", packet.Stats()) - default: - peer.log.Debug("Failed to deliver retrieved data", "type", kind, "err", err) - } - } - // Blocks assembled, try to update the progress - select { - case update <- struct{}{}: - default: - } - - case cont := <-wakeCh: - // The header fetcher sent a continuation flag, check if it's done - if !cont { - finished = true - } - // Headers arrive, try to update the progress - select { - case update <- struct{}{}: - default: - } - - case <-ticker.C: - // Sanity check update the progress - select { - case update <- struct{}{}: - default: - } - - case <-update: - // Short circuit if we lost all our peers - if d.peers.Len() == 0 { - return errNoPeers - } - // Check for fetch request timeouts and demote the responsible peers - for pid, fails := range expire() { - if peer := d.peers.Peer(pid); peer != nil { - // If a lot of retrieval elements expired, we might have overestimated the remote peer or perhaps - // ourselves. Only reset to minimal throughput but don't drop just yet. If even the minimal times - // out that sync wise we need to get rid of the peer. - // - // The reason the minimum threshold is 2 is because the downloader tries to estimate the bandwidth - // and latency of a peer separately, which requires pushing the measures capacity a bit and seeing - // how response times reacts, to it always requests one more than the minimum (i.e. min 2). - if fails > 2 { - peer.log.Trace("Data delivery timed out", "type", kind) - setIdle(peer, 0, time.Now()) - } else { - peer.log.Debug("Stalling delivery, dropping", "type", kind) - - if d.dropPeer == nil { - // The dropPeer method is nil when `--copydb` is used for a local copy. - // Timeouts can occur if e.g. 
compaction hits at the wrong time, and can be ignored - peer.log.Warn("Downloader wants to drop peer, but peerdrop-function is not set", "peer", pid) - } else { - d.dropPeer(pid) - - // If this peer was the master peer, abort sync immediately - d.cancelLock.RLock() - master := pid == d.cancelPeer - d.cancelLock.RUnlock() - - if master { - d.cancel() - return errTimeout - } - } - } - } - } - // If there's nothing more to fetch, wait or terminate - if pending() == 0 { - if !inFlight() && finished { - log.Debug("Data fetching completed", "type", kind) - return nil - } - break - } - // Send a download request to all idle peers, until throttled - progressed, throttled, running := false, false, inFlight() - idles, total := idle() - pendCount := pending() - for _, peer := range idles { - // Short circuit if throttling activated - if throttled { - break - } - // Short circuit if there is no more available task. - if pendCount = pending(); pendCount == 0 { - break - } - // Reserve a chunk of fetches for a peer. A nil can mean either that - // no more headers are available, or that the peer is known not to - // have them. - request, progress, throttle := reserve(peer, capacity(peer)) - if progress { - progressed = true - } - if throttle { - throttled = true - throttleCounter.Inc(1) - } - if request == nil { - continue - } - if request.From > 0 { - peer.log.Trace("Requesting new batch of data", "type", kind, "from", request.From) - } else { - peer.log.Trace("Requesting new batch of data", "type", kind, "count", len(request.Headers), "from", request.Headers[0].Number) - } - // Fetch the chunk and make sure any errors return the hashes to the queue - if fetchHook != nil { - fetchHook(request.Headers) - } - if err := fetch(peer, request); err != nil { - // Although we could try and make an attempt to fix this, this error really - // means that we've double allocated a fetch task to a peer. If that is the - // case, the internal state of the downloader and the queue is very wrong so - // better hard crash and note the error instead of silently accumulating into - // a much bigger issue. - panic(fmt.Sprintf("%v: %s fetch assignment failed", peer, kind)) - } - running = true - } - // Make sure that we have peers available for fetching. If all peers have been tried - // and all failed throw an error - if !progressed && !throttled && !running && len(idles) == total && pendCount > 0 { - return errPeersUnavailable - } - } - } -} - // processHeaders takes batches of retrieved headers from an input channel and // keeps processing and scheduling them into the header chain and downloader's // queue until the stream ends or a failure occurs. 
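The rewritten processHeaders below (and processFullSyncContent further down) caps a legacy PoW sync at the terminal total difficulty: cumulative TD is summed per header, the header that first crosses TTD is still let in as the terminal block, and everything after it is rejected with ErrMergeTransition. A standalone sketch of just that split, assuming plain difficulty values instead of real headers:

package main

import (
	"fmt"
	"math/big"
)

// splitAtTTD mirrors the merge-boundary check: parentTD is the total
// difficulty before the first header, diffs are the per-header difficulties.
// It returns how many headers may still be imported; the header that crosses
// the threshold is itself allowed in as the terminal block.
func splitAtTTD(parentTD, ttd *big.Int, diffs []*big.Int) int {
	td := new(big.Int).Set(parentTD)
	for i, diff := range diffs {
		td.Add(td, diff)
		if td.Cmp(ttd) >= 0 {
			// TTD reached: if the running total was still below TTD before
			// this header, keep it as the terminal one; otherwise reject it.
			if new(big.Int).Sub(td, diff).Cmp(ttd) < 0 {
				return i + 1
			}
			return i
		}
	}
	return len(diffs)
}

func main() {
	diffs := []*big.Int{big.NewInt(10), big.NewInt(10), big.NewInt(10)}
	// A TTD of 15 is crossed by the second header: two are kept, one rejected.
	fmt.Println(splitAtTTD(big.NewInt(0), big.NewInt(15), diffs)) // 2
}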
-func (d *Downloader) processHeaders(origin uint64, td *big.Int) error { +func (d *Downloader) processHeaders(origin uint64, td, ttd *big.Int, beaconMode bool) error { // Keep a count of uncertain headers to roll back var ( rollback uint64 // Zero means no rollback (fine as you can't unroll the genesis) @@ -1535,7 +1277,7 @@ func (d *Downloader) processHeaders(origin uint64, td *big.Int) error { } log.Warn("Rolled back chain segment", "header", fmt.Sprintf("%d->%d", lastHeader, d.lightchain.CurrentHeader().Number), - "fast", fmt.Sprintf("%d->%d", lastFastBlock, curFastBlock), + "snap", fmt.Sprintf("%d->%d", lastFastBlock, curFastBlock), "block", fmt.Sprintf("%d->%d", lastBlock, curBlock), "reason", rollbackErr) } }() @@ -1548,45 +1290,50 @@ func (d *Downloader) processHeaders(origin uint64, td *big.Int) error { rollbackErr = errCanceled return errCanceled - case headers := <-d.headerProcCh: + case task := <-d.headerProcCh: // Terminate header processing if we synced up - if len(headers) == 0 { + if task == nil || len(task.headers) == 0 { // Notify everyone that headers are fully processed - for _, ch := range []chan bool{d.bodyWakeCh, d.receiptWakeCh} { + for _, ch := range []chan bool{d.queue.blockWakeCh, d.queue.receiptWakeCh} { select { case ch <- false: case <-d.cancelCh: } } - // If no headers were retrieved at all, the peer violated its TD promise that it had a - // better chain compared to ours. The only exception is if its promised blocks were - // already imported by other means (e.g. fetcher): - // - // R , L : Both at block 10 - // R: Mine block 11, and propagate it to L - // L: Queue block 11 for import - // L: Notice that R's head and TD increased compared to ours, start sync - // L: Import of block 11 finishes - // L: Sync begins, and finds common ancestor at 11 - // L: Request new headers up from 11 (R's TD was higher, it must have something) - // R: Nothing to give - if mode != LightSync { - head := d.blockchain.CurrentBlock() - if !gotHeaders && td.Cmp(d.blockchain.GetTd(head.Hash(), head.NumberU64())) > 0 { - return errStallingPeer + // If we're in legacy sync mode, we need to check total difficulty + // violations from malicious peers. That is not needed in beacon + // mode and we can skip to terminating sync. + if !beaconMode { + // If no headers were retrieved at all, the peer violated its TD promise that it had a + // better chain compared to ours. The only exception is if its promised blocks were + // already imported by other means (e.g. fetcher): + // + // R , L : Both at block 10 + // R: Mine block 11, and propagate it to L + // L: Queue block 11 for import + // L: Notice that R's head and TD increased compared to ours, start sync + // L: Import of block 11 finishes + // L: Sync begins, and finds common ancestor at 11 + // L: Request new headers up from 11 (R's TD was higher, it must have something) + // R: Nothing to give + if mode != LightSync { + head := d.blockchain.CurrentBlock() + if !gotHeaders && td.Cmp(d.blockchain.GetTd(head.Hash(), head.NumberU64())) > 0 { + return errStallingPeer + } } - } - // If fast or light syncing, ensure promised headers are indeed delivered. This is - // needed to detect scenarios where an attacker feeds a bad pivot and then bails out - // of delivering the post-pivot blocks that would flag the invalid content. - // - // This check cannot be executed "as is" for full imports, since blocks may still be - // queued for processing when the header download completes. 
However, as long as the - // peer gave us something useful, we're already happy/progressed (above check). - if mode == FastSync || mode == LightSync { - head := d.lightchain.CurrentHeader() - if td.Cmp(d.lightchain.GetTd(head.Hash(), head.Number.Uint64())) > 0 { - return errStallingPeer + // If snap or light syncing, ensure promised headers are indeed delivered. This is + // needed to detect scenarios where an attacker feeds a bad pivot and then bails out + // of delivering the post-pivot blocks that would flag the invalid content. + // + // This check cannot be executed "as is" for full imports, since blocks may still be + // queued for processing when the header download completes. However, as long as the + // peer gave us something useful, we're already happy/progressed (above check). + if mode == SnapSync || mode == LightSync { + head := d.lightchain.CurrentHeader() + if td.Cmp(d.lightchain.GetTd(head.Hash(), head.Number.Uint64())) > 0 { + return errStallingPeer + } } } // Disable any rollback and return @@ -1594,6 +1341,8 @@ func (d *Downloader) processHeaders(origin uint64, td *big.Int) error { return nil } // Otherwise split the chunk of headers into batches and process them + headers, hashes := task.headers, task.hashes + gotHeaders = true for len(headers) > 0 { // Terminate if something failed in between processing chunks @@ -1608,10 +1357,11 @@ func (d *Downloader) processHeaders(origin uint64, td *big.Int) error { if limit > len(headers) { limit = len(headers) } - chunk := headers[:limit] + chunkHeaders := headers[:limit] + chunkHashes := hashes[:limit] // In case of header only syncing, validate the chunk immediately - if mode == FastSync || mode == LightSync { + if mode == SnapSync || mode == LightSync { // If we're importing pure headers, verify based on their recentness var pivot uint64 @@ -1622,33 +1372,73 @@ func (d *Downloader) processHeaders(origin uint64, td *big.Int) error { d.pivotLock.RUnlock() frequency := fsHeaderCheckFrequency - if chunk[len(chunk)-1].Number.Uint64()+uint64(fsHeaderForceVerify) > pivot { + if chunkHeaders[len(chunkHeaders)-1].Number.Uint64()+uint64(fsHeaderForceVerify) > pivot { frequency = 1 } - if n, err := d.lightchain.InsertHeaderChain(chunk, frequency); err != nil { - rollbackErr = err - - // If some headers were inserted, track them as uncertain - if (mode == FastSync || frequency > 1) && n > 0 && rollback == 0 { - rollback = chunk[0].Number.Uint64() + // Although the received headers might be all valid, a legacy + // PoW/PoA sync must not accept post-merge headers. Make sure + // that any transition is rejected at this point. 
+ var ( + rejected []*types.Header + td *big.Int + ) + if !beaconMode && ttd != nil { + td = d.blockchain.GetTd(chunkHeaders[0].ParentHash, chunkHeaders[0].Number.Uint64()-1) + if td == nil { + // This should never really happen, but handle gracefully for now + log.Error("Failed to retrieve parent header TD", "number", chunkHeaders[0].Number.Uint64()-1, "hash", chunkHeaders[0].ParentHash) + return fmt.Errorf("%w: parent TD missing", errInvalidChain) + } + for i, header := range chunkHeaders { + td = new(big.Int).Add(td, header.Difficulty) + if td.Cmp(ttd) >= 0 { + // Terminal total difficulty reached, allow the last header in + if new(big.Int).Sub(td, header.Difficulty).Cmp(ttd) < 0 { + chunkHeaders, rejected = chunkHeaders[:i+1], chunkHeaders[i+1:] + if len(rejected) > 0 { + // Make a nicer user log as to the first TD truly rejected + td = new(big.Int).Add(td, rejected[0].Difficulty) + } + } else { + chunkHeaders, rejected = chunkHeaders[:i], chunkHeaders[i:] + } + break + } } - log.Warn("Invalid header encountered", "number", chunk[n].Number, "hash", chunk[n].Hash(), "parent", chunk[n].ParentHash, "err", err) - return fmt.Errorf("%w: %v", errInvalidChain, err) } - // All verifications passed, track all headers within the alloted limits - if mode == FastSync { - head := chunk[len(chunk)-1].Number.Uint64() - if head-rollback > uint64(fsHeaderSafetyNet) { - rollback = head - uint64(fsHeaderSafetyNet) - } else { - rollback = 1 + if len(chunkHeaders) > 0 { + if n, err := d.lightchain.InsertHeaderChain(chunkHeaders, frequency); err != nil { + rollbackErr = err + + // If some headers were inserted, track them as uncertain + if (mode == SnapSync || frequency > 1) && n > 0 && rollback == 0 { + rollback = chunkHeaders[0].Number.Uint64() + } + log.Warn("Invalid header encountered", "number", chunkHeaders[n].Number, "hash", chunkHashes[n], "parent", chunkHeaders[n].ParentHash, "err", err) + return fmt.Errorf("%w: %v", errInvalidChain, err) + } + // All verifications passed, track all headers within the allowed limits + if mode == SnapSync { + head := chunkHeaders[len(chunkHeaders)-1].Number.Uint64() + if head-rollback > uint64(fsHeaderSafetyNet) { + rollback = head - uint64(fsHeaderSafetyNet) + } else { + rollback = 1 + } } } + if len(rejected) != 0 { + // Merge threshold reached, stop importing, but don't roll back + rollback = 0 + + log.Info("Legacy sync reached merge threshold", "number", rejected[0].Number, "hash", rejected[0].Hash(), "td", td, "ttd", ttd) + return ErrMergeTransition + } } // Unless we're doing light chains, schedule the headers for associated content retrieval - if mode == FullSync || mode == FastSync { + if mode == FullSync || mode == SnapSync { // If we've reached the allowed number of pending headers, stall a bit - for d.queue.PendingBlocks() >= maxQueuedHeaders || d.queue.PendingReceipts() >= maxQueuedHeaders { + for d.queue.PendingBodies() >= maxQueuedHeaders || d.queue.PendingReceipts() >= maxQueuedHeaders { select { case <-d.cancelCh: rollbackErr = errCanceled @@ -1657,13 +1447,14 @@ func (d *Downloader) processHeaders(origin uint64, td *big.Int) error { } } // Otherwise insert the headers for content retrieval - inserts := d.queue.Schedule(chunk, origin) - if len(inserts) != len(chunk) { - rollbackErr = fmt.Errorf("stale headers: len inserts %v len(chunk) %v", len(inserts), len(chunk)) + inserts := d.queue.Schedule(chunkHeaders, chunkHashes, origin) + if len(inserts) != len(chunkHeaders) { + rollbackErr = fmt.Errorf("stale headers: len inserts %v len(chunk) %v", 
len(inserts), len(chunkHeaders)) return fmt.Errorf("%w: stale headers", errBadPeer) } } headers = headers[limit:] + hashes = hashes[limit:] origin += uint64(limit) } // Update the highest block number we know if a higher one is found. @@ -1674,7 +1465,7 @@ func (d *Downloader) processHeaders(origin uint64, td *big.Int) error { d.syncStatsLock.Unlock() // Signal the content downloaders of the availability of new tasks - for _, ch := range []chan bool{d.bodyWakeCh, d.receiptWakeCh} { + for _, ch := range []chan bool{d.queue.blockWakeCh, d.queue.receiptWakeCh} { select { case ch <- true: default: @@ -1685,7 +1476,7 @@ } // processFullSyncContent takes fetch results from the queue and imports them into the chain. -func (d *Downloader) processFullSyncContent() error { +func (d *Downloader) processFullSyncContent(ttd *big.Int, beaconMode bool) error { for { results := d.queue.Results(true) if len(results) == 0 { @@ -1694,9 +1485,44 @@ if d.chainInsertHook != nil { d.chainInsertHook(results) } + // Although the received blocks might be all valid, a legacy PoW/PoA sync + // must not accept post-merge blocks. Make sure that pre-merge blocks are + // imported, but post-merge ones are rejected. + var ( + rejected []*fetchResult + td *big.Int + ) + if !beaconMode && ttd != nil { + td = d.blockchain.GetTd(results[0].Header.ParentHash, results[0].Header.Number.Uint64()-1) + if td == nil { + // This should never really happen, but handle gracefully for now + log.Error("Failed to retrieve parent block TD", "number", results[0].Header.Number.Uint64()-1, "hash", results[0].Header.ParentHash) + return fmt.Errorf("%w: parent TD missing", errInvalidChain) + } + for i, result := range results { + td = new(big.Int).Add(td, result.Header.Difficulty) + if td.Cmp(ttd) >= 0 { + // Terminal total difficulty reached, allow the last block in + if new(big.Int).Sub(td, result.Header.Difficulty).Cmp(ttd) < 0 { + results, rejected = results[:i+1], results[i+1:] + if len(rejected) > 0 { + // Make a nicer user log as to the first TD truly rejected + td = new(big.Int).Add(td, rejected[0].Header.Difficulty) + } + } else { + results, rejected = results[:i], results[i:] + } + break + } + } + } if err := d.importBlockResults(results); err != nil { return err } + if len(rejected) != 0 { + log.Info("Legacy sync reached merge threshold", "number", rejected[0].Header.Number, "hash", rejected[0].Header.Hash(), "td", td, "ttd", ttd) + return ErrMergeTransition + } } } @@ -1710,7 +1536,7 @@ func (d *Downloader) importBlockResults(results []*fetchResult) error { return errCancelContentProcessing default: } - // Retrieve the a batch of results to import + // Retrieve a batch of results to import first, last := results[0].Header, results[len(results)-1].Header log.Debug("Inserting downloaded chain", "items", len(results), "firstnum", first.Number, "firsthash", first.Hash(), @@ -1723,6 +1549,16 @@ if index, err := d.blockchain.InsertChain(blocks); err != nil { if index < len(results) { log.Debug("Downloaded item processing failed", "number", results[index].Header.Number, "hash", results[index].Header.Hash(), "err", err) + + // In post-merge, notify the engine API of encountered bad chains + if d.badBlock != nil { + head, _, err := d.skeleton.Bounds() + if err != nil { + log.Error("Failed to retrieve beacon bounds for bad block reporting", "err",
err) + } else { + d.badBlock(blocks[index].Header(), head) + } + } } else { // The InsertChain method in blockchain.go will sometimes return an out-of-bounds index, // when it needs to preprocess blocks to import a sidechain. @@ -1735,9 +1571,9 @@ func (d *Downloader) importBlockResults(results []*fetchResult) error { return nil } -// processFastSyncContent takes fetch results from the queue and writes them to the +// processSnapSyncContent takes fetch results from the queue and writes them to the // database. It also controls the synchronisation of state nodes of the pivot block. -func (d *Downloader) processFastSyncContent() error { +func (d *Downloader) processSnapSyncContent() error { // Start syncing state of the reported head block. This should get us most of // the state of the pivot block. d.pivotLock.RLock() @@ -1800,7 +1636,7 @@ func (d *Downloader) processFastSyncContent() error { } else { results = append(append([]*fetchResult{oldPivot}, oldTail...), results...) } - // Split around the pivot block and process the two sides via fast/full sync + // Split around the pivot block and process the two sides via snap/full sync if atomic.LoadInt32(&d.committed) == 0 { latest := results[len(results)-1].Header // If the height is above the pivot block by 2 sets, it means the pivot @@ -1819,12 +1655,12 @@ func (d *Downloader) processFastSyncContent() error { d.pivotLock.Unlock() // Write out the pivot into the database so a rollback beyond it will - // reenable fast sync + // reenable snap sync rawdb.WriteLastPivotNumber(d.stateDB, pivot.Number.Uint64()) } } P, beforeP, afterP := splitAroundPivot(pivot.Number.Uint64(), results) - if err := d.commitFastSyncData(beforeP, sync); err != nil { + if err := d.commitSnapSyncData(beforeP, sync); err != nil { return err } if P != nil { @@ -1882,7 +1718,7 @@ func splitAroundPivot(pivot uint64, results []*fetchResult) (p *fetchResult, bef return p, before, after } -func (d *Downloader) commitFastSyncData(results []*fetchResult, stateSync *stateSync) error { +func (d *Downloader) commitSnapSyncData(results []*fetchResult, stateSync *stateSync) error { // Check for any early termination requests if len(results) == 0 { return nil @@ -1898,7 +1734,7 @@ func (d *Downloader) commitFastSyncData(results []*fetchResult, stateSync *state } // Retrieve a batch of results to import first, last := results[0].Header, results[len(results)-1].Header - log.Debug("Inserting fast-sync blocks", + log.Debug("Inserting snap-sync blocks", "firstnum", first.Number, "firsthash", first.Hash(), "lastnum", last.Number, "lasthash", last.Hash(), ) @@ -1917,49 +1753,19 @@ func (d *Downloader) commitFastSyncData(results []*fetchResult, stateSync *state func (d *Downloader) commitPivotBlock(result *fetchResult) error { block := types.NewBlockWithHeader(result.Header).WithBody(result.Transactions, result.Uncles) - log.Debug("Committing fast sync pivot as new head", "number", block.Number(), "hash", block.Hash()) + log.Debug("Committing snap sync pivot as new head", "number", block.Number(), "hash", block.Hash()) // Commit the pivot block as the new head, will require full sync from here on if _, err := d.blockchain.InsertReceiptChain([]*types.Block{block}, []types.Receipts{result.Receipts}, d.ancientLimit); err != nil { return err } - if err := d.blockchain.FastSyncCommitHead(block.Hash()); err != nil { + if err := d.blockchain.SnapSyncCommitHead(block.Hash()); err != nil { return err } atomic.StoreInt32(&d.committed, 1) - - // If we
had a bloom filter for the state sync, deallocate it now. Note, we only - // deallocate internally, but keep the empty wrapper. This ensures that if we do - // a rollback after committing the pivot and restarting fast sync, we don't end - // up using a nil bloom. Empty bloom is fine, it just returns that it does not - // have the info we need, so reach down to the database instead. - if d.stateBloom != nil { - d.stateBloom.Close() - } return nil } -// DeliverHeaders injects a new batch of block headers received from a remote -// node into the download schedule. -func (d *Downloader) DeliverHeaders(id string, headers []*types.Header) error { - return d.deliver(d.headerCh, &headerPack{id, headers}, headerInMeter, headerDropMeter) -} - -// DeliverBodies injects a new batch of block bodies received from a remote node. -func (d *Downloader) DeliverBodies(id string, transactions [][]*types.Transaction, uncles [][]*types.Header) error { - return d.deliver(d.bodyCh, &bodyPack{id, transactions, uncles}, bodyInMeter, bodyDropMeter) -} - -// DeliverReceipts injects a new batch of receipts received from a remote node. -func (d *Downloader) DeliverReceipts(id string, receipts [][]*types.Receipt) error { - return d.deliver(d.receiptCh, &receiptPack{id, receipts}, receiptInMeter, receiptDropMeter) -} - -// DeliverNodeData injects a new batch of node state data received from a remote node. -func (d *Downloader) DeliverNodeData(id string, data [][]byte) error { - return d.deliver(d.stateCh, &statePack{id, data}, stateInMeter, stateDropMeter) -} - // DeliverSnapPacket is invoked from a peer's message handler when it transmits a // data packet for the local node to consume. func (d *Downloader) DeliverSnapPacket(peer *snap.Peer, packet snap.Packet) error { @@ -1986,26 +1792,24 @@ func (d *Downloader) DeliverSnapPacket(peer *snap.Peer, packet snap.Packet) erro } } -// deliver injects a new batch of data received from a remote node. -func (d *Downloader) deliver(destCh chan dataPack, packet dataPack, inMeter, dropMeter metrics.Meter) (err error) { - // Update the delivery metrics for both good and failed deliveries - inMeter.Mark(int64(packet.Items())) - defer func() { - if err != nil { - dropMeter.Mark(int64(packet.Items())) +// readHeaderRange returns a list of headers, using the given last header as the base, +// and going backwards towards genesis. This method assumes that the caller already has +// placed a reasonable cap on count. 
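readHeaderRange below walks backwards from a given header via ParentHash lookups, so the result is ordered from the newest ancestor towards genesis. A toy version over a map-backed store showing that traversal, with simplified types in place of *types.Header:

package main

import "fmt"

type header struct {
	number uint64
	parent uint64 // stand-in for ParentHash
}

// readHeaderRange mirrors the real helper: starting from last, collect up to
// count ancestors, stopping early if the chain is not continuous.
func readHeaderRange(byNumber map[uint64]*header, last *header, count int) []*header {
	var (
		current = last
		headers []*header
	)
	for len(headers) < count {
		parent, ok := byNumber[current.parent]
		if !ok {
			break // chain not continuous, or exhausted
		}
		headers = append(headers, parent)
		current = parent
	}
	return headers
}

func main() {
	store := map[uint64]*header{1: {1, 0}, 2: {2, 1}, 3: {3, 2}}
	for _, h := range readHeaderRange(store, store[3], 2) {
		fmt.Println(h.number) // 2, then 1
	}
}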
+func (d *Downloader) readHeaderRange(last *types.Header, count int) []*types.Header { + var ( + current = last + headers []*types.Header + ) + for { + parent := d.lightchain.GetHeaderByHash(current.ParentHash) + if parent == nil { + break // The chain is not continuous, or the chain is exhausted } - }() - // Deliver or abort if the sync is canceled while queuing - d.cancelLock.RLock() - cancel := d.cancelCh - d.cancelLock.RUnlock() - if cancel == nil { - return errNoSyncActive - } - select { - case destCh <- packet: - return nil - case <-cancel: - return errNoSyncActive + headers = append(headers, parent) + if len(headers) >= count { + break + } + current = parent } + return headers } diff --git a/eth/downloader/downloader_test.go b/eth/downloader/downloader_test.go index e96373548..21cbcabd9 100644 --- a/eth/downloader/downloader_test.go +++ b/eth/downloader/downloader_test.go @@ -19,7 +19,9 @@ package downloader import ( "errors" "fmt" + "io/ioutil" "math/big" + "os" "strings" "sync" "sync/atomic" @@ -28,68 +30,57 @@ import ( "github.com/scroll-tech/go-ethereum" "github.com/scroll-tech/go-ethereum/common" + "github.com/scroll-tech/go-ethereum/consensus/ethash" + "github.com/scroll-tech/go-ethereum/core" "github.com/scroll-tech/go-ethereum/core/rawdb" - "github.com/scroll-tech/go-ethereum/core/state/snapshot" "github.com/scroll-tech/go-ethereum/core/types" + "github.com/scroll-tech/go-ethereum/core/vm" "github.com/scroll-tech/go-ethereum/eth/protocols/eth" - "github.com/scroll-tech/go-ethereum/ethdb" + "github.com/scroll-tech/go-ethereum/eth/protocols/snap" "github.com/scroll-tech/go-ethereum/event" + "github.com/scroll-tech/go-ethereum/log" + "github.com/scroll-tech/go-ethereum/params" + "github.com/scroll-tech/go-ethereum/rlp" "github.com/scroll-tech/go-ethereum/trie" ) -// Reduce some of the parameters to make the tester faster. -func init() { - fullMaxForkAncestry = 10000 - lightMaxForkAncestry = 10000 - blockCacheMaxItems = 1024 - fsHeaderContCheck = 500 * time.Millisecond -} - // downloadTester is a test simulator for mocking out local block chain. type downloadTester struct { + freezer string + chain *core.BlockChain downloader *Downloader - genesis *types.Block // Genesis blocks used by the tester and peers - stateDb ethdb.Database // Database used by the tester for syncing from peers - peerDb ethdb.Database // Database of the peers containing all data - peers map[string]*downloadTesterPeer - - ownHashes []common.Hash // Hash chain belonging to the tester - ownHeaders map[common.Hash]*types.Header // Headers belonging to the tester - ownBlocks map[common.Hash]*types.Block // Blocks belonging to the tester - ownReceipts map[common.Hash]types.Receipts // Receipts belonging to the tester - ownChainTd map[common.Hash]*big.Int // Total difficulties of the blocks in the local chain - - ancientHeaders map[common.Hash]*types.Header // Ancient headers belonging to the tester - ancientBlocks map[common.Hash]*types.Block // Ancient blocks belonging to the tester - ancientReceipts map[common.Hash]types.Receipts // Ancient receipts belonging to the tester - ancientChainTd map[common.Hash]*big.Int // Ancient total difficulties of the blocks in the local chain - - lock sync.RWMutex + peers map[string]*downloadTesterPeer + lock sync.RWMutex } // newTester creates a new downloader test mocker. func newTester() *downloadTester { + return newTesterWithNotification(nil) +} + +// newTesterWithNotification creates a new downloader test mocker with a success callback.
+func newTesterWithNotification(success func()) *downloadTester { + freezer, err := ioutil.TempDir("", "") + if err != nil { + panic(err) + } + db, err := rawdb.NewDatabaseWithFreezer(rawdb.NewMemoryDatabase(), freezer, "", false) + if err != nil { + panic(err) + } + core.GenesisBlockForTesting(db, testAddress, big.NewInt(1000000000000000)) + + chain, err := core.NewBlockChain(db, nil, params.TestChainConfig, ethash.NewFaker(), vm.Config{}, nil, nil) + if err != nil { + panic(err) + } tester := &downloadTester{ - genesis: testGenesis, - peerDb: testDB, - peers: make(map[string]*downloadTesterPeer), - ownHashes: []common.Hash{testGenesis.Hash()}, - ownHeaders: map[common.Hash]*types.Header{testGenesis.Hash(): testGenesis.Header()}, - ownBlocks: map[common.Hash]*types.Block{testGenesis.Hash(): testGenesis}, - ownReceipts: map[common.Hash]types.Receipts{testGenesis.Hash(): nil}, - ownChainTd: map[common.Hash]*big.Int{testGenesis.Hash(): testGenesis.Difficulty()}, - - // Initialize ancient store with test genesis block - ancientHeaders: map[common.Hash]*types.Header{testGenesis.Hash(): testGenesis.Header()}, - ancientBlocks: map[common.Hash]*types.Block{testGenesis.Hash(): testGenesis}, - ancientReceipts: map[common.Hash]types.Receipts{testGenesis.Hash(): nil}, - ancientChainTd: map[common.Hash]*big.Int{testGenesis.Hash(): testGenesis.Difficulty()}, - } - tester.stateDb = rawdb.NewMemoryDatabase() - tester.stateDb.Put(testGenesis.Root().Bytes(), []byte{0x00}) - - tester.downloader = New(0, tester.stateDb, trie.NewSyncBloom(1, tester.stateDb), new(event.TypeMux), tester, nil, tester.dropPeer) + freezer: freezer, + chain: chain, + peers: make(map[string]*downloadTesterPeer), + } + tester.downloader = New(0, db, new(event.TypeMux), tester.chain, nil, tester.dropPeer, success) return tester } @@ -97,20 +88,20 @@ func newTester() *downloadTester { // held resources. func (dl *downloadTester) terminate() { dl.downloader.Terminate() + dl.chain.Stop() + + os.RemoveAll(dl.freezer) } // sync starts synchronizing with a remote peer, blocking until it completes. func (dl *downloadTester) sync(id string, td *big.Int, mode SyncMode) error { - dl.lock.RLock() - hash := dl.peers[id].chain.headBlock().Hash() - // If no particular TD was requested, load from the peer's blockchain + head := dl.peers[id].chain.CurrentBlock() if td == nil { - td = dl.peers[id].chain.td(hash) + // If no particular TD was requested, load from the peer's blockchain + td = dl.peers[id].chain.GetTd(head.Hash(), head.NumberU64()) } - dl.lock.RUnlock() - // Synchronise with the chosen peer and ensure proper cleanup afterwards - err := dl.downloader.synchronise(id, hash, td, mode) + err := dl.downloader.synchronise(id, head.Hash(), td, nil, mode, false, nil) select { case <-dl.downloader.cancelCh: // Ok, downloader fully cancelled after sync cycle @@ -121,367 +112,294 @@ func (dl *downloadTester) sync(id string, td *big.Int, mode SyncMode) error { return err } -// HasHeader checks if a header is present in the testers canonical chain. -func (dl *downloadTester) HasHeader(hash common.Hash, number uint64) bool { - return dl.GetHeaderByHash(hash) != nil -} - -// HasBlock checks if a block is present in the testers canonical chain. -func (dl *downloadTester) HasBlock(hash common.Hash, number uint64) bool { - return dl.GetBlockByHash(hash) != nil -} - -// HasFastBlock checks if a block is present in the testers canonical chain. 
-func (dl *downloadTester) HasFastBlock(hash common.Hash, number uint64) bool { - dl.lock.RLock() - defer dl.lock.RUnlock() +// newPeer registers a new block download source into the downloader. +func (dl *downloadTester) newPeer(id string, version uint, blocks []*types.Block) *downloadTesterPeer { + dl.lock.Lock() + defer dl.lock.Unlock() - if _, ok := dl.ancientReceipts[hash]; ok { - return true + peer := &downloadTesterPeer{ + dl: dl, + id: id, + chain: newTestBlockchain(blocks), + withholdHeaders: make(map[common.Hash]struct{}), } - _, ok := dl.ownReceipts[hash] - return ok -} - -// GetHeader retrieves a header from the testers canonical chain. -func (dl *downloadTester) GetHeaderByHash(hash common.Hash) *types.Header { - dl.lock.RLock() - defer dl.lock.RUnlock() - return dl.getHeaderByHash(hash) -} + dl.peers[id] = peer -// getHeaderByHash returns the header if found either within ancients or own blocks) -// This method assumes that the caller holds at least the read-lock (dl.lock) -func (dl *downloadTester) getHeaderByHash(hash common.Hash) *types.Header { - header := dl.ancientHeaders[hash] - if header != nil { - return header + if err := dl.downloader.RegisterPeer(id, version, peer); err != nil { + panic(err) } - return dl.ownHeaders[hash] + if err := dl.downloader.SnapSyncer.Register(peer); err != nil { + panic(err) + } + return peer } -// GetBlock retrieves a block from the testers canonical chain. -func (dl *downloadTester) GetBlockByHash(hash common.Hash) *types.Block { - dl.lock.RLock() - defer dl.lock.RUnlock() +// dropPeer simulates a hard peer removal from the connection pool. +func (dl *downloadTester) dropPeer(id string) { + dl.lock.Lock() + defer dl.lock.Unlock() - block := dl.ancientBlocks[hash] - if block != nil { - return block - } - return dl.ownBlocks[hash] + delete(dl.peers, id) + dl.downloader.SnapSyncer.Unregister(id) + dl.downloader.UnregisterPeer(id) } -// CurrentHeader retrieves the current head header from the canonical chain. -func (dl *downloadTester) CurrentHeader() *types.Header { - dl.lock.RLock() - defer dl.lock.RUnlock() +type downloadTesterPeer struct { + dl *downloadTester + id string + chain *core.BlockChain - for i := len(dl.ownHashes) - 1; i >= 0; i-- { - if header := dl.ancientHeaders[dl.ownHashes[i]]; header != nil { - return header - } - if header := dl.ownHeaders[dl.ownHashes[i]]; header != nil { - return header - } - } - return dl.genesis.Header() + withholdHeaders map[common.Hash]struct{} } -// CurrentBlock retrieves the current head block from the canonical chain. -func (dl *downloadTester) CurrentBlock() *types.Block { - dl.lock.RLock() - defer dl.lock.RUnlock() +// Head constructs a function to retrieve a peer's current head hash +// and total difficulty. +func (dlp *downloadTesterPeer) Head() (common.Hash, *big.Int) { + head := dlp.chain.CurrentBlock() + return head.Hash(), dlp.chain.GetTd(head.Hash(), head.NumberU64()) +} - for i := len(dl.ownHashes) - 1; i >= 0; i-- { - if block := dl.ancientBlocks[dl.ownHashes[i]]; block != nil { - if _, err := dl.stateDb.Get(block.Root().Bytes()); err == nil { - return block - } - return block - } - if block := dl.ownBlocks[dl.ownHashes[i]]; block != nil { - if _, err := dl.stateDb.Get(block.Root().Bytes()); err == nil { - return block +// RequestHeadersByHash constructs a GetBlockHeaders function based on a hashed +// origin; associated with a particular peer in the download tester. The returned +// function can be used to retrieve batches of headers from the particular peer. 
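The rewritten test peers deliver data through the sink-channel pattern used by RequestHeadersByHash below and its siblings, instead of the old Deliver* calls: each handler builds an eth.Request/eth.Response pair, hands the response over asynchronously, and the Done channel carries the delivery verdict back. The hand-off reduced to its essentials, with a hypothetical payload type standing in for the real packets:

package main

import "fmt"

// response is a stand-in for eth.Response: a payload plus a Done channel the
// consumer uses to report the delivery status back to the producer.
type response struct {
	payload []string
	done    chan error
}

// serve mimics the test peers' pattern: build the response, then deliver it
// asynchronously so the handler returns before the downloader consumes it.
func serve(sink chan *response, payload []string) *response {
	res := &response{payload: payload, done: make(chan error, 1)}
	go func() { sink <- res }()
	return res
}

func main() {
	sink := make(chan *response)
	serve(sink, []string{"header-1", "header-2"})
	res := <-sink
	fmt.Println(len(res.payload)) // 2
	res.done <- nil               // acknowledge the delivery
}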
+func (dlp *downloadTesterPeer) RequestHeadersByHash(origin common.Hash, amount int, skip int, reverse bool, sink chan *eth.Response) (*eth.Request, error) { + // Service the header query via the live handler code + headers := eth.ServiceGetBlockHeadersQuery(dlp.chain, ð.GetBlockHeadersPacket{ + Origin: eth.HashOrNumber{ + Hash: origin, + }, + Amount: uint64(amount), + Skip: uint64(skip), + Reverse: reverse, + }, nil) + + // If a malicious peer is simulated withholding headers, delete them + for hash := range dlp.withholdHeaders { + for i, header := range headers { + if header.Hash() == hash { + headers = append(headers[:i], headers[i+1:]...) + break } } } - return dl.genesis -} - -// CurrentFastBlock retrieves the current head fast-sync block from the canonical chain. -func (dl *downloadTester) CurrentFastBlock() *types.Block { - dl.lock.RLock() - defer dl.lock.RUnlock() - - for i := len(dl.ownHashes) - 1; i >= 0; i-- { - if block := dl.ancientBlocks[dl.ownHashes[i]]; block != nil { - return block - } - if block := dl.ownBlocks[dl.ownHashes[i]]; block != nil { - return block - } + hashes := make([]common.Hash, len(headers)) + for i, header := range headers { + hashes[i] = header.Hash() } - return dl.genesis -} - -// FastSyncCommitHead manually sets the head block to a given hash. -func (dl *downloadTester) FastSyncCommitHead(hash common.Hash) error { - // For now only check that the state trie is correct - if block := dl.GetBlockByHash(hash); block != nil { - _, err := trie.NewSecure(block.Root(), trie.NewDatabase(dl.stateDb)) - return err + // Deliver the headers to the downloader + req := ð.Request{ + Peer: dlp.id, } - return fmt.Errorf("non existent block: %x", hash[:4]) -} - -// GetTd retrieves the block's total difficulty from the canonical chain. -func (dl *downloadTester) GetTd(hash common.Hash, number uint64) *big.Int { - dl.lock.RLock() - defer dl.lock.RUnlock() - - return dl.getTd(hash) -} - -// getTd retrieves the block's total difficulty if found either within -// ancients or own blocks). -// This method assumes that the caller holds at least the read-lock (dl.lock) -func (dl *downloadTester) getTd(hash common.Hash) *big.Int { - if td := dl.ancientChainTd[hash]; td != nil { - return td + res := ð.Response{ + Req: req, + Res: (*eth.BlockHeadersPacket)(&headers), + Meta: hashes, + Time: 1, + Done: make(chan error, 1), // Ignore the returned status } - return dl.ownChainTd[hash] + go func() { + sink <- res + }() + return req, nil } -// InsertHeaderChain injects a new batch of headers into the simulated chain. -func (dl *downloadTester) InsertHeaderChain(headers []*types.Header, checkFreq int) (i int, err error) { - dl.lock.Lock() - defer dl.lock.Unlock() - // Do a quick check, as the blockchain.InsertHeaderChain doesn't insert anything in case of errors - if dl.getHeaderByHash(headers[0].ParentHash) == nil { - return 0, fmt.Errorf("InsertHeaderChain: unknown parent at first position, parent of number %d", headers[0].Number) - } - var hashes []common.Hash - for i := 1; i < len(headers); i++ { - hash := headers[i-1].Hash() - if headers[i].ParentHash != headers[i-1].Hash() { - return i, fmt.Errorf("non-contiguous import at position %d", i) +// RequestHeadersByNumber constructs a GetBlockHeaders function based on a numbered +// origin; associated with a particular peer in the download tester. The returned +// function can be used to retrieve batches of headers from the particular peer. 
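Both header handlers (the one above and the one below) filter their reply through withholdHeaders, which lets a test simulate a malicious peer that hides specific blocks. That filtering step as a standalone helper (hypothetical, not part of the diff):

package downloader

import (
	"github.com/scroll-tech/go-ethereum/common"
	"github.com/scroll-tech/go-ethereum/core/types"
)

// withholdFilter is a hypothetical standalone version of the loop inside the
// test peer's header handlers: any header whose hash appears in the withhold
// set is dropped from the response, as if the peer never had it. It filters
// in place, reusing the input slice's backing array.
func withholdFilter(headers []*types.Header, withhold map[common.Hash]struct{}) []*types.Header {
	filtered := headers[:0]
	for _, header := range headers {
		if _, ok := withhold[header.Hash()]; !ok {
			filtered = append(filtered, header)
		}
	}
	return filtered
}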
+func (dlp *downloadTesterPeer) RequestHeadersByNumber(origin uint64, amount int, skip int, reverse bool, sink chan *eth.Response) (*eth.Request, error) { + // Service the header query via the live handler code + headers := eth.ServiceGetBlockHeadersQuery(dlp.chain, ð.GetBlockHeadersPacket{ + Origin: eth.HashOrNumber{ + Number: origin, + }, + Amount: uint64(amount), + Skip: uint64(skip), + Reverse: reverse, + }, nil) + + // If a malicious peer is simulated withholding headers, delete them + for hash := range dlp.withholdHeaders { + for i, header := range headers { + if header.Hash() == hash { + headers = append(headers[:i], headers[i+1:]...) + break + } } - hashes = append(hashes, hash) } - hashes = append(hashes, headers[len(headers)-1].Hash()) - // Do a full insert if pre-checks passed + hashes := make([]common.Hash, len(headers)) for i, header := range headers { - hash := hashes[i] - if dl.getHeaderByHash(hash) != nil { - continue - } - if dl.getHeaderByHash(header.ParentHash) == nil { - // This _should_ be impossible, due to precheck and induction - return i, fmt.Errorf("InsertHeaderChain: unknown parent at position %d", i) - } - dl.ownHashes = append(dl.ownHashes, hash) - dl.ownHeaders[hash] = header - - td := dl.getTd(header.ParentHash) - dl.ownChainTd[hash] = new(big.Int).Add(td, header.Difficulty) + hashes[i] = header.Hash() } - return len(headers), nil -} - -// InsertChain injects a new batch of blocks into the simulated chain. -func (dl *downloadTester) InsertChain(blocks types.Blocks) (i int, err error) { - dl.lock.Lock() - defer dl.lock.Unlock() - for i, block := range blocks { - if parent, ok := dl.ownBlocks[block.ParentHash()]; !ok { - return i, fmt.Errorf("InsertChain: unknown parent at position %d / %d", i, len(blocks)) - } else if _, err := dl.stateDb.Get(parent.Root().Bytes()); err != nil { - return i, fmt.Errorf("InsertChain: unknown parent state %x: %v", parent.Root(), err) - } - if hdr := dl.getHeaderByHash(block.Hash()); hdr == nil { - dl.ownHashes = append(dl.ownHashes, block.Hash()) - dl.ownHeaders[block.Hash()] = block.Header() - } - dl.ownBlocks[block.Hash()] = block - dl.ownReceipts[block.Hash()] = make(types.Receipts, 0) - dl.stateDb.Put(block.Root().Bytes(), []byte{0x00}) - td := dl.getTd(block.ParentHash()) - dl.ownChainTd[block.Hash()] = new(big.Int).Add(td, block.Difficulty()) + // Deliver the headers to the downloader + req := ð.Request{ + Peer: dlp.id, + } + res := ð.Response{ + Req: req, + Res: (*eth.BlockHeadersPacket)(&headers), + Meta: hashes, + Time: 1, + Done: make(chan error, 1), // Ignore the returned status } - return len(blocks), nil + go func() { + sink <- res + }() + return req, nil } -// InsertReceiptChain injects a new batch of receipts into the simulated chain. 
-func (dl *downloadTester) InsertReceiptChain(blocks types.Blocks, receipts []types.Receipts, ancientLimit uint64) (i int, err error) { - dl.lock.Lock() - defer dl.lock.Unlock() - - for i := 0; i < len(blocks) && i < len(receipts); i++ { - if _, ok := dl.ownHeaders[blocks[i].Hash()]; !ok { - return i, errors.New("unknown owner") - } - if _, ok := dl.ancientBlocks[blocks[i].ParentHash()]; !ok { - if _, ok := dl.ownBlocks[blocks[i].ParentHash()]; !ok { - return i, errors.New("InsertReceiptChain: unknown parent") - } - } - if blocks[i].NumberU64() <= ancientLimit { - dl.ancientBlocks[blocks[i].Hash()] = blocks[i] - dl.ancientReceipts[blocks[i].Hash()] = receipts[i] - - // Migrate from active db to ancient db - dl.ancientHeaders[blocks[i].Hash()] = blocks[i].Header() - dl.ancientChainTd[blocks[i].Hash()] = new(big.Int).Add(dl.ancientChainTd[blocks[i].ParentHash()], blocks[i].Difficulty()) - delete(dl.ownHeaders, blocks[i].Hash()) - delete(dl.ownChainTd, blocks[i].Hash()) - } else { - dl.ownBlocks[blocks[i].Hash()] = blocks[i] - dl.ownReceipts[blocks[i].Hash()] = receipts[i] - } +// RequestBodies constructs a getBlockBodies method associated with a particular +// peer in the download tester. The returned function can be used to retrieve +// batches of block bodies from the particularly requested peer. +func (dlp *downloadTesterPeer) RequestBodies(hashes []common.Hash, sink chan *eth.Response) (*eth.Request, error) { + blobs := eth.ServiceGetBlockBodiesQuery(dlp.chain, hashes) + + bodies := make([]*eth.BlockBody, len(blobs)) + for i, blob := range blobs { + bodies[i] = new(eth.BlockBody) + rlp.DecodeBytes(blob, bodies[i]) + } + var ( + txsHashes = make([]common.Hash, len(bodies)) + uncleHashes = make([]common.Hash, len(bodies)) + ) + hasher := trie.NewStackTrie(nil) + for i, body := range bodies { + txsHashes[i] = types.DeriveSha(types.Transactions(body.Transactions), hasher) + uncleHashes[i] = types.CalcUncleHash(body.Uncles) + } + req := ð.Request{ + Peer: dlp.id, + } + res := ð.Response{ + Req: req, + Res: (*eth.BlockBodiesPacket)(&bodies), + Meta: [][]common.Hash{txsHashes, uncleHashes}, + Time: 1, + Done: make(chan error, 1), // Ignore the returned status } - return len(blocks), nil + go func() { + sink <- res + }() + return req, nil } -// SetHead rewinds the local chain to a new head. -func (dl *downloadTester) SetHead(head uint64) error { - dl.lock.Lock() - defer dl.lock.Unlock() +// RequestReceipts constructs a getReceipts method associated with a particular +// peer in the download tester. The returned function can be used to retrieve +// batches of block receipts from the particularly requested peer. 
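RequestBodies above precomputes, for every served body, the transaction root and uncle hash that the downloader validates deliveries against; the pair travels as the response's Meta field. That derivation in isolation, using the same DeriveSha/StackTrie calls as the diff (a sketch, not new API):

package downloader

import (
	"github.com/scroll-tech/go-ethereum/common"
	"github.com/scroll-tech/go-ethereum/core/types"
	"github.com/scroll-tech/go-ethereum/trie"
)

// bodyMeta derives the two commitments a block body must match against its
// header: the transaction trie root and the hash of the uncle list.
func bodyMeta(txs []*types.Transaction, uncles []*types.Header) (txRoot, uncleHash common.Hash) {
	hasher := trie.NewStackTrie(nil)
	txRoot = types.DeriveSha(types.Transactions(txs), hasher)
	uncleHash = types.CalcUncleHash(uncles)
	return txRoot, uncleHash
}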
+func (dlp *downloadTesterPeer) RequestReceipts(hashes []common.Hash, sink chan *eth.Response) (*eth.Request, error) { + blobs := eth.ServiceGetReceiptsQuery(dlp.chain, hashes) - // Find the hash of the head to reset to - var hash common.Hash - for h, header := range dl.ownHeaders { - if header.Number.Uint64() == head { - hash = h - } - } - for h, header := range dl.ancientHeaders { - if header.Number.Uint64() == head { - hash = h - } + receipts := make([][]*types.Receipt, len(blobs)) + for i, blob := range blobs { + rlp.DecodeBytes(blob, &receipts[i]) } - if hash == (common.Hash{}) { - return fmt.Errorf("unknown head to set: %d", head) + hasher := trie.NewStackTrie(nil) + hashes = make([]common.Hash, len(receipts)) + for i, receipt := range receipts { + hashes[i] = types.DeriveSha(types.Receipts(receipt), hasher) } - // Find the offset in the header chain - var offset int - for o, h := range dl.ownHashes { - if h == hash { - offset = o - break - } + req := ð.Request{ + Peer: dlp.id, } - // Remove all the hashes and associated data afterwards - for i := offset + 1; i < len(dl.ownHashes); i++ { - delete(dl.ownChainTd, dl.ownHashes[i]) - delete(dl.ownHeaders, dl.ownHashes[i]) - delete(dl.ownReceipts, dl.ownHashes[i]) - delete(dl.ownBlocks, dl.ownHashes[i]) - - delete(dl.ancientChainTd, dl.ownHashes[i]) - delete(dl.ancientHeaders, dl.ownHashes[i]) - delete(dl.ancientReceipts, dl.ownHashes[i]) - delete(dl.ancientBlocks, dl.ownHashes[i]) + res := ð.Response{ + Req: req, + Res: (*eth.ReceiptsPacket)(&receipts), + Meta: hashes, + Time: 1, + Done: make(chan error, 1), // Ignore the returned status } - dl.ownHashes = dl.ownHashes[:offset+1] - return nil + go func() { + sink <- res + }() + return req, nil } -// Rollback removes some recently added elements from the chain. -func (dl *downloadTester) Rollback(hashes []common.Hash) { +// ID retrieves the peer's unique identifier. +func (dlp *downloadTesterPeer) ID() string { + return dlp.id } -// newPeer registers a new block download source into the downloader. -func (dl *downloadTester) newPeer(id string, version uint, chain *testChain) error { - dl.lock.Lock() - defer dl.lock.Unlock() - - peer := &downloadTesterPeer{dl: dl, id: id, chain: chain} - dl.peers[id] = peer - return dl.downloader.RegisterPeer(id, version, peer) -} - -// dropPeer simulates a hard peer removal from the connection pool. -func (dl *downloadTester) dropPeer(id string) { - dl.lock.Lock() - defer dl.lock.Unlock() +// RequestAccountRange fetches a batch of accounts rooted in a specific account +// trie, starting with the origin. +func (dlp *downloadTesterPeer) RequestAccountRange(id uint64, root, origin, limit common.Hash, bytes uint64) error { + // Create the request and service it + req := &snap.GetAccountRangePacket{ + ID: id, + Root: root, + Origin: origin, + Limit: limit, + Bytes: bytes, + } + slimaccs, proofs := snap.ServiceGetAccountRangeQuery(dlp.chain, req) - delete(dl.peers, id) - dl.downloader.UnregisterPeer(id) -} + // We need to convert to non-slim format, delegate to the packet code + res := &snap.AccountRangePacket{ + ID: id, + Accounts: slimaccs, + Proof: proofs, + } + hashes, accounts, _ := res.Unpack() -// Snapshots implements the BlockChain interface for the downloader, but is a noop. 
-func (dl *downloadTester) Snapshots() *snapshot.Tree { + go dlp.dl.downloader.SnapSyncer.OnAccounts(dlp, id, hashes, accounts, proofs) return nil } -type downloadTesterPeer struct { - dl *downloadTester - id string - chain *testChain - missingStates map[common.Hash]bool // State entries that fast sync should not return -} - -// Head constructs a function to retrieve a peer's current head hash -// and total difficulty. -func (dlp *downloadTesterPeer) Head() (common.Hash, *big.Int) { - b := dlp.chain.headBlock() - return b.Hash(), dlp.chain.td(b.Hash()) -} +// RequestStorageRanges fetches a batch of storage slots belonging to one or +// more accounts. If slots from only one account are requested, an origin marker +// may also be used to retrieve from there. +func (dlp *downloadTesterPeer) RequestStorageRanges(id uint64, root common.Hash, accounts []common.Hash, origin, limit []byte, bytes uint64) error { + // Create the request and service it + req := &snap.GetStorageRangesPacket{ + ID: id, + Accounts: accounts, + Root: root, + Origin: origin, + Limit: limit, + Bytes: bytes, + } + storage, proofs := snap.ServiceGetStorageRangesQuery(dlp.chain, req) -// RequestHeadersByHash constructs a GetBlockHeaders function based on a hashed -// origin; associated with a particular peer in the download tester. The returned -// function can be used to retrieve batches of headers from the particular peer. -func (dlp *downloadTesterPeer) RequestHeadersByHash(origin common.Hash, amount int, skip int, reverse bool) error { - result := dlp.chain.headersByHash(origin, amount, skip, reverse) - go dlp.dl.downloader.DeliverHeaders(dlp.id, result) - return nil -} + // We need to demultiplex the response, delegate to the packet code + res := &snap.StorageRangesPacket{ + ID: id, + Slots: storage, + Proof: proofs, + } + hashes, slots := res.Unpack() -// RequestHeadersByNumber constructs a GetBlockHeaders function based on a numbered -// origin; associated with a particular peer in the download tester. The returned -// function can be used to retrieve batches of headers from the particular peer. -func (dlp *downloadTesterPeer) RequestHeadersByNumber(origin uint64, amount int, skip int, reverse bool) error { - result := dlp.chain.headersByNumber(origin, amount, skip, reverse) - go dlp.dl.downloader.DeliverHeaders(dlp.id, result) + go dlp.dl.downloader.SnapSyncer.OnStorage(dlp, id, hashes, slots, proofs) return nil } -// RequestBodies constructs a getBlockBodies method associated with a particular -// peer in the download tester. The returned function can be used to retrieve -// batches of block bodies from the particularly requested peer. -func (dlp *downloadTesterPeer) RequestBodies(hashes []common.Hash) error { - txs, uncles := dlp.chain.bodies(hashes) - go dlp.dl.downloader.DeliverBodies(dlp.id, txs, uncles) +// RequestByteCodes fetches a batch of bytecodes by hash. +func (dlp *downloadTesterPeer) RequestByteCodes(id uint64, hashes []common.Hash, bytes uint64) error { + req := &snap.GetByteCodesPacket{ + ID: id, + Hashes: hashes, + Bytes: bytes, + } + codes := snap.ServiceGetByteCodesQuery(dlp.chain, req) + go dlp.dl.downloader.SnapSyncer.OnByteCodes(dlp, id, codes) return nil } -// RequestReceipts constructs a getReceipts method associated with a particular -// peer in the download tester. The returned function can be used to retrieve -// batches of block receipts from the particularly requested peer.
-func (dlp *downloadTesterPeer) RequestReceipts(hashes []common.Hash) error { - receipts := dlp.chain.receipts(hashes) - go dlp.dl.downloader.DeliverReceipts(dlp.id, receipts) +// RequestTrieNodes fetches a batch of account or storage trie nodes rooted in +// a specific state trie. +func (dlp *downloadTesterPeer) RequestTrieNodes(id uint64, root common.Hash, paths []snap.TrieNodePathSet, bytes uint64) error { + req := &snap.GetTrieNodesPacket{ + ID: id, + Root: root, + Paths: paths, + Bytes: bytes, + } + nodes, _ := snap.ServiceGetTrieNodesQuery(dlp.chain, req, time.Now()) + go dlp.dl.downloader.SnapSyncer.OnTrieNodes(dlp, id, nodes) return nil } -// RequestNodeData constructs a getNodeData method associated with a particular -// peer in the download tester. The returned function can be used to retrieve -// batches of node state data from the particularly requested peer. -func (dlp *downloadTesterPeer) RequestNodeData(hashes []common.Hash) error { - dlp.dl.lock.RLock() - defer dlp.dl.lock.RUnlock() - - results := make([][]byte, 0, len(hashes)) - for _, hash := range hashes { - if data, err := dlp.dl.peerDb.Get(hash.Bytes()); err == nil { - if !dlp.missingStates[hash] { - results = append(results, data) - } - } - } - go dlp.dl.downloader.DeliverNodeData(dlp.id, results) - return nil +// Log retrieves the peer's own contextual logger. +func (dlp *downloadTesterPeer) Log() log.Logger { + return log.New("peer", dlp.id) } // assertOwnChain checks if the local chain contains the correct number of items @@ -490,71 +408,52 @@ func assertOwnChain(t *testing.T, tester *downloadTester, length int) { // Mark this method as a helper to report errors at callsite, not in here t.Helper() - assertOwnForkedChain(t, tester, 1, []int{length}) -} - -// assertOwnForkedChain checks if the local forked chain contains the correct -// number of items of the various chain components.
-func assertOwnForkedChain(t *testing.T, tester *downloadTester, common int, lengths []int) { - // Mark this method as a helper to report errors at callsite, not in here - t.Helper() - - // Initialize the counters for the first fork - headers, blocks, receipts := lengths[0], lengths[0], lengths[0] - - // Update the counters for each subsequent fork - for _, length := range lengths[1:] { - headers += length - common - blocks += length - common - receipts += length - common - } + headers, blocks, receipts := length, length, length if tester.downloader.getMode() == LightSync { blocks, receipts = 1, 1 } - if hs := len(tester.ownHeaders) + len(tester.ancientHeaders) - 1; hs != headers { + if hs := int(tester.chain.CurrentHeader().Number.Uint64()) + 1; hs != headers { t.Fatalf("synchronised headers mismatch: have %v, want %v", hs, headers) } - if bs := len(tester.ownBlocks) + len(tester.ancientBlocks) - 1; bs != blocks { + if bs := int(tester.chain.CurrentBlock().NumberU64()) + 1; bs != blocks { t.Fatalf("synchronised blocks mismatch: have %v, want %v", bs, blocks) } - if rs := len(tester.ownReceipts) + len(tester.ancientReceipts) - 1; rs != receipts { + if rs := int(tester.chain.CurrentFastBlock().NumberU64()) + 1; rs != receipts { t.Fatalf("synchronised receipts mismatch: have %v, want %v", rs, receipts) } } func TestCanonicalSynchronisation66Full(t *testing.T) { testCanonSync(t, eth.ETH66, FullSync) } -func TestCanonicalSynchronisation66Fast(t *testing.T) { testCanonSync(t, eth.ETH66, FastSync) } +func TestCanonicalSynchronisation66Snap(t *testing.T) { testCanonSync(t, eth.ETH66, SnapSync) } func TestCanonicalSynchronisation66Light(t *testing.T) { testCanonSync(t, eth.ETH66, LightSync) } func testCanonSync(t *testing.T, protocol uint, mode SyncMode) { - t.Parallel() - tester := newTester() defer tester.terminate() // Create a small enough block chain to download chain := testChainBase.shorten(blockCacheMaxItems - 15) - tester.newPeer("peer", protocol, chain) + tester.newPeer("peer", protocol, chain.blocks[1:]) // Synchronise with the peer and make sure all relevant data was retrieved if err := tester.sync("peer", nil, mode); err != nil { t.Fatalf("failed to synchronise blocks: %v", err) } - assertOwnChain(t, tester, chain.len()) + assertOwnChain(t, tester, len(chain.blocks)) } // Tests that if a large batch of blocks are being downloaded, it is throttled // until the cached blocks are retrieved. func TestThrottling66Full(t *testing.T) { testThrottling(t, eth.ETH66, FullSync) } -func TestThrottling66Fast(t *testing.T) { testThrottling(t, eth.ETH66, FastSync) } +func TestThrottling66Snap(t *testing.T) { testThrottling(t, eth.ETH66, SnapSync) } func testThrottling(t *testing.T, protocol uint, mode SyncMode) { - t.Parallel() tester := newTester() + defer tester.terminate() // Create a long block chain to download and the tester - targetBlocks := testChainBase.len() - 1 - tester.newPeer("peer", protocol, testChainBase) + targetBlocks := len(testChainBase.blocks) - 1 + tester.newPeer("peer", protocol, testChainBase.blocks[1:]) // Wrap the importer to allow stepping blocked, proceed := uint32(0), make(chan struct{}) @@ -571,7 +470,7 @@ func testThrottling(t *testing.T, protocol uint, mode SyncMode) { for { // Check the retrieval count synchronously (! 
reason for this ugly block) tester.lock.RLock() - retrieved := len(tester.ownBlocks) + retrieved := int(tester.chain.CurrentFastBlock().Number().Uint64()) + 1 tester.lock.RUnlock() if retrieved >= targetBlocks+1 { break @@ -587,7 +486,7 @@ func testThrottling(t *testing.T, protocol uint, mode SyncMode) { { cached = tester.downloader.queue.resultCache.countCompleted() frozen = int(atomic.LoadUint32(&blocked)) - retrieved = len(tester.ownBlocks) + retrieved = int(tester.chain.CurrentFastBlock().Number().Uint64()) + 1 } tester.downloader.queue.resultCache.lock.Unlock() tester.downloader.queue.lock.Unlock() @@ -603,12 +502,11 @@ func testThrottling(t *testing.T, protocol uint, mode SyncMode) { // Make sure we filled up the cache, then exhaust it time.Sleep(25 * time.Millisecond) // give it a chance to screw up tester.lock.RLock() - retrieved = len(tester.ownBlocks) + retrieved = int(tester.chain.CurrentFastBlock().Number().Uint64()) + 1 tester.lock.RUnlock() if cached != blockCacheMaxItems && cached != blockCacheMaxItems-reorgProtHeaderDelay && retrieved+cached+frozen != targetBlocks+1 && retrieved+cached+frozen != targetBlocks+1-reorgProtHeaderDelay { t.Fatalf("block count mismatch: have %v, want %v (owned %v, blocked %v, target %v)", cached, blockCacheMaxItems, retrieved, frozen, targetBlocks+1) } - // Permit the blocked blocks to import if atomic.LoadUint32(&blocked) > 0 { atomic.StoreUint32(&blocked, uint32(0)) @@ -620,93 +518,85 @@ func testThrottling(t *testing.T, protocol uint, mode SyncMode) { if err := <-errc; err != nil { t.Fatalf("block synchronization failed: %v", err) } - tester.terminate() - } // Tests that simple synchronization against a forked chain works correctly. In // this test common ancestor lookup should *not* be short circuited, and a full // binary search should be executed. func TestForkedSync66Full(t *testing.T) { testForkedSync(t, eth.ETH66, FullSync) } -func TestForkedSync66Fast(t *testing.T) { testForkedSync(t, eth.ETH66, FastSync) } +func TestForkedSync66Snap(t *testing.T) { testForkedSync(t, eth.ETH66, SnapSync) } func TestForkedSync66Light(t *testing.T) { testForkedSync(t, eth.ETH66, LightSync) } func testForkedSync(t *testing.T, protocol uint, mode SyncMode) { - t.Parallel() - tester := newTester() defer tester.terminate() - chainA := testChainForkLightA.shorten(testChainBase.len() + 80) - chainB := testChainForkLightB.shorten(testChainBase.len() + 80) - tester.newPeer("fork A", protocol, chainA) - tester.newPeer("fork B", protocol, chainB) + chainA := testChainForkLightA.shorten(len(testChainBase.blocks) + 80) + chainB := testChainForkLightB.shorten(len(testChainBase.blocks) + 81) + tester.newPeer("fork A", protocol, chainA.blocks[1:]) + tester.newPeer("fork B", protocol, chainB.blocks[1:]) // Synchronise with the peer and make sure all blocks were retrieved if err := tester.sync("fork A", nil, mode); err != nil { t.Fatalf("failed to synchronise blocks: %v", err) } - assertOwnChain(t, tester, chainA.len()) + assertOwnChain(t, tester, len(chainA.blocks)) // Synchronise with the second peer and make sure that fork is pulled too if err := tester.sync("fork B", nil, mode); err != nil { t.Fatalf("failed to synchronise blocks: %v", err) } - assertOwnForkedChain(t, tester, testChainBase.len(), []int{chainA.len(), chainB.len()}) + assertOwnChain(t, tester, len(chainB.blocks)) } // Tests that synchronising against a much shorter but much heavier fork works // correctly and is not dropped.
func TestHeavyForkedSync66Full(t *testing.T) { testHeavyForkedSync(t, eth.ETH66, FullSync) } -func TestHeavyForkedSync66Fast(t *testing.T) { testHeavyForkedSync(t, eth.ETH66, FastSync) } +func TestHeavyForkedSync66Snap(t *testing.T) { testHeavyForkedSync(t, eth.ETH66, SnapSync) } func TestHeavyForkedSync66Light(t *testing.T) { testHeavyForkedSync(t, eth.ETH66, LightSync) } func testHeavyForkedSync(t *testing.T, protocol uint, mode SyncMode) { - t.Parallel() - tester := newTester() defer tester.terminate() - chainA := testChainForkLightA.shorten(testChainBase.len() + 80) - chainB := testChainForkHeavy.shorten(testChainBase.len() + 80) - tester.newPeer("light", protocol, chainA) - tester.newPeer("heavy", protocol, chainB) + chainA := testChainForkLightA.shorten(len(testChainBase.blocks) + 80) + chainB := testChainForkHeavy.shorten(len(testChainBase.blocks) + 79) + tester.newPeer("light", protocol, chainA.blocks[1:]) + tester.newPeer("heavy", protocol, chainB.blocks[1:]) // Synchronise with the peer and make sure all blocks were retrieved if err := tester.sync("light", nil, mode); err != nil { t.Fatalf("failed to synchronise blocks: %v", err) } - assertOwnChain(t, tester, chainA.len()) + assertOwnChain(t, tester, len(chainA.blocks)) // Synchronise with the second peer and make sure that fork is pulled too if err := tester.sync("heavy", nil, mode); err != nil { t.Fatalf("failed to synchronise blocks: %v", err) } - assertOwnForkedChain(t, tester, testChainBase.len(), []int{chainA.len(), chainB.len()}) + assertOwnChain(t, tester, len(chainB.blocks)) } // Tests that chain forks are contained within a certain interval of the current // chain head, ensuring that malicious peers cannot waste resources by feeding // long dead chains. func TestBoundedForkedSync66Full(t *testing.T) { testBoundedForkedSync(t, eth.ETH66, FullSync) } -func TestBoundedForkedSync66Fast(t *testing.T) { testBoundedForkedSync(t, eth.ETH66, FastSync) } +func TestBoundedForkedSync66Snap(t *testing.T) { testBoundedForkedSync(t, eth.ETH66, SnapSync) } func TestBoundedForkedSync66Light(t *testing.T) { testBoundedForkedSync(t, eth.ETH66, LightSync) } func testBoundedForkedSync(t *testing.T, protocol uint, mode SyncMode) { - t.Parallel() - tester := newTester() defer tester.terminate() chainA := testChainForkLightA chainB := testChainForkLightB - tester.newPeer("original", protocol, chainA) - tester.newPeer("rewriter", protocol, chainB) + tester.newPeer("original", protocol, chainA.blocks[1:]) + tester.newPeer("rewriter", protocol, chainB.blocks[1:]) // Synchronise with the peer and make sure all blocks were retrieved if err := tester.sync("original", nil, mode); err != nil { t.Fatalf("failed to synchronise blocks: %v", err) } - assertOwnChain(t, tester, chainA.len()) + assertOwnChain(t, tester, len(chainA.blocks)) // Synchronise with the second peer and ensure that the fork is rejected for being too old if err := tester.sync("rewriter", nil, mode); err != errInvalidAncestor { @@ -720,69 +610,46 @@ func testBoundedForkedSync(t *testing.T, protocol uint, mode SyncMode) { func TestBoundedHeavyForkedSync66Full(t *testing.T) { testBoundedHeavyForkedSync(t, eth.ETH66, FullSync) } -func TestBoundedHeavyForkedSync66Fast(t *testing.T) { - testBoundedHeavyForkedSync(t, eth.ETH66, FastSync) +func TestBoundedHeavyForkedSync66Snap(t *testing.T) { + testBoundedHeavyForkedSync(t, eth.ETH66, SnapSync) } func TestBoundedHeavyForkedSync66Light(t *testing.T) { testBoundedHeavyForkedSync(t, eth.ETH66, LightSync) } func testBoundedHeavyForkedSync(t
*testing.T, protocol uint, mode SyncMode) { - t.Parallel() tester := newTester() + defer tester.terminate() // Create a long enough forked chain chainA := testChainForkLightA chainB := testChainForkHeavy - tester.newPeer("original", protocol, chainA) + tester.newPeer("original", protocol, chainA.blocks[1:]) // Synchronise with the peer and make sure all blocks were retrieved if err := tester.sync("original", nil, mode); err != nil { t.Fatalf("failed to synchronise blocks: %v", err) } - assertOwnChain(t, tester, chainA.len()) + assertOwnChain(t, tester, len(chainA.blocks)) - tester.newPeer("heavy-rewriter", protocol, chainB) + tester.newPeer("heavy-rewriter", protocol, chainB.blocks[1:]) // Synchronise with the second peer and ensure that the fork is rejected for being too old if err := tester.sync("heavy-rewriter", nil, mode); err != errInvalidAncestor { t.Fatalf("sync failure mismatch: have %v, want %v", err, errInvalidAncestor) } - tester.terminate() -} - -// Tests that an inactive downloader will not accept incoming block headers, -// bodies and receipts. -func TestInactiveDownloader63(t *testing.T) { - t.Parallel() - - tester := newTester() - defer tester.terminate() - - // Check that neither block headers nor bodies are accepted - if err := tester.downloader.DeliverHeaders("bad peer", []*types.Header{}); err != errNoSyncActive { - t.Errorf("error mismatch: have %v, want %v", err, errNoSyncActive) - } - if err := tester.downloader.DeliverBodies("bad peer", [][]*types.Transaction{}, [][]*types.Header{}); err != errNoSyncActive { - t.Errorf("error mismatch: have %v, want %v", err, errNoSyncActive) - } - if err := tester.downloader.DeliverReceipts("bad peer", [][]*types.Receipt{}); err != errNoSyncActive { - t.Errorf("error mismatch: have %v, want %v", err, errNoSyncActive) - } } // Tests that a canceled download wipes all previously accumulated state. func TestCancel66Full(t *testing.T) { testCancel(t, eth.ETH66, FullSync) } -func TestCancel66Fast(t *testing.T) { testCancel(t, eth.ETH66, FastSync) } +func TestCancel66Snap(t *testing.T) { testCancel(t, eth.ETH66, SnapSync) } func TestCancel66Light(t *testing.T) { testCancel(t, eth.ETH66, LightSync) } func testCancel(t *testing.T, protocol uint, mode SyncMode) { - t.Parallel() - tester := newTester() defer tester.terminate() chain := testChainBase.shorten(MaxHeaderFetch) - tester.newPeer("peer", protocol, chain) + tester.newPeer("peer", protocol, chain.blocks[1:]) // Make sure canceling works with a pristine downloader tester.downloader.Cancel() @@ -801,12 +668,10 @@ func testCancel(t *testing.T, protocol uint, mode SyncMode) { // Tests that synchronisation from multiple peers works as intended (multi thread sanity test).
func TestMultiSynchronisation66Full(t *testing.T) { testMultiSynchronisation(t, eth.ETH66, FullSync) } -func TestMultiSynchronisation66Fast(t *testing.T) { testMultiSynchronisation(t, eth.ETH66, FastSync) } +func TestMultiSynchronisation66Snap(t *testing.T) { testMultiSynchronisation(t, eth.ETH66, SnapSync) } func TestMultiSynchronisation66Light(t *testing.T) { testMultiSynchronisation(t, eth.ETH66, LightSync) } func testMultiSynchronisation(t *testing.T, protocol uint, mode SyncMode) { - t.Parallel() - tester := newTester() defer tester.terminate() @@ -816,23 +681,21 @@ func testMultiSynchronisation(t *testing.T, protocol uint, mode SyncMode) { for i := 0; i < targetPeers; i++ { id := fmt.Sprintf("peer #%d", i) - tester.newPeer(id, protocol, chain.shorten(chain.len()/(i+1))) + tester.newPeer(id, protocol, chain.shorten(len(chain.blocks) / (i + 1)).blocks[1:]) } if err := tester.sync("peer #0", nil, mode); err != nil { t.Fatalf("failed to synchronise blocks: %v", err) } - assertOwnChain(t, tester, chain.len()) + assertOwnChain(t, tester, len(chain.blocks)) } // Tests that synchronisations behave well in multi-version protocol environments // and do not wreak havoc on other nodes in the network. func TestMultiProtoSynchronisation66Full(t *testing.T) { testMultiProtoSync(t, eth.ETH66, FullSync) } -func TestMultiProtoSynchronisation66Fast(t *testing.T) { testMultiProtoSync(t, eth.ETH66, FastSync) } +func TestMultiProtoSynchronisation66Snap(t *testing.T) { testMultiProtoSync(t, eth.ETH66, SnapSync) } func TestMultiProtoSynchronisation66Light(t *testing.T) { testMultiProtoSync(t, eth.ETH66, LightSync) } func testMultiProtoSync(t *testing.T, protocol uint, mode SyncMode) { - t.Parallel() - tester := newTester() defer tester.terminate() @@ -840,14 +703,14 @@ func testMultiProtoSync(t *testing.T, protocol uint, mode SyncMode) { chain := testChainBase.shorten(blockCacheMaxItems - 15) // Create peers of every type - tester.newPeer("peer 66", eth.ETH66, chain) - //tester.newPeer("peer 65", eth.ETH67, chain) + tester.newPeer("peer 66", eth.ETH66, chain.blocks[1:]) + //tester.newPeer("peer 65", eth.ETH67, chain.blocks[1:]) // Synchronise with the requested peer and make sure all blocks were retrieved if err := tester.sync(fmt.Sprintf("peer %d", protocol), nil, mode); err != nil { t.Fatalf("failed to synchronise blocks: %v", err) } - assertOwnChain(t, tester, chain.len()) + assertOwnChain(t, tester, len(chain.blocks)) // Check that no peers have been dropped off for _, version := range []int{66} { @@ -861,18 +724,16 @@ func testMultiProtoSync(t *testing.T, protocol uint, mode SyncMode) { // Tests that if a block is empty (e.g. header only), no body request should be // made, and instead the header should be assembled into a whole block in itself.
func TestEmptyShortCircuit66Full(t *testing.T) { testEmptyShortCircuit(t, eth.ETH66, FullSync) } -func TestEmptyShortCircuit66Fast(t *testing.T) { testEmptyShortCircuit(t, eth.ETH66, FastSync) } +func TestEmptyShortCircuit66Snap(t *testing.T) { testEmptyShortCircuit(t, eth.ETH66, SnapSync) } func TestEmptyShortCircuit66Light(t *testing.T) { testEmptyShortCircuit(t, eth.ETH66, LightSync) } func testEmptyShortCircuit(t *testing.T, protocol uint, mode SyncMode) { - t.Parallel() - tester := newTester() defer tester.terminate() // Create a block chain to download chain := testChainBase - tester.newPeer("peer", protocol, chain) + tester.newPeer("peer", protocol, chain.blocks[1:]) // Instrument the downloader to signal body requests bodiesHave, receiptsHave := int32(0), int32(0) @@ -886,17 +747,17 @@ func testEmptyShortCircuit(t *testing.T, protocol uint, mode SyncMode) { if err := tester.sync("peer", nil, mode); err != nil { t.Fatalf("failed to synchronise blocks: %v", err) } - assertOwnChain(t, tester, chain.len()) + assertOwnChain(t, tester, len(chain.blocks)) // Validate the number of block bodies that should have been requested bodiesNeeded, receiptsNeeded := 0, 0 - for _, block := range chain.blockm { - if mode != LightSync && block != tester.genesis && (len(block.Transactions()) > 0 || len(block.Uncles()) > 0) { + for _, block := range chain.blocks[1:] { + if mode != LightSync && (len(block.Transactions()) > 0 || len(block.Uncles()) > 0) { bodiesNeeded++ } } - for _, receipt := range chain.receiptm { - if mode == FastSync && len(receipt) > 0 { + for _, block := range chain.blocks[1:] { + if mode == SnapSync && len(block.Transactions()) > 0 { receiptsNeeded++ } } @@ -911,72 +772,64 @@ func testEmptyShortCircuit(t *testing.T, protocol uint, mode SyncMode) { // Tests that headers are enqueued continuously, preventing malicious nodes from // stalling the downloader by feeding gapped header chains. func TestMissingHeaderAttack66Full(t *testing.T) { testMissingHeaderAttack(t, eth.ETH66, FullSync) } -func TestMissingHeaderAttack66Fast(t *testing.T) { testMissingHeaderAttack(t, eth.ETH66, FastSync) } +func TestMissingHeaderAttack66Snap(t *testing.T) { testMissingHeaderAttack(t, eth.ETH66, SnapSync) } func TestMissingHeaderAttack66Light(t *testing.T) { testMissingHeaderAttack(t, eth.ETH66, LightSync) } func testMissingHeaderAttack(t *testing.T, protocol uint, mode SyncMode) { - t.Parallel() - tester := newTester() defer tester.terminate() chain := testChainBase.shorten(blockCacheMaxItems - 15) - brokenChain := chain.shorten(chain.len()) - delete(brokenChain.headerm, brokenChain.chain[brokenChain.len()/2]) - tester.newPeer("attack", protocol, brokenChain) + + attacker := tester.newPeer("attack", protocol, chain.blocks[1:]) + attacker.withholdHeaders[chain.blocks[len(chain.blocks)/2-1].Hash()] = struct{}{} if err := tester.sync("attack", nil, mode); err == nil { t.Fatalf("succeeded attacker synchronisation") } // Synchronise with the valid peer and make sure sync succeeds - tester.newPeer("valid", protocol, chain) + tester.newPeer("valid", protocol, chain.blocks[1:]) if err := tester.sync("valid", nil, mode); err != nil { t.Fatalf("failed to synchronise blocks: %v", err) } - assertOwnChain(t, tester, chain.len()) + assertOwnChain(t, tester, len(chain.blocks)) } // Tests that if requested headers are shifted (i.e. first is missing), the queue // detects the invalid numbering. 
func TestShiftedHeaderAttack66Full(t *testing.T) { testShiftedHeaderAttack(t, eth.ETH66, FullSync) } -func TestShiftedHeaderAttack66Fast(t *testing.T) { testShiftedHeaderAttack(t, eth.ETH66, FastSync) } +func TestShiftedHeaderAttack66Snap(t *testing.T) { testShiftedHeaderAttack(t, eth.ETH66, SnapSync) } func TestShiftedHeaderAttack66Light(t *testing.T) { testShiftedHeaderAttack(t, eth.ETH66, LightSync) } func testShiftedHeaderAttack(t *testing.T, protocol uint, mode SyncMode) { - t.Parallel() - tester := newTester() defer tester.terminate() chain := testChainBase.shorten(blockCacheMaxItems - 15) // Attempt a full sync with an attacker feeding shifted headers - brokenChain := chain.shorten(chain.len()) - delete(brokenChain.headerm, brokenChain.chain[1]) - delete(brokenChain.blockm, brokenChain.chain[1]) - delete(brokenChain.receiptm, brokenChain.chain[1]) - tester.newPeer("attack", protocol, brokenChain) + attacker := tester.newPeer("attack", protocol, chain.blocks[1:]) + attacker.withholdHeaders[chain.blocks[1].Hash()] = struct{}{} + if err := tester.sync("attack", nil, mode); err == nil { t.Fatalf("succeeded attacker synchronisation") } - // Synchronise with the valid peer and make sure sync succeeds - tester.newPeer("valid", protocol, chain) + tester.newPeer("valid", protocol, chain.blocks[1:]) if err := tester.sync("valid", nil, mode); err != nil { t.Fatalf("failed to synchronise blocks: %v", err) } - assertOwnChain(t, tester, chain.len()) + assertOwnChain(t, tester, len(chain.blocks)) } // Tests that upon detecting an invalid header, the recent ones are rolled back // for various failure scenarios. Afterwards a full sync is attempted to make // sure no state was corrupted. -func TestInvalidHeaderRollback66Fast(t *testing.T) { testInvalidHeaderRollback(t, eth.ETH66, FastSync) } +func TestInvalidHeaderRollback66Snap(t *testing.T) { testInvalidHeaderRollback(t, eth.ETH66, SnapSync) } func testInvalidHeaderRollback(t *testing.T, protocol uint, mode SyncMode) { - t.Parallel() - tester := newTester() + defer tester.terminate() // Create a small enough block chain to download targetBlocks := 3*fsHeaderSafetyNet + 256 + fsMinFullBlocks @@ -985,78 +838,67 @@ func testInvalidHeaderRollback(t *testing.T, protocol uint, mode SyncMode) { // Attempt to sync with an attacker that feeds junk during the fast sync phase. // This should result in the last fsHeaderSafetyNet headers being rolled back. missing := fsHeaderSafetyNet + MaxHeaderFetch + 1 - fastAttackChain := chain.shorten(chain.len()) - delete(fastAttackChain.headerm, fastAttackChain.chain[missing]) - tester.newPeer("fast-attack", protocol, fastAttackChain) + + fastAttacker := tester.newPeer("fast-attack", protocol, chain.blocks[1:]) + fastAttacker.withholdHeaders[chain.blocks[missing].Hash()] = struct{}{} if err := tester.sync("fast-attack", nil, mode); err == nil { t.Fatalf("succeeded fast attacker synchronisation") } - if head := tester.CurrentHeader().Number.Int64(); int(head) > MaxHeaderFetch { + if head := tester.chain.CurrentHeader().Number.Int64(); int(head) > MaxHeaderFetch { t.Errorf("rollback head mismatch: have %v, want at most %v", head, MaxHeaderFetch) } - // Attempt to sync with an attacker that feeds junk during the block import phase. // This should result in both the last fsHeaderSafetyNet number of headers being // rolled back, and also the pivot point being reverted to a non-block status. 
missing = 3*fsHeaderSafetyNet + MaxHeaderFetch + 1 - blockAttackChain := chain.shorten(chain.len()) - delete(fastAttackChain.headerm, fastAttackChain.chain[missing]) // Make sure the fast-attacker doesn't fill in - delete(blockAttackChain.headerm, blockAttackChain.chain[missing]) - tester.newPeer("block-attack", protocol, blockAttackChain) + + blockAttacker := tester.newPeer("block-attack", protocol, chain.blocks[1:]) + fastAttacker.withholdHeaders[chain.blocks[missing].Hash()] = struct{}{} // Make sure the fast-attacker doesn't fill in + blockAttacker.withholdHeaders[chain.blocks[missing].Hash()] = struct{}{} if err := tester.sync("block-attack", nil, mode); err == nil { t.Fatalf("succeeded block attacker synchronisation") } - if head := tester.CurrentHeader().Number.Int64(); int(head) > 2*fsHeaderSafetyNet+MaxHeaderFetch { + if head := tester.chain.CurrentHeader().Number.Int64(); int(head) > 2*fsHeaderSafetyNet+MaxHeaderFetch { t.Errorf("rollback head mismatch: have %v, want at most %v", head, 2*fsHeaderSafetyNet+MaxHeaderFetch) } - if mode == FastSync { - if head := tester.CurrentBlock().NumberU64(); head != 0 { + if mode == SnapSync { + if head := tester.chain.CurrentBlock().NumberU64(); head != 0 { t.Errorf("fast sync pivot block #%d not rolled back", head) } } - // Attempt to sync with an attacker that withholds promised blocks after the // fast sync pivot point. This could be a trial to leave the node with a bad // but already imported pivot block. - withholdAttackChain := chain.shorten(chain.len()) - tester.newPeer("withhold-attack", protocol, withholdAttackChain) + withholdAttacker := tester.newPeer("withhold-attack", protocol, chain.blocks[1:]) + tester.downloader.syncInitHook = func(uint64, uint64) { - for i := missing; i < withholdAttackChain.len(); i++ { - delete(withholdAttackChain.headerm, withholdAttackChain.chain[i]) + for i := missing; i < len(chain.blocks); i++ { + withholdAttacker.withholdHeaders[chain.blocks[i].Hash()] = struct{}{} } tester.downloader.syncInitHook = nil } if err := tester.sync("withhold-attack", nil, mode); err == nil { t.Fatalf("succeeded withholding attacker synchronisation") } - if head := tester.CurrentHeader().Number.Int64(); int(head) > 2*fsHeaderSafetyNet+MaxHeaderFetch { + if head := tester.chain.CurrentHeader().Number.Int64(); int(head) > 2*fsHeaderSafetyNet+MaxHeaderFetch { t.Errorf("rollback head mismatch: have %v, want at most %v", head, 2*fsHeaderSafetyNet+MaxHeaderFetch) } - if mode == FastSync { - if head := tester.CurrentBlock().NumberU64(); head != 0 { + if mode == SnapSync { + if head := tester.chain.CurrentBlock().NumberU64(); head != 0 { t.Errorf("fast sync pivot block #%d not rolled back", head) } } - - // synchronise with the valid peer and make sure sync succeeds. Since the last rollback + // Synchronise with the valid peer and make sure sync succeeds. Since the last rollback // should also disable fast syncing for this process, verify that we did a fresh full // sync. Note, we can't assert anything about the receipts since we won't purge the // database of them, hence we can't use assertOwnChain. 
- tester.newPeer("valid", protocol, chain) + tester.newPeer("valid", protocol, chain.blocks[1:]) if err := tester.sync("valid", nil, mode); err != nil { t.Fatalf("failed to synchronise blocks: %v", err) } - if hs := len(tester.ownHeaders); hs != chain.len() { - t.Fatalf("synchronised headers mismatch: have %v, want %v", hs, chain.len()) - } - if mode != LightSync { - if bs := len(tester.ownBlocks); bs != chain.len() { - t.Fatalf("synchronised blocks mismatch: have %v, want %v", bs, chain.len()) - } - } - tester.terminate() + assertOwnChain(t, tester, len(chain.blocks)) } // Tests that a peer advertising a high TD doesn't get to stall the downloader @@ -1064,32 +906,28 @@ func testInvalidHeaderRollback(t *testing.T, protocol uint, mode SyncMode) { func TestHighTDStarvationAttack66Full(t *testing.T) { testHighTDStarvationAttack(t, eth.ETH66, FullSync) } -func TestHighTDStarvationAttack66Fast(t *testing.T) { - testHighTDStarvationAttack(t, eth.ETH66, FastSync) +func TestHighTDStarvationAttack66Snap(t *testing.T) { + testHighTDStarvationAttack(t, eth.ETH66, SnapSync) } func TestHighTDStarvationAttack66Light(t *testing.T) { testHighTDStarvationAttack(t, eth.ETH66, LightSync) } func testHighTDStarvationAttack(t *testing.T, protocol uint, mode SyncMode) { - t.Parallel() - tester := newTester() + defer tester.terminate() chain := testChainBase.shorten(1) - tester.newPeer("attack", protocol, chain) + tester.newPeer("attack", protocol, chain.blocks[1:]) if err := tester.sync("attack", big.NewInt(1000000), mode); err != errStallingPeer { t.Fatalf("synchronisation error mismatch: have %v, want %v", err, errStallingPeer) } - tester.terminate() } // Tests that misbehaving peers are disconnected, whilst behaving ones are not. func TestBlockHeaderAttackerDropping66(t *testing.T) { testBlockHeaderAttackerDropping(t, eth.ETH66) } func testBlockHeaderAttackerDropping(t *testing.T, protocol uint) { - t.Parallel() - // Define the disconnection requirement for individual hash fetch errors tests := []struct { result error @@ -1119,16 +957,14 @@ func testBlockHeaderAttackerDropping(t *testing.T, protocol uint) { for i, tt := range tests { // Register a new peer and ensure its presence id := fmt.Sprintf("test %d", i) - if err := tester.newPeer(id, protocol, chain); err != nil { - t.Fatalf("test %d: failed to register new peer: %v", i, err) - } + tester.newPeer(id, protocol, chain.blocks[1:]) if _, ok := tester.peers[id]; !ok { t.Fatalf("test %d: registered peer not found", i) } // Simulate a synchronisation and check the required result tester.downloader.synchroniseMock = func(string, common.Hash) error { return tt.result } - tester.downloader.Synchronise(id, tester.genesis.Hash(), big.NewInt(1000), FullSync) + tester.downloader.LegacySync(id, tester.chain.Genesis().Hash(), big.NewInt(1000), nil, FullSync) if _, ok := tester.peers[id]; !ok != tt.drop { t.Errorf("test %d: peer drop mismatch for %v: have %v, want %v", i, tt.result, !ok, tt.drop) } @@ -1138,14 +974,13 @@ func testBlockHeaderAttackerDropping(t *testing.T, protocol uint) { // Tests that synchronisation progress (origin block number, current block number // and highest block number) is tracked and updated correctly. 
@@ -1138,14 +974,13 @@ // Tests that synchronisation progress (origin block number, current block number // and highest block number) is tracked and updated correctly. func TestSyncProgress66Full(t *testing.T) { testSyncProgress(t, eth.ETH66, FullSync) } -func TestSyncProgress66Fast(t *testing.T) { testSyncProgress(t, eth.ETH66, FastSync) } +func TestSyncProgress66Snap(t *testing.T) { testSyncProgress(t, eth.ETH66, SnapSync) } func TestSyncProgress66Light(t *testing.T) { testSyncProgress(t, eth.ETH66, LightSync) } func testSyncProgress(t *testing.T, protocol uint, mode SyncMode) { - t.Parallel() - tester := newTester() defer tester.terminate() + chain := testChainBase.shorten(blockCacheMaxItems - 15) // Set a sync init hook to catch progress changes @@ -1159,7 +994,7 @@ func testSyncProgress(t *testing.T, protocol uint, mode SyncMode) { checkProgress(t, tester.downloader, "pristine", ethereum.SyncProgress{}) // Synchronise half the blocks and check initial progress - tester.newPeer("peer-half", protocol, chain.shorten(chain.len()/2)) + tester.newPeer("peer-half", protocol, chain.shorten(len(chain.blocks) / 2).blocks[1:]) pending := new(sync.WaitGroup) pending.Add(1) @@ -1171,13 +1006,13 @@ func testSyncProgress(t *testing.T, protocol uint, mode SyncMode) { }() <-starting checkProgress(t, tester.downloader, "initial", ethereum.SyncProgress{ - HighestBlock: uint64(chain.len()/2 - 1), + HighestBlock: uint64(len(chain.blocks)/2 - 1), }) progress <- struct{}{} pending.Wait() // Synchronise all the blocks and check continuation progress - tester.newPeer("peer-full", protocol, chain) + tester.newPeer("peer-full", protocol, chain.blocks[1:]) pending.Add(1) go func() { defer pending.Done() @@ -1187,18 +1022,18 @@ func testSyncProgress(t *testing.T, protocol uint, mode SyncMode) { }() <-starting checkProgress(t, tester.downloader, "completing", ethereum.SyncProgress{ - StartingBlock: uint64(chain.len()/2 - 1), - CurrentBlock: uint64(chain.len()/2 - 1), - HighestBlock: uint64(chain.len() - 1), + StartingBlock: uint64(len(chain.blocks)/2 - 1), + CurrentBlock: uint64(len(chain.blocks)/2 - 1), + HighestBlock: uint64(len(chain.blocks) - 1), }) // Check final progress after successful sync progress <- struct{}{} pending.Wait() checkProgress(t, tester.downloader, "final", ethereum.SyncProgress{ - StartingBlock: uint64(chain.len()/2 - 1), - CurrentBlock: uint64(chain.len() - 1), - HighestBlock: uint64(chain.len() - 1), + StartingBlock: uint64(len(chain.blocks)/2 - 1), + CurrentBlock: uint64(len(chain.blocks) - 1), + HighestBlock: uint64(len(chain.blocks) - 1), }) } @@ -1207,9 +1042,7 @@ func checkProgress(t *testing.T, d *Downloader, stage string, want ethereum.Sync t.Helper() p := d.Progress() - p.KnownStates, p.PulledStates = 0, 0 - want.KnownStates, want.PulledStates = 0, 0 - if p != want { + if p.StartingBlock != want.StartingBlock || p.CurrentBlock != want.CurrentBlock || p.HighestBlock != want.HighestBlock { t.Fatalf("%s progress mismatch:\nhave %+v\nwant %+v", stage, p, want) } } @@ -1218,16 +1051,15 @@ func checkProgress(t *testing.T, d *Downloader, stage string, want ethereum.Sync // number) is tracked and updated correctly in case of a fork (or manual head // reversal).
func TestForkedSyncProgress66Full(t *testing.T) { testForkedSyncProgress(t, eth.ETH66, FullSync) } -func TestForkedSyncProgress66Fast(t *testing.T) { testForkedSyncProgress(t, eth.ETH66, FastSync) } +func TestForkedSyncProgress66Snap(t *testing.T) { testForkedSyncProgress(t, eth.ETH66, SnapSync) } func TestForkedSyncProgress66Light(t *testing.T) { testForkedSyncProgress(t, eth.ETH66, LightSync) } func testForkedSyncProgress(t *testing.T, protocol uint, mode SyncMode) { - t.Parallel() - tester := newTester() defer tester.terminate() - chainA := testChainForkLightA.shorten(testChainBase.len() + MaxHeaderFetch) - chainB := testChainForkLightB.shorten(testChainBase.len() + MaxHeaderFetch) + + chainA := testChainForkLightA.shorten(len(testChainBase.blocks) + MaxHeaderFetch) + chainB := testChainForkLightB.shorten(len(testChainBase.blocks) + MaxHeaderFetch) // Set a sync init hook to catch progress changes starting := make(chan struct{}) @@ -1240,7 +1072,7 @@ func testForkedSyncProgress(t *testing.T, protocol uint, mode SyncMode) { checkProgress(t, tester.downloader, "pristine", ethereum.SyncProgress{}) // Synchronise with one of the forks and check progress - tester.newPeer("fork A", protocol, chainA) + tester.newPeer("fork A", protocol, chainA.blocks[1:]) pending := new(sync.WaitGroup) pending.Add(1) go func() { @@ -1252,7 +1084,7 @@ func testForkedSyncProgress(t *testing.T, protocol uint, mode SyncMode) { <-starting checkProgress(t, tester.downloader, "initial", ethereum.SyncProgress{ - HighestBlock: uint64(chainA.len() - 1), + HighestBlock: uint64(len(chainA.blocks) - 1), }) progress <- struct{}{} pending.Wait() @@ -1261,7 +1093,7 @@ func testForkedSyncProgress(t *testing.T, protocol uint, mode SyncMode) { tester.downloader.syncStatsChainOrigin = tester.downloader.syncStatsChainHeight // Synchronise with the second fork and check progress resets - tester.newPeer("fork B", protocol, chainB) + tester.newPeer("fork B", protocol, chainB.blocks[1:]) pending.Add(1) go func() { defer pending.Done() @@ -1271,18 +1103,18 @@ func testForkedSyncProgress(t *testing.T, protocol uint, mode SyncMode) { }() <-starting checkProgress(t, tester.downloader, "forking", ethereum.SyncProgress{ - StartingBlock: uint64(testChainBase.len()) - 1, - CurrentBlock: uint64(chainA.len() - 1), - HighestBlock: uint64(chainB.len() - 1), + StartingBlock: uint64(len(testChainBase.blocks)) - 1, + CurrentBlock: uint64(len(chainA.blocks) - 1), + HighestBlock: uint64(len(chainB.blocks) - 1), }) // Check final progress after successful sync progress <- struct{}{} pending.Wait() checkProgress(t, tester.downloader, "final", ethereum.SyncProgress{ - StartingBlock: uint64(testChainBase.len()) - 1, - CurrentBlock: uint64(chainB.len() - 1), - HighestBlock: uint64(chainB.len() - 1), + StartingBlock: uint64(len(testChainBase.blocks)) - 1, + CurrentBlock: uint64(len(chainB.blocks) - 1), + HighestBlock: uint64(len(chainB.blocks) - 1), }) } @@ -1290,14 +1122,13 @@ func testForkedSyncProgress(t *testing.T, protocol uint, mode SyncMode) { // origin is not updated in the next sync cycle, as it should be considered the // continuation of the previous sync and not a new instance. 
func TestFailedSyncProgress66Full(t *testing.T) { testFailedSyncProgress(t, eth.ETH66, FullSync) } -func TestFailedSyncProgress66Fast(t *testing.T) { testFailedSyncProgress(t, eth.ETH66, FastSync) } +func TestFailedSyncProgress66Snap(t *testing.T) { testFailedSyncProgress(t, eth.ETH66, SnapSync) } func TestFailedSyncProgress66Light(t *testing.T) { testFailedSyncProgress(t, eth.ETH66, LightSync) } func testFailedSyncProgress(t *testing.T, protocol uint, mode SyncMode) { - t.Parallel() - tester := newTester() defer tester.terminate() + chain := testChainBase.shorten(blockCacheMaxItems - 15) // Set a sync init hook to catch progress changes @@ -1311,12 +1142,10 @@ func testFailedSyncProgress(t *testing.T, protocol uint, mode SyncMode) { checkProgress(t, tester.downloader, "pristine", ethereum.SyncProgress{}) // Attempt a full sync with a faulty peer - brokenChain := chain.shorten(chain.len()) - missing := brokenChain.len() / 2 - delete(brokenChain.headerm, brokenChain.chain[missing]) - delete(brokenChain.blockm, brokenChain.chain[missing]) - delete(brokenChain.receiptm, brokenChain.chain[missing]) - tester.newPeer("faulty", protocol, brokenChain) + missing := len(chain.blocks)/2 - 1 + + faulter := tester.newPeer("faulty", protocol, chain.blocks[1:]) + faulter.withholdHeaders[chain.blocks[missing].Hash()] = struct{}{} pending := new(sync.WaitGroup) pending.Add(1) @@ -1328,7 +1157,7 @@ func testFailedSyncProgress(t *testing.T, protocol uint, mode SyncMode) { }() <-starting checkProgress(t, tester.downloader, "initial", ethereum.SyncProgress{ - HighestBlock: uint64(brokenChain.len() - 1), + HighestBlock: uint64(len(chain.blocks) - 1), }) progress <- struct{}{} pending.Wait() @@ -1336,7 +1165,7 @@ // Synchronise with a good peer and check that the progress origin remains the same // after a failure - tester.newPeer("valid", protocol, chain) + tester.newPeer("valid", protocol, chain.blocks[1:]) pending.Add(1) go func() { defer pending.Done() @@ -1351,22 +1180,21 @@ func testFailedSyncProgress(t *testing.T, protocol uint, mode SyncMode) { progress <- struct{}{} pending.Wait() checkProgress(t, tester.downloader, "final", ethereum.SyncProgress{ - CurrentBlock: uint64(chain.len() - 1), - HighestBlock: uint64(chain.len() - 1), + CurrentBlock: uint64(len(chain.blocks) - 1), + HighestBlock: uint64(len(chain.blocks) - 1), }) } // Tests that if an attacker fakes a chain height, after the attack is detected, // the progress height is successfully reduced at the next sync invocation. func TestFakedSyncProgress66Full(t *testing.T) { testFakedSyncProgress(t, eth.ETH66, FullSync) } -func TestFakedSyncProgress66Fast(t *testing.T) { testFakedSyncProgress(t, eth.ETH66, FastSync) } +func TestFakedSyncProgress66Snap(t *testing.T) { testFakedSyncProgress(t, eth.ETH66, SnapSync) } func TestFakedSyncProgress66Light(t *testing.T) { testFakedSyncProgress(t, eth.ETH66, LightSync) } func testFakedSyncProgress(t *testing.T, protocol uint, mode SyncMode) { - t.Parallel() - tester := newTester() defer tester.terminate() + chain := testChainBase.shorten(blockCacheMaxItems - 15) // Set a sync init hook to catch progress changes @@ -1379,13 +1207,11 @@ func testFakedSyncProgress(t *testing.T, protocol uint, mode SyncMode) { checkProgress(t, tester.downloader, "pristine", ethereum.SyncProgress{}) // Create and sync with an attacker that promises a higher chain than available.
- brokenChain := chain.shorten(chain.len()) + attacker := tester.newPeer("attack", protocol, chain.blocks[1:]) numMissing := 5 - for i := brokenChain.len() - 2; i > brokenChain.len()-numMissing; i-- { - delete(brokenChain.headerm, brokenChain.chain[i]) + for i := len(chain.blocks) - 2; i > len(chain.blocks)-numMissing; i-- { + attacker.withholdHeaders[chain.blocks[i].Hash()] = struct{}{} } - tester.newPeer("attack", protocol, brokenChain) - pending := new(sync.WaitGroup) pending.Add(1) go func() { @@ -1396,7 +1222,7 @@ func testFakedSyncProgress(t *testing.T, protocol uint, mode SyncMode) { }() <-starting checkProgress(t, tester.downloader, "initial", ethereum.SyncProgress{ - HighestBlock: uint64(brokenChain.len() - 1), + HighestBlock: uint64(len(chain.blocks) - 1), }) progress <- struct{}{} pending.Wait() @@ -1404,8 +1230,8 @@ func testFakedSyncProgress(t *testing.T, protocol uint, mode SyncMode) { // Synchronise with a good peer and check that the progress height has been reduced to // the true value. - validChain := chain.shorten(chain.len() - numMissing) - tester.newPeer("valid", protocol, validChain) + validChain := chain.shorten(len(chain.blocks) - numMissing) + tester.newPeer("valid", protocol, validChain.blocks[1:]) pending.Add(1) go func() { @@ -1417,100 +1243,17 @@ func testFakedSyncProgress(t *testing.T, protocol uint, mode SyncMode) { <-starting checkProgress(t, tester.downloader, "completing", ethereum.SyncProgress{ CurrentBlock: afterFailedSync.CurrentBlock, - HighestBlock: uint64(validChain.len() - 1), + HighestBlock: uint64(len(validChain.blocks) - 1), }) - // Check final progress after successful sync. progress <- struct{}{} pending.Wait() checkProgress(t, tester.downloader, "final", ethereum.SyncProgress{ - CurrentBlock: uint64(validChain.len() - 1), - HighestBlock: uint64(validChain.len() - 1), + CurrentBlock: uint64(len(validChain.blocks) - 1), + HighestBlock: uint64(len(validChain.blocks) - 1), }) } -// This test reproduces an issue where unexpected deliveries would -// block indefinitely if they arrived at the right time. -func TestDeliverHeadersHang66Full(t *testing.T) { testDeliverHeadersHang(t, eth.ETH66, FullSync) } -func TestDeliverHeadersHang66Fast(t *testing.T) { testDeliverHeadersHang(t, eth.ETH66, FastSync) } -func TestDeliverHeadersHang66Light(t *testing.T) { testDeliverHeadersHang(t, eth.ETH66, LightSync) } - -func testDeliverHeadersHang(t *testing.T, protocol uint, mode SyncMode) { - t.Parallel() - - master := newTester() - defer master.terminate() - chain := testChainBase.shorten(15) - - for i := 0; i < 200; i++ { - tester := newTester() - tester.peerDb = master.peerDb - tester.newPeer("peer", protocol, chain) - - // Whenever the downloader requests headers, flood it with - // a lot of unrequested header deliveries. 
- tester.downloader.peers.peers["peer"].peer = &floodingTestPeer{ - peer: tester.downloader.peers.peers["peer"].peer, - tester: tester, - } - if err := tester.sync("peer", nil, mode); err != nil { - t.Errorf("test %d: sync failed: %v", i, err) - } - tester.terminate() - } -} - -type floodingTestPeer struct { - peer Peer - tester *downloadTester -} - -func (ftp *floodingTestPeer) Head() (common.Hash, *big.Int) { return ftp.peer.Head() } -func (ftp *floodingTestPeer) RequestHeadersByHash(hash common.Hash, count int, skip int, reverse bool) error { - return ftp.peer.RequestHeadersByHash(hash, count, skip, reverse) -} -func (ftp *floodingTestPeer) RequestBodies(hashes []common.Hash) error { - return ftp.peer.RequestBodies(hashes) -} -func (ftp *floodingTestPeer) RequestReceipts(hashes []common.Hash) error { - return ftp.peer.RequestReceipts(hashes) -} -func (ftp *floodingTestPeer) RequestNodeData(hashes []common.Hash) error { - return ftp.peer.RequestNodeData(hashes) -} - -func (ftp *floodingTestPeer) RequestHeadersByNumber(from uint64, count, skip int, reverse bool) error { - deliveriesDone := make(chan struct{}, 500) - for i := 0; i < cap(deliveriesDone)-1; i++ { - peer := fmt.Sprintf("fake-peer%d", i) - go func() { - ftp.tester.downloader.DeliverHeaders(peer, []*types.Header{{}, {}, {}, {}}) - deliveriesDone <- struct{}{} - }() - } - - // None of the extra deliveries should block. - timeout := time.After(60 * time.Second) - launched := false - for i := 0; i < cap(deliveriesDone); i++ { - select { - case <-deliveriesDone: - if !launched { - // Start delivering the requested headers - // after one of the flooding responses has arrived. - go func() { - ftp.peer.RequestHeadersByNumber(from, count, skip, reverse) - deliveriesDone <- struct{}{} - }() - launched = true - } - case <-timeout: - panic("blocked") - } - } - return nil -} - func TestRemoteHeaderRequestSpan(t *testing.T) { testCases := []struct { remoteHeight uint64 @@ -1589,14 +1332,12 @@ func TestRemoteHeaderRequestSpan(t *testing.T) { // Tests that peers below a pre-configured checkpoint block are prevented from // being fast-synced from, avoiding potential cheap eclipse attacks. 
func TestCheckpointEnforcement66Full(t *testing.T) { testCheckpointEnforcement(t, eth.ETH66, FullSync) } -func TestCheckpointEnforcement66Fast(t *testing.T) { testCheckpointEnforcement(t, eth.ETH66, FastSync) } +func TestCheckpointEnforcement66Snap(t *testing.T) { testCheckpointEnforcement(t, eth.ETH66, SnapSync) } func TestCheckpointEnforcement66Light(t *testing.T) { testCheckpointEnforcement(t, eth.ETH66, LightSync) } func testCheckpointEnforcement(t *testing.T, protocol uint, mode SyncMode) { - t.Parallel() - // Create a new tester with a particular hard coded checkpoint block tester := newTester() defer tester.terminate() @@ -1605,18 +1346,66 @@ func testCheckpointEnforcement(t *testing.T, protocol uint, mode SyncMode) { chain := testChainBase.shorten(int(tester.downloader.checkpoint) - 1) // Attempt to sync with the peer and validate the result - tester.newPeer("peer", protocol, chain) + tester.newPeer("peer", protocol, chain.blocks[1:]) var expect error - if mode == FastSync || mode == LightSync { + if mode == SnapSync || mode == LightSync { expect = errUnsyncedPeer } if err := tester.sync("peer", nil, mode); !errors.Is(err, expect) { t.Fatalf("block sync error mismatch: have %v, want %v", err, expect) } - if mode == FastSync || mode == LightSync { + if mode == SnapSync || mode == LightSync { assertOwnChain(t, tester, 1) } else { - assertOwnChain(t, tester, chain.len()) + assertOwnChain(t, tester, len(chain.blocks)) + } +} + +// Tests that beacon sync, where the head is dictated by a trusted external +// source instead of being negotiated via peer total difficulty, works +// correctly across various local chain lengths. +func TestBeaconSync66Full(t *testing.T) { testBeaconSync(t, eth.ETH66, FullSync) } +func TestBeaconSync66Snap(t *testing.T) { testBeaconSync(t, eth.ETH66, SnapSync) } + +func testBeaconSync(t *testing.T, protocol uint, mode SyncMode) { + //log.Root().SetHandler(log.LvlFilterHandler(log.LvlInfo, log.StreamHandler(os.Stderr, log.TerminalFormat(true)))) + + var cases = []struct { + name string // The name of the testing scenario + local int // The length of the local chain (canonical chain assumed), 0 means genesis is the head + }{ + {name: "Beacon sync since genesis", local: 0}, + {name: "Beacon sync with short local chain", local: 1}, + {name: "Beacon sync with long local chain", local: blockCacheMaxItems - 15 - fsMinFullBlocks/2}, + {name: "Beacon sync with full local chain", local: blockCacheMaxItems - 15 - 1}, + } + for _, c := range cases { + t.Run(c.name, func(t *testing.T) { + success := make(chan struct{}) + tester := newTesterWithNotification(func() { + close(success) + }) + defer tester.terminate() + + chain := testChainBase.shorten(blockCacheMaxItems - 15) + tester.newPeer("peer", protocol, chain.blocks[1:]) + + // Build the local chain segment if it's required + if c.local > 0 { + tester.chain.InsertChain(chain.blocks[1 : c.local+1]) + } + if err := tester.downloader.BeaconSync(mode, chain.blocks[len(chain.blocks)-1].Header()); err != nil { + t.Fatalf("Failed to beacon sync chain %v: %v", c.name, err) + } + select { + case <-success: + // Ok, downloader fully cancelled after sync cycle + if bs := int(tester.chain.CurrentBlock().NumberU64()) + 1; bs != len(chain.blocks) { + t.Fatalf("synchronised blocks mismatch: have %v, want %v", bs, len(chain.blocks)) + } + case <-time.NewTimer(time.Second * 3).C: + t.Fatalf("Failed to sync chain in three seconds") + } + }) } }
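The beacon-sync tests above drive the downloader the post-merge way: no peer is selected by total difficulty; instead the caller (normally the consensus client) supplies a trusted head and the downloader back-fills towards it. A condensed sketch of one such cycle, assuming the tester's notification callback closes a done channel when the sync cycle finishes:

// One beacon-sync cycle: the head is dictated externally (here the tip of
// the test chain) rather than negotiated with a peer.
head := chain.blocks[len(chain.blocks)-1].Header()
if err := tester.downloader.BeaconSync(SnapSync, head); err != nil {
	t.Fatalf("failed to start beacon sync: %v", err)
}
select {
case <-done: // tester signalled completion; the imported height can be verified now
case <-time.After(3 * time.Second):
	t.Fatal("beacon sync timed out")
}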
diff --git a/eth/downloader/fetchers.go b/eth/downloader/fetchers.go new file mode 100644 index 000000000..a7022240c --- /dev/null +++ b/eth/downloader/fetchers.go @@ -0,0 +1,115 @@ +// Copyright 2021 The go-ethereum Authors +// This file is part of the go-ethereum library. +// +// The go-ethereum library is free software: you can redistribute it and/or modify +// it under the terms of the GNU Lesser General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. +// +// The go-ethereum library is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU Lesser General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public License +// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>. + +package downloader + +import ( + "time" + + "github.com/scroll-tech/go-ethereum/common" + "github.com/scroll-tech/go-ethereum/core/types" + "github.com/scroll-tech/go-ethereum/eth/protocols/eth" +) + +// fetchHeadersByHash is a blocking version of Peer.RequestHeadersByHash which +// handles all the cancellation, interruption and timeout mechanisms of a data +// retrieval to allow blocking API calls. +func (d *Downloader) fetchHeadersByHash(p *peerConnection, hash common.Hash, amount int, skip int, reverse bool) ([]*types.Header, []common.Hash, error) { + // Create the response sink and send the network request + start := time.Now() + resCh := make(chan *eth.Response) + + req, err := p.peer.RequestHeadersByHash(hash, amount, skip, reverse, resCh) + if err != nil { + return nil, nil, err + } + defer req.Close() + + // Wait until the response arrives, the request is cancelled or times out + ttl := d.peers.rates.TargetTimeout() + + timeoutTimer := time.NewTimer(ttl) + defer timeoutTimer.Stop() + + select { + case <-d.cancelCh: + return nil, nil, errCanceled + + case <-timeoutTimer.C: + // Header retrieval timed out, update the metrics + p.log.Debug("Header request timed out", "elapsed", ttl) + headerTimeoutMeter.Mark(1) + + return nil, nil, errTimeout + + case res := <-resCh: + // Headers successfully retrieved, update the metrics + headerReqTimer.Update(time.Since(start)) + headerInMeter.Mark(int64(len(*res.Res.(*eth.BlockHeadersPacket)))) + + // Don't reject the packet even if it turns out to be bad, downloader will + // disconnect the peer on its own terms. Simply deliver the headers to + // be processed by the caller + res.Done <- nil + + return *res.Res.(*eth.BlockHeadersPacket), res.Meta.([]common.Hash), nil + } +}
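fetchHeadersByHash (and its by-number twin below) wraps the asynchronous request/response-channel API of the eth peer into a plain blocking call: fire the request, then let a single select arbitrate between cancellation, timeout and delivery. Call sites can therefore stay sequential; a sketch of how an ancestry probe might use the hash variant (illustrative usage, not lifted from this diff):

// Resolve the remote head announced at handshake time as one blocking
// round trip; p is the peer connection being probed.
head, _ := p.peer.Head()
headers, hashes, err := d.fetchHeadersByHash(p, head, 1, 0, false)
if err != nil {
	return fmt.Errorf("head header retrieval failed: %w", err)
}
if len(headers) != 1 {
	return fmt.Errorf("expected 1 header, got %d (hashes %v)", len(headers), hashes)
}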
+// fetchHeadersByNumber is a blocking version of Peer.RequestHeadersByNumber which +// handles all the cancellation, interruption and timeout mechanisms of a data +// retrieval to allow blocking API calls. +func (d *Downloader) fetchHeadersByNumber(p *peerConnection, number uint64, amount int, skip int, reverse bool) ([]*types.Header, []common.Hash, error) { + // Create the response sink and send the network request + start := time.Now() + resCh := make(chan *eth.Response) + + req, err := p.peer.RequestHeadersByNumber(number, amount, skip, reverse, resCh) + if err != nil { + return nil, nil, err + } + defer req.Close() + + // Wait until the response arrives, the request is cancelled or times out + ttl := d.peers.rates.TargetTimeout() + + timeoutTimer := time.NewTimer(ttl) + defer timeoutTimer.Stop() + + select { + case <-d.cancelCh: + return nil, nil, errCanceled + + case <-timeoutTimer.C: + // Header retrieval timed out, update the metrics + p.log.Debug("Header request timed out", "elapsed", ttl) + headerTimeoutMeter.Mark(1) + + return nil, nil, errTimeout + + case res := <-resCh: + // Headers successfully retrieved, update the metrics + headerReqTimer.Update(time.Since(start)) + headerInMeter.Mark(int64(len(*res.Res.(*eth.BlockHeadersPacket)))) + + // Don't reject the packet even if it turns out to be bad, downloader will + // disconnect the peer on its own terms. Simply deliver the headers to + // be processed by the caller + res.Done <- nil + + return *res.Res.(*eth.BlockHeadersPacket), res.Meta.([]common.Hash), nil + } +} diff --git a/eth/downloader/fetchers_concurrent.go b/eth/downloader/fetchers_concurrent.go new file mode 100644 index 000000000..7a32b97c9 --- /dev/null +++ b/eth/downloader/fetchers_concurrent.go @@ -0,0 +1,381 @@ +// Copyright 2021 The go-ethereum Authors +// This file is part of the go-ethereum library. +// +// The go-ethereum library is free software: you can redistribute it and/or modify +// it under the terms of the GNU Lesser General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. +// +// The go-ethereum library is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU Lesser General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public License +// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>. + +package downloader + +import ( + "errors" + "sort" + "time" + + "github.com/scroll-tech/go-ethereum/common" + "github.com/scroll-tech/go-ethereum/common/prque" + "github.com/scroll-tech/go-ethereum/eth/protocols/eth" + "github.com/scroll-tech/go-ethereum/log" +) + +// timeoutGracePeriod is the amount of time to allow for a peer to deliver a +// response to a locally already timed out request. Timeouts are not penalized +// as a peer might be temporarily overloaded, however, they still must reply +// to each request. Failing to do so is considered a protocol violation. +var timeoutGracePeriod = 2 * time.Minute + +// typedQueue is an interface defining the adaptor needed to translate the type +// specific downloader/queue schedulers into the type-agnostic general concurrent +// fetcher algorithm calls. +type typedQueue interface { + // waker returns a notification channel that gets pinged in case more fetches + // have been queued up, so the fetcher might assign it to idle peers. + waker() chan bool + + // pending returns the number of wrapped items that are currently queued for + // fetching by the concurrent downloader.
+ pending() int + + // capacity is responsible for calculating how many items of the abstracted + // type a particular peer is estimated to be able to retrieve within the + // allotted round trip time. + capacity(peer *peerConnection, rtt time.Duration) int + + // updateCapacity is responsible for updating how many items of the abstracted + // type a particular peer is estimated to be able to retrieve in a unit time. + updateCapacity(peer *peerConnection, items int, elapsed time.Duration) + + // reserve is responsible for allocating a requested number of pending items + // from the download queue to the specified peer. + reserve(peer *peerConnection, items int) (*fetchRequest, bool, bool) + + // unreserve is responsible for removing the current retrieval allocation + // assigned to a specific peer and placing it back into the pool to allow + // reassigning to some other peer. + unreserve(peer string) int + + // request is responsible for converting a generic fetch request into a typed + // one and sending it to the remote peer for fulfillment. + request(peer *peerConnection, req *fetchRequest, resCh chan *eth.Response) (*eth.Request, error) + + // deliver is responsible for taking a generic response packet from the + // concurrent fetcher, unpacking the type specific data and delivering + // it to the downloader's queue. + deliver(peer *peerConnection, packet *eth.Response) (int, error) +}
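Each downloadable payload type plugs into the generic fetcher through this adaptor; the body adapter appears later in this diff, and the header and receipt adapters (not part of this excerpt) have the same shape. A sketch of a receipt adapter's core methods, assuming the queue and peer expose the analogous receipt helpers (PendingReceipts, ReserveReceipts, ExpireReceipts, ReceiptCapacity):

// receiptQueue would adapt the receipt scheduler to the generic fetcher,
// as the exact counterpart of bodyQueue below (sketch only).
type receiptQueue Downloader

func (q *receiptQueue) waker() chan bool { return q.queue.receiptWakeCh }

func (q *receiptQueue) pending() int { return q.queue.PendingReceipts() }

func (q *receiptQueue) capacity(peer *peerConnection, rtt time.Duration) int {
	return peer.ReceiptCapacity(rtt)
}

func (q *receiptQueue) reserve(peer *peerConnection, items int) (*fetchRequest, bool, bool) {
	return q.queue.ReserveReceipts(peer, items)
}

func (q *receiptQueue) unreserve(peer string) int {
	return q.queue.ExpireReceipts(peer)
}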
+// concurrentFetch iteratively downloads scheduled block parts, taking available +// peers, reserving a chunk of fetch requests for each and waiting for delivery +// or timeouts. +func (d *Downloader) concurrentFetch(queue typedQueue, beaconMode bool) error { + // Create a delivery channel to accept responses from all peers + responses := make(chan *eth.Response) + + // Track the currently active requests and their timeout order + pending := make(map[string]*eth.Request) + defer func() { + // Abort all requests on sync cycle cancellation. The requests may still + // be fulfilled by the remote side, but the dispatcher will not wait to + // deliver them since nobody's going to be listening. + for _, req := range pending { + req.Close() + } + }() + ordering := make(map[*eth.Request]int) + timeouts := prque.New(func(data interface{}, index int) { + ordering[data.(*eth.Request)] = index + }) + + timeout := time.NewTimer(0) + if !timeout.Stop() { + <-timeout.C + } + defer timeout.Stop() + + // Track the timed-out but not-yet-answered requests separately. We want to + // keep tracking which peers are busy (potentially overloaded), so removing + // all trace of a timed out request is not good. We also can't just cancel + // the pending request altogether as that would prevent a late response from + // being delivered, thus never unblocking the peer. + stales := make(map[string]*eth.Request) + defer func() { + // Abort all requests on sync cycle cancellation. The requests may still + // be fulfilled by the remote side, but the dispatcher will not wait to + // deliver them since nobody's going to be listening. + for _, req := range stales { + req.Close() + } + }() + // Subscribe to peer lifecycle events to schedule tasks to new joiners and + // reschedule tasks upon disconnections. We don't care which event happened + // for simplicity, so just use a single channel. + peering := make(chan *peeringEvent, 64) // arbitrary buffer, just some burst protection + + peeringSub := d.peers.SubscribeEvents(peering) + defer peeringSub.Unsubscribe() + + // Prepare the queue and fetch block parts until the block header fetcher's done + finished := false + for { + // Short circuit if we lost all our peers + if d.peers.Len() == 0 && !beaconMode { + return errNoPeers + } + // If there's nothing more to fetch, wait or terminate + if queue.pending() == 0 { + if len(pending) == 0 && finished { + return nil + } + } else { + // Send a download request to all idle peers, until throttled + var ( + idles []*peerConnection + caps []int + ) + for _, peer := range d.peers.AllPeers() { + pending, stale := pending[peer.id], stales[peer.id] + if pending == nil && stale == nil { + idles = append(idles, peer) + caps = append(caps, queue.capacity(peer, time.Second)) + } else if stale != nil { + if waited := time.Since(stale.Sent); waited > timeoutGracePeriod { + // Request has been in flight longer than the grace period + // permitted it, consider the peer malicious attempting to + // stall the sync. + peer.log.Warn("Peer stalling, dropping", "waited", common.PrettyDuration(waited)) + d.dropPeer(peer.id) + } + } + } + sort.Sort(&peerCapacitySort{idles, caps}) + + var ( + progressed bool + throttled bool + queued = queue.pending() + ) + for _, peer := range idles { + // Short circuit if throttling activated or there are no more + // queued tasks to be retrieved + if throttled { + break + } + if queued = queue.pending(); queued == 0 { + break + } + // Reserve a chunk of fetches for a peer. A nil can mean either that + // no more headers are available, or that the peer is known not to + // have them. + request, progress, throttle := queue.reserve(peer, queue.capacity(peer, d.peers.rates.TargetRoundTrip())) + if progress { + progressed = true + } + if throttle { + throttled = true + throttleCounter.Inc(1) + } + if request == nil { + continue + } + // Fetch the chunk and make sure any errors return the hashes to the queue + req, err := queue.request(peer, request, responses) + if err != nil { + // Sending the request failed, which generally means the peer + // was disconnected in between assignment and network send. + // Although all peer removal operations return allocated tasks + // to the queue, that is async, and we can do better here by + // immediately pushing the unfulfilled requests. + queue.unreserve(peer.id) // TODO(karalabe): This needs a non-expiration method + continue + } + pending[peer.id] = req + + ttl := d.peers.rates.TargetTimeout() + ordering[req] = timeouts.Size() + + timeouts.Push(req, -time.Now().Add(ttl).UnixNano()) + if timeouts.Size() == 1 { + timeout.Reset(ttl) + } + } + // Make sure that we have peers available for fetching. If all peers have been tried + // and all failed throw an error + if !progressed && !throttled && len(pending) == 0 && len(idles) == d.peers.Len() && queued > 0 && !beaconMode { + return errPeersUnavailable + } + } + // Wait for something to happen + select { + case <-d.cancelCh: + // If sync was cancelled, tear down the parallel retriever.
Pending + // requests will be cancelled locally, and the remote responses will + // be dropped when they arrive + return errCanceled + + case event := <-peering: + // A peer joined or left, the tasks queue and allocations need to be + // checked for potential assignment or reassignment + peerid := event.peer.id + + if event.join { + // Sanity check the internal state; this can be dropped later + if _, ok := pending[peerid]; ok { + event.peer.log.Error("Pending request exists for joining peer") + } + if _, ok := stales[peerid]; ok { + event.peer.log.Error("Stale request exists for joining peer") + } + // Loop back to the entry point for task assignment + continue + } + // A peer left, any existing requests need to be untracked, pending + // tasks returned and possible reassignment checked + if req, ok := pending[peerid]; ok { + queue.unreserve(peerid) // TODO(karalabe): This needs a non-expiration method + delete(pending, peerid) + req.Close() + + if index, live := ordering[req]; live { + timeouts.Remove(index) + if index == 0 { + if !timeout.Stop() { + <-timeout.C + } + if timeouts.Size() > 0 { + _, exp := timeouts.Peek() + timeout.Reset(time.Until(time.Unix(0, -exp))) + } + } + delete(ordering, req) + } + } + if req, ok := stales[peerid]; ok { + delete(stales, peerid) + req.Close() + } + + case <-timeout.C: + // Retrieve the next request which should have timed out. The check + // below is purely to catch programming errors, given the correct + // code, there's no possible order of events that should result in a + // timeout firing for a non-existent event. + item, exp := timeouts.Peek() + if now, at := time.Now(), time.Unix(0, -exp); now.Before(at) { + log.Error("Timeout triggered but not reached", "left", at.Sub(now)) + timeout.Reset(at.Sub(now)) + continue + } + req := item.(*eth.Request) + + // Stop tracking the timed out request from a timing perspective, + // cancel it, so it's not considered in-flight anymore, but keep + // the peer marked busy to prevent assigning a second request and + // overloading it further. + delete(pending, req.Peer) + stales[req.Peer] = req + delete(ordering, req) + + timeouts.Pop() + if timeouts.Size() > 0 { + _, exp := timeouts.Peek() + timeout.Reset(time.Until(time.Unix(0, -exp))) + } + // New timeout potentially set if there are more requests pending, + // reschedule the failed one to a free peer + fails := queue.unreserve(req.Peer) + + // Finally, update the peer's retrieval capacity, or if it's already + // below the minimum allowance, drop the peer. If a lot of retrieval + // elements expired, we might have overestimated the remote peer or + // perhaps ourselves. Only reset to minimal throughput but don't drop + // just yet. + // + // The reason the minimum threshold is 2 is that the downloader tries + // to estimate the bandwidth and latency of a peer separately, which + // requires pushing the measured capacity a bit and seeing how response + // times react, so it always requests one more than the minimum (i.e. + // min 2). + peer := d.peers.Peer(req.Peer) + if peer == nil { + // If the peer got disconnected in between, we should really have + // short-circuited it already. Just in case there's some strange + // codepath, leave this check in so as not to crash.
diff --git a/eth/downloader/fetchers_concurrent_bodies.go b/eth/downloader/fetchers_concurrent_bodies.go
new file mode 100644
index 000000000..23eea70c3
--- /dev/null
+++ b/eth/downloader/fetchers_concurrent_bodies.go
@@ -0,0 +1,105 @@
+// Copyright 2021 The go-ethereum Authors
+// This file is part of the go-ethereum library.
+//
+// The go-ethereum library is free software: you can redistribute it and/or modify
+// it under the terms of the GNU Lesser General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+//
+// The go-ethereum library is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU Lesser General Public License for more details.
+//
+// You should have received a copy of the GNU Lesser General Public License
+// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
+
+package downloader
+
+import (
+	"time"
+
+	"github.com/scroll-tech/go-ethereum/common"
+	"github.com/scroll-tech/go-ethereum/eth/protocols/eth"
+	"github.com/scroll-tech/go-ethereum/log"
+)
+
+// bodyQueue implements typedQueue and is a type adapter between the generic
+// concurrent fetcher and the downloader.
+type bodyQueue Downloader
+
+// waker returns a notification channel that gets pinged in case more body
+// fetches have been queued up, so the fetcher might assign it to idle peers.
+func (q *bodyQueue) waker() chan bool {
+	return q.queue.blockWakeCh
+}
+
+// pending returns the number of bodies that are currently queued for fetching
+// by the concurrent downloader.
+func (q *bodyQueue) pending() int {
+	return q.queue.PendingBodies()
+}
+
+// capacity is responsible for calculating how many bodies a particular peer is
+// estimated to be able to retrieve within the allotted round trip time.
+func (q *bodyQueue) capacity(peer *peerConnection, rtt time.Duration) int {
+	return peer.BodyCapacity(rtt)
+}
+
+// updateCapacity is responsible for updating how many bodies a particular peer
+// is estimated to be able to retrieve in a unit time.
+func (q *bodyQueue) updateCapacity(peer *peerConnection, items int, span time.Duration) {
+	peer.UpdateBodyRate(items, span)
+}
+
+// reserve is responsible for allocating a requested number of pending bodies
+// from the download queue to the specified peer.
+func (q *bodyQueue) reserve(peer *peerConnection, items int) (*fetchRequest, bool, bool) {
+	return q.queue.ReserveBodies(peer, items)
+}
+
+// unreserve is responsible for removing the current body retrieval allocation
+// assigned to a specific peer and placing it back into the pool to allow
+// reassigning to some other peer.
+func (q *bodyQueue) unreserve(peer string) int {
+	fails := q.queue.ExpireBodies(peer)
+	if fails > 2 {
+		log.Trace("Body delivery timed out", "peer", peer)
+	} else {
+		log.Debug("Body delivery stalling", "peer", peer)
+	}
+	return fails
+}
+
+// request is responsible for converting a generic fetch request into a body
+// one and sending it to the remote peer for fulfillment.
+func (q *bodyQueue) request(peer *peerConnection, req *fetchRequest, resCh chan *eth.Response) (*eth.Request, error) {
+	peer.log.Trace("Requesting new batch of bodies", "count", len(req.Headers), "from", req.Headers[0].Number)
+	if q.bodyFetchHook != nil {
+		q.bodyFetchHook(req.Headers)
+	}
+
+	hashes := make([]common.Hash, 0, len(req.Headers))
+	for _, header := range req.Headers {
+		hashes = append(hashes, header.Hash())
+	}
+	return peer.peer.RequestBodies(hashes, resCh)
+}
+
+// deliver is responsible for taking a generic response packet from the concurrent
+// fetcher, unpacking the body data and delivering it to the downloader's queue.
+func (q *bodyQueue) deliver(peer *peerConnection, packet *eth.Response) (int, error) {
+	txs, uncles := packet.Res.(*eth.BlockBodiesPacket).Unpack()
+	hashsets := packet.Meta.([][]common.Hash) // {txs hashes, uncle hashes}
+
+	accepted, err := q.queue.DeliverBodies(peer.id, txs, hashsets[0], uncles, hashsets[1])
+	switch {
+	case err == nil && len(txs) == 0:
+		peer.log.Trace("Requested bodies delivered")
+	case err == nil:
+		peer.log.Trace("Delivered new batch of bodies", "count", len(txs), "accepted", accepted)
+	default:
+		peer.log.Debug("Failed to deliver retrieved bodies", "err", err)
+	}
+	return accepted, err
+}
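This file and the two that follow (headers, receipts) share one shape: `bodyQueue`, `headerQueue` and `receiptQueue` are plain type conversions of `Downloader`, each satisfying the `typedQueue` interface consumed by the shared concurrent fetch loop shown earlier, so the loop stays generic while the adapters add methods but no state. A stripped-down sketch of the pattern (reduced interface and illustrative names, not the patch's exact definitions):

package main

import "fmt"

// downloader stands in for the real Downloader and its shared queue state.
type downloader struct{ pendingBodies, pendingReceipts int }

// typedKind is a reduced stand-in for typedQueue: the generic fetch loop
// only talks to this interface, never to a concrete queue type.
type typedKind interface {
	pending() int
}

// Type conversions, not embedding: a *bodyQueue and the *downloader it was
// converted from share the same memory, so the adapters add methods only.
type bodyQueue downloader
type receiptQueue downloader

func (q *bodyQueue) pending() int    { return q.pendingBodies }
func (q *receiptQueue) pending() int { return q.pendingReceipts }

func main() {
	d := &downloader{pendingBodies: 3, pendingReceipts: 7}
	for _, kind := range []typedKind{(*bodyQueue)(d), (*receiptQueue)(d)} {
		fmt.Println("queued tasks:", kind.pending())
	}
}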
diff --git a/eth/downloader/fetchers_concurrent_headers.go b/eth/downloader/fetchers_concurrent_headers.go
new file mode 100644
index 000000000..d634cddf3
--- /dev/null
+++ b/eth/downloader/fetchers_concurrent_headers.go
@@ -0,0 +1,97 @@
+// Copyright 2021 The go-ethereum Authors
+// This file is part of the go-ethereum library.
+//
+// The go-ethereum library is free software: you can redistribute it and/or modify
+// it under the terms of the GNU Lesser General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+//
+// The go-ethereum library is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU Lesser General Public License for more details.
+//
+// You should have received a copy of the GNU Lesser General Public License
+// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
+
+package downloader
+
+import (
+	"time"
+
+	"github.com/scroll-tech/go-ethereum/common"
+	"github.com/scroll-tech/go-ethereum/eth/protocols/eth"
+	"github.com/scroll-tech/go-ethereum/log"
+)
+
+// headerQueue implements typedQueue and is a type adapter between the generic
+// concurrent fetcher and the downloader.
+type headerQueue Downloader
+
+// waker returns a notification channel that gets pinged in case more header
+// fetches have been queued up, so the fetcher might assign it to idle peers.
+func (q *headerQueue) waker() chan bool {
+	return q.queue.headerContCh
+}
+
+// pending returns the number of headers that are currently queued for fetching
+// by the concurrent downloader.
+func (q *headerQueue) pending() int {
+	return q.queue.PendingHeaders()
+}
+
+// capacity is responsible for calculating how many headers a particular peer is
+// estimated to be able to retrieve within the allotted round trip time.
+func (q *headerQueue) capacity(peer *peerConnection, rtt time.Duration) int {
+	return peer.HeaderCapacity(rtt)
+}
+
+// updateCapacity is responsible for updating how many headers a particular peer
+// is estimated to be able to retrieve in a unit time.
+func (q *headerQueue) updateCapacity(peer *peerConnection, items int, span time.Duration) {
+	peer.UpdateHeaderRate(items, span)
+}
+
+// reserve is responsible for allocating a requested number of pending headers
+// from the download queue to the specified peer.
+func (q *headerQueue) reserve(peer *peerConnection, items int) (*fetchRequest, bool, bool) {
+	return q.queue.ReserveHeaders(peer, items), false, false
+}
+
+// unreserve is responsible for removing the current header retrieval allocation
+// assigned to a specific peer and placing it back into the pool to allow
+// reassigning to some other peer.
+func (q *headerQueue) unreserve(peer string) int {
+	fails := q.queue.ExpireHeaders(peer)
+	if fails > 2 {
+		log.Trace("Header delivery timed out", "peer", peer)
+	} else {
+		log.Debug("Header delivery stalling", "peer", peer)
+	}
+	return fails
+}
+
+// request is responsible for converting a generic fetch request into a header
+// one and sending it to the remote peer for fulfillment.
+func (q *headerQueue) request(peer *peerConnection, req *fetchRequest, resCh chan *eth.Response) (*eth.Request, error) {
+	peer.log.Trace("Requesting new batch of headers", "from", req.From)
+	return peer.peer.RequestHeadersByNumber(req.From, MaxHeaderFetch, 0, false, resCh)
+}
+
+// deliver is responsible for taking a generic response packet from the concurrent
+// fetcher, unpacking the header data and delivering it to the downloader's queue.
+func (q *headerQueue) deliver(peer *peerConnection, packet *eth.Response) (int, error) {
+	headers := *packet.Res.(*eth.BlockHeadersPacket)
+	hashes := packet.Meta.([]common.Hash)
+
+	accepted, err := q.queue.DeliverHeaders(peer.id, headers, hashes, q.headerProcCh)
+	switch {
+	case err == nil && len(headers) == 0:
+		peer.log.Trace("Requested headers delivered")
+	case err == nil:
+		peer.log.Trace("Delivered new batch of headers", "count", len(headers), "accepted", accepted)
+	default:
+		peer.log.Debug("Failed to deliver retrieved headers", "err", err)
+	}
+	return accepted, err
+}
diff --git a/eth/downloader/fetchers_concurrent_receipts.go b/eth/downloader/fetchers_concurrent_receipts.go
new file mode 100644
index 000000000..9f8e5831f
--- /dev/null
+++ b/eth/downloader/fetchers_concurrent_receipts.go
@@ -0,0 +1,104 @@
+// Copyright 2021 The go-ethereum Authors
+// This file is part of the go-ethereum library.
+//
+// The go-ethereum library is free software: you can redistribute it and/or modify
+// it under the terms of the GNU Lesser General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+//
+// The go-ethereum library is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU Lesser General Public License for more details.
+//
+// You should have received a copy of the GNU Lesser General Public License
+// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
+
+package downloader
+
+import (
+	"time"
+
+	"github.com/scroll-tech/go-ethereum/common"
+	"github.com/scroll-tech/go-ethereum/eth/protocols/eth"
+	"github.com/scroll-tech/go-ethereum/log"
+)
+
+// receiptQueue implements typedQueue and is a type adapter between the generic
+// concurrent fetcher and the downloader.
+type receiptQueue Downloader
+
+// waker returns a notification channel that gets pinged in case more receipt
+// fetches have been queued up, so the fetcher might assign it to idle peers.
+func (q *receiptQueue) waker() chan bool {
+	return q.queue.receiptWakeCh
+}
+
+// pending returns the number of receipts that are currently queued for fetching
+// by the concurrent downloader.
+func (q *receiptQueue) pending() int {
+	return q.queue.PendingReceipts()
+}
+
+// capacity is responsible for calculating how many receipts a particular peer is
+// estimated to be able to retrieve within the allotted round trip time.
+func (q *receiptQueue) capacity(peer *peerConnection, rtt time.Duration) int {
+	return peer.ReceiptCapacity(rtt)
+}
+
+// updateCapacity is responsible for updating how many receipts a particular peer
+// is estimated to be able to retrieve in a unit time.
+func (q *receiptQueue) updateCapacity(peer *peerConnection, items int, span time.Duration) {
+	peer.UpdateReceiptRate(items, span)
+}
+
+// reserve is responsible for allocating a requested number of pending receipts
+// from the download queue to the specified peer.
+func (q *receiptQueue) reserve(peer *peerConnection, items int) (*fetchRequest, bool, bool) {
+	return q.queue.ReserveReceipts(peer, items)
+}
+
+// unreserve is responsible for removing the current receipt retrieval allocation
+// assigned to a specific peer and placing it back into the pool to allow
+// reassigning to some other peer.
+func (q *receiptQueue) unreserve(peer string) int { + fails := q.queue.ExpireReceipts(peer) + if fails > 2 { + log.Trace("Receipt delivery timed out", "peer", peer) + } else { + log.Debug("Receipt delivery stalling", "peer", peer) + } + return fails +} + +// request is responsible for converting a generic fetch request into a receipt +// one and sending it to the remote peer for fulfillment. +func (q *receiptQueue) request(peer *peerConnection, req *fetchRequest, resCh chan *eth.Response) (*eth.Request, error) { + peer.log.Trace("Requesting new batch of receipts", "count", len(req.Headers), "from", req.Headers[0].Number) + if q.receiptFetchHook != nil { + q.receiptFetchHook(req.Headers) + } + hashes := make([]common.Hash, 0, len(req.Headers)) + for _, header := range req.Headers { + hashes = append(hashes, header.Hash()) + } + return peer.peer.RequestReceipts(hashes, resCh) +} + +// deliver is responsible for taking a generic response packet from the concurrent +// fetcher, unpacking the receipt data and delivering it to the downloader's queue. +func (q *receiptQueue) deliver(peer *peerConnection, packet *eth.Response) (int, error) { + receipts := *packet.Res.(*eth.ReceiptsPacket) + hashes := packet.Meta.([]common.Hash) // {receipt hashes} + + accepted, err := q.queue.DeliverReceipts(peer.id, receipts, hashes) + switch { + case err == nil && len(receipts) == 0: + peer.log.Trace("Requested receipts delivered") + case err == nil: + peer.log.Trace("Delivered new batch of receipts", "count", len(receipts), "accepted", accepted) + default: + peer.log.Debug("Failed to deliver retrieved receipts", "err", err) + } + return accepted, err +} diff --git a/eth/downloader/metrics.go b/eth/downloader/metrics.go index 80f570100..49b9313d3 100644 --- a/eth/downloader/metrics.go +++ b/eth/downloader/metrics.go @@ -38,8 +38,5 @@ var ( receiptDropMeter = metrics.NewRegisteredMeter("eth/downloader/receipts/drop", nil) receiptTimeoutMeter = metrics.NewRegisteredMeter("eth/downloader/receipts/timeout", nil) - stateInMeter = metrics.NewRegisteredMeter("eth/downloader/states/in", nil) - stateDropMeter = metrics.NewRegisteredMeter("eth/downloader/states/drop", nil) - throttleCounter = metrics.NewRegisteredCounter("eth/downloader/throttle", nil) ) diff --git a/eth/downloader/modes.go b/eth/downloader/modes.go index 3ea14d22d..d388b9ee4 100644 --- a/eth/downloader/modes.go +++ b/eth/downloader/modes.go @@ -24,7 +24,6 @@ type SyncMode uint32 const ( FullSync SyncMode = iota // Synchronise the entire blockchain history from full blocks - FastSync // Quickly download the headers, full sync only at the chain SnapSync // Download the chain and the state via compact snapshots LightSync // Download only the headers and terminate afterwards ) @@ -38,8 +37,6 @@ func (mode SyncMode) String() string { switch mode { case FullSync: return "full" - case FastSync: - return "fast" case SnapSync: return "snap" case LightSync: @@ -53,8 +50,6 @@ func (mode SyncMode) MarshalText() ([]byte, error) { switch mode { case FullSync: return []byte("full"), nil - case FastSync: - return []byte("fast"), nil case SnapSync: return []byte("snap"), nil case LightSync: @@ -68,14 +63,12 @@ func (mode *SyncMode) UnmarshalText(text []byte) error { switch string(text) { case "full": *mode = FullSync - case "fast": - *mode = FastSync case "snap": *mode = SnapSync case "light": *mode = LightSync default: - return fmt.Errorf(`unknown sync mode %q, want "full", "fast" or "light"`, text) + return fmt.Errorf(`unknown sync mode %q, want "full", "snap" or 
"light"`, text) } return nil } diff --git a/eth/downloader/peer.go b/eth/downloader/peer.go index 15091c444..6caf13961 100644 --- a/eth/downloader/peer.go +++ b/eth/downloader/peer.go @@ -22,9 +22,7 @@ package downloader import ( "errors" "math/big" - "sort" "sync" - "sync/atomic" "time" "github.com/scroll-tech/go-ethereum/common" @@ -39,7 +37,6 @@ const ( ) var ( - errAlreadyFetching = errors.New("already fetching blocks from peer") errAlreadyRegistered = errors.New("peer is already registered") errNotRegistered = errors.New("peer is not registered") ) @@ -48,16 +45,6 @@ var ( type peerConnection struct { id string // Unique identifier of the peer - headerIdle int32 // Current header activity state of the peer (idle = 0, active = 1) - blockIdle int32 // Current block activity state of the peer (idle = 0, active = 1) - receiptIdle int32 // Current receipt activity state of the peer (idle = 0, active = 1) - stateIdle int32 // Current node data activity state of the peer (idle = 0, active = 1) - - headerStarted time.Time // Time instance when the last header fetch was started - blockStarted time.Time // Time instance when the last block (body) fetch was started - receiptStarted time.Time // Time instance when the last receipt fetch was started - stateStarted time.Time // Time instance when the last node data fetch was started - rates *msgrate.Tracker // Tracker to hone in on the number of items retrievable per second lacking map[common.Hash]struct{} // Set of hashes not to request (didn't have previously) @@ -71,16 +58,15 @@ type peerConnection struct { // LightPeer encapsulates the methods required to synchronise with a remote light peer. type LightPeer interface { Head() (common.Hash, *big.Int) - RequestHeadersByHash(common.Hash, int, int, bool) error - RequestHeadersByNumber(uint64, int, int, bool) error + RequestHeadersByHash(common.Hash, int, int, bool, chan *eth.Response) (*eth.Request, error) + RequestHeadersByNumber(uint64, int, int, bool, chan *eth.Response) (*eth.Request, error) } // Peer encapsulates the methods required to synchronise with a remote full peer. type Peer interface { LightPeer - RequestBodies([]common.Hash) error - RequestReceipts([]common.Hash) error - RequestNodeData([]common.Hash) error + RequestBodies([]common.Hash, chan *eth.Response) (*eth.Request, error) + RequestReceipts([]common.Hash, chan *eth.Response) (*eth.Request, error) } // lightPeerWrapper wraps a LightPeer struct, stubbing out the Peer-only methods. 
@@ -89,21 +75,18 @@ type lightPeerWrapper struct { } func (w *lightPeerWrapper) Head() (common.Hash, *big.Int) { return w.peer.Head() } -func (w *lightPeerWrapper) RequestHeadersByHash(h common.Hash, amount int, skip int, reverse bool) error { - return w.peer.RequestHeadersByHash(h, amount, skip, reverse) +func (w *lightPeerWrapper) RequestHeadersByHash(h common.Hash, amount int, skip int, reverse bool, sink chan *eth.Response) (*eth.Request, error) { + return w.peer.RequestHeadersByHash(h, amount, skip, reverse, sink) } -func (w *lightPeerWrapper) RequestHeadersByNumber(i uint64, amount int, skip int, reverse bool) error { - return w.peer.RequestHeadersByNumber(i, amount, skip, reverse) +func (w *lightPeerWrapper) RequestHeadersByNumber(i uint64, amount int, skip int, reverse bool, sink chan *eth.Response) (*eth.Request, error) { + return w.peer.RequestHeadersByNumber(i, amount, skip, reverse, sink) } -func (w *lightPeerWrapper) RequestBodies([]common.Hash) error { +func (w *lightPeerWrapper) RequestBodies([]common.Hash, chan *eth.Response) (*eth.Request, error) { panic("RequestBodies not supported in light client mode sync") } -func (w *lightPeerWrapper) RequestReceipts([]common.Hash) error { +func (w *lightPeerWrapper) RequestReceipts([]common.Hash, chan *eth.Response) (*eth.Request, error) { panic("RequestReceipts not supported in light client mode sync") } -func (w *lightPeerWrapper) RequestNodeData([]common.Hash) error { - panic("RequestNodeData not supported in light client mode sync") -} // newPeerConnection creates a new downloader peer. func newPeerConnection(id string, version uint, peer Peer, logger log.Logger) *peerConnection { @@ -121,114 +104,28 @@ func (p *peerConnection) Reset() { p.lock.Lock() defer p.lock.Unlock() - atomic.StoreInt32(&p.headerIdle, 0) - atomic.StoreInt32(&p.blockIdle, 0) - atomic.StoreInt32(&p.receiptIdle, 0) - atomic.StoreInt32(&p.stateIdle, 0) - p.lacking = make(map[common.Hash]struct{}) } -// FetchHeaders sends a header retrieval request to the remote peer. -func (p *peerConnection) FetchHeaders(from uint64, count int) error { - // Short circuit if the peer is already fetching - if !atomic.CompareAndSwapInt32(&p.headerIdle, 0, 1) { - return errAlreadyFetching - } - p.headerStarted = time.Now() - - // Issue the header retrieval request (absolute upwards without gaps) - go p.peer.RequestHeadersByNumber(from, count, 0, false) - - return nil -} - -// FetchBodies sends a block body retrieval request to the remote peer. -func (p *peerConnection) FetchBodies(request *fetchRequest) error { - // Short circuit if the peer is already fetching - if !atomic.CompareAndSwapInt32(&p.blockIdle, 0, 1) { - return errAlreadyFetching - } - p.blockStarted = time.Now() - - go func() { - // Convert the header set to a retrievable slice - hashes := make([]common.Hash, 0, len(request.Headers)) - for _, header := range request.Headers { - hashes = append(hashes, header.Hash()) - } - p.peer.RequestBodies(hashes) - }() - - return nil -} - -// FetchReceipts sends a receipt retrieval request to the remote peer. 
-func (p *peerConnection) FetchReceipts(request *fetchRequest) error { - // Short circuit if the peer is already fetching - if !atomic.CompareAndSwapInt32(&p.receiptIdle, 0, 1) { - return errAlreadyFetching - } - p.receiptStarted = time.Now() - - go func() { - // Convert the header set to a retrievable slice - hashes := make([]common.Hash, 0, len(request.Headers)) - for _, header := range request.Headers { - hashes = append(hashes, header.Hash()) - } - p.peer.RequestReceipts(hashes) - }() - - return nil +// UpdateHeaderRate updates the peer's estimated header retrieval throughput with +// the current measurement. +func (p *peerConnection) UpdateHeaderRate(delivered int, elapsed time.Duration) { + p.rates.Update(eth.BlockHeadersMsg, elapsed, delivered) } -// FetchNodeData sends a node state data retrieval request to the remote peer. -func (p *peerConnection) FetchNodeData(hashes []common.Hash) error { - // Short circuit if the peer is already fetching - if !atomic.CompareAndSwapInt32(&p.stateIdle, 0, 1) { - return errAlreadyFetching - } - p.stateStarted = time.Now() - - go p.peer.RequestNodeData(hashes) - - return nil +// UpdateBodyRate updates the peer's estimated body retrieval throughput with the +// current measurement. +func (p *peerConnection) UpdateBodyRate(delivered int, elapsed time.Duration) { + p.rates.Update(eth.BlockBodiesMsg, elapsed, delivered) } -// SetHeadersIdle sets the peer to idle, allowing it to execute new header retrieval -// requests. Its estimated header retrieval throughput is updated with that measured -// just now. -func (p *peerConnection) SetHeadersIdle(delivered int, deliveryTime time.Time) { - p.rates.Update(eth.BlockHeadersMsg, deliveryTime.Sub(p.headerStarted), delivered) - atomic.StoreInt32(&p.headerIdle, 0) +// UpdateReceiptRate updates the peer's estimated receipt retrieval throughput +// with the current measurement. +func (p *peerConnection) UpdateReceiptRate(delivered int, elapsed time.Duration) { + p.rates.Update(eth.ReceiptsMsg, elapsed, delivered) } -// SetBodiesIdle sets the peer to idle, allowing it to execute block body retrieval -// requests. Its estimated body retrieval throughput is updated with that measured -// just now. -func (p *peerConnection) SetBodiesIdle(delivered int, deliveryTime time.Time) { - p.rates.Update(eth.BlockBodiesMsg, deliveryTime.Sub(p.blockStarted), delivered) - atomic.StoreInt32(&p.blockIdle, 0) -} - -// SetReceiptsIdle sets the peer to idle, allowing it to execute new receipt -// retrieval requests. Its estimated receipt retrieval throughput is updated -// with that measured just now. -func (p *peerConnection) SetReceiptsIdle(delivered int, deliveryTime time.Time) { - p.rates.Update(eth.ReceiptsMsg, deliveryTime.Sub(p.receiptStarted), delivered) - atomic.StoreInt32(&p.receiptIdle, 0) -} - -// SetNodeDataIdle sets the peer to idle, allowing it to execute new state trie -// data retrieval requests. Its estimated state retrieval throughput is updated -// with that measured just now. -func (p *peerConnection) SetNodeDataIdle(delivered int, deliveryTime time.Time) { - p.rates.Update(eth.NodeDataMsg, deliveryTime.Sub(p.stateStarted), delivered) - atomic.StoreInt32(&p.stateIdle, 0) -} - -// HeaderCapacity retrieves the peers header download allowance based on its +// HeaderCapacity retrieves the peer's header download allowance based on its // previously discovered throughput. 
func (p *peerConnection) HeaderCapacity(targetRTT time.Duration) int { cap := p.rates.Capacity(eth.BlockHeadersMsg, targetRTT) @@ -238,9 +135,9 @@ func (p *peerConnection) HeaderCapacity(targetRTT time.Duration) int { return cap } -// BlockCapacity retrieves the peers block download allowance based on its +// BodyCapacity retrieves the peer's body download allowance based on its // previously discovered throughput. -func (p *peerConnection) BlockCapacity(targetRTT time.Duration) int { +func (p *peerConnection) BodyCapacity(targetRTT time.Duration) int { cap := p.rates.Capacity(eth.BlockBodiesMsg, targetRTT) if cap > MaxBlockFetch { cap = MaxBlockFetch @@ -258,16 +155,6 @@ func (p *peerConnection) ReceiptCapacity(targetRTT time.Duration) int { return cap } -// NodeDataCapacity retrieves the peers state download allowance based on its -// previously discovered throughput. -func (p *peerConnection) NodeDataCapacity(targetRTT time.Duration) int { - cap := p.rates.Capacity(eth.NodeDataMsg, targetRTT) - if cap > MaxStateFetch { - cap = MaxStateFetch - } - return cap -} - // MarkLacking appends a new entity to the set of items (blocks, receipts, states) // that a peer is known not to have (i.e. have been requested before). If the // set reaches its maximum allowed capacity, items are randomly dropped off. @@ -294,14 +181,19 @@ func (p *peerConnection) Lacks(hash common.Hash) bool { return ok } +// peeringEvent is sent on the peer event feed when a remote peer connects or +// disconnects. +type peeringEvent struct { + peer *peerConnection + join bool +} + // peerSet represents the collection of active peer participating in the chain // download procedure. type peerSet struct { - peers map[string]*peerConnection - rates *msgrate.Trackers // Set of rate trackers to give the sync a common beat - - newPeerFeed event.Feed - peerDropFeed event.Feed + peers map[string]*peerConnection + rates *msgrate.Trackers // Set of rate trackers to give the sync a common beat + events event.Feed // Feed to publish peer lifecycle events on lock sync.RWMutex } @@ -314,14 +206,9 @@ func newPeerSet() *peerSet { } } -// SubscribeNewPeers subscribes to peer arrival events. -func (ps *peerSet) SubscribeNewPeers(ch chan<- *peerConnection) event.Subscription { - return ps.newPeerFeed.Subscribe(ch) -} - -// SubscribePeerDrops subscribes to peer departure events. -func (ps *peerSet) SubscribePeerDrops(ch chan<- *peerConnection) event.Subscription { - return ps.peerDropFeed.Subscribe(ch) +// SubscribeEvents subscribes to peer arrival and departure events. +func (ps *peerSet) SubscribeEvents(ch chan<- *peeringEvent) event.Subscription { + return ps.events.Subscribe(ch) } // Reset iterates over the current peer set, and resets each of the known peers @@ -355,7 +242,7 @@ func (ps *peerSet) Register(p *peerConnection) error { ps.peers[p.id] = p ps.lock.Unlock() - ps.newPeerFeed.Send(p) + ps.events.Send(&peeringEvent{peer: p, join: true}) return nil } @@ -372,7 +259,7 @@ func (ps *peerSet) Unregister(id string) error { ps.rates.Untrack(id) ps.lock.Unlock() - ps.peerDropFeed.Send(p) + ps.events.Send(&peeringEvent{peer: p, join: false}) return nil } @@ -404,98 +291,22 @@ func (ps *peerSet) AllPeers() []*peerConnection { return list } -// HeaderIdlePeers retrieves a flat list of all the currently header-idle peers -// within the active peer set, ordered by their reputation. 
-func (ps *peerSet) HeaderIdlePeers() ([]*peerConnection, int) { - idle := func(p *peerConnection) bool { - return atomic.LoadInt32(&p.headerIdle) == 0 - } - throughput := func(p *peerConnection) int { - return p.rates.Capacity(eth.BlockHeadersMsg, time.Second) - } - return ps.idlePeers(eth.ETH66, eth.ETH66, idle, throughput) -} - -// BodyIdlePeers retrieves a flat list of all the currently body-idle peers within -// the active peer set, ordered by their reputation. -func (ps *peerSet) BodyIdlePeers() ([]*peerConnection, int) { - idle := func(p *peerConnection) bool { - return atomic.LoadInt32(&p.blockIdle) == 0 - } - throughput := func(p *peerConnection) int { - return p.rates.Capacity(eth.BlockBodiesMsg, time.Second) - } - return ps.idlePeers(eth.ETH66, eth.ETH66, idle, throughput) -} - -// ReceiptIdlePeers retrieves a flat list of all the currently receipt-idle peers -// within the active peer set, ordered by their reputation. -func (ps *peerSet) ReceiptIdlePeers() ([]*peerConnection, int) { - idle := func(p *peerConnection) bool { - return atomic.LoadInt32(&p.receiptIdle) == 0 - } - throughput := func(p *peerConnection) int { - return p.rates.Capacity(eth.ReceiptsMsg, time.Second) - } - return ps.idlePeers(eth.ETH66, eth.ETH66, idle, throughput) -} - -// NodeDataIdlePeers retrieves a flat list of all the currently node-data-idle -// peers within the active peer set, ordered by their reputation. -func (ps *peerSet) NodeDataIdlePeers() ([]*peerConnection, int) { - idle := func(p *peerConnection) bool { - return atomic.LoadInt32(&p.stateIdle) == 0 - } - throughput := func(p *peerConnection) int { - return p.rates.Capacity(eth.NodeDataMsg, time.Second) - } - return ps.idlePeers(eth.ETH66, eth.ETH66, idle, throughput) -} - -// idlePeers retrieves a flat list of all currently idle peers satisfying the -// protocol version constraints, using the provided function to check idleness. -// The resulting set of peers are sorted by their capacity. -func (ps *peerSet) idlePeers(minProtocol, maxProtocol uint, idleCheck func(*peerConnection) bool, capacity func(*peerConnection) int) ([]*peerConnection, int) { - ps.lock.RLock() - defer ps.lock.RUnlock() - - var ( - total = 0 - idle = make([]*peerConnection, 0, len(ps.peers)) - tps = make([]int, 0, len(ps.peers)) - ) - for _, p := range ps.peers { - if p.version >= minProtocol && p.version <= maxProtocol { - if idleCheck(p) { - idle = append(idle, p) - tps = append(tps, capacity(p)) - } - total++ - } - } - - // And sort them - sortPeers := &peerCapacitySort{idle, tps} - sort.Sort(sortPeers) - return sortPeers.p, total -} - // peerCapacitySort implements sort.Interface. // It sorts peer connections by capacity (descending). 
 type peerCapacitySort struct {
-	p  []*peerConnection
-	tp []int
+	peers []*peerConnection
+	caps  []int
 }
 
 func (ps *peerCapacitySort) Len() int {
-	return len(ps.p)
+	return len(ps.peers)
 }
 
 func (ps *peerCapacitySort) Less(i, j int) bool {
-	return ps.tp[i] > ps.tp[j]
+	return ps.caps[i] > ps.caps[j]
 }
 
 func (ps *peerCapacitySort) Swap(i, j int) {
-	ps.p[i], ps.p[j] = ps.p[j], ps.p[i]
-	ps.tp[i], ps.tp[j] = ps.tp[j], ps.tp[i]
+	ps.peers[i], ps.peers[j] = ps.peers[j], ps.peers[i]
+	ps.caps[i], ps.caps[j] = ps.caps[j], ps.caps[i]
 }
diff --git a/eth/downloader/queue.go b/eth/downloader/queue.go
index 158608ccd..3a5d3d88d 100644
--- a/eth/downloader/queue.go
+++ b/eth/downloader/queue.go
@@ -31,7 +31,6 @@ import (
 	"github.com/scroll-tech/go-ethereum/core/types"
 	"github.com/scroll-tech/go-ethereum/log"
 	"github.com/scroll-tech/go-ethereum/metrics"
-	"github.com/scroll-tech/go-ethereum/trie"
 )
 
 const (
@@ -54,8 +53,8 @@ var (
 // fetchRequest is a currently running data retrieval operation.
 type fetchRequest struct {
 	Peer    *peerConnection // Peer to which the request was sent
-	From    uint64          // [eth/62] Requested chain element index (used for skeleton fills only)
-	Headers []*types.Header // [eth/62] Requested headers, sorted by request order
+	From    uint64          // Requested chain element index (used for skeleton fills only)
+	Headers []*types.Header // Requested headers, sorted by request order
 	Time    time.Time       // Time when the request was made
 }
 
@@ -119,6 +118,7 @@ type queue struct {
 	headerPeerMiss map[string]map[uint64]struct{} // Set of per-peer header batches known to be unavailable
 	headerPendPool map[string]*fetchRequest       // Currently pending header retrieval operations
 	headerResults  []*types.Header                // Result cache accumulating the completed headers
+	headerHashes   []common.Hash                  // Result cache accumulating the completed header hashes
 	headerProced   int                            // Number of headers already processed from the results
 	headerOffset   uint64                         // Number of the first header in the result cache
 	headerContCh   chan bool                      // Channel to notify when header download finishes
@@ -127,10 +127,12 @@ type queue struct {
 	blockTaskPool  map[common.Hash]*types.Header // Pending block (body) retrieval tasks, mapping hashes to headers
 	blockTaskQueue *prque.Prque                  // Priority queue of the headers to fetch the blocks (bodies) for
 	blockPendPool  map[string]*fetchRequest      // Currently pending block (body) retrieval operations
+	blockWakeCh    chan bool                     // Channel to notify the block fetcher of new tasks
 
 	receiptTaskPool  map[common.Hash]*types.Header // Pending receipt retrieval tasks, mapping hashes to headers
 	receiptTaskQueue *prque.Prque                  // Priority queue of the headers to fetch the receipts for
 	receiptPendPool  map[string]*fetchRequest      // Currently pending receipt retrieval operations
+	receiptWakeCh    chan bool                     // Channel to notify the receipt fetcher of new tasks
 
 	resultCache *resultStore       // Downloaded but not yet delivered fetch results
 	resultSize  common.StorageSize // Approximate size of a block (exponential moving average)
@@ -146,9 +148,11 @@ func newQueue(blockCacheLimit int, thresholdInitialSize int) *queue {
 	lock := new(sync.RWMutex)
 	q := &queue{
-		headerContCh:     make(chan bool),
+		headerContCh:     make(chan bool, 1),
 		blockTaskQueue:   prque.New(nil),
+		blockWakeCh:      make(chan bool, 1),
 		receiptTaskQueue: prque.New(nil),
+		receiptWakeCh:    make(chan bool, 1),
 		active:           sync.NewCond(lock),
 		lock:             lock,
 	}
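The new `blockWakeCh`/`receiptWakeCh` fields (and the now-buffered `headerContCh`) follow a standard Go idiom: a capacity-1 `chan bool` written with a non-blocking send, so repeated wake-ups coalesce instead of blocking the producer; `Results` uses exactly this `select`/`default` form later in the diff. A minimal sketch of the idiom:

package main

import "fmt"

// wake pings ch without ever blocking: if a notification is already
// pending, the new one coalesces into it and is dropped.
func wake(ch chan bool) {
	select {
	case ch <- true:
	default:
	}
}

func main() {
	wakeCh := make(chan bool, 1) // capacity 1: at most one pending ping

	wake(wakeCh) // delivered
	wake(wakeCh) // coalesced; does not block the producer

	<-wakeCh
	select {
	case <-wakeCh:
		fmt.Println("unexpected second ping")
	default:
		fmt.Println("both wakes coalesced into a single ping")
	}
}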
@@ -196,8 +200,8 @@ func (q *queue) PendingHeaders() int {
 	return q.headerTaskQueue.Size()
 }
 
-// PendingBlocks retrieves the number of block (body) requests pending for retrieval.
-func (q *queue) PendingBlocks() int {
+// PendingBodies retrieves the number of block body requests pending for retrieval.
+func (q *queue) PendingBodies() int {
 	q.lock.Lock()
 	defer q.lock.Unlock()
 
@@ -212,15 +216,6 @@ func (q *queue) PendingReceipts() int {
 	return q.receiptTaskQueue.Size()
 }
 
-// InFlightHeaders retrieves whether there are header fetch requests currently
-// in flight.
-func (q *queue) InFlightHeaders() bool {
-	q.lock.Lock()
-	defer q.lock.Unlock()
-
-	return len(q.headerPendPool) > 0
-}
-
 // InFlightBlocks retrieves whether there are block fetch requests currently in
 // flight.
 func (q *queue) InFlightBlocks() bool {
@@ -265,6 +260,7 @@ func (q *queue) ScheduleSkeleton(from uint64, skeleton []*types.Header) {
 	q.headerTaskQueue = prque.New(nil)
 	q.headerPeerMiss = make(map[string]map[uint64]struct{}) // Reset availability to correct invalid chains
 	q.headerResults = make([]*types.Header, len(skeleton)*MaxHeaderFetch)
+	q.headerHashes = make([]common.Hash, len(skeleton)*MaxHeaderFetch)
 	q.headerProced = 0
 	q.headerOffset = from
 	q.headerContCh = make(chan bool, 1)
@@ -279,27 +275,27 @@ func (q *queue) ScheduleSkeleton(from uint64, skeleton []*types.Header) {
 
 // RetrieveHeaders retrieves the header chain assembled based on the scheduled
 // skeleton.
-func (q *queue) RetrieveHeaders() ([]*types.Header, int) {
+func (q *queue) RetrieveHeaders() ([]*types.Header, []common.Hash, int) {
 	q.lock.Lock()
 	defer q.lock.Unlock()
 
-	headers, proced := q.headerResults, q.headerProced
-	q.headerResults, q.headerProced = nil, 0
+	headers, hashes, proced := q.headerResults, q.headerHashes, q.headerProced
+	q.headerResults, q.headerHashes, q.headerProced = nil, nil, 0
 
-	return headers, proced
+	return headers, hashes, proced
 }
 
 // Schedule adds a set of headers for the download queue for scheduling, returning
 // the new headers encountered.
-func (q *queue) Schedule(headers []*types.Header, from uint64) []*types.Header { +func (q *queue) Schedule(headers []*types.Header, hashes []common.Hash, from uint64) []*types.Header { q.lock.Lock() defer q.lock.Unlock() // Insert all the headers prioritised by the contained block number inserts := make([]*types.Header, 0, len(headers)) - for _, header := range headers { + for i, header := range headers { // Make sure chain order is honoured and preserved throughout - hash := header.Hash() + hash := hashes[i] if header.Number == nil || header.Number.Uint64() != from { log.Warn("Header broke chain ordering", "number", header.Number, "hash", hash, "expected", from) break @@ -318,7 +314,7 @@ func (q *queue) Schedule(headers []*types.Header, from uint64) []*types.Header { q.blockTaskQueue.Push(header, -int64(header.Number.Uint64())) } // Queue for receipt retrieval - if q.mode == FastSync && !header.EmptyReceipts() { + if q.mode == SnapSync && !header.EmptyReceipts() { if _, ok := q.receiptTaskPool[hash]; ok { log.Warn("Header already scheduled for receipt fetch", "number", header.Number, "hash", hash) } else { @@ -383,6 +379,13 @@ func (q *queue) Results(block bool) []*fetchResult { throttleThreshold := uint64((common.StorageSize(blockCacheMemory) + q.resultSize - 1) / q.resultSize) throttleThreshold = q.resultCache.SetThrottleThreshold(throttleThreshold) + // With results removed from the cache, wake throttled fetchers + for _, ch := range []chan bool{q.blockWakeCh, q.receiptWakeCh} { + select { + case ch <- true: + default: + } + } // Log some info at certain times if time.Since(q.lastStatLog) > 60*time.Second { q.lastStatLog = time.Now() @@ -477,9 +480,10 @@ func (q *queue) ReserveReceipts(p *peerConnection, count int) (*fetchRequest, bo // to access the queue, so they already need a lock anyway. // // Returns: -// item - the fetchRequest -// progress - whether any progress was made -// throttle - if the caller should throttle for a while +// +// item - the fetchRequest +// progress - whether any progress was made +// throttle - if the caller should throttle for a while func (q *queue) reserveHeaders(p *peerConnection, count int, taskPool map[common.Hash]*types.Header, taskQueue *prque.Prque, pendPool map[string]*fetchRequest, kind uint) (*fetchRequest, bool, bool) { // Short circuit if the pool has been depleted, or if the peer's already @@ -503,7 +507,7 @@ func (q *queue) reserveHeaders(p *peerConnection, count int, taskPool map[common // we can ask the resultcache if this header is within the // "prioritized" segment of blocks. If it is not, we need to throttle - stale, throttle, item, err := q.resultCache.AddFetch(header, q.mode == FastSync) + stale, throttle, item, err := q.resultCache.AddFetch(header, q.mode == SnapSync) if stale { // Don't put back in the task queue, this item has already been // delivered upstream @@ -566,40 +570,6 @@ func (q *queue) reserveHeaders(p *peerConnection, count int, taskPool map[common return request, progress, throttled } -// CancelHeaders aborts a fetch request, returning all pending skeleton indexes to the queue. -func (q *queue) CancelHeaders(request *fetchRequest) { - q.lock.Lock() - defer q.lock.Unlock() - q.cancel(request, q.headerTaskQueue, q.headerPendPool) -} - -// CancelBodies aborts a body fetch request, returning all pending headers to the -// task queue. 
-func (q *queue) CancelBodies(request *fetchRequest) {
-	q.lock.Lock()
-	defer q.lock.Unlock()
-	q.cancel(request, q.blockTaskQueue, q.blockPendPool)
-}
-
-// CancelReceipts aborts a body fetch request, returning all pending headers to
-// the task queue.
-func (q *queue) CancelReceipts(request *fetchRequest) {
-	q.lock.Lock()
-	defer q.lock.Unlock()
-	q.cancel(request, q.receiptTaskQueue, q.receiptPendPool)
-}
-
-// Cancel aborts a fetch request, returning all pending hashes to the task queue.
-func (q *queue) cancel(request *fetchRequest, taskQueue *prque.Prque, pendPool map[string]*fetchRequest) {
-	if request.From > 0 {
-		taskQueue.Push(request.From, -int64(request.From))
-	}
-	for _, header := range request.Headers {
-		taskQueue.Push(header, -int64(header.Number.Uint64()))
-	}
-	delete(pendPool, request.Peer.id)
-}
-
 // Revoke cancels all pending requests belonging to a given peer. This method is
 // meant to be called during a peer drop to quickly reassign owned data fetches
 // to remaining nodes.
@@ -607,6 +577,10 @@ func (q *queue) Revoke(peerID string) {
 	q.lock.Lock()
 	defer q.lock.Unlock()
 
+	if request, ok := q.headerPendPool[peerID]; ok {
+		q.headerTaskQueue.Push(request.From, -int64(request.From))
+		delete(q.headerPendPool, peerID)
+	}
 	if request, ok := q.blockPendPool[peerID]; ok {
 		for _, header := range request.Headers {
 			q.blockTaskQueue.Push(header, -int64(header.Number.Uint64()))
@@ -621,62 +595,60 @@ func (q *queue) Revoke(peerID string) {
 	}
 }
 
-// ExpireHeaders checks for in flight requests that exceeded a timeout allowance,
-// canceling them and returning the responsible peers for penalisation.
-func (q *queue) ExpireHeaders(timeout time.Duration) map[string]int {
+// ExpireHeaders cancels a request that timed out and moves the pending fetch
+// task back into the queue for rescheduling.
+func (q *queue) ExpireHeaders(peer string) int {
 	q.lock.Lock()
 	defer q.lock.Unlock()
 
-	return q.expire(timeout, q.headerPendPool, q.headerTaskQueue, headerTimeoutMeter)
+	headerTimeoutMeter.Mark(1)
+	return q.expire(peer, q.headerPendPool, q.headerTaskQueue)
 }
 
 // ExpireBodies checks for in flight block body requests that exceeded a timeout
 // allowance, canceling them and returning the responsible peers for penalisation.
-func (q *queue) ExpireBodies(timeout time.Duration) map[string]int {
+func (q *queue) ExpireBodies(peer string) int {
 	q.lock.Lock()
 	defer q.lock.Unlock()
 
-	return q.expire(timeout, q.blockPendPool, q.blockTaskQueue, bodyTimeoutMeter)
+	bodyTimeoutMeter.Mark(1)
+	return q.expire(peer, q.blockPendPool, q.blockTaskQueue)
}
 
 // ExpireReceipts checks for in flight receipt requests that exceeded a timeout
 // allowance, canceling them and returning the responsible peers for penalisation.
-func (q *queue) ExpireReceipts(timeout time.Duration) map[string]int {
+func (q *queue) ExpireReceipts(peer string) int {
 	q.lock.Lock()
 	defer q.lock.Unlock()
 
-	return q.expire(timeout, q.receiptPendPool, q.receiptTaskQueue, receiptTimeoutMeter)
+	receiptTimeoutMeter.Mark(1)
+	return q.expire(peer, q.receiptPendPool, q.receiptTaskQueue)
 }
 
-// expire is the generic check that move expired tasks from a pending pool back
-// into a task pool, returning all entities caught with expired tasks.
+// expire is the generic check that moves a specific expired task from a pending
+// pool back into a task pool.
 //
-// Note, this method expects the queue lock to be already held. The
-// reason the lock is not obtained in here is because the parameters already need
-// to access the queue, so they already need a lock anyway.
-func (q *queue) expire(timeout time.Duration, pendPool map[string]*fetchRequest, taskQueue *prque.Prque, timeoutMeter metrics.Meter) map[string]int {
-	// Iterate over the expired requests and return each to the queue
-	expiries := make(map[string]int)
-	for id, request := range pendPool {
-		if time.Since(request.Time) > timeout {
-			// Update the metrics with the timeout
-			timeoutMeter.Mark(1)
-
-			// Return any non satisfied requests to the pool
-			if request.From > 0 {
-				taskQueue.Push(request.From, -int64(request.From))
-			}
-			for _, header := range request.Headers {
-				taskQueue.Push(header, -int64(header.Number.Uint64()))
-			}
-			// Add the peer to the expiry report along the number of failed requests
-			expiries[id] = len(request.Headers)
-
-			// Remove the expired requests from the pending pool directly
-			delete(pendPool, id)
-		}
+// Note, this method expects the queue lock to be already held. The reason the
+// lock is not obtained in here is that the parameters already need to access
+// the queue, so they already need a lock anyway.
+func (q *queue) expire(peer string, pendPool map[string]*fetchRequest, taskQueue *prque.Prque) int {
+	// Retrieve the request being expired and log an error if it's non-existent,
+	// as there's no order of events that should lead to such expirations.
+	req := pendPool[peer]
+	if req == nil {
+		log.Error("Expired request does not exist", "peer", peer)
+		return 0
+	}
+	delete(pendPool, peer)
+
+	// Return any non-satisfied requests to the pool
+	if req.From > 0 {
+		taskQueue.Push(req.From, -int64(req.From))
+	}
+	for _, header := range req.Headers {
+		taskQueue.Push(header, -int64(header.Number.Uint64()))
 	}
-	return expiries
+	return len(req.Headers)
 }
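Throughout `queue.go`, tasks are (re)queued with `-int64(blockNumber)` as their priority. Since `prque` pops the highest priority first, negating the number makes the lowest block pop first, keeping retrieval in chain order after any reshuffle. A small sketch, assuming the fork keeps upstream's `common/prque` API:

package main

import (
	"fmt"

	"github.com/scroll-tech/go-ethereum/common/prque"
)

func main() {
	tasks := prque.New(nil)

	// Re-queue block numbers out of order, negated as in queue.expire.
	for _, number := range []uint64{7, 3, 5} {
		tasks.Push(number, -int64(number))
	}
	// Popping the max priority on negated numbers yields ascending chain
	// order: 3, 5, 7.
	for !tasks.Empty() {
		item, _ := tasks.Pop()
		fmt.Println("fetch block", item.(uint64))
	}
}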
 
 // DeliverHeaders injects a header retrieval response into the header results
@@ -684,9 +656,9 @@ func (q *queue) expire(timeout time.Duration, pendPool map[string]*fetchRequest,
 // if they do not map correctly to the skeleton.
 //
 // If the headers are accepted, the method makes an attempt to deliver the set
-// of ready headers to the processor to keep the pipeline full. However it will
+// of ready headers to the processor to keep the pipeline full. However, it will
 // not block to prevent stalling other pending deliveries.
-func (q *queue) DeliverHeaders(id string, headers []*types.Header, headerProcCh chan []*types.Header) (int, error) {
+func (q *queue) DeliverHeaders(id string, headers []*types.Header, hashes []common.Hash, headerProcCh chan *headerTask) (int, error) {
 	q.lock.Lock()
 	defer q.lock.Unlock()
 
@@ -700,28 +672,31 @@ func (q *queue) DeliverHeaders(id string, headers []*types.Header, headerProcCh
 	// Short circuit if the data was never requested
 	request := q.headerPendPool[id]
 	if request == nil {
+		headerDropMeter.Mark(int64(len(headers)))
 		return 0, errNoFetchesPending
 	}
-	headerReqTimer.UpdateSince(request.Time)
 	delete(q.headerPendPool, id)
 
+	headerReqTimer.UpdateSince(request.Time)
+	headerInMeter.Mark(int64(len(headers)))
+
 	// Ensure headers can be mapped onto the skeleton chain
 	target := q.headerTaskPool[request.From].Hash()
 
 	accepted := len(headers) == MaxHeaderFetch
 	if accepted {
 		if headers[0].Number.Uint64() != request.From {
-			logger.Trace("First header broke chain ordering", "number", headers[0].Number, "hash", headers[0].Hash(), "expected", request.From)
+			logger.Trace("First header broke chain ordering", "number", headers[0].Number, "hash", hashes[0], "expected", request.From)
 			accepted = false
-		} else if headers[len(headers)-1].Hash() != target {
-			logger.Trace("Last header broke skeleton structure ", "number", headers[len(headers)-1].Number, "hash", headers[len(headers)-1].Hash(), "expected", target)
+		} else if hashes[len(headers)-1] != target {
+			logger.Trace("Last header broke skeleton structure ", "number", headers[len(headers)-1].Number, "hash", hashes[len(headers)-1], "expected", target)
 			accepted = false
 		}
 	}
 	if accepted {
-		parentHash := headers[0].Hash()
+		parentHash := hashes[0]
 		for i, header := range headers[1:] {
-			hash := header.Hash()
+			hash := hashes[i+1]
 			if want := request.From + 1 + uint64(i); header.Number.Uint64() != want {
 				logger.Warn("Header broke chain ordering", "number", header.Number, "hash", hash, "expected", want)
 				accepted = false
@@ -739,6 +714,7 @@ func (q *queue) DeliverHeaders(id string, headers []*types.Header, headerProcCh
 	// If the batch of headers wasn't accepted, mark as unavailable
 	if !accepted {
 		logger.Trace("Skeleton filling not accepted", "from", request.From)
+		headerDropMeter.Mark(int64(len(headers)))
 
 		miss := q.headerPeerMiss[id]
 		if miss == nil {
@@ -752,6 +728,8 @@ func (q *queue) DeliverHeaders(id string, headers []*types.Header, headerProcCh
 	}
 	// Clean up a successful fetch and try to deliver any sub-results
 	copy(q.headerResults[request.From-q.headerOffset:], headers)
+	copy(q.headerHashes[request.From-q.headerOffset:], hashes)
+
 	delete(q.headerTaskPool, request.From)
 
 	ready := 0
@@ -760,13 +738,19 @@ func (q *queue) DeliverHeaders(id string, headers []*types.Header, headerProcCh
 	}
 	if ready > 0 {
 		// Headers are ready for delivery, gather them and push forward (non blocking)
-		process := make([]*types.Header, ready)
-		copy(process, q.headerResults[q.headerProced:q.headerProced+ready])
+		processHeaders := make([]*types.Header, ready)
+		copy(processHeaders, q.headerResults[q.headerProced:q.headerProced+ready])
+
+		processHashes := make([]common.Hash, ready)
+		copy(processHashes, q.headerHashes[q.headerProced:q.headerProced+ready])
 
 		select {
-		case headerProcCh <- process:
-			logger.Trace("Pre-scheduled new headers", "count", len(process), "from", process[0].Number)
-			q.headerProced += len(process)
+		case headerProcCh <- &headerTask{
+			headers: processHeaders,
+			hashes:  processHashes,
+		}:
+			logger.Trace("Pre-scheduled new headers", "count", len(processHeaders), "from", processHeaders[0].Number)
+			q.headerProced += len(processHeaders)
 		default:
 		}
 	}
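`DeliverHeaders` no longer hashes anything itself: the hashes arrive precomputed alongside the headers (derived once when the response is decoded, off the queue's critical path), and the body/receipt paths below get the same treatment. A sketch of what that precomputation looks like with the usual `types`/`trie` helpers (`hashBodies` is a hypothetical helper, not code from this patch):

package main

import (
	"fmt"

	"github.com/scroll-tech/go-ethereum/common"
	"github.com/scroll-tech/go-ethereum/core/types"
	"github.com/scroll-tech/go-ethereum/trie"
)

// hashBodies derives the transaction-list root for every body once, so that
// DeliverBodies only needs to compare hashes[i] against header.TxHash while
// holding its lock.
func hashBodies(txLists [][]*types.Transaction) []common.Hash {
	hasher := trie.NewStackTrie(nil) // DeriveSha resets it between uses
	hashes := make([]common.Hash, len(txLists))
	for i, txs := range txLists {
		hashes[i] = types.DeriveSha(types.Transactions(txs), hasher)
	}
	return hashes
}

func main() {
	// Two empty bodies both hash to the well-known empty trie root.
	hashes := hashBodies([][]*types.Transaction{{}, {}})
	fmt.Println(hashes[0] == types.EmptyRootHash, hashes[0] == hashes[1])
}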
@@ -780,15 +764,15 @@
 // DeliverBodies injects a block body retrieval response into the results queue.
 // The method returns the number of block bodies accepted from the delivery and
 // also wakes any threads waiting for data delivery.
-func (q *queue) DeliverBodies(id string, txLists [][]*types.Transaction, uncleLists [][]*types.Header) (int, error) {
+func (q *queue) DeliverBodies(id string, txLists [][]*types.Transaction, txListHashes []common.Hash, uncleLists [][]*types.Header, uncleListHashes []common.Hash) (int, error) {
 	q.lock.Lock()
 	defer q.lock.Unlock()
 
-	trieHasher := trie.NewStackTrie(nil)
+
 	validate := func(index int, header *types.Header) error {
-		if types.DeriveSha(types.Transactions(txLists[index]), trieHasher) != header.TxHash {
+		if txListHashes[index] != header.TxHash {
 			return errInvalidBody
 		}
-		if types.CalcUncleHash(uncleLists[index]) != header.UncleHash {
+		if uncleListHashes[index] != header.UncleHash {
 			return errInvalidBody
 		}
 		return nil
@@ -800,18 +784,18 @@
 		result.SetBodyDone()
 	}
 	return q.deliver(id, q.blockTaskPool, q.blockTaskQueue, q.blockPendPool,
-		bodyReqTimer, len(txLists), validate, reconstruct)
+		bodyReqTimer, bodyInMeter, bodyDropMeter, len(txLists), validate, reconstruct)
 }
 
 // DeliverReceipts injects a receipt retrieval response into the results queue.
 // The method returns the number of transaction receipts accepted from the delivery
 // and also wakes any threads waiting for data delivery.
-func (q *queue) DeliverReceipts(id string, receiptList [][]*types.Receipt) (int, error) {
+func (q *queue) DeliverReceipts(id string, receiptList [][]*types.Receipt, receiptListHashes []common.Hash) (int, error) {
 	q.lock.Lock()
 	defer q.lock.Unlock()
 
-	trieHasher := trie.NewStackTrie(nil)
+
 	validate := func(index int, header *types.Header) error {
-		if types.DeriveSha(types.Receipts(receiptList[index]), trieHasher) != header.ReceiptHash {
+		if receiptListHashes[index] != header.ReceiptHash {
 			return errInvalidReceipt
 		}
 		return nil
@@ -821,7 +805,7 @@
 		result.SetReceiptsDone()
 	}
 	return q.deliver(id, q.receiptTaskPool, q.receiptTaskQueue, q.receiptPendPool,
-		receiptReqTimer, len(receiptList), validate, reconstruct)
+		receiptReqTimer, receiptInMeter, receiptDropMeter, len(receiptList), validate, reconstruct)
 }
 
 // deliver injects a data retrieval response into the results queue.
@@ -830,18 +814,22 @@ func (q *queue) DeliverReceipts(id string, receiptList [][]*types.Receipt) (int,
 // reason this lock is not obtained in here is because the parameters already need
 // to access the queue, so they already need a lock anyway.
func (q *queue) deliver(id string, taskPool map[common.Hash]*types.Header, - taskQueue *prque.Prque, pendPool map[string]*fetchRequest, reqTimer metrics.Timer, + taskQueue *prque.Prque, pendPool map[string]*fetchRequest, + reqTimer metrics.Timer, resInMeter metrics.Meter, resDropMeter metrics.Meter, results int, validate func(index int, header *types.Header) error, reconstruct func(index int, result *fetchResult)) (int, error) { // Short circuit if the data was never requested request := pendPool[id] if request == nil { + resDropMeter.Mark(int64(results)) return 0, errNoFetchesPending } - reqTimer.UpdateSince(request.Time) delete(pendPool, id) + reqTimer.UpdateSince(request.Time) + resInMeter.Mark(int64(results)) + // If no data items were retrieved, mark them as unavailable for the origin peer if results == 0 { for _, header := range request.Headers { @@ -883,6 +871,8 @@ func (q *queue) deliver(id string, taskPool map[common.Hash]*types.Header, delete(taskPool, hashes[accepted]) accepted++ } + resDropMeter.Mark(int64(results - accepted)) + // Return all failed or missing fetches to the queue for _, header := range request.Headers[accepted:] { taskQueue.Push(header, -int64(header.Number.Uint64())) diff --git a/eth/downloader/queue_test.go b/eth/downloader/queue_test.go index 07a05dc20..a514966af 100644 --- a/eth/downloader/queue_test.go +++ b/eth/downloader/queue_test.go @@ -31,6 +31,7 @@ import ( "github.com/scroll-tech/go-ethereum/core/types" "github.com/scroll-tech/go-ethereum/log" "github.com/scroll-tech/go-ethereum/params" + "github.com/scroll-tech/go-ethereum/trie" ) var ( @@ -104,17 +105,22 @@ func TestBasics(t *testing.T) { if !q.Idle() { t.Errorf("new queue should be idle") } - q.Prepare(1, FastSync) + q.Prepare(1, SnapSync) if res := q.Results(false); len(res) != 0 { t.Fatal("new queue should have 0 results") } // Schedule a batch of headers - q.Schedule(chain.headers(), 1) + headers := chain.headers() + hashes := make([]common.Hash, len(headers)) + for i, header := range headers { + hashes[i] = header.Hash() + } + q.Schedule(headers, hashes, 1) if q.Idle() { t.Errorf("queue should not be idle") } - if got, exp := q.PendingBlocks(), chain.Len(); got != exp { + if got, exp := q.PendingBodies(), chain.Len(); got != exp { t.Errorf("wrong pending block count, got %d, exp %d", got, exp) } // Only non-empty receipts get added to task-queue @@ -197,13 +203,19 @@ func TestEmptyBlocks(t *testing.T) { q := newQueue(10, 10) - q.Prepare(1, FastSync) + q.Prepare(1, SnapSync) + // Schedule a batch of headers - q.Schedule(emptyChain.headers(), 1) + headers := emptyChain.headers() + hashes := make([]common.Hash, len(headers)) + for i, header := range headers { + hashes[i] = header.Hash() + } + q.Schedule(headers, hashes, 1) if q.Idle() { t.Errorf("queue should not be idle") } - if got, exp := q.PendingBlocks(), len(emptyChain.blocks); got != exp { + if got, exp := q.PendingBodies(), len(emptyChain.blocks); got != exp { t.Errorf("wrong pending block count, got %d, exp %d", got, exp) } if got, exp := q.PendingReceipts(), 0; got != exp { @@ -272,7 +284,7 @@ func XTestDelivery(t *testing.T) { } q := newQueue(10, 10) var wg sync.WaitGroup - q.Prepare(1, FastSync) + q.Prepare(1, SnapSync) wg.Add(1) go func() { // deliver headers @@ -280,11 +292,15 @@ func XTestDelivery(t *testing.T) { c := 1 for { //fmt.Printf("getting headers from %d\n", c) - hdrs := world.headers(c) - l := len(hdrs) + headers := world.headers(c) + hashes := make([]common.Hash, len(headers)) + for i, header := range headers { + 
hashes[i] = header.Hash() + } + l := len(headers) //fmt.Printf("scheduling %d headers, first %d last %d\n", - // l, hdrs[0].Number.Uint64(), hdrs[len(hdrs)-1].Number.Uint64()) - q.Schedule(hdrs, uint64(c)) + // l, headers[0].Number.Uint64(), headers[len(headers)-1].Number.Uint64()) + q.Schedule(headers, hashes, uint64(c)) c += l } }() @@ -311,18 +327,31 @@ func XTestDelivery(t *testing.T) { peer := dummyPeer(fmt.Sprintf("peer-%d", i)) f, _, _ := q.ReserveBodies(peer, rand.Intn(30)) if f != nil { - var emptyList []*types.Header - var txs [][]*types.Transaction - var uncles [][]*types.Header + var ( + emptyList []*types.Header + txset [][]*types.Transaction + uncleset [][]*types.Header + ) numToSkip := rand.Intn(len(f.Headers)) for _, hdr := range f.Headers[0 : len(f.Headers)-numToSkip] { - txs = append(txs, world.getTransactions(hdr.Number.Uint64())) - uncles = append(uncles, emptyList) + txset = append(txset, world.getTransactions(hdr.Number.Uint64())) + uncleset = append(uncleset, emptyList) + } + var ( + txsHashes = make([]common.Hash, len(txset)) + uncleHashes = make([]common.Hash, len(uncleset)) + ) + hasher := trie.NewStackTrie(nil) + for i, txs := range txset { + txsHashes[i] = types.DeriveSha(types.Transactions(txs), hasher) + } + for i, uncles := range uncleset { + uncleHashes[i] = types.CalcUncleHash(uncles) } time.Sleep(100 * time.Millisecond) - _, err := q.DeliverBodies(peer.id, txs, uncles) + _, err := q.DeliverBodies(peer.id, txset, txsHashes, uncleset, uncleHashes) if err != nil { - fmt.Printf("delivered %d bodies %v\n", len(txs), err) + fmt.Printf("delivered %d bodies %v\n", len(txset), err) } } else { i++ @@ -341,7 +370,12 @@ func XTestDelivery(t *testing.T) { for _, hdr := range f.Headers { rcs = append(rcs, world.getReceipts(hdr.Number.Uint64())) } - _, err := q.DeliverReceipts(peer.id, rcs) + hasher := trie.NewStackTrie(nil) + hashes := make([]common.Hash, len(rcs)) + for i, receipt := range rcs { + hashes[i] = types.DeriveSha(types.Receipts(receipt), hasher) + } + _, err := q.DeliverReceipts(peer.id, rcs, hashes) if err != nil { fmt.Printf("delivered %d receipts %v\n", len(rcs), err) } diff --git a/eth/downloader/skeleton.go b/eth/downloader/skeleton.go new file mode 100644 index 000000000..66e1992df --- /dev/null +++ b/eth/downloader/skeleton.go @@ -0,0 +1,1195 @@ +// Copyright 2021 The go-ethereum Authors +// This file is part of the go-ethereum library. +// +// The go-ethereum library is free software: you can redistribute it and/or modify +// it under the terms of the GNU Lesser General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. +// +// The go-ethereum library is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU Lesser General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public License +// along with the go-ethereum library. If not, see . 
+
+package downloader
+
+import (
+	"encoding/json"
+	"errors"
+	"fmt"
+	"math/rand"
+	"sort"
+	"time"
+
+	"github.com/scroll-tech/go-ethereum/common"
+	"github.com/scroll-tech/go-ethereum/core/rawdb"
+	"github.com/scroll-tech/go-ethereum/core/types"
+	"github.com/scroll-tech/go-ethereum/eth/protocols/eth"
+	"github.com/scroll-tech/go-ethereum/ethdb"
+	"github.com/scroll-tech/go-ethereum/log"
+)
+
+// scratchHeaders is the number of headers to store in a scratch space to allow
+// concurrent downloads. A header is about 0.5KB in size, so there is no worry
+// about using too much memory. The only catch is that we can only validate gaps
+// after they're linked to the head, so the bigger the scratch space, the larger
+// the potential for invalid headers.
+//
+// The current scratch space of 131072 headers is expected to use 64MB RAM.
+const scratchHeaders = 131072
+
+// requestHeaders is the number of headers to request from a remote peer in a single
+// network packet. Although the skeleton downloader takes into consideration peer
+// capacities when picking idlers, the packet size was decided to remain constant
+// since headers are relatively small and it's easier to work with fixed batches
+// vs. dynamic interval fillings.
+const requestHeaders = 512
+
+// errSyncLinked is an internal helper error to signal that the current sync
+// cycle linked up to the genesis block, thus the skeleton syncer should ping
+// the backfiller to resume. Since we already have that logic on sync start,
+// piggy-back on that instead of 2 entrypoints.
+var errSyncLinked = errors.New("sync linked")
+
+// errSyncMerged is an internal helper error to signal that the current sync
+// cycle merged with a previously aborted subchain, thus the skeleton syncer
+// should abort and restart with the new state.
+var errSyncMerged = errors.New("sync merged")
+
+// errSyncReorged is an internal helper error to signal that the head chain of
+// the current sync cycle was (partially) reorged, thus the skeleton syncer
+// should abort and restart with the new state.
+var errSyncReorged = errors.New("sync reorged")
+
+// errTerminated is returned if the sync mechanism was terminated for this run of
+// the process. This is usually the case when Geth is shutting down and some events
+// might still be propagating.
+var errTerminated = errors.New("terminated")
+
+// errReorgDenied is returned if an attempt is made to extend the beacon chain
+// with a new header, but it does not link up to the existing sync.
+var errReorgDenied = errors.New("non-forced head reorg denied")
+
+func init() {
+	// Tuning parameters is nice, but the scratch space must be assignable in
+	// full to peers. It's a useless cornercase to support a dangling half-group.
+	if scratchHeaders%requestHeaders != 0 {
+		panic("Please make scratchHeaders divisible by requestHeaders")
+	}
+}
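The divisibility invariant enforced by init() above is what lets the scratch space be carved into fixed-size request groups, one per in-flight peer request. A minimal, self-contained sketch of that arithmetic (the groupForHead helper is hypothetical, not part of the diff):

package main

import "fmt"

const (
	scratchHeaders = 131072 // headers kept in the scratch space
	requestHeaders = 512    // headers fetched per network request
)

// groupForHead maps a request's head block number to its group index inside
// the scratch space, mirroring the scratchHead-relative arithmetic the
// skeleton syncer uses when assigning and reverting tasks.
func groupForHead(scratchHead, head uint64) uint64 {
	return (scratchHead - head) / requestHeaders
}

func main() {
	fmt.Println("groups:", scratchHeaders/requestHeaders)          // 256 assignable tasks
	fmt.Println(groupForHead(1_000_000, 1_000_000))                // 0: anchored at the scratch head
	fmt.Println(groupForHead(1_000_000, 1_000_000-requestHeaders)) // 1: next group starts 512 blocks lower
}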
+
+// subchain is a contiguous header chain segment that is backed by the database,
+// but may not be linked to the live chain. The skeleton downloader may produce
+// a new one of these every time it is restarted until the subchain grows large
+// enough to connect with a previous subchain.
+//
+// The subchains use the exact same database namespace and are not disjoint from
+// each other. As such, extending one to overlap the other entails reducing the
+// second one first. This combined buffer model is used to avoid having to move
+// data on disk when two subchains are joined together.
+type subchain struct {
+	Head uint64      // Block number of the newest header in the subchain
+	Tail uint64      // Block number of the oldest header in the subchain
+	Next common.Hash // Block hash of the next oldest header in the subchain
+}
+
+// skeletonProgress is a database entry to allow suspending and resuming a chain
+// sync. As the skeleton header chain is downloaded backwards, restarts can and
+// will produce temporarily disjoint subchains. There is no way to restart a
+// suspended skeleton sync without prior knowledge of all prior suspension points.
+type skeletonProgress struct {
+	Subchains []*subchain // Disjoint subchains downloaded until now
+}
+
+// headUpdate is a notification that the beacon sync should switch to a new target.
+// The update may force a change of the target, or only try to extend it and fail
+// if that's not possible.
+type headUpdate struct {
+	header *types.Header // Header to update the sync target to
+	force  bool          // Whether to force the update or only extend if possible
+	errc   chan error    // Channel to signal acceptance of the new head
+}
+
+// headerRequest tracks a pending header request to ensure responses are to
+// actual requests and to validate any security constraints.
+//
+// Concurrency note: header requests and responses are handled concurrently from
+// the main runloop to allow Keccak256 hash verifications on the peer's thread and
+// to drop on invalid response. The request struct must contain all the data to
+// construct the response without accessing runloop internals (i.e. subchains).
+// The head is only included to allow the runloop to match a response to the task
+// being synced without having yet another set of maps.
+type headerRequest struct {
+	peer string // Peer to which this request is assigned
+	id   uint64 // Request ID of this request
+
+	deliver chan *headerResponse // Channel to deliver successful response on
+	revert  chan *headerRequest  // Channel to deliver request failure on
+	cancel  chan struct{}        // Channel to track sync cancellation
+	stale   chan struct{}        // Channel to signal the request was dropped
+
+	head uint64 // Head number of the requested batch of headers
+}
+
+// headerResponse is an already verified remote response to a header request.
+type headerResponse struct {
+	peer    *peerConnection // Peer from which this response originates
+	reqid   uint64          // Request ID that this response fulfils
+	headers []*types.Header // Chain of headers
+}
+
+// backfiller is a callback interface through which the skeleton sync can tell
+// the downloader that it should suspend or resume backfilling on specific head
+// events (e.g. suspend on forks or gaps, resume on successful linkups).
+type backfiller interface {
+	// suspend requests the backfiller to abort any running full or snap sync
+	// based on the skeleton chain as it might be invalid. The backfiller should
+	// gracefully handle multiple consecutive suspends without a resume, even
+	// on initial startup.
+	//
+	// The method should return the last block header that has been successfully
+	// backfilled, or nil if the backfiller was not resumed.
+	suspend() *types.Header
+
+	// resume requests the backfiller to start running full or snap sync based on
+	// the skeleton chain as it has successfully been linked. Appending new heads
+	// to the end of the chain will not result in suspend/resume cycles.
+	resume()
+}
+
+// skeleton represents a header chain synchronized after the merge where blocks
+// aren't validated any more via PoW in a forward fashion, rather are dictated
+// and extended at the head via the beacon chain and backfilled on the original
+// Ethereum block sync protocol.
+//
+// Since the skeleton is grown backwards from head to genesis, it is handled as
+// a separate entity, not mixed in with the logical sequential transition of the
+// blocks. Once the skeleton is connected to an existing, validated chain, the
+// headers will be moved into the main downloader for filling and execution.
+//
+// Opposed to the original Ethereum block synchronization which is trustless (and
+// uses a master peer to minimize the attack surface), post-merge block sync starts
+// from a trusted head. As such, there is no need for a master peer any more and
+// headers can be requested fully concurrently (though some batches might be
+// discarded if they don't link up correctly).
+//
+// Although a skeleton is part of a sync cycle, it is not recreated, rather stays
+// alive throughout the lifetime of the downloader. This allows it to be extended
+// concurrently with the sync cycle, since extensions arrive from an API surface,
+// not from within (vs. legacy Ethereum sync).
+//
+// Since the skeleton tracks the entire header chain until it is consumed by the
+// forward block filling, it needs 0.5KB/block storage. At current mainnet sizes
+// this is only possible with a disk backend. Since the skeleton is separate from
+// the node's header chain, storing the headers ephemerally until sync finishes
+// is wasted disk IO, but it's a price we're going to pay to keep things simple
+// for now.
+type skeleton struct {
+	db     ethdb.Database // Database backing the skeleton
+	filler backfiller     // Chain syncer suspended/resumed by head events
+
+	peers *peerSet                   // Set of peers we can sync from
+	idles map[string]*peerConnection // Set of idle peers in the current sync cycle
+	drop  peerDropFn                 // Drops a peer for misbehaving
+
+	progress *skeletonProgress // Sync progress tracker for resumption and metrics
+	started  time.Time         // Timestamp when the skeleton syncer was created
+	logged   time.Time         // Timestamp when progress was last logged to the user
+	pulled   uint64            // Number of headers downloaded in this run
+
+	scratchSpace  []*types.Header // Scratch space to accumulate headers in (first = recent)
+	scratchOwners []string        // Peer IDs owning chunks of the scratch space (pend or delivered)
+	scratchHead   uint64          // Block number of the first item in the scratch space
+
+	requests map[uint64]*headerRequest // Header requests currently running
+
+	headEvents chan *headUpdate // Notification channel for new heads
+	terminate  chan chan error  // Termination channel to abort sync
+	terminated chan struct{}    // Channel to signal that the syncer is dead
+
+	// Callback hooks used during testing
+	syncStarting func() // callback triggered after a sync cycle is inited but before started
+}
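Since skeletonProgress above is the structure persisted across restarts, it may help to see the round-trip concretely. A minimal sketch (the struct definitions are local mirrors so the snippet compiles on its own; the actual on-disk key handling is done by rawdb.WriteSkeletonSyncStatus in the diff):

package main

import (
	"encoding/json"
	"fmt"

	"github.com/scroll-tech/go-ethereum/common"
)

// Local mirrors of the subchain/skeletonProgress types from the diff.
type subchain struct {
	Head uint64
	Tail uint64
	Next common.Hash
}

type skeletonProgress struct {
	Subchains []*subchain
}

func main() {
	// Two temporarily disjoint subchains, as an interrupted sync leaves behind.
	progress := &skeletonProgress{Subchains: []*subchain{
		{Head: 50, Tail: 40, Next: common.HexToHash("0x01")},
		{Head: 10, Tail: 5, Next: common.HexToHash("0x02")},
	}}
	blob, err := json.Marshal(progress)
	if err != nil {
		panic(err)
	}
	fmt.Println(string(blob)) // this blob is what saveSyncStatus hands to rawdb
}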
+
+// newSkeleton creates a new sync skeleton that tracks a potentially dangling
+// header chain until it's linked into an existing set of blocks.
+func newSkeleton(db ethdb.Database, peers *peerSet, drop peerDropFn, filler backfiller) *skeleton {
+	sk := &skeleton{
+		db:         db,
+		filler:     filler,
+		peers:      peers,
+		drop:       drop,
+		requests:   make(map[uint64]*headerRequest),
+		headEvents: make(chan *headUpdate),
+		terminate:  make(chan chan error),
+		terminated: make(chan struct{}),
+	}
+	go sk.startup()
+	return sk
+}
+
+// startup is an initial background loop which waits for an event to start or
+// tear the syncer down. This is required to make the skeleton sync loop once
+// per process but at the same time not start before the beacon chain announces
+// a new (existing) head.
+func (s *skeleton) startup() {
+	// Close a notification channel so anyone sending us events will know if the
+	// sync loop was torn down for good.
+	defer close(s.terminated)
+
+	// Wait for startup or teardown. This wait might loop a few times if a beacon
+	// client requests sync head extensions, but not forced reorgs (i.e. they are
+	// giving us new payloads without setting a starting head initially).
+	for {
+		select {
+		case errc := <-s.terminate:
+			// No head was announced but Geth is shutting down
+			errc <- nil
+			return
+
+		case event := <-s.headEvents:
+			// New head announced, start syncing to it, looping every time a current
+			// cycle is terminated due to a chain event (head reorg, old chain merge).
+			if !event.force {
+				event.errc <- errors.New("forced head needed for startup")
+				continue
+			}
+			event.errc <- nil // forced head accepted for startup
+			head := event.header
+			s.started = time.Now()
+
+			for {
+				// If the sync cycle terminated or was terminated, propagate up when
+				// higher layers request termination. There's no fancy explicit error
+				// signalling as the sync loop should never terminate (TM).
+				newhead, err := s.sync(head)
+				switch {
+				case err == errSyncLinked:
+					// Sync cycle linked up to the genesis block. Tear down the loop
+					// and restart it so it can properly notify the backfiller. Don't
+					// account a new head.
+					head = nil
+
+				case err == errSyncMerged:
+					// Subchains were merged, we just need to reinit the internal
+					// state to continue on the tail of the merged chain. Don't
+					// announce a new head.
+					head = nil
+
+				case err == errSyncReorged:
+					// The subchain being synced got modified at the head in a
+					// way that requires resyncing it. Restart sync with the new
+					// head to force a cleanup.
+					head = newhead
+
+				case err == errTerminated:
+					// Sync was requested to be terminated from within, stop and
+					// return (no need to pass a message, was already done internally)
+					return
+
+				default:
+					// Sync either successfully terminated or failed with an unhandled
+					// error. Abort and wait until Geth requests a termination.
+					errc := <-s.terminate
+					errc <- err
+					return
+				}
+			}
+		}
+	}
+}
+
+// Terminate tears down the syncer indefinitely.
+func (s *skeleton) Terminate() error {
+	// Request termination and fetch any errors
+	errc := make(chan error)
+	s.terminate <- errc
+	err := <-errc
+
+	// Wait for full shutdown (not necessary, but cleaner)
+	<-s.terminated
+	return err
+}
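The startup loop above drives everything through sentinel errors rather than explicit state. A condensed, runnable sketch of that control flow (runCycles and the fake cycle function are illustrative stand-ins, not part of the diff):

package main

import (
	"errors"
	"fmt"
)

var (
	errSyncLinked  = errors.New("sync linked")
	errSyncMerged  = errors.New("sync merged")
	errSyncReorged = errors.New("sync reorged")
)

// runCycles mimics startup(): sentinel errors restart the sync cycle, any
// other error (or nil) breaks out and is surfaced to the caller.
func runCycles(cycle func() error) error {
	for {
		switch err := cycle(); err {
		case errSyncLinked, errSyncMerged:
			// restart from persisted progress, forget the announced head
		case errSyncReorged:
			// restart with the freshly announced head
		default:
			return err
		}
	}
}

func main() {
	runs := 0
	err := runCycles(func() error {
		runs++
		if runs < 3 {
			return errSyncMerged // first two cycles end in a subchain merge
		}
		return errors.New("terminated")
	})
	fmt.Println(runs, err) // 3 terminated
}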
+
+// Sync starts or resumes a previous sync cycle to download and maintain a reverse
+// header chain starting at the head and leading towards genesis to an available
+// ancestor.
+//
+// This method does not block, rather it just waits until the syncer receives the
+// fed header. What the syncer does with it is the syncer's problem.
+func (s *skeleton) Sync(head *types.Header, force bool) error {
+	log.Trace("New skeleton head announced", "number", head.Number, "hash", head.Hash(), "force", force)
+	errc := make(chan error)
+
+	select {
+	case s.headEvents <- &headUpdate{header: head, force: force, errc: errc}:
+		return <-errc
+	case <-s.terminated:
+		return errTerminated
+	}
+}
+
+// sync is the internal version of Sync that executes a single sync cycle, either
+// until some termination condition is reached, or until the current cycle merges
+// with a previously aborted run.
+func (s *skeleton) sync(head *types.Header) (*types.Header, error) {
+	// If we're continuing a previous merge interrupt, just access the existing
+	// old state without initing from disk.
+	if head == nil {
+		head = rawdb.ReadSkeletonHeader(s.db, s.progress.Subchains[0].Head)
+	} else {
+		// Otherwise, initialize the sync, trimming any previous leftovers until
+		// we're consistent with the newly requested chain head
+		s.initSync(head)
+	}
+	// Create the scratch space to fill with concurrently downloaded headers
+	s.scratchSpace = make([]*types.Header, scratchHeaders)
+	defer func() { s.scratchSpace = nil }() // don't hold on to references after sync
+
+	s.scratchOwners = make([]string, scratchHeaders/requestHeaders)
+	defer func() { s.scratchOwners = nil }() // don't hold on to references after sync
+
+	s.scratchHead = s.progress.Subchains[0].Tail - 1 // tail must not be 0!
+
+	// If the sync is already done, resume the backfiller. When the loop stops,
+	// terminate the backfiller too.
+	linked := len(s.progress.Subchains) == 1 &&
+		rawdb.HasHeader(s.db, s.progress.Subchains[0].Next, s.scratchHead) &&
+		rawdb.HasBody(s.db, s.progress.Subchains[0].Next, s.scratchHead) &&
+		rawdb.HasReceipts(s.db, s.progress.Subchains[0].Next, s.scratchHead)
+	if linked {
+		s.filler.resume()
+	}
+	defer func() {
+		if filled := s.filler.suspend(); filled != nil {
+			// If something was filled, try to delete stale sync helpers. If
+			// unsuccessful, warn the user, but not much else we can do (it's
+			// a programming error, just let users report an issue and don't
+			// choke in the meantime).
+			if err := s.cleanStales(filled); err != nil {
+				log.Error("Failed to clean stale beacon headers", "err", err)
+			}
+		}
+	}()
+	// Create a set of unique channels for this sync cycle. We need these to be
+	// ephemeral so a data race doesn't accidentally deliver something stale on
+	// a persistent channel across syncs (yup, this happened)
+	var (
+		requestFails = make(chan *headerRequest)
+		responses    = make(chan *headerResponse)
+	)
+	cancel := make(chan struct{})
+	defer close(cancel)
+
+	log.Debug("Starting reverse header sync cycle", "head", head.Number, "hash", head.Hash(), "cont", s.scratchHead)
+
+	// Whether sync completed or not, disregard any future packets
+	defer func() {
+		log.Debug("Terminating reverse header sync cycle", "head", head.Number, "hash", head.Hash(), "cont", s.scratchHead)
+		s.requests = make(map[uint64]*headerRequest)
+	}()
+
+	// Start tracking idle peers for task assignments
+	peering := make(chan *peeringEvent, 64) // arbitrary buffer, just some burst protection
+
+	peeringSub := s.peers.SubscribeEvents(peering)
+	defer peeringSub.Unsubscribe()
+
+	s.idles = make(map[string]*peerConnection)
+	for _, peer := range s.peers.AllPeers() {
+		s.idles[peer.id] = peer
+	}
+	// Notify any tester listening for startup events
+	if s.syncStarting != nil {
+		s.syncStarting()
+	}
+	for {
+		// Something happened, try to assign new tasks to any idle peers
+		if !linked {
+			s.assignTasks(responses, requestFails, cancel)
+		}
+		// Wait for something to happen
+		select {
+		case event := <-peering:
+			// A peer joined or left, the task queue and allocations need to be
+			// checked for potential assignment or reassignment
+			peerid := event.peer.id
+			if event.join {
+				log.Debug("Joining skeleton peer", "id", peerid)
+				s.idles[peerid] = event.peer
+			} else {
+				log.Debug("Leaving skeleton peer", "id", peerid)
+				s.revertRequests(peerid)
+				delete(s.idles, peerid)
+			}
+
+		case errc := <-s.terminate:
+			errc <- nil
+			return nil, errTerminated
+
+		case event := <-s.headEvents:
+			// New head was announced, try to integrate it. If successful, nothing
+			// needs to be done as the head simply extended the last range. For now
+			// we don't seamlessly integrate reorgs to keep things simple. If the
+			// network starts doing many mini reorgs, it might be worthwhile handling
+			// a limited depth without an error.
+			if reorged := s.processNewHead(event.header, event.force); reorged {
+				// If a reorg is needed, and we're forcing the new head, signal
+				// the syncer to tear down and start over. Otherwise, drop the
+				// non-force reorg.
+				if event.force {
+					event.errc <- nil // forced head reorg accepted
+					return event.header, errSyncReorged
+				}
+				event.errc <- errReorgDenied
+				continue
+			}
+			event.errc <- nil // head extension accepted
+
+			// New head was integrated into the skeleton chain. If the backfiller
+			// is still running, it will pick it up. If it already terminated,
+			// a new cycle needs to be spun up.
+			if linked {
+				s.filler.resume()
+			}
+
+		case req := <-requestFails:
+			s.revertRequest(req)
+
+		case res := <-responses:
+			// Process the batch of headers. If through processing we managed to
+			// link the current subchain to a previously downloaded one, abort the
+			// sync and restart with the merged subchains.
+			//
+			// If we managed to link to the existing local chain or genesis block,
+			// abort sync altogether.
+			linked, merged := s.processResponse(res)
+			if linked {
+				log.Debug("Beacon sync linked to local chain")
+				return nil, errSyncLinked
+			}
+			if merged {
+				log.Debug("Beacon sync merged subchains")
+				return nil, errSyncMerged
+			}
+			// We still have work to do, loop and repeat
+		}
+	}
+}
+
+// initSync attempts to get the skeleton sync into a consistent state wrt any
+// past state on disk and the newly requested head to sync to. If the new head
+// is nil, the method will return and continue from the previous head.
+func (s *skeleton) initSync(head *types.Header) {
+	// Extract the head number, we'll need it all over
+	number := head.Number.Uint64()
+
+	// Retrieve the previously saved sync progress
+	if status := rawdb.ReadSkeletonSyncStatus(s.db); len(status) > 0 {
+		s.progress = new(skeletonProgress)
+		if err := json.Unmarshal(status, s.progress); err != nil {
+			log.Error("Failed to decode skeleton sync status", "err", err)
+		} else {
+			// Previous sync was available, print some continuation logs
+			for _, subchain := range s.progress.Subchains {
+				log.Debug("Restarting skeleton subchain", "head", subchain.Head, "tail", subchain.Tail)
+			}
+			// Create a new subchain for the head (unless the last can be extended),
+			// trimming anything it would overwrite
+			headchain := &subchain{
+				Head: number,
+				Tail: number,
+				Next: head.ParentHash,
+			}
+			for len(s.progress.Subchains) > 0 {
+				// If the last chain is above the new head, delete altogether
+				lastchain := s.progress.Subchains[0]
+				if lastchain.Tail >= headchain.Tail {
+					log.Debug("Dropping skeleton subchain", "head", lastchain.Head, "tail", lastchain.Tail)
+					s.progress.Subchains = s.progress.Subchains[1:]
+					continue
+				}
+				// Otherwise truncate the last chain if needed and abort trimming
+				if lastchain.Head >= headchain.Tail {
+					log.Debug("Trimming skeleton subchain", "oldhead", lastchain.Head, "newhead", headchain.Tail-1, "tail", lastchain.Tail)
+					lastchain.Head = headchain.Tail - 1
+				}
+				break
+			}
+			// If the last subchain can be extended, we're lucky. Otherwise, create
+			// a new subchain sync task.
+			var extended bool
+			if n := len(s.progress.Subchains); n > 0 {
+				lastchain := s.progress.Subchains[0]
+				if lastchain.Head == headchain.Tail-1 {
+					lasthead := rawdb.ReadSkeletonHeader(s.db, lastchain.Head)
+					if lasthead.Hash() == head.ParentHash {
+						log.Debug("Extended skeleton subchain with new head", "head", headchain.Tail, "tail", lastchain.Tail)
+						lastchain.Head = headchain.Tail
+						extended = true
+					}
+				}
+			}
+			if !extended {
+				log.Debug("Created new skeleton subchain", "head", number, "tail", number)
+				s.progress.Subchains = append([]*subchain{headchain}, s.progress.Subchains...)
+			}
+			// Update the database with the new sync stats and insert the new
+			// head header. We won't delete any trimmed skeleton headers since
+			// those will be outside the index space of the many subchains and
+			// the database space will be reclaimed eventually when processing
+			// blocks above the current head (TODO(karalabe): don't forget).
+			batch := s.db.NewBatch()
+
+			rawdb.WriteSkeletonHeader(batch, head)
+			s.saveSyncStatus(batch)
+
+			if err := batch.Write(); err != nil {
+				log.Crit("Failed to write skeleton sync status", "err", err)
+			}
+			return
+		}
+	}
+	// Either we've failed to decode the previous state, or there was none. Start
+	// a fresh sync with a single subchain represented by the currently sent
+	// chain head.
+	s.progress = &skeletonProgress{
+		Subchains: []*subchain{
+			{
+				Head: number,
+				Tail: number,
+				Next: head.ParentHash,
+			},
+		},
+	}
+	batch := s.db.NewBatch()
+
+	rawdb.WriteSkeletonHeader(batch, head)
+	s.saveSyncStatus(batch)
+
+	if err := batch.Write(); err != nil {
+		log.Crit("Failed to write initial skeleton sync status", "err", err)
+	}
+	log.Debug("Created initial skeleton subchain", "head", number, "tail", number)
+}
+
+// saveSyncStatus marshals the remaining sync tasks into leveldb.
+func (s *skeleton) saveSyncStatus(db ethdb.KeyValueWriter) {
+	status, err := json.Marshal(s.progress)
+	if err != nil {
+		panic(err) // This can only fail during implementation
+	}
+	rawdb.WriteSkeletonSyncStatus(db, status)
+}
+
+// processNewHead does the internal shuffling for a new head marker and either
+// accepts and integrates it into the skeleton or requests a reorg. Upon reorg,
+// the syncer will tear itself down and restart with a fresh head. It is simpler
+// to reconstruct the sync state than to mutate it and hope for the best.
+func (s *skeleton) processNewHead(head *types.Header, force bool) bool {
+	// If the header cannot be inserted without interruption, return an error for
+	// the outer loop to tear down the skeleton sync and restart it
+	number := head.Number.Uint64()
+
+	lastchain := s.progress.Subchains[0]
+	if lastchain.Tail >= number {
+		// If the chain is down to a single beacon header, and it is re-announced
+		// once more, ignore it instead of tearing down sync for a noop.
+		if lastchain.Head == lastchain.Tail {
+			if current := rawdb.ReadSkeletonHeader(s.db, number); current.Hash() == head.Hash() {
+				return false
+			}
+		}
+		// Not a noop / double head announce, abort with a reorg
+		if force {
+			log.Warn("Beacon chain reorged", "tail", lastchain.Tail, "head", lastchain.Head, "newHead", number)
+		}
+		return true
+	}
+	if lastchain.Head+1 < number {
+		if force {
+			log.Warn("Beacon chain gapped", "head", lastchain.Head, "newHead", number)
+		}
+		return true
+	}
+	if parent := rawdb.ReadSkeletonHeader(s.db, number-1); parent.Hash() != head.ParentHash {
+		if force {
+			log.Warn("Beacon chain forked", "ancestor", parent.Number, "hash", parent.Hash(), "want", head.ParentHash)
+		}
+		return true
+	}
+	// New header seems to be in the last subchain range. Unwind any extra headers
+	// from the chain tip and insert the new head. We won't delete any trimmed
+	// skeleton headers since those will be outside the index space of the many
+	// subchains and the database space will be reclaimed eventually when processing
+	// blocks above the current head (TODO(karalabe): don't forget).
+	batch := s.db.NewBatch()
+
+	rawdb.WriteSkeletonHeader(batch, head)
+	lastchain.Head = number
+	s.saveSyncStatus(batch)
+
+	if err := batch.Write(); err != nil {
+		log.Crit("Failed to write skeleton sync status", "err", err)
+	}
+	return false
+}
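processNewHead above boils down to three rejection conditions; everything else is a clean extension. A small sketch condensing those checks into a pure function (needsReorg is a hypothetical helper; the single-header noop re-announcement case from the diff is omitted):

package main

import "fmt"

// needsReorg reports whether an announced head can NOT simply extend the
// current subchain: the new head must sit exactly one block above the current
// head and link to it by parent hash, otherwise the syncer tears down and
// restarts with the new head.
func needsReorg(tail, head, newNumber uint64, parentLinks bool) bool {
	switch {
	case tail >= newNumber: // announced head is inside or below the subchain
		return true
	case head+1 < newNumber: // announced head leaves a gap
		return true
	case !parentLinks: // sequential number, but forked hash chain
		return true
	}
	return false
}

func main() {
	fmt.Println(needsReorg(5, 49, 50, true))  // false: clean extension
	fmt.Println(needsReorg(5, 49, 51, true))  // true: gapped
	fmt.Println(needsReorg(5, 49, 50, false)) // true: forked
}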
+
+// assignTasks attempts to match idle peers to pending header retrievals.
+func (s *skeleton) assignTasks(success chan *headerResponse, fail chan *headerRequest, cancel chan struct{}) {
+	// Sort the peers by download capacity to use faster ones if many available
+	idlers := &peerCapacitySort{
+		peers: make([]*peerConnection, 0, len(s.idles)),
+		caps:  make([]int, 0, len(s.idles)),
+	}
+	targetTTL := s.peers.rates.TargetTimeout()
+	for _, peer := range s.idles {
+		idlers.peers = append(idlers.peers, peer)
+		idlers.caps = append(idlers.caps, s.peers.rates.Capacity(peer.id, eth.BlockHeadersMsg, targetTTL))
+	}
+	if len(idlers.peers) == 0 {
+		return
+	}
+	sort.Sort(idlers)
+
+	// Find header regions not yet downloading and fill them
+	for task, owner := range s.scratchOwners {
+		// If we're out of idle peers, stop assigning tasks
+		if len(idlers.peers) == 0 {
+			return
+		}
+		// Skip any tasks already filling
+		if owner != "" {
+			continue
+		}
+		// If we've reached the genesis, stop assigning tasks
+		if uint64(task*requestHeaders) >= s.scratchHead {
+			return
+		}
+		// Found a task and have peers available, assign it
+		idle := idlers.peers[0]
+
+		idlers.peers = idlers.peers[1:]
+		idlers.caps = idlers.caps[1:]
+
+		// Matched a pending task to an idle peer, allocate a unique request id
+		var reqid uint64
+		for {
+			reqid = uint64(rand.Int63())
+			if reqid == 0 {
+				continue
+			}
+			if _, ok := s.requests[reqid]; ok {
+				continue
+			}
+			break
+		}
+		// Generate the network query and send it to the peer
+		req := &headerRequest{
+			peer:    idle.id,
+			id:      reqid,
+			deliver: success,
+			revert:  fail,
+			cancel:  cancel,
+			stale:   make(chan struct{}),
+			head:    s.scratchHead - uint64(task*requestHeaders),
+		}
+		s.requests[reqid] = req
+		delete(s.idles, idle.id)
+
+		// Dispatch the request to the peer on its own goroutine
+		go s.executeTask(idle, req)
+
+		// Inject the request into the task to block further assignments
+		s.scratchOwners[task] = idle.id
+	}
+}
+
+// executeTask executes a single fetch request, blocking until either a result
+// arrives or a timeout / cancellation is triggered. The method should be run
+// on its own goroutine and will deliver on the requested channels.
+func (s *skeleton) executeTask(peer *peerConnection, req *headerRequest) {
+	start := time.Now()
+	resCh := make(chan *eth.Response)
+
+	// Figure out how many headers to fetch. Usually this will be a full batch,
+	// but for the very tail of the chain, trim the request to the number left.
+	// Since nodes may or may not return the genesis header for a batch request,
+	// don't even request it. The parent hash of block #1 is enough to link.
+	requestCount := requestHeaders
+	if req.head < requestHeaders {
+		requestCount = int(req.head)
+	}
+	peer.log.Trace("Fetching skeleton headers", "from", req.head, "count", requestCount)
+	netreq, err := peer.peer.RequestHeadersByNumber(req.head, requestCount, 0, true, resCh)
+	if err != nil {
+		peer.log.Trace("Failed to request headers", "err", err)
+		s.scheduleRevertRequest(req)
+		return
+	}
+	defer netreq.Close()
+
+	// Wait until the response arrives, the request is cancelled or times out
+	ttl := s.peers.rates.TargetTimeout()
+
+	timeoutTimer := time.NewTimer(ttl)
+	defer timeoutTimer.Stop()
+
+	select {
+	case <-req.cancel:
+		peer.log.Debug("Header request cancelled")
+		s.scheduleRevertRequest(req)
+
+	case <-timeoutTimer.C:
+		// Header retrieval timed out, update the metrics
+		peer.log.Warn("Header request timed out, dropping peer", "elapsed", ttl)
+		headerTimeoutMeter.Mark(1)
+		s.peers.rates.Update(peer.id, eth.BlockHeadersMsg, 0, 0)
+		s.scheduleRevertRequest(req)
+
+		// At this point we either need to drop the offending peer, or we need a
+		// mechanism to allow waiting for the response and not cancel it. For now
+		// let's go with dropping since the header sizes are deterministic and the
+		// beacon sync runs exclusive (downloader is idle) so there should be no
+		// other load to make timeouts probable. If we notice that timeouts happen
+		// more often than we'd like, we can introduce a tracker for the requests
+		// gone stale and monitor them. However, in that case too, we need a way
+		// to protect against malicious peers never responding, so it would need
+		// a second, hard-timeout mechanism.
+		s.drop(peer.id)
+
+	case res := <-resCh:
+		// Headers successfully retrieved, update the metrics
+		headers := *res.Res.(*eth.BlockHeadersPacket)
+
+		headerReqTimer.Update(time.Since(start))
+		s.peers.rates.Update(peer.id, eth.BlockHeadersMsg, res.Time, len(headers))
+
+		// Cross validate the headers with the requests
+		switch {
+		case len(headers) == 0:
+			// No headers were delivered, reject the response and reschedule
+			peer.log.Debug("No headers delivered")
+			res.Done <- errors.New("no headers delivered")
+			s.scheduleRevertRequest(req)
+
+		case headers[0].Number.Uint64() != req.head:
+			// Header batch anchored at non-requested number
+			peer.log.Debug("Invalid header response head", "have", headers[0].Number, "want", req.head)
+			res.Done <- errors.New("invalid header batch anchor")
+			s.scheduleRevertRequest(req)
+
+		case req.head >= requestHeaders && len(headers) != requestHeaders:
+			// Invalid number of non-genesis headers delivered, reject the response and reschedule
+			peer.log.Debug("Invalid non-genesis header count", "have", len(headers), "want", requestHeaders)
+			res.Done <- errors.New("not enough non-genesis headers delivered")
+			s.scheduleRevertRequest(req)
+
+		case req.head < requestHeaders && uint64(len(headers)) != req.head:
+			// Invalid number of genesis headers delivered, reject the response and reschedule
+			peer.log.Debug("Invalid genesis header count", "have", len(headers), "want", headers[0].Number.Uint64())
+			res.Done <- errors.New("not enough genesis headers delivered")
+			s.scheduleRevertRequest(req)
+
+		default:
+			// Packet seems structurally valid, check hash progression and if it
+			// is correct too, deliver for storage
+			for i := 0; i < len(headers)-1; i++ {
+				if headers[i].ParentHash != headers[i+1].Hash() {
+					peer.log.Debug("Invalid hash progression", "index", i, "wantparenthash", headers[i].ParentHash, "haveparenthash", headers[i+1].Hash())
+					res.Done <- errors.New("invalid hash progression")
+					s.scheduleRevertRequest(req)
+					return
+				}
+			}
+			// Hash chain is valid. The delivery might still be junk as we're
+			// downloading batches concurrently (so no way to link the headers
+			// until gaps are filled); in that case, we'll nuke the peer when
+			// we detect the fault.
+			res.Done <- nil
+
+			select {
+			case req.deliver <- &headerResponse{
+				peer:    peer,
+				reqid:   req.id,
+				headers: headers,
+			}:
+			case <-req.cancel:
+			}
+		}
+	}
+}
+
+// revertRequests locates all the currently pending requests from a particular
+// peer and reverts them, rescheduling for others to fulfill.
+func (s *skeleton) revertRequests(peer string) {
+	// Gather the requests first, reverts need the lock too
+	var requests []*headerRequest
+	for _, req := range s.requests {
+		if req.peer == peer {
+			requests = append(requests, req)
+		}
+	}
+	// Revert all the requests matching the peer
+	for _, req := range requests {
+		s.revertRequest(req)
+	}
+}
+
+// scheduleRevertRequest asks the event loop to clean up a request and return
+// all failed retrieval tasks to the scheduler for reassignment.
+func (s *skeleton) scheduleRevertRequest(req *headerRequest) {
+	select {
+	case req.revert <- req:
+		// Sync event loop notified
+	case <-req.cancel:
+		// Sync cycle got cancelled
+	case <-req.stale:
+		// Request already reverted
+	}
+}
+
+// revertRequest cleans up a request and returns all failed retrieval tasks to
+// the scheduler for reassignment.
+//
+// Note, this needs to run on the event runloop thread to reschedule to idle peers.
+// On peer threads, use scheduleRevertRequest.
+func (s *skeleton) revertRequest(req *headerRequest) {
+	log.Trace("Reverting header request", "peer", req.peer, "reqid", req.id)
+	select {
+	case <-req.stale:
+		log.Trace("Header request already reverted", "peer", req.peer, "reqid", req.id)
+		return
+	default:
+	}
+	close(req.stale)
+
+	// Remove the request from the tracked set
+	delete(s.requests, req.id)
+
+	// Mark the task as not-pending in the scratch space, ready for rescheduling
+	s.scratchOwners[(s.scratchHead-req.head)/requestHeaders] = ""
+}
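The hash progression check in executeTask above is the core integrity test on a delivered batch: headers arrive newest-first, so each element's ParentHash must equal the hash of the element after it. A standalone sketch of just that check (verifyReverseBatch is a hypothetical helper, not part of the diff):

package main

import (
	"fmt"
	"math/big"

	"github.com/scroll-tech/go-ethereum/core/types"
)

// verifyReverseBatch mirrors the default case of the response switch in
// executeTask: walk the newest-first batch and require a contiguous parent
// hash chain.
func verifyReverseBatch(headers []*types.Header) error {
	for i := 0; i < len(headers)-1; i++ {
		if headers[i].ParentHash != headers[i+1].Hash() {
			return fmt.Errorf("invalid hash progression at index %d", i)
		}
	}
	return nil
}

func main() {
	parent := &types.Header{Number: big.NewInt(1)}
	child := &types.Header{Number: big.NewInt(2), ParentHash: parent.Hash()}

	// Newest first, exactly how the skeleton requests batches (reverse, no skip).
	fmt.Println(verifyReverseBatch([]*types.Header{child, parent})) // <nil>
	fmt.Println(verifyReverseBatch([]*types.Header{parent, child})) // error
}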
+
+func (s *skeleton) processResponse(res *headerResponse) (linked bool, merged bool) {
+	res.peer.log.Trace("Processing header response", "head", res.headers[0].Number, "hash", res.headers[0].Hash(), "count", len(res.headers))
+
+	// Whether or not the response is valid, we can mark the peer as idle and
+	// notify the scheduler to assign a new task. If the response is invalid,
+	// we'll drop the peer in a bit.
+	s.idles[res.peer.id] = res.peer
+
+	// Ensure the response is for a valid request
+	if _, ok := s.requests[res.reqid]; !ok {
+		// Some internal accounting is broken. A request either times out or it
+		// gets fulfilled successfully. It should not be possible to deliver a
+		// response to a non-existing request.
+		res.peer.log.Error("Unexpected header packet")
+		return false, false
+	}
+	delete(s.requests, res.reqid)
+
+	// Insert the delivered headers into the scratch space independent of the
+	// content or continuation; those will be validated in a moment
+	head := res.headers[0].Number.Uint64()
+	copy(s.scratchSpace[s.scratchHead-head:], res.headers)
+
+	// If there's still a gap in the head of the scratch space, abort
+	if s.scratchSpace[0] == nil {
+		return false, false
+	}
+	// Try to consume any head headers, validating the boundary conditions
+	batch := s.db.NewBatch()
+	for s.scratchSpace[0] != nil {
+		// Next batch of headers available, cross-reference with the subchain
+		// we are extending and either accept or discard
+		if s.progress.Subchains[0].Next != s.scratchSpace[0].Hash() {
+			// Print a log message to track what's going on
+			tail := s.progress.Subchains[0].Tail
+			want := s.progress.Subchains[0].Next
+			have := s.scratchSpace[0].Hash()
+
+			log.Warn("Invalid skeleton headers", "peer", s.scratchOwners[0], "number", tail-1, "want", want, "have", have)
+
+			// The peer delivered junk, or at least not the subchain we are
+			// syncing to. Free up the scratch space and assignment, reassign
+			// and drop the original peer.
+			for i := 0; i < requestHeaders; i++ {
+				s.scratchSpace[i] = nil
+			}
+			s.drop(s.scratchOwners[0])
+			s.scratchOwners[0] = ""
+			break
+		}
+		// Scratch delivery matches required subchain, deliver the batch of
+		// headers and push the subchain forward
+		var consumed int
+		for _, header := range s.scratchSpace[:requestHeaders] {
+			if header != nil { // nil when the genesis is reached
+				consumed++
+
+				rawdb.WriteSkeletonHeader(batch, header)
+				s.pulled++
+
+				s.progress.Subchains[0].Tail--
+				s.progress.Subchains[0].Next = header.ParentHash
+
+				// If we've reached an existing block in the chain, stop retrieving
+				// headers. Note, if we want to support light clients with the same
+				// code we'd need to switch here based on the downloader mode. That
+				// said, there's no such functionality for now, so don't complicate.
+				//
+				// In the case of full sync it would be enough to check for the body,
+				// but even a full syncing node will generate a receipt once block
+				// processing is done, so it's just one more "needless" check.
+				//
+				// The weird cascading checks are done to minimize the database reads.
+				linked = rawdb.HasHeader(s.db, header.ParentHash, header.Number.Uint64()-1) &&
+					rawdb.HasBody(s.db, header.ParentHash, header.Number.Uint64()-1) &&
+					rawdb.HasReceipts(s.db, header.ParentHash, header.Number.Uint64()-1)
+				if linked {
+					break
+				}
+			}
+		}
+		head := s.progress.Subchains[0].Head
+		tail := s.progress.Subchains[0].Tail
+		next := s.progress.Subchains[0].Next
+
+		log.Trace("Primary subchain extended", "head", head, "tail", tail, "next", next)
+
+		// If the beacon chain was linked to the local chain, completely swap out
+		// all internal progress and abort header synchronization.
+		if linked {
+			// Linking into the local chain should also mean that there are no
+			// leftover subchains, but in the case of importing the blocks via
+			// the engine API, we will not push the subchains forward. This will
+			// lead to a gap between an old sync cycle and a future one.
+			if subchains := len(s.progress.Subchains); subchains > 1 {
+				switch {
+				// If there are only 2 subchains - the current one and an older
+				// one - and the old one consists of a single block, then it's
+				// the expected new sync cycle after some propagated blocks. Log
+				// it for debugging purposes, explicitly clean and don't escalate.
+				case subchains == 2 && s.progress.Subchains[1].Head == s.progress.Subchains[1].Tail:
+					// Remove the leftover skeleton header associated with old
+					// skeleton chain only if it's not covered by the current
+					// skeleton range.
+					if s.progress.Subchains[1].Head < s.progress.Subchains[0].Tail {
+						log.Debug("Cleaning previous beacon sync state", "head", s.progress.Subchains[1].Head)
+						rawdb.DeleteSkeletonHeader(batch, s.progress.Subchains[1].Head)
+					}
+					// Drop the leftover skeleton chain since it's stale.
+					s.progress.Subchains = s.progress.Subchains[:1]
+
+				// If we have more than one header or more than one leftover chain,
+				// the syncer's internal state is corrupted. Do try to fix it, but
+				// be very vocal about the fault.
+				default:
+					var context []interface{}
+
+					for i := range s.progress.Subchains[1:] {
+						context = append(context, fmt.Sprintf("stale_head_%d", i+1))
+						context = append(context, s.progress.Subchains[i+1].Head)
+						context = append(context, fmt.Sprintf("stale_tail_%d", i+1))
+						context = append(context, s.progress.Subchains[i+1].Tail)
+						context = append(context, fmt.Sprintf("stale_next_%d", i+1))
+						context = append(context, s.progress.Subchains[i+1].Next)
+					}
+					log.Error("Cleaning spurious beacon sync leftovers", context...)
+					s.progress.Subchains = s.progress.Subchains[:1]
+
+					// Note, here we didn't actually delete the headers at all,
+					// just the metadata. We could implement a cleanup mechanism,
+					// but further modifying corrupted state is kind of asking
+					// for it. Unless there's a good enough reason to risk it,
+					// better to live with the small database junk.
+				}
+			}
+			break
+		}
+		// Batch of headers consumed, shift the download window forward
+		copy(s.scratchSpace, s.scratchSpace[requestHeaders:])
+		for i := 0; i < requestHeaders; i++ {
+			s.scratchSpace[scratchHeaders-i-1] = nil
+		}
+		copy(s.scratchOwners, s.scratchOwners[1:])
+		s.scratchOwners[scratchHeaders/requestHeaders-1] = ""
+
+		s.scratchHead -= uint64(consumed)
+
+		// If the subchain extended into the next subchain, we need to handle
+		// the overlap. Since there could be many overlaps (come on), do this
+		// in a loop.
+		for len(s.progress.Subchains) > 1 && s.progress.Subchains[1].Head >= s.progress.Subchains[0].Tail {
+			// Extract some stats from the second subchain
+			head := s.progress.Subchains[1].Head
+			tail := s.progress.Subchains[1].Tail
+			next := s.progress.Subchains[1].Next
+
+			// Since we just overwrote part of the next subchain, we need to trim
+			// its head independent of matching or mismatching content
+			if s.progress.Subchains[1].Tail >= s.progress.Subchains[0].Tail {
+				// Fully overwritten, get rid of the subchain as a whole
+				log.Debug("Previous subchain fully overwritten", "head", head, "tail", tail, "next", next)
+				s.progress.Subchains = append(s.progress.Subchains[:1], s.progress.Subchains[2:]...)
+				continue
+			} else {
+				// Partially overwritten, trim the head to the overwritten size
+				log.Debug("Previous subchain partially overwritten", "head", head, "tail", tail, "next", next)
+				s.progress.Subchains[1].Head = s.progress.Subchains[0].Tail - 1
+			}
+			// If the old subchain is an extension of the new one, merge the two
+			// and let the skeleton syncer restart (to clean internal state)
+			if rawdb.ReadSkeletonHeader(s.db, s.progress.Subchains[1].Head).Hash() == s.progress.Subchains[0].Next {
+				log.Debug("Previous subchain merged", "head", head, "tail", tail, "next", next)
+				s.progress.Subchains[0].Tail = s.progress.Subchains[1].Tail
+				s.progress.Subchains[0].Next = s.progress.Subchains[1].Next
+
+				s.progress.Subchains = append(s.progress.Subchains[:1], s.progress.Subchains[2:]...)
+				merged = true
+			}
+		}
+		// If subchains were merged, all further available headers in the scratch
+		// space are invalid since we skipped ahead. Stop processing the scratch
+		// space to avoid dropping peers thinking they delivered invalid data.
+		if merged {
+			break
+		}
+	}
+	s.saveSyncStatus(batch)
+	if err := batch.Write(); err != nil {
+		log.Crit("Failed to write skeleton headers and progress", "err", err)
+	}
+	// Print a progress report making the UX a bit nicer
+	left := s.progress.Subchains[0].Tail - 1
+	if linked {
+		left = 0
+	}
+	if time.Since(s.logged) > 8*time.Second || left == 0 {
+		s.logged = time.Now()
+
+		if s.pulled == 0 {
+			log.Info("Beacon sync starting", "left", left)
+		} else {
+			eta := float64(time.Since(s.started)) / float64(s.pulled) * float64(left)
+			log.Info("Syncing beacon headers", "downloaded", s.pulled, "left", left, "eta", common.PrettyDuration(eta))
+		}
+	}
+	return linked, merged
+}
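After a batch at the head of the scratch space is consumed, processResponse slides the whole window forward by one request group and clears the freed tail slots. A dependency-free sketch of that shift (shiftWindow is a hypothetical helper; ints stand in for *types.Header):

package main

import "fmt"

// shiftWindow mirrors the scratch-space advance in processResponse: copy
// everything left by one request group, then nil the freed tail slots so
// stale entries can never be consumed twice.
func shiftWindow(scratch []*int, group int) {
	copy(scratch, scratch[group:])
	for i := 0; i < group; i++ {
		scratch[len(scratch)-1-i] = nil
	}
}

func main() {
	a, b, c := 1, 2, 3
	scratch := []*int{&a, &b, &c, nil} // window of 4 slots, group size 2
	shiftWindow(scratch, 2)
	for _, v := range scratch {
		if v == nil {
			fmt.Print("nil ")
		} else {
			fmt.Print(*v, " ")
		}
	}
	fmt.Println() // 3 nil nil nil
}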
+
+// cleanStales removes previously synced beacon headers that have become stale
+// due to the downloader backfilling past the tracked tail.
+func (s *skeleton) cleanStales(filled *types.Header) error {
+	number := filled.Number.Uint64()
+	log.Trace("Cleaning stale beacon headers", "filled", number, "hash", filled.Hash())
+
+	// If the filled header is below the linked subchain, something's
+	// corrupted internally. Report an error and refuse to do anything.
+	if number < s.progress.Subchains[0].Tail {
+		return fmt.Errorf("filled header below beacon header tail: %d < %d", number, s.progress.Subchains[0].Tail)
+	}
+	// Subchain seems trimmable, push the tail forward up to the last
+	// filled header and delete everything before it - if available. In
+	// case we filled past the head, recreate the subchain with a new
+	// head to keep it consistent with the data on disk.
+	var (
+		start = s.progress.Subchains[0].Tail // start deleting from the first known header
+		end   = number                       // delete until the requested threshold
+		batch = s.db.NewBatch()
+	)
+	s.progress.Subchains[0].Tail = number
+	s.progress.Subchains[0].Next = filled.ParentHash
+
+	if s.progress.Subchains[0].Head < number {
+		// If more headers were filled than available, push the entire
+		// subchain forward to keep tracking the node's block imports
+		end = s.progress.Subchains[0].Head + 1 // delete the entire original range, including the head
+		s.progress.Subchains[0].Head = number  // assign a new head (tail is already assigned to this)
+
+		// The entire original skeleton chain was deleted and a new one
+		// defined. Make sure the new single-header chain gets pushed to
+		// disk to keep internal state consistent.
+		rawdb.WriteSkeletonHeader(batch, filled)
+	}
+	// Execute the trimming and the potential rewiring of the progress
+	s.saveSyncStatus(batch)
+	for n := start; n < end; n++ {
+		// If the batch grew too big, flush it and continue with a new batch.
+		// The catch is that the sync metadata needs to reflect the actually
+		// flushed state, so temporarily change the subchain progress and
+		// revert after the flush.
+		if batch.ValueSize() >= ethdb.IdealBatchSize {
+			tmpTail := s.progress.Subchains[0].Tail
+			tmpNext := s.progress.Subchains[0].Next
+
+			s.progress.Subchains[0].Tail = n
+			s.progress.Subchains[0].Next = rawdb.ReadSkeletonHeader(s.db, n).ParentHash
+			s.saveSyncStatus(batch)
+
+			if err := batch.Write(); err != nil {
+				log.Crit("Failed to write beacon trim data", "err", err)
+			}
+			batch.Reset()
+
+			s.progress.Subchains[0].Tail = tmpTail
+			s.progress.Subchains[0].Next = tmpNext
+			s.saveSyncStatus(batch)
+		}
+		rawdb.DeleteSkeletonHeader(batch, n)
+	}
+	if err := batch.Write(); err != nil {
+		log.Crit("Failed to write beacon trim data", "err", err)
+	}
+	return nil
+}
+
+// Bounds retrieves the current head and tail tracked by the skeleton syncer.
+// This method is used by the backfiller, whose life cycle is controlled by the
+// skeleton syncer.
+//
+// Note, the method will not use the internal state of the skeleton, but will
+// rather blindly pull stuff from the database. This is fine, because the
+// backfiller will only run when the skeleton chain is fully downloaded and
+// stable. There might be new heads appended, but those are atomic from the
+// perspective of this method. Any head reorg will first tear down the
+// backfiller and only then make the modification.
+func (s *skeleton) Bounds() (head *types.Header, tail *types.Header, err error) {
+	// Read the current sync progress from disk and figure out the current head.
+	// Although there's a lot of error handling here, these are mostly as sanity
+	// checks to avoid crashing if a programming error happens. These should not
+	// happen in live code.
+	status := rawdb.ReadSkeletonSyncStatus(s.db)
+	if len(status) == 0 {
+		return nil, nil, errors.New("beacon sync not yet started")
+	}
+	progress := new(skeletonProgress)
+	if err := json.Unmarshal(status, progress); err != nil {
+		return nil, nil, err
+	}
+	head = rawdb.ReadSkeletonHeader(s.db, progress.Subchains[0].Head)
+	if head == nil {
+		return nil, nil, fmt.Errorf("head skeleton header %d is missing", progress.Subchains[0].Head)
+	}
+	tail = rawdb.ReadSkeletonHeader(s.db, progress.Subchains[0].Tail)
+	if tail == nil {
+		return nil, nil, fmt.Errorf("tail skeleton header %d is missing", progress.Subchains[0].Tail)
+	}
+	return head, tail, nil
+}
+
+// Header retrieves a specific header tracked by the skeleton syncer. This method
+// is meant to be used by the backfiller, whose life cycle is controlled by the
+// skeleton syncer.
+//
+// Note, outside the permitted runtimes, this method might return nil results and
+// subsequent calls might return headers from different chains.
+func (s *skeleton) Header(number uint64) *types.Header {
+	return rawdb.ReadSkeletonHeader(s.db, number)
+}
diff --git a/eth/downloader/skeleton_test.go b/eth/downloader/skeleton_test.go
new file mode 100644
index 000000000..8515864c1
--- /dev/null
+++ b/eth/downloader/skeleton_test.go
@@ -0,0 +1,977 @@
+// Copyright 2021 The go-ethereum Authors
+// This file is part of the go-ethereum library.
+//
+// The go-ethereum library is free software: you can redistribute it and/or modify
+// it under the terms of the GNU Lesser General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+//
+// The go-ethereum library is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU Lesser General Public License for more details.
+//
+// You should have received a copy of the GNU Lesser General Public License
+// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
+
+package downloader
+
+import (
+	"encoding/json"
+	"errors"
+	"fmt"
+	"math/big"
+	"os"
+	"sync/atomic"
+	"testing"
+	"time"
+
+	"github.com/scroll-tech/go-ethereum/common"
+	"github.com/scroll-tech/go-ethereum/core/rawdb"
+	"github.com/scroll-tech/go-ethereum/core/types"
+	"github.com/scroll-tech/go-ethereum/eth/protocols/eth"
+	"github.com/scroll-tech/go-ethereum/log"
+)
+
+// hookedBackfiller is a tester backfiller with all interface methods mocked and
+// hooked so tests can implement only the things they need.
+type hookedBackfiller struct {
+	// suspendHook is an optional hook to be called when the filler is requested
+	// to be suspended.
+	suspendHook func() *types.Header
+
+	// resumeHook is an optional hook to be called when the filler is requested
+	// to be resumed.
+	resumeHook func()
+}
+
+// newHookedBackfiller creates a hooked backfiller with all callbacks disabled,
+// essentially acting as a noop.
+func newHookedBackfiller() backfiller {
+	return new(hookedBackfiller)
+}
+
+// suspend requests the backfiller to abort any running full or snap sync
+// based on the skeleton chain as it might be invalid. The backfiller should
+// gracefully handle multiple consecutive suspends without a resume, even
+// on initial startup.
+func (hf *hookedBackfiller) suspend() *types.Header {
+	if hf.suspendHook != nil {
+		return hf.suspendHook()
+	}
+	return nil // we don't really care about header cleanups for now
+}
+
+// resume requests the backfiller to start running full or snap sync based on
+// the skeleton chain as it has successfully been linked. Appending new heads
+// to the end of the chain will not result in suspend/resume cycles.
+func (hf *hookedBackfiller) resume() {
+	if hf.resumeHook != nil {
+		hf.resumeHook()
+	}
+}
+
+// skeletonTestPeer is a mock peer that can only serve header requests from a
+// pre-prepared header chain (which may be arbitrarily wrong for testing).
+//
+// Requesting anything else from these peers will hard panic. Note, do *not*
+// implement any other methods. We actually want to make sure that the skeleton
+// syncer only depends on - and will only ever depend on - header requests.
+type skeletonTestPeer struct {
+	id      string          // Unique identifier of the mock peer
+	headers []*types.Header // Headers to serve when requested
+
+	serve func(origin uint64) []*types.Header // Hook to allow custom responses
+
+	served  uint64 // Number of headers served by this peer
+	dropped uint64 // Flag whether the peer was dropped (stop responding)
+}
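The mock peer's default serving path below simply walks its predefined chain backwards from the requested origin. A dependency-free sketch of that slicing (serveReverse is a hypothetical helper; ints stand in for *types.Header, and origin is assumed to be at least amount-1 as the mock's genesis panic guarantees):

package main

import "fmt"

// serveReverse mirrors the fallback path in the mock peer's
// RequestHeadersByNumber: given a chain indexed by block number, return up to
// amount headers walking backwards from origin, newest first.
func serveReverse(chain []int, origin uint64, amount int) []int {
	if int(origin) >= len(chain) {
		return nil // missing origin, withhold everything
	}
	batch := make([]int, 0, amount)
	for i := 0; i < amount; i++ {
		batch = append(batch, chain[int(origin)-i])
	}
	return batch
}

func main() {
	chain := []int{0, 1, 2, 3, 4, 5, 6, 7, 8, 9}
	fmt.Println(serveReverse(chain, 9, 4)) // [9 8 7 6]: newest first, no gaps
}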
+
+// newSkeletonTestPeer creates a new mock peer to test the skeleton sync with.
+func newSkeletonTestPeer(id string, headers []*types.Header) *skeletonTestPeer {
+	return &skeletonTestPeer{
+		id:      id,
+		headers: headers,
+	}
+}
+
+// newSkeletonTestPeerWithHook creates a new mock peer to test the skeleton sync
+// with, and sets an optional serve hook that can return headers for delivery
+// instead of the predefined chain. Useful for emulating malicious behavior that
+// would otherwise require dedicated peer types.
+func newSkeletonTestPeerWithHook(id string, headers []*types.Header, serve func(origin uint64) []*types.Header) *skeletonTestPeer {
+	return &skeletonTestPeer{
+		id:      id,
+		headers: headers,
+		serve:   serve,
+	}
+}
+
+// RequestHeadersByNumber constructs a GetBlockHeaders function based on a numbered
+// origin; associated with a particular peer in the download tester. The returned
+// function can be used to retrieve batches of headers from the particular peer.
+func (p *skeletonTestPeer) RequestHeadersByNumber(origin uint64, amount int, skip int, reverse bool, sink chan *eth.Response) (*eth.Request, error) {
+	// Since skeleton test peers are in-memory mocks, dropping them does not make
+	// them inaccessible. As such, check a local `dropped` field to see if the
+	// peer has been dropped and should not respond any more.
+	if atomic.LoadUint64(&p.dropped) != 0 {
+		return nil, errors.New("peer already dropped")
+	}
+	// Skeleton sync retrieves batches of headers going backward without gaps.
+	// This ensures we can follow a clean parent progression without any reorg
+	// hiccups. There is no need for any other type of header retrieval, so do
+	// panic if there's such a request.
+	if !reverse || skip != 0 {
+		// Note, if other clients want to do these kinds of requests, it's their
+		// problem, it will still work. We just don't want *us* making complicated
+		// requests without a very strong reason to.
+		panic(fmt.Sprintf("invalid header retrieval: reverse %v, want true; skip %d, want 0", reverse, skip))
+	}
+	// If the skeleton syncer requests the genesis block, panic. Whilst it could
+	// be considered a valid request, our code specifically should not request it
+	// ever since we want to link up headers to an existing local chain, which at
+	// worst will be the genesis.
+	if int64(origin)-int64(amount) < 0 {
+		panic(fmt.Sprintf("headers requested before (or at) genesis: origin %d, amount %d", origin, amount))
+	}
+	// To make concurrency easier, the skeleton syncer always requests fixed size
+	// batches of headers. Panic if the peer is requested an amount other than the
+	// configured batch size (apart from the request leading to the genesis).
+	if amount > requestHeaders || (amount < requestHeaders && origin > uint64(amount)) {
+		panic(fmt.Sprintf("non-chunk size header batch requested: requested %d, want %d, origin %d", amount, requestHeaders, origin))
+	}
+	// Simple reverse header retrieval. Fill from the peer's chain and return.
+	// If the tester has a serve hook set, try to use that before falling back
+	// to the default behavior.
+	var headers []*types.Header
+	if p.serve != nil {
+		headers = p.serve(origin)
+	}
+	if headers == nil {
+		headers = make([]*types.Header, 0, amount)
+		if len(p.headers) > int(origin) { // Don't serve headers if we're missing the origin
+			for i := 0; i < amount; i++ {
+				// Consider nil headers as a form of attack and withhold them. Nil
+				// cannot be decoded from RLP, so it's not possible to produce an
+				// attack by sending/receiving those over eth.
+				header := p.headers[int(origin)-i]
+				if header == nil {
+					continue
+				}
+				headers = append(headers, header)
+			}
+		}
+	}
+	atomic.AddUint64(&p.served, uint64(len(headers)))
+
+	hashes := make([]common.Hash, len(headers))
+	for i, header := range headers {
+		hashes[i] = header.Hash()
+	}
+	// Deliver the headers to the downloader
+	req := &eth.Request{
+		Peer: p.id,
+	}
+	res := &eth.Response{
+		Req:  req,
+		Res:  (*eth.BlockHeadersPacket)(&headers),
+		Meta: hashes,
+		Time: 1,
+		Done: make(chan error),
+	}
+	go func() {
+		sink <- res
+		if err := <-res.Done; err != nil {
+			log.Warn("Skeleton test peer response rejected", "err", err)
+			atomic.AddUint64(&p.dropped, 1)
+		}
+	}()
+	return req, nil
+}
+
+func (p *skeletonTestPeer) Head() (common.Hash, *big.Int) {
+	panic("skeleton sync must not request the remote head")
+}
+
+func (p *skeletonTestPeer) RequestHeadersByHash(common.Hash, int, int, bool, chan *eth.Response) (*eth.Request, error) {
+	panic("skeleton sync must not request headers by hash")
+}
+
+func (p *skeletonTestPeer) RequestBodies([]common.Hash, chan *eth.Response) (*eth.Request, error) {
+	panic("skeleton sync must not request block bodies")
+}
+
+func (p *skeletonTestPeer) RequestReceipts([]common.Hash, chan *eth.Response) (*eth.Request, error) {
+	panic("skeleton sync must not request receipts")
+}
+
+// Tests various sync initializations based on previous leftovers in the database
+// and announced heads.
+func TestSkeletonSyncInit(t *testing.T) {
+	// Create a few key headers
+	var (
+		genesis  = &types.Header{Number: big.NewInt(0)}
+		block49  = &types.Header{Number: big.NewInt(49)}
+		block49B = &types.Header{Number: big.NewInt(49), Extra: []byte("B")}
+		block50  = &types.Header{Number: big.NewInt(50), ParentHash: block49.Hash()}
+	)
+	tests := []struct {
+		headers  []*types.Header // Database content (beside the genesis)
+		oldstate []*subchain     // Old sync state with various interrupted subchains
+		head     *types.Header   // New head header to announce to reorg to
+		newstate []*subchain     // Expected sync state after the reorg
+	}{
+		// Completely empty database with only the genesis set. The sync is expected
+		// to create a single subchain with the requested head.
+		{
+			head:     block50,
+			newstate: []*subchain{{Head: 50, Tail: 50}},
+		},
+		// Empty database with only the genesis set with a leftover empty sync
+		// progress. This is a synthetic case, just for the sake of covering things.
+		{
+			oldstate: []*subchain{},
+			head:     block50,
+			newstate: []*subchain{{Head: 50, Tail: 50}},
+		},
+		// A single leftover subchain is present, older than the new head. The
+		// old subchain should be left as is and a new one appended to the sync
+		// status.
+		{
+			oldstate: []*subchain{{Head: 10, Tail: 5}},
+			head:     block50,
+			newstate: []*subchain{
+				{Head: 50, Tail: 50},
+				{Head: 10, Tail: 5},
+			},
+		},
+		// Multiple leftover subchains are present, older than the new head. The
+		// old subchains should be left as is and a new one appended to the sync
+		// status.
+		{
+			oldstate: []*subchain{
+				{Head: 20, Tail: 15},
+				{Head: 10, Tail: 5},
+			},
+			head: block50,
+			newstate: []*subchain{
+				{Head: 50, Tail: 50},
+				{Head: 20, Tail: 15},
+				{Head: 10, Tail: 5},
+			},
+		},
+		// A single leftover subchain is present, newer than the new head. The
+		// newer subchain should be deleted and a fresh one created for the head.
+		{
+			oldstate: []*subchain{{Head: 65, Tail: 60}},
+			head:     block50,
+			newstate: []*subchain{{Head: 50, Tail: 50}},
+		},
+		// Multiple leftover subchains are present, newer than the new head. The
+		// newer subchains should be deleted and a fresh one created for the head.
+		{
+			oldstate: []*subchain{
+				{Head: 75, Tail: 70},
+				{Head: 65, Tail: 60},
+			},
+			head:     block50,
+			newstate: []*subchain{{Head: 50, Tail: 50}},
+		},
+
+		// Two leftover subchains are present, one fully older and one fully
+		// newer than the announced head. The head should delete the newer one,
+		// keeping the older one.
+		{
+			oldstate: []*subchain{
+				{Head: 65, Tail: 60},
+				{Head: 10, Tail: 5},
+			},
+			head: block50,
+			newstate: []*subchain{
+				{Head: 50, Tail: 50},
+				{Head: 10, Tail: 5},
+			},
+		},
+		// Multiple leftover subchains are present, some fully older and some
+		// fully newer than the announced head. The head should delete the newer
+		// ones, keeping the older ones.
+		{
+			oldstate: []*subchain{
+				{Head: 75, Tail: 70},
+				{Head: 65, Tail: 60},
+				{Head: 20, Tail: 15},
+				{Head: 10, Tail: 5},
+			},
+			head: block50,
+			newstate: []*subchain{
+				{Head: 50, Tail: 50},
+				{Head: 20, Tail: 15},
+				{Head: 10, Tail: 5},
+			},
+		},
+		// A single leftover subchain is present and the new head is extending
+		// it with one more header. We expect the subchain head to be pushed
+		// forward.
+		{
+			headers:  []*types.Header{block49},
+			oldstate: []*subchain{{Head: 49, Tail: 5}},
+			head:     block50,
+			newstate: []*subchain{{Head: 50, Tail: 5}},
+		},
+		// A single leftover subchain is present and although the new head does
+		// extend it number-wise, the hash chain does not link up. We expect a
+		// new subchain to be created for the dangling head.
+		{
+			headers:  []*types.Header{block49B},
+			oldstate: []*subchain{{Head: 49, Tail: 5}},
+			head:     block50,
+			newstate: []*subchain{
+				{Head: 50, Tail: 50},
+				{Head: 49, Tail: 5},
+			},
+		},
+		// A single leftover subchain is present. A new head is announced that
+		// links into the middle of it, correctly anchoring into an existing
+		// header. We expect the old subchain to be truncated and extended with
+		// the new head.
+		{
+			headers:  []*types.Header{block49},
+			oldstate: []*subchain{{Head: 100, Tail: 5}},
+			head:     block50,
+			newstate: []*subchain{{Head: 50, Tail: 5}},
+		},
+		// A single leftover subchain is present. A new head is announced that
+		// links into the middle of it, but does not anchor into an existing
+		// header. We expect the old subchain to be truncated and a new chain
+		// to be created for the dangling head.
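+		// (Block 50's parent is block49, which does not match the stored
+		// block49B at the same height, so the head cannot attach and the old
+		// subchain is cut back to 49.)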
+		{
+			headers:  []*types.Header{block49B},
+			oldstate: []*subchain{{Head: 100, Tail: 5}},
+			head:     block50,
+			newstate: []*subchain{
+				{Head: 50, Tail: 50},
+				{Head: 49, Tail: 5},
+			},
+		},
+	}
+	for i, tt := range tests {
+		// Create a fresh database and initialize it with the starting state
+		db := rawdb.NewMemoryDatabase()
+
+		rawdb.WriteHeader(db, genesis)
+		for _, header := range tt.headers {
+			rawdb.WriteSkeletonHeader(db, header)
+		}
+		if tt.oldstate != nil {
+			blob, _ := json.Marshal(&skeletonProgress{Subchains: tt.oldstate})
+			rawdb.WriteSkeletonSyncStatus(db, blob)
+		}
+		// Create a skeleton sync and run a cycle
+		wait := make(chan struct{})
+
+		skeleton := newSkeleton(db, newPeerSet(), nil, newHookedBackfiller())
+		skeleton.syncStarting = func() { close(wait) }
+		skeleton.Sync(tt.head, true)
+
+		<-wait
+		skeleton.Terminate()
+
+		// Ensure the correct resulting sync status
+		var progress skeletonProgress
+		json.Unmarshal(rawdb.ReadSkeletonSyncStatus(db), &progress)
+
+		if len(progress.Subchains) != len(tt.newstate) {
+			t.Errorf("test %d: subchain count mismatch: have %d, want %d", i, len(progress.Subchains), len(tt.newstate))
+			continue
+		}
+		for j := 0; j < len(progress.Subchains); j++ {
+			if progress.Subchains[j].Head != tt.newstate[j].Head {
+				t.Errorf("test %d: subchain %d head mismatch: have %d, want %d", i, j, progress.Subchains[j].Head, tt.newstate[j].Head)
+			}
+			if progress.Subchains[j].Tail != tt.newstate[j].Tail {
+				t.Errorf("test %d: subchain %d tail mismatch: have %d, want %d", i, j, progress.Subchains[j].Tail, tt.newstate[j].Tail)
+			}
+		}
+	}
+}
+
+// Tests that a running skeleton sync can be extended with properly linked up
+// headers but not with side chains.
+func TestSkeletonSyncExtend(t *testing.T) {
+	// Create a few key headers
+	var (
+		genesis  = &types.Header{Number: big.NewInt(0)}
+		block49  = &types.Header{Number: big.NewInt(49)}
+		block49B = &types.Header{Number: big.NewInt(49), Extra: []byte("B")}
+		block50  = &types.Header{Number: big.NewInt(50), ParentHash: block49.Hash()}
+		block51  = &types.Header{Number: big.NewInt(51), ParentHash: block50.Hash()}
+	)
+	tests := []struct {
+		head     *types.Header // New head header to announce to reorg to
+		extend   *types.Header // New head header to announce to extend with
+		newstate []*subchain   // Expected sync state after the reorg
+		err      error         // Whether extension succeeds or not
+	}{
+		// Initialize a sync and try to extend it with a subsequent block.
+		{
+			head:   block49,
+			extend: block50,
+			newstate: []*subchain{
+				{Head: 50, Tail: 49},
+			},
+		},
+		// Initialize a sync and try to extend it with the existing head block.
+		{
+			head:   block49,
+			extend: block49,
+			newstate: []*subchain{
+				{Head: 49, Tail: 49},
+			},
+		},
+		// Initialize a sync and try to extend it with a sibling block.
+		{
+			head:   block49,
+			extend: block49B,
+			newstate: []*subchain{
+				{Head: 49, Tail: 49},
+			},
+			err: errReorgDenied,
+		},
+		// Initialize a sync and try to extend it with a number-wise sequential
+		// header, but a hash-wise non-linking one.
+		{
+			head:   block49B,
+			extend: block50,
+			newstate: []*subchain{
+				{Head: 49, Tail: 49},
+			},
+			err: errReorgDenied,
+		},
+		// Initialize a sync and try to extend it with a non-linking future block.
+		{
+			head:   block49,
+			extend: block51,
+			newstate: []*subchain{
+				{Head: 49, Tail: 49},
+			},
+			err: errReorgDenied,
+		},
+		// Initialize a sync and try to extend it with a past canonical block.
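+		// (Extending backwards is refused: the skeleton only ever grows
+		// head-first, so announcing an older block yields errReorgDenied.)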
+		{
+			head:   block50,
+			extend: block49,
+			newstate: []*subchain{
+				{Head: 50, Tail: 50},
+			},
+			err: errReorgDenied,
+		},
+		// Initialize a sync and try to extend it with a past sidechain block.
+		{
+			head:   block50,
+			extend: block49B,
+			newstate: []*subchain{
+				{Head: 50, Tail: 50},
+			},
+			err: errReorgDenied,
+		},
+	}
+	for i, tt := range tests {
+		// Create a fresh database and initialize it with the starting state
+		db := rawdb.NewMemoryDatabase()
+		rawdb.WriteHeader(db, genesis)
+
+		// Create a skeleton sync and run a cycle
+		wait := make(chan struct{})
+
+		skeleton := newSkeleton(db, newPeerSet(), nil, newHookedBackfiller())
+		skeleton.syncStarting = func() { close(wait) }
+		skeleton.Sync(tt.head, true)
+
+		<-wait
+		if err := skeleton.Sync(tt.extend, false); err != tt.err {
+			t.Errorf("test %d: extension failure mismatch: have %v, want %v", i, err, tt.err)
+		}
+		skeleton.Terminate()
+
+		// Ensure the correct resulting sync status
+		var progress skeletonProgress
+		json.Unmarshal(rawdb.ReadSkeletonSyncStatus(db), &progress)
+
+		if len(progress.Subchains) != len(tt.newstate) {
+			t.Errorf("test %d: subchain count mismatch: have %d, want %d", i, len(progress.Subchains), len(tt.newstate))
+			continue
+		}
+		for j := 0; j < len(progress.Subchains); j++ {
+			if progress.Subchains[j].Head != tt.newstate[j].Head {
+				t.Errorf("test %d: subchain %d head mismatch: have %d, want %d", i, j, progress.Subchains[j].Head, tt.newstate[j].Head)
+			}
+			if progress.Subchains[j].Tail != tt.newstate[j].Tail {
+				t.Errorf("test %d: subchain %d tail mismatch: have %d, want %d", i, j, progress.Subchains[j].Tail, tt.newstate[j].Tail)
+			}
+		}
+	}
+}
+
+// Tests that the skeleton sync correctly retrieves headers from one or more
+// peers without duplicates or other strange side effects.
+func TestSkeletonSyncRetrievals(t *testing.T) {
+	log.Root().SetHandler(log.LvlFilterHandler(log.LvlTrace, log.StreamHandler(os.Stderr, log.TerminalFormat(true))))
+
+	// Since skeleton headers don't need to be meaningful, beyond a parent hash
+	// progression, create a long fake chain to test with.
+	chain := []*types.Header{{Number: big.NewInt(0)}}
+	for i := 1; i < 10000; i++ {
+		chain = append(chain, &types.Header{
+			ParentHash: chain[i-1].Hash(),
+			Number:     big.NewInt(int64(i)),
+		})
+	}
+	// Some tests require a forking side chain to trigger corner cases.
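+	// The fork shares the canonical prefix up to the midpoint and then diverges
+	// by setting the Extra field, which changes every subsequent block hash.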
+	var sidechain []*types.Header
+	for i := 0; i < len(chain)/2; i++ { // Fork at block #5000
+		sidechain = append(sidechain, chain[i])
+	}
+	for i := len(chain) / 2; i < len(chain); i++ {
+		sidechain = append(sidechain, &types.Header{
+			ParentHash: sidechain[i-1].Hash(),
+			Number:     big.NewInt(int64(i)),
+			Extra:      []byte("B"), // force a different hash
+		})
+	}
+	tests := []struct {
+		fill          bool // Whether to run a real backfiller in this test case
+		unpredictable bool // Whether to ignore drops/serves due to uncertain packet assignments
+
+		head     *types.Header       // New head header to announce to reorg to
+		peers    []*skeletonTestPeer // Initial peer set to start the sync with
+		midstate []*subchain         // Expected sync state after initial cycle
+		midserve uint64              // Expected number of header retrievals after initial cycle
+		middrop  uint64              // Expected number of peers dropped after initial cycle
+
+		newHead  *types.Header     // New header to announce on top of the old one
+		newPeer  *skeletonTestPeer // New peer to join the skeleton syncer
+		endstate []*subchain       // Expected sync state after the post-init event
+		endserve uint64            // Expected number of header retrievals after the post-init event
+		enddrop  uint64            // Expected number of peers dropped after the post-init event
+	}{
+		// Completely empty database with only the genesis set. The sync is expected
+		// to create a single subchain with the requested head. No peers however, so
+		// the sync should be stuck without any progression.
+		//
+		// When a new peer is added, it should detect the join and fill the headers
+		// to the genesis block.
+		{
+			head:     chain[len(chain)-1],
+			midstate: []*subchain{{Head: uint64(len(chain) - 1), Tail: uint64(len(chain) - 1)}},
+
+			newPeer:  newSkeletonTestPeer("test-peer", chain),
+			endstate: []*subchain{{Head: uint64(len(chain) - 1), Tail: 1}},
+			endserve: uint64(len(chain) - 2), // len - head - genesis
+		},
+		// Completely empty database with only the genesis set. The sync is expected
+		// to create a single subchain with the requested head. With one valid peer,
+		// the sync is expected to complete already in the initial round.
+		//
+		// Adding a second peer should not have any effect.
+		{
+			head:     chain[len(chain)-1],
+			peers:    []*skeletonTestPeer{newSkeletonTestPeer("test-peer-1", chain)},
+			midstate: []*subchain{{Head: uint64(len(chain) - 1), Tail: 1}},
+			midserve: uint64(len(chain) - 2), // len - head - genesis
+
+			newPeer:  newSkeletonTestPeer("test-peer-2", chain),
+			endstate: []*subchain{{Head: uint64(len(chain) - 1), Tail: 1}},
+			endserve: uint64(len(chain) - 2), // len - head - genesis
+		},
+		// Completely empty database with only the genesis set. The sync is expected
+		// to create a single subchain with the requested head. With many valid peers,
+		// the sync is expected to complete already in the initial round.
+		//
+		// Adding a new peer should not have any effect.
+		{
+			head: chain[len(chain)-1],
+			peers: []*skeletonTestPeer{
+				newSkeletonTestPeer("test-peer-1", chain),
+				newSkeletonTestPeer("test-peer-2", chain),
+				newSkeletonTestPeer("test-peer-3", chain),
+			},
+			midstate: []*subchain{{Head: uint64(len(chain) - 1), Tail: 1}},
+			midserve: uint64(len(chain) - 2), // len - head - genesis
+
+			newPeer:  newSkeletonTestPeer("test-peer-4", chain),
+			endstate: []*subchain{{Head: uint64(len(chain) - 1), Tail: 1}},
+			endserve: uint64(len(chain) - 2), // len - head - genesis
+		},
+		// This test checks if a peer tries to withhold a header - *on* the sync
+		// boundary - instead of sending the requested amount. The malicious short
+		// package should not be accepted.
+		//
+		// Joining with a new peer should however unblock the sync.
+		{
+			head: chain[requestHeaders+100],
+			peers: []*skeletonTestPeer{
+				newSkeletonTestPeer("header-skipper", append(append(append([]*types.Header{}, chain[:99]...), nil), chain[100:]...)),
+			},
+			midstate: []*subchain{{Head: requestHeaders + 100, Tail: 100}},
+			midserve: requestHeaders + 101 - 3, // len - head - genesis - missing
+			middrop:  1,                        // penalize shortened header deliveries
+
+			newPeer:  newSkeletonTestPeer("good-peer", chain),
+			endstate: []*subchain{{Head: requestHeaders + 100, Tail: 1}},
+			endserve: (requestHeaders + 101 - 3) + (100 - 1), // midserve + lenrest - genesis
+			enddrop:  1,                                      // no new drops
+		},
+		// This test checks if a peer tries to withhold a header - *off* the sync
+		// boundary - instead of sending the requested amount. The malicious short
+		// package should not be accepted.
+		//
+		// Joining with a new peer should however unblock the sync.
+		{
+			head: chain[requestHeaders+100],
+			peers: []*skeletonTestPeer{
+				newSkeletonTestPeer("header-skipper", append(append(append([]*types.Header{}, chain[:50]...), nil), chain[51:]...)),
+			},
+			midstate: []*subchain{{Head: requestHeaders + 100, Tail: 100}},
+			midserve: requestHeaders + 101 - 3, // len - head - genesis - missing
+			middrop:  1,                        // penalize shortened header deliveries
+
+			newPeer:  newSkeletonTestPeer("good-peer", chain),
+			endstate: []*subchain{{Head: requestHeaders + 100, Tail: 1}},
+			endserve: (requestHeaders + 101 - 3) + (100 - 1), // midserve + lenrest - genesis
+			enddrop:  1,                                      // no new drops
+		},
+		// This test checks if a peer tries to duplicate a header - *on* the sync
+		// boundary - instead of sending the correct sequence. The malicious duped
+		// package should not be accepted.
+		//
+		// Joining with a new peer should however unblock the sync.
+		{
+			head: chain[requestHeaders+100], // We want to force the 100th header to be a request boundary
+			peers: []*skeletonTestPeer{
+				newSkeletonTestPeer("header-duper", append(append(append([]*types.Header{}, chain[:99]...), chain[98]), chain[100:]...)),
+			},
+			midstate: []*subchain{{Head: requestHeaders + 100, Tail: 100}},
+			midserve: requestHeaders + 101 - 2, // len - head - genesis
+			middrop:  1,                        // penalize invalid header sequences
+
+			newPeer:  newSkeletonTestPeer("good-peer", chain),
+			endstate: []*subchain{{Head: requestHeaders + 100, Tail: 1}},
+			endserve: (requestHeaders + 101 - 2) + (100 - 1), // midserve + lenrest - genesis
+			enddrop:  1,                                      // no new drops
+		},
+		// This test checks if a peer tries to duplicate a header - *off* the sync
+		// boundary - instead of sending the correct sequence. The malicious duped
+		// package should not be accepted.
+		//
+		// Joining with a new peer should however unblock the sync.
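+		// (The malicious peer below serves chain[49] twice in place of
+		// chain[50], so the delivered batch is sequence-broken away from the
+		// request boundary.)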
+		{
+			head: chain[requestHeaders+100], // We want to force the 100th header to be a request boundary
+			peers: []*skeletonTestPeer{
+				newSkeletonTestPeer("header-duper", append(append(append([]*types.Header{}, chain[:50]...), chain[49]), chain[51:]...)),
+			},
+			midstate: []*subchain{{Head: requestHeaders + 100, Tail: 100}},
+			midserve: requestHeaders + 101 - 2, // len - head - genesis
+			middrop:  1,                        // penalize invalid header sequences
+
+			newPeer:  newSkeletonTestPeer("good-peer", chain),
+			endstate: []*subchain{{Head: requestHeaders + 100, Tail: 1}},
+			endserve: (requestHeaders + 101 - 2) + (100 - 1), // midserve + lenrest - genesis
+			enddrop:  1,                                      // no new drops
+		},
+		// This test checks if a peer tries to inject a different header - *on*
+		// the sync boundary - instead of sending the correct sequence. The bad
+		// package should not be accepted.
+		//
+		// Joining with a new peer should however unblock the sync.
+		{
+			head: chain[requestHeaders+100], // We want to force the 100th header to be a request boundary
+			peers: []*skeletonTestPeer{
+				newSkeletonTestPeer("header-changer",
+					append(
+						append(
+							append([]*types.Header{}, chain[:99]...),
+							&types.Header{
+								ParentHash: chain[98].Hash(),
+								Number:     big.NewInt(int64(99)),
+								GasLimit:   1,
+							},
+						), chain[100:]...,
+					),
+				),
+			},
+			midstate: []*subchain{{Head: requestHeaders + 100, Tail: 100}},
+			midserve: requestHeaders + 101 - 2, // len - head - genesis
+			middrop:  1,                        // different set of headers, drop // TODO(karalabe): maybe just diff sync?
+
+			newPeer:  newSkeletonTestPeer("good-peer", chain),
+			endstate: []*subchain{{Head: requestHeaders + 100, Tail: 1}},
+			endserve: (requestHeaders + 101 - 2) + (100 - 1), // midserve + lenrest - genesis
+			enddrop:  1,                                      // no new drops
+		},
+		// This test checks if a peer tries to inject a different header - *off*
+		// the sync boundary - instead of sending the correct sequence. The bad
+		// package should not be accepted.
+		//
+		// Joining with a new peer should however unblock the sync.
+		{
+			head: chain[requestHeaders+100], // We want to force the 100th header to be a request boundary
+			peers: []*skeletonTestPeer{
+				newSkeletonTestPeer("header-changer",
+					append(
+						append(
+							append([]*types.Header{}, chain[:50]...),
+							&types.Header{
+								ParentHash: chain[49].Hash(),
+								Number:     big.NewInt(int64(50)),
+								GasLimit:   1,
+							},
+						), chain[51:]...,
+					),
+				),
+			},
+			midstate: []*subchain{{Head: requestHeaders + 100, Tail: 100}},
+			midserve: requestHeaders + 101 - 2, // len - head - genesis
+			middrop:  1,                        // different set of headers, drop
+
+			newPeer:  newSkeletonTestPeer("good-peer", chain),
+			endstate: []*subchain{{Head: requestHeaders + 100, Tail: 1}},
+			endserve: (requestHeaders + 101 - 2) + (100 - 1), // midserve + lenrest - genesis
+			enddrop:  1,                                      // no new drops
+		},
+		// This test reproduces a bug caught during review (kudos to @holiman)
+		// where a subchain is merged with a previously interrupted one, causing
+		// pending data in the scratch space to become "invalid" (since we jump
+		// ahead during subchain merge). In that case it is expected to ignore
+		// the queued up data instead of trying to process on top of a shifted
+		// task set.
+		//
+		// The test is a bit convoluted since it needs to trigger a concurrency
+		// issue. First we sync up an initial chain of 2x512 items. Then announce
+		// 2x512+2 as head and delay delivering the head batch to fill the scratch
+		// space first. The delivery head should merge with the previous download
+		// and the scratch space must not be consumed further.
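+		// (Both peers below delay the head batch by 100ms through the serve
+		// hook, giving the scratch space time to fill before the merge fires.)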
+		{
+			head: chain[2*requestHeaders],
+			peers: []*skeletonTestPeer{
+				newSkeletonTestPeerWithHook("peer-1", chain, func(origin uint64) []*types.Header {
+					if origin == chain[2*requestHeaders+1].Number.Uint64() {
+						time.Sleep(100 * time.Millisecond)
+					}
+					return nil // Fallback to default behavior, just delayed
+				}),
+				newSkeletonTestPeerWithHook("peer-2", chain, func(origin uint64) []*types.Header {
+					if origin == chain[2*requestHeaders+1].Number.Uint64() {
+						time.Sleep(100 * time.Millisecond)
+					}
+					return nil // Fallback to default behavior, just delayed
+				}),
+			},
+			midstate: []*subchain{{Head: 2 * requestHeaders, Tail: 1}},
+			midserve: 2*requestHeaders - 1, // len - head - genesis
+
+			newHead:  chain[2*requestHeaders+2],
+			endstate: []*subchain{{Head: 2*requestHeaders + 2, Tail: 1}},
+			endserve: 4 * requestHeaders,
+		},
+		// This test reproduces a bug caught by (@rjl493456442) where a skeleton
+		// header goes missing, causing the sync to get stuck and/or panic.
+		//
+		// The setup requires a previously successfully synced chain up to a block
+		// height N. That results in a single skeleton header (block N) and a single
+		// subchain (head N, tail N) being stored on disk.
+		//
+		// The following step requires a new sync cycle to a new side chain of a
+		// height higher than N (e.g. N+2), with an ancestor lower than N (e.g. N-2).
+		// In this scenario, when processing a batch of headers, a link point of
+		// N-2 will be found, meaning that N-1 and N have been overwritten.
+		//
+		// The link event triggers an early exit, noticing that the previous sub-
+		// chain is a leftover and deletes it (with its skeleton header N). But
+		// since skeleton header N has been overwritten to the new side chain, we
+		// end up losing it and creating a gap.
+		{
+			fill:          true,
+			unpredictable: true, // We have good and bad peers; the bad one may be dropped, but the test is too short for certainty
+
+			head:     chain[len(chain)/2+1], // Sync up until the sidechain common ancestor + 2
+			peers:    []*skeletonTestPeer{newSkeletonTestPeer("test-peer-oldchain", chain)},
+			midstate: []*subchain{{Head: uint64(len(chain)/2 + 1), Tail: 1}},
+
+			newHead:  sidechain[len(sidechain)/2+3], // Sync up until the sidechain common ancestor + 4
+			newPeer:  newSkeletonTestPeer("test-peer-newchain", sidechain),
+			endstate: []*subchain{{Head: uint64(len(sidechain)/2 + 3), Tail: uint64(len(chain) / 2)}},
+		},
+	}
+	for i, tt := range tests {
+		// Create a fresh database and initialize it with the starting state
+		db := rawdb.NewMemoryDatabase()
+
+		rawdb.WriteBlock(db, types.NewBlockWithHeader(chain[0]))
+		rawdb.WriteReceipts(db, chain[0].Hash(), chain[0].Number.Uint64(), types.Receipts{})
+
+		// Create a peer set to feed headers through
+		peerset := newPeerSet()
+		for _, peer := range tt.peers {
+			peerset.Register(newPeerConnection(peer.id, eth.ETH66, peer, log.New("id", peer.id)))
+		}
+		// Create a peer dropper to track malicious peers
+		dropped := make(map[string]int)
+		drop := func(peer string) {
+			if p := peerset.Peer(peer); p != nil {
+				atomic.AddUint64(&p.peer.(*skeletonTestPeer).dropped, 1)
+			}
+			peerset.Unregister(peer)
+			dropped[peer]++
+		}
+		// Create a backfiller if we need to run more advanced tests
+		filler := newHookedBackfiller()
+		if tt.fill {
+			var filled *types.Header
+
+			filler = &hookedBackfiller{
+				resumeHook: func() {
+					var progress skeletonProgress
+					json.Unmarshal(rawdb.ReadSkeletonSyncStatus(db), &progress)
+
+					for progress.Subchains[0].Tail < progress.Subchains[0].Head {
+						header := rawdb.ReadSkeletonHeader(db, progress.Subchains[0].Tail)
+
+						rawdb.WriteBlock(db, types.NewBlockWithHeader(header))
+						rawdb.WriteReceipts(db, header.Hash(), header.Number.Uint64(), types.Receipts{})
+
+						rawdb.DeleteSkeletonHeader(db, header.Number.Uint64())
+
+						progress.Subchains[0].Tail++
+						progress.Subchains[0].Next = header.Hash()
+					}
+					filled = rawdb.ReadSkeletonHeader(db, progress.Subchains[0].Tail)
+
+					rawdb.WriteBlock(db, types.NewBlockWithHeader(filled))
+					rawdb.WriteReceipts(db, filled.Hash(), filled.Number.Uint64(), types.Receipts{})
+				},
+
+				suspendHook: func() *types.Header {
+					prev := filled
+					filled = nil
+
+					return prev
+				},
+			}
+		}
+		// Create a skeleton sync and run a cycle
+		skeleton := newSkeleton(db, peerset, drop, filler)
+		skeleton.Sync(tt.head, true)
+
+		var progress skeletonProgress
+		// Wait a bit (bleah) for the initial sync loop to go to idle. This might
+		// be either a finish or a never-start, hence there's no event to hook.
+		check := func() error {
+			if len(progress.Subchains) != len(tt.midstate) {
+				return fmt.Errorf("test %d, mid state: subchain count mismatch: have %d, want %d", i, len(progress.Subchains), len(tt.midstate))
+			}
+			for j := 0; j < len(progress.Subchains); j++ {
+				if progress.Subchains[j].Head != tt.midstate[j].Head {
+					return fmt.Errorf("test %d, mid state: subchain %d head mismatch: have %d, want %d", i, j, progress.Subchains[j].Head, tt.midstate[j].Head)
+				}
+				if progress.Subchains[j].Tail != tt.midstate[j].Tail {
+					return fmt.Errorf("test %d, mid state: subchain %d tail mismatch: have %d, want %d", i, j, progress.Subchains[j].Tail, tt.midstate[j].Tail)
+				}
+			}
+			return nil
+		}
+
+		waitStart := time.Now()
+		for waitTime := 20 * time.Millisecond; time.Since(waitStart) < time.Second; waitTime = waitTime * 2 {
+			time.Sleep(waitTime)
+			// Check whether the post-init mid state matches the required results
+			json.Unmarshal(rawdb.ReadSkeletonSyncStatus(db), &progress)
+			if err := check(); err == nil {
+				break
+			}
+		}
+		if err := check(); err != nil {
+			t.Error(err)
+			continue
+		}
+		if !tt.unpredictable {
+			var served uint64
+			for _, peer := range tt.peers {
+				served += atomic.LoadUint64(&peer.served)
+			}
+			if served != tt.midserve {
+				t.Errorf("test %d, mid state: served headers mismatch: have %d, want %d", i, served, tt.midserve)
+			}
+			var drops uint64
+			for _, peer := range tt.peers {
+				drops += atomic.LoadUint64(&peer.dropped)
+			}
+			if drops != tt.middrop {
+				t.Errorf("test %d, mid state: dropped peers mismatch: have %d, want %d", i, drops, tt.middrop)
+			}
+		}
+		// Apply the post-init events if there are any
+		if tt.newHead != nil {
+			skeleton.Sync(tt.newHead, true)
+		}
+		if tt.newPeer != nil {
+			if err := peerset.Register(newPeerConnection(tt.newPeer.id, eth.ETH66, tt.newPeer, log.New("id", tt.newPeer.id))); err != nil {
+				t.Errorf("test %d: failed to register new peer: %v", i, err)
+			}
+		}
+		// Wait a bit (bleah) for the second sync loop to go to idle. This might
+		// be either a finish or a never-start, hence there's no event to hook.
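+		// The end-state check below is re-polled with the same exponential
+		// backoff as the mid-state check, capped at one second overall.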
+		check = func() error {
+			if len(progress.Subchains) != len(tt.endstate) {
+				return fmt.Errorf("test %d, end state: subchain count mismatch: have %d, want %d", i, len(progress.Subchains), len(tt.endstate))
+			}
+			for j := 0; j < len(progress.Subchains); j++ {
+				if progress.Subchains[j].Head != tt.endstate[j].Head {
+					return fmt.Errorf("test %d, end state: subchain %d head mismatch: have %d, want %d", i, j, progress.Subchains[j].Head, tt.endstate[j].Head)
+				}
+				if progress.Subchains[j].Tail != tt.endstate[j].Tail {
+					return fmt.Errorf("test %d, end state: subchain %d tail mismatch: have %d, want %d", i, j, progress.Subchains[j].Tail, tt.endstate[j].Tail)
+				}
+			}
+			return nil
+		}
+		waitStart = time.Now()
+		for waitTime := 20 * time.Millisecond; time.Since(waitStart) < time.Second; waitTime = waitTime * 2 {
+			time.Sleep(waitTime)
+			// Check whether the post-init end state matches the required results
+			json.Unmarshal(rawdb.ReadSkeletonSyncStatus(db), &progress)
+			if err := check(); err == nil {
+				break
+			}
+		}
+		if err := check(); err != nil {
+			t.Error(err)
+			continue
+		}
+		// Check that the peers served no more headers than we actually needed
+		if !tt.unpredictable {
+			served := uint64(0)
+			for _, peer := range tt.peers {
+				served += atomic.LoadUint64(&peer.served)
+			}
+			if tt.newPeer != nil {
+				served += atomic.LoadUint64(&tt.newPeer.served)
+			}
+			if served != tt.endserve {
+				t.Errorf("test %d, end state: served headers mismatch: have %d, want %d", i, served, tt.endserve)
+			}
+			drops := uint64(0)
+			for _, peer := range tt.peers {
+				drops += atomic.LoadUint64(&peer.dropped)
+			}
+			if tt.newPeer != nil {
+				drops += atomic.LoadUint64(&tt.newPeer.dropped)
+			}
+			if drops != tt.enddrop {
+				t.Errorf("test %d, end state: dropped peers mismatch: have %d, want %d", i, drops, tt.enddrop)
+			}
+		}
+		// Clean up any leftover skeleton sync resources
+		skeleton.Terminate()
+	}
+}
diff --git a/eth/downloader/statesync.go b/eth/downloader/statesync.go
index e4db59a4e..57c0d294d 100644
--- a/eth/downloader/statesync.go
+++ b/eth/downloader/statesync.go
@@ -17,49 +17,12 @@
 package downloader
 
 import (
-	"fmt"
 	"sync"
-	"time"
-
-	"golang.org/x/crypto/sha3"
 
 	"github.com/scroll-tech/go-ethereum/common"
-	"github.com/scroll-tech/go-ethereum/core/rawdb"
-	"github.com/scroll-tech/go-ethereum/core/state"
-	"github.com/scroll-tech/go-ethereum/crypto"
-	"github.com/scroll-tech/go-ethereum/ethdb"
 	"github.com/scroll-tech/go-ethereum/log"
-	"github.com/scroll-tech/go-ethereum/trie"
 )
 
-// stateReq represents a batch of state fetch requests grouped together into
-// a single data retrieval network packet.
-type stateReq struct {
-	nItems    uint16                    // Number of items requested for download (max is 384, so uint16 is sufficient)
-	trieTasks map[common.Hash]*trieTask // Trie node download tasks to track previous attempts
-	codeTasks map[common.Hash]*codeTask // Byte code download tasks to track previous attempts
-	timeout   time.Duration             // Maximum round trip time for this to complete
-	timer     *time.Timer               // Timer to fire when the RTT timeout expires
-	peer      *peerConnection           // Peer that we're requesting from
-	delivered time.Time                 // Time when the packet was delivered (independent when we process it)
-	response  [][]byte                  // Response data of the peer (nil for timeouts)
-	dropped   bool                      // Flag whether the peer dropped off early
-}
-
-// timedOut returns if this request timed out.
-func (req *stateReq) timedOut() bool { - return req.response == nil -} - -// stateSyncStats is a collection of progress stats to report during a state trie -// sync to RPC requests as well as to display in user logs. -type stateSyncStats struct { - processed uint64 // Number of state entries processed - duplicate uint64 // Number of state entries downloaded twice - unexpected uint64 // Number of non-requested state entries received - pending uint64 // Number of still pending state entries -} - // syncState starts downloading state with the given root hash. func (d *Downloader) syncState(root common.Hash) *stateSync { // Create the state sync @@ -86,8 +49,6 @@ func (d *Downloader) stateFetcher() { for next := s; next != nil; { next = d.runStateSync(next) } - case <-d.stateCh: - // Ignore state responses while no sync is running. case <-d.quitCh: return } @@ -97,216 +58,44 @@ func (d *Downloader) stateFetcher() { // runStateSync runs a state synchronisation until it completes or another root // hash is requested to be switched over to. func (d *Downloader) runStateSync(s *stateSync) *stateSync { - var ( - active = make(map[string]*stateReq) // Currently in-flight requests - finished []*stateReq // Completed or failed requests - timeout = make(chan *stateReq) // Timed out active requests - ) log.Trace("State sync starting", "root", s.root) - defer func() { - // Cancel active request timers on exit. Also set peers to idle so they're - // available for the next sync. - for _, req := range active { - req.timer.Stop() - req.peer.SetNodeDataIdle(int(req.nItems), time.Now()) - } - }() go s.run() defer s.Cancel() - // Listen for peer departure events to cancel assigned tasks - peerDrop := make(chan *peerConnection, 1024) - peerSub := s.d.peers.SubscribePeerDrops(peerDrop) - defer peerSub.Unsubscribe() - for { - // Enable sending of the first buffered element if there is one. - var ( - deliverReq *stateReq - deliverReqCh chan *stateReq - ) - if len(finished) > 0 { - deliverReq = finished[0] - deliverReqCh = s.deliver - } - select { - // The stateSync lifecycle: case next := <-d.stateSyncStart: - d.spindownStateSync(active, finished, timeout, peerDrop) return next case <-s.done: - d.spindownStateSync(active, finished, timeout, peerDrop) return nil - - // Send the next finished request to the current sync: - case deliverReqCh <- deliverReq: - // Shift out the first request, but also set the emptied slot to nil for GC - copy(finished, finished[1:]) - finished[len(finished)-1] = nil - finished = finished[:len(finished)-1] - - // Handle incoming state packs: - case pack := <-d.stateCh: - // Discard any data not requested (or previously timed out) - req := active[pack.PeerId()] - if req == nil { - log.Debug("Unrequested node data", "peer", pack.PeerId(), "len", pack.Items()) - continue - } - // Finalize the request and queue up for processing - req.timer.Stop() - req.response = pack.(*statePack).states - req.delivered = time.Now() - - finished = append(finished, req) - delete(active, pack.PeerId()) - - // Handle dropped peer connections: - case p := <-peerDrop: - // Skip if no request is currently pending - req := active[p.id] - if req == nil { - continue - } - // Finalize the request and queue up for processing - req.timer.Stop() - req.dropped = true - req.delivered = time.Now() - - finished = append(finished, req) - delete(active, p.id) - - // Handle timed-out requests: - case req := <-timeout: - // If the peer is already requesting something else, ignore the stale timeout. 
- // This can happen when the timeout and the delivery happens simultaneously, - // causing both pathways to trigger. - if active[req.peer.id] != req { - continue - } - req.delivered = time.Now() - // Move the timed out data back into the download queue - finished = append(finished, req) - delete(active, req.peer.id) - - // Track outgoing state requests: - case req := <-d.trackStateReq: - // If an active request already exists for this peer, we have a problem. In - // theory the trie node schedule must never assign two requests to the same - // peer. In practice however, a peer might receive a request, disconnect and - // immediately reconnect before the previous times out. In this case the first - // request is never honored, alas we must not silently overwrite it, as that - // causes valid requests to go missing and sync to get stuck. - if old := active[req.peer.id]; old != nil { - log.Warn("Busy peer assigned new state fetch", "peer", old.peer.id) - // Move the previous request to the finished set - old.timer.Stop() - old.dropped = true - old.delivered = time.Now() - finished = append(finished, old) - } - // Start a timer to notify the sync loop if the peer stalled. - req.timer = time.AfterFunc(req.timeout, func() { - timeout <- req - }) - active[req.peer.id] = req - } - } -} - -// spindownStateSync 'drains' the outstanding requests; some will be delivered and other -// will time out. This is to ensure that when the next stateSync starts working, all peers -// are marked as idle and de facto _are_ idle. -func (d *Downloader) spindownStateSync(active map[string]*stateReq, finished []*stateReq, timeout chan *stateReq, peerDrop chan *peerConnection) { - log.Trace("State sync spinning down", "active", len(active), "finished", len(finished)) - for len(active) > 0 { - var ( - req *stateReq - reason string - ) - select { - // Handle (drop) incoming state packs: - case pack := <-d.stateCh: - req = active[pack.PeerId()] - reason = "delivered" - // Handle dropped peer connections: - case p := <-peerDrop: - req = active[p.id] - reason = "peerdrop" - // Handle timed-out requests: - case req = <-timeout: - reason = "timeout" - } - if req == nil { - continue } - req.peer.log.Trace("State peer marked idle (spindown)", "req.items", int(req.nItems), "reason", reason) - req.timer.Stop() - delete(active, req.peer.id) - req.peer.SetNodeDataIdle(int(req.nItems), time.Now()) - } - // The 'finished' set contains deliveries that we were going to pass to processing. - // Those are now moot, but we still need to set those peers as idle, which would - // otherwise have been done after processing - for _, req := range finished { - req.peer.SetNodeDataIdle(int(req.nItems), time.Now()) } } // stateSync schedules requests for downloading a particular state trie defined // by a given state root. 
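+// With the legacy eth-protocol state fetcher removed, this is now a thin
+// wrapper that hands the root off to the snap syncer and tracks lifecycle
+// signals; roughly:
+//
+//	s := newStateSync(d, root)
+//	go s.run()      // drives d.SnapSyncer.Sync(root, s.cancel)
+//	err := s.Wait() // blocks until the sync finishes or is cancelled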
type stateSync struct { - d *Downloader // Downloader instance to access and manage current peerset - - root common.Hash // State root currently being synced - sched *trie.Sync // State trie sync scheduler defining the tasks - keccak crypto.KeccakState // Keccak256 hasher to verify deliveries with - - trieTasks map[common.Hash]*trieTask // Set of trie node tasks currently queued for retrieval - codeTasks map[common.Hash]*codeTask // Set of byte code tasks currently queued for retrieval - - numUncommitted int - bytesUncommitted int - - started chan struct{} // Started is signalled once the sync loop starts + d *Downloader // Downloader instance to access and manage current peerset + root common.Hash // State root currently being synced - deliver chan *stateReq // Delivery channel multiplexing peer responses - cancel chan struct{} // Channel to signal a termination request - cancelOnce sync.Once // Ensures cancel only ever gets called once - done chan struct{} // Channel to signal termination completion - err error // Any error hit during sync (set before completion) -} - -// trieTask represents a single trie node download task, containing a set of -// peers already attempted retrieval from to detect stalled syncs and abort. -type trieTask struct { - path [][]byte - attempts map[string]struct{} -} - -// codeTask represents a single byte code download task, containing a set of -// peers already attempted retrieval from to detect stalled syncs and abort. -type codeTask struct { - attempts map[string]struct{} + started chan struct{} // Started is signalled once the sync loop starts + cancel chan struct{} // Channel to signal a termination request + cancelOnce sync.Once // Ensures cancel only ever gets called once + done chan struct{} // Channel to signal termination completion + err error // Any error hit during sync (set before completion) } // newStateSync creates a new state trie download scheduler. This method does not // yet start the sync. The user needs to call run to initiate. func newStateSync(d *Downloader, root common.Hash) *stateSync { return &stateSync{ - d: d, - root: root, - sched: state.NewStateSync(root, d.stateDB, d.stateBloom, nil), - keccak: sha3.NewLegacyKeccak256().(crypto.KeccakState), - trieTasks: make(map[common.Hash]*trieTask), - codeTasks: make(map[common.Hash]*codeTask), - deliver: make(chan *stateReq), - cancel: make(chan struct{}), - done: make(chan struct{}), - started: make(chan struct{}), + d: d, + root: root, + cancel: make(chan struct{}), + done: make(chan struct{}), + started: make(chan struct{}), } } @@ -315,11 +104,7 @@ func newStateSync(d *Downloader, root common.Hash) *stateSync { // finish. func (s *stateSync) run() { close(s.started) - if s.d.snapSync { - s.err = s.d.SnapSyncer.Sync(s.root, s.cancel) - } else { - s.err = s.loop() - } + s.err = s.d.SnapSyncer.Sync(s.root, s.cancel) close(s.done) } @@ -336,281 +121,3 @@ func (s *stateSync) Cancel() error { }) return s.Wait() } - -// loop is the main event loop of a state trie sync. It it responsible for the -// assignment of new tasks to peers (including sending it to them) as well as -// for the processing of inbound data. Note, that the loop does not directly -// receive data from peers, rather those are buffered up in the downloader and -// pushed here async. The reason is to decouple processing from data receipt -// and timeouts. 
-func (s *stateSync) loop() (err error) { - // Listen for new peer events to assign tasks to them - newPeer := make(chan *peerConnection, 1024) - peerSub := s.d.peers.SubscribeNewPeers(newPeer) - defer peerSub.Unsubscribe() - defer func() { - cerr := s.commit(true) - if err == nil { - err = cerr - } - }() - - // Keep assigning new tasks until the sync completes or aborts - for s.sched.Pending() > 0 { - if err = s.commit(false); err != nil { - return err - } - s.assignTasks() - // Tasks assigned, wait for something to happen - select { - case <-newPeer: - // New peer arrived, try to assign it download tasks - - case <-s.cancel: - return errCancelStateFetch - - case <-s.d.cancelCh: - return errCanceled - - case req := <-s.deliver: - // Response, disconnect or timeout triggered, drop the peer if stalling - log.Trace("Received node data response", "peer", req.peer.id, "count", len(req.response), "dropped", req.dropped, "timeout", !req.dropped && req.timedOut()) - if req.nItems <= 2 && !req.dropped && req.timedOut() { - // 2 items are the minimum requested, if even that times out, we've no use of - // this peer at the moment. - log.Warn("Stalling state sync, dropping peer", "peer", req.peer.id) - if s.d.dropPeer == nil { - // The dropPeer method is nil when `--copydb` is used for a local copy. - // Timeouts can occur if e.g. compaction hits at the wrong time, and can be ignored - req.peer.log.Warn("Downloader wants to drop peer, but peerdrop-function is not set", "peer", req.peer.id) - } else { - s.d.dropPeer(req.peer.id) - - // If this peer was the master peer, abort sync immediately - s.d.cancelLock.RLock() - master := req.peer.id == s.d.cancelPeer - s.d.cancelLock.RUnlock() - - if master { - s.d.cancel() - return errTimeout - } - } - } - // Process all the received blobs and check for stale delivery - delivered, err := s.process(req) - req.peer.SetNodeDataIdle(delivered, req.delivered) - if err != nil { - log.Warn("Node data write error", "err", err) - return err - } - } - } - return nil -} - -func (s *stateSync) commit(force bool) error { - if !force && s.bytesUncommitted < ethdb.IdealBatchSize { - return nil - } - start := time.Now() - b := s.d.stateDB.NewBatch() - if err := s.sched.Commit(b); err != nil { - return err - } - if err := b.Write(); err != nil { - return fmt.Errorf("DB write error: %v", err) - } - s.updateStats(s.numUncommitted, 0, 0, time.Since(start)) - s.numUncommitted = 0 - s.bytesUncommitted = 0 - return nil -} - -// assignTasks attempts to assign new tasks to all idle peers, either from the -// batch currently being retried, or fetching new data from the trie sync itself. 
-func (s *stateSync) assignTasks() { - // Iterate over all idle peers and try to assign them state fetches - peers, _ := s.d.peers.NodeDataIdlePeers() - for _, p := range peers { - // Assign a batch of fetches proportional to the estimated latency/bandwidth - cap := p.NodeDataCapacity(s.d.peers.rates.TargetRoundTrip()) - req := &stateReq{peer: p, timeout: s.d.peers.rates.TargetTimeout()} - - nodes, _, codes := s.fillTasks(cap, req) - - // If the peer was assigned tasks to fetch, send the network request - if len(nodes)+len(codes) > 0 { - req.peer.log.Trace("Requesting batch of state data", "nodes", len(nodes), "codes", len(codes), "root", s.root) - select { - case s.d.trackStateReq <- req: - req.peer.FetchNodeData(append(nodes, codes...)) // Unified retrieval under eth/6x - case <-s.cancel: - case <-s.d.cancelCh: - } - } - } -} - -// fillTasks fills the given request object with a maximum of n state download -// tasks to send to the remote peer. -func (s *stateSync) fillTasks(n int, req *stateReq) (nodes []common.Hash, paths []trie.SyncPath, codes []common.Hash) { - // Refill available tasks from the scheduler. - if fill := n - (len(s.trieTasks) + len(s.codeTasks)); fill > 0 { - nodes, paths, codes := s.sched.Missing(fill) - for i, hash := range nodes { - s.trieTasks[hash] = &trieTask{ - path: paths[i], - attempts: make(map[string]struct{}), - } - } - for _, hash := range codes { - s.codeTasks[hash] = &codeTask{ - attempts: make(map[string]struct{}), - } - } - } - // Find tasks that haven't been tried with the request's peer. Prefer code - // over trie nodes as those can be written to disk and forgotten about. - nodes = make([]common.Hash, 0, n) - paths = make([]trie.SyncPath, 0, n) - codes = make([]common.Hash, 0, n) - - req.trieTasks = make(map[common.Hash]*trieTask, n) - req.codeTasks = make(map[common.Hash]*codeTask, n) - - for hash, t := range s.codeTasks { - // Stop when we've gathered enough requests - if len(nodes)+len(codes) == n { - break - } - // Skip any requests we've already tried from this peer - if _, ok := t.attempts[req.peer.id]; ok { - continue - } - // Assign the request to this peer - t.attempts[req.peer.id] = struct{}{} - codes = append(codes, hash) - req.codeTasks[hash] = t - delete(s.codeTasks, hash) - } - for hash, t := range s.trieTasks { - // Stop when we've gathered enough requests - if len(nodes)+len(codes) == n { - break - } - // Skip any requests we've already tried from this peer - if _, ok := t.attempts[req.peer.id]; ok { - continue - } - // Assign the request to this peer - t.attempts[req.peer.id] = struct{}{} - - nodes = append(nodes, hash) - paths = append(paths, t.path) - - req.trieTasks[hash] = t - delete(s.trieTasks, hash) - } - req.nItems = uint16(len(nodes) + len(codes)) - return nodes, paths, codes -} - -// process iterates over a batch of delivered state data, injecting each item -// into a running state sync, re-queuing any items that were requested but not -// delivered. Returns whether the peer actually managed to deliver anything of -// value, and any error that occurred. 
-func (s *stateSync) process(req *stateReq) (int, error) { - // Collect processing stats and update progress if valid data was received - duplicate, unexpected, successful := 0, 0, 0 - - defer func(start time.Time) { - if duplicate > 0 || unexpected > 0 { - s.updateStats(0, duplicate, unexpected, time.Since(start)) - } - }(time.Now()) - - // Iterate over all the delivered data and inject one-by-one into the trie - for _, blob := range req.response { - hash, err := s.processNodeData(blob) - switch err { - case nil: - s.numUncommitted++ - s.bytesUncommitted += len(blob) - successful++ - case trie.ErrNotRequested: - unexpected++ - case trie.ErrAlreadyProcessed: - duplicate++ - default: - return successful, fmt.Errorf("invalid state node %s: %v", hash.TerminalString(), err) - } - // Delete from both queues (one delivery is enough for the syncer) - delete(req.trieTasks, hash) - delete(req.codeTasks, hash) - } - // Put unfulfilled tasks back into the retry queue - npeers := s.d.peers.Len() - for hash, task := range req.trieTasks { - // If the node did deliver something, missing items may be due to a protocol - // limit or a previous timeout + delayed delivery. Both cases should permit - // the node to retry the missing items (to avoid single-peer stalls). - if len(req.response) > 0 || req.timedOut() { - delete(task.attempts, req.peer.id) - } - // If we've requested the node too many times already, it may be a malicious - // sync where nobody has the right data. Abort. - if len(task.attempts) >= npeers { - return successful, fmt.Errorf("trie node %s failed with all peers (%d tries, %d peers)", hash.TerminalString(), len(task.attempts), npeers) - } - // Missing item, place into the retry queue. - s.trieTasks[hash] = task - } - for hash, task := range req.codeTasks { - // If the node did deliver something, missing items may be due to a protocol - // limit or a previous timeout + delayed delivery. Both cases should permit - // the node to retry the missing items (to avoid single-peer stalls). - if len(req.response) > 0 || req.timedOut() { - delete(task.attempts, req.peer.id) - } - // If we've requested the node too many times already, it may be a malicious - // sync where nobody has the right data. Abort. - if len(task.attempts) >= npeers { - return successful, fmt.Errorf("byte code %s failed with all peers (%d tries, %d peers)", hash.TerminalString(), len(task.attempts), npeers) - } - // Missing item, place into the retry queue. - s.codeTasks[hash] = task - } - return successful, nil -} - -// processNodeData tries to inject a trie node data blob delivered from a remote -// peer into the state trie, returning whether anything useful was written or any -// error occurred. -func (s *stateSync) processNodeData(blob []byte) (common.Hash, error) { - res := trie.SyncResult{Data: blob} - s.keccak.Reset() - s.keccak.Write(blob) - s.keccak.Read(res.Hash[:]) - err := s.sched.Process(res) - return res.Hash, err -} - -// updateStats bumps the various state sync progress counters and displays a log -// message for the user to see. 
-func (s *stateSync) updateStats(written, duplicate, unexpected int, duration time.Duration) { - s.d.syncStatsLock.Lock() - defer s.d.syncStatsLock.Unlock() - - s.d.syncStatsState.pending = uint64(s.sched.Pending()) - s.d.syncStatsState.processed += uint64(written) - s.d.syncStatsState.duplicate += uint64(duplicate) - s.d.syncStatsState.unexpected += uint64(unexpected) - - if written > 0 || duplicate > 0 || unexpected > 0 { - log.Info("Imported new state entries", "count", written, "elapsed", common.PrettyDuration(duration), "processed", s.d.syncStatsState.processed, "pending", s.d.syncStatsState.pending, "trieretry", len(s.trieTasks), "coderetry", len(s.codeTasks), "duplicate", s.d.syncStatsState.duplicate, "unexpected", s.d.syncStatsState.unexpected) - } - if written > 0 { - rawdb.WriteFastTrieProgress(s.d.stateDB, s.d.syncStatsState.processed) - } -} diff --git a/eth/downloader/testchain_test.go b/eth/downloader/testchain_test.go index 8757755ba..f47a10ed2 100644 --- a/eth/downloader/testchain_test.go +++ b/eth/downloader/testchain_test.go @@ -20,12 +20,14 @@ import ( "fmt" "math/big" "sync" + "time" "github.com/scroll-tech/go-ethereum/common" "github.com/scroll-tech/go-ethereum/consensus/ethash" "github.com/scroll-tech/go-ethereum/core" "github.com/scroll-tech/go-ethereum/core/rawdb" "github.com/scroll-tech/go-ethereum/core/types" + "github.com/scroll-tech/go-ethereum/core/vm" "github.com/scroll-tech/go-ethereum/crypto" "github.com/scroll-tech/go-ethereum/params" ) @@ -39,73 +41,110 @@ var ( ) // The common prefix of all test chains: -var testChainBase = newTestChain(blockCacheMaxItems+200, testGenesis) +var testChainBase *testChain // Different forks on top of the base chain: var testChainForkLightA, testChainForkLightB, testChainForkHeavy *testChain +var pregenerated bool + func init() { + // Reduce some of the parameters to make the tester faster + fullMaxForkAncestry = 10000 + lightMaxForkAncestry = 10000 + blockCacheMaxItems = 1024 + fsHeaderSafetyNet = 256 + fsHeaderContCheck = 500 * time.Millisecond + + testChainBase = newTestChain(blockCacheMaxItems+200, testGenesis) + var forkLen = int(fullMaxForkAncestry + 50) var wg sync.WaitGroup + + // Generate the test chains to seed the peers with wg.Add(3) go func() { testChainForkLightA = testChainBase.makeFork(forkLen, false, 1); wg.Done() }() go func() { testChainForkLightB = testChainBase.makeFork(forkLen, false, 2); wg.Done() }() go func() { testChainForkHeavy = testChainBase.makeFork(forkLen, true, 3); wg.Done() }() wg.Wait() + + // Generate the test peers used by the tests to avoid overloading during testing. + // These seemingly random chains are used in various downloader tests. We're just + // pre-generating them here. 
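+	// Any chain a test requests later that is missing from this list will
+	// panic inside newTestBlockchain, since generation is forbidden once init
+	// completes. For instance (illustrative lengths, only the first is listed):
+	//
+	//	bc := newTestBlockchain(testChainBase.shorten(1).blocks[1:]) // cached, fine
+	//	bc = newTestBlockchain(testChainBase.shorten(7).blocks[1:])  // unlisted, panics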
+ chains := []*testChain{ + testChainBase, + testChainForkLightA, + testChainForkLightB, + testChainForkHeavy, + testChainBase.shorten(1), + testChainBase.shorten(blockCacheMaxItems - 15), + testChainBase.shorten((blockCacheMaxItems - 15) / 2), + testChainBase.shorten(blockCacheMaxItems - 15 - 5), + testChainBase.shorten(MaxHeaderFetch), + testChainBase.shorten(800), + testChainBase.shorten(800 / 2), + testChainBase.shorten(800 / 3), + testChainBase.shorten(800 / 4), + testChainBase.shorten(800 / 5), + testChainBase.shorten(800 / 6), + testChainBase.shorten(800 / 7), + testChainBase.shorten(800 / 8), + testChainBase.shorten(3*fsHeaderSafetyNet + 256 + fsMinFullBlocks), + testChainBase.shorten(fsMinFullBlocks + 256 - 1), + testChainForkLightA.shorten(len(testChainBase.blocks) + 80), + testChainForkLightB.shorten(len(testChainBase.blocks) + 81), + testChainForkLightA.shorten(len(testChainBase.blocks) + MaxHeaderFetch), + testChainForkLightB.shorten(len(testChainBase.blocks) + MaxHeaderFetch), + testChainForkHeavy.shorten(len(testChainBase.blocks) + 79), + } + wg.Add(len(chains)) + for _, chain := range chains { + go func(blocks []*types.Block) { + newTestBlockchain(blocks) + wg.Done() + }(chain.blocks[1:]) + } + wg.Wait() + + // Mark the chains pregenerated. Generating a new one will lead to a panic. + pregenerated = true } type testChain struct { - genesis *types.Block - chain []common.Hash - headerm map[common.Hash]*types.Header - blockm map[common.Hash]*types.Block - receiptm map[common.Hash][]*types.Receipt - tdm map[common.Hash]*big.Int + blocks []*types.Block } // newTestChain creates a blockchain of the given length. func newTestChain(length int, genesis *types.Block) *testChain { - tc := new(testChain).copy(length) - tc.genesis = genesis - tc.chain = append(tc.chain, genesis.Hash()) - tc.headerm[tc.genesis.Hash()] = tc.genesis.Header() - tc.tdm[tc.genesis.Hash()] = tc.genesis.Difficulty() - tc.blockm[tc.genesis.Hash()] = tc.genesis + tc := &testChain{ + blocks: []*types.Block{genesis}, + } tc.generate(length-1, 0, genesis, false) return tc } // makeFork creates a fork on top of the test chain. func (tc *testChain) makeFork(length int, heavy bool, seed byte) *testChain { - fork := tc.copy(tc.len() + length) - fork.generate(length, seed, tc.headBlock(), heavy) + fork := tc.copy(len(tc.blocks) + length) + fork.generate(length, seed, tc.blocks[len(tc.blocks)-1], heavy) return fork } // shorten creates a copy of the chain with the given length. It panics if the // length is longer than the number of available blocks. 
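+// For instance, tc.shorten(MaxHeaderFetch) keeps the genesis plus the first
+// MaxHeaderFetch-1 generated blocks; the copy shares block pointers with the
+// original chain.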
func (tc *testChain) shorten(length int) *testChain { - if length > tc.len() { - panic(fmt.Errorf("can't shorten test chain to %d blocks, it's only %d blocks long", length, tc.len())) + if length > len(tc.blocks) { + panic(fmt.Errorf("can't shorten test chain to %d blocks, it's only %d blocks long", length, len(tc.blocks))) } return tc.copy(length) } func (tc *testChain) copy(newlen int) *testChain { - cpy := &testChain{ - genesis: tc.genesis, - headerm: make(map[common.Hash]*types.Header, newlen), - blockm: make(map[common.Hash]*types.Block, newlen), - receiptm: make(map[common.Hash][]*types.Receipt, newlen), - tdm: make(map[common.Hash]*big.Int, newlen), + if newlen > len(tc.blocks) { + newlen = len(tc.blocks) } - for i := 0; i < len(tc.chain) && i < newlen; i++ { - hash := tc.chain[i] - cpy.chain = append(cpy.chain, tc.chain[i]) - cpy.tdm[hash] = tc.tdm[hash] - cpy.blockm[hash] = tc.blockm[hash] - cpy.headerm[hash] = tc.headerm[hash] - cpy.receiptm[hash] = tc.receiptm[hash] + cpy := &testChain{ + blocks: append([]*types.Block{}, tc.blocks[:newlen]...), } return cpy } @@ -115,17 +154,14 @@ func (tc *testChain) copy(newlen int) *testChain { // contains a transaction and every 5th an uncle to allow testing correct block // reassembly. func (tc *testChain) generate(n int, seed byte, parent *types.Block, heavy bool) { - // start := time.Now() - // defer func() { fmt.Printf("test chain generated in %v\n", time.Since(start)) }() - - blocks, receipts := core.GenerateChain(params.TestChainConfig, parent, ethash.NewFaker(), testDB, n, func(i int, block *core.BlockGen) { + blocks, _ := core.GenerateChain(params.TestChainConfig, parent, ethash.NewFaker(), testDB, n, func(i int, block *core.BlockGen) { block.SetCoinbase(common.Address{seed}) // If a heavy chain is requested, delay blocks to raise difficulty if heavy { - block.OffsetTime(-1) + block.OffsetTime(-9) } // Include transactions to the miner to make blocks more interesting. - if parent == tc.genesis && i%22 == 0 { + if parent == tc.blocks[0] && i%22 == 0 { signer := types.MakeSigner(params.TestChainConfig, block.Number()) tx, err := types.SignTx(types.NewTransaction(block.TxNonce(testAddress), common.Address{seed}, big.NewInt(1000), params.TxGas, block.BaseFee(), nil), signer, testKey) if err != nil { @@ -136,95 +172,56 @@ func (tc *testChain) generate(n int, seed byte, parent *types.Block, heavy bool) // if the block number is a multiple of 5, add a bonus uncle to the block if i > 0 && i%5 == 0 { block.AddUncle(&types.Header{ - ParentHash: block.PrevBlock(i - 1).Hash(), + ParentHash: block.PrevBlock(i - 2).Hash(), Number: big.NewInt(block.Number().Int64() - 1), }) } }) - - // Convert the block-chain into a hash-chain and header/block maps - td := new(big.Int).Set(tc.td(parent.Hash())) - for i, b := range blocks { - td := td.Add(td, b.Difficulty()) - hash := b.Hash() - tc.chain = append(tc.chain, hash) - tc.blockm[hash] = b - tc.headerm[hash] = b.Header() - tc.receiptm[hash] = receipts[i] - tc.tdm[hash] = new(big.Int).Set(td) - } -} - -// len returns the total number of blocks in the chain. -func (tc *testChain) len() int { - return len(tc.chain) -} - -// headBlock returns the head of the chain. -func (tc *testChain) headBlock() *types.Block { - return tc.blockm[tc.chain[len(tc.chain)-1]] + tc.blocks = append(tc.blocks, blocks...) } -// td returns the total difficulty of the given block. 
-func (tc *testChain) td(hash common.Hash) *big.Int { - return tc.tdm[hash] -} +var ( + testBlockchains = make(map[common.Hash]*testBlockchain) + testBlockchainsLock sync.Mutex +) -// headersByHash returns headers in order from the given hash. -func (tc *testChain) headersByHash(origin common.Hash, amount int, skip int, reverse bool) []*types.Header { - num, _ := tc.hashToNumber(origin) - return tc.headersByNumber(num, amount, skip, reverse) +type testBlockchain struct { + chain *core.BlockChain + gen sync.Once } -// headersByNumber returns headers from the given number. -func (tc *testChain) headersByNumber(origin uint64, amount int, skip int, reverse bool) []*types.Header { - result := make([]*types.Header, 0, amount) - - if !reverse { - for num := origin; num < uint64(len(tc.chain)) && len(result) < amount; num += uint64(skip) + 1 { - if header, ok := tc.headerm[tc.chain[int(num)]]; ok { - result = append(result, header) - } - } - } else { - for num := int64(origin); num >= 0 && len(result) < amount; num -= int64(skip) + 1 { - if header, ok := tc.headerm[tc.chain[int(num)]]; ok { - result = append(result, header) - } - } +// newTestBlockchain creates a blockchain database built by running the given blocks, +// either actually running them, or reusing a previously created one. The returned +// chains are *shared*, so *do not* mutate them. +func newTestBlockchain(blocks []*types.Block) *core.BlockChain { + // Retrieve an existing database, or create a new one + head := testGenesis.Hash() + if len(blocks) > 0 { + head = blocks[len(blocks)-1].Hash() } - return result -} - -// receipts returns the receipts of the given block hashes. -func (tc *testChain) receipts(hashes []common.Hash) [][]*types.Receipt { - results := make([][]*types.Receipt, 0, len(hashes)) - for _, hash := range hashes { - if receipt, ok := tc.receiptm[hash]; ok { - results = append(results, receipt) - } + testBlockchainsLock.Lock() + if _, ok := testBlockchains[head]; !ok { + testBlockchains[head] = new(testBlockchain) } - return results -} + tbc := testBlockchains[head] + testBlockchainsLock.Unlock() -// bodies returns the block bodies of the given block hashes. 
-func (tc *testChain) bodies(hashes []common.Hash) ([][]*types.Transaction, [][]*types.Header) { - transactions := make([][]*types.Transaction, 0, len(hashes)) - uncles := make([][]*types.Header, 0, len(hashes)) - for _, hash := range hashes { - if block, ok := tc.blockm[hash]; ok { - transactions = append(transactions, block.Transactions()) - uncles = append(uncles, block.Uncles()) + // Ensure that the database is generated + tbc.gen.Do(func() { + if pregenerated { + panic("Requested chain generation outside of init") } - } - return transactions, uncles -} + db := rawdb.NewMemoryDatabase() + core.GenesisBlockForTesting(db, testAddress, big.NewInt(1000000000000000)) -func (tc *testChain) hashToNumber(target common.Hash) (uint64, bool) { - for num, hash := range tc.chain { - if hash == target { - return uint64(num), true + chain, err := core.NewBlockChain(db, nil, params.TestChainConfig, ethash.NewFaker(), vm.Config{}, nil, nil) + if err != nil { + panic(err) } - } - return 0, false + if n, err := chain.InsertChain(blocks); err != nil { + panic(fmt.Sprintf("block %d: %v", n, err)) + } + tbc.chain = chain + }) + return tbc.chain } diff --git a/eth/downloader/types.go b/eth/downloader/types.go deleted file mode 100644 index 388fa16b5..000000000 --- a/eth/downloader/types.go +++ /dev/null @@ -1,79 +0,0 @@ -// Copyright 2015 The go-ethereum Authors -// This file is part of the go-ethereum library. -// -// The go-ethereum library is free software: you can redistribute it and/or modify -// it under the terms of the GNU Lesser General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. -// -// The go-ethereum library is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU Lesser General Public License for more details. -// -// You should have received a copy of the GNU Lesser General Public License -// along with the go-ethereum library. If not, see . - -package downloader - -import ( - "fmt" - - "github.com/scroll-tech/go-ethereum/core/types" -) - -// peerDropFn is a callback type for dropping a peer detected as malicious. -type peerDropFn func(id string) - -// dataPack is a data message returned by a peer for some query. -type dataPack interface { - PeerId() string - Items() int - Stats() string -} - -// headerPack is a batch of block headers returned by a peer. -type headerPack struct { - peerID string - headers []*types.Header -} - -func (p *headerPack) PeerId() string { return p.peerID } -func (p *headerPack) Items() int { return len(p.headers) } -func (p *headerPack) Stats() string { return fmt.Sprintf("%d", len(p.headers)) } - -// bodyPack is a batch of block bodies returned by a peer. -type bodyPack struct { - peerID string - transactions [][]*types.Transaction - uncles [][]*types.Header -} - -func (p *bodyPack) PeerId() string { return p.peerID } -func (p *bodyPack) Items() int { - if len(p.transactions) <= len(p.uncles) { - return len(p.transactions) - } - return len(p.uncles) -} -func (p *bodyPack) Stats() string { return fmt.Sprintf("%d:%d", len(p.transactions), len(p.uncles)) } - -// receiptPack is a batch of receipts returned by a peer. 
-type receiptPack struct { - peerID string - receipts [][]*types.Receipt -} - -func (p *receiptPack) PeerId() string { return p.peerID } -func (p *receiptPack) Items() int { return len(p.receipts) } -func (p *receiptPack) Stats() string { return fmt.Sprintf("%d", len(p.receipts)) } - -// statePack is a batch of states returned by a peer. -type statePack struct { - peerID string - states [][]byte -} - -func (p *statePack) PeerId() string { return p.peerID } -func (p *statePack) Items() int { return len(p.states) } -func (p *statePack) Stats() string { return fmt.Sprintf("%d", len(p.states)) } diff --git a/eth/fetcher/block_fetcher.go b/eth/fetcher/block_fetcher.go index 2ff66c0a6..4efadd19d 100644 --- a/eth/fetcher/block_fetcher.go +++ b/eth/fetcher/block_fetcher.go @@ -26,6 +26,7 @@ import ( "github.com/scroll-tech/go-ethereum/common/prque" "github.com/scroll-tech/go-ethereum/consensus" "github.com/scroll-tech/go-ethereum/core/types" + "github.com/scroll-tech/go-ethereum/eth/protocols/eth" "github.com/scroll-tech/go-ethereum/log" "github.com/scroll-tech/go-ethereum/metrics" "github.com/scroll-tech/go-ethereum/trie" @@ -74,10 +75,10 @@ type HeaderRetrievalFn func(common.Hash) *types.Header type blockRetrievalFn func(common.Hash) *types.Block // headerRequesterFn is a callback type for sending a header retrieval request. -type headerRequesterFn func(common.Hash) error +type headerRequesterFn func(common.Hash, chan *eth.Response) (*eth.Request, error) // bodyRequesterFn is a callback type for sending a body retrieval request. -type bodyRequesterFn func([]common.Hash) error +type bodyRequesterFn func([]common.Hash, chan *eth.Response) (*eth.Request, error) // headerVerifierFn is a callback type to verify a block's header for fast propagation. type headerVerifierFn func(header *types.Header) error @@ -461,15 +462,39 @@ func (f *BlockFetcher) loop() { // Create a closure of the fetch and schedule in on a new thread fetchHeader, hashes := f.fetching[hashes[0]].fetchHeader, hashes - go func() { + go func(peer string) { if f.fetchingHook != nil { f.fetchingHook(hashes) } for _, hash := range hashes { headerFetchMeter.Mark(1) - fetchHeader(hash) // Suboptimal, but protocol doesn't allow batch header retrievals + go func(hash common.Hash) { + resCh := make(chan *eth.Response) + + req, err := fetchHeader(hash, resCh) + if err != nil { + return // Legacy code, yolo + } + defer req.Close() + + timeout := time.NewTimer(2 * fetchTimeout) // 2x leeway before dropping the peer + defer timeout.Stop() + + select { + case res := <-resCh: + res.Done <- nil + f.FilterHeaders(peer, *res.Res.(*eth.BlockHeadersPacket), time.Now().Add(res.Time)) + + case <-timeout.C: + // The peer didn't respond in time. The request + // was already rescheduled at this point, we were + // waiting for a catchup. With an unresponsive + // peer however, it's a protocol violation. 
+ f.dropPeer(peer) + } + }(hash) } - }() + }(peer) } // Schedule the next fetch if blocks are still pending f.rescheduleFetch(fetchTimer) @@ -497,8 +522,36 @@ func (f *BlockFetcher) loop() { if f.completingHook != nil { f.completingHook(hashes) } + fetchBodies := f.completing[hashes[0]].fetchBodies bodyFetchMeter.Mark(int64(len(hashes))) - go f.completing[hashes[0]].fetchBodies(hashes) + + go func(peer string, hashes []common.Hash) { + resCh := make(chan *eth.Response) + + req, err := fetchBodies(hashes, resCh) + if err != nil { + return // Legacy code, yolo + } + defer req.Close() + + timeout := time.NewTimer(2 * fetchTimeout) // 2x leeway before dropping the peer + defer timeout.Stop() + + select { + case res := <-resCh: + res.Done <- nil + + txs, uncles := res.Res.(*eth.BlockBodiesPacket).Unpack() + f.FilterBodies(peer, txs, uncles, time.Now()) + + case <-timeout.C: + // The peer didn't respond in time. The request + // was already rescheduled at this point, we were + // waiting for a catchup. With an unresponsive + // peer however, it's a protocol violation. + f.dropPeer(peer) + } + }(peer, hashes) } // Schedule the next fetch if blocks are still pending f.rescheduleComplete(completeTimer) diff --git a/eth/fetcher/block_fetcher_test.go b/eth/fetcher/block_fetcher_test.go index 6553df3c2..0db5fbb31 100644 --- a/eth/fetcher/block_fetcher_test.go +++ b/eth/fetcher/block_fetcher_test.go @@ -30,6 +30,7 @@ import ( "github.com/scroll-tech/go-ethereum/core/rawdb" "github.com/scroll-tech/go-ethereum/core/types" "github.com/scroll-tech/go-ethereum/crypto" + "github.com/scroll-tech/go-ethereum/eth/protocols/eth" "github.com/scroll-tech/go-ethereum/params" "github.com/scroll-tech/go-ethereum/trie" ) @@ -60,8 +61,8 @@ func makeChain(n int, seed byte, parent *types.Block) ([]common.Hash, map[common block.AddTx(tx) } // If the block number is a multiple of 5, add a bonus uncle to the block - if i%5 == 0 { - block.AddUncle(&types.Header{ParentHash: block.PrevBlock(i - 1).Hash(), Number: big.NewInt(int64(i - 1))}) + if i > 0 && i%5 == 0 { + block.AddUncle(&types.Header{ParentHash: block.PrevBlock(i - 2).Hash(), Number: big.NewInt(int64(i - 1))}) } }) hashes := make([]common.Hash, n+1) @@ -195,16 +196,26 @@ func (f *fetcherTester) makeHeaderFetcher(peer string, blocks map[common.Hash]*t closure[hash] = block } // Create a function that return a header from the closure - return func(hash common.Hash) error { + return func(hash common.Hash, sink chan *eth.Response) (*eth.Request, error) { // Gather the blocks to return headers := make([]*types.Header, 0, 1) if block, ok := closure[hash]; ok { headers = append(headers, block.Header()) } // Return on a new thread - go f.fetcher.FilterHeaders(peer, headers, time.Now().Add(drift)) - - return nil + req := ð.Request{ + Peer: peer, + } + res := ð.Response{ + Req: req, + Res: (*eth.BlockHeadersPacket)(&headers), + Time: drift, + Done: make(chan error, 1), // Ignore the returned status + } + go func() { + sink <- res + }() + return req, nil } } @@ -215,7 +226,7 @@ func (f *fetcherTester) makeBodyFetcher(peer string, blocks map[common.Hash]*typ closure[hash] = block } // Create a function that returns blocks from the closure - return func(hashes []common.Hash) error { + return func(hashes []common.Hash, sink chan *eth.Response) (*eth.Request, error) { // Gather the block bodies to return transactions := make([][]*types.Transaction, 0, len(hashes)) uncles := make([][]*types.Header, 0, len(hashes)) @@ -227,14 +238,33 @@ func (f *fetcherTester) makeBodyFetcher(peer 
string, blocks map[common.Hash]*typ } } // Return on a new thread - go f.fetcher.FilterBodies(peer, transactions, uncles, time.Now().Add(drift)) - - return nil + bodies := make([]*eth.BlockBody, len(transactions)) + for i, txs := range transactions { + bodies[i] = ð.BlockBody{ + Transactions: txs, + Uncles: uncles[i], + } + } + req := ð.Request{ + Peer: peer, + } + res := ð.Response{ + Req: req, + Res: (*eth.BlockBodiesPacket)(&bodies), + Time: drift, + Done: make(chan error, 1), // Ignore the returned status + } + go func() { + sink <- res + }() + return req, nil } } // verifyFetchingEvent verifies that one single event arrive on a fetching channel. func verifyFetchingEvent(t *testing.T, fetching chan []common.Hash, arrive bool) { + t.Helper() + if arrive { select { case <-fetching: @@ -252,6 +282,8 @@ func verifyFetchingEvent(t *testing.T, fetching chan []common.Hash, arrive bool) // verifyCompletingEvent verifies that one single event arrive on an completing channel. func verifyCompletingEvent(t *testing.T, completing chan []common.Hash, arrive bool) { + t.Helper() + if arrive { select { case <-completing: @@ -269,6 +301,8 @@ func verifyCompletingEvent(t *testing.T, completing chan []common.Hash, arrive b // verifyImportEvent verifies that one single event arrive on an import channel. func verifyImportEvent(t *testing.T, imported chan interface{}, arrive bool) { + t.Helper() + if arrive { select { case <-imported: @@ -287,6 +321,8 @@ func verifyImportEvent(t *testing.T, imported chan interface{}, arrive bool) { // verifyImportCount verifies that exactly count number of events arrive on an // import hook channel. func verifyImportCount(t *testing.T, imported chan interface{}, count int) { + t.Helper() + for i := 0; i < count; i++ { select { case <-imported: @@ -299,6 +335,8 @@ func verifyImportCount(t *testing.T, imported chan interface{}, count int) { // verifyImportDone verifies that no more events are arriving on an import channel. func verifyImportDone(t *testing.T, imported chan interface{}) { + t.Helper() + select { case <-imported: t.Fatalf("extra block imported") @@ -308,6 +346,8 @@ func verifyImportDone(t *testing.T, imported chan interface{}) { // verifyChainHeight verifies the chain height is as expected. 
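Before the remaining assertion helpers (verifyChainHeight below merely gains a t.Helper() call), it is worth spelling out the caller-side shape that the fetcher and these test fetchers now share. A hedged sketch, with dropPeer passed in and fetchTimeout as a stand-in constant rather than the fetcher's own:

```go
// Issue a request with a private sink channel, then either consume the reply
// (signalling Done to release the reader loop) or time out and penalize the
// peer. The request is always closed so late replies get discarded.
func fetchOneHeader(peer *eth.Peer, hash common.Hash, dropPeer func(string)) ([]*types.Header, error) {
	const fetchTimeout = 5 * time.Second // stand-in for the fetcher's constant

	resCh := make(chan *eth.Response)
	req, err := peer.RequestOneHeader(hash, resCh)
	if err != nil {
		return nil, err
	}
	defer req.Close() // always untrack, even on timeout

	timeout := time.NewTimer(2 * fetchTimeout) // 2x leeway, mirroring the fetcher
	defer timeout.Stop()

	select {
	case res := <-resCh:
		res.Done <- nil // unblock the peer's reader loop
		return []*types.Header(*res.Res.(*eth.BlockHeadersPacket)), nil
	case <-timeout.C:
		dropPeer(peer.ID()) // unresponsive peers violate the protocol
		return nil, errors.New("header fetch timed out")
	}
}
```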
func verifyChainHeight(t *testing.T, fetcher *fetcherTester, height uint64) { + t.Helper() + if fetcher.chainHeight() != height { t.Fatalf("chain height mismatch, got %d, want %d", fetcher.chainHeight(), height) } @@ -368,13 +408,13 @@ func testConcurrentAnnouncements(t *testing.T, light bool) { secondBodyFetcher := tester.makeBodyFetcher("second", blocks, 0) counter := uint32(0) - firstHeaderWrapper := func(hash common.Hash) error { + firstHeaderWrapper := func(hash common.Hash, sink chan *eth.Response) (*eth.Request, error) { atomic.AddUint32(&counter, 1) - return firstHeaderFetcher(hash) + return firstHeaderFetcher(hash, sink) } - secondHeaderWrapper := func(hash common.Hash) error { + secondHeaderWrapper := func(hash common.Hash, sink chan *eth.Response) (*eth.Request, error) { atomic.AddUint32(&counter, 1) - return secondHeaderFetcher(hash) + return secondHeaderFetcher(hash, sink) } // Iteratively announce blocks until all are imported imported := make(chan interface{}) @@ -468,15 +508,20 @@ func testPendingDeduplication(t *testing.T, light bool) { delay := 50 * time.Millisecond counter := uint32(0) - headerWrapper := func(hash common.Hash) error { + headerWrapper := func(hash common.Hash, sink chan *eth.Response) (*eth.Request, error) { atomic.AddUint32(&counter, 1) // Simulate a long running fetch - go func() { - time.Sleep(delay) - headerFetcher(hash) - }() - return nil + resink := make(chan *eth.Response) + req, err := headerFetcher(hash, resink) + if err == nil { + go func() { + res := <-resink + time.Sleep(delay) + sink <- res + }() + } + return req, err } checkNonExist := func() bool { return tester.getBlock(hashes[0]) == nil diff --git a/eth/handler.go b/eth/handler.go index 496cb034e..898f1795a 100644 --- a/eth/handler.go +++ b/eth/handler.go @@ -37,7 +37,6 @@ import ( "github.com/scroll-tech/go-ethereum/log" "github.com/scroll-tech/go-ethereum/p2p" "github.com/scroll-tech/go-ethereum/params" - "github.com/scroll-tech/go-ethereum/trie" ) const ( @@ -80,8 +79,8 @@ type handlerConfig struct { Chain *core.BlockChain // Blockchain to serve data from TxPool txPool // Transaction pool to propagate from Network uint64 // Network identifier to adfvertise - Sync downloader.SyncMode // Whether to fast or full sync - BloomCache uint64 // Megabytes to alloc for fast sync bloom + Sync downloader.SyncMode // Whether to snap or full sync + BloomCache uint64 // Megabytes to alloc for snap sync bloom EventMux *event.TypeMux // Legacy event mux, deprecate for `feed` Checkpoint *params.TrustedCheckpoint // Hard coded checkpoint for sync challenges Whitelist map[uint64]common.Hash // Hard coded whitelist for sync challenged @@ -91,8 +90,7 @@ type handler struct { networkID uint64 forkFilter forkid.Filter // Fork ID filter, constant across the lifetime of the node - fastSync uint32 // Flag whether fast sync is enabled (gets disabled if we already have blocks) - snapSync uint32 // Flag whether fast sync should operate on top of the snap protocol + snapSync uint32 // Flag whether snap sync is enabled (gets disabled if we already have blocks) acceptTxs uint32 // Flag whether we're considered synchronised (enables transaction processing) checkpointNumber uint64 // Block number for the sync progress validator to cross reference @@ -104,7 +102,6 @@ type handler struct { maxPeers int downloader *downloader.Downloader - stateBloom *trie.SyncBloom blockFetcher *fetcher.BlockFetcher txFetcher *fetcher.TxFetcher peers *peerSet @@ -142,29 +139,26 @@ func newHandler(config *handlerConfig) (*handler, error) { 
quitSync: make(chan struct{}), } if config.Sync == downloader.FullSync { - // The database seems empty as the current block is the genesis. Yet the fast - // block is ahead, so fast sync was enabled for this node at a certain point. + // The database seems empty as the current block is the genesis. Yet the snap + // block is ahead, so snap sync was enabled for this node at a certain point. // The scenarios where this can happen is - // * if the user manually (or via a bad block) rolled back a fast sync node + // * if the user manually (or via a bad block) rolled back a snap sync node // below the sync point. - // * the last fast sync is not finished while user specifies a full sync this + // * the last snap sync is not finished while user specifies a full sync this // time. But we don't have any recent state for full sync. - // In these cases however it's safe to reenable fast sync. + // In these cases however it's safe to reenable snap sync. fullBlock, fastBlock := h.chain.CurrentBlock(), h.chain.CurrentFastBlock() if fullBlock.NumberU64() == 0 && fastBlock.NumberU64() > 0 { - h.fastSync = uint32(1) - log.Warn("Switch sync mode from full sync to fast sync") + h.snapSync = uint32(1) + log.Warn("Switch sync mode from full sync to snap sync") } } else { if h.chain.CurrentBlock().NumberU64() > 0 { - // Print warning log if database is not empty to run fast sync. - log.Warn("Switch sync mode from fast sync to full sync") + // Print warning log if database is not empty to run snap sync. + log.Warn("Switch sync mode from snap sync to full sync") } else { - // If fast sync was requested and our database is empty, grant it - h.fastSync = uint32(1) - if config.Sync == downloader.SnapSync { - h.snapSync = uint32(1) - } + // If snap sync was requested and our database is empty, grant it + h.snapSync = uint32(1) } } // If we have trusted checkpoints, enforce them on the chain @@ -172,17 +166,30 @@ func newHandler(config *handlerConfig) (*handler, error) { h.checkpointNumber = (config.Checkpoint.SectionIndex+1)*params.CHTFrequency - 1 h.checkpointHash = config.Checkpoint.SectionHead } - // Construct the downloader (long sync) and its backing state bloom if fast + // If sync succeeds, pass a callback to potentially disable snap sync mode + // and enable transaction propagation. + success := func() { + // If we were running snap sync and it finished, disable doing another + // round on next sync cycle + if atomic.LoadUint32(&h.snapSync) == 1 { + log.Info("Snap sync complete, auto disabling") + atomic.StoreUint32(&h.snapSync, 0) + } + // If we've successfully finished a sync cycle and passed any required + // checkpoint, enable accepting transactions from the network + head := h.chain.CurrentBlock() + if head.NumberU64() >= h.checkpointNumber { + // Checkpoint passed, sanity check the timestamp to have a fallback mechanism + // for non-checkpointed (number = 0) private networks. + if head.Time() >= uint64(time.Now().AddDate(0, -1, 0).Unix()) { + atomic.StoreUint32(&h.acceptTxs, 1) + } + } + } + // Construct the downloader (long sync) and its backing state bloom if snap // sync is requested. The downloader is responsible for deallocating the state // bloom when it's done. - // Note: we don't enable it if snap-sync is performed, since it's very heavy - // and the heal-portion of the snap sync is much lighter than fast. 
What we particularly - // want to avoid, is a 90%-finished (but restarted) snap-sync to begin - // indexing the entire trie - if atomic.LoadUint32(&h.fastSync) == 1 && atomic.LoadUint32(&h.snapSync) == 0 { - h.stateBloom = trie.NewSyncBloom(config.BloomCache, config.Database) - } - h.downloader = downloader.New(h.checkpointNumber, config.Database, h.stateBloom, h.eventMux, h.chain, nil, h.removePeer) + h.downloader = downloader.New(h.checkpointNumber, config.Database, h.eventMux, h.chain, nil, h.removePeer, success) // Construct the fetcher (short sync) validator := func(header *types.Header) error { @@ -202,12 +209,12 @@ func newHandler(config *handlerConfig) (*handler, error) { log.Warn("Unsynced yet, discarded propagated block", "number", blocks[0].Number(), "hash", blocks[0].Hash()) return 0, nil } - // If fast sync is running, deny importing weird blocks. This is a problematic - // clause when starting up a new network, because fast-syncing miners might not + // If snap sync is running, deny importing weird blocks. This is a problematic + // clause when starting up a new network, because snap-syncing miners might not // accept each others' blocks until a restart. Unfortunately we haven't figured // out a way yet where nodes can decide unilaterally whether the network is new // or not. This should be fixed if we figure out a solution. - if atomic.LoadUint32(&h.fastSync) == 1 { + if atomic.LoadUint32(&h.snapSync) == 1 { log.Warn("Fast syncing, discarded propagated block", "number", blocks[0].Number(), "hash", blocks[0].Hash()) return 0, nil } @@ -308,30 +315,103 @@ func (h *handler) runEthPeer(peer *eth.Peer, handler eth.Handler) error { // after this will be sent via broadcasts. h.syncTransactions(peer) + // Create a notification channel for pending requests if the peer goes down + dead := make(chan struct{}) + defer close(dead) + // If we have a trusted CHT, reject all peers below that (avoid fast sync eclipse) if h.checkpointHash != (common.Hash{}) { // Request the peer's checkpoint header for chain height/weight validation - if err := peer.RequestHeadersByNumber(h.checkpointNumber, 1, 0, false); err != nil { + resCh := make(chan *eth.Response) + + req, err := peer.RequestHeadersByNumber(h.checkpointNumber, 1, 0, false, resCh) + if err != nil { return err } // Start a timer to disconnect if the peer doesn't reply in time - p.syncDrop = time.AfterFunc(syncChallengeTimeout, func() { - peer.Log().Warn("Checkpoint challenge timed out, dropping", "addr", peer.RemoteAddr(), "type", peer.Name()) - h.removePeer(peer.ID()) - }) - // Make sure it's cleaned up if the peer dies off - defer func() { - if p.syncDrop != nil { - p.syncDrop.Stop() - p.syncDrop = nil + go func() { + // Ensure the request gets cancelled in case of error/drop + defer req.Close() + + timeout := time.NewTimer(syncChallengeTimeout) + defer timeout.Stop() + + select { + case res := <-resCh: + headers := ([]*types.Header)(*res.Res.(*eth.BlockHeadersPacket)) + if len(headers) == 0 { + // If we're doing a snap sync, we must enforce the checkpoint + // block to avoid eclipse attacks. Unsynced nodes are welcome + // to connect after we're done joining the network. 
+ if atomic.LoadUint32(&h.snapSync) == 1 { + peer.Log().Warn("Dropping unsynced node during sync", "addr", peer.RemoteAddr(), "type", peer.Name()) + res.Done <- errors.New("unsynced node cannot serve sync") + return + } + res.Done <- nil + return + } + // Validate the header and either drop the peer or continue + if len(headers) > 1 { + res.Done <- errors.New("too many headers in checkpoint response") + return + } + if headers[0].Hash() != h.checkpointHash { + res.Done <- errors.New("checkpoint hash mismatch") + return + } + res.Done <- nil + + case <-timeout.C: + peer.Log().Warn("Checkpoint challenge timed out, dropping", "addr", peer.RemoteAddr(), "type", peer.Name()) + h.removePeer(peer.ID()) + + case <-dead: + // Peer handler terminated, abort all goroutines } }() } // If we have any explicit whitelist block hashes, request them - for number := range h.whitelist { - if err := peer.RequestHeadersByNumber(number, 1, 0, false); err != nil { + for number, hash := range h.whitelist { + resCh := make(chan *eth.Response) + + req, err := peer.RequestHeadersByNumber(number, 1, 0, false, resCh) + if err != nil { return err } + go func(number uint64, hash common.Hash, req *eth.Request) { + // Ensure the request gets cancelled in case of error/drop + defer req.Close() + + timeout := time.NewTimer(syncChallengeTimeout) + defer timeout.Stop() + + select { + case res := <-resCh: + headers := ([]*types.Header)(*res.Res.(*eth.BlockHeadersPacket)) + if len(headers) == 0 { + // Whitelisted blocks are allowed to be missing if the remote + // node is not yet synced + res.Done <- nil + return + } + // Validate the header and either drop the peer or continue + if len(headers) > 1 { + res.Done <- errors.New("too many headers in whitelist response") + return + } + if headers[0].Number.Uint64() != number || headers[0].Hash() != hash { + peer.Log().Info("Whitelist mismatch, dropping peer", "number", number, "hash", headers[0].Hash(), "want", hash) + res.Done <- errors.New("whitelist block mismatch") + return + } + peer.Log().Debug("Whitelist block verified", "number", number, "hash", hash) + + case <-timeout.C: + peer.Log().Warn("Whitelist challenge timed out, dropping", "addr", peer.RemoteAddr(), "type", peer.Name()) + h.removePeer(peer.ID()) + } + }(number, hash, req) } // Handle incoming messages until the connection is torn down return handler(peer) diff --git a/eth/handler_eth.go b/eth/handler_eth.go index 1a3aff8aa..66cd7740d 100644 --- a/eth/handler_eth.go +++ b/eth/handler_eth.go @@ -17,7 +17,6 @@ package eth import ( - "errors" "fmt" "math/big" "sync/atomic" @@ -27,18 +26,15 @@ import ( "github.com/scroll-tech/go-ethereum/core" "github.com/scroll-tech/go-ethereum/core/types" "github.com/scroll-tech/go-ethereum/eth/protocols/eth" - "github.com/scroll-tech/go-ethereum/log" "github.com/scroll-tech/go-ethereum/p2p/enode" - "github.com/scroll-tech/go-ethereum/trie" ) // ethHandler implements the eth.Backend interface to handle the various network // packets that are sent as replies or broadcasts. type ethHandler handler -func (h *ethHandler) Chain() *core.BlockChain { return h.chain } -func (h *ethHandler) StateBloom() *trie.SyncBloom { return h.stateBloom } -func (h *ethHandler) TxPool() eth.TxPool { return h.txpool } +func (h *ethHandler) Chain() *core.BlockChain { return h.chain } +func (h *ethHandler) TxPool() eth.TxPool { return h.txpool } // RunPeer is invoked when a peer joins on the `eth` protocol. 
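Both the checkpoint and whitelist challenges above deliver their verdict through res.Done rather than calling back into the handler: the dispatcher's delivery goroutine returns that error to the peer's read loop, where a non-nil value tears the connection down. A condensed sketch of the contract, with handleChallenge and wantHash being illustrative:

```go
// The consumer's verdict travels back over res.Done; sending a non-nil error
// here ultimately disconnects the peer.
func handleChallenge(res *eth.Response, wantHash common.Hash) {
	headers := ([]*types.Header)(*res.Res.(*eth.BlockHeadersPacket))
	switch {
	case len(headers) != 1:
		res.Done <- errors.New("unexpected number of headers")
	case headers[0].Hash() != wantHash:
		res.Done <- errors.New("checkpoint hash mismatch")
	default:
		res.Done <- nil // accepted: keep the peer
	}
}
```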
func (h *ethHandler) RunPeer(peer *eth.Peer, hand eth.Handler) error { @@ -64,25 +60,6 @@ func (h *ethHandler) AcceptTxs() bool { func (h *ethHandler) Handle(peer *eth.Peer, packet eth.Packet) error { // Consume any broadcasts and announces, forwarding the rest to the downloader switch packet := packet.(type) { - case *eth.BlockHeadersPacket: - return h.handleHeaders(peer, *packet) - - case *eth.BlockBodiesPacket: - txset, uncleset := packet.Unpack() - return h.handleBodies(peer, txset, uncleset) - - case *eth.NodeDataPacket: - if err := h.downloader.DeliverNodeData(peer.ID(), *packet); err != nil { - log.Debug("Failed to deliver node state data", "err", err) - } - return nil - - case *eth.ReceiptsPacket: - if err := h.downloader.DeliverReceipts(peer.ID(), *packet); err != nil { - log.Debug("Failed to deliver receipts", "err", err) - } - return nil - case *eth.NewBlockHashesPacket: hashes, numbers := packet.Unpack() return h.handleBlockAnnounces(peer, hashes, numbers) @@ -104,79 +81,6 @@ func (h *ethHandler) Handle(peer *eth.Peer, packet eth.Packet) error { } } -// handleHeaders is invoked from a peer's message handler when it transmits a batch -// of headers for the local node to process. -func (h *ethHandler) handleHeaders(peer *eth.Peer, headers []*types.Header) error { - p := h.peers.peer(peer.ID()) - if p == nil { - return errors.New("unregistered during callback") - } - // If no headers were received, but we're expencting a checkpoint header, consider it that - if len(headers) == 0 && p.syncDrop != nil { - // Stop the timer either way, decide later to drop or not - p.syncDrop.Stop() - p.syncDrop = nil - - // If we're doing a fast (or snap) sync, we must enforce the checkpoint block to avoid - // eclipse attacks. Unsynced nodes are welcome to connect after we're done - // joining the network - if atomic.LoadUint32(&h.fastSync) == 1 { - peer.Log().Warn("Dropping unsynced node during sync", "addr", peer.RemoteAddr(), "type", peer.Name()) - return errors.New("unsynced node cannot serve sync") - } - } - // Filter out any explicitly requested headers, deliver the rest to the downloader - filter := len(headers) == 1 - if filter { - // If it's a potential sync progress check, validate the content and advertised chain weight - if p.syncDrop != nil && headers[0].Number.Uint64() == h.checkpointNumber { - // Disable the sync drop timer - p.syncDrop.Stop() - p.syncDrop = nil - - // Validate the header and either drop the peer or continue - if headers[0].Hash() != h.checkpointHash { - return errors.New("checkpoint hash mismatch") - } - return nil - } - // Otherwise if it's a whitelisted block, validate against the set - if want, ok := h.whitelist[headers[0].Number.Uint64()]; ok { - if hash := headers[0].Hash(); want != hash { - peer.Log().Info("Whitelist mismatch, dropping peer", "number", headers[0].Number.Uint64(), "hash", hash, "want", want) - return errors.New("whitelist block mismatch") - } - peer.Log().Debug("Whitelist block verified", "number", headers[0].Number.Uint64(), "hash", want) - } - // Irrelevant of the fork checks, send the header to the fetcher just in case - headers = h.blockFetcher.FilterHeaders(peer.ID(), headers, time.Now()) - } - if len(headers) > 0 || !filter { - err := h.downloader.DeliverHeaders(peer.ID(), headers) - if err != nil { - log.Debug("Failed to deliver headers", "err", err) - } - } - return nil -} - -// handleBodies is invoked from a peer's message handler when it transmits a batch -// of block bodies for the local node to process. 
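With responses now intercepted by the per-peer dispatcher, the central Handle switch never sees them, which is why handleHeaders above and handleBodies below can be deleted outright. What remains of the switch, condensed (the elided cases are unchanged by this diff):

```go
// Only unsolicited broadcasts and announcements are routed here; request
// replies are consumed by dispatchResponse in the peer's read loop first.
func (h *ethHandler) Handle(peer *eth.Peer, packet eth.Packet) error {
	switch packet := packet.(type) {
	case *eth.NewBlockHashesPacket:
		hashes, numbers := packet.Unpack()
		return h.handleBlockAnnounces(peer, hashes, numbers)

	// ... the block and transaction broadcast cases continue as before ...

	default:
		return fmt.Errorf("unexpected eth packet type: %T", packet)
	}
}
```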
-func (h *ethHandler) handleBodies(peer *eth.Peer, txs [][]*types.Transaction, uncles [][]*types.Header) error { - // Filter out any explicitly requested bodies, deliver the rest to the downloader - filter := len(txs) > 0 || len(uncles) > 0 - if filter { - txs, uncles = h.blockFetcher.FilterBodies(peer.ID(), txs, uncles, time.Now()) - } - if len(txs) > 0 || len(uncles) > 0 || !filter { - err := h.downloader.DeliverBodies(peer.ID(), txs, uncles) - if err != nil { - log.Debug("Failed to deliver bodies", "err", err) - } - } - return nil -} - // handleBlockAnnounces is invoked from a peer's message handler when it transmits a // batch of block announcements for the local node to process. func (h *ethHandler) handleBlockAnnounces(peer *eth.Peer, hashes []common.Hash, numbers []uint64) error { diff --git a/eth/handler_eth_test.go b/eth/handler_eth_test.go index d24705cec..d5410fa84 100644 --- a/eth/handler_eth_test.go +++ b/eth/handler_eth_test.go @@ -37,7 +37,6 @@ import ( "github.com/scroll-tech/go-ethereum/p2p" "github.com/scroll-tech/go-ethereum/p2p/enode" "github.com/scroll-tech/go-ethereum/params" - "github.com/scroll-tech/go-ethereum/trie" ) // testEthHandler is a mock event handler to listen for inbound network requests @@ -49,7 +48,6 @@ type testEthHandler struct { } func (h *testEthHandler) Chain() *core.BlockChain { panic("no backing chain") } -func (h *testEthHandler) StateBloom() *trie.SyncBloom { panic("no backing state bloom") } func (h *testEthHandler) TxPool() eth.TxPool { panic("no backing tx pool") } func (h *testEthHandler) AcceptTxs() bool { return true } func (h *testEthHandler) RunPeer(*eth.Peer, eth.Handler) error { panic("not used in tests") } @@ -351,7 +349,7 @@ func testSendTransactions(t *testing.T, protocol uint) { seen := make(map[common.Hash]struct{}) for len(seen) < len(insert) { switch protocol { - case 65, 66: + case 66: select { case hashes := <-anns: for _, hash := range hashes { @@ -361,7 +359,7 @@ func testSendTransactions(t *testing.T, protocol uint) { seen[hash] = struct{}{} } case <-bcasts: - t.Errorf("initial tx broadcast received on post eth/65") + t.Errorf("initial tx broadcast received on post eth/66") } default: @@ -386,6 +384,7 @@ func testTransactionPropagation(t *testing.T, protocol uint) { // to receive them. We need multiple sinks since a one-to-one peering would // broadcast all transactions without announcement. 
source := newTestHandler() + source.handler.snapSync = 0 // Avoid requiring snap, otherwise some will be dropped below defer source.close() sinks := make([]*testHandler, 10) @@ -403,7 +402,7 @@ func testTransactionPropagation(t *testing.T, protocol uint) { defer sourcePipe.Close() defer sinkPipe.Close() - sourcePeer := eth.NewPeer(protocol, p2p.NewPeerPipe(enode.ID{byte(i)}, "", nil, sourcePipe), sourcePipe, source.txpool) + sourcePeer := eth.NewPeer(protocol, p2p.NewPeerPipe(enode.ID{byte(i + 1)}, "", nil, sourcePipe), sourcePipe, source.txpool) sinkPeer := eth.NewPeer(protocol, p2p.NewPeerPipe(enode.ID{0}, "", nil, sinkPipe), sinkPipe, sink.txpool) defer sourcePeer.Close() defer sinkPeer.Close() @@ -435,12 +434,13 @@ func testTransactionPropagation(t *testing.T, protocol uint) { // Iterate through all the sinks and ensure they all got the transactions for i := range sinks { - for arrived := 0; arrived < len(txs); { + for arrived, timeout := 0, false; arrived < len(txs) && !timeout; { select { case event := <-txChs[i]: arrived += len(event.Txs) - case <-time.NewTimer(time.Second).C: + case <-time.After(time.Second): t.Errorf("sink %d: transaction propagation timed out: have %d, want %d", i, arrived, len(txs)) + timeout = true } } } @@ -460,23 +460,23 @@ func TestCheckpointChallenge(t *testing.T) { }{ // If checkpointing is not enabled locally, don't challenge and don't drop {downloader.FullSync, false, false, false, false, false}, - {downloader.FastSync, false, false, false, false, false}, + {downloader.SnapSync, false, false, false, false, false}, // If checkpointing is enabled locally and remote response is empty, only drop during fast sync {downloader.FullSync, true, false, true, false, false}, - {downloader.FastSync, true, false, true, false, true}, // Special case, fast sync, unsynced peer + {downloader.SnapSync, true, false, true, false, true}, // Special case, fast sync, unsynced peer // If checkpointing is enabled locally and remote response mismatches, always drop {downloader.FullSync, true, false, false, false, true}, - {downloader.FastSync, true, false, false, false, true}, + {downloader.SnapSync, true, false, false, false, true}, // If checkpointing is enabled locally and remote response matches, never drop {downloader.FullSync, true, false, false, true, false}, - {downloader.FastSync, true, false, false, true, false}, + {downloader.SnapSync, true, false, false, true, false}, // If checkpointing is enabled locally and remote times out, always drop {downloader.FullSync, true, true, false, true, true}, - {downloader.FastSync, true, true, false, true, true}, + {downloader.SnapSync, true, true, false, true, true}, } for _, tt := range tests { t.Run(fmt.Sprintf("sync %v checkpoint %v timeout %v empty %v match %v", tt.syncmode, tt.checkpoint, tt.timeout, tt.empty, tt.match), func(t *testing.T) { @@ -497,10 +497,10 @@ func testCheckpointChallenge(t *testing.T, syncmode downloader.SyncMode, checkpo handler := newTestHandler() defer handler.close() - if syncmode == downloader.FastSync { - atomic.StoreUint32(&handler.handler.fastSync, 1) + if syncmode == downloader.SnapSync { + atomic.StoreUint32(&handler.handler.snapSync, 1) } else { - atomic.StoreUint32(&handler.handler.fastSync, 0) + atomic.StoreUint32(&handler.handler.snapSync, 0) } var response *types.Header if checkpoint { diff --git a/eth/handler_test.go b/eth/handler_test.go index b9931a4b3..7ae12845c 100644 --- a/eth/handler_test.go +++ b/eth/handler_test.go @@ -150,7 +150,7 @@ func newTestHandlerWithBlocks(blocks int) 
*testHandler { Chain: chain, TxPool: txpool, Network: 1, - Sync: downloader.FastSync, + Sync: downloader.SnapSync, BloomCache: 1, }) handler.Start(1000) diff --git a/eth/peer.go b/eth/peer.go index 538d4cfcf..2675fc8cf 100644 --- a/eth/peer.go +++ b/eth/peer.go @@ -18,8 +18,6 @@ package eth import ( "math/big" - "sync" - "time" "github.com/scroll-tech/go-ethereum/eth/protocols/eth" "github.com/scroll-tech/go-ethereum/eth/protocols/snap" @@ -36,11 +34,8 @@ type ethPeerInfo struct { // ethPeer is a wrapper around eth.Peer to maintain a few extra metadata. type ethPeer struct { *eth.Peer - snapExt *snapPeer // Satellite `snap` connection - - syncDrop *time.Timer // Connection dropper if `eth` sync progress isn't validated in time + snapExt *snapPeer // Satellite `snap` connection snapWait chan struct{} // Notification channel for snap connections - lock sync.RWMutex // Mutex protecting the internal fields } // info gathers and returns some `eth` protocol metadata known about a peer. diff --git a/eth/peerset.go b/eth/peerset.go index fae769c3a..6f6b64d4f 100644 --- a/eth/peerset.go +++ b/eth/peerset.go @@ -230,7 +230,7 @@ func (ps *peerSet) snapLen() int { } // peerWithHighestTD retrieves the known peer with the currently highest total -// difficulty. +// difficulty, but below the given PoS switchover threshold. func (ps *peerSet) peerWithHighestTD() *eth.Peer { ps.lock.RLock() defer ps.lock.RUnlock() diff --git a/eth/protocols/eth/dispatcher.go b/eth/protocols/eth/dispatcher.go new file mode 100644 index 000000000..4de309412 --- /dev/null +++ b/eth/protocols/eth/dispatcher.go @@ -0,0 +1,253 @@ +// Copyright 2021 The go-ethereum Authors +// This file is part of the go-ethereum library. +// +// The go-ethereum library is free software: you can redistribute it and/or modify +// it under the terms of the GNU Lesser General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. +// +// The go-ethereum library is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU Lesser General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public License +// along with the go-ethereum library. If not, see . + +package eth + +import ( + "errors" + "fmt" + "time" + + "github.com/scroll-tech/go-ethereum/p2p" +) + +var ( + // errDisconnected is returned if a request is attempted to be made to a peer + // that was already closed. + errDisconnected = errors.New("disconnected") + + // errDanglingResponse is returned if a response arrives with a request id + // which does not match to any existing pending requests. + errDanglingResponse = errors.New("response to non-existent request") + + // errMismatchingResponseType is returned if the remote peer sent a different + // packet type as a response to a request than what the local node expected. + errMismatchingResponseType = errors.New("mismatching response type") +) + +// Request is a pending request to allow tracking it and delivering a response +// back to the requester on their chosen channel. 
+type Request struct {
+	peer *Peer  // Peer to which this request belongs for untracking
+	id   uint64 // Request ID to match up replies to
+
+	sink   chan *Response // Channel to deliver the response on
+	cancel chan struct{}  // Channel to cancel requests ahead of time
+
+	code uint64      // Message code of the request packet
+	want uint64      // Message code of the response packet
+	data interface{} // Data content of the request packet
+
+	Peer string    // Demultiplexer if cross-peer requests are batched together
+	Sent time.Time // Timestamp when the request was sent
+}
+
+// Close aborts an in-flight request. Although there's no way to notify the
+// remote peer about the cancellation, this method notifies the dispatcher to
+// discard any late responses.
+func (r *Request) Close() error {
+	if r.peer == nil { // Tests mock out the dispatcher, skip internal cancellation
+		return nil
+	}
+	cancelOp := &cancel{
+		id:   r.id,
+		fail: make(chan error),
+	}
+	select {
+	case r.peer.reqCancel <- cancelOp:
+		if err := <-cancelOp.fail; err != nil {
+			return err
+		}
+		close(r.cancel)
+		return nil
+	case <-r.peer.term:
+		return errDisconnected
+	}
+}
+
+// request is a wrapper around a client Request that has an error channel to
+// signal on if sending the request already failed on a network level.
+type request struct {
+	req  *Request
+	fail chan error
+}
+
+// cancel is a maintenance type on the dispatcher to stop tracking a pending
+// request.
+type cancel struct {
+	id   uint64 // Request ID to stop tracking
+	fail chan error
+}
+
+// Response is a reply packet to a previously created request. It is delivered
+// on the channel assigned by the requester subsystem and contains the original
+// request embedded to allow uniquely matching it caller side.
+type Response struct {
+	id   uint64    // Request ID to match up this reply to
+	recv time.Time // Timestamp when the request was received
+	code uint64    // Response packet type to cross validate with request
+
+	Req  *Request      // Original request to cross-reference with
+	Res  interface{}   // Remote response for the request query
+	Meta interface{}   // Metadata generated locally on the receiver thread
+	Time time.Duration // Time it took for the request to be served
+	Done chan error    // Channel to signal message handling to the reader
+}
+
+// response is a wrapper around a remote Response that has an error channel to
+// signal on if processing the response failed.
+type response struct {
+	res  *Response
+	fail chan error
+}
+
+// dispatchRequest schedules the request to the dispatcher for tracking and
+// network serialization, blocking until it's successfully sent.
+//
+// The returned Request must either be closed before discarding it, or the reply
+// must be waited for and the Response's Done channel signalled.
+func (p *Peer) dispatchRequest(req *Request) error {
+	reqOp := &request{
+		req:  req,
+		fail: make(chan error),
+	}
+	req.cancel = make(chan struct{})
+	req.peer = p
+	req.Peer = p.id
+
+	select {
+	case p.reqDispatch <- reqOp:
+		return <-reqOp.fail
+	case <-p.term:
+		return errDisconnected
+	}
+}
+
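From the requester's side, the contract these types encode is: dispatch, then either consume the reply and signal Done, or Close the request so late replies are silently dropped. A sketch under those rules, where requestHeaders and the ten-second deadline are illustrative:

```go
// Exactly one of the two exit paths must run: signal res.Done after consuming
// the reply, or Close() the request so the dispatcher discards stragglers.
func requestHeaders(p *eth.Peer, origin uint64) ([]*types.Header, error) {
	sink := make(chan *eth.Response)

	req, err := p.RequestHeadersByNumber(origin, 1, 0, false, sink)
	if err != nil {
		return nil, err // never dispatched, nothing to clean up
	}
	select {
	case res := <-sink:
		headers := []*types.Header(*res.Res.(*eth.BlockHeadersPacket))
		res.Done <- nil // release the delivery goroutine
		return headers, nil
	case <-time.After(10 * time.Second): // illustrative deadline
		req.Close() // untrack so a late reply is silently discarded
		return nil, errors.New("header request timed out")
	}
}
```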
+// dispatchResponse fulfils a pending request and delivers it to the requested
+// sink.
+func (p *Peer) dispatchResponse(res *Response, metadata func() interface{}) error {
+	resOp := &response{
+		res:  res,
+		fail: make(chan error),
+	}
+	res.recv = time.Now()
+	res.Done = make(chan error)
+
+	select {
+	case p.resDispatch <- resOp:
+		// Ensure the response is accepted by the dispatcher
+		if err := <-resOp.fail; err != nil {
+			return err
+		}
+		// Request was accepted, run any postprocessing step to generate metadata
+		// on the receiver thread, not the sink thread
+		if metadata != nil {
+			res.Meta = metadata()
+		}
+		// Deliver the filled out response and wait until it's handled. This
+		// path is a bit funky as Go's select has no order, so if a response
+		// arrives to an already cancelled request, there's a 50-50% chance
+		// of picking one channel or the other. To avoid such cases delivering
+		// the packet upstream, check for cancellation first and only after
+		// block on delivery.
+		select {
+		case <-res.Req.cancel:
+			return nil // Request cancelled, silently discard response
+		default:
+			// Request not yet cancelled, attempt to deliver it, but do watch
+			// for fresh cancellations too
+			select {
+			case res.Req.sink <- res:
+				return <-res.Done // Response delivered, return any errors
+			case <-res.Req.cancel:
+				return nil // Request cancelled, silently discard response
+			}
+		}
+
+	case <-p.term:
+		return errDisconnected
+	}
+}
+
+// dispatcher is a loop that accepts requests from higher layer packages, pushes
+// them to the network, and tracks and dispatches the responses back to the
+// original requester.
+func (p *Peer) dispatcher() {
+	pending := make(map[uint64]*Request)
+
+	for {
+		select {
+		case reqOp := <-p.reqDispatch:
+			req := reqOp.req
+			req.Sent = time.Now()
+
+			requestTracker.Track(p.id, p.version, req.code, req.want, req.id)
+			err := p2p.Send(p.rw, req.code, req.data)
+			reqOp.fail <- err
+
+			if err == nil {
+				pending[req.id] = req
+			}
+
+		case cancelOp := <-p.reqCancel:
+			// Retrieve the pending request to cancel and short circuit if it
+			// has already been serviced and is not available anymore
+			req := pending[cancelOp.id]
+			if req == nil {
+				cancelOp.fail <- nil
+				continue
+			}
+			// Stop tracking the request
+			delete(pending, cancelOp.id)
+			cancelOp.fail <- nil
+
+		case resOp := <-p.resDispatch:
+			res := resOp.res
+			res.Req = pending[res.id]
+
+			// Independent of whether the request exists or not, track this packet
+			requestTracker.Fulfil(p.id, p.version, res.code, res.id)
+
+			switch {
+			case res.Req == nil:
+				// Response arrived with an untracked ID. Since even cancelled
+				// requests are tracked until fulfilment, a dangling response
+				// means the remote peer implements the protocol badly.
+				resOp.fail <- errDanglingResponse
+
+			case res.Req.want != res.code:
+				// Response arrived, but it's a different packet type than the
+				// one expected by the requester. Either the local code is bad,
+				// or the remote peer sent junk. In neither case can we handle
+				// the packet.
+				resOp.fail <- fmt.Errorf("%w: have %d, want %d", errMismatchingResponseType, res.code, res.Req.want)
+
+			default:
+				// All dispatcher checks passed and the response was initialized
+				// with the matching request. Signal to the delivery routine that
+				// it can wait for a handler response and dispatch the data.
+ res.Time = res.recv.Sub(res.Req.Sent) + resOp.fail <- nil + + // Stop tracking the request, the response dispatcher will deliver + delete(pending, res.id) + } + + case <-p.term: + return + } + } +} diff --git a/eth/protocols/eth/handler.go b/eth/protocols/eth/handler.go index 427e63a13..3c335cc3e 100644 --- a/eth/protocols/eth/handler.go +++ b/eth/protocols/eth/handler.go @@ -29,7 +29,6 @@ import ( "github.com/scroll-tech/go-ethereum/p2p/enode" "github.com/scroll-tech/go-ethereum/p2p/enr" "github.com/scroll-tech/go-ethereum/params" - "github.com/scroll-tech/go-ethereum/trie" ) const ( @@ -69,9 +68,6 @@ type Backend interface { // Chain retrieves the blockchain object to serve data. Chain() *core.BlockChain - // StateBloom retrieves the bloom filter - if any - for state trie nodes. - StateBloom() *trie.SyncBloom - // TxPool retrieves the transaction pool object to serve data. TxPool() TxPool diff --git a/eth/protocols/eth/handler_test.go b/eth/protocols/eth/handler_test.go index ec5a5e764..1f2035982 100644 --- a/eth/protocols/eth/handler_test.go +++ b/eth/protocols/eth/handler_test.go @@ -34,7 +34,6 @@ import ( "github.com/scroll-tech/go-ethereum/p2p" "github.com/scroll-tech/go-ethereum/p2p/enode" "github.com/scroll-tech/go-ethereum/params" - "github.com/scroll-tech/go-ethereum/trie" ) var ( @@ -91,9 +90,8 @@ func (b *testBackend) close() { b.chain.Stop() } -func (b *testBackend) Chain() *core.BlockChain { return b.chain } -func (b *testBackend) StateBloom() *trie.SyncBloom { return nil } -func (b *testBackend) TxPool() TxPool { return b.txpool } +func (b *testBackend) Chain() *core.BlockChain { return b.chain } +func (b *testBackend) TxPool() TxPool { return b.txpool } func (b *testBackend) RunPeer(peer *Peer, handler Handler) error { // Normally the backend would do peer mainentance and handshakes. All that diff --git a/eth/protocols/eth/handlers.go b/eth/protocols/eth/handlers.go index ba091f9ce..2d75c5e4f 100644 --- a/eth/protocols/eth/handlers.go +++ b/eth/protocols/eth/handlers.go @@ -21,6 +21,7 @@ import ( "fmt" "github.com/scroll-tech/go-ethereum/common" + "github.com/scroll-tech/go-ethereum/core" "github.com/scroll-tech/go-ethereum/core/types" "github.com/scroll-tech/go-ethereum/log" "github.com/scroll-tech/go-ethereum/rlp" @@ -34,11 +35,13 @@ func handleGetBlockHeaders66(backend Backend, msg Decoder, peer *Peer) error { if err := msg.Decode(&query); err != nil { return fmt.Errorf("%w: message %v: %v", errDecode, msg, err) } - response := answerGetBlockHeadersQuery(backend, query.GetBlockHeadersPacket, peer) + response := ServiceGetBlockHeadersQuery(backend.Chain(), query.GetBlockHeadersPacket, peer) return peer.ReplyBlockHeaders(query.RequestId, response) } -func answerGetBlockHeadersQuery(backend Backend, query *GetBlockHeadersPacket, peer *Peer) []*types.Header { +// ServiceGetBlockHeadersQuery assembles the response to a header query. It is +// exposed to allow external packages to test protocol behavior. 
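Because the servicer now takes a bare *core.BlockChain instead of the Backend interface, an external harness (the devp2p ethtest suite, for instance) can drive it without standing up a live peer. A hypothetical test, with the fixture names assumed from earlier in this diff and a nil peer, which is tolerable here since the peer argument is only used for logging on the skip-overflow path:

```go
// Hypothetical external test exercising the exported query servicer directly.
func TestServiceHeadersQuery(t *testing.T) {
	chain := newTestBlockchain(testChainBase.blocks[1:]) // fixture names assumed

	query := &eth.GetBlockHeadersPacket{
		Origin: eth.HashOrNumber{Number: 1},
		Amount: 4,
	}
	headers := eth.ServiceGetBlockHeadersQuery(chain, query, nil)
	if len(headers) != 4 {
		t.Fatalf("have %d headers, want 4", len(headers))
	}
}
```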
+func ServiceGetBlockHeadersQuery(chain *core.BlockChain, query *GetBlockHeadersPacket, peer *Peer) []*types.Header { hashMode := query.Origin.Hash != (common.Hash{}) first := true maxNonCanonical := uint64(100) @@ -58,15 +61,15 @@ func answerGetBlockHeadersQuery(backend Backend, query *GetBlockHeadersPacket, p if hashMode { if first { first = false - origin = backend.Chain().GetHeaderByHash(query.Origin.Hash) + origin = chain.GetHeaderByHash(query.Origin.Hash) if origin != nil { query.Origin.Number = origin.Number.Uint64() } } else { - origin = backend.Chain().GetHeader(query.Origin.Hash, query.Origin.Number) + origin = chain.GetHeader(query.Origin.Hash, query.Origin.Number) } } else { - origin = backend.Chain().GetHeaderByNumber(query.Origin.Number) + origin = chain.GetHeaderByNumber(query.Origin.Number) } if origin == nil { break @@ -82,7 +85,7 @@ func answerGetBlockHeadersQuery(backend Backend, query *GetBlockHeadersPacket, p if ancestor == 0 { unknown = true } else { - query.Origin.Hash, query.Origin.Number = backend.Chain().GetAncestor(query.Origin.Hash, query.Origin.Number, ancestor, &maxNonCanonical) + query.Origin.Hash, query.Origin.Number = chain.GetAncestor(query.Origin.Hash, query.Origin.Number, ancestor, &maxNonCanonical) unknown = (query.Origin.Hash == common.Hash{}) } case hashMode && !query.Reverse: @@ -96,9 +99,9 @@ func answerGetBlockHeadersQuery(backend Backend, query *GetBlockHeadersPacket, p peer.Log().Warn("GetBlockHeaders skip overflow attack", "current", current, "skip", query.Skip, "next", next, "attacker", infos) unknown = true } else { - if header := backend.Chain().GetHeaderByNumber(next); header != nil { + if header := chain.GetHeaderByNumber(next); header != nil { nextHash := header.Hash() - expOldHash, _ := backend.Chain().GetAncestor(nextHash, next, query.Skip+1, &maxNonCanonical) + expOldHash, _ := chain.GetAncestor(nextHash, next, query.Skip+1, &maxNonCanonical) if expOldHash == query.Origin.Hash { query.Origin.Hash, query.Origin.Number = nextHash, next } else { @@ -130,11 +133,13 @@ func handleGetBlockBodies66(backend Backend, msg Decoder, peer *Peer) error { if err := msg.Decode(&query); err != nil { return fmt.Errorf("%w: message %v: %v", errDecode, msg, err) } - response := answerGetBlockBodiesQuery(backend, query.GetBlockBodiesPacket, peer) + response := ServiceGetBlockBodiesQuery(backend.Chain(), query.GetBlockBodiesPacket) return peer.ReplyBlockBodiesRLP(query.RequestId, response) } -func answerGetBlockBodiesQuery(backend Backend, query GetBlockBodiesPacket, peer *Peer) []rlp.RawValue { +// ServiceGetBlockBodiesQuery assembles the response to a body query. It is +// exposed to allow external packages to test protocol behavior. 
+func ServiceGetBlockBodiesQuery(chain *core.BlockChain, query GetBlockBodiesPacket) []rlp.RawValue { // Gather blocks until the fetch or network limits is reached var ( bytes int @@ -145,7 +150,7 @@ func answerGetBlockBodiesQuery(backend Backend, query GetBlockBodiesPacket, peer lookups >= 2*maxBodiesServe { break } - if data := backend.Chain().GetBodyRLP(hash); len(data) != 0 { + if data := chain.GetBodyRLP(hash); len(data) != 0 { bodies = append(bodies, data) bytes += len(data) } @@ -159,11 +164,13 @@ func handleGetNodeData66(backend Backend, msg Decoder, peer *Peer) error { if err := msg.Decode(&query); err != nil { return fmt.Errorf("%w: message %v: %v", errDecode, msg, err) } - response := answerGetNodeDataQuery(backend, query.GetNodeDataPacket, peer) + response := ServiceGetNodeDataQuery(backend.Chain(), query.GetNodeDataPacket) return peer.ReplyNodeData(query.RequestId, response) } -func answerGetNodeDataQuery(backend Backend, query GetNodeDataPacket, peer *Peer) [][]byte { +// ServiceGetNodeDataQuery assembles the response to a node data query. It is +// exposed to allow external packages to test protocol behavior. +func ServiceGetNodeDataQuery(chain *core.BlockChain, query GetNodeDataPacket) [][]byte { // Gather state data until the fetch or network limits is reached var ( bytes int @@ -175,14 +182,10 @@ func answerGetNodeDataQuery(backend Backend, query GetNodeDataPacket, peer *Peer break } // Retrieve the requested state entry - if bloom := backend.StateBloom(); bloom != nil && !bloom.Contains(hash[:]) { - // Only lookup the trie node if there's chance that we actually have it - continue - } - entry, err := backend.Chain().TrieNode(hash) + entry, err := chain.TrieNode(hash) if len(entry) == 0 || err != nil { // Read the contract code with prefix only to save unnecessary lookups. - entry, err = backend.Chain().ContractCodeWithPrefix(hash) + entry, err = chain.ContractCodeWithPrefix(hash) } if err == nil && len(entry) > 0 { nodes = append(nodes, entry) @@ -198,11 +201,13 @@ func handleGetReceipts66(backend Backend, msg Decoder, peer *Peer) error { if err := msg.Decode(&query); err != nil { return fmt.Errorf("%w: message %v: %v", errDecode, msg, err) } - response := answerGetReceiptsQuery(backend, query.GetReceiptsPacket, peer) + response := ServiceGetReceiptsQuery(backend.Chain(), query.GetReceiptsPacket) return peer.ReplyReceiptsRLP(query.RequestId, response) } -func answerGetReceiptsQuery(backend Backend, query GetReceiptsPacket, peer *Peer) []rlp.RawValue { +// ServiceGetReceiptsQuery assembles the response to a receipt query. It is +// exposed to allow external packages to test protocol behavior. 
+func ServiceGetReceiptsQuery(chain *core.BlockChain, query GetReceiptsPacket) []rlp.RawValue { // Gather state data until the fetch or network limits is reached var ( bytes int @@ -214,9 +219,9 @@ func answerGetReceiptsQuery(backend Backend, query GetReceiptsPacket, peer *Peer break } // Retrieve the requested block's receipts - results := backend.Chain().GetReceiptsByHash(hash) + results := chain.GetReceiptsByHash(hash) if results == nil { - if header := backend.Chain().GetHeaderByHash(hash); header == nil || header.ReceiptHash != types.EmptyRootHash { + if header := chain.GetHeaderByHash(hash); header == nil || header.ReceiptHash != types.EmptyRootHash { continue } } @@ -277,9 +282,18 @@ func handleBlockHeaders66(backend Backend, msg Decoder, peer *Peer) error { if err := msg.Decode(res); err != nil { return fmt.Errorf("%w: message %v: %v", errDecode, msg, err) } - requestTracker.Fulfil(peer.id, peer.version, BlockHeadersMsg, res.RequestId) - - return backend.Handle(peer, &res.BlockHeadersPacket) + metadata := func() interface{} { + hashes := make([]common.Hash, len(res.BlockHeadersPacket)) + for i, header := range res.BlockHeadersPacket { + hashes[i] = header.Hash() + } + return hashes + } + return peer.dispatchResponse(&Response{ + id: res.RequestId, + code: BlockHeadersMsg, + Res: &res.BlockHeadersPacket, + }, metadata) } func handleBlockBodies66(backend Backend, msg Decoder, peer *Peer) error { @@ -288,9 +302,23 @@ func handleBlockBodies66(backend Backend, msg Decoder, peer *Peer) error { if err := msg.Decode(res); err != nil { return fmt.Errorf("%w: message %v: %v", errDecode, msg, err) } - requestTracker.Fulfil(peer.id, peer.version, BlockBodiesMsg, res.RequestId) - - return backend.Handle(peer, &res.BlockBodiesPacket) + metadata := func() interface{} { + var ( + txsHashes = make([]common.Hash, len(res.BlockBodiesPacket)) + uncleHashes = make([]common.Hash, len(res.BlockBodiesPacket)) + ) + hasher := trie.NewStackTrie(nil) + for i, body := range res.BlockBodiesPacket { + txsHashes[i] = types.DeriveSha(types.Transactions(body.Transactions), hasher) + uncleHashes[i] = types.CalcUncleHash(body.Uncles) + } + return [][]common.Hash{txsHashes, uncleHashes} + } + return peer.dispatchResponse(&Response{ + id: res.RequestId, + code: BlockBodiesMsg, + Res: &res.BlockBodiesPacket, + }, metadata) } func handleNodeData66(backend Backend, msg Decoder, peer *Peer) error { @@ -299,9 +327,11 @@ func handleNodeData66(backend Backend, msg Decoder, peer *Peer) error { if err := msg.Decode(res); err != nil { return fmt.Errorf("%w: message %v: %v", errDecode, msg, err) } - requestTracker.Fulfil(peer.id, peer.version, NodeDataMsg, res.RequestId) - - return backend.Handle(peer, &res.NodeDataPacket) + return peer.dispatchResponse(&Response{ + id: res.RequestId, + code: NodeDataMsg, + Res: &res.NodeDataPacket, + }, nil) // No post-processing, we're not using this packet anymore } func handleReceipts66(backend Backend, msg Decoder, peer *Peer) error { @@ -310,9 +340,19 @@ func handleReceipts66(backend Backend, msg Decoder, peer *Peer) error { if err := msg.Decode(res); err != nil { return fmt.Errorf("%w: message %v: %v", errDecode, msg, err) } - requestTracker.Fulfil(peer.id, peer.version, ReceiptsMsg, res.RequestId) - - return backend.Handle(peer, &res.ReceiptsPacket) + metadata := func() interface{} { + hasher := trie.NewStackTrie(nil) + hashes := make([]common.Hash, len(res.ReceiptsPacket)) + for i, receipt := range res.ReceiptsPacket { + hashes[i] = types.DeriveSha(types.Receipts(receipt), hasher) + } + 
return hashes + } + return peer.dispatchResponse(&Response{ + id: res.RequestId, + code: ReceiptsMsg, + Res: &res.ReceiptsPacket, + }, metadata) } func handleNewPooledTransactionHashes(backend Backend, msg Decoder, peer *Peer) error { diff --git a/eth/protocols/eth/peer.go b/eth/protocols/eth/peer.go index 20f959a8e..618115c51 100644 --- a/eth/protocols/eth/peer.go +++ b/eth/protocols/eth/peer.go @@ -85,6 +85,10 @@ type Peer struct { txBroadcast chan []common.Hash // Channel used to queue transaction propagation requests txAnnounce chan []common.Hash // Channel used to queue transaction announcement requests + reqDispatch chan *request // Dispatch channel to send requests and track then until fulfilment + reqCancel chan *cancel // Dispatch channel to cancel pending requests and untrack them + resDispatch chan *response // Dispatch channel to fulfil pending requests and untrack them + term chan struct{} // Termination channel to stop the broadcasters lock sync.RWMutex // Mutex protecting the internal fields } @@ -103,6 +107,9 @@ func NewPeer(version uint, p *p2p.Peer, rw p2p.MsgReadWriter, txpool TxPool) *Pe queuedBlockAnns: make(chan *types.Block, maxQueuedBlockAnns), txBroadcast: make(chan []common.Hash), txAnnounce: make(chan []common.Hash), + reqDispatch: make(chan *request), + reqCancel: make(chan *cancel), + resDispatch: make(chan *response), txpool: txpool, term: make(chan struct{}), } @@ -110,6 +117,7 @@ func NewPeer(version uint, p *p2p.Peer, rw p2p.MsgReadWriter, txpool TxPool) *Pe go peer.broadcastBlocks() go peer.broadcastTransactions() go peer.announceTransactions() + go peer.dispatcher() return peer } @@ -324,94 +332,148 @@ func (p *Peer) ReplyReceiptsRLP(id uint64, receipts []rlp.RawValue) error { // RequestOneHeader is a wrapper around the header query functions to fetch a // single header. It is used solely by the fetcher. -func (p *Peer) RequestOneHeader(hash common.Hash) error { +func (p *Peer) RequestOneHeader(hash common.Hash, sink chan *Response) (*Request, error) { p.Log().Debug("Fetching single header", "hash", hash) id := rand.Uint64() - requestTracker.Track(p.id, p.version, GetBlockHeadersMsg, BlockHeadersMsg, id) - return p2p.Send(p.rw, GetBlockHeadersMsg, &GetBlockHeadersPacket66{ - RequestId: id, - GetBlockHeadersPacket: &GetBlockHeadersPacket{ - Origin: HashOrNumber{Hash: hash}, - Amount: uint64(1), - Skip: uint64(0), - Reverse: false, + req := &Request{ + id: id, + sink: sink, + code: GetBlockHeadersMsg, + want: BlockHeadersMsg, + data: &GetBlockHeadersPacket66{ + RequestId: id, + GetBlockHeadersPacket: &GetBlockHeadersPacket{ + Origin: HashOrNumber{Hash: hash}, + Amount: uint64(1), + Skip: uint64(0), + Reverse: false, + }, }, - }) + } + if err := p.dispatchRequest(req); err != nil { + return nil, err + } + return req, nil } // RequestHeadersByHash fetches a batch of blocks' headers corresponding to the // specified header query, based on the hash of an origin block. 
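The metadata closures in the handlers above run on the peer's receiver thread, so derived values such as header hashes are computed once and handed to every consumer through res.Meta instead of being rehashed per sink. Sketch of the consuming side, with schedule being illustrative; the request constructors that follow are what register the matching want codes:

```go
// For header responses the dispatcher attaches []common.Hash, one entry per
// header, precomputed on the receiver thread.
func consumeHeaders(resCh chan *eth.Response, schedule func(*types.Header, common.Hash)) {
	res := <-resCh
	headers := []*types.Header(*res.Res.(*eth.BlockHeadersPacket))
	hashes := res.Meta.([]common.Hash)
	for i, header := range headers {
		schedule(header, hashes[i])
	}
	res.Done <- nil // only signal Done once the metadata has been consumed
}
```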
 
 // RequestHeadersByHash fetches a batch of blocks' headers corresponding to the
 // specified header query, based on the hash of an origin block.
-func (p *Peer) RequestHeadersByHash(origin common.Hash, amount int, skip int, reverse bool) error {
+func (p *Peer) RequestHeadersByHash(origin common.Hash, amount int, skip int, reverse bool, sink chan *Response) (*Request, error) {
 	p.Log().Debug("Fetching batch of headers", "count", amount, "fromhash", origin, "skip", skip, "reverse", reverse)
 	id := rand.Uint64()
 
-	requestTracker.Track(p.id, p.version, GetBlockHeadersMsg, BlockHeadersMsg, id)
-	return p2p.Send(p.rw, GetBlockHeadersMsg, &GetBlockHeadersPacket66{
-		RequestId: id,
-		GetBlockHeadersPacket: &GetBlockHeadersPacket{
-			Origin:  HashOrNumber{Hash: origin},
-			Amount:  uint64(amount),
-			Skip:    uint64(skip),
-			Reverse: reverse,
+	req := &Request{
+		id:   id,
+		sink: sink,
+		code: GetBlockHeadersMsg,
+		want: BlockHeadersMsg,
+		data: &GetBlockHeadersPacket66{
+			RequestId: id,
+			GetBlockHeadersPacket: &GetBlockHeadersPacket{
+				Origin:  HashOrNumber{Hash: origin},
+				Amount:  uint64(amount),
+				Skip:    uint64(skip),
+				Reverse: reverse,
+			},
 		},
-	})
+	}
+	if err := p.dispatchRequest(req); err != nil {
+		return nil, err
+	}
+	return req, nil
 }
 
 // RequestHeadersByNumber fetches a batch of blocks' headers corresponding to the
 // specified header query, based on the number of an origin block.
-func (p *Peer) RequestHeadersByNumber(origin uint64, amount int, skip int, reverse bool) error {
+func (p *Peer) RequestHeadersByNumber(origin uint64, amount int, skip int, reverse bool, sink chan *Response) (*Request, error) {
 	p.Log().Debug("Fetching batch of headers", "count", amount, "fromnum", origin, "skip", skip, "reverse", reverse)
 	id := rand.Uint64()
 
-	requestTracker.Track(p.id, p.version, GetBlockHeadersMsg, BlockHeadersMsg, id)
-	return p2p.Send(p.rw, GetBlockHeadersMsg, &GetBlockHeadersPacket66{
-		RequestId: id,
-		GetBlockHeadersPacket: &GetBlockHeadersPacket{
-			Origin:  HashOrNumber{Number: origin},
-			Amount:  uint64(amount),
-			Skip:    uint64(skip),
-			Reverse: reverse,
+	req := &Request{
+		id:   id,
+		sink: sink,
+		code: GetBlockHeadersMsg,
+		want: BlockHeadersMsg,
+		data: &GetBlockHeadersPacket66{
+			RequestId: id,
+			GetBlockHeadersPacket: &GetBlockHeadersPacket{
+				Origin:  HashOrNumber{Number: origin},
+				Amount:  uint64(amount),
+				Skip:    uint64(skip),
+				Reverse: reverse,
+			},
 		},
-	})
+	}
+	if err := p.dispatchRequest(req); err != nil {
+		return nil, err
+	}
+	return req, nil
 }
 
 // RequestBodies fetches a batch of blocks' bodies corresponding to the hashes
 // specified.
-func (p *Peer) RequestBodies(hashes []common.Hash) error {
+func (p *Peer) RequestBodies(hashes []common.Hash, sink chan *Response) (*Request, error) {
 	p.Log().Debug("Fetching batch of block bodies", "count", len(hashes))
 	id := rand.Uint64()
 
-	requestTracker.Track(p.id, p.version, GetBlockBodiesMsg, BlockBodiesMsg, id)
-	return p2p.Send(p.rw, GetBlockBodiesMsg, &GetBlockBodiesPacket66{
-		RequestId:            id,
-		GetBlockBodiesPacket: hashes,
-	})
+	req := &Request{
+		id:   id,
+		sink: sink,
+		code: GetBlockBodiesMsg,
+		want: BlockBodiesMsg,
+		data: &GetBlockBodiesPacket66{
+			RequestId:            id,
+			GetBlockBodiesPacket: hashes,
+		},
+	}
+	if err := p.dispatchRequest(req); err != nil {
+		return nil, err
+	}
+	return req, nil
 }
 
 // RequestNodeData fetches a batch of arbitrary data from a node's known state
 // data, corresponding to the specified hashes.
-func (p *Peer) RequestNodeData(hashes []common.Hash) error { +func (p *Peer) RequestNodeData(hashes []common.Hash, sink chan *Response) (*Request, error) { p.Log().Debug("Fetching batch of state data", "count", len(hashes)) id := rand.Uint64() - requestTracker.Track(p.id, p.version, GetNodeDataMsg, NodeDataMsg, id) - return p2p.Send(p.rw, GetNodeDataMsg, &GetNodeDataPacket66{ - RequestId: id, - GetNodeDataPacket: hashes, - }) + req := &Request{ + id: id, + sink: sink, + code: GetNodeDataMsg, + want: NodeDataMsg, + data: &GetNodeDataPacket66{ + RequestId: id, + GetNodeDataPacket: hashes, + }, + } + if err := p.dispatchRequest(req); err != nil { + return nil, err + } + return req, nil } // RequestReceipts fetches a batch of transaction receipts from a remote node. -func (p *Peer) RequestReceipts(hashes []common.Hash) error { +func (p *Peer) RequestReceipts(hashes []common.Hash, sink chan *Response) (*Request, error) { p.Log().Debug("Fetching batch of receipts", "count", len(hashes)) id := rand.Uint64() - requestTracker.Track(p.id, p.version, GetReceiptsMsg, ReceiptsMsg, id) - return p2p.Send(p.rw, GetReceiptsMsg, &GetReceiptsPacket66{ - RequestId: id, - GetReceiptsPacket: hashes, - }) + req := &Request{ + id: id, + sink: sink, + code: GetReceiptsMsg, + want: ReceiptsMsg, + data: &GetReceiptsPacket66{ + RequestId: id, + GetReceiptsPacket: hashes, + }, + } + if err := p.dispatchRequest(req); err != nil { + return nil, err + } + return req, nil } // RequestTxs fetches a batch of transactions from a remote node. diff --git a/eth/protocols/snap/handler.go b/eth/protocols/snap/handler.go index e04b6bd9d..59291f4e0 100644 --- a/eth/protocols/snap/handler.go +++ b/eth/protocols/snap/handler.go @@ -99,8 +99,8 @@ func MakeProtocols(backend Backend, dnsdisc enode.Iterator) []p2p.Protocol { Version: version, Length: protocolLengths[version], Run: func(p *p2p.Peer, rw p2p.MsgReadWriter) error { - return backend.RunPeer(newPeer(version, p, rw), func(peer *Peer) error { - return handle(backend, peer) + return backend.RunPeer(NewPeer(version, p, rw), func(peer *Peer) error { + return Handle(backend, peer) }) }, NodeInfo: func() interface{} { @@ -116,9 +116,9 @@ func MakeProtocols(backend Backend, dnsdisc enode.Iterator) []p2p.Protocol { return protocols } -// handle is the callback invoked to manage the life cycle of a `snap` peer. +// Handle is the callback invoked to manage the life cycle of a `snap` peer. // When this function terminates, the peer is disconnected. 
-func handle(backend Backend, peer *Peer) error { +func Handle(backend Backend, peer *Peer) error { for { if err := handleMessage(backend, peer); err != nil { peer.Log().Debug("Message handling failed in `snap`", "err", err) @@ -161,60 +161,10 @@ func handleMessage(backend Backend, peer *Peer) error { if err := msg.Decode(&req); err != nil { return fmt.Errorf("%w: message %v: %v", errDecode, msg, err) } - if req.Bytes > softResponseLimit { - req.Bytes = softResponseLimit - } - // Retrieve the requested state and bail out if non existent - tr, err := trie.New(req.Root, backend.Chain().StateCache().TrieDB()) - if err != nil { - return p2p.Send(peer.rw, AccountRangeMsg, &AccountRangePacket{ID: req.ID}) - } - it, err := backend.Chain().Snapshots().AccountIterator(req.Root, req.Origin) - if err != nil { - return p2p.Send(peer.rw, AccountRangeMsg, &AccountRangePacket{ID: req.ID}) - } - // Iterate over the requested range and pile accounts up - var ( - accounts []*AccountData - size uint64 - last common.Hash - ) - for it.Next() && size < req.Bytes { - hash, account := it.Hash(), common.CopyBytes(it.Account()) - - // Track the returned interval for the Merkle proofs - last = hash + // Service the request, potentially returning nothing in case of errors + accounts, proofs := ServiceGetAccountRangeQuery(backend.Chain(), &req) - // Assemble the reply item - size += uint64(common.HashLength + len(account)) - accounts = append(accounts, &AccountData{ - Hash: hash, - Body: account, - }) - // If we've exceeded the request threshold, abort - if bytes.Compare(hash[:], req.Limit[:]) >= 0 { - break - } - } - it.Release() - - // Generate the Merkle proofs for the first and last account - proof := light.NewNodeSet() - if err := tr.Prove(req.Origin[:], 0, proof); err != nil { - log.Warn("Failed to prove account range", "origin", req.Origin, "err", err) - return p2p.Send(peer.rw, AccountRangeMsg, &AccountRangePacket{ID: req.ID}) - } - if last != (common.Hash{}) { - if err := tr.Prove(last[:], 0, proof); err != nil { - log.Warn("Failed to prove account range", "last", last, "err", err) - return p2p.Send(peer.rw, AccountRangeMsg, &AccountRangePacket{ID: req.ID}) - } - } - var proofs [][]byte - for _, blob := range proof.NodeList() { - proofs = append(proofs, blob) - } - // Send back anything accumulated + // Send back anything accumulated (or empty in case of errors) return p2p.Send(peer.rw, AccountRangeMsg, &AccountRangePacket{ ID: req.ID, Accounts: accounts, @@ -243,111 +193,10 @@ func handleMessage(backend Backend, peer *Peer) error { if err := msg.Decode(&req); err != nil { return fmt.Errorf("%w: message %v: %v", errDecode, msg, err) } - if req.Bytes > softResponseLimit { - req.Bytes = softResponseLimit - } - // TODO(karalabe): Do we want to enforce > 0 accounts and 1 account if origin is set? 
- // TODO(karalabe): - Logging locally is not ideal as remote faulst annoy the local user - // TODO(karalabe): - Dropping the remote peer is less flexible wrt client bugs (slow is better than non-functional) - - // Calculate the hard limit at which to abort, even if mid storage trie - hardLimit := uint64(float64(req.Bytes) * (1 + stateLookupSlack)) - - // Retrieve storage ranges until the packet limit is reached - var ( - slots [][]*StorageData - proofs [][]byte - size uint64 - ) - for _, account := range req.Accounts { - // If we've exceeded the requested data limit, abort without opening - // a new storage range (that we'd need to prove due to exceeded size) - if size >= req.Bytes { - break - } - // The first account might start from a different origin and end sooner - var origin common.Hash - if len(req.Origin) > 0 { - origin, req.Origin = common.BytesToHash(req.Origin), nil - } - var limit = common.HexToHash("0xffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff") - if len(req.Limit) > 0 { - limit, req.Limit = common.BytesToHash(req.Limit), nil - } - // Retrieve the requested state and bail out if non existent - it, err := backend.Chain().Snapshots().StorageIterator(req.Root, account, origin) - if err != nil { - return p2p.Send(peer.rw, StorageRangesMsg, &StorageRangesPacket{ID: req.ID}) - } - // Iterate over the requested range and pile slots up - var ( - storage []*StorageData - last common.Hash - abort bool - ) - for it.Next() { - if size >= hardLimit { - abort = true - break - } - hash, slot := it.Hash(), common.CopyBytes(it.Slot()) - - // Track the returned interval for the Merkle proofs - last = hash + // Service the request, potentially returning nothing in case of errors + slots, proofs := ServiceGetStorageRangesQuery(backend.Chain(), &req) - // Assemble the reply item - size += uint64(common.HashLength + len(slot)) - storage = append(storage, &StorageData{ - Hash: hash, - Body: slot, - }) - // If we've exceeded the request threshold, abort - if bytes.Compare(hash[:], limit[:]) >= 0 { - break - } - } - slots = append(slots, storage) - it.Release() - - // Generate the Merkle proofs for the first and last storage slot, but - // only if the response was capped. If the entire storage trie included - // in the response, no need for any proofs. 
- if origin != (common.Hash{}) || abort { - // Request started at a non-zero hash or was capped prematurely, add - // the endpoint Merkle proofs - accTrie, err := trie.New(req.Root, backend.Chain().StateCache().TrieDB()) - if err != nil { - return p2p.Send(peer.rw, StorageRangesMsg, &StorageRangesPacket{ID: req.ID}) - } - var acc types.StateAccount - if err := rlp.DecodeBytes(accTrie.Get(account[:]), &acc); err != nil { - return p2p.Send(peer.rw, StorageRangesMsg, &StorageRangesPacket{ID: req.ID}) - } - stTrie, err := trie.New(acc.Root, backend.Chain().StateCache().TrieDB()) - if err != nil { - return p2p.Send(peer.rw, StorageRangesMsg, &StorageRangesPacket{ID: req.ID}) - } - proof := light.NewNodeSet() - if err := stTrie.Prove(origin[:], 0, proof); err != nil { - log.Warn("Failed to prove storage range", "origin", req.Origin, "err", err) - return p2p.Send(peer.rw, StorageRangesMsg, &StorageRangesPacket{ID: req.ID}) - } - if last != (common.Hash{}) { - if err := stTrie.Prove(last[:], 0, proof); err != nil { - log.Warn("Failed to prove storage range", "last", last, "err", err) - return p2p.Send(peer.rw, StorageRangesMsg, &StorageRangesPacket{ID: req.ID}) - } - } - for _, blob := range proof.NodeList() { - proofs = append(proofs, blob) - } - // Proof terminates the reply as proofs are only added if a node - // refuses to serve more data (exception when a contract fetch is - // finishing, but that's that). - break - } - } - // Send back anything accumulated + // Send back anything accumulated (or empty in case of errors) return p2p.Send(peer.rw, StorageRangesMsg, &StorageRangesPacket{ ID: req.ID, Slots: slots, @@ -378,31 +227,10 @@ func handleMessage(backend Backend, peer *Peer) error { if err := msg.Decode(&req); err != nil { return fmt.Errorf("%w: message %v: %v", errDecode, msg, err) } - if req.Bytes > softResponseLimit { - req.Bytes = softResponseLimit - } - if len(req.Hashes) > maxCodeLookups { - req.Hashes = req.Hashes[:maxCodeLookups] - } - // Retrieve bytecodes until the packet size limit is reached - var ( - codes [][]byte - bytes uint64 - ) - for _, hash := range req.Hashes { - if hash == emptyKeccakCodeHash { - // Peers should not request the empty code, but if they do, at - // least sent them back a correct response without db lookups - codes = append(codes, []byte{}) - } else if blob, err := backend.Chain().ContractCode(hash); err == nil { - codes = append(codes, blob) - bytes += uint64(len(blob)) - } - if bytes > req.Bytes { - break - } - } - // Send back anything accumulated + // Service the request, potentially returning nothing in case of errors + codes := ServiceGetByteCodesQuery(backend.Chain(), &req) + + // Send back anything accumulated (or empty in case of errors) return p2p.Send(peer.rw, ByteCodesMsg, &ByteCodesPacket{ ID: req.ID, Codes: codes, @@ -424,80 +252,12 @@ func handleMessage(backend Backend, peer *Peer) error { if err := msg.Decode(&req); err != nil { return fmt.Errorf("%w: message %v: %v", errDecode, msg, err) } - if req.Bytes > softResponseLimit { - req.Bytes = softResponseLimit - } - // Make sure we have the state associated with the request - triedb := backend.Chain().StateCache().TrieDB() - - accTrie, err := trie.NewSecure(req.Root, triedb) + // Service the request, potentially returning nothing in case of errors + nodes, err := ServiceGetTrieNodesQuery(backend.Chain(), &req, start) if err != nil { - // We don't have the requested state available, bail out - return p2p.Send(peer.rw, TrieNodesMsg, &TrieNodesPacket{ID: req.ID}) - } - snap := 
backend.Chain().Snapshots().Snapshot(req.Root) - if snap == nil { - // We don't have the requested state snapshotted yet, bail out. - // In reality we could still serve using the account and storage - // tries only, but let's protect the node a bit while it's doing - // snapshot generation. - return p2p.Send(peer.rw, TrieNodesMsg, &TrieNodesPacket{ID: req.ID}) - } - // Retrieve trie nodes until the packet size limit is reached - var ( - nodes [][]byte - bytes uint64 - loads int // Trie hash expansions to cound database reads - ) - for _, pathset := range req.Paths { - switch len(pathset) { - case 0: - // Ensure we penalize invalid requests - return fmt.Errorf("%w: zero-item pathset requested", errBadRequest) - - case 1: - // If we're only retrieving an account trie node, fetch it directly - blob, resolved, err := accTrie.TryGetNode(pathset[0]) - loads += resolved // always account database reads, even for failures - if err != nil { - break - } - nodes = append(nodes, blob) - bytes += uint64(len(blob)) - - default: - // Storage slots requested, open the storage trie and retrieve from there - account, err := snap.Account(common.BytesToHash(pathset[0])) - loads++ // always account database reads, even for failures - if err != nil || account == nil { - break - } - stTrie, err := trie.NewSecure(common.BytesToHash(account.Root), triedb) - loads++ // always account database reads, even for failures - if err != nil { - break - } - for _, path := range pathset[1:] { - blob, resolved, err := stTrie.TryGetNode(path) - loads += resolved // always account database reads, even for failures - if err != nil { - break - } - nodes = append(nodes, blob) - bytes += uint64(len(blob)) - - // Sanity check limits to avoid DoS on the store trie loads - if bytes > req.Bytes || loads > maxTrieNodeLookups || time.Since(start) > maxTrieNodeTimeSpent { - break - } - } - } - // Abort request processing if we've exceeded our limits - if bytes > req.Bytes || loads > maxTrieNodeLookups || time.Since(start) > maxTrieNodeTimeSpent { - break - } + return err } - // Send back anything accumulated + // Send back anything accumulated (or empty in case of errors) return p2p.Send(peer.rw, TrieNodesMsg, &TrieNodesPacket{ ID: req.ID, Nodes: nodes, @@ -518,6 +278,285 @@ func handleMessage(backend Backend, peer *Peer) error { } } +// ServiceGetAccountRangeQuery assembles the response to an account range query. +// It is exposed to allow external packages to test protocol behavior. 
+func ServiceGetAccountRangeQuery(chain *core.BlockChain, req *GetAccountRangePacket) ([]*AccountData, [][]byte) { + if req.Bytes > softResponseLimit { + req.Bytes = softResponseLimit + } + // Retrieve the requested state and bail out if non existent + tr, err := trie.New(req.Root, chain.StateCache().TrieDB()) + if err != nil { + return nil, nil + } + it, err := chain.Snapshots().AccountIterator(req.Root, req.Origin) + if err != nil { + return nil, nil + } + // Iterate over the requested range and pile accounts up + var ( + accounts []*AccountData + size uint64 + last common.Hash + ) + for it.Next() { + hash, account := it.Hash(), common.CopyBytes(it.Account()) + + // Track the returned interval for the Merkle proofs + last = hash + + // Assemble the reply item + size += uint64(common.HashLength + len(account)) + accounts = append(accounts, &AccountData{ + Hash: hash, + Body: account, + }) + // If we've exceeded the request threshold, abort + if bytes.Compare(hash[:], req.Limit[:]) >= 0 { + break + } + if size > req.Bytes { + break + } + } + it.Release() + + // Generate the Merkle proofs for the first and last account + proof := light.NewNodeSet() + if err := tr.Prove(req.Origin[:], 0, proof); err != nil { + log.Warn("Failed to prove account range", "origin", req.Origin, "err", err) + return nil, nil + } + if last != (common.Hash{}) { + if err := tr.Prove(last[:], 0, proof); err != nil { + log.Warn("Failed to prove account range", "last", last, "err", err) + return nil, nil + } + } + var proofs [][]byte + for _, blob := range proof.NodeList() { + proofs = append(proofs, blob) + } + return accounts, proofs +} + +func ServiceGetStorageRangesQuery(chain *core.BlockChain, req *GetStorageRangesPacket) ([][]*StorageData, [][]byte) { + if req.Bytes > softResponseLimit { + req.Bytes = softResponseLimit + } + // TODO(karalabe): Do we want to enforce > 0 accounts and 1 account if origin is set? 
+	// TODO(karalabe):   - Logging locally is not ideal as remote faults annoy the local user
+	// TODO(karalabe):   - Dropping the remote peer is less flexible wrt client bugs (slow is better than non-functional)
+
+	// Calculate the hard limit at which to abort, even if mid storage trie
+	hardLimit := uint64(float64(req.Bytes) * (1 + stateLookupSlack))
+
+	// Retrieve storage ranges until the packet limit is reached
+	var (
+		slots  [][]*StorageData
+		proofs [][]byte
+		size   uint64
+	)
+	for _, account := range req.Accounts {
+		// If we've exceeded the requested data limit, abort without opening
+		// a new storage range (that we'd need to prove due to exceeded size)
+		if size >= req.Bytes {
+			break
+		}
+		// The first account might start from a different origin and end sooner
+		var origin common.Hash
+		if len(req.Origin) > 0 {
+			origin, req.Origin = common.BytesToHash(req.Origin), nil
+		}
+		var limit = common.HexToHash("0xffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff")
+		if len(req.Limit) > 0 {
+			limit, req.Limit = common.BytesToHash(req.Limit), nil
+		}
+		// Retrieve the requested state and bail out if non existent
+		it, err := chain.Snapshots().StorageIterator(req.Root, account, origin)
+		if err != nil {
+			return nil, nil
+		}
+		// Iterate over the requested range and pile slots up
+		var (
+			storage []*StorageData
+			last    common.Hash
+			abort   bool
+		)
+		for it.Next() {
+			if size >= hardLimit {
+				abort = true
+				break
+			}
+			hash, slot := it.Hash(), common.CopyBytes(it.Slot())
+
+			// Track the returned interval for the Merkle proofs
+			last = hash
+
+			// Assemble the reply item
+			size += uint64(common.HashLength + len(slot))
+			storage = append(storage, &StorageData{
+				Hash: hash,
+				Body: slot,
+			})
+			// If we've exceeded the request threshold, abort
+			if bytes.Compare(hash[:], limit[:]) >= 0 {
+				break
+			}
+		}
+		slots = append(slots, storage)
+		it.Release()
+
+		// Generate the Merkle proofs for the first and last storage slot, but
+		// only if the response was capped. If the entire storage trie is included
+		// in the response, no need for any proofs.
+		if origin != (common.Hash{}) || abort {
+			// Request started at a non-zero hash or was capped prematurely, add
+			// the endpoint Merkle proofs
+			accTrie, err := trie.New(req.Root, chain.StateCache().TrieDB())
+			if err != nil {
+				return nil, nil
+			}
+			var acc types.StateAccount
+			if err := rlp.DecodeBytes(accTrie.Get(account[:]), &acc); err != nil {
+				return nil, nil
+			}
+			stTrie, err := trie.New(acc.Root, chain.StateCache().TrieDB())
+			if err != nil {
+				return nil, nil
+			}
+			proof := light.NewNodeSet()
+			if err := stTrie.Prove(origin[:], 0, proof); err != nil {
+				log.Warn("Failed to prove storage range", "origin", req.Origin, "err", err)
+				return nil, nil
+			}
+			if last != (common.Hash{}) {
+				if err := stTrie.Prove(last[:], 0, proof); err != nil {
+					log.Warn("Failed to prove storage range", "last", last, "err", err)
+					return nil, nil
+				}
+			}
+			for _, blob := range proof.NodeList() {
+				proofs = append(proofs, blob)
+			}
+			// Proof terminates the reply as proofs are only added if a node
+			// refuses to serve more data (exception when a contract fetch is
+			// finishing, but that's that).
+			break
+		}
+	}
+	return slots, proofs
+}
+
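The proofs assembled above are meant to be checked on the requesting side. A hedged sketch of that client-side check for the analogous account-range reply, assuming trie.VerifyRangeProof and light.NodeList behave as in the go-ethereum release this diff is based on:

// Hedged sketch: verify an account-range reply against the requested root.
func verifyAccountRange(root, origin common.Hash, accounts []*snap.AccountData, proof [][]byte) error {
	keys := make([][]byte, len(accounts))
	vals := make([][]byte, len(accounts))
	for i, acc := range accounts {
		keys[i] = acc.Hash.Bytes()
		vals[i] = acc.Body
	}
	// Load the returned proof nodes into a node set the verifier can read.
	var nodes light.NodeList
	for _, blob := range proof {
		nodes = append(nodes, blob)
	}
	var last []byte
	if len(keys) > 0 {
		last = keys[len(keys)-1]
	}
	// The boolean result reports whether more accounts exist past `last`;
	// only the error matters for accepting or rejecting the response.
	_, err := trie.VerifyRangeProof(root, origin.Bytes(), last, keys, vals, nodes.NodeSet())
	return err
}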
+// ServiceGetByteCodesQuery assembles the response to a byte codes query.
+// It is exposed to allow external packages to test protocol behavior.
+func ServiceGetByteCodesQuery(chain *core.BlockChain, req *GetByteCodesPacket) [][]byte {
+	if req.Bytes > softResponseLimit {
+		req.Bytes = softResponseLimit
+	}
+	if len(req.Hashes) > maxCodeLookups {
+		req.Hashes = req.Hashes[:maxCodeLookups]
+	}
+	// Retrieve bytecodes until the packet size limit is reached
+	var (
+		codes [][]byte
+		bytes uint64
+	)
+	for _, hash := range req.Hashes {
+		if hash == emptyKeccakCodeHash {
+			// Peers should not request the empty code, but if they do, at
+			// least send them back a correct response without db lookups
+			codes = append(codes, []byte{})
+		} else if blob, err := chain.ContractCodeWithPrefix(hash); err == nil {
+			codes = append(codes, blob)
+			bytes += uint64(len(blob))
+		}
+		if bytes > req.Bytes {
+			break
+		}
+	}
+	return codes
+}
+
+// ServiceGetTrieNodesQuery assembles the response to a trie nodes query.
+// It is exposed to allow external packages to test protocol behavior.
+func ServiceGetTrieNodesQuery(chain *core.BlockChain, req *GetTrieNodesPacket, start time.Time) ([][]byte, error) {
+	if req.Bytes > softResponseLimit {
+		req.Bytes = softResponseLimit
+	}
+	// Make sure we have the state associated with the request
+	triedb := chain.StateCache().TrieDB()
+
+	accTrie, err := trie.NewSecure(req.Root, triedb)
+	if err != nil {
+		// We don't have the requested state available, bail out
+		return nil, nil
+	}
+	snap := chain.Snapshots().Snapshot(req.Root)
+	if snap == nil {
+		// We don't have the requested state snapshotted yet, bail out.
+		// In reality we could still serve using the account and storage
+		// tries only, but let's protect the node a bit while it's doing
+		// snapshot generation.
+		return nil, nil
+	}
+	// Retrieve trie nodes until the packet size limit is reached
+	var (
+		nodes [][]byte
+		bytes uint64
+		loads int // Trie hash expansions to count database reads
+	)
+	for _, pathset := range req.Paths {
+		switch len(pathset) {
+		case 0:
+			// Ensure we penalize invalid requests
+			return nil, fmt.Errorf("%w: zero-item pathset requested", errBadRequest)
+
+		case 1:
+			// If we're only retrieving an account trie node, fetch it directly
+			blob, resolved, err := accTrie.TryGetNode(pathset[0])
+			loads += resolved // always account database reads, even for failures
+			if err != nil {
+				break
+			}
+			nodes = append(nodes, blob)
+			bytes += uint64(len(blob))
+
+		default:
+			// Storage slots requested, open the storage trie and retrieve from there
+			account, err := snap.Account(common.BytesToHash(pathset[0]))
+			loads++ // always account database reads, even for failures
+			if err != nil || account == nil {
+				break
+			}
+			stTrie, err := trie.NewSecure(common.BytesToHash(account.Root), triedb)
+			loads++ // always account database reads, even for failures
+			if err != nil {
+				break
+			}
+			for _, path := range pathset[1:] {
+				blob, resolved, err := stTrie.TryGetNode(path)
+				loads += resolved // always account database reads, even for failures
+				if err != nil {
+					break
+				}
+				nodes = append(nodes, blob)
+				bytes += uint64(len(blob))
+
+				// Sanity check limits to avoid DoS on the store trie loads
+				if bytes > req.Bytes || loads > maxTrieNodeLookups || time.Since(start) > maxTrieNodeTimeSpent {
+					break
+				}
+			}
+		}
+		// Abort request processing if we've exceeded our limits
+		if bytes > req.Bytes || loads > maxTrieNodeLookups || time.Since(start) > maxTrieNodeTimeSpent {
+			break
+		}
+	}
+	return nodes, nil
+}
+
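Exporting these Service*Query helpers is what lets external packages, such as the cmd/devp2p test suite, exercise the snap handler without a live peer. A hypothetical test-style call; the chain fixture and the 2 MB byte cap are illustrative, not part of this diff:

// Sketch: drive the bytecode query path directly from a test.
func testByteCodes(t *testing.T, chain *core.BlockChain, codeHash common.Hash) {
	req := &snap.GetByteCodesPacket{
		ID:     1,
		Hashes: []common.Hash{codeHash},
		Bytes:  2 * 1024 * 1024, // illustrative soft cap
	}
	codes := snap.ServiceGetByteCodesQuery(chain, req)
	if len(codes) != 1 {
		t.Fatalf("bytecode count mismatch: got %d, want 1", len(codes))
	}
	if crypto.Keccak256Hash(codes[0]) != codeHash {
		t.Fatalf("returned bytecode does not hash to the requested hash")
	}
}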
 // NodeInfo represents a short summary of the `snap` sub-protocol metadata
 // known about the host peer.
 type NodeInfo struct{}
diff --git a/eth/protocols/snap/peer.go b/eth/protocols/snap/peer.go
index 14ee30111..aa092297a 100644
--- a/eth/protocols/snap/peer.go
+++ b/eth/protocols/snap/peer.go
@@ -33,9 +33,9 @@ type Peer struct {
 	logger log.Logger // Contextual logger with the peer id injected
 }
 
-// newPeer create a wrapper for a network connection and negotiated protocol
+// NewPeer creates a wrapper for a network connection and negotiated protocol
 // version.
-func newPeer(version uint, p *p2p.Peer, rw p2p.MsgReadWriter) *Peer {
+func NewPeer(version uint, p *p2p.Peer, rw p2p.MsgReadWriter) *Peer {
 	id := p.ID().String()
 	return &Peer{
 		id: id,
diff --git a/eth/protocols/snap/protocol.go b/eth/protocols/snap/protocol.go
index ebe2c383f..2dbd27467 100644
--- a/eth/protocols/snap/protocol.go
+++ b/eth/protocols/snap/protocol.go
@@ -27,7 +27,7 @@ import (
 
 // Constants to match up protocol versions and messages
 const (
-	snap1 = 1
+	SNAP1 = 1
 )
 
 // ProtocolName is the official short name of the `snap` protocol used during
@@ -36,11 +36,11 @@ const ProtocolName = "snap"
 
 // ProtocolVersions are the supported versions of the `snap` protocol (first
 // is primary).
-var ProtocolVersions = []uint{snap1}
+var ProtocolVersions = []uint{SNAP1}
 
 // protocolLengths are the number of implemented message corresponding to
 // different protocol versions.
-var protocolLengths = map[uint]uint64{snap1: 8}
+var protocolLengths = map[uint]uint64{SNAP1: 8}
 
 // maxMessageSize is the maximum cap on the size of a protocol message.
 const maxMessageSize = 10 * 1024 * 1024
diff --git a/eth/protocols/snap/sync.go b/eth/protocols/snap/sync.go
index a39ac4bc3..5b4f54101 100644
--- a/eth/protocols/snap/sync.go
+++ b/eth/protocols/snap/sync.go
@@ -330,10 +330,10 @@ type healTask struct {
 	codeTasks map[common.Hash]struct{} // Set of byte code tasks currently queued for retrieval
 }
 
-// syncProgress is a database entry to allow suspending and resuming a snapshot state
+// SyncProgress is a database entry to allow suspending and resuming a snapshot state
 // sync. Opposed to full and fast sync, there is no way to restart a suspended
 // snap sync without prior knowledge of the suspension point.
-type syncProgress struct {
+type SyncProgress struct {
 	Tasks []*accountTask // The suspended account tasks (contract tasks within)
 
 	// Status report during syncing phase
@@ -347,12 +347,15 @@ type syncProgress struct {
 	// Status report during healing phase
 	TrienodeHealSynced uint64             // Number of state trie nodes downloaded
 	TrienodeHealBytes  common.StorageSize // Number of state trie bytes persisted to disk
-	TrienodeHealDups   uint64             // Number of state trie nodes already processed
-	TrienodeHealNops   uint64             // Number of state trie nodes not requested
 	BytecodeHealSynced uint64             // Number of bytecodes downloaded
 	BytecodeHealBytes  common.StorageSize // Number of bytecodes persisted to disk
-	BytecodeHealDups   uint64             // Number of bytecodes already processed
-	BytecodeHealNops   uint64             // Number of bytecodes not requested
+}
+
+// SyncPending is analogous to SyncProgress, but it's used to report on pending
+// ephemeral sync progress that doesn't get persisted into the database.
+type SyncPending struct { + TrienodeHeal uint64 // Number of state trie nodes pending + BytecodeHeal uint64 // Number of bytecodes pending } // SyncPeer abstracts out the methods required for a peer to be synced against @@ -548,7 +551,7 @@ func (s *Syncer) Sync(root common.Hash, cancel chan struct{}) error { s.lock.Lock() s.root = root s.healer = &healTask{ - scheduler: state.NewStateSync(root, s.db, nil, s.onHealState), + scheduler: state.NewStateSync(root, s.db, s.onHealState), trieTasks: make(map[common.Hash]trie.SyncPath), codeTasks: make(map[common.Hash]struct{}), } @@ -676,7 +679,7 @@ func (s *Syncer) Sync(root common.Hash, cancel chan struct{}) error { // loadSyncStatus retrieves a previously aborted sync status from the database, // or generates a fresh one if none is available. func (s *Syncer) loadSyncStatus() { - var progress syncProgress + var progress SyncProgress if status := rawdb.ReadSnapshotSyncStatus(s.db); status != nil { if err := json.Unmarshal(status, &progress); err != nil { @@ -780,7 +783,7 @@ func (s *Syncer) saveSyncStatus() { } } // Store the actual progress markers - progress := &syncProgress{ + progress := &SyncProgress{ Tasks: s.tasks, AccountSynced: s.accountSynced, AccountBytes: s.accountBytes, @@ -800,6 +803,31 @@ func (s *Syncer) saveSyncStatus() { rawdb.WriteSnapshotSyncStatus(s.db, status) } +// Progress returns the snap sync status statistics. +func (s *Syncer) Progress() (*SyncProgress, *SyncPending) { + s.lock.Lock() + defer s.lock.Unlock() + + progress := &SyncProgress{ + AccountSynced: s.accountSynced, + AccountBytes: s.accountBytes, + BytecodeSynced: s.bytecodeSynced, + BytecodeBytes: s.bytecodeBytes, + StorageSynced: s.storageSynced, + StorageBytes: s.storageBytes, + TrienodeHealSynced: s.trienodeHealSynced, + TrienodeHealBytes: s.trienodeHealBytes, + BytecodeHealSynced: s.bytecodeHealSynced, + BytecodeHealBytes: s.bytecodeHealBytes, + } + pending := new(SyncPending) + if s.healer != nil { + pending.TrienodeHeal = uint64(len(s.healer.trieTasks)) + pending.BytecodeHeal = uint64(len(s.healer.codeTasks)) + } + return progress, pending +} + // cleanAccountTasks removes account range retrieval tasks that have already been // completed. func (s *Syncer) cleanAccountTasks() { diff --git a/eth/sync.go b/eth/sync.go index 6a55a4be8..21df1b891 100644 --- a/eth/sync.go +++ b/eth/sync.go @@ -17,6 +17,7 @@ package eth import ( + "errors" "math/big" "sync/atomic" "time" @@ -65,6 +66,7 @@ type chainSyncer struct { handler *handler force *time.Timer forced bool // true when force timer fired + warned time.Time peerEventCh chan struct{} doneCh chan error // non-nil when sync is running } @@ -120,10 +122,18 @@ func (cs *chainSyncer) loop() { select { case <-cs.peerEventCh: // Peer information changed, recheck. - case <-cs.doneCh: + case err := <-cs.doneCh: cs.doneCh = nil cs.force.Reset(forceSyncCycle) cs.forced = false + + // If we've reached the merge transition but no beacon client is available, or + // it has not yet switched us over, keep warning the user that their infra is + // potentially flaky. 
+			if errors.Is(err, downloader.ErrMergeTransition) && time.Since(cs.warned) > 10*time.Second {
+				log.Warn("Local chain is post-merge, waiting for beacon client sync switch-over...")
+				cs.warned = time.Now()
+			}
 		case <-cs.force.C:
 			cs.forced = true
 
@@ -163,10 +173,7 @@ func (cs *chainSyncer) nextSyncOp() *chainSyncOp {
 		return nil
 	}
 	mode, ourTD := cs.modeAndLocalHead()
-	if mode == downloader.FastSync && atomic.LoadUint32(&cs.handler.snapSync) == 1 {
-		// Fast sync via the snap protocol
-		mode = downloader.SnapSync
-	}
+
 	op := peerToSyncOp(mode, peer)
 	if op.td.Cmp(ourTD) <= 0 {
 		return nil // We're in sync.
@@ -180,19 +187,19 @@ func peerToSyncOp(mode downloader.SyncMode, p *eth.Peer) *chainSyncOp {
 }
 
 func (cs *chainSyncer) modeAndLocalHead() (downloader.SyncMode, *big.Int) {
-	// If we're in fast sync mode, return that directly
-	if atomic.LoadUint32(&cs.handler.fastSync) == 1 {
+	// If we're in snap sync mode, return that directly
+	if atomic.LoadUint32(&cs.handler.snapSync) == 1 {
 		block := cs.handler.chain.CurrentFastBlock()
 		td := cs.handler.chain.GetTd(block.Hash(), block.NumberU64())
-		return downloader.FastSync, td
+		return downloader.SnapSync, td
 	}
 	// We are probably in full sync, but we might have rewound to before the
-	// fast sync pivot, check if we should reenable
+	// snap sync pivot, check if we should reenable
 	if pivot := rawdb.ReadLastPivotNumber(cs.handler.database); pivot != nil {
 		if head := cs.handler.chain.CurrentBlock(); head.NumberU64() < *pivot {
 			block := cs.handler.chain.CurrentFastBlock()
 			td := cs.handler.chain.GetTd(block.Hash(), block.NumberU64())
-			return downloader.FastSync, td
+			return downloader.SnapSync, td
 		}
 	}
 	// Nope, we're really full syncing
@@ -209,15 +216,15 @@ func (cs *chainSyncer) startSync(op *chainSyncOp) {
 
 // doSync synchronizes the local blockchain with a remote peer.
 func (h *handler) doSync(op *chainSyncOp) error {
-	if op.mode == downloader.FastSync || op.mode == downloader.SnapSync {
-		// Before launch the fast sync, we have to ensure user uses the same
+	if op.mode == downloader.SnapSync {
+		// Before launching the snap sync, we have to ensure the user uses the same
 		// txlookup limit.
-		// The main concern here is: during the fast sync Geth won't index the
+		// The main concern here is: during the snap sync Geth won't index the
 		// block(generate tx indices) before the HEAD-limit. But if user changes
-		// the limit in the next fast sync(e.g. user kill Geth manually and
+		// the limit in the next snap sync (e.g. the user kills Geth manually and
 		// restart) then it will be hard for Geth to figure out the oldest block
 		// has been indexed. So here for the user-experience wise, it's non-optimal
-		// that user can't change limit during the fast sync. If changed, Geth
+		// that the user can't change the limit during the snap sync. If changed, Geth
 		// will just blindly use the original one.
limit := h.chain.TxLookupLimit() if stored := rawdb.ReadFastTxLookupLimit(h.database); stored == nil { @@ -227,15 +234,11 @@ func (h *handler) doSync(op *chainSyncOp) error { log.Warn("Update txLookup limit", "provided", limit, "updated", *stored) } } - // Run the sync cycle, and disable fast sync if we're past the pivot block - err := h.downloader.Synchronise(op.peer.ID(), op.head, op.td, op.mode) + // Run the sync cycle, and disable snap sync if we're past the pivot block + err := h.downloader.LegacySync(op.peer.ID(), op.head, op.td, h.chain.Config().TerminalTotalDifficulty, op.mode) if err != nil { return err } - if atomic.LoadUint32(&h.fastSync) == 1 { - log.Info("Fast sync complete, auto disabling") - atomic.StoreUint32(&h.fastSync, 0) - } if atomic.LoadUint32(&h.snapSync) == 1 { log.Info("Snap sync complete, auto disabling") atomic.StoreUint32(&h.snapSync, 0) diff --git a/eth/sync_test.go b/eth/sync_test.go index 105f76efe..0283e1666 100644 --- a/eth/sync_test.go +++ b/eth/sync_test.go @@ -23,57 +23,74 @@ import ( "github.com/scroll-tech/go-ethereum/eth/downloader" "github.com/scroll-tech/go-ethereum/eth/protocols/eth" + "github.com/scroll-tech/go-ethereum/eth/protocols/snap" "github.com/scroll-tech/go-ethereum/p2p" "github.com/scroll-tech/go-ethereum/p2p/enode" ) -// Tests that fast sync is disabled after a successful sync cycle. -func TestFastSyncDisabling66(t *testing.T) { testFastSyncDisabling(t, eth.ETH66) } +// Tests that snap sync is disabled after a successful sync cycle. +func TestSnapSyncDisabling66(t *testing.T) { testSnapSyncDisabling(t, eth.ETH66, snap.SNAP1) } -// Tests that fast sync gets disabled as soon as a real block is successfully +// Tests that snap sync gets disabled as soon as a real block is successfully // imported into the blockchain. 
-func testFastSyncDisabling(t *testing.T, protocol uint) { +func testSnapSyncDisabling(t *testing.T, ethVer uint, snapVer uint) { t.Parallel() - // Create an empty handler and ensure it's in fast sync mode + // Create an empty handler and ensure it's in snap sync mode empty := newTestHandler() - if atomic.LoadUint32(&empty.handler.fastSync) == 0 { - t.Fatalf("fast sync disabled on pristine blockchain") + if atomic.LoadUint32(&empty.handler.snapSync) == 0 { + t.Fatalf("snap sync disabled on pristine blockchain") } defer empty.close() - // Create a full handler and ensure fast sync ends up disabled + // Create a full handler and ensure snap sync ends up disabled full := newTestHandlerWithBlocks(1024) - if atomic.LoadUint32(&full.handler.fastSync) == 1 { - t.Fatalf("fast sync not disabled on non-empty blockchain") + if atomic.LoadUint32(&full.handler.snapSync) == 1 { + t.Fatalf("snap sync not disabled on non-empty blockchain") } defer full.close() - // Sync up the two handlers - emptyPipe, fullPipe := p2p.MsgPipe() - defer emptyPipe.Close() - defer fullPipe.Close() + // Sync up the two handlers via both `eth` and `snap` + caps := []p2p.Cap{{Name: "eth", Version: ethVer}, {Name: "snap", Version: snapVer}} - emptyPeer := eth.NewPeer(protocol, p2p.NewPeer(enode.ID{1}, "", nil), emptyPipe, empty.txpool) - fullPeer := eth.NewPeer(protocol, p2p.NewPeer(enode.ID{2}, "", nil), fullPipe, full.txpool) - defer emptyPeer.Close() - defer fullPeer.Close() + emptyPipeEth, fullPipeEth := p2p.MsgPipe() + defer emptyPipeEth.Close() + defer fullPipeEth.Close() - go empty.handler.runEthPeer(emptyPeer, func(peer *eth.Peer) error { + emptyPeerEth := eth.NewPeer(ethVer, p2p.NewPeer(enode.ID{1}, "", caps), emptyPipeEth, empty.txpool) + fullPeerEth := eth.NewPeer(ethVer, p2p.NewPeer(enode.ID{2}, "", caps), fullPipeEth, full.txpool) + defer emptyPeerEth.Close() + defer fullPeerEth.Close() + + go empty.handler.runEthPeer(emptyPeerEth, func(peer *eth.Peer) error { return eth.Handle((*ethHandler)(empty.handler), peer) }) - go full.handler.runEthPeer(fullPeer, func(peer *eth.Peer) error { + go full.handler.runEthPeer(fullPeerEth, func(peer *eth.Peer) error { return eth.Handle((*ethHandler)(full.handler), peer) }) + + emptyPipeSnap, fullPipeSnap := p2p.MsgPipe() + defer emptyPipeSnap.Close() + defer fullPipeSnap.Close() + + emptyPeerSnap := snap.NewPeer(snapVer, p2p.NewPeer(enode.ID{1}, "", caps), emptyPipeSnap) + fullPeerSnap := snap.NewPeer(snapVer, p2p.NewPeer(enode.ID{2}, "", caps), fullPipeSnap) + + go empty.handler.runSnapExtension(emptyPeerSnap, func(peer *snap.Peer) error { + return snap.Handle((*snapHandler)(empty.handler), peer) + }) + go full.handler.runSnapExtension(fullPeerSnap, func(peer *snap.Peer) error { + return snap.Handle((*snapHandler)(full.handler), peer) + }) // Wait a bit for the above handlers to start time.Sleep(250 * time.Millisecond) - // Check that fast sync was disabled - op := peerToSyncOp(downloader.FastSync, empty.handler.peers.peerWithHighestTD()) + // Check that snap sync was disabled + op := peerToSyncOp(downloader.SnapSync, empty.handler.peers.peerWithHighestTD()) if err := empty.handler.doSync(op); err != nil { t.Fatal("sync failed:", err) } - if atomic.LoadUint32(&empty.handler.fastSync) == 1 { - t.Fatalf("fast sync not disabled after successful synchronisation") + if atomic.LoadUint32(&empty.handler.snapSync) == 1 { + t.Fatalf("snap sync not disabled after successful synchronisation") } } diff --git a/ethclient/ethclient.go b/ethclient/ethclient.go index 9378b5f57..8a007997f 100644 
--- a/ethclient/ethclient.go
+++ b/ethclient/ethclient.go
@@ -286,14 +286,6 @@ func (ec *Client) TransactionReceipt(ctx context.Context, txHash common.Hash) (*
 	return r, err
 }
 
-type rpcProgress struct {
-	StartingBlock hexutil.Uint64
-	CurrentBlock  hexutil.Uint64
-	HighestBlock  hexutil.Uint64
-	PulledStates  hexutil.Uint64
-	KnownStates   hexutil.Uint64
-}
-
 // SyncProgress retrieves the current progress of the sync algorithm. If there's
 // no sync currently running, it returns nil.
 func (ec *Client) SyncProgress(ctx context.Context) (*ethereum.SyncProgress, error) {
@@ -306,17 +298,11 @@ func (ec *Client) SyncProgress(ctx context.Context) (*ethereum.SyncProgress, err
 	if err := json.Unmarshal(raw, &syncing); err == nil {
 		return nil, nil // Not syncing (always false)
 	}
-	var progress *rpcProgress
+	var progress *ethereum.SyncProgress
 	if err := json.Unmarshal(raw, &progress); err != nil {
 		return nil, err
 	}
-	return &ethereum.SyncProgress{
-		StartingBlock: uint64(progress.StartingBlock),
-		CurrentBlock:  uint64(progress.CurrentBlock),
-		HighestBlock:  uint64(progress.HighestBlock),
-		PulledStates:  uint64(progress.PulledStates),
-		KnownStates:   uint64(progress.KnownStates),
-	}, nil
+	return progress, nil
 }
 
 // SubscribeNewHead subscribes to notifications about the current blockchain head
diff --git a/ethdb/batch.go b/ethdb/batch.go
index 135369331..541f40c83 100644
--- a/ethdb/batch.go
+++ b/ethdb/batch.go
@@ -43,6 +43,9 @@ type Batcher interface {
 	// NewBatch creates a write-only database that buffers changes to its host db
 	// until a final write is called.
 	NewBatch() Batch
+
+	// NewBatchWithSize creates a write-only database batch with pre-allocated buffer.
+	NewBatchWithSize(size int) Batch
 }
 
 // HookedBatch wraps an arbitrary batch where each operation may be hooked into
diff --git a/ethdb/database.go b/ethdb/database.go
index 0a5729c6c..105763676 100644
--- a/ethdb/database.go
+++ b/ethdb/database.go
@@ -64,6 +64,7 @@ type KeyValueStore interface {
 	Iteratee
 	Stater
 	Compacter
+	Snapshotter
 	io.Closer
 }
 
@@ -153,5 +154,6 @@ type Database interface {
 	Iteratee
 	Stater
 	Compacter
+	Snapshotter
 	io.Closer
 }
diff --git a/ethdb/dbtest/testsuite.go b/ethdb/dbtest/testsuite.go
index c49be9a99..93f039402 100644
--- a/ethdb/dbtest/testsuite.go
+++ b/ethdb/dbtest/testsuite.go
@@ -313,6 +313,68 @@ func TestDatabaseSuite(t *testing.T, New func() ethdb.KeyValueStore) {
 		}
 	})
 
+	t.Run("Snapshot", func(t *testing.T) {
+		db := New()
+		defer db.Close()
+
+		initial := map[string]string{
+			"k1": "v1", "k2": "v2", "k3": "", "k4": "",
+		}
+		for k, v := range initial {
+			db.Put([]byte(k), []byte(v))
+		}
+		snapshot, err := db.NewSnapshot()
+		if err != nil {
+			t.Fatal(err)
+		}
+		for k, v := range initial {
+			got, err := snapshot.Get([]byte(k))
+			if err != nil {
+				t.Fatal(err)
+			}
+			if !bytes.Equal(got, []byte(v)) {
+				t.Fatalf("Unexpected value want: %v, got %v", v, got)
+			}
+		}
+
+		// Flush more modifications into the database, ensure the snapshot
+		// isn't affected.
+ var ( + update = map[string]string{"k1": "v1-b", "k3": "v3-b"} + insert = map[string]string{"k5": "v5-b"} + delete = map[string]string{"k2": ""} + ) + for k, v := range update { + db.Put([]byte(k), []byte(v)) + } + for k, v := range insert { + db.Put([]byte(k), []byte(v)) + } + for k := range delete { + db.Delete([]byte(k)) + } + for k, v := range initial { + got, err := snapshot.Get([]byte(k)) + if err != nil { + t.Fatal(err) + } + if !bytes.Equal(got, []byte(v)) { + t.Fatalf("Unexpected value want: %v, got %v", v, got) + } + } + for k := range insert { + got, err := snapshot.Get([]byte(k)) + if err == nil || len(got) != 0 { + t.Fatal("Unexpected value") + } + } + for k := range delete { + got, err := snapshot.Get([]byte(k)) + if err != nil || len(got) == 0 { + t.Fatal("Unexpected deletion") + } + } + }) } func iterateKeys(it ethdb.Iterator) []string { diff --git a/ethdb/leveldb/leveldb.go b/ethdb/leveldb/leveldb.go index 33ff84b54..144960b14 100644 --- a/ethdb/leveldb/leveldb.go +++ b/ethdb/leveldb/leveldb.go @@ -214,6 +214,14 @@ func (db *Database) NewBatch() ethdb.Batch { } } +// NewBatchWithSize creates a write-only database batch with pre-allocated buffer. +func (db *Database) NewBatchWithSize(size int) ethdb.Batch { + return &batch{ + db: db.db, + b: leveldb.MakeBatch(size), + } +} + // NewIterator creates a binary-alphabetical iterator over a subset // of database content with a particular key prefix, starting at a particular // initial key (or after, if it does not exist). @@ -221,6 +229,19 @@ func (db *Database) NewIterator(prefix []byte, start []byte) ethdb.Iterator { return db.db.NewIterator(bytesPrefixRange(prefix, start), nil) } +// NewSnapshot creates a database snapshot based on the current state. +// The created snapshot will not be affected by all following mutations +// happened on the database. +// Note don't forget to release the snapshot once it's used up, otherwise +// the stale data will never be cleaned up by the underlying compactor. +func (db *Database) NewSnapshot() (ethdb.Snapshot, error) { + snap, err := db.db.GetSnapshot() + if err != nil { + return nil, err + } + return &snapshot{db: snap}, nil +} + // Stat returns a particular internal stat of the database. func (db *Database) Stat(property string) (string, error) { return db.db.GetProperty(property) @@ -520,3 +541,26 @@ func bytesPrefixRange(prefix, start []byte) *util.Range { r.Start = append(r.Start, start...) return r } + +// snapshot wraps a leveldb snapshot for implementing the Snapshot interface. +type snapshot struct { + db *leveldb.Snapshot +} + +// Has retrieves if a key is present in the snapshot backing by a key-value +// data store. +func (snap *snapshot) Has(key []byte) (bool, error) { + return snap.db.Has(key, nil) +} + +// Get retrieves the given key if it's present in the snapshot backing by +// key-value data store. +func (snap *snapshot) Get(key []byte) ([]byte, error) { + return snap.db.Get(key, nil) +} + +// Release releases associated resources. Release should always succeed and can +// be called multiple times without causing error. +func (snap *snapshot) Release() { + snap.db.Release() +} diff --git a/ethdb/memorydb/memorydb.go b/ethdb/memorydb/memorydb.go index 8406526e0..b1641e6df 100644 --- a/ethdb/memorydb/memorydb.go +++ b/ethdb/memorydb/memorydb.go @@ -35,6 +35,10 @@ var ( // errMemorydbNotFound is returned if a key is requested that is not found in // the provided memory database. 
errMemorydbNotFound = errors.New("not found") + + // errSnapshotReleased is returned if callers want to retrieve data from a + // released snapshot. + errSnapshotReleased = errors.New("snapshot released") ) // Database is an ephemeral key-value store. Apart from basic data storage @@ -53,7 +57,7 @@ func New() *Database { } } -// NewWithCap returns a wrapped map pre-allocated to the provided capcity with +// NewWithCap returns a wrapped map pre-allocated to the provided capacity with // all the required database interface methods implemented. func NewWithCap(size int) *Database { return &Database{ @@ -129,6 +133,13 @@ func (db *Database) NewBatch() ethdb.Batch { } } +// NewBatchWithSize creates a write-only database batch with pre-allocated buffer. +func (db *Database) NewBatchWithSize(size int) ethdb.Batch { + return &batch{ + db: db, + } +} + // NewIterator creates a binary-alphabetical iterator over a subset // of database content with a particular key prefix, starting at a particular // initial key (or after, if it does not exist). @@ -163,6 +174,13 @@ func (db *Database) NewIterator(prefix []byte, start []byte) ethdb.Iterator { } } +// NewSnapshot creates a database snapshot based on the current state. +// The created snapshot will not be affected by all following mutations +// happened on the database. +func (db *Database) NewSnapshot() (ethdb.Snapshot, error) { + return newSnapshot(db), nil +} + // Stat returns a particular internal stat of the database. func (db *Database) Stat(property string) (string, error) { return "", errors.New("unknown property") @@ -313,3 +331,59 @@ func (it *iterator) Value() []byte { func (it *iterator) Release() { it.keys, it.values = nil, nil } + +// snapshot wraps a batch of key-value entries deep copied from the in-memory +// database for implementing the Snapshot interface. +type snapshot struct { + db map[string][]byte + lock sync.RWMutex +} + +// newSnapshot initializes the snapshot with the given database instance. +func newSnapshot(db *Database) *snapshot { + db.lock.RLock() + defer db.lock.RUnlock() + + copied := make(map[string][]byte) + for key, val := range db.db { + copied[key] = common.CopyBytes(val) + } + return &snapshot{db: copied} +} + +// Has retrieves if a key is present in the snapshot backing by a key-value +// data store. +func (snap *snapshot) Has(key []byte) (bool, error) { + snap.lock.RLock() + defer snap.lock.RUnlock() + + if snap.db == nil { + return false, errSnapshotReleased + } + _, ok := snap.db[string(key)] + return ok, nil +} + +// Get retrieves the given key if it's present in the snapshot backing by +// key-value data store. +func (snap *snapshot) Get(key []byte) ([]byte, error) { + snap.lock.RLock() + defer snap.lock.RUnlock() + + if snap.db == nil { + return nil, errSnapshotReleased + } + if entry, ok := snap.db[string(key)]; ok { + return common.CopyBytes(entry), nil + } + return nil, errMemorydbNotFound +} + +// Release releases associated resources. Release should always succeed and can +// be called multiple times without causing error. +func (snap *snapshot) Release() { + snap.lock.Lock() + defer snap.lock.Unlock() + + snap.db = nil +} diff --git a/ethdb/snapshot.go b/ethdb/snapshot.go new file mode 100644 index 000000000..753e0f6b1 --- /dev/null +++ b/ethdb/snapshot.go @@ -0,0 +1,41 @@ +// Copyright 2018 The go-ethereum Authors +// This file is part of the go-ethereum library. 
+//
+// The go-ethereum library is free software: you can redistribute it and/or modify
+// it under the terms of the GNU Lesser General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+//
+// The go-ethereum library is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU Lesser General Public License for more details.
+//
+// You should have received a copy of the GNU Lesser General Public License
+// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
+
+package ethdb
+
+type Snapshot interface {
+	// Has retrieves if a key is present in the snapshot backed by a key-value
+	// data store.
+	Has(key []byte) (bool, error)
+
+	// Get retrieves the given key if it's present in the snapshot backed by
+	// a key-value data store.
+	Get(key []byte) ([]byte, error)
+
+	// Release releases associated resources. Release should always succeed and can
+	// be called multiple times without causing error.
+	Release()
+}
+
+// Snapshotter wraps the Snapshot method of a backing data store.
+type Snapshotter interface {
+	// NewSnapshot creates a database snapshot based on the current state.
+	// The created snapshot will not be affected by any mutations that happen
+	// on the database afterwards.
+	// Note: don't forget to release the snapshot once it's no longer needed,
+	// otherwise the stale data will never be cleaned up by the underlying
+	// compactor.
+	NewSnapshot() (Snapshot, error)
+}
diff --git a/graphql/graphql.go b/graphql/graphql.go
index ecc704af2..94a962638 100644
--- a/graphql/graphql.go
+++ b/graphql/graphql.go
@@ -1220,32 +1220,66 @@ type SyncState struct {
 func (s *SyncState) StartingBlock() hexutil.Uint64 {
 	return hexutil.Uint64(s.progress.StartingBlock)
 }
-
 func (s *SyncState) CurrentBlock() hexutil.Uint64 {
 	return hexutil.Uint64(s.progress.CurrentBlock)
 }
-
 func (s *SyncState) HighestBlock() hexutil.Uint64 {
 	return hexutil.Uint64(s.progress.HighestBlock)
 }
-
-func (s *SyncState) PulledStates() *hexutil.Uint64 {
-	ret := hexutil.Uint64(s.progress.PulledStates)
-	return &ret
+func (s *SyncState) SyncedAccounts() hexutil.Uint64 {
+	return hexutil.Uint64(s.progress.SyncedAccounts)
 }
-
-func (s *SyncState) KnownStates() *hexutil.Uint64 {
-	ret := hexutil.Uint64(s.progress.KnownStates)
-	return &ret
+func (s *SyncState) SyncedAccountBytes() hexutil.Uint64 {
+	return hexutil.Uint64(s.progress.SyncedAccountBytes)
+}
+func (s *SyncState) SyncedBytecodes() hexutil.Uint64 {
+	return hexutil.Uint64(s.progress.SyncedBytecodes)
+}
+func (s *SyncState) SyncedBytecodeBytes() hexutil.Uint64 {
+	return hexutil.Uint64(s.progress.SyncedBytecodeBytes)
+}
+func (s *SyncState) SyncedStorage() hexutil.Uint64 {
+	return hexutil.Uint64(s.progress.SyncedStorage)
+}
+func (s *SyncState) SyncedStorageBytes() hexutil.Uint64 {
+	return hexutil.Uint64(s.progress.SyncedStorageBytes)
+}
+func (s *SyncState) HealedTrienodes() hexutil.Uint64 {
+	return hexutil.Uint64(s.progress.HealedTrienodes)
+}
+func (s *SyncState) HealedTrienodeBytes() hexutil.Uint64 {
+	return hexutil.Uint64(s.progress.HealedTrienodeBytes)
+}
+func (s *SyncState) HealedBytecodes() hexutil.Uint64 {
+	return hexutil.Uint64(s.progress.HealedBytecodes)
+}
+func (s *SyncState) HealedBytecodeBytes() hexutil.Uint64 {
+	return hexutil.Uint64(s.progress.HealedBytecodeBytes)
+}
+func (s *SyncState) HealingTrienodes() hexutil.Uint64 {
+	return hexutil.Uint64(s.progress.HealingTrienodes)
+}
+func (s *SyncState) HealingBytecode() hexutil.Uint64 {
+	return hexutil.Uint64(s.progress.HealingBytecode)
 }
 
 // Syncing returns false in case the node is currently not syncing with the network. It can be up to date or has not
 // yet received the latest block headers from its pears. In case it is synchronizing:
-// - startingBlock: block number this node started to synchronise from
-// - currentBlock:  block number this node is currently importing
-// - highestBlock:  block number of the highest block header this node has received from peers
-// - pulledStates:  number of state entries processed until now
-// - knownStates:   number of known state entries that still need to be pulled
+// - startingBlock:       block number this node started to synchronise from
+// - currentBlock:        block number this node is currently importing
+// - highestBlock:        block number of the highest block header this node has received from peers
+// - syncedAccounts:      number of accounts downloaded
+// - syncedAccountBytes:  number of account trie bytes persisted to disk
+// - syncedBytecodes:     number of bytecodes downloaded
+// - syncedBytecodeBytes: number of bytecode bytes downloaded
+// - syncedStorage:       number of storage slots downloaded
+// - syncedStorageBytes:  number of storage trie bytes persisted to disk
+// - healedTrienodes:     number of state trie nodes downloaded
+// - healedTrienodeBytes: number of state trie bytes persisted to disk
+// - healedBytecodes:     number of bytecodes downloaded
+// - healedBytecodeBytes: number of bytecodes persisted to disk
+// - healingTrienodes:    number of state trie nodes pending
+// - healingBytecode:     number of bytecodes pending
 func (r *Resolver) Syncing() (*SyncState, error) {
 	progress := r.backend.SyncProgress()
diff --git a/graphql/schema.go b/graphql/schema.go
index dfd094a42..86060cd23 100644
--- a/graphql/schema.go
+++ b/graphql/schema.go
@@ -297,12 +297,6 @@ const schema string = `
 		currentBlock: Long!
 		# HighestBlock is the latest known block number.
 		highestBlock: Long!
-		# PulledStates is the number of state entries fetched so far, or null
-		# if this is not known or not relevant.
-		pulledStates: Long
-		# KnownStates is the number of states the node knows of so far, or null
-		# if this is not known or not relevant.
-		knownStates: Long
 	}
 
 	# Pending represents the current pending state.
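Taken together, the new Snapshotter surface and its two implementations give every ethdb backend point-in-time reads. A minimal usage sketch against the in-memory implementation added above:

package main

import (
	"fmt"

	"github.com/scroll-tech/go-ethereum/ethdb/memorydb"
)

func main() {
	db := memorydb.New()
	db.Put([]byte("k1"), []byte("v1"))

	snap, _ := db.NewSnapshot() // point-in-time view of the store
	defer snap.Release()        // always release, or a leveldb-backed snapshot pins stale data

	db.Put([]byte("k1"), []byte("v2")) // mutate after taking the snapshot

	val, _ := snap.Get([]byte("k1"))
	fmt.Printf("snapshot still sees %s\n", val) // prints: snapshot still sees v1
}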
diff --git a/interfaces.go b/interfaces.go index 4ccb2e9c7..a43161e70 100644 --- a/interfaces.go +++ b/interfaces.go @@ -101,8 +101,22 @@ type SyncProgress struct { StartingBlock uint64 // Block number where sync began CurrentBlock uint64 // Current block number where sync is at HighestBlock uint64 // Highest alleged block number in the chain - PulledStates uint64 // Number of state trie entries already downloaded - KnownStates uint64 // Total number of state trie entries known about + + // Fields belonging to snap sync + SyncedAccounts uint64 // Number of accounts downloaded + SyncedAccountBytes uint64 // Number of account trie bytes persisted to disk + SyncedBytecodes uint64 // Number of bytecodes downloaded + SyncedBytecodeBytes uint64 // Number of bytecode bytes downloaded + SyncedStorage uint64 // Number of storage slots downloaded + SyncedStorageBytes uint64 // Number of storage trie bytes persisted to disk + + HealedTrienodes uint64 // Number of state trie nodes downloaded + HealedTrienodeBytes uint64 // Number of state trie bytes persisted to disk + HealedBytecodes uint64 // Number of bytecodes downloaded + HealedBytecodeBytes uint64 // Number of bytecodes persisted to disk + + HealingTrienodes uint64 // Number of state trie nodes pending + HealingBytecode uint64 // Number of bytecodes pending } // ChainSyncReader wraps access to the node's current sync status. If there's no diff --git a/internal/ethapi/api.go b/internal/ethapi/api.go index d6e2591a1..574ec6619 100644 --- a/internal/ethapi/api.go +++ b/internal/ethapi/api.go @@ -133,11 +133,21 @@ func (s *PublicEthereumAPI) Syncing() (interface{}, error) { } // Otherwise gather the block sync stats return map[string]interface{}{ - "startingBlock": hexutil.Uint64(progress.StartingBlock), - "currentBlock": hexutil.Uint64(progress.CurrentBlock), - "highestBlock": hexutil.Uint64(progress.HighestBlock), - "pulledStates": hexutil.Uint64(progress.PulledStates), - "knownStates": hexutil.Uint64(progress.KnownStates), + "startingBlock": hexutil.Uint64(progress.StartingBlock), + "currentBlock": hexutil.Uint64(progress.CurrentBlock), + "highestBlock": hexutil.Uint64(progress.HighestBlock), + "syncedAccounts": hexutil.Uint64(progress.SyncedAccounts), + "syncedAccountBytes": hexutil.Uint64(progress.SyncedAccountBytes), + "syncedBytecodes": hexutil.Uint64(progress.SyncedBytecodes), + "syncedBytecodeBytes": hexutil.Uint64(progress.SyncedBytecodeBytes), + "syncedStorage": hexutil.Uint64(progress.SyncedStorage), + "syncedStorageBytes": hexutil.Uint64(progress.SyncedStorageBytes), + "healedTrienodes": hexutil.Uint64(progress.HealedTrienodes), + "healedTrienodeBytes": hexutil.Uint64(progress.HealedTrienodeBytes), + "healedBytecodes": hexutil.Uint64(progress.HealedBytecodes), + "healedBytecodeBytes": hexutil.Uint64(progress.HealedBytecodeBytes), + "healingTrienodes": hexutil.Uint64(progress.HealingTrienodes), + "healingBytecode": hexutil.Uint64(progress.HealingBytecode), }, nil } diff --git a/internal/jsre/deps/bindata.go b/internal/jsre/deps/bindata.go index 6f079c2ba..3e7d3a136 100644 --- a/internal/jsre/deps/bindata.go +++ b/internal/jsre/deps/bindata.go @@ -1,7 +1,7 @@ // Code generated by go-bindata. DO NOT EDIT. 
diff --git a/internal/jsre/deps/bindata.go b/internal/jsre/deps/bindata.go
index 6f079c2ba..3e7d3a136 100644
--- a/internal/jsre/deps/bindata.go
+++ b/internal/jsre/deps/bindata.go
@@ -1,7 +1,7 @@
 // Code generated by go-bindata. DO NOT EDIT.
 // sources:
 // bignumber.js (17.273kB)
-// web3.js (401.764kB)
+// web3.js (402.466kB)
 
 package deps
 
@@ -90,7 +90,7 @@ func bignumberJs() (*asset, error) {
 	return a, nil
 }
 
-var _web3Js = []byte("\x1f\x8b\x08\x00\x00\x00\x00\x00\x00\xff...")
[generated data omitted: the remainder of this hunk swaps the old gzip-compressed web3.js payload for the regenerated one, roughly 400kB of escaped byte data with no reviewable content]
x86\xdc\x07\x38\xb8\x31\xb2\x70\x1d\x6a\xf7\xe0\xde\x70\x41\xde\x0a\xca\x51\x87\xaa\xab\xf2\x12\xc4\x29\xd1\xf5\x50\xde\xd1\xf4\xda\x7c\x74\xb0\x42\x3d\x43\x2b\x84\x43\xf3\x8a\x71\xe1\xa2\xd5\xf4\xb0\xd2\x88\xa4\x2b\xf5\x1b\x0d\x27\x8f\xa6\x49\x50\x2c\x33\x7b\x38\x38\xbd\x6c\x3c\x18\xa6\x7c\x3c\x0a\xaa\x6a\x40\xe0\xc1\xa0\x79\xff\xc5\x13\x07\x49\xde\x82\x23\x05\x49\xa8\x54\x4b\x45\x0a\x41\x89\x27\x51\x12\xc4\x7e\xb3\x67\x5e\x87\xcf\xa8\x14\xaf\x6b\x2b\x4b\x54\x6f\x20\x45\xe6\xd1\x33\x9a\x5d\x16\x33\xae\xb2\x9e\x8f\x22\x60\x19\x29\x8f\x12\x0d\x7d\x13\x71\x16\x2a\xb1\xe5\xf1\x0d\x22\xba\xe3\xb8\xb6\x53\x8b\x5b\xfd\x42\xaf\x00\xef\x3d\x88\x68\x7f\x1d\xca\x41\x47\x9d\x6b\x11\xa9\xd7\x5c\xb7\x76\x1e\xb7\x9f\xa2\x72\xfe\xb2\x55\x38\x17\xe4\x8e\x3a\x25\xb5\x77\xba\xae\x4a\x53\xcc\xd3\x47\xd9\xb1\xdb\xb2\x74\x14\xc3\xa2\x92\x9f\x83\xe7\x65\x11\x4c\x5b\x94\x3f\x89\x20\xc6\x94\x65\x0d\x20\x80\xf0\xfc\x31\xba\xd1\xc9\xc9\x32\x8e\x4b\x9e\xb8\x68\xcd\x22\x71\x6f\xff\x4d\x85\x30\xd4\x57\x16\x9a\x11\x32\xad\xd1\x9c\x55\x5c\xf7\x0b\xec\x3b\x8f\x63\x3a\x7c\xfb\xea\x91\x33\xfb\xea\xbc\x6b\x07\xd7\x5b\xa9\x36\xe8\x7b\x0d\xc5\x99\x44\x32\x4e\x93\x71\x50\x74\x8c\xd9\xef\x96\x3b\xb2\x29\xe5\x7a\xc2\x8b\x4d\x39\xd7\xb3\x77\x5b\x5a\xc6\xe1\x42\x7e\xf7\xe0\xf2\x30\xc1\x15\x84\xe5\x10\x9c\x10\x78\x2d\xa1\x6a\xf6\xc1\x03\xd0\x37\x98\xbd\xa8\xde\xa6\xcb\xbd\xef\x00\x0e\xee\xd0\xfd\x4e\x90\x4d\xad\xd5\xa5\xc5\xc7\x67\x46\xc9\x21\xfe\x12\xae\x79\xb6\x90\x2b\x14\x31\x3e\x71\xff\xa2\xea\xb5\x9f\x6a\xf1\xc9\x24\x5f\x94\x94\x86\xeb\xdb\xee\xee\xb2\x95\xf9\x4b\x1a\x25\x9d\x56\xcb\xad\x5c\xbd\x8a\xe3\xe4\xc6\xf1\x84\xaf\x37\x40\x36\xec\xb0\x65\xde\xed\xe1\x1e\xe1\xab\x9a\x24\x2d\x0e\x8d\xbe\x2a\x14\x7a\x1c\x0e\x69\xe0\x86\x6d\xc3\xb3\x85\x6e\xcf\x6a\x05\xb7\xaf\x36\x12\xc4\xb5\xd3\x65\xb1\x58\x16\xaf\xd3\xa9\x66\xd7\xc2\x19\x0f\x5a\x2d\xd2\xfb\x0f\x77\x34\x83\xc4\x32\x13\x4c\x73\x6b\x18\x93\xed\x07\x8a\xc3\xf0\x5b\x2e\x83\x9f\x66\x34\x5c\x8e\x29\x9a\xab\x60\x3c\xee\x11\xe1\x8b\x12\xf3\x93\x60\x3c\x3e\x11\xc9\x9c\x27\x32\xa4\x88\x6f\x49\xe5\xcf\xcc\x29\xeb\xe7\xb3\x68\x52\x74\xba\x64\xe8\x60\x54\x66\x39\x4a\xab\x60\x3c\x96\x5a\x2a\x6e\xed\xcd\x49\x9b\xc6\xb4\xa0\x72\x1c\xda\x4b\x92\x99\xce\xa9\xea\x06\x2c\x03\xdd\x5f\x89\x87\x25\x62\x69\xb3\xad\x9e\x8b\x71\xa5\x9e\x15\xee\x4a\x2e\x32\x1a\xae\x16\x7e\x3c\x9e\x1b\x6c\xe9\xe7\x8f\xee\x92\x69\xbb\xde\x25\x53\x55\xf1\xad\x72\x23\x3b\xb3\x02\x62\x48\x80\x86\xf3\x07\x5b\xec\xb0\xfd\x3e\x39\x02\xe5\x1f\xca\x01\x54\x29\x2d\x63\xdb\xff\x06\xaf\x1a\xad\x67\x6d\xde\x27\x8d\x95\xd4\xf8\xb5\xbc\x4d\x31\x50\xf3\xe4\x5a\xc6\x01\xa5\x81\x21\xb4\x74\x82\x00\x4e\x0d\xea\xf5\x01\x60\x07\x56\x9a\x28\xbc\xa0\x27\x8a\xdd\xf3\xb6\x4f\x4b\x07\x60\x58\x4d\x78\xef\x84\x0d\x5c\x22\x97\x58\x55\x57\xc2\x75\x8e\xb2\x6e\xe8\x1b\xeb\x69\x13\x05\xfc\x6d\x9d\x5d\x0e\xfc\xba\xc9\x37\x9c\x06\x3d\xfa\xbf\xea\x48\x22\x38\x88\xc8\xda\x60\x40\x3e\x1e\xbd\x38\x1a\x92\x8c\x72\x8b\xac\x1e\xc9\x53\x61\x3a\xa3\xae\xb8\xb4\x31\x4e\xc0\x35\x5d\x7d\x56\x2e\x2a\xda\x39\x49\xe8\x98\xe6\x79\x90\x5d\xb2\xc5\x02\x21\xb0\x73\x46\x6e\x6d\x70\x58\x0c\xee\xa2\xc9\x79\x9a\x7d\xe6\x52\xde\x7c\x19\x17\xd1\x22\x46\xa1\x1c\xcc\xe0\x29\x7e\xff\x46\x83\x87\xc4\x6b\xcc\xfd\x8d\xb4\xe5\xe6\x75\x98\x66\x0c\xb2\x79\xc3\x88\x54\x37\x46\x43\xbe\x71\x98\x27\x13\x55\xaa\x2f\x71\xe4\xf3\x60\xb3\xce\x3a\x77\xe2\xc2\x9e\xfa\xce\x0f\x65\xb0\x16\x3b\x25\x8e\x81\xa3\xd9\x4f\xe1\xd0\xc9\x57\x53\x8d\x1d\xa4\xb7\x9e\xd2\x23\x94\xae\x5f\x10\xbc\x3d\x26\x07\xc0\x73\xe4\xe6\x39\x3e\x6c\xf0\x1c\xc5\xf4\x84\x49\x8f\xd9\x45\x8f\xe9\xa7\x28\x96\xd3\xc2\x0a\x15\xe3
\x73\x72\x55\x79\x10\xab\x9e\xee\x88\x56\x8c\x57\xc3\x78\x86\x5c\x46\x2f\x44\x47\x39\xb9\x5c\x79\xd8\xaa\xe0\x1d\x0c\x9c\x20\xc3\x51\x7a\xd1\x37\xd8\x91\xfe\xd8\x25\x12\x40\x72\x21\xf8\x7f\x57\xa6\x2a\x96\xc3\x7f\xa8\x74\xc4\x68\xe4\x4f\x53\x8e\xa4\x17\xe2\x7d\xb7\xcb\xcd\x39\x1a\xb4\x6b\xa2\x12\xfe\x5c\xc2\x91\x5b\xc3\x1d\x70\x61\x84\xbd\x86\x33\xc6\xfc\xdd\xfd\xcd\xe8\xfd\xcd\xe8\x5f\xfb\x66\x54\x5c\x8b\x8a\x37\xbf\xff\x15\x01\xf6\xee\xd4\x65\x38\x1c\x02\x1e\x92\x83\x34\x39\xa3\x8c\x15\x05\x22\xe6\x31\x9c\x83\xe1\x2c\x00\x81\x8b\x65\x24\x17\x46\xc0\x41\x9c\xa7\x24\x88\xe3\xf4\x3c\xe7\xf1\xd9\x41\x51\x97\xf7\xd7\x58\x45\x52\xf0\x7f\x13\x5d\xd0\xf0\x9a\x67\xad\xb9\xf7\x1a\x6b\xe2\x46\xb5\x48\xed\x28\xc7\x42\x65\xa9\x0e\x9c\x1d\x53\x25\x4a\xae\xae\x64\x84\x74\x9d\xd1\x56\x3a\xd4\x76\xd7\x56\x06\xf0\xb3\x9c\x10\x91\xb8\x62\x96\xf7\xa1\x23\xf5\x8b\x46\x43\x5c\x0f\x71\x34\x01\x55\x73\x17\x6a\xdf\x74\xea\x04\x48\xc1\xf7\xf1\x93\x56\xe3\xce\x48\x46\x51\x52\xed\xc0\x91\x8b\x89\x9a\x8c\xd3\xca\xcb\x1f\xdb\x12\x36\x55\xfa\x7d\x71\xd8\xea\xb1\x49\x38\xa3\x59\x34\x01\xc7\x1e\x19\x1d\x07\x8c\xe3\xa0\x48\x35\x0f\x1e\x90\x38\xf8\xf5\x92\xc4\x69\x10\x92\xf0\x32\x09\xe6\xd1\x98\xa4\x09\xcd\xa1\x35\x31\x21\xba\x21\x11\xcd\x3a\x55\x7a\x02\x80\x92\x06\xf6\xb2\x71\x07\x8a\xcd\xd6\x94\x16\x47\xea\x90\xec\x71\xe1\xcc\x26\x46\x0b\xac\x75\xfe\x01\xb0\x32\x41\x4c\x89\x3c\x26\x97\xdf\x7a\x18\x9a\xfe\xd2\xab\x17\x9e\x9d\x9f\x47\x10\xb0\x04\xf5\x8a\x80\x0e\x22\xa7\xfc\x04\x3d\x74\x9e\x56\x71\xe1\x7d\x99\x51\xa1\x5e\xec\xc1\x05\xde\x98\xaf\x0e\x7e\x38\x9e\xd1\x0b\x9f\xda\x40\x6b\x4d\xad\x04\xcb\x15\x65\x83\x22\x86\xe6\x53\xc4\xd5\x2e\x55\xca\x5b\x0a\x7f\x19\x85\xfb\x99\x88\x4f\xce\xaa\x12\x8b\xac\x4b\x86\x72\xbd\x09\x30\x57\x56\xf2\x5d\x13\x78\xde\xd7\x41\x37\x87\x56\xb7\x7b\x0e\x1c\x5b\x02\x1a\x8a\x7d\xb9\x30\x45\x8a\xeb\x71\xf3\x03\x19\x96\x59\x02\x05\x38\x28\xb3\xdd\x1a\xdc\x5f\x0d\x57\xba\xd6\xea\xab\x72\x5d\x5f\xef\x6e\x52\xa3\x28\x65\xea\xa7\xd0\x41\x87\x53\x60\x3e\x63\x14\xe8\x41\xb8\x45\xea\x52\x55\xb3\x1f\x86\xfc\x59\x84\x52\xa2\x05\x49\x48\x72\x5a\xe4\x64\xb9\x80\x0c\x71\x1a\x01\x96\x11\x15\x34\x63\x7b\x47\x7a\x26\x84\x2d\xe1\xc7\xb4\xbf\xb6\x86\x9e\x46\xbc\x4e\xa7\xf9\x7e\xf1\xa1\x08\xb2\x62\xcd\xd6\x34\xe6\x34\x9e\xa8\xc4\x89\xfb\x80\x59\xb0\x70\xb3\x16\x23\x50\x18\x8d\x27\x8e\x13\x1f\xf9\xca\x6e\x4a\x0b\xae\xcf\x62\x85\xad\xa7\x76\xa0\x5f\xd0\xc3\xcc\xa1\x7b\x44\x9e\x3c\x2d\x9e\xc1\x5a\xe9\xfb\x18\x07\x64\x4c\x69\xd1\xb1\x1e\xfd\x08\x4b\x46\xe7\x94\x33\x18\x88\x17\x34\xf0\x4c\x94\xf5\x51\xa0\x0d\xcc\x26\xe1\xa2\x5b\x26\x4a\xb3\x23\x70\x85\xd1\xef\xf7\xc9\x2f\x4b\xee\x09\x98\xb5\xc9\x78\xaf\x73\x5e\x2e\x79\x19\x59\xf1\x2a\xf2\xda\x7e\x02\x6b\xad\x74\x35\x0c\xff\x19\x93\x67\x7a\x0f\xa6\xdc\x90\xb3\xee\x9d\x26\x7f\xbc\x63\x9a\x7d\x1a\xfd\xab\x77\xc4\xfa\xf5\x48\x77\x91\xc6\x31\x27\x1f\x3f\xd9\x0a\xda\xd4\x60\x36\x5d\x2a\x95\x08\xa8\x6d\x93\x37\xca\x0c\xd7\x20\x96\xb4\x84\x5c\xc4\x8c\xa6\xce\x9c\x4a\x23\x0b\x46\x7a\x72\xac\xbe\x49\xf0\x3d\x9b\xf2\xd1\x44\xda\xf8\x24\xdf\x94\x3a\x6e\x46\x19\xda\x4c\x19\x86\xa6\x95\xd7\xcf\xac\x04\x5d\xc9\x50\x16\x72\x49\xe7\x56\xe8\xb9\x1d\x91\x96\xea\x03\xa0\x4f\xb6\x37\x6a\xc6\x78\xde\xa5\x71\xcc\xf8\x8c\xee\x09\xa7\xc1\x21\x2f\xc2\xce\x69\x74\x4e\x93\x02\x8e\x9c\x7d\x46\x71\x30\x34\xbd\x97\x2c\x84\xa1\xfd\x09\xc7\x14\x90\xe3\x61\x78\xda\x93\x57\x54\x46\x72\x4f\x13\xa3\xc8\xc1\x7e\x8c\xb8\x82\x18\xe8\x97\x6d\xd6\x32\x6c\xa1\x43\xe2\x96\x4c\xd6\x23\x4e\x7c\x0f\xb9\xdc\x3c\xb7\x03\x3d\x71\x9a\x3a\xc8\x28\x8c\x09\xec\xb5\x0f\x3c\x2f\x1d\x81\xd9\x71\x0d\x36\xba\x70\x35\xf0\x81\x3
4\x7c\xab\xa8\xca\x4a\x75\x5d\xa5\xca\x1e\xbf\x52\xcd\xec\x0c\xb2\x25\x20\xa5\x2e\xe3\x4b\xad\x31\xb5\xb0\xa9\xc5\x60\x4b\xf4\x45\xd0\x0e\x1a\xcc\x04\x04\x29\x67\xde\x7d\x32\xa6\x56\x88\xb0\xac\x51\x19\x62\xcb\x3d\x28\xcb\xd7\x6c\xcf\xc9\xc2\xd7\x4e\xea\x77\x69\xbf\xfb\x09\x3d\x17\xb7\x4e\x18\x07\xd8\x59\x18\x67\x92\x51\x68\xf8\xc6\xf3\x33\xc7\x9a\x65\xdf\x19\x8f\x3c\x62\xee\x78\x54\xcb\x07\x89\xe0\xc8\xe2\x5c\x58\x41\xbd\x96\x47\x52\x97\xbd\x54\x94\xf5\x77\xa3\x5a\xef\x6c\x2c\x6d\x46\x04\xa1\xeb\x08\x10\xfb\x6a\xc8\x28\x5c\x32\xb0\x33\xc7\x82\x26\x21\x18\xb8\xa9\x49\x0e\x72\x50\xb4\x24\x39\xa3\x50\xe5\x0c\x46\x57\x94\x4e\x00\x98\x15\x62\x52\x4f\x97\x2b\x57\x54\xeb\xcb\x24\xc8\xf3\x68\x9a\xd0\xb0\xef\xf6\xd1\xa6\x28\x1f\x4f\xf6\xcd\x8e\x92\xb1\xc6\xa3\x9a\x09\xf2\x36\x83\x4d\xc6\xd0\x48\xb4\x3d\x31\x89\xb1\x74\x18\xc4\x19\x0d\xc2\x4b\xfd\x60\x5d\x0b\x8a\xf9\xed\x29\xcd\x14\x64\xa5\xf4\x5a\x37\xae\x68\xd2\xb1\x5a\x53\x4e\xe0\x36\x5d\x97\x5c\x7a\x65\x72\x2e\xee\xf3\x0b\xc9\xa4\xe8\x22\x15\x63\x8b\xe6\x73\x1a\x46\x41\x41\xe3\x4b\xbb\x59\x41\xee\xe3\xa6\xb4\x6d\x4a\x27\x50\x7d\xa7\xc4\xd5\x84\xcf\x6d\x15\xd6\x64\x73\x96\xcf\xb6\x1f\x3e\x18\x74\x97\x7b\xee\x84\xe9\xb0\x37\x73\x93\xb7\x71\xc3\x3e\xd4\x0f\xa9\x8e\x31\x98\x23\x1e\x8d\x35\x4f\xe2\xba\xd4\x1d\x08\xc2\x35\xba\x13\xbe\x6e\x3a\x10\xbc\xef\xd6\x8f\xc7\x91\x1c\xd2\x85\x14\x1c\xcc\x81\xd4\xf0\x77\x78\x5a\x3e\x4f\xcf\xa4\x4a\x93\x04\xf9\x65\x32\x56\x87\x1f\x9f\x60\xe4\xe3\xdb\xcb\x04\xde\x4e\x1b\x08\x40\x32\x86\x85\x2d\x87\x77\x61\x43\xf8\x55\x6a\x36\x04\x7f\x07\xa3\x53\x2b\x66\xbb\xcf\x7b\x82\x23\x53\x78\x4d\x4e\x54\x49\x5b\x28\xb7\x76\xd4\x12\x3b\xca\xc1\x80\x1c\x4e\x34\x67\x8c\x72\xf5\xae\xef\x92\x0a\xff\x2b\x24\x2a\x88\x76\xd3\xa5\xcb\x9d\xcf\x28\x18\x63\x88\xd1\x77\x09\x67\xaa\x39\x89\x0a\x93\xad\x7a\x37\x6a\x87\xd8\xd5\x32\xf3\xed\x1e\x3e\xf4\x8b\x1a\xed\x09\xc5\xfb\x31\x84\x48\xf1\xf0\xb7\xaf\xe8\xa0\xc7\x92\xc7\x33\x6a\x5b\xef\xc5\xe9\xb4\xac\x5d\x62\x31\xa6\x8a\xb3\x05\xd4\x32\x64\x7b\x42\x89\x3f\x3e\x7f\xc4\x12\x13\xc4\x39\x00\xd8\x03\x6b\x4e\x47\x8e\x9f\x29\x21\x88\x1f\xbe\xe0\x09\x43\x41\x63\x9d\x6e\x9f\xef\xc8\xe3\x40\x7a\x2c\x04\xbf\x2a\x34\x24\x6c\x75\xcf\xb2\x34\x49\x97\xb9\x72\x5f\x28\x0c\x03\xd8\x6e\x6f\xbb\x22\xe2\xd5\x08\x61\xb7\xed\x35\xaf\x05\xa7\x12\xa9\xb6\xd2\x6b\x42\x40\xae\x0d\x1d\xab\xa1\x7e\x0e\x6f\x31\x6f\xd7\x35\xfc\xd8\xb9\x22\xe5\xb8\x75\x82\xbf\x55\x5c\x90\x5e\x9f\xf6\x76\x36\x9b\x5c\x81\xb6\x97\x39\xd7\x8b\x8f\x8b\xf6\xda\xfd\x85\xe8\xfd\x85\xe8\x9f\xf8\x42\x54\x3f\x15\x45\x2a\xeb\x9b\xbc\x17\x15\xc0\x2b\xdc\x64\xfa\x82\xbf\x35\x7e\x62\x9a\x4c\xa2\xa9\x17\x8e\x67\x49\xc0\xc3\x51\x60\x05\x75\x89\x46\x41\xe2\x09\xd4\x02\xda\x64\x1e\x69\x8a\xdb\x48\xf3\xcb\xcc\x51\x34\x15\x1e\x0c\x2c\x2b\x46\x0e\xf4\x3c\x9a\x5a\x4a\x7d\x6c\xcd\xc8\x35\xce\x57\x1c\xe2\x4a\xc1\x5e\x9b\x5e\xab\x74\x3a\xb6\xc4\x05\x3d\x63\x49\x1b\x86\x54\xc4\x7b\xe7\x7d\x86\x56\xa4\xaa\xac\x04\xdb\x55\x4a\xa0\x28\x7f\x97\x51\x71\x0d\x8a\x6e\x27\x8c\xba\x47\x3a\xdd\x6a\x60\x84\x4b\xb0\x83\x84\x70\x7f\x4f\xae\xae\xdc\x3c\x71\x36\xf5\x67\xd2\x20\x8b\x23\x56\x14\x75\x2d\x59\x2c\x8b\x17\x74\x12\x2c\x63\xef\xc5\x49\x5d\x1f\xd9\x8e\x6c\xb7\xa3\xae\x7c\xbd\xe1\x5b\x18\xc9\xf4\x43\xd4\xa2\xc7\xf7\x54\xf9\x3d\x0e\xee\x82\x35\x8a\xdf\xa2\xfb\xf6\x8b\x2e\x2e\xa0\xb0\x5a\x4a\xe6\xd8\x68\xd4\x53\x21\xca\xf6\xe0\x41\xd2\xd6\x2b\x7a\xe1\x19\xb9\x58\x55\x7c\xb0\x39\x32\x8a\x4c\x27\x24\x30\x7c\x03\x82\xe3\x49\x65\x47\xa0\xec\x02\xd8\xba\x7b\xf5\xf2\x9f\xd6\x72\x83\x3a\x98\x5c\xec\x5d\x68\x52\x97\x6f\xf8\xd8\x75\x0c\xdf\xe5\x15\xb9\xd4\xf6\xbb\x75\x7a\x23\x7f\x7f\x
31\x2e\x8f\xe1\xfa\x0f\x5d\xc1\xc2\xe7\xd5\x95\x45\x43\xfb\x63\x88\xbb\x80\x1c\x9f\x61\x78\x8f\xc7\x2d\x59\x2d\xf4\x49\xb8\xa1\xf2\x5f\x3d\x9a\x72\x10\xae\xba\x48\x45\xc0\xe8\xa8\x20\xf3\x68\x3a\xe3\x82\xa3\xf2\x5e\x2c\x94\x54\x4e\xcb\x45\x5a\xdb\x6e\x91\x9a\xad\x9e\xb4\xe7\xc1\xc5\x8f\x94\xbe\xa3\xd9\x4f\x41\xde\xee\x11\xf6\xfd\x2e\x8b\xd2\x2c\x2a\x2e\x8d\xf4\x69\x90\xbf\xcb\xa2\x31\x15\xbf\xd9\x7f\x30\xcd\xec\x47\x92\x26\x63\xea\x7b\xc5\xf8\x99\x5e\x56\xbc\x63\xfc\x4c\x2f\x9b\xbe\x64\x84\x9a\x1c\x5c\xf3\x1a\xf6\x90\xdd\xc5\x0b\x3a\x8e\xe6\x41\xdc\xc1\x00\xee\x4b\x32\xf3\xb2\xf5\x6b\x13\x3b\xf2\xb9\x79\xd7\x34\xef\xab\xfa\xee\x49\xff\xa6\xd4\x7d\x4f\xd7\x7f\x44\xba\x16\x42\x91\x43\xd8\x70\xff\x2a\x83\x09\x09\xaa\xf6\x8a\x4a\x8d\xe9\xf9\xc2\x14\x8f\x44\xfa\x9a\x21\x13\xd5\x52\x70\x71\xd1\xfd\xa2\x34\x83\x17\x7d\xbc\x9d\xae\xcb\xd3\xb9\xd6\x88\x99\x00\xca\x43\x46\x2a\xf1\x67\x02\xa8\x37\x20\x2c\x1d\xe1\x02\x5e\x9b\xf9\xab\x77\xa0\xbc\x6d\xd8\x50\x52\xf9\x77\xd1\x07\x92\xf2\x17\x82\x2c\x0d\x39\x0d\x72\x3f\xdc\x34\xc8\x0d\x28\x20\x5f\x04\xaa\x45\x45\x94\x6f\x0c\x15\xaf\x0d\x93\x50\x35\x55\xdb\x60\x25\xf5\x63\x18\xc3\xcf\xa7\x6a\xc9\x59\x75\xd5\x2d\xba\xe0\xe5\x2d\x3b\xb0\x46\x0f\x8a\x8b\xbe\x34\xfc\xf3\x56\x80\x9f\x19\x4b\x2d\xc4\xc5\xca\xcb\x46\x86\xe8\xb9\xc9\xf2\x11\xd1\x82\x2a\x57\x91\x0a\x5e\xb5\xca\x52\xb2\x2b\xb6\x5c\xd6\xe0\xa8\x43\x3a\xca\x50\xcd\xda\xf2\x41\xb9\xf4\xe9\x81\xd2\xa4\x27\x33\x1b\x2c\xb5\x52\xd0\xf2\x26\x4b\x16\x9d\x0a\x86\xb3\x9c\x2f\xe3\xa0\x88\xce\xe8\x4f\x41\x7e\x9c\xc3\x9b\xbe\xb2\xaa\x1c\x58\xab\xae\x69\x6d\x0d\x53\xa3\x1c\x1a\x3b\x9d\x4c\xe8\x58\xd4\xcc\x57\x6e\xc9\x72\x28\x2f\xe0\xa3\xe7\x52\x68\x7b\x51\x9a\xd6\x22\xb2\x58\x9c\x4e\x6d\xe3\x4b\x9d\x81\x22\x0a\x39\xda\x41\x50\xf1\x79\x75\x83\x9e\x07\xca\x0c\xb6\x4e\x11\x28\x5a\x6a\xb4\x0e\x81\xc8\x9a\xaf\x3c\x38\x38\x55\x2e\x36\xa8\xb0\xc1\x52\x33\x6b\xc2\x26\x46\x50\x83\xf6\xd9\x4d\x94\x73\x26\xf0\x52\x28\xf4\x0f\xf8\x68\xd8\x1f\x05\x39\xad\xe5\x8d\x3e\x50\x1f\x19\x78\xe0\x0c\x02\xe0\xf9\xd3\x20\x7f\x1d\xcd\xa3\xc2\x43\xbf\x26\x80\x28\xab\x12\x4b\x88\xde\xc8\x37\xca\xe4\xd1\xaf\xbe\xdd\x4e\x67\x1a\xd0\x45\x34\xa7\x79\x11\xcc\x17\xa5\x45\x14\x84\x5e\x58\x3c\x23\x29\x63\x5b\x46\x76\x59\xb5\x4a\xa7\x82\x3a\x13\x46\x93\x49\x34\x5e\xc6\xf0\xae\xa7\x0c\xd3\x1a\xc8\x1c\x48\x5a\x04\xf1\x8b\x26\x15\x58\x90\x58\x6a\x36\x57\xaa\x00\xd7\x3c\xce\x5c\xaf\x6e\xb6\x2b\x6b\x46\x05\x9d\x77\xed\x17\x7d\x8e\x59\x25\x40\xb9\x17\xd8\xc6\xaa\xf6\x49\x6d\xbc\x60\xdd\xf2\x1e\x71\x9d\x4c\x83\xc5\x1d\xa7\x53\xef\x2a\xc6\x1c\xc5\xb7\x86\xe3\x74\xaa\xd5\x6f\xee\x42\x86\x7a\x8d\xc5\x8c\x2b\xc4\x4b\x19\x5d\x7b\x44\x13\xf6\x65\x6c\x6a\x6a\x9c\x56\x86\x87\xc6\xec\xa2\xbb\xb8\x4e\x67\xd7\x32\x2a\x6e\xb0\xfd\x79\x2b\x31\x9a\x88\xd3\xa9\xa7\x6a\x99\x5a\x52\xa5\x2a\x64\x9e\x2e\xe0\x2a\xa7\xfe\xc4\x7c\x3e\x8b\x72\xc6\x8d\x17\x69\x5e\xdc\xe0\xc8\xfc\x2e\xcd\xab\xa5\x22\x37\x06\x53\x25\xd7\x76\x2b\xc5\x13\xcd\x3a\xa9\xcc\x42\x07\x03\xe8\x74\x7f\x11\x5c\xc2\xbb\x8a\x3d\x43\x4d\x86\xb3\x04\x92\x21\xa9\x28\x62\xef\x79\x4d\x66\x62\xd8\xf3\x34\xfb\xfc\x31\x7d\x97\xa5\x67\xb4\xbc\x0c\x02\xc2\x65\x17\x42\xe2\x2d\x2f\x28\x21\x50\x68\x81\x09\x8e\x3c\x65\x58\x52\x73\xd6\xc2\x3b\xc9\xdd\xac\x60\x9e\x82\xd2\xc9\x9e\xf1\xf5\x8c\x9c\xa0\xcf\x53\x32\x54\x66\x0c\xd7\xba\x55\xae\x83\xe7\xea\xf8\x38\x4e\xcf\xe1\x59\x89\xd4\x6b\x54\x55\x5f\xfd\x0c\x82\xc7\x4e\x64\xc4\x44\xd2\x24\xbe\xe4\x01\x21\x0a\xe3\x75\x86\x7c\x21\xc1\x5f\x42\xf8\x1e\xf6\xc8\x67\x12\x64\x68\x3f\xda\xc1\x0f\x24\xec\xa3\x35\xeb\x63\x23\xde\xa5\xee\x83\x80\xfe\x85\x95\xaa\x97\x9b\xd5\x51\xba\x9f\xac\xcd\
x7b\x85\x6a\xc2\x16\x74\x0d\xf8\xa5\x17\x8b\x28\xbb\xf4\xac\x78\x94\x8b\xc9\x2d\xe7\xee\x63\xbc\xd0\x2c\xaf\x6c\x09\x58\xa0\x9e\x05\x00\x94\xed\x93\x25\x2c\x88\xee\xae\x6f\x55\xbe\x0f\xce\x25\xc9\x88\x14\x2f\x18\xaa\x7e\x3f\x1f\x47\x91\xbd\x7c\x65\x19\xbc\xdb\xfe\x3d\x17\x88\x53\x70\x48\x9a\xd3\xeb\x50\x35\x00\xfe\x94\x21\x0a\x9a\x8f\x39\x0c\x06\xab\xac\x08\x58\x9b\x78\x35\x96\x2e\x46\xbd\xdc\x6e\xb1\x92\xac\x6b\x00\x8e\xa2\x66\xf4\xaf\x98\xaa\xad\x91\xf3\x85\x4b\xc1\x66\x3e\x11\xbf\xc6\x4b\xe8\x39\xdc\xe8\x75\xcc\x60\xda\x70\xd5\x31\x0a\x92\x7e\x94\xff\x23\x88\xa3\xb0\x03\xb1\x2e\x44\xca\x8b\x28\xa3\xe3\xa2\xe3\xbb\xe7\x10\x2e\xc5\x00\x50\xd4\xd8\xe9\x3a\x97\x28\x58\xd0\xd1\x31\x88\x64\x0f\x3c\xd5\x1a\x5e\xeb\x3c\x15\x35\xa8\x42\xf4\xcc\xac\x89\xab\x27\x6c\x1b\x12\xe1\x57\x5c\xc2\xb6\x65\x50\x70\xbd\xd0\x3f\x5c\x26\xe3\x28\xf1\x4b\x2b\xc2\x91\x37\xba\x79\x5a\x37\x93\x88\xeb\xd7\xc8\x10\xcf\xc0\x0b\x12\x18\x21\x46\xc9\x14\x0e\x38\xde\xe3\xad\x0b\x66\xfa\x92\x12\x6e\x9d\x6a\x2a\xc0\x50\x66\xf9\x59\x34\x9d\xd1\xbc\xae\x3c\x86\x42\xb4\x23\x72\x3f\x27\xe9\x79\xf2\xa1\x08\x0a\xea\xf3\x2b\x88\x72\xcb\x1b\xc0\x55\xec\xda\x35\x2c\x96\x71\x4c\xc3\xba\x2a\x30\x54\xc9\x51\x57\xbb\x97\x2a\x89\x20\x50\x77\x81\x3a\xac\x85\xe8\xe9\x7a\x2a\x2a\xa8\x29\x69\xdc\xad\x0d\x3d\x69\x08\xd6\x77\x36\x18\x96\x67\xa1\x92\x36\x2f\x19\xfa\x93\x51\x09\x63\xe7\x1b\x7a\xd2\x38\x6c\xd9\xf5\xfb\xb0\x34\x07\x97\xf3\x0f\xa8\x3c\xaf\xa4\xac\xad\x93\xf3\x54\x61\x83\x18\xbd\x37\x54\x0e\x43\x6f\x2a\x86\xc7\xa7\x9a\xa1\x27\x0d\xc3\x5a\x68\xf4\x24\x62\x68\x9b\x0b\x0d\x4b\xd2\x39\xf7\x32\x6c\xb6\xf8\x05\x53\x6b\xb8\xf5\xb4\xcc\x13\x11\x63\xe1\xad\xe1\xce\xce\xf5\x69\x6f\x67\xeb\xde\x8b\xc5\xbd\xd1\xd6\x7f\x8d\xd1\x96\xa0\xf4\xbb\x08\x47\xb3\x9a\xef\xfe\x86\x96\x5a\x3c\x5a\x8e\x69\x82\xc5\xd3\xbe\x42\x10\x80\xe6\x6e\xfb\x83\x38\x1e\x58\x91\x2d\xe1\x3d\xae\x1d\x16\xc7\x75\xe6\x2f\x8d\xca\xdd\x08\x60\x15\x4e\xfc\x7d\x21\xc0\x3e\xf1\xcd\x4d\x38\x99\xc7\xc1\x6f\x57\x77\x00\xaf\x2b\x15\xbb\x03\xae\x95\x27\xdd\xae\x5a\x88\x9b\x17\xc0\xb1\x0e\xea\x94\xdf\x18\x46\x06\xc7\x15\x20\xe2\x13\x43\xdc\x49\x00\x02\xb6\x3f\xd8\x93\x61\xb8\xaa\x04\xbb\x00\xfd\x84\x0b\x9f\x2c\xb2\x69\x8e\x65\xdd\x1b\x84\x7c\x96\x32\xbe\x0e\x8f\x07\x7e\x20\x80\xd7\xf3\x47\x47\xd9\x34\xe7\x51\x06\xd6\x85\xf0\xd6\xac\xc3\x58\x18\xab\xec\x34\xee\xde\x0f\x0e\x29\xc9\x1c\x1c\xec\x4f\xbc\x6e\x74\x07\xe7\x1f\x9b\xed\xbb\xa0\x42\x4c\xec\x68\x3c\x34\x44\x44\x55\x48\x3f\x1c\x85\xd8\x17\xb7\x2a\xca\xc9\x38\xcd\x32\xd7\xa5\x24\x9c\x80\x82\x82\xee\x67\xd3\xdc\x17\xe5\x4f\xc7\x19\x7f\x48\xfe\x06\x27\xa8\x9c\x7c\x81\xf3\xd3\x35\x6b\x2f\x2a\xc4\x9b\x0e\xc3\xeb\xa4\x67\xaa\x70\x3b\xa5\x73\xa4\x0f\xb9\x1c\x0a\x50\xe4\xd8\x82\x04\x1a\xf1\xec\x8c\xcf\x1f\x02\x80\x42\xc8\x70\xe7\x02\x9b\x27\x38\x11\xd4\xd1\xbb\xd8\x56\x1b\xc0\xd3\xbf\x2c\xb8\xcc\xd7\x8c\xb9\x5b\xef\x38\xc1\x1f\x83\xae\x72\x49\xce\xce\xc5\x8e\x8a\xde\xba\x31\x10\xe0\xdc\xbd\x70\x25\xbc\xbe\x94\x30\xca\x58\x05\xac\x57\xb4\x70\x56\x97\xd8\x91\x84\xb8\xbe\xb7\x57\x46\xc8\xe6\xcb\x25\x76\xf6\x15\xf1\xd7\x2a\xa2\xa4\x75\x1c\x87\x02\x55\x2e\x78\xa5\x52\x06\x6c\x94\x30\xa9\x18\x41\x24\xd2\x77\x1c\xcc\x43\x5e\xce\xa6\xa1\x5d\x6f\x97\xb8\xe3\x0d\x62\xd5\xaa\x36\xd9\xaa\xa4\x3c\xd5\x7e\x25\xd9\x19\x61\x47\x57\x67\x18\xab\xf2\x0b\x33\x5a\x68\x49\x38\xd2\x6b\xcd\xcd\xf1\xf2\xe9\x78\x62\x83\x16\xa9\xdf\xf3\xbf\x11\x3b\x74\x8f\x94\x78\xf5\xf7\x39\x87\x17\xef\x56\xd0\x70\x8d\xa0\xa4\x15\xb6\x4f\x25\x51\x6b\x24\xea\x6f\x16\x9d\xc3\x5b\xbc\x72\xde\x6f\x14\xa3\x43\xb8\x17\xdf\xec\x91\xa7\x52\x1b\x54\xd1\xc4\x32\x59\x04\xe3\xcf\x47\x5c\x0f\x6d
\x58\x01\x42\x92\xa1\x1b\x32\x93\x74\x17\x4c\x37\x4b\xb2\x2a\xfe\x43\x91\xde\x1e\xd9\x26\xcf\x64\xa2\xf4\x80\x4e\xe4\x39\x50\xbb\x04\x50\x7e\xcb\xcb\x1c\xa0\x63\x21\xa7\x27\x8a\x9b\x33\x2a\x74\x29\xd8\x7d\xb3\x8a\x7d\x77\xb2\x79\x4a\x86\x3e\x27\xdd\x07\x10\xfb\x39\x40\xe1\xb6\x25\xb2\xec\x80\xde\x41\x1c\xe3\xc5\xdd\xef\xf7\xe5\xfa\x3e\xb0\xcb\x5a\x9b\x8f\xe3\x1e\xe7\x90\x6f\x77\x10\xd6\x57\x82\xb2\xdd\x28\x50\x35\xf4\xcc\xb8\xfd\x32\x99\xfb\x82\x83\xb7\x87\xf2\xd0\x15\x18\xaf\xc3\x82\x24\x34\x7d\xa8\x48\x30\x1e\xf8\x9a\x9f\x8c\x58\x1d\x3c\x68\x20\x03\x17\x68\xf3\xd2\xae\x98\x55\x88\xdb\x5b\x47\xb5\xd0\xab\xb2\xd8\xc8\xab\x04\x3e\xf6\xef\x9b\x52\x06\xb3\xac\x25\xd5\x1e\x03\x07\x19\x2d\xff\x09\x97\xc9\x86\x58\x88\xd9\x0f\xb8\x7d\x36\xa5\x2f\x5c\x04\x8b\x3f\x76\x31\xad\xd0\xe7\xae\x9a\x25\x97\x96\x70\xfa\x5a\x7e\xdd\xf7\x14\x57\xeb\x68\x15\xe3\xa3\xc5\x8c\x23\x41\x54\xdd\x33\xba\xe6\x3e\xbc\x83\x52\x78\x09\x77\x8c\xf5\x80\x9c\x8d\x3b\x4f\x65\x9b\x34\xd8\x73\x1d\xdc\xb8\x3c\x00\xb9\xb7\x91\x2f\x2a\x0c\x27\x08\x3d\x6e\x5c\xb1\x6b\x3a\x15\xe6\x9d\xa6\xa1\xe3\x40\xbd\xc8\x2e\xad\x77\x83\x08\x14\x9e\x0a\x96\x8f\x97\x18\x6f\x1b\xc7\xf0\xb8\xbc\xe3\xb8\xa8\xe1\x14\xbf\x47\xa8\xd7\x39\x8e\xdd\x79\xd9\x3a\x92\x64\x2a\x37\x8a\x26\xe7\x4a\x7b\xdb\x30\x8b\xd4\xee\x0a\x56\x0b\x7f\xaa\xa5\x56\xbb\x66\x24\x49\x09\x40\x61\x8e\xf9\x03\xd9\x84\x43\x8d\x71\xd6\x74\xa5\x43\x1c\x31\x34\x48\xf8\x3b\xed\x24\x14\xbe\x1c\x21\xe4\x6b\xf2\x48\x1e\x54\x9d\xd8\xb7\x35\xcb\xd5\x88\xf7\xc6\xd6\x8d\x35\x0f\x1d\xf3\x16\x4f\x54\x57\x0b\xde\xdc\xd1\x3e\xcd\x8b\x68\x1e\x14\xf4\xa7\x00\x14\x88\x75\x54\x85\xc0\xeb\x28\x0a\xd7\x7c\x17\xd4\xf4\xf5\xa9\xa3\xd9\x0c\xa1\x71\xd5\xcd\x8e\x07\xb4\x6c\x66\xde\xcb\x66\xa8\x0c\x1d\x06\x21\x50\xa4\x2e\x50\xc8\x07\x78\x2a\xa6\xb4\x78\x61\x87\xf6\x91\x3b\xab\x5d\x4d\xdd\x5c\x89\xba\xee\x78\x9e\x1a\x21\x5e\xde\xe8\x8a\x95\x59\x11\xa9\xdf\x2b\x35\xdf\x22\x00\x21\x2e\x2a\xf1\x8c\xc8\xbe\x12\x61\xbf\x6d\x34\x42\x55\xff\x8d\x02\x12\xaa\x42\xab\x0e\xf2\x6b\x46\x27\xd4\x3a\x1a\x36\xc0\x6c\x31\x96\x6e\xaf\x72\x7e\x6a\xae\x63\x44\x02\xba\xdc\x2a\xa5\x62\x5c\xa2\xec\x1f\x9b\x2b\x11\x23\x4a\x8b\x04\xc3\x62\x8a\x11\x6c\x04\xcf\x89\xeb\x27\xce\xd2\xb8\x3e\x03\xe7\xb3\x9f\x58\x8f\xdb\x64\xc8\x3f\xac\x9d\xa4\xdd\x73\x84\x97\xa1\xf6\xcf\xa6\xf2\x94\x67\x39\x31\x9c\x53\x9d\xc5\x3b\x2e\xfd\x96\x72\x06\x59\x4b\x0c\x32\xac\x4a\xd9\xf6\xa3\x02\x16\x55\x6f\x3d\x9e\xd8\x42\x78\x82\x0b\x43\xd0\x59\x37\xb1\xa3\xad\x71\x60\x9b\x2f\xb0\x0c\x25\x7d\x73\xe8\xb4\xb2\xad\xc2\x42\x67\x3f\x58\x2c\xe2\x4b\xe1\x29\xa8\x11\x61\x75\x6d\x33\x36\xbe\x05\x58\xcd\xb0\xc4\x1b\xd5\x5d\x33\x0f\x22\xfe\x8e\x66\x3c\x3a\x04\xcf\xad\x63\xef\x78\x26\xec\x6b\x85\xdf\x91\xe9\x7a\xc5\x63\x57\x49\xa5\xe0\xe2\xb0\xa9\x31\x5c\x06\xe8\x4a\xcd\xde\xc9\x2f\x2b\x6e\x8a\x48\x7c\x24\x3a\xa9\xb4\x98\xde\xad\xa5\x8b\x1f\xf6\xf9\xa7\x8c\x3d\x24\xcb\x02\x81\x47\xd9\x78\x19\x07\xd9\xfa\xfa\xfa\x7a\x75\xc4\x21\x49\x41\xbb\x77\x12\x73\x88\x6b\x7f\x5b\xc3\xed\x27\x7e\x07\x2e\xdb\xf7\xb7\xff\xf7\xb7\xff\x7f\xed\xdb\x7f\x71\xf5\xcf\x60\x65\x4c\x28\x7f\x24\x8b\xdf\x2d\x46\x85\xcf\xb2\xa0\xda\x10\x60\x6d\x30\x80\x98\x57\x41\xc6\x48\x99\xed\x60\xcb\xdc\x1c\x22\x23\xb8\x30\x9a\x4c\x68\x46\x93\x82\xd0\xe4\x2c\x87\x42\xa3\x2c\x3d\xcf\x69\xb6\x86\x1c\x7a\x9e\x47\x49\x98\x9e\x83\xc6\x02\x45\x7a\x20\x0f\x1e\x88\x9c\xfe\x3f\xdf\xbc\x7e\x55\x14\x0b\xe1\x2b\x96\x73\x4d\x33\x8d\xec\xf9\x61\x81\xf5\x89\x40\x05\xd1\x34\x49\x19\x23\x88\xa3\x84\xb2\x9e\x24\x69\x48\xd7\x90\x77\x30\xa7\x46\x35\xf0\x8b\x79\xcc\x46\x26\x36\xb6\x76\xb7\x69\x23\xd7\x1c\x93\xff\x7c\xf5\x7e\xdb\xa8\x6e\x9
6\x6d\xb7\xbb\xa5\xa5\xa4\xe4\xc0\x5a\x78\x27\x91\xe9\x9a\x44\x80\xfc\xc4\x44\x7b\x70\x8f\xc9\x9d\x69\xb3\x5e\x2a\x03\x08\xa3\x3c\xde\xf2\x67\x69\x5e\xf4\x48\x11\xcd\x69\xba\x2c\x7a\xac\xc2\xac\x07\x4a\xe6\xf3\x34\x13\x8f\xd1\x60\x33\x61\x70\x64\x8f\xc0\x7f\x57\x57\xa4\x2d\x88\x3d\x4e\xc7\x41\xcc\x12\x87\x4f\xbf\x79\xfc\x0d\x04\x96\xe5\x7b\x0f\xaf\x90\xed\x84\xe2\xd7\xd5\x15\xd9\x54\xd9\xac\x19\xb2\x07\xad\xa9\x34\xd9\x28\xd9\x53\xed\xd7\x0a\x4f\x8b\x8c\x2e\x20\x52\x1b\x3d\xb7\xa6\xcc\x92\x9d\x04\xe0\x7b\x74\x96\x11\x92\xd3\xf3\x34\x8d\x69\x90\x5c\xc3\x1d\x2b\xdb\x9f\xa5\x04\xa3\xb1\x2c\xdc\x32\xa2\x03\x9f\xd9\x96\xe1\xfb\x07\x63\x1a\xc9\x5d\x66\x07\xcc\x8b\x40\x56\x3d\x47\x35\xbf\x41\xe1\x84\xc4\x78\x18\xdc\x00\xea\x6c\x42\xb4\x78\x05\x43\x7e\xf5\x7e\x5b\xc7\x75\xe5\x92\x16\xc2\x3c\x9a\x08\x06\x63\x38\xbf\xb3\x2a\x32\xc6\xc3\xab\x04\x79\x58\xd6\x9a\x2e\x68\xd2\x69\xbf\x3b\xfa\xf0\x51\x86\xa2\xe4\x84\xc3\x3b\xb7\xbb\x86\x3c\xe9\xc1\xdc\x3e\x78\x60\x4e\xaa\x71\xe8\x5b\x82\x41\x4d\xfb\x79\x90\x47\x63\xd2\x26\x1b\xd0\x85\xe7\x4b\xc6\x1e\x50\x15\x1b\xa4\x3d\x54\x57\x85\xaa\x9e\x7e\x91\x8a\x47\x6b\xed\x51\x90\xd3\x27\x8f\xdb\xd6\xf8\xb5\x1f\xe9\x57\x34\x08\x69\xd6\x69\xef\x03\x5f\x8d\x7e\x0d\xf8\x69\x0b\xda\xe7\x23\xac\x28\xc4\xe4\x63\x9a\x14\x8f\xd8\x41\xbb\xdd\x23\x6d\x26\xf9\x47\x63\xa8\x62\xf0\x4b\x2e\xd5\x8e\xea\xc6\x4a\x4c\x59\x0d\xb9\xf2\x88\x23\x97\xc9\x18\x1d\xaa\x6d\x4d\xb2\xef\xe2\x79\x81\xae\xaf\xfd\xb1\xa5\xab\x48\x2f\xb7\x63\x0d\x4a\x5d\x9a\x4d\x72\x92\x66\x4c\x5a\x15\xc1\x8a\x81\x1e\xb5\x76\x5f\x63\x2e\x09\x3b\xf0\x20\x82\x47\x91\x89\x26\x97\xaa\x7e\x81\x64\xa9\xc8\xc7\x6e\xa2\x7d\xd6\x00\x07\x69\x92\x50\xf1\x6c\x41\x52\x98\xa6\x44\xe3\x72\x51\xb6\x2e\x03\x36\x7c\xa4\x17\x85\xd3\x41\x01\x8b\x5e\x6b\x08\xeb\x78\xb3\x5b\x55\x5d\x7a\x2f\xea\xef\xf8\x1a\xc4\xab\xa4\x79\xec\x60\xa0\x81\xa0\x86\x08\xf6\x15\xc7\xa9\xa0\x04\x91\xf5\xa3\x13\xad\x83\x14\x59\x34\x9d\xd2\x8c\x87\x18\x62\xb3\x0f\x62\x8b\xf2\x17\xca\x70\x50\x47\x30\xd0\x03\x1f\xd5\x98\x91\x82\x9b\xd0\x0f\x18\xaf\xec\x1a\xdc\x24\x01\xdf\xce\x79\x11\x14\x74\x3c\x0b\x92\xa9\x5f\x81\xc0\xcd\xfb\x25\xe2\x83\xf0\x12\x0c\xeb\xe1\x46\xf8\x31\xe3\x30\x36\xcb\x5b\x37\x23\xfd\x36\xa0\x18\x0d\x28\x6f\x95\x50\x08\x29\xfb\x32\xab\x86\xa2\xe0\x4c\xe6\xbd\xb5\x52\x37\x56\x2b\xd2\x16\xc1\x57\x5b\xf6\xc5\x96\xd1\x32\x3b\x0b\x5e\x5b\x28\xd6\x1b\x81\x8b\x59\xb3\xb2\xbc\xaf\x97\xde\x47\x5e\xaa\x83\x37\x0f\xb1\x90\xef\x96\x03\xd8\x5d\xa8\x62\x02\x62\xa5\xe1\x75\xa5\x2f\xcb\xe3\x4b\x46\xef\xfc\x6d\x29\x2c\x2e\x46\xd5\x25\x6b\x2b\xca\x45\xfd\xd4\x64\xa6\x4a\x08\x90\x0a\x4e\x5b\x18\x60\xe7\x87\xa4\x5d\x90\x49\x10\xc5\x34\xec\x93\x23\x76\x4e\x3b\x8f\xd8\xd9\x23\x80\xa8\x60\xe5\xab\x09\xb5\xe9\x99\x0b\x8d\x4f\xa5\xcf\x50\xd1\x27\xa2\x70\x48\xbe\x53\x7f\x52\xdf\xc7\x76\x9f\x6c\x31\x1e\x92\xf6\x76\x7f\x53\x29\x0f\xa5\xfe\xb1\x9d\xd0\xe2\x53\x1c\xe5\x05\x4d\xa2\x64\xaa\xb2\x95\xf6\xf0\xd4\x30\xe8\x92\x0a\xae\x8c\x87\x38\x73\xc9\x57\x5a\x15\xb2\x41\xea\x49\x70\xd4\x05\x78\xe8\x52\x55\x60\x9c\xf6\x99\x98\xdb\x1a\x3e\x65\xbf\x0c\xf9\xb9\x35\xdc\xfa\x96\x9d\xfc\x77\xee\x4f\xfe\xf7\x27\xff\xbf\xf8\xc9\x5f\x1b\xfe\xc3\xa3\xc5\x3b\x32\xfa\x57\x86\x9c\xf8\x54\x39\x8a\xa6\xdc\x06\xb7\xff\x0b\x3f\xa1\xf3\x7b\x90\xf0\x35\x9d\x98\x1b\x82\x8a\xf5\x78\x89\x1e\xce\x19\x1b\x27\x87\xe0\xec\xe2\x7c\xc6\x7a\xdf\x31\x0d\xb4\xbe\xe7\x85\xc9\x43\xb2\xed\xbe\xbc\x03\x8b\x3f\x26\xc5\x9b\xef\x0f\x89\xff\x45\x9c\x60\xee\xef\xc4\xa9\x2e\x48\xc8\xe1\xf3\xfd\xb7\x62\x92\x43\xf2\xdd\xb7\x64\x9c\xce\x17\x4b\x11\x67\x65\x74\x49\xe6\xe9\x59\x94\x4c\x51\x34\xb1\xc7\x64\x3c\x0b\x32\xd8\x0b\x
f8\xcd\x6c\xc8\x4d\xa9\xa4\xb9\xba\x84\x8e\x29\x7f\xb4\x50\xa4\xac\x41\x8e\xab\x9c\x74\xf6\xc9\x1e\xd9\xda\xec\x91\xe7\xec\xff\xad\x1e\xe9\xf7\xfb\x3d\xf2\x7f\x64\x8f\xec\x7c\xd3\x65\x87\x1d\x92\x2f\xe8\x38\x9a\x44\x7c\x21\x1d\x7e\x38\xda\xda\x79\xb2\xf5\xc4\x36\x31\x8b\xf2\x14\xd2\xc5\x38\x5c\xaf\xb2\xd7\xfc\x4d\x2c\xeb\x08\x1b\xa0\x79\xb5\x86\x6f\x96\x85\x24\x15\x4a\x30\xe1\xda\xc0\xac\xdf\x98\x50\x56\x31\x9e\x47\x36\xa2\xf6\x7e\xbb\xcf\xd0\x72\x90\x86\x74\xbf\xe8\x6c\x22\xad\x35\x1b\x5b\xfb\xff\x9c\x6c\xce\x00\xf9\xbb\x5d\x20\xd6\x22\x3d\x5e\x2c\x68\x76\x10\xe4\x5a\x95\x8d\xb2\xf3\xe5\x28\x2f\xb2\xce\xe3\xae\x7c\x91\x2b\x12\x36\x7b\x8f\xad\x1b\x33\x9e\xbb\x88\xa3\xa2\xd3\x6e\x77\xcd\xc7\xca\x49\xd7\xb4\xae\x1a\xa7\x21\x1b\x5c\xe2\xeb\xbc\x94\x0f\x01\xe6\x87\x3d\xb2\xcf\x04\x42\xf8\xf8\x7e\x8f\xfc\x5f\xd7\x89\x01\xe0\x99\x59\x31\xb1\x06\xa4\x72\x2a\x1b\x52\xf2\x88\xec\x93\x0d\xb2\xb5\x89\xec\x8c\x7c\x7e\xf1\x65\xec\x51\xdb\x86\xe9\xba\xdb\xff\x25\x8d\x12\x36\x4c\xdb\x52\x71\xbc\x04\x8f\xbb\x30\xc5\x6f\x8e\x5e\x30\xc2\xde\xda\x94\x4c\x49\x58\xf8\x01\xe5\x7b\x28\xee\xdb\xcd\x27\x8f\x6d\x82\x9b\xa7\xe1\x77\xdf\x6e\x6d\x96\x11\x9a\x49\x5f\xda\x8f\x31\xa7\x26\x51\xb8\x92\x8a\x32\x3a\x0f\xa2\x84\xeb\x8e\x58\x9e\xbe\x7b\x14\xce\x5d\x4c\xf6\x20\x80\xb5\xdd\xf2\x76\xd7\x72\x5b\x03\xcc\x4a\x82\x29\x8b\xd7\xef\x0c\x13\x39\xdd\x24\xc8\xda\x87\x49\xc1\x3d\xe2\xf4\xc8\xd6\x66\x97\xfc\xff\x19\xd6\x36\x9c\x5a\xb8\x53\x1c\x61\x7e\xee\x7b\x81\xab\xea\x52\x25\x75\x7d\xc6\x3c\xd5\xbf\x43\xe2\x26\xe8\xb0\x0e\x84\xc1\x3f\x5c\xa8\x43\x82\x78\xeb\x20\xd8\xa7\x9c\x2f\xff\xe4\x0c\xb0\xb7\x6b\xff\x24\x08\x4b\x68\xbd\xe4\xdc\xae\x3a\x51\x66\xeb\xfa\x49\x21\x08\xd1\x72\x2e\x5f\xe7\x58\x44\xc5\x60\xf6\x55\x8e\xd3\xf7\x00\x65\x49\x31\x9a\x0d\xe1\x5a\xb1\x35\xac\x15\x63\x39\x7d\x54\x63\x9d\xcf\x80\x20\x7f\x2e\x7d\x06\xa0\x97\x0a\x22\x1a\x28\xd9\x7a\x82\x58\xd8\x28\xc8\xe9\xce\x13\xb2\x07\x65\xb4\x7a\x68\xe7\x89\x61\x02\x10\x86\x94\x6b\x16\x61\x0f\xec\xf0\x42\x3d\xb2\xf5\x8d\x29\x09\xab\x7e\x3e\x1f\x05\x49\x87\x17\x33\x99\x9f\xb5\x98\x85\x5b\x12\xb4\x70\x9f\xb3\xa1\x17\xa9\xb1\x7b\xb1\xe9\x23\xe0\x5a\x35\xbb\x94\x2b\x9a\x2b\x93\xc0\x5e\xf7\x1d\x8f\x05\x91\xa4\x85\x10\xca\xbe\x8f\x7e\x68\x4d\x41\x22\xe1\xee\x6e\x26\x1a\xa9\xf9\x2c\xe0\xd2\x1a\xec\x6f\x17\xe3\x78\x99\x47\x67\x2a\x74\x65\x34\x8a\xe2\xa8\x50\x02\xce\x28\x48\x3e\x0f\x46\x59\x90\x8c\x67\x24\xa7\xd9\x59\x34\x96\x1b\x60\xc0\x3d\xbd\xb6\xbe\x1f\x44\x3f\xf4\x6d\x1a\x52\x61\x24\x72\xb9\x0b\x4d\x68\xc6\xb6\xa1\x20\x9e\xa6\x59\x54\xcc\xe6\x24\xa4\xf9\x38\x8b\x46\x9c\x2d\x09\xf9\x87\x26\xfd\xf3\xe8\x73\xb4\xa0\x61\x14\x80\x10\xc4\xbe\x06\x87\x49\x41\xb3\x24\xe0\x4f\x27\x3e\x3d\x0f\x92\xcf\x9f\x84\x9b\xd9\x4f\x7c\x5e\xff\x7f\x3f\x89\x91\x26\xd3\x4f\x6c\x88\x9f\xe0\x2d\xd1\xa7\x30\x9a\x46\xce\x53\x0e\x39\x35\x3e\x8a\x1c\xc9\x3d\x55\xce\x80\xf4\x19\x53\xa4\x9e\x6d\xb6\x01\xad\x3e\xb7\x57\xe4\xc8\x62\x8b\x62\x46\x0f\xf8\x3e\xd5\xfe\xe7\xcb\xf6\xee\x9a\x97\x67\x0a\x1e\xdb\xb1\x76\xee\x0e\xae\x60\x83\xb4\x37\x41\x54\x82\x56\xb0\xb9\x0b\x43\xc7\x0b\x86\x0d\xb2\x47\x3a\x5c\x9c\xea\x7c\xf7\x94\x3c\xd2\x4d\x74\xe5\xb3\x81\x47\xdb\xd6\x7e\xab\xbc\x6e\x98\x4d\xa1\x3a\x45\x83\x35\x6a\x2b\xc1\x44\x10\xae\x80\xb0\x79\x00\xf1\x28\xc9\x8b\xa8\x58\x16\xd2\x59\x72\x14\xd2\xa4\x60\x9b\x96\xed\x78\x9f\xd7\x72\x98\x84\x51\x46\x4d\x03\x06\xf3\x8d\x4d\xde\x93\xb2\xac\x7a\x64\x03\xaf\xa6\x5a\xa8\xa5\x16\x34\xd5\xd2\x6d\xb5\x56\xe1\x45\x66\x4f\xbc\x0e\x94\xcd\x23\xb0\xc9\x19\xda\x2f\x3f\xbe\x62\xf3\x20\x5f\xb7\x60\x0c\xa0\x54\xd5\xb7\xae\xc5\xaf\xd3\x2a\x7e\x2d\x9f\xd2\x71\xe4\x8a\xe8\xdc\x51\
xce\x5f\xca\x61\x3e\xee\xc8\x9d\xe0\x41\xa5\x54\xde\x54\x7b\x91\x47\xf1\x21\x15\x1e\xfc\x39\x1d\x6f\x49\x09\x9d\x87\xc8\x3f\x4b\xa5\x9c\x10\x61\x3f\x2f\x11\x27\x2b\x2c\xfc\x69\x27\x2f\xb5\xba\x72\x85\x05\xe8\x7a\xe9\xeb\x41\x3c\x66\x1d\x95\xc1\x3b\xaa\x1e\x49\x3d\x5a\x1b\x18\x1b\xd6\xd6\xb8\xa3\xb4\x28\x61\xf0\x9f\x7f\xbe\x3c\xd9\x7c\xf4\xdd\xe9\x97\xed\xeb\xce\xcb\x8f\xaf\xd8\xef\xfd\x47\xff\x77\xfa\x65\x6b\xe7\xfa\x4a\x7d\xec\x6c\xf6\x76\xb6\xae\xbb\xff\x33\xe8\x17\xa0\x04\x55\x1b\xb8\xf1\x2e\xaf\x8c\x31\x20\x70\xfe\x3c\x6f\x6b\x45\x84\x89\x27\x98\x70\xfa\xf7\xa2\xed\x85\x5e\x82\x77\x83\xb7\x17\xee\x4a\xb2\x10\xa7\x07\x85\x1f\xf7\xec\x3c\x86\x38\xff\xfe\xbc\x6f\x6e\x38\xec\x09\x89\x92\x92\x81\x1b\xdc\xe7\x6e\x86\xee\x65\x23\x8d\x06\xbf\xbd\xd9\xc8\x6a\x93\x8b\x94\x6c\xa4\xf9\x72\xce\x00\x8f\x73\x71\x7c\x98\xa7\xe1\xa3\xef\xbe\x7d\xb4\xb5\xa9\xb2\xe1\x8c\x0b\xbd\x1b\xa7\x31\xe9\x1c\x7e\x38\x1a\x1c\xbe\x3c\x20\xec\xdc\x30\xdc\xde\xdc\xdc\xe9\xda\x3c\x19\x55\xeb\x9e\x42\x51\xae\x33\x70\x99\xd7\x70\xd8\xe2\x4c\xb8\xdd\x23\xdb\xcd\x6c\x55\x31\x53\x35\xb6\x14\x42\xa7\x7d\xf2\xcf\xf7\x2f\x7f\x72\x1c\x09\xaa\x02\xfe\xd1\x94\xd6\xe8\x4e\x2a\x82\xac\x1b\x9e\x26\x80\x0e\x78\x99\x73\x86\xfc\x6d\x8f\x3c\xee\x92\x21\x69\xb7\x1b\x8d\x7b\x1c\x47\xf0\x90\x4c\x75\x10\x94\x4f\x51\x62\x8f\x8f\x61\xe1\xa7\xfd\x7f\x1c\xfd\xf8\xaf\xa3\xf7\xff\x6b\xcf\x2a\xd4\x51\x32\xa7\x76\xfd\xde\xc9\xe5\x40\xb7\x1e\xfb\xd6\xd6\xea\x23\x17\xab\xc9\x7f\x2e\x71\x0f\x1e\xee\xd0\x9c\x0a\x9c\xe1\x05\x9e\x73\x08\xbe\x77\x12\x83\xf3\x39\x20\x33\x0e\x1d\xee\x80\x1f\xa3\x43\x6c\xe9\x51\x46\x9e\x3f\xd4\x29\xc5\x38\xa1\xf2\x33\x8a\x79\x9e\xd9\x7a\xd2\xed\x91\xed\x4d\xe5\xe2\xcc\x90\xf2\x24\x7a\xad\x41\xca\xc2\xcd\x16\x68\x89\x57\xaa\x43\xc8\xe2\x4a\x7d\xac\x57\x6c\x0d\xcd\xcf\xeb\xd3\xde\xce\xe3\x7b\x35\xfe\xbd\x1a\xff\x2f\xae\xc6\x17\x2a\xfc\xc5\xb8\xda\x7e\xef\x16\x16\x77\x2d\x1d\xa2\xb0\xb5\xbb\x52\x68\xb5\x1a\x3b\x3d\xae\x67\x5a\x8c\xbd\x96\x60\x8b\xa0\x98\xf5\x48\x42\x0d\xeb\xef\x4f\xa0\xb9\x70\x1e\x9e\xca\xab\x6a\x1c\xdc\x59\x7a\x2d\x10\xf6\x3a\x60\xe3\xc3\xfe\xe3\xa9\x3a\x6b\xac\x6e\x78\x81\x2b\x16\x32\xa1\xf3\x85\x41\x0f\x75\x79\xe5\x8a\xd2\x2a\xd6\x4f\x93\x4e\x1b\x46\xd5\xc6\xc1\x50\xbb\x86\xfd\x74\x9e\x32\x26\xc6\xdf\x12\x1e\xbe\x3b\x20\xfa\x5e\x99\xbf\x30\x6c\xf7\x08\x45\xac\xf7\x13\x67\x83\xe2\xc2\xbb\x63\x3b\xc3\xf4\xf6\x20\x09\x71\xfb\xa8\xf9\xd2\xca\xc8\x9a\x7a\x63\xf0\xfa\xf0\xc3\xc7\x97\x6f\x61\x05\x1d\x1c\xbd\x7d\xfb\xf2\xe0\xe3\xe1\xd1\x5b\xf2\xfe\xe5\x87\x77\x47\x6f\x3f\xbc\xfc\x50\xda\x6a\x18\x14\x01\x6e\x96\x7d\xe3\xcd\x69\xf0\x50\x98\x11\xce\x83\x8b\x71\x3a\x5f\xc4\xf4\x22\x2a\x2e\x87\xe4\x09\x50\x96\xd5\x43\xd0\x85\x2a\x3b\x04\x56\x95\xde\x6f\xba\x9e\xc8\x35\xc2\xe6\xe0\x8b\x19\xc8\x1a\x0e\x7e\xa1\x6d\x3b\x21\xba\xc3\x03\x7c\x03\x7f\x09\xc9\xf9\x2c\x1a\xcf\xc8\x3c\x28\xc6\x33\x21\xbe\xf2\x4d\x88\x31\xb4\xd0\x28\xe7\x89\x5a\x00\x4d\xfb\x23\x5d\xc3\x75\x94\xd3\x5b\xb0\x40\xf0\xc7\xa1\x8d\x26\x9d\x4f\x7e\x42\x3e\x81\xb7\x71\x51\x78\xea\xba\x43\x57\x85\xd9\x58\x05\xd8\xae\x03\x65\xc7\x0c\x2f\x8d\xa5\x0b\xd5\x88\xbe\xdb\x15\x5d\x3b\x58\x9c\x44\x19\x35\x3c\x02\xd8\xe8\x2a\x1b\x0f\x1b\x8a\xa7\xf5\x0a\x70\x1d\xd8\x17\x9b\xb6\xe8\xbf\x90\xc6\xb4\xa0\x55\x35\xd8\x83\xb1\x71\x83\x5f\x61\xff\xcc\x76\x2d\x20\x44\x41\x10\xbc\x3e\x50\xee\x70\x5b\xa9\x84\x3b\xcb\x21\x29\x77\xb5\x1c\x15\xfd\xb5\x35\x29\x0c\x9a\x24\xbc\x66\xab\x3d\xe0\x45\x26\x13\xfe\x34\xcf\x43\xe2\x91\x59\x18\x7b\x56\xc5\xab\xca\x66\x83\x3d\x4b\x5e\xfb\x07\xf7\x6c\xae\x1d\xf4\xca\x25\xfe\xe2\xe5\xa3\x83\x57\xc7\x6f\xff\xf7\xe5\x7b\x55\x4f\x48\xc7\xb3\x65\xf2\x99
\x86\xe2\x55\x09\x7f\x31\x2a\xfe\xfa\x19\x5d\xc4\xc1\x98\x76\x06\xff\xbe\x3e\xf9\x77\xf2\xef\xec\xf4\xd9\xbf\xbf\x0c\xa6\xbd\xf6\xf5\xd5\xa3\x47\x57\x5f\xda\x5d\x70\x2d\xfc\xc5\x0b\xff\xef\x53\x59\xe2\x44\x94\x39\x65\x85\x4e\x64\xa9\xd3\x13\x7f\x39\xbb\x94\x51\xa8\xa4\x8c\x6e\x0b\xb5\xa4\x1a\x42\x65\xc4\x35\x1f\xcb\x6e\x4b\x4e\x6a\x60\xc0\x5d\xb3\x80\x78\xc4\x5f\x06\x03\xb8\x03\xa5\xc2\x1d\x06\x78\xda\x80\x0a\xd6\x1c\xd2\x67\x79\x07\x2c\xcb\x5c\xb9\xc2\xef\x8c\x05\x43\x36\x08\x7f\xff\x6a\x88\xea\xea\xce\xda\xe2\x64\xae\x53\x03\x9f\x2d\x18\xf4\x1d\x95\x12\xd6\x34\xdc\x98\x66\xcd\x5d\x7c\xba\x33\x7b\x76\x67\xc4\xd0\xc1\xe7\xae\xb2\xa0\x06\xd7\x77\xc9\x98\xc6\xe0\x50\x5f\x3e\xe2\x34\xca\x8c\x63\x1a\x64\xd2\x84\xcb\x6a\x45\x24\x5b\x0b\xda\x0f\x04\xbe\x1a\x0a\x59\x91\x6f\x8f\x33\xcb\xdb\x7b\x1d\xfe\xab\xb4\xab\x14\x38\xc3\xf0\xd7\x3d\xb2\xb5\xb9\xb9\x49\x1e\xf2\xcb\x19\xcf\x5d\xab\xd7\xf1\x03\xbc\xdb\x03\xec\x48\x7c\x31\x0e\x92\x53\x41\x2f\x3c\x16\x8b\x78\xd7\xb7\x3a\xaa\xdc\x19\xb3\x48\x04\xc2\xfd\x08\xcb\x4a\xa7\xc3\x9c\x45\xf0\x80\xf0\xa6\xdd\x9e\xa5\xad\xc7\xe0\xc2\xf9\x0f\xe3\x91\x3f\x89\x2d\x34\x08\xc3\x1c\x47\x0a\x17\x56\x0e\xae\x34\xc6\xd5\xc3\xbd\x35\xbe\xe1\xca\x83\x81\x38\x6b\x47\xdc\x5d\xbd\xe0\x7a\xb0\x1b\xcb\x5b\x21\x95\x7a\x18\xf2\x52\x41\x96\x45\x67\x14\x33\xdc\x20\x54\xb3\x27\xdb\xab\xe0\xb0\x1e\x68\xc3\x5b\xbd\xdf\xa6\x14\xc9\x14\xf2\xb5\x7a\x14\x92\xab\x2b\xf9\x75\xb2\x79\xaa\xb6\x4c\xb8\xc2\xe6\x7d\xd3\xd0\x22\xc1\x2c\xc1\x13\xb1\x44\xe7\xdd\xbc\xc8\x9e\xea\x4d\x95\xc4\xcb\x40\xfb\xaa\x61\x59\xb7\xdc\xd5\xe4\x3a\xc2\x2b\x95\x9c\xcf\xa8\xf4\x3b\x10\x72\xb1\x1c\x4e\x5f\xa0\x71\x67\xfb\x7b\x88\xd0\x2c\x88\xb8\x02\xb5\xae\x7d\xa7\x3a\xda\x4f\xd2\xac\xc3\xf0\xf2\x99\x5e\xf2\x93\xa2\x6f\x00\xa6\x13\x98\x8e\x1f\xa8\x3f\x0b\xf2\xa3\xf3\xe4\x1d\x04\x5a\x2a\x2e\x21\x76\xa1\xc5\x05\x4a\xd0\xf3\x99\x5e\x9e\x96\xdb\x76\xb6\xd3\x84\x1c\xbe\x3b\x68\xdb\x41\xfc\x85\x6c\x51\x51\xa7\x63\x66\xa1\x97\xc9\x01\xf6\x41\x28\xdc\x8d\x13\x74\xdc\x88\x72\x92\x17\x11\x0f\x46\x12\x85\x88\xa8\xb1\x59\x68\x29\xc2\xfd\x76\x9c\x9d\xf2\xd3\x92\x94\x03\xd8\xee\x91\x51\xd1\x8f\x1e\xa7\x02\xb3\x57\xd3\x34\xa1\x42\xf3\xd4\x59\xff\x64\x8b\xfd\xe7\x59\x54\x80\xbf\x14\x8b\x1b\x21\x10\xeb\x08\xf5\xc9\x3d\x43\x49\x17\x83\xeb\x65\xb5\x0b\x05\x92\x77\xe8\x55\x2f\x08\xd6\x30\xfd\x58\xf5\xd2\x0f\xe8\xe9\x0a\x31\x36\xd9\x5d\x83\x73\xaf\x80\x22\x89\xa6\x7a\x2c\x11\xcf\x11\xaa\xf6\xac\x29\x7b\x19\xa2\x67\xbf\xbe\x51\x55\x58\x3c\xdf\x4c\x6c\x50\x54\x8d\xa5\x06\x73\x28\xb5\xfb\x28\xb1\xfe\x7c\xfb\xa4\x65\x76\x27\xb4\x89\xd6\x19\xc5\x71\xc7\xf3\xaf\x74\x09\x56\xd6\xfa\xb5\x59\xab\xbd\x61\xb3\xdb\x8d\x76\x8b\xe4\xd8\x30\xbb\x8f\xed\xb4\x35\x1f\x84\x17\x5b\x69\x41\xf2\xe5\x62\x91\x66\x05\xe8\xd6\xf8\x4d\xed\xbb\x03\xa2\xb4\x2a\x6d\xc3\x11\x64\x39\x61\x36\x7e\xa9\x70\x93\xc5\x58\x4f\x65\x2b\x51\x98\xf7\x58\x0f\x34\x55\x69\x41\x8f\x1c\xea\xda\xbb\x69\xa9\xb7\x1b\x57\x8f\xab\x31\xe8\x38\x69\x2f\x79\xa5\x7d\x7d\xda\xdb\xf9\xe6\x5e\xa5\x7b\xaf\xd2\xfd\xaf\x50\xe9\x8a\x87\x15\xb7\x7a\x8e\xbd\x1f\x64\x69\x42\xfe\x77\x39\x0f\xce\xa2\x9c\x7c\x1f\xb0\xcf\xbf\x7d\xe6\x9f\xfd\x39\xf5\xaa\x7b\x07\x03\x72\x98\x44\x45\x14\xc4\xd1\xaf\x94\xfc\x9d\xf7\x82\x11\x6a\x40\x72\xb0\xc4\x92\x06\x37\x30\x50\xb6\x54\x0d\x27\xe7\x7d\xd0\xea\xca\x62\x32\x8a\x88\x08\x40\x75\x18\x0e\xc9\x66\xdd\xcd\x1b\xb7\xf6\x60\xc3\xb7\xdd\xea\x7a\xcd\x4c\xbc\xee\x74\xf5\x2b\x34\x19\xeb\x6a\x22\x11\x0a\x2d\x69\x83\x1e\x8f\x13\x5e\xfe\x3a\xa5\x87\x54\x3d\x13\x59\x8d\xcc\x92\xbe\x77\xbd\x6e\x88\xd0\x08\x58\x7b\x4e\xef\x07\x6b\x02\x3d\x25\xae\x78\x79\x5b\x3d\xd1\x98\xe1\x34\x9
5\x67\x75\xcb\x54\xcb\xb2\x49\xc7\x98\x47\x99\xed\xae\xb7\x51\xd4\xa9\x20\x3c\x63\x67\x54\x39\x3b\xe4\xf0\x05\xe4\xc8\xde\xa9\x49\xdb\xd8\x28\xf3\x33\xe4\x7f\xfd\xc3\xdf\x0a\x39\xd5\xe8\x6c\xf9\x3c\x48\x8c\x54\xa5\xcb\x77\x41\xfc\x7f\x76\x60\x92\x2f\x84\x9a\x1b\x5e\x48\x1c\xa8\xc3\xa3\x34\x20\xf2\x9b\xea\x28\x65\x5d\x5d\xe4\x33\xcf\xcb\x6c\xab\x01\xbf\x79\x86\x44\x83\xd5\x9e\x15\x10\x99\x27\x5a\x97\xa1\xdc\xa7\x0f\xd2\x39\x0b\xa0\x67\xaa\xed\x3e\x3d\xa3\xd9\x65\x47\x7a\x43\xfe\x10\x25\xd3\x98\xbe\xe1\x08\xef\x92\x21\xf1\x66\xe8\x9a\xc4\xb4\xaa\x8e\xf8\xc1\xc5\x04\xaa\x83\x96\x12\xde\x25\xdd\x20\x0b\x22\x99\xc6\x29\xd2\xb0\x2d\x12\x19\x72\x7e\xf6\xf6\xf6\x38\xd5\x60\x20\xe1\x76\x41\xc2\xb2\x33\x37\x03\xe3\xd7\xba\x6d\x5f\x75\x42\x86\xb5\x7c\x4a\x0e\x06\x3c\x34\x9f\x4a\x12\x5e\xd9\x31\x73\x91\xeb\xb1\x91\x3f\x79\xce\x88\x46\xf0\x1e\xad\x86\x1d\x3d\x67\x40\xe5\x2e\xbe\x45\xc7\x2d\xfe\xc2\xeb\xca\x39\x53\x15\x55\x49\x01\x27\xec\x82\xf2\x48\x2c\x8a\x8e\xe4\x3d\x5d\x32\x89\x68\x1c\x5a\xa6\x07\xa2\x15\xa3\xa7\x16\xcf\xc1\x1d\xb4\x18\x0f\xef\x9a\x45\x86\x32\xd9\x8a\xfa\x20\xc9\xc2\x75\x84\xe5\xb0\x37\x09\xdb\x97\xac\x4d\x7e\x0b\x16\x67\xea\xe1\x1d\x59\x51\xd4\x27\xe4\x44\x26\x06\x3e\xb9\x17\x03\xef\xc5\xc0\xbf\xb6\x18\xa8\xdf\xe7\xf1\x45\x73\x57\x2f\xf4\xee\xe6\xee\x9e\x81\xbc\x91\xea\xc6\x52\x63\x65\x38\x27\x8a\x48\x2d\xd2\x0a\x99\x7d\xa2\x53\xa4\x70\xb9\x26\x73\xd9\xa7\x71\x71\x0f\x3c\x4f\xe7\x6b\xc9\x60\x13\x81\x81\x4f\x7e\x1c\x94\x50\x1b\x42\xe3\x0c\x54\x82\x7b\x7a\xf6\x15\xb1\x72\x0c\xa5\x2b\x68\x0c\xde\x04\x49\x30\xa5\xfa\x75\x3e\x63\x59\x1c\x15\x86\x2a\x40\xba\xf0\xd0\xe0\x68\xbf\x9f\x1b\x18\x72\x2a\xce\xe6\x35\xf6\xef\x21\x65\x1c\x26\x4a\x4c\xff\x9e\x96\xf8\x37\x0a\x72\xee\x73\xa1\x2c\x12\xc5\x94\x82\x97\x4a\xcf\x26\x65\x7a\x9a\xb7\x1d\x8b\xca\x36\xcd\xf6\x80\xc4\x1c\x44\x88\x36\x4a\x63\x4d\x18\xee\x44\x51\xf8\x1c\x45\x1c\xca\x8e\x4f\xfa\x32\xcc\x99\x60\xa3\x52\xea\xdc\x1c\x73\x67\x9c\xfa\x92\x42\x84\xe6\x10\xdb\xae\x1a\x67\x9f\xbc\x61\xac\x3c\xa2\xb9\x08\x22\x0d\xf8\x70\xbc\x50\x1a\x9e\x3d\x1b\xe3\x4d\x0e\xea\xea\xed\x32\x8e\xb5\x63\x8c\x1e\x93\x22\xe9\x45\x04\xd7\x66\x3e\xdc\xfd\x31\xe3\x0f\xdd\x59\xd8\x1d\xb2\xf6\xb5\xe2\xee\x38\x98\x6c\x14\x6d\xc7\x0e\x70\xa2\x42\xc9\x98\x07\x31\x52\x13\x3e\xe6\xfd\xbb\x03\x11\x61\xa2\x3a\x76\x8c\x46\x9b\x70\xf5\xca\x09\x0f\x90\xae\x4e\x9c\x36\x9a\x38\xe8\x21\x83\x74\xb1\x64\x10\x9d\x4a\xf2\xa0\x03\xd5\x52\x89\x8d\x75\x0f\x77\x2d\xa1\x20\xdf\xe3\x46\x4f\x69\x4b\x86\x54\x4e\x17\x7b\x04\x82\x64\x57\x85\x90\x22\xcf\xf4\x6f\x4e\xdd\x50\xe4\x94\xb1\x03\xf4\x59\xe3\x59\xdf\xc1\x3a\xe7\xf7\x2a\x7a\x2d\xc6\xbc\x8b\x78\xee\x80\xb7\xfa\xac\x68\xba\x23\x2e\xc1\xbd\x27\x46\x8a\x19\x2c\x17\xa3\xd0\xde\xac\xc0\xd9\x0c\x1c\x7b\x9e\x79\x01\x54\x55\xde\xd8\x24\x02\x17\xbe\x90\x45\xf2\xfd\x94\xa4\xc3\x15\x22\x17\x05\x72\xdd\x36\x42\x42\xb3\x18\x44\xd8\x1d\xab\xd8\x47\x6c\x2f\xc9\x2b\x3b\x5f\x16\xf2\x04\x00\xa3\x65\x80\x01\x21\xcf\x08\x30\xa4\x8e\x29\x7e\x2d\x88\x54\x67\x80\x66\xa9\x44\x99\x51\xe5\x56\x19\xab\x38\x1c\x54\x49\x17\xb9\x1c\x9f\xa6\xb4\x35\xfa\x05\xa3\x8b\x65\xc8\xa1\x8d\x96\x51\x1c\x02\xc2\xc4\xa0\x58\xa6\xe3\xdf\x16\x18\xfe\xc7\xa3\x17\x47\xeb\xeb\xeb\x20\xde\xb7\x73\xb2\x9c\xc6\x97\x7d\x11\x45\x8c\x1d\x08\x96\x39\xdb\x13\x0b\xd5\x4a\x82\x5c\xca\xb2\xdf\xd2\xae\x46\xdd\x90\x30\xc6\x01\x19\xea\xbd\xf5\x96\x11\xe9\x69\xf4\xcb\x09\xcb\x3e\xd9\x3c\x3d\x65\x62\x17\xfe\xbc\xba\x52\x76\x9b\x36\x28\xff\xb1\x05\x65\xd8\x58\x76\xfd\x57\x45\x56\xed\x00\x49\x10\x17\x76\xd0\xab\x10\x55\x76\x8b\xaa\x2e\xd5\xb5\xd1\x29\x0f\x81\x92\xf8\x9f\x65\x11\xc7\xcf\xb7\x90\xdf\xf5\x
69\x78\x15\x3f\xd0\xc4\x8a\x60\xe1\x0b\x55\x60\x9c\xd5\xa1\x2d\x53\xa2\xd4\x17\x53\xfa\x7e\xc6\x88\xc5\xa2\xcc\xeb\x3c\xa6\x79\x76\xc3\x1c\x5e\xb4\x83\x99\x99\x32\x8a\xb4\x0c\x68\xbc\xe1\x54\xcc\xee\x1a\xd5\x94\x0f\xc1\xbe\x86\x12\xa4\xc2\xb2\x9a\x7a\x7a\x96\x61\xae\x68\x52\xef\xce\x51\x72\xc8\x65\x46\xe1\x86\xf4\xfd\xbb\x03\xe5\x81\x89\x9b\xb2\x8c\x83\x44\x09\x9b\x51\x22\x94\x2e\x7e\x5f\x4f\x99\xeb\xeb\xb1\xdf\xef\x5f\xe3\xf8\x6e\xb6\x2f\x3d\xad\xc9\x94\x45\x3d\x9c\xb4\xce\xa7\x7d\xa9\xbb\xf9\x55\x88\x50\xd2\x80\xe9\x93\x1e\xcf\x5a\x19\xa2\x45\xc9\x12\xc5\xce\x1b\x69\x03\xd3\xf4\xfa\xef\xdb\x7b\xbd\xcf\xbd\xde\xe7\xaf\xad\xf7\x11\x4a\x9f\x70\x74\x8b\x9b\x3f\x9f\xde\x47\x69\x6b\xb0\xe2\x87\x33\x27\xa5\xd1\x79\xf1\xdc\xe0\x23\x6c\x18\xa6\xcb\x0f\x47\x53\x01\x23\xb5\x92\x77\x2b\x02\x85\xad\x69\x79\x29\xef\x78\x6c\xfa\xc5\x05\x17\xf9\x42\x2c\xe9\xca\x92\x83\x3a\xac\x66\xb4\xb3\x08\x20\x47\xed\xd2\xf1\x75\xd0\xd2\x37\xeb\x5d\xbe\x3c\x60\xd1\x62\x59\xa8\xc7\x6b\x09\x3d\x17\xd8\xec\xe8\xed\x92\x09\x1d\x43\xd2\x56\x70\x56\x1c\x8d\x21\x69\x87\xa3\x4f\xbe\x5c\x29\x26\xee\xa8\x3e\xa9\x46\xa7\xb4\x59\xa3\x0a\xce\xdb\xa8\x2f\x57\x36\xba\xed\x36\xba\x58\x16\xaf\xe8\x45\xfd\x30\x5f\xd1\x8b\xb2\x31\x9a\x59\xd5\x03\xac\x6f\x8b\x03\x95\x0d\xcd\xdf\x96\x35\x2e\xb1\x19\x9d\x68\x38\x39\x11\x3d\x8d\xe4\x9e\x18\x7a\x4f\x74\x0b\x80\x4f\x4b\x76\xae\x17\xcf\xf5\xae\xc5\x69\xa7\x35\xdc\x81\x2d\xea\xe9\xfd\x16\x75\xbf\x45\xfd\xb5\xb7\x28\x7d\x35\x41\x8b\xd9\x8d\xee\x25\x04\xf0\xdd\xbe\x4a\x2c\x89\xfe\xef\x0b\xff\xef\xbb\x04\xf1\xdf\x83\xd4\x6c\x9b\x0c\x44\x9a\x23\x5b\x40\x0b\x91\x2c\xc1\xc6\x65\xed\x8d\xd3\x64\x12\x4d\x25\x18\x0a\x85\x83\xa1\x65\x64\x15\x09\x76\x2e\x9e\xad\x19\x17\x34\x22\x51\xc2\xfc\xc8\x43\x81\x5b\xc8\x80\x44\x09\x72\x98\x7f\xb8\x4c\xc6\x7c\x8b\xc1\x50\x39\x4f\x95\x60\x8c\x15\x67\xd4\x06\x12\xa9\xaa\x2e\xee\xa0\x08\x43\x44\xa3\x20\x91\xd9\xdc\xeb\xa1\xd3\x1f\x99\xac\x84\x10\xf0\x99\xd6\xe4\xce\x40\xe9\xbc\xc5\x1b\x41\x50\x02\x6e\x9e\x76\xc9\x83\x07\x44\xfc\xee\x83\x4e\xf0\x68\xd2\x69\x6f\x5e\xb4\xb9\xeb\x92\xcd\x2e\x79\x46\x5a\xb4\x98\xb1\xdd\x03\x02\x93\x3e\xbf\x7c\x15\xe4\xb3\x16\x19\xda\xc9\x5c\xa3\xdb\xd2\x52\x02\x8a\xff\xf4\x63\x96\xce\x9f\xff\x06\x3d\x6d\x8b\x2e\xa1\xb0\x42\xcf\x2f\xa1\x61\xd6\xe9\xfd\x24\x3c\x64\xe5\x54\x34\x2f\x2f\x24\x1f\x87\x82\xd5\xe3\x59\x26\xe3\x98\xfe\x46\x03\x38\x66\x6d\xd5\x74\x1d\xc3\x94\x76\x5a\xce\x0f\x1a\xe7\x41\xba\x4c\x1a\x5d\x33\xdd\xc1\x38\xbc\x6d\x73\x12\xc2\x43\x29\x01\xe3\xa3\x72\xa6\xe0\x37\xec\xff\xb1\x6a\x10\x4d\x86\x33\x09\x18\xc0\xe8\xb3\xea\xde\xcb\x62\x76\xd7\x07\x84\xc6\x87\x83\x3b\x3a\x1b\x40\x00\xe0\xf2\xb3\x01\x57\x7d\x70\x2e\x1e\x51\x6f\x8f\x16\xb8\x33\x8b\x9a\x7e\x2c\x6e\xd0\x05\x74\xc7\xcd\xb9\x2b\xf7\x7f\x41\xb0\x87\xee\xc3\xe7\xfb\x6f\xad\x60\x64\x82\xa7\x72\xad\x0c\x7f\x40\x2b\x74\x33\xd7\x6b\x6b\xbc\x77\x7d\x6e\x19\xa5\xde\xd2\xbc\x2c\x66\x5a\x1b\xd4\x23\x6d\x1c\xba\xb9\xdd\x13\xc3\x9c\xd2\x62\x58\xa2\xf3\x94\xbe\x4a\xfb\xb8\xa0\x18\x49\x4f\xe8\xe9\x8c\xc2\x67\x41\x6c\x44\x19\xeb\x5b\x81\xb3\xcf\x82\xd8\x71\x46\xa2\xd2\xae\xd7\x00\x3d\x2b\x0d\x45\xf8\xf9\xbb\xc9\x60\x44\xd1\x9b\x0c\x47\x14\x6d\x38\xa0\x26\x67\x51\xc6\x5d\x82\x18\x2c\x37\x6b\x4f\x4e\x02\xd0\x3d\x3d\x49\x36\xe5\xe4\xab\x23\x14\xb2\xe6\x34\xae\xf0\x86\xe4\x44\x0b\x54\xfc\x7a\x4f\xb8\xd1\xfc\x51\xdf\xe6\xd9\x10\x38\xf2\x39\xe7\x27\x0a\x18\x85\x8e\xb4\xee\xb1\x86\xb8\x1a\x9e\xa7\x7c\xd6\x28\xa0\x92\x63\x73\x9a\x05\x53\xba\x5f\x34\x39\x39\x0b\xd0\x52\x1c\xf9\x20\xd4\xa1\xb6\x02\x4b\x7c\xdd\x71\x8e\x5d\xa4\x70\xb2\x5c\x05\x2d\xde\x81\x09\xe7\x8e\x35\x63\x62\x50\xa5\xc3\xb1\x32\
x7f\xfb\xf9\xf6\x0e\x4c\xae\xfa\x3a\x7a\xe6\xec\xc8\x1a\x9a\x12\x19\x6f\x37\x2c\x5f\x6f\x7b\xce\x12\xd7\xf6\xaf\x6c\xf1\x92\xeb\xd5\xe8\x97\x35\x51\x4d\xbb\xb0\xff\xd6\x63\x02\xc0\x1c\x4c\x28\x89\xee\x6b\x60\x02\x91\xf2\x2d\x06\xdd\x5b\x2b\xa1\xec\xf9\x22\x8a\xf9\xe1\xad\x96\xbc\x05\x68\x05\x8d\xbb\x10\x12\x0f\x9b\xe5\xf4\x67\xcb\x6b\x0d\xe9\xd1\x2e\xe6\x74\xab\x4a\x64\x75\x3b\xb8\x75\xcb\x89\xaa\x9a\x1b\x39\x85\x2f\xe8\x38\x9a\x07\x71\x39\x2a\xb4\x14\xd8\x10\x09\xba\x40\x09\x51\xfe\x71\x07\x6c\x0a\x4f\x35\x83\x2d\x8f\x95\x5c\x72\x04\x03\xf9\xba\x72\xd0\xf5\x2b\x08\x55\x58\xcd\x3c\x3e\x7a\x4e\xa8\x2b\x8d\x49\x95\x72\x06\x57\x76\xf8\xfd\x23\x71\x9a\x9b\xe0\xe9\x3d\x1d\xd3\x68\xd1\x80\xcc\xdd\x32\x4d\x08\xc0\x05\xbd\x2d\x05\x88\x1a\x1b\x0f\xb0\xe1\x2a\xae\xe5\x62\x9e\xc1\xd9\x80\x4d\x28\x80\x8b\x45\x77\x24\x20\xd6\x2e\x6f\x76\x40\x7a\x1f\x9c\x37\x5f\xe2\x6e\x01\x3f\x22\x2a\xe1\x9a\x70\x36\x86\x07\x8f\x2c\xe4\x86\x96\xae\xeb\x6d\xa3\xae\xde\xbc\x9f\xf6\x4c\xf9\xd6\x98\x6f\x1c\xd1\x34\x59\x61\x1c\x26\x74\xc9\x38\x4a\x81\xbe\xf2\x38\x1a\x74\xbe\xbc\xc7\x77\x2e\x6b\x97\x10\x8e\x30\xee\xaa\xea\x28\x04\xfe\xf7\x76\xd4\xca\xb9\x49\x47\xd9\x5e\x70\x67\x27\x02\x33\x42\x7a\xd5\x98\x10\xa4\x7f\x68\x7e\x80\x9b\x50\x8c\x31\xc2\x5b\x71\xa5\x31\x97\x4f\x65\x5c\xf3\xba\x69\xe3\xd0\x7d\x19\xec\xbc\x64\x0a\xcd\x3a\x7d\x63\x2d\xed\xc8\xeb\xd7\xaf\x1b\xf6\x21\x2e\xa5\x20\x55\xd3\x4a\x2d\x7f\xa0\xd9\x82\xd6\x6e\x4f\x0a\x03\x1c\xba\x1a\x01\x0e\x4c\x45\x2f\xf2\xe5\x68\x1e\x15\x3f\xa7\x59\x9d\x94\xa4\x01\x4b\x56\xba\x2f\xbf\xfa\xea\xbb\x41\xab\x02\xaa\x74\x2b\x2e\x69\xcf\x3a\xe2\x38\xd7\xdf\x5a\xf1\xd3\xc3\x69\x4a\xd1\x61\xa4\x1e\xa4\xa1\x09\x06\x4b\xd8\x48\x01\xd9\xdf\x2a\x24\x0e\x60\x6e\x49\x5b\x7c\x70\x21\xf4\x51\xc2\xc8\x43\x05\xcb\xd2\x95\x60\x5a\x06\x20\x64\xa7\xb2\x6c\xab\x51\xd3\xa0\x17\x31\x12\x9d\xe8\x8a\x01\x28\xcf\xdc\xaf\xcc\x42\xa5\x25\x50\xf3\xe6\x8a\x76\x32\x5e\xbf\x7e\xed\x02\x73\xea\x47\x55\x2a\xc2\x34\x06\xcd\x12\xe0\x5b\x58\x38\xf0\x98\x6c\x4a\xd9\x5d\xe5\xa3\x59\xd3\x11\x23\x5d\xa5\x75\x35\x4d\x47\xd5\xc2\x8d\x92\x51\x90\x63\x45\x85\xe8\x00\x30\x4a\xb1\x6e\x05\x8c\x02\xb9\xee\xf6\x56\x68\x63\x1e\x25\xa6\x75\x8b\xd3\x82\x80\xb8\x61\xfd\xb3\x20\x9f\x65\x41\x51\x39\x86\x12\x98\x46\x3b\xc3\xea\x3d\x92\xb7\xb3\x15\x1d\xf2\x83\xd4\x9f\x33\xc4\x75\xb0\x79\xb8\x58\xbd\x87\xd3\x20\x7f\x97\x45\xe3\x4a\x9c\x95\xc0\xdc\x58\x09\xbc\x7a\x2f\x45\xd8\xa1\xbc\xaa\x97\x0a\xe6\x86\x6d\x8c\xd0\x15\x5a\x45\x33\xe5\x60\x5f\x89\x86\x64\x4c\x86\x7f\x70\x5b\x9b\xaa\xbe\xd9\xa0\xa8\x45\xcc\x42\x8c\x6b\x97\xfe\x58\xdb\x31\xa0\x2b\xcd\x51\x64\xbc\x58\x08\xc6\x45\x9a\x49\xf1\x47\x5a\x3e\x80\x19\x71\x8f\x30\x58\xc3\x96\x58\x40\xfb\x1a\x9b\x48\x4b\x07\xe7\x29\x6a\x0f\x3d\xb7\xe3\x50\x07\x19\x05\x4b\x25\x78\x3c\x76\x60\x99\xd3\xa3\x38\x45\xdc\x7a\x42\xd7\xc3\x50\x21\xc3\xcd\x5b\x37\x76\x3d\x69\x8e\xd1\xa7\xc5\xac\xd3\xed\xb9\x24\xfb\x3a\x9d\x22\xd9\xb8\x59\x97\x7c\x03\xd5\x86\x18\xd5\x2e\xf4\x25\xf6\x3b\xa2\x40\x7f\x1a\xa7\xa3\x20\xee\x33\xa4\xf6\x03\x37\x59\xc4\x3c\xf3\x35\x19\x8d\x83\xc5\xdb\x9b\x36\xcb\x0a\x3b\x8d\xf2\xc4\xaa\x26\x91\x55\x8a\x6e\xd0\x7e\xfa\x80\x63\x4a\xc9\x02\x1d\xff\xf4\xd4\x79\xa3\x7a\x59\xcc\xb4\x3d\x9f\x65\x98\xd3\x1a\x6e\x3d\xed\xb5\x1c\x03\x21\x61\xa0\xae\x2d\x73\x5a\xc3\xed\x6f\x20\x81\xcf\x69\x6b\xb8\xfd\x1d\xff\x54\xb4\xd0\x1a\xee\xf0\x22\xd1\x28\x48\x5a\xc3\x9d\x9d\x9e\x69\x3e\xf8\xff\xb1\xf7\xee\xfb\x6d\xdb\xcc\xa2\xe8\xdf\xc9\x53\xa0\xdd\xe7\x6b\xa4\x98\xb6\x75\xb7\xa3\xc4\x5d\xcb\x91\xed\xd8\x2b\x71\xec\x6d\x3b\x6d\xbf\xed\x9f\xeb\x1f\x25\x42\x16\x1b\x89\xd4\x22\x29\x5f\xd2\x78\xbf\xcf\x79\x8e
\xf3\x62\xe7\x87\xc1\x85\xb8\x52\x94\x2f\x69\xda\x65\x7f\x6b\xa5\x22\x09\x0c\x06\xc0\x60\x30\x18\xcc\x05\x1e\xd9\x20\xfd\xd8\x6d\xb5\xe0\x99\x9b\x11\xfd\xd8\x6d\x51\xf0\x8c\xb3\xff\xd8\x6d\x51\xb4\xf8\x65\xef\x8f\xdd\x16\x69\x90\x1b\x01\xfd\xd8\x6d\x35\x6f\xcf\xbc\xe6\xab\x27\x7b\xc4\x27\x7b\xc4\x7f\xb6\x3d\xa2\xcb\x18\xf1\xde\x36\xf3\xe5\xcd\x04\x4b\xd8\x00\x42\xb9\x8f\x38\x7b\x4c\x13\x7b\x78\x3b\xdf\x64\x25\x37\xae\xbf\x8b\xcd\x4a\x09\x93\xfa\xd5\xd5\xd5\x3c\x26\x8d\x2d\xce\x0d\x4b\xd8\x48\x58\x3c\x80\xc3\xd9\x08\xf9\xd3\x50\xc2\xfd\x91\x0e\x24\x66\x32\x7a\x4d\xe0\x51\x33\xd6\xdf\x55\xb8\xc2\x38\xd1\x75\xe3\x46\x2b\xae\x42\x0b\x08\x7c\xb2\xf8\x65\x6c\x6a\x1f\x71\x66\xd9\xd4\xd4\xcd\x4b\xde\x5d\x6e\xcf\xbc\x56\xed\x69\xb7\x78\xda\x2d\xfe\xd9\xbb\xc5\x77\x6a\xbd\xfe\x70\x86\xe6\x25\xed\xe0\x73\x53\xce\x43\x9c\xa4\x71\xe4\x8f\x9f\xec\x39\x1f\xdb\x9e\xf3\xb6\x9c\x85\x5f\x84\xaf\x72\xb3\xc1\x22\xfd\x70\x5e\xd0\x54\x11\x4f\xd9\xac\x9e\x5b\x0b\xdd\xe3\x86\x32\x9c\x90\x8d\xe0\xc8\xbf\x7a\x8f\xe7\xdd\x5c\xc8\x45\x5f\x78\xcf\x9f\x3d\xd3\x71\x33\x0a\x14\x78\xa6\x95\xbf\x89\x33\xdb\x11\x1f\x24\xc3\xbd\x67\xcf\x4a\xde\x4d\x97\xbe\x82\xc3\x83\x23\x3c\x88\x2f\x69\x70\xa8\xa2\x3b\x2b\x5e\xce\x8a\xab\xfa\xb5\x60\x40\x66\xd1\x38\x1e\x7c\x2e\x47\x29\x4a\xd9\x02\x62\x71\x95\x2b\x63\xf0\x58\x6e\xdc\x9c\xa3\xf7\xc0\x37\xdf\xf9\xdc\xcf\xbd\xfe\x5e\xe4\x9a\xd3\x76\x6d\xec\xec\x52\xf9\xf9\x29\x37\x3b\xc5\x73\xb3\xc8\x5d\xa6\x3e\x37\x1a\xf2\x36\xc9\x9a\x35\x2c\x35\x22\x2d\xde\xfc\xad\x42\x41\xd2\xed\x09\xa7\x6a\xd7\x6d\x87\xf3\x52\x44\x02\x27\xcb\xbb\x8f\x77\x3e\xd8\x9c\xa3\x16\xce\xa7\x43\x2e\xec\x10\xcb\x4d\xb9\x9c\x6f\xb7\xb9\x70\x6e\x51\x11\x69\x5a\x21\x5d\x4e\xaf\x3f\xc9\xe9\x4f\x72\xfa\x3f\x5b\x4e\x67\x42\x7a\x3a\x72\x68\x75\xe6\x88\xdf\x38\xc1\xb3\x09\x01\xfd\xf3\x1c\x25\xd0\x20\x4e\xf0\x4a\x18\xab\x72\xfa\x5a\xe9\xc0\x09\x25\x1d\x2d\xe7\xf9\x6b\x42\xa1\xe3\xd1\xe8\xd1\xb5\x43\xdf\x8f\x3c\x4e\xb8\xe3\xf1\x48\xb9\xdd\xc0\x57\x2c\xe8\xf4\xce\xb7\xb8\xd0\x49\x47\xf3\x2f\x74\xd2\x11\x5c\xe8\x50\xc1\x65\x91\x7b\x9b\x22\x39\xdf\xbd\x39\x19\xe2\x81\xb4\x35\x5d\x5a\x6f\xea\x98\x88\x90\x8e\x46\xe7\xf6\x02\xaa\x51\x08\xb2\xe8\xb2\x8a\x1a\x0d\xa3\x61\xec\x6e\xd1\xf2\xf5\x7e\xcd\xa5\x38\xdb\xf7\xaf\x19\x11\x1c\x87\x5f\xf4\xcb\x61\xa9\xed\x79\x45\x55\xab\x9f\xbb\x20\x12\x46\x87\xf1\xaf\xc5\x08\xd8\x8a\xdc\xaf\xe1\x89\x9f\x7c\x3e\x49\x66\x69\x86\x83\x43\x6c\x5c\x06\x4b\xcd\x17\x17\xbc\x1f\x12\x11\x26\x32\xdd\xa1\x1f\x16\xb4\xef\x2c\x73\x3f\x0a\xf0\x83\xe0\x30\x09\x2f\xfd\x0c\xd3\x23\xa1\xa3\xf5\xa2\x62\xf7\xeb\x3b\x4d\xfa\x35\xb7\xfb\x45\xc5\xee\x87\xc0\xc8\x4f\xe7\xb6\xee\x2c\x73\xbf\xa6\x2f\x70\x46\x37\xf4\xc2\xb1\x2f\x28\x75\xff\xe6\x4b\xcc\x7d\x51\xb1\x7b\xd3\xfd\xf1\xcd\xa4\xb0\x71\x57\x91\x7b\x53\xfd\xbc\x86\x5d\x45\xee\x3b\xe4\x44\x8e\xcb\x30\x05\xbd\x93\xc4\x93\x43\x3f\x4d\xaf\xe2\x24\x28\x1a\xff\x92\x75\xee\xbd\x0e\xe6\x8d\x89\xab\xc8\xbd\xc9\x70\x5e\xc3\xae\x22\x0f\xc1\x7a\xe6\xb5\x5d\x50\xca\xde\xbc\x78\x58\x5d\x45\xe9\xac\x0f\x37\x6f\x34\xa5\xf1\x2c\xca\x9f\x27\x61\x9a\x86\xd1\xc5\xf3\xd2\xd8\x4e\xe3\x54\xbf\xba\x92\xb0\xb4\x7c\xb5\xe8\x29\x50\xb1\xde\x11\xcd\xbf\xe5\x3a\x1e\x8d\xa4\x04\x62\x9a\xed\x85\x72\x8a\xd6\x2c\x23\x5a\x8d\xa7\x33\xf4\xd3\x19\xfa\x9f\x7d\x86\xce\xef\xba\xfa\x5f\xbe\x68\x77\x5d\x9b\x63\x7c\x8d\xde\xe2\x04\x5f\xa4\x5f\xfc\xf4\x4b\x88\xde\xf8\x63\x7c\xfd\x9f\x49\x36\x4c\x57\x46\x33\xf5\x38\xdc\x61\xd1\x4c\x8f\xf0\x10\x27\x38\x1a\xe0\x2e\x22\xed\xa7\xdd\xd5\xd5\x8b\x30\x1b\xcd\xfa\x2b\x83\x78\xb2\xca\x4f\xdd\xab\x17\xf1\xb2\xf8\xdd\x1f\xc7\xfd\xd5\xf4\xca\x4f\x26\xab\x61\x94\xe1\x24\xf2\xc
[hex-escaped binary blob elided — unrecoverable embedded binary data, not part of the readable diff]
xb7\x87\x8c\x2f\x0b\x07\xa6\x39\xe1\x65\xe9\xa1\x4a\x8a\x2e\xab\x8f\x93\xdd\x10\xbf\x40\x31\x45\x3b\xfa\x56\x8a\x8b\xc9\xba\x5e\x14\x19\x53\xab\xc4\xf5\x05\x3a\x2c\x7b\x94\xcc\xcd\xf1\x38\xbe\x42\x7e\xd2\x0f\xb3\xc4\x4f\x6e\x10\x53\x2f\x7d\xc6\x37\x96\xb8\x83\x9f\x65\x8d\xc4\xcf\xd6\x86\x0b\x06\x4a\x57\xb7\x94\x1b\xad\x39\xce\x90\x04\xa5\x02\x37\x48\x88\xff\x06\xba\x8d\x38\x41\x61\x14\xe1\x04\xa2\xcf\xc6\xb3\x0c\x04\x08\x3d\x0a\x1f\xc4\x4c\xa4\x3a\x46\x4a\x86\xec\x81\xb6\x62\x04\xa4\xe3\x1a\x3f\xb9\x46\x68\xa9\xb1\x08\x09\xc4\x92\x56\x32\x2e\xd2\x47\x86\x52\xc1\x50\x2a\x68\x34\xf6\xdb\xc1\x11\xcc\x27\xbd\x06\x9c\xfa\x01\x1a\xc4\x51\x9a\xf9\x91\xde\xbc\x35\x89\x94\x3a\xc7\x6e\xc5\x9a\xc0\xfb\x34\x3c\x43\xbf\x6f\xa0\xda\x75\x7b\x40\xff\x67\x73\x87\x31\x0a\x37\x3b\xf4\x7f\xc5\x9a\xb1\x58\xd3\x89\x85\xda\xb3\x8d\x22\xff\x82\x38\x64\xb0\x03\x3d\x46\x14\x32\xc1\xc4\x1f\x24\x12\x59\x41\xbe\x32\x1b\x33\xb6\x0c\x24\x74\xda\xc6\xc7\x1d\x7a\x52\x55\x5f\x9c\x2f\x98\xbb\x45\x20\x83\x61\xfe\x6e\xe2\x8f\xed\x6f\xf6\x58\xf4\x31\xc0\x2b\x84\x25\x56\x18\x09\x65\xc1\x29\x2f\x13\x88\xcc\x28\xfd\xf0\xc1\xc8\x64\x92\xe0\xad\xcc\x0d\x3e\xf6\x58\xd1\xc3\x60\xa8\xff\xa7\x47\x0f\x9b\x23\xa6\x2e\x22\x22\x12\x1e\x9a\xd3\xd0\xdc\x08\x62\xee\x1a\x73\xa3\x88\xb9\xab\x3e\x52\x24\xb1\xfb\x73\xbb\x1e\x55\x4f\xc3\x78\x5b\xf6\x63\x22\x5d\xec\xda\x83\xa3\x15\x06\x1c\x2b\xe4\x98\xf2\x58\x69\x40\x73\x09\x85\x4b\x1a\xfc\x92\x49\xa0\x52\x75\x86\x1c\x9b\xf8\x03\xfb\x25\x91\x38\xf8\x3b\x8c\xe0\x5e\xfd\xad\x15\xe6\xd7\x9d\xd6\xb2\xe5\xf5\x38\xec\x2f\x13\x54\x02\xb0\x6d\x4d\xb5\xaf\x38\x1a\x2c\x83\x4d\xa3\xe5\x3d\x75\xb3\xd4\x3e\x4c\x82\xf6\x7c\xe3\xbb\x74\xe4\x37\xda\x3a\x48\xf2\xb2\xa1\x83\x4b\x47\x7e\xbb\xde\x30\x5f\x36\xd7\x2d\x25\x9b\xda\xab\x24\x9c\xe2\x49\x50\xef\xd4\xac\xb6\x7f\xca\xab\x69\xff\x73\x30\xd4\xdb\xc1\x97\xd3\xcf\xc1\xb0\xe8\xde\x41\xed\x7a\x1c\xe0\xe5\xc1\xb0\x6f\x7d\x9d\x25\x8e\xd7\xcb\x17\x63\x3f\x98\xf8\x91\xed\x73\x6c\x07\x86\x07\xfa\xeb\xa9\x1f\x2c\xfb\x51\x1a\x5e\xbf\x6a\xe8\x83\x40\x3e\x85\x69\x5c\xaf\xd5\x1b\xfa\x88\xb3\x4f\xaf\xd6\x5e\xad\xe9\x33\x44\x3e\x7d\xc1\x49\xcc\x5c\xaf\x2d\x5f\x23\xc7\x37\xaa\x23\x5b\x1e\xe1\x6b\xed\x83\x8f\x75\xe2\xa2\x71\x37\x02\xe3\x7d\x32\xd0\x27\x37\xf1\xfb\xfd\x30\xb3\xbe\x5c\x1e\xe3\x0b\x7f\x70\xf3\xd8\x77\x40\x62\xf5\xc0\x93\xbe\x68\xe0\x65\xbe\x56\xc4\x23\x5b\x22\xf0\x4c\x56\x86\x66\x16\xca\xd6\x81\xf8\xdd\x68\x89\xdf\x84\xea\xf9\x6f\x42\xec\xe2\x37\xfd\x95\x93\x76\x6e\x5f\x0a\xbf\x18\x21\x53\x0c\x28\xfd\x1a\x77\x58\x14\x1d\x4e\xad\xd2\x53\x96\xa8\x4f\x82\x36\xf3\xb7\xb1\x52\x83\x50\x22\x6d\x56\x26\x40\xf1\x46\xd0\x9d\xfc\x86\x92\x9b\x78\x23\x53\x99\x78\x19\xa9\xaf\x24\x9a\x82\x67\x42\x4a\xf0\x23\xa7\x20\x3a\x2a\x03\x36\x50\x8c\x5e\xa4\xdf\x9c\x4c\x16\x55\x44\x2a\x0a\x48\x99\xd7\x2e\xae\x98\x74\x87\x62\x63\x5d\xea\xb6\xeb\x5e\xb1\x36\xd9\x53\xe9\xaa\xdb\x6e\x79\x0a\xe1\x75\xdb\x6d\x2f\x9f\xf8\x6e\xbb\xe3\xa9\xa3\xd7\x6d\xaf\xe9\x37\xc2\x3a\x29\x77\x3b\x35\x8f\x51\x6b\xb7\x03\xf8\x08\x4a\xe9\x76\x1a\x9e\x4c\x2b\xdd\x4e\xcb\xb3\x51\x4b\xb7\xd3\xf4\x64\x0a\xe9\x76\xda\x9e\x4c\x3f\xdd\x0e\xe0\xa5\xd0\x4c\xb7\xb3\xe6\xe9\x54\xd3\xed\xac\x7b\x3a\xdd\x74\x3b\xaf\x3c\x83\x48\xba\x6b\x35\xcf\x42\x4e\xdd\x35\xc0\x9f\x2d\x89\xee\x1a\x60\xcf\x48\xa3\xbb\xd6\xf2\x0c\xe2\xe8\xae\x01\xe2\x84\x8c\xba\x6b\x80\x73\xbe\xce\xba\x6b\x1d\xf9\x02\xdd\xcb\x97\x6c\x77\x8d\x5f\xad\x93\xc5\xdc\x5d\x7b\xe5\xf1\xa5\xda\x5d\xaf\x79\xf9\x12\xee\xae\xd7\xbd\x7c\x71\x77\xd7\x01\x9d\x9c\x82\xbb\xeb\xd0\xb8\x60\x34\xdd\xf5\xd6\xed\x99\xd7\xa9\x3d\x5d\x1e\xfc\xf5\x97\x07\xbd\x11\x1e\x7c\x26\x9d\x82\x95\x42
\xdd\x80\x68\x9a\xb3\x74\x36\x25\x03\x83\x59\x7c\x6a\xa9\xdf\x20\xc7\xd3\x90\xe6\xe8\x87\x0d\xf4\x82\x43\x7e\x61\xb1\x08\x11\x4e\x1a\x0f\x78\x5d\x51\x68\x8e\x2f\xda\x39\xc2\x43\x9c\x60\x38\xe8\x25\xe1\x05\x9c\xc9\xc2\x28\xcc\x72\x30\xe9\x6c\x8a\x13\x50\x5d\x6f\x68\xe9\x39\x24\x28\x9b\xb3\x8b\x09\x8e\x32\xad\x00\xca\x62\x34\xf2\xa3\x60\x8c\x95\x71\x93\x61\xf7\xad\x90\x15\x9b\x1a\xa8\x6a\xba\x03\x4a\xba\x6f\x1a\x4b\x9e\x9a\x40\x85\x51\xb6\x2e\x69\xe8\x87\x72\x7d\xa1\x98\x50\x67\xc7\x3c\xe6\xe7\x35\xa8\x12\xfe\x13\x81\x0a\x2f\x64\x6c\x94\x43\x84\x15\xb1\x98\xa6\xff\x02\x48\x97\x21\xbe\x72\xa1\xe8\x6c\x5e\x42\x78\x8f\xa3\x80\xbe\x7e\x55\xcb\x73\x82\x03\x2c\x41\x67\xcc\xab\xff\x40\xd6\x9c\xb0\x1d\x81\x45\x67\x07\x6e\x54\xad\x1a\xad\x38\xb1\xaa\x77\xec\x68\xb9\x5b\x5a\xac\xc6\x5e\x94\x35\x1b\x8b\x36\xb1\x58\x8d\x9d\x71\xec\xdf\xa5\x4a\xa7\x05\xef\xf3\xf2\x77\x24\xa5\x15\x4a\xc1\x1e\x92\x5f\xdd\x64\xf8\x00\x92\x03\x19\xaf\x6d\x79\x97\x15\xfa\xdb\xa5\x8b\x2e\x6f\xab\xcc\x8a\xc8\x4b\x2f\xa6\x42\xc8\xa1\xbd\x15\xb8\xa1\x0d\x3b\xce\x16\xcd\xc2\xf6\x35\xcb\xbe\x7a\x93\xd9\x8c\x9f\x17\x72\x17\xb4\xa1\xb2\x48\x3e\xed\xbc\xfe\x69\x78\x76\xa7\xe4\xd9\xb9\x39\x77\xf8\x05\x53\x55\x6d\xee\x38\xaa\x16\x15\x8c\x35\x4f\x6d\xe1\x21\xe6\x46\x68\xeb\x88\x32\xdf\xd6\xac\x67\x64\x34\xc9\x6b\x02\x0f\x45\x44\xea\x93\x99\xb9\xd9\xae\x3f\x9d\x8e\x6f\x58\xc3\x7e\x72\x31\x23\x2c\x3c\x2d\xf2\x57\x64\xfc\x7a\x65\x9a\xc4\x59\x4c\x70\x94\x39\x77\x91\xe1\x84\xbe\xfb\xd8\x15\x2c\x9d\xfa\x93\xac\xf3\xd7\xc8\x3a\x10\x30\xfa\x2f\x88\x4b\x64\xcd\xa9\x54\xc2\x44\x02\xb6\x58\x7a\x8f\x87\xb2\x5c\xb7\x4e\xaa\x9c\x30\x66\x21\x95\xa4\xaa\x4b\xed\xe6\xcf\x26\xe9\xb9\xf8\x4a\xa7\x65\xe7\x22\x27\x84\x4d\x6c\xd0\xe1\x5b\xf1\xfb\x29\xfd\x91\x86\x11\x0b\xc6\x4a\x58\x46\xed\xba\x5e\x63\x7f\x55\xf4\x55\x4d\xe3\xcb\x96\x57\xa5\x6a\xb5\x50\xdf\xdf\x6a\x6b\xd6\x14\x36\x03\x10\xdd\x6b\x12\x6d\xb0\x51\xb5\x18\x80\xf0\xb4\x37\x85\xb7\x63\xb9\x26\xd8\x9e\xab\xf8\xd4\xe4\xa4\xb5\xeb\xce\x5a\xab\xdd\x68\xd6\xea\x1e\xaa\x5d\xe3\xe1\x20\xf0\xfb\xeb\xaf\x2c\x79\x15\x6b\xd7\xaf\xd6\xfb\x7e\x30\x18\x62\x0f\x06\xa6\xd9\x68\xb7\xd6\x3a\x6a\xb9\x33\xe7\x8d\x98\x96\x46\x4f\xee\xc5\xbe\xc8\xa4\x67\xdb\xbb\xae\xfc\x29\xc2\xe0\x5e\x3d\x7f\x0f\xa9\x77\xdc\x3b\x86\xfb\xfa\x9a\xcf\x06\x45\xe2\x9c\xc0\xe3\xe9\x05\x51\xe8\x88\xc0\xbb\x7f\x2e\x95\xde\x3f\xe5\x0f\x67\x36\x97\x10\xe9\x33\x21\x38\xb3\x00\xf9\xab\x54\x2a\x12\x4c\xea\x29\x8e\xbe\x22\xf9\x25\xec\x75\xad\xaa\xe6\x23\x8e\xbe\x96\x04\xd8\x68\x55\x2d\x00\x21\x94\xb1\xe2\x92\x6e\x82\xbb\x9f\x71\xc8\xae\x72\x43\x61\xbf\xee\x57\x86\xb4\x86\xa4\x31\x45\x4b\xa8\xa6\x8b\x0f\x4a\xe9\xba\x56\xba\x5e\x58\xba\xa1\x95\x6e\x14\x96\x6e\x6a\xa5\x9b\x85\xa5\x5b\x5a\xe9\x56\x61\xe9\xb6\x56\xba\x5d\x58\xba\xa3\x95\xee\x14\x96\x5e\xd3\x4a\xaf\x15\x96\x5e\xd7\x4a\xaf\x17\x96\x7e\xa5\x95\x7e\x55\x3c\x3b\x35\x6d\x76\xe6\x4c\x66\x5d\x2b\x5e\x3c\x9b\xf5\x86\x56\xbc\x78\x3a\xeb\x4d\xad\x78\xf1\x7c\xd6\x5b\x5a\xf1\xe2\x09\xad\xb7\xb5\xe2\x6d\x83\x1b\xac\xae\x12\x86\xfc\x39\x8c\x2e\x48\xd5\xd0\x1f\xf7\x6d\x62\xb3\x4f\xb6\x81\x53\xeb\x40\xf5\xe1\x93\x75\x50\x06\xf0\xc9\x3a\x00\x01\x7c\x6a\xda\xd0\xe9\xe5\x77\xd0\xea\x37\x82\xc4\xce\x4e\xc5\xf7\x50\xdf\x43\x03\x0f\x05\x9e\xb4\x40\x3d\x84\xd6\x3c\xb2\x85\xd6\xce\x74\xde\x10\xd0\x7a\x81\x87\x44\xd5\x7c\x84\x3c\x84\xea\x0d\x0f\x9d\x9c\xd6\x8d\x7a\x03\x5a\x8f\xb6\x44\xab\xe6\x8b\x96\xd4\x5b\x23\xf5\x1a\x46\xbd\x3e\xad\x27\x90\xf4\xa5\x7a\x4d\x0f\xa1\x06\xb4\xd7\x34\xea\x15\xf5\xaf\x25\xfa\xd7\x5a\xa8\x7f\x6d\xd1\xbf\xf6\x42\xfd\xeb\x88\xfe\x75\x16\xea\xdf\x9a\xe8\xdf\xda\x42\xfd\x5b\x17\xfd\x5b\x5
f\xa8\x7f\xaf\x44\xff\x5e\x2d\xd4\xbf\x7a\xcd\x63\xfd\xab\x9b\x04\x53\xd4\xc1\x7a\xdd\x63\x1d\xac\x9b\x14\x53\xd4\x43\x82\x25\xed\x61\xdd\x24\x99\x42\x12\x6d\x7a\x9c\x44\x4d\x9a\x29\xec\x63\x4b\xf4\xd1\x24\x9a\xc2\x3e\xb6\x45\x1f\x81\x6a\xcc\x4e\xbe\x7b\xe7\xe8\xa4\x87\x50\x9b\x76\xd2\xa4\x9b\x80\x56\xb4\x76\x92\xd0\xdb\x2b\x5a\xd1\x24\x9c\x01\xad\x68\xef\x64\xdd\x43\xa4\xa3\x27\xa7\x75\x93\x72\xfa\xb4\xa2\xb5\x93\x84\x63\x34\x6a\x50\xd1\x24\x9d\xa2\x3e\xb6\x45\x1f\x1b\x76\x5e\xe3\xea\x23\xa1\x39\xda\xc7\x86\x9d\xd9\x38\xfb\xd8\xe6\x7d\x6c\xd8\xb9\x8d\xab\x8f\x2d\xd1\xc7\x86\x9d\xdd\xb8\xfa\xf8\x2a\xef\xa3\x9d\xdf\x38\xfb\xd8\x12\x7d\xb4\x33\x1c\x57\x1f\x09\x63\x64\x7d\xb4\x73\x1c\x57\x1f\xd7\xf3\x3e\xda\x59\x8e\x93\x56\x9b\x1e\xef\xa3\x9d\xe7\xb8\xfa\xd8\x10\xb4\xda\xb0\x33\x1d\x57\x1f\xd7\x44\x1f\x9b\x76\xa6\xe3\xea\x23\x59\xfe\xb4\x8f\xcd\xba\x7d\x41\xee\xee\xba\x89\xb5\x05\xb8\x36\xed\x5c\x67\x77\xd7\xde\x49\x32\xac\x64\x6d\x9d\x9c\x36\xed\x5c\x67\x77\xb7\x60\x41\x76\xa0\xa2\x9d\xeb\xec\xee\x3a\x3a\xd9\xf2\x50\xa3\x09\x15\x4d\xd2\x29\xea\x63\x3d\xef\xa3\x9d\xe9\xb8\xfa\xd8\xca\xfb\x68\x67\x3a\xae\x3e\xc2\x44\xd2\x3e\xda\x99\x8e\xb3\x8f\x35\xd1\x47\x3b\xd3\x71\xf6\xb1\xe9\xb1\x3e\xb6\xec\x4c\xc7\xd5\xc7\x9a\xe8\x63\xcb\xce\x74\x5c\x7d\x6c\x8a\x3e\xb6\xec\x4c\xc7\xd5\x47\xc2\xca\x69\x1f\x5b\x76\xa6\xe3\xea\xe3\x2b\x31\x8f\x2d\x3b\xd3\x71\xf5\x91\x2c\x0f\xd6\x47\x3b\xd3\x71\xd2\x6a\x9b\xd3\x6a\xcb\xce\x74\x5c\x7d\x6c\xe4\x7d\x5c\xb3\x2f\xc8\xbd\x3d\xb7\xa0\xda\xa1\x9d\xb4\x73\x9d\xbd\x3d\x7b\x27\x81\xe6\x80\x07\xb4\xec\x5c\x67\x6f\xaf\x40\x0c\x68\x83\x08\x68\xe7\x3a\x7b\x7b\xf6\x4e\x12\xde\xd1\x80\x61\x6d\xdb\x45\x1d\x57\x1f\xc9\x7c\xd0\x3e\xb6\xed\x4c\xc7\xd5\xc7\xa6\xe8\x63\xdb\xce\x74\x9c\x7d\xac\x89\x3e\xda\x99\x8e\xab\x8f\xf5\xbc\x8f\x76\xa6\xe3\xea\xe3\xba\x98\xc7\xb6\x9d\xe9\xb8\xfa\x08\x34\x47\xfb\x68\x67\x3a\xae\x3e\x82\x48\x4e\xfb\x68\x67\x3a\xce\x3e\x36\x3d\xde\x47\x3b\xd3\x71\xf5\xb1\x25\xfa\xd8\xb1\x33\x1d\x67\x1f\xeb\xbc\x8f\x1d\x3b\xd3\x71\xf5\xb1\x21\xfa\xd8\xb1\x33\x1d\x57\x1f\x5f\x89\x79\xec\x34\xcd\x05\x09\xd7\x28\x19\x4e\x26\x38\x08\xfd\x8c\x39\x95\x81\xbb\x82\x5a\x8e\x1c\x71\xd1\x06\xaa\xc0\x7f\x97\x90\xaf\x6b\x58\x69\x99\x3a\x2b\x53\x27\x65\xfa\xf6\x32\x0d\x56\xa6\x41\xca\x0c\xec\x65\x9a\xac\x4c\x93\x94\x09\x0c\x6d\xae\xa6\xaa\xdc\xb1\x58\xea\x2e\x18\xd0\x16\x32\xa5\x8b\x6c\xba\x7e\xe6\xdb\x0e\xe6\x7e\xe6\x8b\x50\x3e\x7e\xe6\xbb\x95\x63\xd1\xdb\x30\x4b\x4f\xe2\xcc\x1f\x0b\x98\xd1\x96\x9f\xf9\xd4\x83\xe4\x25\x5a\xb7\x40\x87\x3a\x1f\xf0\x30\xe3\xd0\x85\xc7\x09\x94\x37\x3a\xe3\x4c\x79\x25\xd0\x3c\xcd\x41\xfe\xfc\xf3\xcf\xa8\x0d\x17\x6f\xb5\xeb\xf5\x5a\x7e\xdf\x96\x97\xf8\x17\x6a\x36\x0c\xe2\x50\xfb\xb2\x8b\x36\x10\xa8\xdd\x87\xe3\x38\x4e\x2a\x52\x27\x57\x15\xdd\xbb\xab\x73\x50\xf6\x03\xda\x90\x9e\xf4\x85\x23\x50\xaf\x54\x2a\x39\x6e\x4b\xa8\xd3\xa2\xf9\xd2\x5e\x41\x30\xd1\x56\x95\x2a\x6c\xec\xfa\x59\x5e\x95\xe1\x9c\x2b\x67\xe5\xb7\xe5\xb5\xb3\x26\x38\xa6\x9a\xd5\xc1\xcd\xd3\xcd\x1a\x5c\x62\x91\xce\xb6\xca\x74\xf6\x83\xb5\xb3\x1f\xee\xda\xd9\x0f\xd6\xce\x7e\x28\xdb\x59\xb3\xb7\xb2\x13\x55\x45\x74\x9f\x07\x9b\x82\x9c\x7a\x76\xff\x41\x30\x78\xa7\x6e\x0c\xe0\xa3\x68\xf3\xa4\x2a\xcc\x2b\x3f\xc7\x1b\x52\xd1\x79\x5b\xc8\x77\x97\x19\xc6\x3b\xbd\xdf\x16\xba\xf7\x70\x5c\x71\xa1\xa2\xeb\x7f\x81\x09\x5c\x61\xec\x9e\xda\xef\x2e\x76\xd9\x2d\x59\xa5\xb2\xab\x5c\x4b\xec\x2e\x7c\x1f\x41\x69\x61\x57\xb9\x8b\xd8\x75\x5e\x42\xcc\xbf\x71\x38\x62\xb9\x81\x61\x0e\x59\x04\x9e\x00\xc6\x54\x2d\x5a\x22\x59\x39\xb8\x21\x14\xb2\x7a\x50\xb0\x82\x53\xa6\xb8\xa1\x83\xc7\xfc\xfa\xdf\xd8\x78\xe1\xf3\xb9\x
41\x0b\x2e\xef\x4a\x1e\x41\x83\x7c\xb5\x7b\x38\xd0\x5f\x02\x49\x4d\xf5\x75\xed\xa1\xd4\x43\xea\x15\x1a\xf0\x49\xb4\x81\x7c\xb4\x84\x2a\x95\x3e\xfa\x89\x6e\x8e\x95\xff\x4b\x7e\x06\x55\xc2\x06\xae\xd1\x12\xca\xa4\xf6\x44\xc0\xe2\x88\x4c\x53\x4a\x57\x2a\x8d\x53\xde\x6c\xa0\x65\x94\x56\xa1\x5a\x5f\x33\x7a\x13\x58\x69\xe7\xff\x72\x58\xc1\x76\x5c\x19\xa0\x9f\xd0\xff\x7d\x1c\xac\xb4\x43\xd0\x5c\xac\xfa\xe8\x77\x34\x40\xbf\x13\xc4\x1e\x1e\x19\x4d\x00\x9c\x8b\x0c\x41\xa4\xd2\x47\x5f\x1f\x78\x70\xe4\xdb\xea\x63\x57\x9a\xf4\xb9\x89\xf7\xcb\x04\x59\xe3\x7e\x62\x9a\x8b\x22\xac\x06\x13\x8c\xc3\x59\xcc\x51\xfa\xae\x61\xcd\xd8\xba\x14\x46\x2e\xfb\x5b\x6d\x8b\xef\x57\x71\x79\xd3\xe1\x2b\x8f\x2f\xa6\x5c\xe6\xab\x19\xf9\xf7\xb7\xda\x56\x93\x01\xe7\x24\xcc\xc9\x55\xff\x50\x53\x70\xa7\xd0\x0e\xf3\x27\x4e\xf6\xf2\x7b\x88\x89\xa3\x4e\x65\x62\x22\x76\x27\xfe\x80\x4c\x86\x92\x19\xde\x9c\x0f\x56\xcc\x9c\x93\x3c\x9b\x3d\x9d\x97\xc2\x0c\xec\x2c\xb2\xb5\xc3\x02\xaa\xf1\xb7\x76\x31\xfb\xe7\xc7\x64\xa3\x8b\xed\x25\x8b\x33\x84\x76\x30\x0e\xfa\xfe\xe0\x33\x8b\xab\x39\x89\x03\x58\x52\x84\x66\xc4\x7c\xc3\xcb\xde\xce\x5b\x22\x02\x59\xc4\x03\x30\x73\x82\xaf\x8a\xb5\x1c\x58\xb8\xd0\x56\xf6\x09\x00\x66\xcc\x23\x56\x7d\x6f\xe7\xed\xca\x76\x44\x63\x95\x83\x01\xd5\xce\x5b\x8b\xc1\xcf\xd4\x61\x2e\xc3\xcc\x0c\x0b\x4c\x66\xdc\xa2\x29\x0b\x41\xc5\x05\x12\xfa\x68\xbb\x67\x96\x42\x79\xd0\x42\x72\x28\x0f\xb5\x3c\x8f\x51\xfe\x1e\xdf\xa4\x59\x82\xfd\xc9\x66\x14\xb0\xde\x59\xac\x23\x63\x66\x16\x2b\xc0\x79\xac\x01\x9b\x90\x7d\x84\x27\x18\x82\x8c\x83\x31\x26\x9d\x27\x16\x2b\x13\xfc\xe7\x23\x7c\x9d\xd1\xd7\x76\xf1\x1d\x5f\xbe\x65\x31\x53\xa1\xf5\x95\x74\x1c\x0e\x70\x85\xa3\x20\x6e\xea\x05\x2e\x36\xfb\x49\x65\xd6\xb6\xf0\x3f\x65\xd6\xee\x31\xba\x60\x38\x3c\x0a\xd3\x85\xc7\xf6\x9b\xd1\xcd\x49\xde\xa1\x3e\x1e\xc4\x13\xe6\x75\x4f\x08\x22\x8c\x67\x69\x39\x92\x11\x5d\x2c\x25\x8e\x17\xf4\xa6\x32\xb7\x0b\x9a\x6f\x84\x79\x60\x83\xf3\xde\x65\x1e\xac\xe5\xf2\xb5\x6a\x34\x2e\x87\x63\xa6\xcd\xe7\x9f\x21\xb3\xeb\xa5\xf5\x48\x23\x4a\xa3\x0d\x14\x5e\xb2\x29\xac\x39\x56\x62\x7c\x89\xd1\xde\x2f\x70\xfe\x4c\x67\xfd\x14\xff\xf7\x0c\x47\x59\xc1\xe9\x19\xf0\x15\x0e\x0c\x73\x0d\xa0\x75\x7c\xb4\x09\x31\x27\x81\xfc\x31\x2a\xc7\x74\xa0\xa1\x60\x45\x00\xf1\x90\xda\x95\xd5\x55\xc4\x66\x24\x7f\x67\xcd\x96\x5b\x1c\x35\x86\x9a\x9e\xe7\x16\x82\x10\x09\x46\x34\x0a\xe7\x68\x83\x5e\x18\x16\x5c\x9c\xd8\x79\x5b\x64\x70\xcd\x37\x9d\x45\xe2\xd4\x75\x9a\x4f\xc2\xc7\xf7\x2e\x7c\xa0\xff\x9c\x26\x38\xc5\xc9\x25\xa6\x62\x48\x3c\x23\xa2\xbc\x24\x7e\x80\x1a\xc3\xcf\xc2\xfe\x98\x71\x60\xb4\x95\xa0\xb7\x49\xe8\x47\xe8\x1d\x75\xcf\x44\xc3\x70\x8c\x71\x34\x58\x19\x00\x08\x1e\xf2\x19\x22\x60\x6b\xf4\x73\x72\x04\x45\xfe\xcb\x8f\xd0\x6e\x32\xeb\xdf\xa0\x3f\x46\xe4\x3f\x2b\x57\xb8\xff\x9f\x17\x13\x3f\x1c\xaf\x0c\xe2\x89\x5d\xde\x39\x39\xe2\xcd\x15\x88\x3d\x72\xa1\xd2\xd2\xcf\xb3\x3c\xdf\x4b\x34\x20\x07\x05\x9a\x32\xe9\xf9\xb3\x67\x64\xd0\x81\xf4\x44\x3a\x24\x50\x12\x51\xa5\x50\x15\x66\x9d\xfe\xfa\x13\xad\xae\xc6\x97\x38\x19\x8e\xe3\x2b\x52\x07\x36\xbe\x3a\x4f\x07\x4a\xea\xd5\x3b\xd5\x9f\x48\xd9\xd7\xe2\x73\x43\xfe\xbc\xae\x7f\x6d\xb2\x3d\x8c\x35\x06\x78\x02\x2a\x04\xac\x68\x77\x75\x15\xf1\x66\x51\xbf\x4e\x8a\x00\xca\xd0\x74\xed\xb5\xa8\xd2\xc8\xab\x88\x32\xcf\x00\x01\x5a\x88\x96\x6a\xaa\xa5\x58\xb1\x67\x80\x0a\x2b\x77\x0b\xff\x12\x82\x94\x4b\x2c\x2d\xf5\x9b\xd2\x77\xf8\x87\x97\xa1\x45\x96\x96\xfa\x8d\xd7\xcf\xdd\x05\x96\x96\xfa\x75\xf6\x9d\xfc\x0b\x1d\xe7\x8d\xc2\xc3\xd2\x06\xf4\xfc\xcd\x1b\x96\x0f\x52\x7e\xdd\xa0\x2a\x40\xe5\x2d\x43\xc8\x6c\x49\x54\xab\x5d\xd7\xea\x4c\xeb\x97\x17\x65\x5c\x8f\x14\
x22\x2f\x6f\x75\xea\x60\xcb\xa3\x32\xa0\xff\x55\x69\x84\xbd\xa4\x37\x48\x9c\x94\xf2\x97\x55\x46\x30\xd2\x14\xac\xae\x22\xb2\x4b\xc0\x4d\x0c\x0a\xa5\x85\x44\x17\x8f\xb1\xd2\x5e\xa4\x08\xe0\xa5\x28\x8e\xc6\x37\x74\x39\x6e\xfd\x7a\x70\xb4\x85\xfe\x40\x6f\xd0\x3a\xc0\xe4\x0d\xd6\x6d\x58\xd0\xbb\x38\xb5\xb3\xec\x1b\xef\x2f\x5f\x4b\xca\x59\x40\xac\xab\x15\xc7\xeb\xbf\x50\xe6\x5c\x54\xe4\x34\x8a\x6b\x32\x8c\xd9\x2a\xe3\x89\xa2\x59\x3e\x60\x06\xea\x45\x12\x0f\x72\x4b\x3d\x20\x34\xd8\x1b\x29\x96\x81\xd0\x1d\xe4\x20\x34\x5f\x16\xe2\xd2\x01\x21\x6c\x93\xe6\x29\x2b\x7a\xa6\x8b\x46\xec\xb3\x84\xab\xaa\x7a\x5e\x44\x28\x42\x0e\xc1\x08\xdd\x4d\x38\x42\x0b\x0a\x48\x48\x95\xe7\xcc\x43\x57\x4e\xf7\xf2\xd9\x4b\x2c\x8d\xd7\x9a\x64\x25\x8a\x4b\x02\x96\x53\xc4\x92\x0a\x2f\x20\x69\xb5\x9e\x24\xad\xef\x5d\xd2\x72\xc8\x57\x0e\xf5\xce\xc9\x51\xb1\x9c\xb3\xa8\x7a\xc7\xc2\xd2\x75\x5e\xfe\xc4\xc4\xff\x79\x4c\xbc\xf0\x34\xfb\x08\x2c\x7b\x2f\x1a\x24\x18\x22\x37\x30\xe0\x1a\x48\x26\x87\xe4\x93\xbb\x8c\xa8\x31\x8d\xe3\x0b\xdc\x96\x7f\x45\xb5\xbf\xd5\xe6\x50\x76\x57\x98\x7f\xde\x26\x65\x16\xd8\x05\xda\x4f\xbb\xc0\xdf\x62\x17\xd8\x1e\xe3\x41\x96\xc4\x51\x38\x40\xbd\x38\xc0\xfd\x38\x9e\xaf\xf0\xdf\xee\x15\x29\xfc\xe9\xd7\x85\x76\x84\xed\x9e\xaa\xf0\x27\xcf\x0f\xb5\x03\xc8\xac\x5d\x65\x20\x6a\xbd\x22\x2d\x26\xc1\x47\x59\x48\x8f\x85\x5f\x80\xef\x84\x1f\x4f\xbd\xd4\x9b\xaf\x37\x83\x32\x0b\xac\xe3\xbf\x77\x72\xe4\xff\x39\xeb\xf8\x60\x96\x4d\x67\x59\xf9\x4b\xbb\x83\xc2\x4b\xbb\x83\xc5\x2f\xed\x74\xa9\xee\x40\xbb\xc4\x3b\xf8\x6b\xaf\x83\x1e\x5d\xaa\x33\x75\xf3\xe2\xcd\xc3\x4a\x76\x05\x0d\x7d\x2f\xd2\xdd\x3f\xe9\x84\x7d\xa0\x5d\x6b\xba\x84\xa8\x83\x12\x97\x16\x07\x0b\x5e\x5a\x3c\x65\xb1\xfb\x7b\x30\xdf\xcd\x8f\xc7\x7b\xe8\xb7\x95\x57\x8d\x26\x37\x10\x47\x69\x46\x96\xf7\xc5\x8d\xc1\x7d\xa7\x7e\xb0\xb2\x19\xa5\xe1\x6f\xa4\xb4\xc8\x05\x37\xf5\x03\x99\xfd\x05\x7e\xe6\x4b\x17\xa1\xae\x0b\xd0\x54\xbd\x01\x25\xb5\x8e\x73\x83\x5f\xc5\x00\xf8\xb5\x5a\xb4\xaf\xa7\x15\xe9\xbb\x12\x8a\x00\x51\xcc\xa2\x4c\xf4\x4c\x0b\x66\x05\xb6\x78\x87\xf4\x9b\x01\x8c\xbe\x58\x56\x31\xfb\x97\xf6\xdd\x68\x8d\xc6\xb4\x19\xfb\x29\x8d\x9c\x85\xa6\x71\x1a\xaa\x1e\xf8\xa4\x51\xf2\x9d\xd4\x3f\x8c\x79\x67\x45\x0b\x4b\x1a\x46\xcb\xa8\xae\x35\x72\xe8\x07\xf9\x33\x0c\x94\xc8\x36\xa2\xbe\xa6\xac\x44\x6e\x2b\x0f\xa9\xa5\x36\x92\x87\xd4\x92\x4b\xdb\x82\x6b\xa9\x96\xd9\x4b\x1a\x20\x6e\x87\xc8\x2d\x70\x67\x91\x85\x38\x74\x8a\x78\x87\x33\x29\xe1\xbc\x32\x55\x54\x81\x2f\x46\xb3\x78\xe6\xa4\x3e\x57\x54\x34\x97\xc9\xf1\x97\xf5\x3d\xbf\x08\x92\x50\x60\xfb\x8a\xe1\x21\xa1\x81\x71\xf4\xf6\xf9\xb3\x5b\x2b\xdf\xe4\xcb\xe5\xfa\x55\xa3\xb9\x10\xef\xbc\x5f\x62\xb2\x27\xde\xf9\xad\x78\xe7\xde\xf1\x01\x82\x90\xb8\xe5\x58\xe7\x1e\x0b\xa0\x7b\x5f\xd6\xf9\x97\xb3\xc3\x7c\x49\xcc\xe1\x87\x16\x56\x45\xd3\x01\xd8\x23\xd0\xad\x24\x7e\x14\xc4\x93\x8a\xc1\x01\xab\xd5\x15\x4d\x52\x2a\x86\xc3\x52\x87\x9d\x1a\x5c\xae\xd1\x3a\xf3\x08\xb8\x27\x46\xa5\x33\x2a\x4e\x9c\x0b\x31\xaa\xbf\x77\xe6\x85\xff\x51\x8c\x6a\x75\x6f\xbb\x87\x5e\xad\xbd\x5a\x5b\xae\x23\x46\x1b\x68\x1f\x67\xa3\x38\x40\x0d\x17\xb7\x82\xd0\xde\x77\xe5\x56\x9b\x41\x40\xfd\x07\xd5\x05\x51\x82\x0b\xf0\xd5\x4b\x6a\xd3\x3f\xbe\x68\x95\x06\xfe\x0f\x4e\x62\xc8\x1d\x96\x8d\x30\x4a\x70\x2a\xf1\x45\xa5\x23\xa4\x1c\xeb\x31\x79\x36\xf0\xbe\x13\x2f\x60\x0b\xf1\x0b\xc3\x41\x5d\x8d\xce\xe6\x01\x34\x85\x67\x5f\xd8\x71\x84\xd1\x24\x4e\x30\x15\x1e\x97\x97\xa1\x6f\xae\x51\xe4\xeb\x7d\x79\xb9\xe4\x02\x87\xf9\x5c\x64\x81\xaf\xdd\x2f\xca\xf9\xd3\x02\xff\x66\xa7\x38\x14\xc5\xf1\xb4\x9c\x18\xf2\x91\x93\xa3\x73\x65\x0b\x62\x77\xaf\x89\xbc\x48\x11\xcd\x89\xa6\x16\x22
\xba\xfb\x85\x9b\x7d\x22\xba\x6f\x45\x74\xff\x47\x62\x7e\xc5\x24\x27\xf1\xc0\xbf\x50\xf8\x2d\x7d\x70\x96\xcf\xb7\x86\x00\x5c\xa9\x14\x8b\xc0\x55\xf4\xf5\xab\xfe\xea\x4e\x5b\x8c\xbd\xc7\xf3\xe3\x0a\xac\xae\xa2\x4f\x04\xbe\x5a\x2f\x34\x22\x05\x80\x66\x41\x94\xb9\x1a\x85\x63\x8c\x2a\x3f\x54\x72\x5f\xeb\x3c\x06\x37\x78\x1c\x1a\x31\xb7\x85\x09\xa7\xa1\xc8\x0c\xc5\x96\x84\x54\x15\xa5\xee\xd8\x0d\xf1\x78\xcb\xec\x5e\x12\x05\x2d\xc4\x4b\xfe\xde\x8e\x5b\x96\x1c\x5d\x34\x49\xd6\xe3\xf2\x95\x3c\x13\x12\xb4\xf6\xd7\xe7\xf9\x78\xdc\x24\xe1\xe5\x62\x62\x1b\x31\xaf\xc5\x97\xe3\xdd\xcd\x7a\x1e\xeb\x99\x3c\x49\x1f\xcd\x44\xe0\x36\x07\xd1\x43\x3f\x4d\xc9\x42\x5e\x26\xa8\x05\xe8\x3d\xbe\x41\x5b\x38\x09\x2f\x69\x4e\xc8\x1d\x3e\x28\x8d\xe2\x98\xd3\x87\x6f\xdf\x6f\xed\x34\xf2\xd6\xc4\x73\xc9\xc4\xe3\xbd\x38\x1a\x86\x17\x33\x96\x89\x32\x86\xac\x90\x69\x51\x7e\xc9\x24\x9e\xe2\x24\xbb\x41\x7f\xd2\x63\x31\x78\x93\x02\xf3\x3d\x19\xd1\x1c\xc7\x29\x79\x08\x23\x96\x2e\x20\x8b\x85\x2f\xcd\x0a\xda\xc2\x43\x7f\x36\xce\xba\xa8\x85\x2a\xf5\xc6\x3a\x24\x52\xae\xba\xe0\x3b\x12\x9a\xe3\x84\x27\x32\xcf\xc1\x91\xf1\x9f\x87\x66\x98\xb1\xe4\x99\x29\x80\xca\x0f\xf5\xd2\x87\x2c\x46\x53\x9c\x0c\xe3\x64\x22\x01\x57\x20\x4b\xe9\x1f\x07\xc3\x8b\xae\x6b\x94\x11\xbd\xf8\x3a\x86\x98\x33\xf5\xc6\xfa\x6a\xb3\xa1\x85\xe0\xa6\x5d\xa1\xa8\x6b\x9f\x72\x84\x94\xc6\x6f\xab\x45\x09\x49\x8b\x12\xc8\x93\x59\x09\x72\xd2\xe2\xeb\x6d\x7e\x16\xd1\x03\xe0\x73\xb7\xa4\xab\x72\xc6\x50\x32\x7e\x03\x1b\xdd\x70\x7f\xb3\x61\x9c\xc0\x29\x26\x6f\xf4\x01\x12\x83\x7e\x0e\x86\x46\xd2\x78\x4a\xed\xfc\xf4\xa8\x98\x61\x2d\x52\xf1\xcf\x7c\xb2\xd6\x69\xfa\xc9\x7b\x83\xf1\xd4\x69\xac\xd5\x6a\x3a\xe0\x82\xec\xf5\x83\xe1\x85\xdd\xf0\x82\x4c\xc4\x86\xf8\xc9\x09\x8f\x14\x77\x05\xc3\x30\xd7\x3b\x5c\x57\x50\x0f\xba\xb2\x2c\xe8\x2e\xf9\x66\xa7\x0c\x36\x50\x0b\x7f\x58\x29\x59\x39\xf5\xc7\x19\xda\x84\xff\x2c\x9e\x88\x96\xbb\xd1\x48\x7e\xed\xf7\x21\x3b\x9a\x48\x3d\x18\xae\xb0\xa8\x24\x15\xde\x19\x0f\xf0\x73\x4e\x2a\x2b\x2e\xcf\xab\x56\x73\xa1\xdc\x2e\xea\xd4\x5b\x0d\x08\xc3\xcc\x91\x14\x96\x79\xd9\x83\xef\x3e\xa3\x55\x42\x3e\x94\x07\x79\x62\x76\xec\x66\x89\xee\x04\xe5\x20\x9b\xd2\xc1\xa6\xe9\xe6\x0d\x7d\x8e\x2d\xd4\x13\xc8\xc9\x7b\x51\x80\xaf\x6d\x35\x4e\x6b\xd7\x4c\x01\x64\x89\xd6\x39\x27\x44\x97\x40\x45\x08\xcb\xe2\x8d\x33\x7f\x7d\x8e\x0d\xaf\x94\xbf\x71\x56\xe2\x5b\xde\x06\x99\x95\x15\xf6\x64\x33\xc2\xc8\xb7\x16\x5a\x34\x7f\x31\xc7\xc8\x42\xfd\xc8\x04\x75\xad\x83\x3c\x2e\xd2\x1b\x8e\x8f\xd5\xb8\x40\x74\x92\xe5\x39\xe6\xc9\xb2\x81\x02\xf3\x34\xbe\x79\xaf\xf5\x39\x43\x2c\xa3\x77\x9e\x1a\xd8\xfc\x3e\x3f\x1b\x03\xc0\x57\x86\xd8\x3a\xba\x66\x71\x91\xc5\x28\x7f\xc5\x3a\xee\x40\x64\x4f\x8c\xb1\x1d\x74\x28\x47\xb3\x63\x60\x2d\x58\x28\xb6\x1c\x75\x6a\xcb\x21\x4d\x9f\xd3\x98\x03\x01\x3f\x57\x9a\x80\xd1\x13\x23\x2d\x7f\xb4\x8d\x75\x99\xf1\x46\xf3\x42\x41\xd9\x3a\xcb\x47\x5f\x7e\x67\x0f\x58\x25\x35\xf1\xdb\xc1\x91\xda\x1d\x70\x9d\xb2\x78\x5c\x1b\xe3\xf6\x07\xb5\x81\xf9\x83\xdb\xc0\x48\xb3\xf9\x1a\xfd\x51\x30\x7a\xe4\x2f\xaf\x71\xfa\x07\x98\xc3\x18\x1d\x39\xfd\x43\x37\x8b\xe1\x7f\xb7\xe6\x6b\x3d\xe0\x14\xf9\x93\x98\x03\xd3\x4d\x43\xa3\xb6\x29\xd1\x98\xc4\x69\xed\x6c\x69\xa9\xd8\xa4\x48\x02\x2e\x1d\x7d\x39\xdf\xb0\x04\x31\x63\x7b\x59\x5e\xaf\xc8\x80\x52\x3e\x46\xdc\x6b\x43\x2f\x13\x6c\xa6\x70\x23\x5f\x70\x13\x7f\x28\xd1\x32\x4c\x6d\xe9\xf6\xe7\x47\xaf\xb1\x88\x06\x0f\x10\xc4\x86\x8a\x08\x42\x32\xa4\x42\xa1\x4b\x4c\x58\xac\x9a\x87\x1c\xb2\xe9\x7d\xc0\x14\xca\xa6\x79\x90\x1d\x71\x94\x74\x09\x30\x1e\xd2\x05\x55\x36\xec\xaa\x58\x4c\x0a\xcd\x11\x9e\x6e\x8b\x6c\xd1\x28\x34\x7b\xa0\x1e\x3d\x85\x2e\xc
f\x09\x7b\x7b\xe6\xad\xfd\xbd\x7d\xe8\x17\x48\xeb\x3e\x3f\x39\xfa\xe3\xea\x8e\x9c\xe9\xb5\x5d\x59\xaf\xff\x09\xda\xa5\x63\x30\xce\xec\x71\xe3\x5d\xaa\x44\x92\x5f\x16\xe9\x91\x04\x1e\x47\x78\x96\xfa\xfd\x31\x66\xe1\xc0\x24\x74\x8e\x91\x9c\x6a\x91\x42\xd1\xdf\xbc\x43\x6a\x86\x35\x69\x5b\x38\x82\x6c\xca\x88\x19\xda\x32\x1b\x63\x53\x93\x24\xca\x43\x8c\x95\x30\x45\x3e\xa2\x09\x98\xd1\x25\x4e\x52\x88\x5a\x36\xf2\x33\x14\xe1\x8b\x31\x1e\x64\x38\x20\x6c\x78\xc0\x52\xaa\x66\x4c\xe1\x93\xc5\x68\x1c\x66\xd9\x18\x2f\xd3\x00\x97\x2b\x2a\x50\x9c\x24\x71\x82\x82\x18\xa7\xd1\x8b\x0c\xf9\xc3\x21\x1e\xd0\xba\x14\xa9\x17\x29\x4a\xf1\x60\x96\x84\xd9\x8d\x27\x2a\xf6\x67\x19\x0a\x33\xa8\xc4\x6b\x84\x59\x2a\x02\x2a\x84\xe3\x30\x63\x4e\xdc\x34\xaf\x6b\x48\xf8\xf3\x04\x47\x74\x3f\x48\x6d\x8a\x32\x3a\x20\x1f\x68\xe7\x84\xba\x4c\x7b\x2b\xcf\xdf\x5d\x93\xb6\x15\x1f\x52\xde\xcb\x66\xd0\xce\x03\x46\x6e\xbd\x0d\xa7\x86\xcb\xa2\xd3\x42\xc8\x4e\x68\x64\xf7\xc2\xce\x73\xda\x6f\xa2\x5d\xf2\xcb\x92\x38\xee\xfd\x69\xed\xcc\x43\x95\xf7\xa7\xcd\x33\x16\x2c\x00\x7d\x25\x8f\xec\x2a\xa0\xde\xa9\x5a\x92\xc8\xbd\x3f\xad\xd3\x4a\x35\xb5\x52\xb3\xb8\x52\x83\x56\xaa\xab\x95\x6a\xc5\x95\x9a\xb4\x52\x43\xad\x54\x17\x95\xd4\x3a\xb6\xec\x48\xc6\x90\x71\x2f\x43\xd7\xa0\xf5\xc4\xa0\xf5\xec\x83\x66\xe2\x23\x0d\x17\xeb\x13\xbd\x30\x19\x0e\x79\xda\x41\x8a\x34\x0d\xb2\x5a\xab\x91\x2f\xb6\xfe\x9a\x13\xd1\x54\x21\xd7\xad\x90\x1b\xa5\x20\xd7\x9c\x03\x2f\xc1\xd0\x20\x37\x4b\x41\xae\xbb\x66\xc7\x93\x60\x68\x90\x6b\x1a\xe4\xf9\x13\xd9\xf3\x93\xe4\x06\xf5\xf5\x74\xaa\x74\xaa\xfa\x34\xfe\x85\xa9\xc9\xc8\xe8\xe4\x13\xd6\x93\xde\xa4\x19\x9e\xa0\x61\x3c\x4b\x50\x16\x4e\xf4\xb9\x5f\x30\x28\x6f\x84\xaf\xb3\x63\xb2\xfa\xdc\xf1\x63\x2d\x11\x6f\xf7\xe3\x20\x1c\xde\x50\x4e\x48\xe9\xb0\x04\x16\xeb\x6e\x2c\x7a\xa7\xd4\x71\xe0\xb7\x53\x48\x79\x09\xd1\x56\x8c\x4c\x71\xb6\x24\xb9\xbf\xa0\x14\x67\xb3\xa9\xfa\xa1\xc0\xa3\x63\xfe\x61\x7f\xef\x17\xea\xda\x51\x74\xc2\xdf\xfb\xe5\xbc\x86\x36\xd0\xde\x2f\x66\x6a\x34\xa9\x48\x9d\x16\xa9\x5b\xa3\x19\xcb\x4b\x1a\xa6\x32\x9d\xf5\x2f\x31\x11\x15\x5c\x47\xff\x1a\x0d\x7e\x0c\x6d\xd3\xe8\xc7\x5f\x11\x7d\x72\x45\x3f\x96\x8b\xb3\x30\xc7\xa2\x7c\x7e\x1d\x6a\x0f\x73\x2c\x9a\x6d\x88\x66\xeb\x4a\xb3\xf5\x79\xcd\xd6\xd5\x66\xeb\x8b\x35\x0b\x61\x74\xc2\x1a\x5f\x82\x04\x48\xd8\x50\x57\xa0\xab\x6a\x13\xaa\x36\xf8\x62\x86\xaa\x35\x75\x99\x3a\x66\x84\x91\x75\x11\x6b\x45\x40\xad\x35\x7a\xae\xd7\x63\xfb\xd3\x8f\x75\xfa\xb1\x6e\xfd\xd8\xa0\x1f\x1b\xd6\x8f\x4d\xfa\xb1\x69\xfd\xd8\x2a\x6a\xb3\x5d\xd4\x66\xa7\xa8\xcd\x35\xd1\x66\x81\x46\xaa\x14\xe7\x41\x8b\x73\x1f\x54\x8e\x03\x21\x53\x49\x21\xfb\x11\x3d\x48\x72\x57\xa7\xf2\x5a\x92\x3e\x4a\x71\x66\xb5\x88\xbd\x77\xee\xed\x1d\x06\x37\xf7\x32\x03\x2e\xa4\x96\x3e\xa6\xa1\x86\x7e\x03\x22\x44\x95\xdf\xc8\xdc\xf3\x55\x02\xcf\x62\xef\x7d\xad\x57\xac\xd3\x8a\x0d\x56\x71\x4d\xab\xd8\x76\x56\x6c\xd0\x8a\x2d\x56\xb1\xae\x55\x5c\x73\x56\x6c\xd2\x8a\x9d\x33\x81\x9a\x52\xb1\x9e\x57\xbc\xd7\x2e\x56\x14\xa5\x9e\x22\xc2\x63\xc7\x1f\xb3\x94\xec\x2c\x78\x3c\x3c\xde\x25\x7a\x3c\x87\xc3\x18\x9c\x80\x63\x8b\x1f\x6f\xc5\xd7\xea\x84\x87\xa4\x1c\xbd\xc2\x9b\xee\xb8\xd8\x8b\x4e\xa6\x7e\x61\xc7\x93\xdf\xdc\xe6\x1f\xc3\x4b\xfa\xa5\xd3\x5a\x6d\x36\x74\xb5\x9c\x58\x26\x82\x60\x2b\x25\x5d\xa1\x94\xf5\xa1\x7c\x91\x44\x50\xcd\xe0\xe7\xd8\xbf\xc4\x28\x1e\x07\x4e\x56\xbb\x80\xfc\xd0\x3b\xa7\x93\xdb\xd3\xe3\x1d\x2a\x2d\xf6\xfc\xf1\x60\x36\x26\x2b\x2c\xc2\x57\xce\x66\x7b\x2c\x11\x4c\x8f\x26\x82\xa9\x5d\xb7\x82\x26\xfc\x1f\x5a\xe2\x12\x9a\x9e\xaf\xa5\xc7\xf2\xc2\xf4\x68\x5e\x98\xda\x35\xab\xd1\x84\x98\xf2\x3d\x2e\xa0\xd6\xaa\xe8\x0d\xaa\x
f4\xce\xa5\xe7\xff\x40\x75\xd4\x45\xb5\xaa\x09\xb1\xc1\x20\x36\x28\x44\x06\xb0\xc5\x20\xd6\x35\x88\xf5\x12\x10\x9b\x0c\x62\xd3\xe8\x56\x85\xb6\xa3\x40\x6c\x94\x80\xd8\x62\x10\x5b\xd6\x5e\x37\x35\x88\xcd\x12\x10\xdb\x0c\x62\xdb\xda\xeb\x96\x06\xb1\x55\x02\x62\x87\x41\xec\x58\x7b\xdd\xd6\x20\xb6\x4b\x40\x5c\x63\x10\xd7\xac\xbd\xee\x68\x10\x3b\x73\x21\xe6\x62\x3f\x05\xaa\x54\x5f\xd3\xab\xeb\xde\x31\x82\xa6\xc9\xee\x73\xb1\x7c\x8f\x45\x44\x4a\x5d\x5c\x03\xaf\x0e\x49\xd7\x7a\x96\x24\x1c\x3c\x5d\x7e\x32\x1b\x64\x68\x14\x5e\x8c\x90\x1f\x05\x68\x1c\x5f\x21\x3f\xb9\x98\x41\xf8\x17\x70\x73\xfe\xef\x99\x9f\x18\x89\x7b\xa0\x01\x1f\x6d\x90\x56\xb8\x14\x67\x51\x1e\x5c\xf4\x69\x11\xba\x4b\x58\x8f\x4f\xbc\xcf\x0a\x06\x09\x4e\x67\xe3\x0c\xc5\xc3\xa2\xe6\x47\x74\x0b\xa8\x5c\xf8\xe8\x25\xba\xf0\xa9\xeb\x4a\x7d\xad\x8a\x96\x10\x7d\xd5\x67\xaf\xda\xf0\xaa\x0f\xaf\x6c\x48\x8e\x29\x20\xa9\x2b\xf4\x48\xf8\x12\x5d\x5c\xc3\x0c\x57\x81\x20\x78\x01\x21\x76\x4a\x05\x6c\x89\x60\x48\x87\x7e\x3b\x38\x42\x10\x4e\x52\xfe\xf8\x8e\x72\xb8\x8b\x11\xfa\x1d\x5d\x8c\xcb\x32\x39\xbb\x52\xe5\x37\xc6\xe2\xde\x51\x16\x57\xa9\xbc\xcb\xb7\x6f\xb2\x93\xbd\x93\xc4\x82\x2a\x2b\xd0\x51\x0b\x74\xf2\x02\x3a\x3d\xff\xc6\xb8\xe1\x3b\xca\x0d\x2b\xb4\x99\x7c\xbf\x7d\xc7\xf9\x1f\xec\xb7\x4b\x88\xb4\x66\xc2\x68\x30\x18\x0d\x0e\xa3\xae\x22\x50\x37\x30\xac\xa9\x05\x6a\x45\x18\x36\x19\xf4\x26\x87\xde\x50\x31\x6c\x68\x18\xd6\x2d\x18\xb6\x18\x8c\x16\x87\xd1\x54\x11\x68\x1a\x18\x36\xd4\x02\x8d\x22\x0c\xdb\x0c\x7a\x9b\x43\x6f\xa9\x18\xb6\x34\x0c\x9b\x16\x0c\x3b\x0c\x46\x87\xc3\x68\xab\x08\xb4\x0d\x0c\x5b\x6a\x81\x56\x11\x86\x6b\x0c\xfa\xda\x99\x42\x22\x02\xc3\x8e\x86\x61\x5b\xc1\xb0\x54\xe2\x8f\x94\x27\x9d\x10\xba\xd6\x12\x69\x27\xe6\x5d\x77\x51\x58\x19\xbe\xce\xe4\x7b\x27\x59\x93\xca\x43\x29\x28\x69\x1c\xe8\x6d\x91\x79\x7f\x35\x1d\xfb\x04\x9b\xeb\x0c\x39\xc1\xb1\x38\x33\x95\xbc\x65\x1b\x44\x71\x71\x55\xa4\xd4\x55\x93\x77\xc8\x25\xab\x45\x77\x50\x72\xc1\xd2\xc6\xc8\x9e\x7a\x37\xd2\x6d\xb7\xbc\xfc\x52\xa4\xdb\xee\x78\xec\xae\xa4\xdb\xa9\xdf\x9e\x79\x6b\x7f\xef\x48\x84\x4f\xf7\x55\x4f\xf7\x55\x8f\x76\x5f\xa5\x2d\xf1\xfc\x3e\x47\xbf\xc9\xf9\x7b\xdd\xe1\x3c\x54\x56\xb8\xf7\xe2\x68\xfe\x5e\x3d\x9a\xbf\xbf\xeb\xd1\xfc\xbd\x7a\x34\x7f\x5f\x74\x34\x9f\xa7\x60\x7e\xba\xa9\x7a\xba\xa9\x7a\xba\xa9\x52\xbe\x3c\xdd\x54\x3d\xdd\x54\x3d\xdd\x54\xe5\xcd\x3e\xdd\x54\xe9\x1f\x9f\x6e\xaa\x1c\x8f\x4f\x37\x55\x4f\x37\x55\x4f\x37\x55\xf0\xf7\x74\x53\x55\x4e\x89\xfb\x74\x53\xf5\x74\x53\xf5\x74\x53\x25\xfd\x3d\xdd\x54\x3d\xdd\x54\x3d\xdd\x54\x3d\xdd\x54\xfd\x4f\xbe\xa9\x7a\xb0\x3b\xaa\xbb\xdd\x4e\x95\xb9\x97\x2a\x71\x23\xf5\x58\x77\x51\x7f\xef\x7c\x28\x4f\x77\x51\xff\xfc\xbb\x28\xf9\xee\xa8\xd7\x9a\xeb\xe8\x24\xdf\x1c\xf5\x5a\xd2\xb5\x11\x3c\x3c\xfe\x9d\x11\xf5\xd2\x14\xb7\x46\xf6\xa0\x02\xdc\x43\xbb\xe8\x5a\x09\xdc\x38\x65\x8f\x62\x29\x66\xba\xa9\xaf\x88\xc2\x0c\xa5\xfd\xf8\xda\x84\x73\x2c\xd0\x39\x96\xaf\xe9\xf8\x9f\x4d\x9a\x6c\xb4\x3b\xee\x43\x39\x3b\x74\x87\xf3\xd5\xb8\xef\xf1\x8d\x4d\x8f\xab\xb6\xe8\x71\xff\xf1\xb9\x0d\xb3\x41\x21\x43\xc0\xa3\x4a\x84\xe8\x5f\xf2\x38\x39\x54\x87\xac\x12\xd9\xda\xf8\xd8\x9f\x2a\x80\xcc\x48\x68\xca\x67\x23\x28\x9a\xed\xec\x4f\x7a\x51\xf9\x03\x2d\xd1\xf1\x59\xe2\x8d\x56\xd1\xbf\xa0\x57\x8e\x58\x0a\x57\xfe\xd4\x8e\x33\xec\x1b\xa6\x86\x40\x9a\x80\x63\xbb\x63\x3c\x79\x4d\x66\x7c\xfe\xf4\xf4\xac\x2a\x7e\x96\x55\x43\x10\xcd\x1f\x2c\xcb\xac\x00\x74\x6f\xb5\x1c\xd7\x84\x80\x16\xc4\xc8\xbf\x4e\xa6\xc7\xae\x32\x54\x5a\x16\x4e\xce\x8d\x76\xc7\xa1\x10\xa9\x39\x95\x21\xd6\x46\xcb\x2a\x46\xa4\xf5\xa4\x29\x46\xf2\x41\x0b\xb5\x2f\x7f\xe4\xc3\x39\x37\x03\x3c\
x28\x07\xd5\xea\x9f\x65\x3c\xb5\xf9\x10\xab\x29\xa2\xcb\x28\xa2\x2a\xb5\xc8\xb2\x88\x42\xd0\xa0\xd3\x84\x71\x8c\x2a\x95\xef\x0a\x09\x3b\x08\xd7\x4a\xb4\x05\x04\xeb\x26\xd6\x9c\x50\xd5\xf7\x6a\x67\xbf\x92\xba\x15\xb6\xa6\x48\x15\x86\xd7\x59\x9e\xd7\x20\xd2\xf3\x18\x68\xc7\xa7\x4f\x10\x07\xc5\x72\xa3\x95\x93\x7a\x68\x9c\xdd\xc9\x58\x28\x73\xc5\xc4\x32\x05\xbb\xef\x55\xee\xed\xb5\x1e\x42\xe8\xed\xb5\x16\x96\x78\xcd\x3d\x56\x13\x77\x7b\x2d\x6b\x6c\x0b\xb8\xa1\x09\x71\x70\x87\x1d\x7e\x2b\x89\xa7\xca\x2e\xcf\x5e\xc0\x20\x7c\x83\xa8\x78\x01\x69\x4e\x0d\x34\xa7\xe9\xf9\xc9\xc4\x93\x52\x22\xd4\x1c\xaa\xbf\x6a\xc8\x60\xf5\x58\x73\x04\x75\x29\xea\x97\xb6\x8a\x09\xa8\xae\x0a\x42\x8d\x18\x57\x4a\x88\x21\x6d\xf0\x82\xc5\x77\x18\x64\x3c\x0b\x36\x70\x61\xf8\x42\xf0\x22\xbb\xf8\xcf\xb0\x99\x2f\x2f\x5b\xf7\xf0\x05\xd8\x3d\x9a\x93\x00\xe9\x3b\x5a\x6d\x64\x88\x1e\x66\xc5\x01\xa4\xc5\x57\x1d\xa3\xf9\xe2\x95\x47\x0a\x15\x9f\x34\x7b\xad\xc7\x3a\x66\xde\x2f\x5d\xdf\xb7\x3c\x5f\x3e\xda\x29\xf0\xdb\x06\x71\x26\xac\x0a\xa7\x38\xb9\xc4\xcf\x9f\x55\x06\x55\xd4\xa8\xd5\x1b\xa8\x7f\x83\x7a\xff\xdf\xff\x1b\x24\xe1\x00\xed\xe3\x34\x0a\xc7\x2b\x68\x73\x3c\x46\x49\x78\x31\xca\x52\xc4\xca\x07\x2b\xcf\x9f\x3f\x3b\xc2\x41\x98\x66\x49\xd8\x9f\x01\x7c\x3f\x0a\x20\x28\x4f\x18\xa1\x34\x9e\x25\x03\x0c\x6f\xfa\x61\xe4\x27\x37\x84\x1d\x4c\x52\x8f\x45\x69\x48\xe0\xbf\xf1\x2c\x43\x13\xe0\xe9\x03\xe0\xac\x1e\xf2\x13\x8c\xa6\x38\x99\x84\x59\x86\x03\x34\x4d\xe2\xcb\x30\xc0\x01\x0d\x3a\x41\xd6\xe9\x30\x1e\x8f\xe3\xab\x30\xba\x40\x83\x38\x0a\x42\xba\x86\x49\xa5\x09\xce\xba\x6c\xc5\x2f\x23\x15\xad\x14\x14\xc3\x14\x9f\x41\x1c\x60\x34\x99\xa5\x19\xd9\xa8\xfd\x30\x02\xa0\x7e\x3f\xbe\x24\x9f\xa6\x37\xd0\x45\x14\xc5\x59\x38\xc0\x1e\x8d\x2b\x34\x0e\x53\xd0\x2c\xcb\xed\x45\x81\x86\x4c\x10\xa6\x83\xb1\x1f\x4e\x70\xb2\xe2\xc2\x21\x8c\xe4\x81\xe0\x38\x4c\x93\x38\x98\x0d\xf0\x83\xa3\x81\x58\xd7\x82\x78\x30\x13\x71\x30\x48\x8d\xd5\x38\x61\x31\x32\x26\x7e\x86\x93\xd0\x1f\xa7\xf9\x30\xc3\xdc\x40\x35\x09\x75\x32\xcf\x27\xbb\x7b\xc7\xe8\xf8\x60\xe7\xe4\xd7\xcd\xa3\x6d\xb4\x77\x8c\x0e\x8f\x0e\x7e\xd9\xdb\xda\xde\x42\x6f\xff\x8d\x4e\x76\xb7\x51\xef\xe0\xf0\xdf\x47\x7b\xef\x76\x4f\xd0\xee\xc1\x87\xad\xed\xa3\x63\xb4\xf9\x71\x0b\xf5\x0e\x3e\x9e\x1c\xed\xbd\xfd\x74\x72\x70\x74\x8c\x7e\xdc\x3c\x46\x7b\xc7\x3f\xc2\x87\xcd\x8f\xff\x46\xdb\xbf\x1d\x1e\x6d\x1f\x1f\xa3\x83\x23\xb4\xb7\x7f\xf8\x61\x6f\x7b\x0b\xfd\xba\x79\x74\xb4\xf9\xf1\x64\x6f\xfb\xd8\x43\x7b\x1f\x7b\x1f\x3e\x6d\xed\x7d\x7c\xe7\xa1\xb7\x9f\x4e\xd0\xc7\x83\x13\xf4\x61\x6f\x7f\xef\x64\x7b\x0b\x9d\x1c\x78\xd0\xa8\x59\x0d\x1d\xec\xa0\xfd\xed\xa3\xde\xee\xe6\xc7\x93\xcd\xb7\x7b\x1f\xf6\x4e\xfe\x0d\xed\xed\xec\x9d\x7c\x24\x6d\xed\x1c\x1c\xa1\x4d\x74\xb8\x79\x74\xb2\xd7\xfb\xf4\x61\xf3\x08\x1d\x7e\x3a\x3a\x3c\x38\xde\x46\xa4\x5b\x5b\x7b\xc7\xbd\x0f\x9b\x7b\xfb\xdb\x5b\x2b\x68\xef\x23\xfa\x78\x80\xb6\x7f\xd9\xfe\x78\x82\x8e\x77\x37\x3f\x7c\xb0\xf6\x92\xe0\xae\xf4\xf1\xed\x36\xfa\xb0\xb7\xf9\xf6\xc3\x36\x6d\xe9\xe3\xbf\xd1\xd6\xde\xd1\x76\xef\x84\x74\x27\xff\xd5\xdb\xdb\xda\xfe\x78\xb2\xf9\xc1\x43\xc7\x87\xdb\xbd\x3d\xf2\x63\xfb\xb7\xed\xfd\xc3\x0f\x9b\x47\xff\xf6\x18\xcc\xe3\xed\xff\xfd\x69\xfb\xe3\xc9\xde\xe6\x07\xb4\xb5\xb9\xbf\xf9\x6e\xfb\x18\x55\xe6\x0c\xc9\xe1\xd1\x41\xef\xd3\xd1\xf6\x3e\xc1\xf9\x60\x07\x1d\x7f\x7a\x7b\x7c\xb2\x77\xf2\xe9\x64\x1b\xbd\x3b\x38\xd8\x82\x81\x3e\xde\x3e\xfa\x65\xaf\xb7\x7d\xfc\x1a\x7d\x38\x38\x86\xd1\xfa\x74\xbc\xed\xa1\xad\xcd\x93\x4d\x68\xf8\xf0\xe8\x60\x67\xef\xe4\xf8\x35\xf9\xfd\xf6\xd3\xf1\x1e\x0c\xda\xde\xc7\x93\xed\xa3\xa3\x4f\x87\x27\x7b\x07\x1f\xab\x68\xf7\xe0\xd7\xed
\x5f\xb6\x8f\x50\x6f\xf3\xd3\xf1\xf6\x16\x8c\xee\xc1\x47\xe8\xea\xc9\xee\xf6\xc1\xd1\xbf\x09\x50\x32\x06\x30\xf8\x1e\xfa\x75\x77\xfb\x64\x77\xfb\x88\x0c\x28\x8c\xd4\x26\x19\x82\xe3\x93\xa3\xbd\xde\x89\x5c\xec\xe0\x08\x9d\x1c\x1c\x9d\x48\x7d\x44\x1f\xb7\xdf\x7d\xd8\x7b\xb7\xfd\xb1\xb7\x4d\xbe\x1e\x10\x28\xbf\xee\x1d\x6f\x57\xd1\xe6\xd1\xde\x31\x29\xb0\x47\x9b\xfd\x75\xf3\xdf\xe8\xe0\x13\x74\x99\xcc\xd1\xa7\xe3\x6d\xfa\x53\xa2\x58\x0f\x66\x12\xed\xed\xa0\xcd\xad\x5f\xf6\x08\xda\xac\xf0\xe1\xc1\xf1\xf1\x1e\xa3\x13\x18\xb2\xde\x2e\x1b\xee\x95\xe7\xcf\x5e\xae\xaa\x3a\xaf\x7d\x3f\x1b\x3d\xac\xde\xab\x5c\xd4\x69\x1a\xf8\x58\x14\xa1\x8f\xa5\xac\xb3\xe1\xc2\xce\x8f\xb2\x14\x65\x7e\x9f\x4b\x2c\xa4\xca\xf9\x97\xb1\x35\xd8\x66\x2e\x47\xd5\x3c\x84\xea\x1e\x42\x0d\x0f\xa1\xa6\x87\x50\xcb\x43\xa8\xed\x21\xd4\xf1\x10\x5a\xf3\x10\x5a\xf7\x10\x7a\xe5\xa1\x7a\xcd\x43\xf5\xba\x87\xea\x0d\x0f\xd5\x9b\x1e\xaa\xb7\x3c\x54\x6f\x4b\x16\x96\x6b\xb4\x2e\xf9\x46\xe0\x91\xf2\x04\x46\xbd\x4d\xe1\x92\x7a\xd0\xd6\x2b\x06\xbf\xc1\x60\xd4\xa1\x8d\x1c\x4e\x93\xb5\xd5\x62\xb8\xbc\x62\x30\xd6\x25\x3c\xd7\x18\xac\x0e\xc3\xa5\x4e\x61\xd6\xe5\x58\xcb\x75\x56\x97\xe3\x52\xa3\x30\x00\x0f\x8e\x67\x93\xc2\x22\xf0\xeb\x72\xbf\x65\x38\x2d\x56\xb7\xcd\x70\x5f\x63\x30\x1a\x12\x9e\x75\x06\x6b\x9d\xe1\xc2\xfa\x5d\x6f\x9e\x55\x5f\xcb\x73\x91\xcc\x99\x0b\x8e\xc7\x9a\x34\x56\x0d\x06\x93\xe3\xdc\x51\xc7\x03\xfa\xd6\xd4\xfa\xde\x61\x75\x9a\x39\x2c\xa8\xdb\xce\x71\xe6\x30\xf8\x78\x40\x5b\x75\xad\xef\x50\xa8\x2d\x75\x70\x8d\x21\xd8\xc9\x07\x57\x00\x69\x48\x03\x4d\x91\xcd\x01\xad\xb3\x3a\xd2\x60\xc1\xc4\xb4\xf3\xc1\x15\x30\x9a\xd2\x40\x53\x64\x25\x84\x1a\x6c\x64\x6b\x12\x30\x3e\x1a\x6b\x62\xf6\x04\x85\x22\x36\x3a\x14\x59\x75\x36\xd2\x79\x2b\x83\xa2\xc8\xc6\x0a\xd0\x93\x5b\xe2\xb4\xd5\x94\xc6\xb3\x93\x7f\x53\x68\x7a\xcd\x83\x4f\x30\x54\x9c\x5e\x5f\xe5\xb4\xc7\x69\xaa\xde\x96\x86\x75\x8d\x95\x55\xe6\xa3\x9e\x13\x81\x98\x8b\x57\xac\x20\x27\x9e\x75\xa9\x0c\x47\x7c\x0d\x7e\xcb\x67\x29\xb1\x96\x5b\x79\x55\xde\xbe\x58\xf3\xf2\x9a\x58\x57\x40\xe6\xa0\xf8\xfa\x6c\xe7\xb4\x2f\xfa\xd9\xc8\x51\x10\xe3\xc4\x48\x86\xc2\x45\xda\x94\xcc\x5b\x20\x0c\x31\x65\xf0\xdb\x39\x02\xd0\xcf\xb5\x7c\x21\x42\x83\x2d\x86\x48\x47\x43\xba\xa9\x0e\xbe\xe8\x74\x3d\x87\x23\xc6\x4e\x2c\x68\xf8\xae\xc0\x11\x0c\xa4\x2e\x0d\x52\x27\x6f\x57\x2c\x3c\xb6\x80\xeb\x4d\xcb\x7c\x88\x0e\x68\x88\x73\x40\x62\xc1\x35\xa4\xff\xb6\xc5\x2a\x56\x07\xa8\x6d\x29\xd7\x52\x67\x46\xcc\x64\xde\x29\x54\xaf\xa3\x33\x25\x4b\xf6\xf9\x88\xac\x10\xcb\x7c\x20\x11\xaa\xb9\xe6\xa1\xda\x75\x7b\x73\xbd\xb1\xf6\xea\xd5\x2b\xf2\xbb\xb3\xbd\xf5\x6a\xfb\xed\x66\x9d\xfc\x5e\xdf\xa9\xbf\x7d\xdb\xdb\xea\x91\xdf\x9b\xaf\xda\xcd\x9d\xad\xd6\xb6\x3a\xdf\xa3\xc4\xd9\x40\xbb\xb6\xd9\x58\x7f\xbb\xdd\x81\x06\x7a\xad\xad\xad\x7a\xa3\x05\x0d\x6c\xad\xd5\x9a\xdb\x3b\x4d\xf2\x7b\x6d\xb3\xb3\xb5\xd6\xd9\x86\x86\x39\x42\x67\x56\x7d\xc0\xd1\xde\xe1\xf6\xfe\x56\xbd\x53\x83\xf0\xfb\x73\x74\x48\xa2\x6c\xae\x45\x92\x5e\xd1\x5d\xf9\xae\x77\x45\x54\x99\x08\x48\x38\x82\x60\x77\xd6\x5a\xed\x46\xb3\x06\x23\xb8\xbd\xd3\xdb\xda\x7c\xbb\x0e\x1d\x7c\xb5\xfe\x76\x73\xab\xb7\xb3\x4d\x7e\xd7\x6b\xcd\x46\xbb\xb5\x06\x83\xd3\x6b\x6e\x35\xb6\xeb\x3b\xb5\x33\xa7\x6a\xbc\xac\x52\xde\xaa\xd8\x2d\xed\xa5\x54\x2f\xb8\xa9\x99\x6f\x8e\x4f\xb1\x00\xdd\x6b\x6e\x16\xe9\xb8\xbe\xd9\x3f\x97\x4a\xf3\xcb\x83\x73\xd3\x90\x09\x15\xdd\xa9\x48\xf5\xd0\x06\xaa\x98\x05\x10\x35\x00\x95\x1a\xcb\x0d\x1f\xa4\x97\x8b\x19\x95\x1a\x00\x99\x5d\xa9\x06\xd0\xb4\x2e\x35\xc1\x15\xa8\xc6\xd0\x3c\x5b\xe7\x5d\x24\xee\x1f\x08\x29\x3a\xaf\x1c\x81\x01\x9c\x8f\xc6\xee\x02\x09\x14\x48\x9c\x05\x4
0\xfc\x3c\xff\xe2\x86\x00\x32\xd1\xf9\x17\x37\x04\xd8\xa6\xcf\x53\x37\x04\xd8\x34\xce\xd3\xc4\x1e\xd1\x7a\x75\x95\xac\xb2\xcf\xe4\xd0\x7c\xe9\x27\x21\x91\x8e\x2d\x97\xb4\xfe\xd8\x43\xfd\xb1\x87\x06\x63\x0f\x05\x63\x0f\xe1\xb1\xa5\x21\x3f\xf1\x50\x3f\xf1\xd0\x20\xf1\x50\x90\x78\x08\x27\x7a\x63\x3e\x41\xc5\x27\x08\xef\x9a\x2e\x23\xfd\x04\x82\x8e\xc3\xc7\xba\xfe\x71\x40\x3e\x0e\xe8\xc7\x86\xfe\x31\x20\x1f\x03\xfa\xb1\xa9\x7f\x84\x03\x03\xa6\x1f\x5b\xfa\x47\x91\xa6\xda\x57\xf3\x52\xf3\x2e\xe9\xb7\x82\x56\x53\x42\xf8\xef\xd2\x06\xaa\x5b\xd7\x76\x46\x96\x8f\x3f\x46\x4b\xf9\x9a\x5a\xfa\x32\x3e\x0d\xcf\xce\xaa\x5f\x6d\x4e\x0c\xe0\xb5\xf3\xa6\xde\xa9\xfe\xf9\xfc\x99\xca\x1a\x49\x1b\x68\x58\xaf\xf4\xc7\xde\x60\xec\x05\xe3\x2a\x5a\x42\xa3\xb1\xdd\xf7\xe6\x16\x09\x85\x5c\xf8\xa6\xd9\xa0\xaa\x36\x0b\xb4\x86\x0e\xcd\x18\x79\x03\x5a\x6b\xdd\x09\xad\xa9\x43\x33\xa6\xca\x80\xd6\x69\x39\xa1\xb5\x74\x68\xc6\xdc\x4a\xd0\xfe\x5c\x5d\x65\x10\xd7\x6b\x4e\x88\x6d\x1d\xa2\x41\x10\xc8\x1e\x26\x9d\x4c\x62\x66\x9d\x2e\xf2\x05\x25\x71\x36\xae\x64\x5e\x4a\xa6\xd5\xe6\xb4\x01\x34\x90\x2d\xe1\xb1\x7d\xca\x61\x45\x18\x4b\x8a\xfc\x01\xdd\x06\xb6\x2f\x40\xee\xd0\x2e\x59\x93\x75\xab\x1b\x10\xac\x97\xbe\xad\x36\x2c\x33\xe3\x26\x51\xa0\xea\x27\x68\x49\xa2\xd6\xe4\xee\xd4\xda\xae\xf4\x13\x6f\x90\x78\x41\x02\x23\x9e\xdc\x8f\x5a\x5b\x3a\xb4\xfb\x52\xab\x0a\xed\x5e\xd4\xda\xd0\xa1\xdd\x9b\x5a\xeb\x3a\xc4\x07\xa6\xd6\x04\x6e\xad\x0b\xc8\x35\x71\x90\x2b\x70\xd4\xc4\x46\xae\xc0\x88\x6d\x5f\x80\x45\x53\x72\x4d\x9c\xe4\x0a\x1b\x80\xad\x36\x6c\x0d\xa6\x85\x86\xce\xca\xf7\xe4\x74\x0c\x20\x43\x82\xd5\xaf\x26\x61\x92\x7f\x36\x50\x65\x97\x9a\xe6\x0e\x08\x67\x0e\x2c\x3d\xdd\x65\x26\xbc\xbb\xd4\xfc\x36\x20\xe5\x6c\x23\xb2\xcb\xcc\x74\x77\xa9\x21\x2d\x26\xe5\x7c\x6b\xb9\x26\x2b\x07\xc6\xb2\xb0\x23\xf4\xad\xe5\x5a\xac\x1c\x18\x26\xf7\x49\xb9\x81\xb5\x1c\x18\x30\x2b\xc3\xa2\x8b\xb5\x3b\x2c\xb5\xc6\x3d\xcc\xb3\x02\x3f\xf3\x85\x30\x44\x1e\x2c\x1b\xff\xfc\x34\x8c\xbc\x64\xf4\x36\xcc\xd2\x93\x38\x03\x8e\x47\x61\x46\x5b\x7e\xe6\x53\xab\xad\x97\x68\xdd\x02\x1d\xea\x7c\xc0\xc3\xcc\x48\xda\x08\xe5\x8d\xce\x6c\x06\x81\x99\x85\x18\xb1\x7c\x8b\xd4\x98\x29\x07\x49\xa4\xc9\xf6\x19\xfa\xba\x41\x13\x0b\xe7\x36\x12\xa2\xc4\xbf\x50\xb3\xa1\x53\x6b\x0e\xa9\x52\xa9\xe4\x45\x97\x10\xe1\x0f\x04\xe4\xab\x2a\x01\xd5\x22\xeb\xb6\xde\x72\x08\xd0\xbc\x2a\x1d\x8e\x5c\x78\x96\x5e\x96\x17\x9e\x0d\x60\x4c\x70\xd6\x80\xcd\x13\x9c\x6d\x1d\x95\xf3\x74\xe4\xf9\x30\x79\x8e\x1d\x30\x8e\xb1\xa4\xed\x58\x5d\x85\x93\x20\x82\xec\x2e\xd4\x21\xcb\x6a\x38\x35\xa5\x27\x2f\x33\x9b\x4b\x31\x59\xc2\xea\x96\x65\x74\x0b\xe1\xec\xa2\x0d\x24\x8b\xef\xf7\x3b\xbf\xb5\x4b\x1d\xdf\xec\x27\xb2\x5d\x38\x8a\xed\x5a\x9c\x49\x50\xd1\x19\x6c\x57\xb8\xeb\xed\x2a\xc7\xab\xdd\x85\xcf\x55\x94\x42\x76\x95\x33\xd5\xae\xf3\x30\x35\xdf\x14\xee\x88\xde\x84\xd3\xc9\x65\x19\x2c\x02\x18\x6c\xb5\x28\xbb\x31\xd7\x26\x48\x61\x53\x83\x71\x1c\x15\x33\x28\x30\x25\x20\xa5\x72\xed\x02\x3c\xba\xcd\x20\xe8\xe7\x73\x83\x48\x68\x3d\x93\xd6\x18\x9a\xf0\x55\xb1\x8b\x82\x9f\xb7\xf4\xf6\x1f\xc9\x16\x71\xc3\x7a\xe5\xda\x43\x37\x1e\xfa\x62\x4b\xf3\x51\xa9\x5c\x83\x67\xe7\x0d\xfc\xfb\x25\xcf\xd6\x7e\x6b\xc0\x69\x14\xc3\xa9\x5c\x57\x7f\xaa\xdc\x54\xa9\x3b\xf9\xff\x25\x0f\x5f\xaa\xd5\xea\xff\xcf\xde\xf7\x2f\xc7\x6d\x23\x0d\xfe\x1d\x3f\x05\x76\xab\xd6\x1e\x45\x23\x89\x00\x7f\x81\xb6\x95\xbb\x44\xb1\x3f\xe7\x62\xc7\x2e\x5b\x7b\xf1\x57\x2e\x7b\x17\x24\x41\x0d\xe3\xd1\x8c\xbe\x19\xca\x1a\xed\xc6\x5b\xf7\x1a\xf7\x7a\xf7\x24\x57\x68\x80\x24\x48\x02\xe0\x8c\x2c\xe7\xdb\xec\x5a\x5b\xeb\xcc\x0c\x1b\xdd\x8d\xfe\x85\x26\x7e\x34\x1e\xd8\xb0\xf9\xa3\x
d8\x04\xa2\x7f\x08\x8c\x2d\x6b\x16\x5c\xc1\x38\xae\xbb\x80\x01\x78\xbb\xde\xbb\x3b\xf9\x07\x30\x67\xc7\x18\x6e\x23\x33\x21\xb4\x5f\x5b\x54\x16\x5c\x90\x4a\x6c\xa6\x0b\x23\xa6\xcd\xc3\x87\x0b\xe0\x6a\xf3\xcd\x37\xdf\x4c\x7c\x72\xb0\xd0\x99\x92\x1f\x9c\xbb\x61\xea\xcd\x30\xf2\x1e\xb8\xed\x36\xc3\x58\x6f\xfb\x51\xfb\x5b\x60\xcf\x53\xfd\xb9\x5a\xca\xc8\x34\x44\x63\xb9\x9f\xc7\x02\x7d\xd3\x8b\x79\x94\x67\xb4\x3b\x59\xea\x09\xbc\xc9\x3d\xc5\xe2\x3d\xc3\x2e\x1c\x7b\xab\xab\x9a\x5b\xd3\x76\x9b\xe1\xe4\x60\x6f\xab\x4d\x0d\xb0\xdd\x56\xa5\x5a\x39\x4f\x9e\x7d\x7b\xf2\x1b\xa8\xc6\xd1\xfc\x3d\xbf\x86\xa6\x6b\x9e\xad\x78\x65\xb9\x3b\xc9\xa2\x50\xb8\x72\xf0\x16\x15\x2a\x2f\x32\x6c\x54\xf3\xe4\x9c\x65\xad\x7a\xf4\x2d\x56\x06\x0d\x75\x80\x87\x5a\x3a\x67\x99\x41\x53\x5f\x7d\x94\xeb\xc0\x96\xad\x51\x35\xa4\xf9\x76\xa2\x8f\x6f\xa7\x71\xfc\x65\x8b\xd3\xbf\xc2\x91\x95\xcf\xbd\x74\xdf\x2b\xac\xa6\x11\xb6\x96\x4c\x7b\xf5\xe4\xdb\x03\xbc\xc5\x4a\xc6\xf0\xae\xea\xdb\x5c\xbf\x38\x86\xd3\xa7\xed\x12\x46\xb9\x28\xab\x89\xa1\x00\x55\x77\x49\x83\x17\x59\xce\x52\x9a\x18\x6a\x33\x79\x9b\x84\xa6\x2c\xcf\x0a\xde\x59\xe3\x30\x01\x66\x7e\x4e\x38\x2e\xbc\xee\xb3\x4f\x5f\x02\xb1\x65\xe8\xe6\xe4\x7b\x38\x83\x3e\x40\xb0\xcd\xdc\xb3\x79\xba\x58\x3c\x4a\xcd\x93\xc5\x90\x30\x9a\xa7\x8a\xe1\x75\xd5\x3c\x51\x2c\x1e\xf1\x66\x9a\x78\xc0\xa9\x75\x9e\xd8\x3a\x27\x6c\x79\x5b\x80\x79\x1f\x24\x4f\x98\x5a\x6a\xc1\xfc\x2c\x13\xff\x6e\x09\x8c\xee\xd9\xd3\xfa\xaf\x9e\x50\x32\x23\xaa\xcf\x39\xfc\xfc\xa6\x44\x07\xc8\x7f\x8b\xde\xa9\x8f\xb4\xfd\x88\x03\xed\x73\x64\xbb\x3b\x52\xb1\x34\x59\xc0\xe1\x58\xf9\x6e\x09\xaf\x0f\x3e\x36\x97\xa9\x31\xbf\x09\xc1\xd4\xd2\x84\x09\x24\x21\x20\x61\xf2\x4d\x26\x86\x03\xb2\x1c\xed\x03\x21\xdb\x44\x23\x7a\x88\x88\x67\x95\x1a\x4c\x9b\x4d\x26\x29\xba\x8b\x32\x99\xe7\x8a\x8f\x39\x60\xf6\x36\x21\x93\xab\xb0\x23\x53\x7c\xe8\x21\x0a\xc6\x48\xa4\xe8\x1d\xca\xd0\x3b\x94\x4b\xcc\x11\xcf\x13\x9e\x32\x53\xd1\xa1\x1e\xe6\x68\x07\xe6\x25\xef\xe2\x53\xa6\x7a\x71\x80\xbc\x4d\xec\xf1\x20\xf0\x49\x60\xa7\x75\xf4\x75\x43\x8e\x7a\x7b\xe8\xeb\xa3\xad\xfb\x22\xf0\xfb\x61\x92\xfb\x9c\xf4\x67\x79\x90\x45\xa5\xc2\x5f\x72\xd3\x74\x1f\x3a\x46\x99\x69\x8a\x0f\x01\xc9\x87\x0f\x91\xef\xa9\x5e\x82\xfa\x8d\x77\x8b\xa2\x63\x64\xe2\x83\x6d\x77\x5a\x6b\xab\xc9\x40\x35\x89\x56\x4f\xb6\xb1\xfe\x09\x6f\xd4\x99\x08\x84\x09\xc3\x41\xe5\x13\xd4\x99\x04\x84\xc9\xc2\xcc\x0c\xe3\xeb\x13\x85\xb9\x19\x26\xd0\x27\x09\x79\x1f\xe6\xcb\x04\xdf\x3f\xeb\x04\x9f\xc8\x85\x0f\x8b\xf9\x72\xb9\xd2\xe7\xdc\x8e\x60\xa0\x56\x7f\x9f\x44\x04\x6a\x21\xb4\x98\x47\xe6\xe9\x06\xd3\x74\x9f\x69\x86\x6e\xc7\x79\x20\xe3\x74\xdd\xef\x71\x36\xe8\xcb\x14\xc2\x60\x32\x40\xa4\xcf\x3b\xcd\x1e\x40\x03\xd7\xc4\x41\x37\x21\xef\xce\x19\x88\x67\x5f\xa6\x0b\x6e\x75\xba\x00\xf4\xb1\xc5\x4c\x81\x59\x2d\xed\x24\x81\x52\x8d\xfd\xd8\x94\x00\xb0\x4f\x0b\xd0\xdf\x75\x81\x8d\xf5\x8c\x91\x30\xfa\xdc\xb5\x31\x14\x95\x7f\x9f\xe9\x83\xc1\xf4\x80\xfe\x0e\x4f\xc2\xa8\xf3\x16\xaf\x9d\xc2\xee\xcf\x0a\x10\x12\x6c\x37\x2f\x20\x00\x3b\x38\xe1\xbb\x44\xfe\x9b\xce\x0d\x64\xd8\x0b\x13\x9e\x53\xf1\xca\xef\x47\x71\x96\x87\x5e\x0c\x9f\xbd\xd8\xcb\x73\x0c\x9f\x8b\xd8\xe3\x61\xe2\x9b\xe7\x0c\x8a\x22\xf3\xbc\xd4\x87\xc9\x85\x88\x86\x14\x87\x58\x7e\x0e\x8a\x84\x16\x0c\x10\xa4\xbc\x60\x41\xc1\x82\x1d\xa6\x0b\xb6\xca\x3c\xb5\xb0\xaf\x44\xa7\xb5\x74\x9c\xa2\x85\x88\xda\xa4\x33\x07\xc7\xc3\xe4\xc5\xb2\xb0\xf4\x65\x88\x1e\x19\x71\x09\x09\x76\x1d\xa4\x45\x93\x91\x61\xba\xe3\x1d\x83\x81\x9a\x10\xf3\x21\xf6\x2f\x43\xf5\x27\x0c\xd5\x42\x2b\xdb\x0d\xd6\x46\xe5\x74\x86\x6b\xa9\x20\xe7\x80\x4d\x48\xff\xa8\xb3\x76\xae\x59\x0d\x47\xf7\xe3\
x44\x0c\xe0\xc9\x97\x79\xfd\xff\x9e\x81\xf9\xf7\x77\x2c\xef\x07\x79\x89\x43\xf9\xb7\xe6\x54\x2e\x5a\x2d\x2f\x17\x39\xca\xba\xe7\xf5\xb4\x1e\x3c\xe9\x5f\x9d\xf2\x63\x77\x19\xa0\x9e\xa8\xe5\x2d\x0e\xf9\xc4\x94\xc1\x20\x7d\x49\xb9\x5c\xbf\x58\x95\xe7\x7c\xb2\x30\x0e\x63\xeb\xff\x5a\x55\x3f\xd5\xef\xf9\xe2\xcb\x64\xd1\x7f\xcf\x6c\x26\x82\xa5\x3a\xd1\x31\x22\x0f\xea\xcf\x0f\x8f\x25\x86\xfa\x07\xc7\xdc\xf0\x1f\x26\x0b\xf4\x27\x05\xb6\x67\x9d\x2f\x54\x3e\x5a\xb0\xf9\x9a\x8f\xef\x0a\xec\xcf\x8f\xd5\xef\xe3\xab\xcb\xee\x1b\xae\x41\x2c\x67\xbc\x7a\xbc\x62\xf0\x99\xcd\xbf\x2b\xab\xb5\x41\x40\xcd\x12\xfe\x02\x1d\xa0\xc9\x02\x2a\x7b\xee\xa1\xaf\x3b\x93\x1f\xfd\x99\x2c\x8d\x56\x3d\x4b\xad\x57\x66\x87\xdf\x40\x21\xbd\xfa\x3d\x57\xb3\x72\xce\xd1\x44\x3d\x7b\x88\xd4\x96\xcc\xbe\x14\x5b\x6d\x5a\x05\xdd\xa0\xa0\x56\x29\x3f\x79\x23\x81\xa0\xec\xe8\x40\x10\x60\x0b\x17\xcb\xab\xc9\x62\x8a\x30\x3a\x42\x64\x6f\x8b\x8a\xed\x08\x6e\x42\xd9\x05\xad\xbf\x67\x2c\x9e\x2d\x51\xec\xef\x8f\x4c\x85\x2e\x3a\x10\x75\x86\x34\x69\x71\xde\x7c\x8d\x4d\x24\xde\xdb\x65\xd3\xc3\x0c\xfd\xb3\xaf\xb4\x3d\x39\x5c\xcf\xcb\x8c\x4f\xbc\xbd\x2f\xab\x5e\x5b\xaf\x7a\x0d\x1e\x15\xf0\x28\x34\x3d\x3a\x83\x47\x83\x05\x23\xc8\x59\xe0\x51\xfc\xc9\xcb\x68\x91\xa3\xd6\xfd\x6f\xbd\x8c\x76\xc6\xce\xcf\x99\xb7\x69\x16\xd3\xf0\x40\x28\x43\x68\xd8\x68\x3c\xa9\x5b\x3e\x7c\x88\x88\x5c\xf4\xaa\x7f\xf9\xe6\x9b\x6f\x50\xbc\xb7\x87\xd0\x3b\x33\xa6\xee\x5f\x07\x13\x0e\x06\x98\x30\xdd\xdb\xdb\x0e\x53\xb7\x9d\x6f\x0c\x2f\x9d\x9e\xe0\xb6\xdf\xc6\x4d\xf2\x5d\x60\xad\xdb\x58\x32\xab\x75\x1b\x6f\xea\x7a\xd3\x5b\x32\xdb\xc5\xe4\x0f\x31\x25\x3b\x76\xbb\x6e\x67\xbe\x93\x00\xb5\x86\xa3\x94\xb8\xaf\x7a\x0e\x45\x7e\x55\x0f\xf7\x9d\x0b\xa6\xb6\xd5\xcf\x0c\x4e\x35\x4e\x38\xba\x8b\x0a\xd8\xec\xf6\x0f\xf1\xf1\xcc\x76\x85\xcb\x39\x83\x0a\x73\x0c\xdd\x45\x29\x80\x33\xb9\x3a\xf8\x0e\xa9\x75\x42\x13\xff\x90\xac\x94\x67\x82\xf1\x66\xa9\x55\x2d\xb6\xa9\xb5\x56\xb9\xf5\x4f\x3e\xc1\x89\xf6\x04\xfb\x9d\x47\x9d\x46\xe6\xb1\xad\x21\x06\xf7\xd4\x4c\x38\xd8\xb8\xac\x9c\xcc\xa1\x5d\xa4\x30\xca\x27\x58\x7b\x82\xb1\xfe\x28\x96\x3b\x5b\xe5\x23\x12\x9a\x47\x3c\x58\x40\x16\x94\x66\x68\xbf\x26\xbb\x2f\x84\xba\x2f\x2f\x7a\xb3\x2e\x1e\x43\x43\x82\x8e\x6b\xc1\xec\x0b\xd1\x9a\x28\x88\xc0\x75\x66\x40\x20\x62\x5d\xbf\x4e\xbb\xf8\x13\xe1\xd1\x94\x7e\x41\xed\x4c\xb8\x2d\x01\x9b\x96\xf9\xd0\xc8\x12\x69\xbf\xda\x3a\x1a\x59\x0e\x9d\x54\x42\x10\x15\x31\xd1\xfa\x77\x59\x1a\x95\x30\xa1\x82\x81\x92\xe1\x85\x19\x26\x52\x30\x50\x12\xfc\xcc\x0c\x13\x2b\x18\xf0\xf9\xd9\x97\x65\xd8\x2f\xcb\xb0\x5f\x96\x61\x87\xd9\xe6\x97\x65\xd8\x7f\xca\x39\xde\x30\xda\x79\x8e\x37\x8c\x46\xe7\x78\xf5\x77\xb6\xe1\x1c\x6f\x18\x7d\x99\xe3\xbd\xf5\x39\xde\x30\xda\x76\x8e\xd7\xa4\x9c\xee\x1c\x2f\x28\xc8\xbd\x69\xbb\x59\x3b\x33\x2f\xcd\x52\xef\x77\xbd\x34\xbb\x89\x82\xdf\xe4\xe2\x82\x86\xce\x97\x59\xe0\xee\x2c\xf0\x26\x82\x35\xd5\xc3\x4d\x14\x68\xbf\xbf\x8e\x02\x55\xa5\x1b\x20\x0e\xb5\x3a\xd1\x3b\xd5\x74\xd3\xfa\xf7\xf2\xc9\xf3\xbf\x3c\x7f\xfc\xf8\xd5\xa3\xd3\x57\xfd\xd9\xe2\x17\x3f\xfc\xe5\x87\x9f\xbe\x7f\xf4\xfa\xd1\xf0\x56\xee\x97\xcf\xff\xfc\xd3\xf7\x7f\x39\x79\xfe\xd3\xab\xd3\x6f\x7f\x6a\x5a\x6a\xe4\xe4\xb4\xf2\xc9\x76\xd3\xca\x5a\x8b\xd5\x6c\x59\x17\x6d\xe9\xcd\x49\xd7\xa4\xc5\xdb\x35\x9e\xa2\x6b\x5b\xa9\xf2\x4a\x4e\x89\x54\xe8\x21\x22\xc1\x03\x54\x19\xa6\x44\xb4\x3e\xbf\xd9\xa0\x7d\x14\xa2\xaf\xd1\xb5\x3c\x3d\x58\xd5\x87\x34\xe1\x13\xd9\x83\x99\x4a\xf4\x27\x14\x0d\x72\x11\x48\x03\xf9\xd5\x6b\x74\x8c\xae\xd1\x9f\x50\x68\xca\x12\xf9\xd5\x7f\x0a\xac\x04\x7d\x8d\x04\x1d\x5f\xd0\xd9\x33\x00\x6f\xe4\xb4\xdc\xeb\xde\xcf\xd7\xf2\xe7\xff\xb4
\x4c\x05\x6b\x62\xbb\x28\x51\x09\xd7\x09\x18\x84\xd6\x48\x66\x23\x25\xb3\x91\x07\x34\x37\x06\xc1\x34\xa0\x52\xba\xe8\x5a\x82\x5e\x5b\xa6\x95\x5a\x03\xe9\x8a\xf1\x1a\x2e\xf8\x19\xf6\x5a\xc8\xb5\xdf\xf5\x8f\xa3\x7d\xeb\xad\x72\x74\xad\xe1\xe9\xe3\x57\x2f\x05\xaf\x1b\x0f\x9b\x8c\x41\xbf\x77\xc2\x32\x3f\x26\xc0\x80\x44\x6d\xac\xcf\xd6\x57\x3d\xdb\x32\x82\x3d\xad\xc1\x2c\x22\x54\x37\x4f\xfc\x82\x1e\xa2\xf8\x01\xfa\xc5\x31\x33\x07\x7d\x80\xa3\xa9\xe6\xaa\x28\x35\xf9\xb4\xac\x5e\x2c\xd7\x50\xc7\x55\x58\x15\x5c\x96\xfb\xcb\x1e\x3a\x40\xa6\xdd\xd4\x35\x72\xbd\xd1\x43\xa4\xea\x45\x98\x80\xc5\xdf\xa0\x83\xef\x8e\x11\x90\xd1\xb0\x58\x68\x75\x77\x54\xeb\x54\xbf\x39\x06\xb2\xf6\xcd\xd5\x03\xca\xcf\x34\xca\x1d\x54\x07\x86\xf7\x9e\x86\x81\xed\xa6\x96\x34\xc3\x5a\xf0\x4d\x05\x06\x34\xa2\x16\x6a\xdf\x89\x7e\x74\x84\x5e\xac\xca\xf3\xb2\x2a\x3f\x70\x74\xb1\x9c\x5f\x2f\x96\xe7\x25\x9b\xa3\xe5\x07\xbe\x42\xff\xf1\x78\x42\xf6\xee\xa3\xcd\x3b\x8a\xf6\xd1\xe6\x5d\x04\xff\x86\xf0\x6f\x20\xc2\x8c\x19\xa5\xb2\x68\x49\x5e\x9e\x1f\x78\x87\xbc\x4d\xec\xd8\x32\x6f\x61\x4e\x61\x38\x36\xda\xc7\xc8\xa2\x57\x2f\xc0\xcb\x39\x3e\x35\xfc\xd4\x05\xc6\xfa\x3a\x9b\x0e\xec\x67\x6f\xd7\xd5\x94\x35\xf8\x4f\xc5\xcf\x2f\x96\x2b\xb6\xba\xee\xdc\x44\x27\x5c\xe0\x54\x1f\x88\xac\xab\x94\xc6\x5b\x67\xcc\xde\x7f\x6a\xec\xd9\x18\xdf\xbd\xb5\x1d\x7f\xbb\x95\x1d\xbf\xb3\xae\xe3\xbb\x56\x75\x6e\xff\x2a\x81\xe5\x65\x75\x71\x59\x3d\x85\x57\xeb\x0e\x2c\x82\x24\x3d\xe7\xeb\x72\xc5\x73\xed\xa2\x81\xb4\xac\xd6\x75\x41\x68\xd9\xb8\xf3\xb6\x50\x37\x7e\xbe\x98\xd7\x6a\xd2\x6a\x70\xb3\x15\xbf\x8f\x08\x09\xa6\x88\x84\xd1\x14\xf9\x34\x98\xa2\x10\x93\x7e\x63\x75\x67\xc1\x7d\xf1\x4c\x7f\xd4\xbf\xb4\xa0\x7e\x69\xb6\xde\x5b\xa0\xf7\xae\x87\xed\x06\xf7\x17\xc0\x4c\x2d\xdc\x84\x58\xbf\x7b\xd7\xdf\xde\xbc\xb5\x44\xfb\x2d\x4c\x4d\xfc\x01\x1e\x69\x72\x0b\x7e\xd5\x98\x1d\x2c\xc2\x8d\x95\x12\x00\x4e\x9a\xdb\x7a\x61\x04\x88\x3c\x0f\x1d\x20\x31\xd0\x36\x37\x25\xe8\x92\x10\xd9\x8b\x4f\x3e\xd7\x8a\x9e\x61\x62\xce\x20\x34\xe3\xe4\x59\xdd\x89\xa7\x6c\x01\x73\x3f\xbd\xae\x1d\x21\x62\x9a\x43\x4b\xd7\xcb\x55\x3a\x2e\xff\x1e\xfa\x4f\xa9\x24\xf8\x8c\x94\xa8\xbb\x28\x26\x64\x6d\x9d\x36\x7f\x46\xe0\x0e\xfa\x3e\xb8\x88\xf5\xae\x62\x16\xd6\x2b\xa8\x05\x79\x67\x3d\x41\xd2\x29\x24\x48\x6e\x52\x41\x90\x74\x4a\x07\x92\x9b\xd7\x0c\x54\x0c\xe3\x31\x8e\x71\x97\x65\x7c\x23\x9e\x71\x97\x69\xbc\x0b\xd7\x46\x3d\x48\xe3\x6a\xa6\x46\xca\x45\xb5\x94\xd6\x6c\xd6\xf4\x9c\xc1\x64\x5e\xed\xce\x06\x51\x08\x88\x43\xb8\x6f\xf6\xdd\x31\xc8\xc5\x06\x33\x5f\x5e\x21\x05\x33\xbe\x1a\xf1\x52\x0c\xb0\x6b\x8b\x0f\xc8\x44\x19\xfc\x40\x7e\x94\x49\x2f\x7c\xb6\xbb\xc0\xe9\x8c\x57\x6c\xf8\x64\x87\xb7\x06\x0d\xd9\xb3\x52\xbc\x82\xcc\x2f\xcf\x17\xd0\x39\x83\x5b\xd5\x12\xac\xd3\xec\x29\x6a\x33\x69\x23\xf0\x8e\xef\x24\x3a\x8d\x8e\x96\xda\x37\x14\x0b\x21\xf1\x57\xa7\x9e\x8d\xf6\x5c\xb0\x4f\x35\xd8\xf9\xf2\xca\x9a\x97\x5a\xa5\x75\x6a\xcc\x73\x4c\x3d\x39\x15\x5a\x38\x7d\xb3\xb1\xf1\x7e\xba\x91\xb6\x76\x0c\x3d\xb0\x03\x81\xb1\x1d\x03\xeb\xdb\xed\xbe\xb9\x99\x19\x38\xc2\x6a\xdb\xa3\x00\xba\x34\x11\x7a\x09\xe0\xf5\xd0\xb5\x58\x7e\xba\xc1\x2d\x38\xde\x06\x5c\xda\xd7\xe9\x06\xbb\xf4\xa8\x60\x9f\x36\xb0\xa0\x47\xa7\x79\xaf\x2f\x57\xe0\x51\xf2\x3a\x11\x61\xea\xe3\x56\x7e\xba\x09\x54\x2c\x40\x93\x89\xe2\xad\x39\x1a\xac\xe8\xab\xf3\xc1\xb6\xd7\x1b\xc0\xf6\xb4\xc1\x26\xa3\x86\xc4\xf6\xb4\x87\xed\xd9\x38\xb6\xdf\xd4\xa9\x3a\xa1\xd0\x61\x9f\xa8\x1f\x12\x2d\x66\x8a\x76\x7a\xdb\x7b\x39\x5b\xa2\x17\xa5\xc3\xb2\x05\xc9\xfa\xce\x47\xfc\x40\xfb\x2a\x53\xb9\xe6\xfb\x27\x9b\x7c\x47\x72\x0d\x5a\x97\x19\x0b\x20\x69\x41\x63\x01\xa9\x86\x7e\xda\x4
+[... gzip-compressed go-bindata asset payload elided; the string literal closes with ...\x64\x21\x06\x00") ...]
+var _web3Js = []byte("\x1f\x8b\x08\x00\x00\x00\x00\x00\x00\xff...")
+[... remainder of the gzip-compressed web3.js payload elided ...]
8\x5e\xb6\x95\x06\x3c\x77\x3f\x7f\x5e\x7e\xf4\xe7\x1a\x5f\x57\x43\xd3\x60\x99\x19\xb5\x57\x0d\x7a\x1d\x46\xad\xdf\xff\x77\xc9\x33\xd2\x6e\x93\x61\x33\x6b\x2c\x84\x32\xaf\x4d\xd6\x0a\x78\x63\xbc\x9f\x2b\x27\x94\xcc\xe8\x7b\xeb\xa5\xf5\x17\x7e\x9c\xc9\xbd\x47\x5e\x09\x07\x98\xe1\x07\x73\x4c\x64\x40\xe2\x95\x58\xd4\x8d\x79\x51\x08\x4e\x95\x6c\xfc\xf9\x9c\x33\xa9\xe5\xb5\x4b\xf8\x95\x1f\x29\xa1\x3b\x31\x61\x9d\xd5\x51\x67\x6c\x6b\x25\xb8\x43\x9b\x92\x1f\x79\x32\x21\x90\x37\xf0\x0d\xb0\x48\xe7\x8b\xe2\x12\xab\x04\x1b\x6c\xa2\xb5\xab\xd0\xa4\x47\xc4\x9e\x86\x20\x7d\xac\x80\x1b\xe9\x6e\xaa\xd4\xd1\x94\x17\x13\x95\x03\x11\x55\xd6\x8d\xc1\xb8\x58\xd9\xf0\x88\x05\x37\x19\x87\x7e\x89\x57\xee\x1c\xea\x75\x94\x17\xce\xb3\xbf\x13\x63\x34\xa7\x1e\x8f\x50\x95\xa3\xd7\x35\xbb\xdb\x8b\x7a\x14\x24\xaf\xe9\x97\x8b\x90\x9b\xb5\x8a\x47\x70\x4a\x15\x59\xa4\x05\x7a\xe8\xca\x0b\x4b\xe1\x88\x3b\x1d\x22\xc6\xc3\x3e\xf5\x7e\x50\x80\x9a\x6f\x8a\x8c\xbd\x4d\xad\x47\xbe\x7d\x95\x2c\x48\xfb\xf6\xcb\xf6\x14\x62\x36\x4f\xf6\x70\x8f\x35\x2c\x1e\xc6\xc6\x9e\xab\xe8\x17\x4f\xb5\xdc\xe7\x59\x1c\x52\x8b\x40\x9d\x14\x3f\xb9\x55\x4f\xe6\x06\x03\x39\xdd\xf4\x8c\x66\x97\xc5\x0c\x1c\x91\xa0\x7a\x30\x76\x5c\xaf\x53\xd2\x1c\xcd\xc1\x8f\xf1\x4c\xd7\x7f\x43\xa1\x1c\x2f\xdd\x69\x13\xae\xd2\xf9\xba\x47\xda\x6d\xa9\x7c\xaf\x50\x52\xbc\xe3\xb3\x64\xe9\xf4\x94\xfa\xee\xfa\xb4\xb7\xd5\x28\xd0\xde\x57\xd4\xc9\xc1\x6d\x74\xb5\x52\x2e\x63\x20\x25\x5a\x39\x69\x63\xc6\xfe\xe7\xaa\x32\xf8\xf5\x58\xff\x3c\x45\xc9\x3b\xf8\xc3\xd2\xcd\xb1\x34\xae\x9c\x63\xbf\xa4\x76\x8e\xfd\x7e\x8a\xaa\x43\xfa\x39\xa7\xc6\x06\x1a\x3a\xe7\xee\x7d\x15\x15\x1d\x2b\xbc\x8a\x8e\x8e\xc3\xdb\x4a\x3a\x96\xba\xa2\x96\xce\x2c\x52\xa1\xa6\xe3\x2d\x56\x95\xbd\x89\xa2\x8e\xe1\xb6\x44\x51\xd7\xcc\x4b\xbe\xe8\x56\x03\x45\x5d\xa3\x50\x5e\x5f\xeb\x65\x9d\xe7\xf6\x6f\x15\xf2\xe0\xc5\x57\x21\x10\x59\xc2\x26\x11\x9e\xbe\x22\x91\xd8\x85\x2a\xc8\x44\xb6\x5b\x5d\xfe\x46\x3a\x5d\x2e\x49\x35\x79\x30\xe7\x69\xef\x6e\x9f\xca\xa9\x51\x36\xa0\xbb\xbb\x0f\x3d\x52\xf9\x78\xc7\xc3\x87\x91\x7f\xdb\x28\x6f\xee\xd8\x76\x4c\xb3\x22\x88\x12\xbf\x73\x5b\x07\x91\xfc\x36\xa9\x86\xa8\x39\x50\xdf\x4c\xaf\x26\x6b\x51\xc4\xca\xa8\x75\x05\x51\xd0\x6c\xce\x8e\xfc\xd1\x04\x6a\x36\xfb\x1d\x0a\x97\xb5\x64\x1a\x9d\xd1\x44\x9a\xb4\x98\x47\xea\x32\x5f\xb9\x96\xfd\x0b\x3f\x66\x6b\x73\x5b\xc0\x32\xaf\xdc\x69\xd7\x6f\x7c\x8b\x21\x9a\x2f\x11\xee\x99\xb6\x55\x78\x85\xe3\xf4\x8c\x66\xd9\x79\x16\x15\x05\x05\x73\x2f\xde\xab\x16\xd9\x80\xde\x37\xc6\xdd\x39\x68\xd9\x73\xfc\x8a\x1f\xac\x20\xf4\x51\x34\x4a\x04\x0a\x0b\xd7\xe9\xb0\xfd\xd0\xbe\x11\x32\x5d\xad\xa4\xd5\x9c\xd6\xda\x96\xe0\xcd\xe3\x3f\xc0\x8f\xc1\xc1\x00\x54\xe1\xc1\x9c\xad\x0a\x70\x79\x28\xb4\x59\x6c\xbc\x8c\x13\x50\x7e\xc7\x10\x47\x9f\x29\x09\x48\x1e\x25\xd3\x98\x2a\x27\x5c\x00\xd9\x37\xec\xa1\x81\x82\xb9\x8f\x19\xee\x93\x83\xb7\x76\x75\x45\x4e\xda\x27\x5b\xa7\xed\xd3\xae\x12\x06\x6b\x7c\x00\x88\xee\x99\x78\x67\x5f\xd8\xaf\x61\x89\xe8\xce\x6d\xa0\x38\x2a\xc0\x56\x61\xab\x47\x1e\x81\x31\xf6\x26\xf4\x65\x0b\x7b\xa1\xd1\x1d\x72\x04\x59\xe9\xa5\xa1\x27\xfd\x3a\x94\x9d\x16\xa4\x37\x87\x87\x12\x50\x37\x30\x18\x90\x20\x8e\xc9\x28\xc8\xa3\x31\x77\x7e\x00\x2f\x05\x76\xb6\x85\x02\x27\x4e\xd9\xc9\x58\xf6\xa6\x47\x76\xb6\xeb\x8c\x4e\xcc\x85\x2d\x38\x9a\x3c\x81\x4b\x5d\x24\xa1\x53\x10\x20\x21\x22\xd4\xc9\x69\x8b\xec\xfd\x00\xeb\x53\xa7\x3d\xe6\x89\x95\xca\xb4\x7d\x59\xdb\xaa\x1c\x60\x46\x4b\x7b\x56\xb1\xda\x71\xab\xa5\x34\xab\x7d\x7e\x19\xde\x60\x1c\xa2\xdb\xb5\xb6\x51\x54\xe4\xc1\x03\x82\xbf\x4f\xd0\x6f\xe4\xff\xed\x54\xee\xba\x2a\x2c\xc6\x60\x7a\xa3\xb9\x11\xcb\xb7\x6a\x6a\xe4\x
2c\x98\x73\x23\x26\xcc\x9c\x1a\xe4\x6e\xed\x96\x33\x63\xf5\xab\x62\x62\x50\x9b\x5f\x7b\x5e\xee\x72\x62\x4c\xbf\x27\x9a\x91\xa2\x99\x80\xb3\x51\x0b\x6c\x11\xb6\x39\xd2\xf9\x21\xa9\x25\x8c\x15\xb6\xc4\x54\x6c\x3d\x56\x80\xdb\xa7\x27\x3b\x02\x54\xa6\x71\x10\x05\xb1\x75\x6a\x25\xe8\x6f\x77\x77\x00\xac\xde\x60\x7b\xc0\x63\x11\x43\xac\xdf\x13\x50\x63\x77\x34\x91\xd1\x84\x74\x50\x16\xe2\x90\x36\x3f\xbe\xe1\xc4\x02\xc3\xf6\xbd\x86\xd8\xaa\x98\x72\xb1\x49\xc8\x53\xb5\x6f\x9e\x61\xde\x7c\x53\xdd\x52\xc1\xf7\x9c\x09\x17\x9f\x2d\x63\xde\x8d\x8a\x4e\xcc\xca\xf1\x74\x6b\xd7\x6b\x8d\xe6\x59\x65\xf0\xa1\x88\xfc\xd2\xf9\x35\x5c\x28\x96\xee\xf6\xc2\x55\x51\x1c\xe4\x05\x39\x39\x65\xc2\x04\xaf\xf7\x46\xd3\xbe\xee\x9f\x77\x35\x07\x20\x67\x11\xc7\xc1\x12\x1c\x68\xf4\x33\x28\xf8\x54\x34\xd0\x84\x48\x2a\x8c\x63\xd1\x11\x46\x71\x60\xfb\xa6\x89\x8c\x2e\x49\x48\x27\xc1\x32\x06\x45\x68\xbe\x64\x72\xaa\xda\x98\x5b\xc2\x47\x4d\x4f\xc4\x78\xb4\x67\xd1\x38\x46\xdd\x80\x01\xeb\x1d\x71\x45\x51\xb8\xe1\xe9\xad\xd4\xa8\x5e\x3a\x6a\x97\x3a\x62\xb4\x44\x72\x7b\x8d\x00\xc5\x0b\x52\x3e\x69\x31\x8a\xef\x91\x16\x5b\x04\xec\xbf\xd3\xd6\xa9\xa6\x76\x01\x81\xd2\xa0\x50\xb2\x8c\xed\x67\x0f\x68\x36\x1b\xa1\xcd\xf6\x2e\x67\xf5\xb7\x66\x21\xb8\x1e\xaa\x9c\x95\xc0\xf7\x06\xe1\x29\x8f\xcf\x7a\x0e\x37\xbc\x6c\x38\xc6\x78\xd9\xbf\xb0\xea\x2d\x22\x16\xdc\xaa\xf3\xef\x13\x7e\x1a\xff\xf7\x69\xb7\x5e\x44\x10\xca\x5b\xe5\xea\xa1\xfc\xde\xc1\x8a\x61\x21\xa1\x9b\xb3\x0e\xf9\xf0\xd4\xbd\xcb\xb2\x70\xe6\xb9\xb4\x10\xf7\xe8\xf6\xc6\xe0\x75\x46\x6d\xde\xca\x08\x3f\xa8\xd2\x03\xaa\xcd\x16\x6a\x5c\xc1\x2a\xfb\x6f\x6c\x4c\xbc\x4b\x4a\xff\xfc\x5e\x51\x5d\xa7\xb2\x34\x9e\x60\x67\xb2\x82\x95\x39\x85\xd4\xb3\xe4\x93\x53\x9f\x07\xf1\xfe\x62\x99\xcf\x3a\x8e\x5b\x52\xf9\x4c\x5b\xfa\x18\x75\x6b\x66\x63\x71\x1d\xae\x9f\xf9\xbc\x7f\xe2\x96\x90\x13\xcf\xce\x59\x8f\x60\xe7\xb2\x96\x6f\xd2\x5b\x79\xf4\x15\x13\x88\x3d\xf9\xde\x7a\xfe\xa0\xeb\x8e\xd4\x21\x10\xff\xdb\xcf\x9f\xcf\x1d\x6b\x8d\x1b\xd6\xd2\x89\x60\xb3\x09\x7e\x52\x2b\xe6\x63\xe5\xd9\x58\x73\xee\x08\x2d\xdd\x91\xb1\x24\x91\x3b\xdb\x26\x0e\x41\xf9\xfd\xe8\x24\x4b\xe7\x5e\x73\x03\x0e\xe5\xe3\x2d\x23\xfb\xc1\x8e\x65\x20\x64\x58\x06\xad\xf0\x60\x4a\x32\x35\xde\x72\x03\x16\x25\x06\x82\x59\x94\xe1\x4c\xb3\x86\x55\x7d\x15\x5e\x05\x7b\x13\xbe\xb1\xe4\x82\xae\x78\xe2\x03\xdd\x93\x82\x8e\x40\xd7\x43\xb2\x0d\xc6\x0f\x5d\xe9\xce\x59\x20\xaf\x6c\x11\x55\xd6\x89\x9b\x77\x2a\xf6\xad\x28\x28\xf0\xa1\xe0\x77\xec\xb8\xf4\x06\xd9\xe1\x1e\xef\xf9\x6e\x9b\x33\x90\x9c\x04\x93\x82\x66\x6a\x91\xe0\xfe\xde\x68\xad\xfa\xcb\xf8\x1c\x77\x6b\xce\x51\xe2\xb0\x9b\x54\x62\x4f\xc4\x8d\x79\x5b\x56\x3f\x76\xea\x51\xea\x43\xda\x0e\x78\x53\xc9\x68\x1a\x72\x1a\xf2\xb0\xba\x6f\x0c\x76\x63\xaf\x1a\xa6\x11\xa3\x32\xbd\xcd\xa2\x69\xdf\x20\xd1\xdd\x72\xad\x3f\xc4\x1e\x82\xff\x1a\x52\xbf\x34\x48\x6d\xf8\xf7\x87\x22\xfe\x7b\xda\x47\x7f\xbf\x0b\xed\x13\x2f\xe9\xe3\xe8\x8c\x37\x25\x7d\x3b\x86\xd8\x8a\x9b\x8a\x43\xac\x76\xfd\xcd\x76\x16\xb3\x17\xab\xd4\x2f\xe6\xcf\x4b\x6f\xb1\x43\x5f\xfe\xf5\x57\xbe\x84\x17\xe2\xd6\xcf\x35\x52\xad\xeb\x7e\x87\x6c\x91\x0d\xb3\x77\x5d\xee\x90\x89\x87\x11\xf3\x4c\x3d\x77\x3f\x6c\x5d\xba\x19\x0f\xb6\x2b\x9c\xd9\x1b\xb8\xb6\x2c\xbe\x0c\x2e\xb6\xb6\xe2\xd8\xf0\x9c\xab\x95\xb5\xdd\x35\xd5\xaa\xde\x8b\x44\xab\xeb\xb5\x17\xbc\xe5\x57\xbb\xea\x4d\xdc\xf5\x69\x6f\xeb\xf7\x8e\xbb\x7f\x5c\xff\xec\x6d\x59\xf1\xee\x4d\x78\x22\x81\xff\xb9\xad\xcb\x52\x3f\x7d\x5b\xa2\xb7\x6f\x4b\xfc\x60\x6d\xe9\x79\xfd\xb6\x54\xcf\xdf\x96\xe8\xfd\xdb\x12\x3d\x80\x5b\x9a\x2f\xe0\x9c\x1a\x1b\x58\xd8\x38\xfe\x51\xbe\xe2\x23\xb8\x63\xef\x2b\xb8\xe3\xd5\x9f\xc1\
x1d\x37\x7d\x07\x77\xec\x3e\x84\x3b\xbe\x83\x97\x70\xcb\x5b\x3f\x85\x3b\x6e\xfc\x16\xee\xf7\x0e\xea\x7f\xdc\xc0\xe2\x6c\x59\x65\x72\x26\x5d\xab\xf0\x1f\x82\x38\x91\xd5\xd9\x12\x9b\x9d\x2d\x0d\x2b\xb1\xa5\xcf\xf0\x6c\xa9\x2d\xcf\x96\xd8\xf4\x6c\x89\x6d\xcf\x96\x96\xf1\x99\xa7\xde\x26\x8b\xe3\x37\xb5\x3f\x3b\xf6\x1b\xa0\x1d\xdf\xc0\x02\xed\xb8\xb1\x09\xda\xb1\xc7\x06\xcd\x2e\x7d\xb3\x35\x52\x61\x86\xd6\x74\x91\x34\x37\x44\xfb\xb6\xc9\x2a\x69\x2f\x73\x0a\x8a\xd9\x71\xd1\xe6\xd1\xf8\xa6\x29\xa1\xc9\x19\x09\x53\x0a\xd6\x0a\xf0\x3a\x30\x48\x42\x70\x60\x4b\xfe\xf9\xe6\xf5\xab\xa2\x58\xbc\xa7\xff\x6f\x49\xf3\x62\x0d\x04\xb3\xcb\x05\x4d\x27\x56\x0e\xf7\x63\xa3\xde\x6f\xb4\x25\x5e\x44\xc3\x7d\x1b\x9a\x7c\xb9\xde\x5d\x33\x22\x45\x96\x42\x9a\x09\x20\xa9\xff\x92\xcf\xd8\xee\x13\x4d\x93\x34\xa3\xc3\x38\x4a\xe8\xda\x35\xb7\x58\x65\x78\x68\xe4\xea\xfe\xfe\xe5\xec\xfd\xcb\xd9\x3f\xf1\xcb\x59\xfe\x6a\x56\xd8\xb0\x19\xcf\x66\xf9\x86\x43\x6e\xf6\x7a\x56\xec\x7d\xc7\x45\x14\x43\x9d\x5c\x9f\x09\x6b\x87\x3f\x4f\x72\xc0\xa2\xe2\x52\xb1\x44\x5d\x64\x1c\x07\x79\x4e\x4e\xa0\xc8\xa9\xe8\x26\xcf\xd0\x4c\x98\x57\xb5\x36\x80\x7b\x23\x58\xa5\x42\xb9\xca\x38\x08\xa9\xf0\x64\xdd\xdc\xc9\x39\x40\xb2\x9a\x8e\xdf\x1e\x7e\xfc\xc0\xce\xd6\x30\x09\xed\x73\x1a\xb5\x39\x69\xb6\x3f\xa3\xdf\x6f\xd0\xef\x9f\xd0\xef\xfc\xd7\x60\x94\xca\x8f\x49\x94\x24\xf4\x52\x7d\xd1\x79\x91\xc2\x53\x46\x99\xb2\x88\xc6\x66\x42\x12\x24\x66\xc2\x3c\x1a\x67\x76\x4a\x1c\x47\x4e\x21\x03\xde\x00\x95\x1f\x46\x91\x69\x16\x24\xa1\x1a\x8a\x91\xf5\x93\xf1\xf5\xd1\xf8\x7a\x67\x7c\xbd\x34\xbe\xfe\xcf\xf8\xfa\x97\xf1\xf5\xd6\xf8\x7a\x61\x7c\xfd\xc3\xf8\x3a\xe6\x5f\x6b\xa7\xe5\xae\x6b\xd8\x1c\xbd\xdb\x7f\xc1\xa6\x78\x48\x76\xb6\x7b\x2a\xf1\xc3\xe1\x4f\x6f\xf7\x3f\x1e\xbf\x7f\xf9\xe9\xf5\xcb\xb7\x3f\x7d\x7c\x35\x24\x8f\x75\x26\xcc\xea\x50\xff\xd4\x39\x25\x94\x33\x24\x5f\x88\x95\xa0\x9d\xa8\x43\xc6\xa7\x17\x47\x3f\xbf\x25\xd7\xba\xa6\x77\x47\xaf\x5f\x33\xe8\x8f\x87\x6f\x5e\x1e\x1d\x7f\x1c\x92\xad\xcd\xcd\xcd\x81\xe8\xa1\xb8\xf1\x7e\x1e\xa7\xe3\xcf\x43\xd2\x66\xac\x33\x2f\xda\x46\xde\xfe\x18\xe2\x18\x0f\xf5\xdb\x46\xfe\x00\x83\xed\xe7\x75\xbe\x4f\xee\xe3\x60\xdc\x6f\x64\x7f\xf5\x8d\x6c\x4d\xb9\x80\xc8\x67\xc1\xce\x5d\x79\x80\x38\xc8\x2e\x17\x45\xfa\xf7\x0f\x78\x73\x18\x43\xda\x23\x1d\xfe\x82\x35\xe8\x05\x18\xb0\x9c\xb6\x37\xb4\x93\xeb\xbe\x01\x28\x2e\xc7\x0f\x54\x45\x12\x79\xf0\x40\xe6\xf6\xa5\xbf\x08\x2e\x26\xcf\xe8\x45\xdb\x7e\x45\x67\x78\xfe\xfa\x81\x6c\xb3\xd2\xb6\xeb\xe3\x6d\xe9\x2e\xd2\x2c\x4e\xe4\x65\xb8\xba\xe0\xb7\x9c\xb3\x13\xeb\xb5\x1d\x07\x95\x38\x62\x9d\xeb\xbf\xa2\x17\x7d\xd0\x5e\x0a\xcf\xbd\x3e\x1b\x23\x86\x15\x39\x6c\xdd\x3a\x3f\xd1\x71\xf5\xdb\x90\x6c\x7f\xf3\x84\x97\x44\x8f\x93\xe5\x9b\x33\xc6\xf2\x14\x8e\x5b\xc3\x6f\xbe\xeb\xb5\x4c\x94\xb7\x86\x4f\x37\xaf\x4f\x7b\xdb\x8d\x7c\x3e\xdd\xf3\xbd\x7b\xbe\xf7\xe7\xe5\x7b\x9a\xed\xf1\x77\xfe\x77\xc0\xf7\x2c\xd9\x7d\x75\xd1\xdd\x23\xb9\xcb\x82\x3e\xc1\x7d\xa5\x50\x43\x36\xaf\xed\x0f\x04\xbb\xd7\xb1\x88\x26\x4f\x31\x00\xfb\x56\x22\xfc\x32\x89\x8a\x37\xc1\x42\x89\x8b\x6d\x29\x51\x0f\x39\x0f\x6a\x6f\x4a\x59\x93\x49\xed\x43\xcd\x16\xdb\x5b\x86\x9c\x3f\x44\x19\x9b\x9b\xaa\xd0\xff\x56\xe4\x8d\x82\xd1\x28\x98\x52\xd5\x12\xce\x43\xc2\xff\xd0\xce\x9b\x7b\xea\x44\xd9\x6f\xaa\xb3\xe3\xf4\x8c\xc6\xc1\x58\x36\x6b\x67\xeb\x33\xc6\xd0\x97\x3d\xf5\x57\x8e\x20\x7e\xaa\x85\xc8\x67\x41\x92\xa4\x89\x31\x6e\x13\x42\x9f\x6b\x86\x15\x10\x35\xad\xc0\xc9\x6a\xe8\x81\xc0\xa8\xd4\xe7\xa5\x61\x35\x50\x5d\x4d\xe2\xec\x36\xf4\x02\x19\x95\xa9\xf3\x98\x3d\x36\x0f\xa0\x7f\x88\x26\xa0\x41\xae\x1e\x38\x04\xfa\xd9\x84\xf5\x81\xe2\xb9\x86\x53\x5f
\x65\xc5\xb8\xbf\x8d\xea\xc6\xd5\x37\x2d\x80\xca\x14\x2b\x94\x61\xc5\xfc\xc6\x56\xda\x11\xc3\x22\x08\x85\x29\x29\x98\x7a\x5e\x2c\xe8\x98\x6d\x5e\xca\x3c\x1f\x1b\x5d\x09\xef\x29\x3e\xcb\x29\x5d\xc5\x88\x32\xb8\x50\x84\xe3\xb2\x6c\xb0\xc6\xb3\x20\x0b\xc6\x05\xcd\x72\xa9\xe2\x87\x7b\x79\x51\x1a\xed\x23\xde\x36\xa2\x69\xd2\x43\xb6\xd0\x64\x73\xcd\xef\xf6\x23\x9a\xce\x0a\x22\x3d\xd2\x5a\xde\x7d\xc5\x18\x0c\x69\x93\x83\xf4\xa0\x77\x79\x0f\xda\xf1\xf8\x18\xe2\x16\x22\x00\x03\x11\x69\xe1\xb5\xaa\xba\x21\xde\xea\xf6\x7f\x49\xa3\x04\x82\x35\x90\x67\x50\x07\x19\x92\xd6\x66\xab\x4b\x36\x04\x70\x89\xe1\xdb\x8d\xe7\x02\xa2\xf5\xfc\xd9\x27\x03\x06\xb1\xe2\x6c\x88\x1e\x6e\x70\x8f\xcb\x37\x9d\x97\x32\x43\x44\xd3\x11\x0d\x6c\x9d\x60\x86\x08\x91\x3c\x5c\x1f\xd3\xd6\xbc\x70\x6f\xcd\x15\xb3\x12\x25\xac\x12\x3f\xb2\xb0\x3f\x6a\x8f\xa3\x24\xd6\xb8\x36\x3b\xe4\x1e\x48\x8e\xf9\xd6\xae\x44\xfa\x19\x0f\xf6\x3c\x18\x90\x1f\xa3\x24\x24\xfc\x71\x97\xe8\xa8\x0a\xd6\xcc\x24\x8a\x56\x4b\xdf\xe4\x83\xed\x4b\x0f\xe2\x47\xcd\xe8\x85\x34\x61\x56\x67\x2e\x96\xc6\x4f\x3d\xec\xc4\x51\x7e\x56\x62\xd5\x6c\xe3\x77\x2f\x60\x5c\x23\x6c\x6a\x76\x49\xb4\xb1\xb7\x8d\xc1\x65\x20\x64\x6c\xdb\xa1\x9b\xea\x44\xac\x1d\x11\xfa\x42\xb5\x30\x21\x1d\x5e\x64\x6f\x8f\x6c\x76\x8d\x53\xda\x28\xa3\xc1\x67\x0d\xca\x46\xb9\xb1\x47\xc4\xab\x72\x36\x83\x07\xb3\x20\x3b\x48\x43\x0a\x35\x78\x0f\x61\x6c\xb2\xa5\x39\x4e\x5e\x64\xcd\x28\x84\x4f\xda\x4a\x24\xb2\xcf\x8a\xfc\x76\x34\x02\xcd\xfd\xf7\x10\xc9\x4d\x66\x3e\x2f\xca\x5e\xa7\x9b\x93\xed\xf1\x31\xdf\x59\x64\x74\x12\x5d\xf0\x08\x5a\x9b\x17\x5d\x36\x0b\xc0\x35\xfc\xee\xed\x45\xa8\xb7\xf2\xd9\xf7\xda\x2e\xc3\x11\x34\x88\x81\x9b\x57\x06\x13\xf0\x85\xf8\x34\x7c\xed\x0b\xb7\xeb\xa2\x1b\x98\x2a\x18\xc5\x0b\xcc\xf3\xd9\x87\xe5\x20\xcc\xb6\xf9\x72\x90\x33\xc2\x5a\xd2\xd4\x31\x49\x33\xdb\x84\x2e\x2f\xb2\xb2\x70\xf8\x68\x46\x19\xd4\x58\xcc\xcd\x7e\xd1\x89\x6e\xb6\xd2\xc1\x3a\x51\x44\x06\x37\xbc\xb6\x69\x10\xd6\xdf\x8d\x3d\x92\xc8\x7d\xe1\x7b\xb2\x4d\x9e\xb1\x93\x0d\xd9\x20\x6c\x3f\x48\x7c\x34\x21\x5c\xc8\xcf\xe8\xc5\x5d\x92\x86\x15\x73\xc0\xa6\x8d\x1a\xd6\xf0\x9b\x11\x87\xc3\x33\x10\x75\xfc\x36\x14\xf0\xbb\x4d\xab\xe5\xb1\x74\xb2\x8c\x63\x85\x86\x01\x3d\xa3\x49\xc1\x1f\x0a\x00\xcb\xff\x25\x4f\x13\x12\x8c\x22\x9b\xc7\x4b\xb7\x89\x1f\xd3\x1f\x97\x71\x6c\xbf\xa1\x94\x8f\x09\x58\xe9\x47\xbc\xb4\xfb\x18\x8a\x37\xec\xb4\xab\x19\xbb\xdb\x86\x21\x48\xb1\xca\xb1\xea\x94\x7d\xf7\xc1\x84\x22\x4a\x42\x7a\x71\x34\xe9\xb4\x3b\xed\x2e\xf8\x86\x7c\xb4\xe5\x79\x0e\xa9\xe0\x1d\x3b\xc1\xe2\x72\x41\x45\x73\x00\x04\x54\x64\xfa\x33\xeb\x44\xdd\x2f\x32\x7e\x70\x9f\xc1\xef\x92\x6b\x21\x8a\x99\x96\x7f\xaa\x15\xb2\x41\xda\x1d\x36\x73\xaa\xf6\x0d\xd2\xee\xb6\x1b\xad\xbd\x30\xca\x17\x71\x70\xc9\xe7\x05\x7c\x8c\x26\x05\x93\x6d\x15\x36\xec\x37\x6b\x17\x90\xfd\x82\x17\xab\x7a\xe1\xca\x6a\x33\x27\xdf\xbf\xbc\x8c\x1e\xb0\x2d\xcd\xa2\x18\x3a\xed\xcb\x60\x8b\x97\x1d\x61\x56\xd7\x25\x8f\x7e\x50\x89\x6a\x5a\xdd\xbe\x55\x3e\x7c\x56\x36\x9b\xce\xcc\x1a\x68\x16\x60\x7c\xb2\xc9\x33\xfb\x4d\xab\x78\x0f\xc6\xd6\x8c\x76\x36\x32\x18\xe8\x81\xa6\x67\x34\x8b\xd3\x20\xa4\xa1\x52\x04\x7b\xd6\x04\x1e\xc0\x47\x4d\x24\x65\x6f\x1a\x07\xe4\xe3\xd1\x8b\xa3\x21\x99\x07\x9f\x41\x35\x1c\x25\x67\xcb\x38\xa1\x59\x30\x8a\xe9\x5d\x0e\x50\x9f\x06\xec\xd7\xbb\x5b\xe4\x11\x41\xd9\xdd\x6e\x3f\xa3\x8b\x38\x18\xd3\x4e\x9b\xb4\xc1\xa9\x1b\x3b\x2d\xb4\xcc\x08\x91\x69\x72\x46\xb3\x22\xd7\xf1\x36\x41\xee\x0b\xe9\x38\x9a\x07\xb1\xcd\x64\xa3\xc4\xcf\xec\x8b\xf4\x05\x2f\xe0\x52\x5e\x65\xec\x4c\xd3\xad\x21\x17\xf0\x44\x4d\xb5\xd1\x1f\x8b\xd4\x0d\x8e\xa9\xc2\xcf\x34\x19\x63\xad\x6c\xcb\x78\xe
2\x5d\x8d\x0b\xd5\x55\x1d\x99\x35\x91\x5a\x52\x77\x7c\x9e\xb8\xdc\x42\x7d\x6a\xee\x28\xc6\x61\x9f\x03\xc4\x34\xcf\x3f\xce\x82\xa4\xb3\x09\x4e\x64\x1f\x71\xab\x73\x61\xbd\x2f\x08\x6b\xab\x0b\xb1\x5b\x51\x8e\x81\xc5\xfd\x25\xb8\x69\x16\xa8\x0c\x92\x4b\xe1\x78\x47\xb8\x23\x4d\xca\xd1\xda\x17\x78\xdd\x4f\x42\xae\xfe\xe7\x34\x14\x4d\x2e\x73\xe1\x48\x3d\x27\x23\x3a\x49\x33\xda\x77\xe8\xea\x95\x38\x3a\x54\xe3\xfe\x4a\xec\x41\x35\xa4\xf5\x0a\xf6\x79\x03\xf9\x6a\xfd\x3e\x14\xa6\x62\xf3\xe0\x82\x87\xad\xbc\x88\x8a\xcb\x21\x79\x0a\x2a\x6c\xb9\xeb\x44\xb9\x70\x69\x0c\x45\xbb\xf6\x26\x83\x26\xb9\xb3\xc1\x20\x76\x8d\xa2\x78\x3a\xab\x0b\x5b\x65\x85\x21\xdd\x19\xa3\x1d\x76\x0a\xe1\x48\x6b\x7b\xab\x80\xf8\x4a\x7f\xff\x70\xf4\xb6\xaf\xb0\xcc\xdb\xd3\x0e\x2c\xc1\x75\x6c\x4e\x02\x3b\x94\x67\x8f\x2c\x82\x3c\x67\xbc\xab\x98\x65\xe9\x72\x3a\x33\x57\x80\x1a\x88\xa0\x35\xa8\xd5\xbd\x9c\xd4\x5c\xed\x11\x9c\x96\x3c\x32\x6f\xe9\x88\x25\x80\x78\xdb\x61\x56\x57\x53\xdb\x99\xb4\x1f\x45\x15\x90\xce\x7a\x94\xff\x18\x25\x51\x41\x2d\xa4\x5b\xdd\x00\x09\x11\x75\xc2\x94\xb2\xdc\x8e\xa2\x75\xf1\x5e\x6c\x2a\x7c\x1d\xb0\xf3\x52\x02\xdc\x9f\xfc\x4c\x6d\x41\x6a\x4a\x0b\x08\x57\x7c\x34\x39\x4e\x22\xaf\xb6\x0b\xca\x16\x33\x2a\x7e\xa8\x05\x47\x8a\xb4\xa7\xb4\x53\xca\x21\xba\x37\x6a\xa3\xea\x87\xaa\xa6\xc3\x3b\xd3\x85\x22\xe0\xb6\x2b\x27\x34\xcb\xd2\x4c\xba\xa4\xe1\x3d\xce\x49\x92\x16\x64\x9c\x66\x19\x1d\x17\xc3\x73\xb5\x6e\xcc\x5e\x1b\x0b\x88\x15\x94\x24\xb0\xe4\x99\xf0\xdf\x33\xf8\xaf\x5f\xa4\xaf\xd3\x73\x9a\x1d\x04\x39\xed\x00\x73\xe1\xfa\x5e\xcd\xc7\x18\xd4\x3f\xc4\x2d\xb3\xb8\xba\x39\x61\xff\x9f\xea\xa3\x38\x02\xc1\x7e\xbf\x31\xe1\x71\x4f\x64\x09\x3d\x27\x2f\xd9\xa8\x3a\x6d\xb8\xea\x85\x8e\x80\xad\xea\xbf\xdb\x05\xa1\x17\x51\x5e\xe4\x3d\xb2\x88\x69\x90\x83\x58\x0c\x23\x4f\x13\x85\xaa\x49\x1a\xc7\xe9\x79\x94\x4c\xa1\x64\xce\xb8\xa0\xb5\x8c\x44\x0f\x7b\xe0\x5f\xa1\xa7\x9f\x7d\x54\x44\x89\x55\xbd\x07\xef\x57\xa6\x57\xe1\xe0\x33\x85\x45\xc8\x19\x3e\x5c\x46\x47\x60\x4f\xab\x98\x2c\x27\x01\xc6\x6a\xc1\x57\x05\x9f\x78\x8e\x5a\x41\x59\xef\xd2\x3c\x8f\x46\x31\x9f\x42\x70\xa1\x21\x8c\xfa\x3e\x1c\x32\xf9\x32\x2b\xf8\x4f\x26\x52\x4b\x6c\xbd\x9c\x4c\xa2\xe9\xa5\xf8\x38\x92\xa4\xf4\x88\x7c\x66\xcd\xf3\x3f\x7d\x5d\x05\x9f\xe2\x66\x8b\x83\xcd\x35\x98\xba\x5c\xe2\x9f\xf2\x2a\x8a\xc3\x4d\x35\x9c\xba\xff\xe1\x9f\xe2\xc2\x48\xe7\xf1\x02\x8f\x1e\xa9\x85\xa9\xef\x71\x78\x81\x5f\x83\x51\x6a\xe4\x79\x4a\xc8\x7b\x18\x3e\x00\xb8\xbe\xc1\x79\xbc\x04\xea\x05\x2a\xcc\x3f\x05\x16\x10\x08\xb1\x20\xd0\x07\x5c\xa6\x08\x84\x50\x8d\xc3\x29\xfa\x5d\xc8\xdf\xb6\x48\xc1\xf9\x82\x75\xf2\xbd\x52\x72\x3a\x27\x87\x71\x90\xb0\x93\x41\xa0\x58\xb3\x48\x17\xba\xb2\x34\x23\x01\x79\xf5\xf2\x9f\x70\x08\x97\xd2\xda\x9d\x31\x14\xb5\xcf\xca\xa3\xdd\xcf\x33\x2a\xfd\xec\x05\xe8\x2a\x57\x44\x41\x41\xc1\x02\xd8\x7a\x0a\x72\x72\x4e\xd9\x02\xd1\x0e\x56\xe4\x30\xd6\x90\x34\xf4\x33\x35\x8e\xe4\x72\x9c\x98\xa5\x70\x51\x87\xd5\x2c\x99\x04\x16\x8a\x78\x09\x1c\x35\xd6\xe4\x54\x9c\x3b\x59\xf2\x10\xde\x86\x45\x05\xe4\x99\xd1\xc8\x10\x7f\x21\xc9\xaa\x76\xf9\x06\x1c\xc7\x9e\x15\x7c\x4e\xa3\xfb\x05\xfb\xdf\xb2\xc4\x8b\xb4\x6a\x81\xa3\xf3\xc2\x6f\xb6\xd4\xd9\x6a\xfb\x1d\x17\x3b\x20\xe4\x6e\x96\x7a\x11\xcd\x69\xfe\x7b\x2c\xf3\x44\x28\x17\xd9\xe2\x56\xaa\xaa\x9c\x1f\xf3\xd9\x16\x4d\x94\x29\x8b\x43\x0d\xaa\x23\x8d\x68\x42\x53\x81\xbc\x3a\x64\x53\xaf\x49\xc1\xac\x4d\x39\xb9\xd2\x15\x68\x00\x85\x7e\x6c\x7b\x63\x4d\x42\xcd\xf1\xe7\x1b\x26\x03\xc2\xaa\x97\xe5\xc5\x8f\xab\x2b\xb2\xb9\xeb\x3d\xdb\x88\x7a\x9d\xb3\x09\x4f\x37\x0e\x44\x02\xe5\xb2\x27\x0f\x1e\x10\xf1\xdb\x27\xf3\xb3\x26\xed\x5c\x7c\xc0\xf0\xb9\x
40\x33\x44\x31\x51\x58\xa9\x44\x36\x2f\xda\xbd\x76\x1b\xdf\xb7\x58\x8e\xd2\x7c\xa5\x31\x9d\x94\x8a\x74\x89\x0c\x1d\xeb\xa1\x14\x45\x27\x1c\x4c\x06\xf1\x50\x27\x31\x61\x35\x09\xb0\xc5\x79\xda\xce\xc9\x58\x85\x74\x71\x48\xcb\x8c\xf8\xd2\x84\xbe\x4a\xa8\x06\x9d\x91\xcd\x3a\x4d\x7d\x97\x41\x32\x0c\x7c\x84\x28\xcb\xb7\x5e\xe1\xc5\x77\x07\x39\xad\x53\x05\xb0\x46\xa2\x76\xea\x5a\x93\x5b\xfe\xb5\x60\x96\xfb\x8b\x78\x99\xeb\x2e\x88\x6f\xaf\x77\x43\x05\x64\x2a\x92\x66\x74\xfc\x39\x97\xa7\x26\xce\x22\xe5\x2d\x67\x2e\xde\xca\xc5\x97\xe0\xc6\xd7\x1b\x8c\x98\x93\xfc\xd8\x1b\x88\xd8\x0c\x29\x8c\x1a\x60\xeb\x3f\x40\x05\xb0\x63\x3b\x08\xae\x24\xa6\xce\xaa\xdc\x98\x39\x51\xde\xd2\xa0\x0d\xfe\xb3\x79\x71\xb2\xf9\xe8\xbb\xe0\xd1\xe4\xf4\xcb\xe3\xcd\xeb\xff\x19\x44\xfd\x82\xe6\x85\x02\x5f\x61\xf0\x15\x63\xfe\x4a\xa3\x6d\x30\x4e\x50\x00\x0c\xfe\xd3\xd9\xbc\xe8\x3e\xab\x1c\x28\xa6\xc0\xc1\x40\x07\xcb\xe2\xe1\xb0\xa0\x7b\xdc\x85\xb0\xb0\x3a\x9c\xc3\x43\x5e\xb6\x21\xa3\x61\x9b\x14\x2c\x3c\x01\x12\xd3\x57\x85\xb7\x33\x66\x5f\x18\xa3\x43\x60\xfb\x8f\x7e\xf4\x82\x59\x5d\x86\xd8\x5d\xed\x1c\xbc\x1d\xe7\x73\xf6\xef\x38\x58\xe4\x20\x3c\x88\xdf\x3d\xec\x9e\xd1\xee\x2d\xf7\x3a\x8f\x3a\x6b\x54\x7e\xa4\xf6\x76\x8e\x19\x1a\x8c\x67\x64\x1c\xe4\x4e\x35\x51\xce\xa9\x64\x39\x17\xb3\x83\x48\x89\xaf\xb1\xe6\x04\xc5\xdb\xca\x97\xf3\x39\x0d\x4b\x69\xcb\x6a\xee\xae\x69\xcc\xaa\xbe\x8a\xd6\x06\x03\x3e\x20\x0b\x39\x81\x2a\x29\x7e\x39\x1b\x90\xd6\x86\x08\x88\x57\x41\x0e\xae\x68\x66\xc1\x8e\x6c\xc4\xd4\xa4\x48\x59\xc7\xe7\xee\xe5\xf1\x26\xdc\x50\x12\x8b\x3c\xc0\x75\x77\x31\x23\x31\x85\xc7\xd4\x28\xfe\xde\x62\x41\x33\xd6\x5b\x39\x0f\x09\xc4\x2e\x9c\x46\x3c\xbc\x5d\x90\xd3\x79\xb0\x60\xf3\xb1\x65\xe8\xf9\x3a\xca\x7e\x01\x75\x1a\x9c\xb2\x6d\x3d\xe9\x92\x1f\xc8\xb7\x6c\x37\x17\x59\x27\xd1\x69\xbf\x48\x8f\x59\x43\x42\x13\xb4\xbe\xb7\x87\x32\x81\xe2\xab\x2b\xfc\x7e\xcf\x53\x23\xd6\x2d\x59\x35\x96\x78\x0a\x47\x6b\x52\x73\x7c\x83\xef\xeb\xe8\x0b\x8a\x4c\xdf\x88\x83\x9e\x24\xc7\x12\x5a\x2c\xd2\x3b\xa5\x45\xa9\xbc\x56\xfb\xf2\x0a\xa4\x88\x54\xc6\x8a\xfc\xec\x47\xd7\xa2\x9d\x76\x5b\xd0\x92\x4b\xa7\x06\x82\x6f\x44\xb5\x08\x68\xec\xf4\x9e\x55\x54\x41\xc7\xb2\x17\xe8\xd6\xdd\xa6\x69\x60\x79\x33\x6d\xf9\xc7\xa8\xf4\x3b\x76\xee\x99\x70\xff\xf9\xf2\x22\x4e\x91\xb8\x41\xc1\x75\x04\x6c\x92\x90\xdd\xff\x8d\xbd\x52\xea\x46\xf4\x65\xb3\xd2\xda\x9a\x2a\x69\xd3\x2a\x69\x4a\x9e\x5a\xd2\x34\x18\x69\x91\x32\x89\x32\x0a\xc9\xf6\x26\x77\x19\xf4\x48\xdc\x0f\xf2\x36\xf9\xf3\x84\xcd\x0b\xc2\x6d\x3b\x5c\xdb\xae\x5a\x52\xf6\x5f\xf6\x0b\xe7\x03\x98\x6f\x2b\xfb\xad\x66\xf4\x6b\x49\x33\xde\x6d\x4f\xfa\xd4\x95\xf8\x40\x32\x3c\xdf\x6b\xab\xb6\x59\x4f\x45\xe2\xee\xcb\x57\x9f\x09\x21\x23\x2f\xc2\x8d\x92\xaa\x51\x3f\xa6\xea\x91\xc7\x9b\xfe\x4b\x02\xe9\x87\x58\x1e\xa6\x73\x2d\xe5\xd6\xc7\xd8\xf4\x9e\x24\x7d\x37\x5f\x46\xdc\x4d\xbe\x93\xf9\xce\x80\xa4\xc3\xbb\x61\x89\x85\xb2\x6f\x49\x5e\x04\xc9\x98\x71\x11\x5d\xf8\xea\x4a\x21\x4d\x14\x86\xc7\x6b\xf0\xcb\xf0\x9b\xe1\x4d\xe5\xa6\x11\xc0\x8b\x54\x95\xed\xa6\x88\x92\xe7\xe1\x3a\x2c\x7d\x70\x6c\x8b\x1a\xa2\xc8\x13\x21\xc9\x8b\x1f\xc1\x5a\x45\xcf\x60\x34\xbc\x6f\xed\xbb\x43\x0f\xef\x4b\x63\xdc\xc8\x1e\xd7\x63\xe7\x47\x6d\x43\xb2\x2a\x7e\x64\xd1\x1b\x61\x48\x96\x68\x37\x1c\x11\xeb\x53\x51\x3f\x1c\xde\xf5\x1b\x0c\xe6\x48\xf4\xad\xe1\x62\x60\xf2\x45\xb2\x8c\x63\x08\x92\xd0\x71\x57\x08\xd8\x6d\x83\x0a\xc3\x33\x76\x71\x5d\xdb\x70\xe4\x23\xde\xd9\x06\xec\x80\x03\xde\x84\x19\xf0\xa4\x1b\x4d\xa4\xe8\x5e\xd3\xd1\x80\x07\xc0\xfa\xb1\x38\x01\x35\x1a\x8e\xc4\x0d\x8a\xd1\x90\xa5\x41\xc1\xca\x31\xd8\x07\x12\xbe\x8f\x82\x89\x5c\x2a\xa9\xce\x1c\xc4\xdf\
x73\x73\x5d\x69\x03\x84\xca\x31\xb0\x62\xf6\xa3\x01\xe5\x39\x29\xbb\x74\xf7\xa9\xf5\x75\xb8\x98\xe4\xaf\x70\xb5\x2d\xeb\x35\x19\x43\xd4\xa7\x0e\xf5\xec\x6d\xf8\x38\xba\xca\xa8\x03\x31\xee\x97\x6c\x02\xe9\x72\x4e\x46\x71\x3a\xfe\x4c\x66\x34\x08\x69\xc6\x3e\xd2\xb9\x6d\xb4\x11\xe5\xcf\x59\xb2\x4f\x68\x98\xd1\x0b\xe5\x16\x1d\xca\x92\x49\x14\x17\xb6\x32\xd3\x43\xb0\x00\x6b\x78\x1f\x66\x29\x95\xe7\xfc\x6f\xb6\xb6\xf5\x41\x9f\x83\xd7\xe0\xa5\xfc\x98\xce\xeb\xc2\x55\xf9\x4e\xe9\x2e\x94\x2f\xe0\xb0\x3e\x69\xaf\xb9\xfd\xb8\xc1\xcc\xc4\x29\x13\xf3\x16\xd1\xd8\x9d\x87\x8f\x2c\xb9\x6e\x1e\x0a\x05\x54\x31\x01\x50\x93\x31\x01\x50\xac\x72\x02\x9e\x3c\xd6\xf8\xe7\xd0\x37\xc6\x3f\x54\x85\x6b\xf2\xa1\xdf\x01\xba\x11\xf6\x4b\xfc\x8e\x08\x91\x6f\x28\x7f\xf4\x64\x2a\xbc\xf9\x19\xaa\x5f\x3c\x1d\x04\xc3\x21\xff\x4f\xa6\x08\x03\x92\xa1\xfe\xc9\x73\x90\x71\xc9\x10\x7f\xc8\x72\xc7\xc5\xe4\xe9\x50\xfc\x2f\xd3\xc0\x5c\x65\x28\x7f\xe8\x7a\x38\xac\xfc\xa5\xd3\x05\xbc\xfa\x29\xea\x71\x6d\x6e\x87\xbe\x44\x0e\xed\x9a\x72\x0e\x3d\x69\x06\xac\xb4\x9a\x1c\xda\x09\x72\x1c\x3f\x53\x18\xc5\xcf\x14\x8d\x01\xd2\xc4\x0f\x09\xa7\xa4\xc5\x21\xfe\x90\xb9\xa6\xca\x7a\xe8\xa4\x28\xac\x71\x41\x7d\xa8\x7f\xf2\x1c\x24\x1d\x0f\xf1\x87\xcc\x35\x4e\x22\x43\x3b\x41\x42\xa1\x7c\x2b\xc7\x3a\xba\x0f\xdd\x24\xd9\x43\x07\xd2\x49\x92\x75\x4a\x61\x6c\x88\x7e\xe3\xfe\x26\xd3\xa1\xfa\x25\xd3\xf9\x9e\x3a\x54\xbf\xd4\xe8\xf9\x7a\x1f\xea\x9f\x6a\x4c\x6c\x97\x1c\xca\x1f\x32\x95\x6d\x58\x43\xf1\xbf\xaa\x83\xf1\xbb\xa1\xfc\x21\x53\x81\x6d\x0c\xe5\x8f\x1e\x2c\x30\xee\x9f\x4e\x3c\xea\x6e\x0d\xb7\xbe\xeb\x55\xba\xb7\xe9\xb5\x96\xc5\xe4\x69\x6b\xf8\xf4\x9b\xeb\xd3\xde\xf6\x56\x13\x87\x0f\xe6\x12\xde\xe3\x0b\xb8\x25\xfc\x1c\xb4\x86\xa4\xb5\xd9\xdf\xde\xec\x6f\xb5\xd6\xae\xa5\x27\xb8\xed\x46\x81\x8a\xef\x1d\x49\xdc\x3b\x92\xf8\x2b\x38\x92\x10\xb5\xac\xb9\xae\xe0\xfe\x4e\x27\x93\x8c\x5e\x92\x9f\xa3\x78\xfc\x99\x92\xef\x7f\xa1\x93\x89\xed\x4d\xa2\xa1\xc3\x38\x00\x8b\x82\x84\x1c\x31\x89\x3b\x00\xa8\x28\x48\x5c\xb0\x1f\x83\x11\x03\xfb\x47\x3a\xa5\x71\x5e\xd0\x38\xa6\x19\xf9\x7e\x02\x89\x2e\xf0\x4f\xc1\x19\xf9\x39\x4d\x43\xf2\xfd\xb4\xd4\xcb\xc5\x63\xed\xdd\x47\xb8\x82\x7c\x13\x24\xc1\xd4\x74\x3d\xd1\x1f\x30\x2c\x0c\x32\x0e\x30\xe7\x00\xd2\xc5\xc4\xe1\x08\x0e\x47\x36\x70\x34\x0a\x12\x09\xf2\x12\xac\xf8\x6d\x08\x2e\x79\xe5\x03\x5a\xcc\x24\xe0\x8b\xe7\x15\x70\xe1\x48\xb9\x9b\x9d\x55\xd5\x97\xcf\x54\x7d\x6f\xc1\x31\x79\x19\x60\x42\x0b\x09\xf8\x8e\x66\x39\xbc\xa4\x2a\x87\x5e\x08\x10\xd5\x89\xf3\x20\x9b\x57\x75\x83\xe5\x2b\x60\x5a\x14\x10\xb4\xc9\x85\xcf\x45\x96\x04\x95\x5c\xc5\x80\x94\xec\x82\x9d\xa8\xb4\x6f\x8f\x28\xb6\x2a\x44\x51\xe5\xcb\x5d\x84\x70\x20\xe9\x8c\x49\xbc\xdb\xa0\x49\xe8\xe9\x1b\xcf\x90\x60\xcf\xe1\xc4\xe4\x42\x8d\x58\xba\xc2\x64\x96\x2e\x68\x56\x5c\x7a\xe0\x16\x22\x4b\x82\xbe\x2a\x8a\xc5\xbb\x2c\x3d\x8b\x42\x2f\xb9\xb1\x85\xba\x10\xd9\x8a\xd8\x16\xe3\x8a\x12\xd1\x62\x6c\x17\x68\xe6\xd0\x70\x6d\x4d\xc9\xea\x3f\xd3\xd1\x0e\xe9\xc8\x6a\x4c\xa7\xbc\x99\xbd\x42\x12\x7a\x6e\x2d\x1b\x5d\x12\xf9\xe7\x15\x91\x56\x51\xcf\x25\x14\x02\xa2\xfc\xa9\x0b\x3d\x67\xcb\x05\xfc\xf4\xe3\x2a\xc2\x91\xc8\x7c\xf1\xdc\xc9\xcb\x67\xb2\xe4\x87\x99\x5b\x32\x81\x35\xc0\x72\xdf\xd2\xc2\xc9\x5d\x68\xc2\x67\x20\x72\x1d\x38\x70\xa3\x5f\x7f\x95\x6d\x30\xba\x76\xfb\xa0\x09\x1c\x80\xc4\x67\x07\xc3\x68\xca\xd6\x47\x8d\x60\x11\x0d\xd5\x66\x28\xfe\xe7\x47\x0e\xdc\x49\x81\xad\xdc\x28\x8a\xc9\x67\x68\x7c\xf5\x14\x0c\xa2\x97\x21\xfe\x70\x9a\xf8\xa4\xd6\x00\xff\xe1\x0c\x50\x00\x74\x74\xfb\x82\x9c\x23\x9a\x0f\xd1\xef\x0e\x37\xe6\xb9\xee\xee\x32\x89\x69\x30\x00\x0f\xbc\x39\x25\x7a\x0c\x29\xdf\x89\xc1
\x25\xd0\x1a\x23\x37\xcf\xf8\xea\xc6\x56\x3a\x2e\x26\x34\xca\x3a\x65\x38\x4d\x8a\x29\x0f\x87\x0c\xae\xa7\x71\x5c\x78\x65\xd2\xf6\xf4\x25\xa3\x3c\x56\x84\xee\xc5\x67\x4a\x17\x87\xf9\x87\xcb\x64\x1c\x25\xd3\xca\xae\x40\x59\x0b\xbe\x19\x05\x7a\x3a\x82\xf9\xc2\x73\x6d\xbf\x62\x41\xc9\x57\x30\xdc\x9b\x14\x7c\x79\x60\xe4\x8b\x59\x09\x05\xdf\x1e\x38\xf1\xec\x5a\x82\xb1\x4f\x07\x0a\xbf\xc0\xe5\x80\x2a\xc5\x0b\x6b\xd4\x29\x13\x3c\x6d\xeb\xe7\x54\xb2\x79\x91\xe2\xad\xd5\x86\x46\x69\x9e\xba\x31\x2e\x65\xed\x55\x38\xe5\x16\x8e\x12\xf2\x67\xea\x1f\x19\x86\x12\xdf\x0e\x1c\x36\x6c\xe1\x90\x2a\xc5\x03\xeb\xde\x0a\xcb\x32\x07\xf6\x6d\xa1\xd3\xe7\xb2\xb2\x4e\x8e\xa7\xdd\xc3\xe7\xfb\x6f\x51\x63\xec\xd3\x81\xd2\xde\x69\x38\x98\xf8\xf6\xc1\x49\xc7\x29\x0a\x10\x12\xd8\x2e\x66\x2f\x7c\xbe\xf5\xe3\x87\xdc\xfc\x52\xc8\x74\xae\x68\x5e\xd7\xc1\x9d\xb4\x0d\x59\x76\x7d\x1a\x46\x19\xa8\x8a\xc7\xc1\x02\x1e\x5f\xa0\x0b\x4c\xcf\x8c\x1e\x1e\xec\xbf\x33\xd6\x3e\x2b\x87\x2d\xe4\x22\x2e\x4a\xb2\xe5\xcb\xa4\x4a\x9e\x6f\xbc\xf5\x64\x10\x7d\xd1\x8c\x5c\xd9\xe0\x4f\x46\xf1\xdf\xaa\x80\xa3\x27\x8a\x77\xc3\x5e\x27\xc4\x91\x8e\x79\xe7\x9c\x80\x0e\xa6\x2d\xf7\xa4\x24\x0d\x69\xbb\x67\x40\x4c\xc1\x2e\x64\x48\xda\x4c\xe8\xf8\x34\x8e\x23\x9a\x14\xff\xe0\xe0\x6d\x7d\x27\xdd\xed\xdd\xa4\x35\x5a\x9c\xa7\xd9\xe7\xb2\x06\x13\x5a\x7c\x12\xa0\x16\x88\x19\x2f\x60\x68\xaf\xf2\x5b\x76\x8b\x0a\x85\x76\x59\xbf\x68\x31\xfb\x04\x73\x3d\x4e\xe3\x7f\xfc\x0e\xfd\x3b\x9f\x45\xf9\x42\xb9\x46\x76\xba\x97\xcf\x66\xb7\x46\x1b\xfc\x3c\xf5\xee\x25\x51\x7e\x90\x26\x09\x77\xd9\x84\x96\x5b\xd7\xa0\xbd\x8e\x77\xbb\x7c\xf0\xc0\xbb\x8d\xe2\x2a\x3b\x5d\xff\x0e\xc6\x9d\x14\x48\x99\xbc\x94\xe6\xc1\x38\x14\x02\x27\x08\x89\xc6\xab\xb7\x65\x75\x4b\x67\xa2\xf8\x84\xc0\x55\x4e\xc6\xc1\xa2\x35\xdc\xde\x64\x49\xf8\x48\xd2\x1a\x6e\x6f\xb1\x34\x7d\x1c\x68\x0d\xb7\x1f\xab\x14\x2e\x3a\xb5\x86\xdb\x4f\x55\x12\x16\xee\x5b\xc3\x9d\x6d\x95\xc1\x56\x78\x6b\xb8\xb3\xa3\x13\xb4\x50\xdf\x1a\xee\xe8\x4a\xf5\xb1\xb0\x35\xdc\xf9\xd6\x49\xa6\xc5\xac\x35\xdc\x79\xea\xa4\x27\xb4\x68\x0d\x77\xbe\x73\xd2\xa5\x20\xdc\x1a\x3e\xde\x74\x32\xf3\xd9\xac\x35\x7c\xbc\xe5\xa6\x33\x59\xb8\x35\x7c\xac\xbb\x2f\xcf\x38\xad\xe1\xe3\x6f\x54\xa2\x79\x70\x6e\x0d\x1f\x3f\x51\x59\x52\x6a\x69\x0d\x1f\x7f\x5b\xad\xdb\xbb\x3e\xed\x6d\xef\xdc\x6b\xde\xee\x35\x6f\xff\x2d\x9a\xb7\x20\x8e\xc1\xbf\xc4\xed\xdc\xb8\x22\x05\x97\xa3\x0a\xf1\xe9\x42\x64\x94\x98\x97\x67\xdc\xa2\x1f\xe9\x18\xa0\x37\x12\x4e\xc7\x8c\xa9\x0b\x8e\xe4\xea\x69\xbc\x8a\x9a\x1f\xe1\x72\xd7\xaa\x0c\xd2\x24\xc4\x39\x0f\x7d\x64\x82\x48\x56\x24\x32\x95\x73\xd7\xfd\x38\x36\x86\x62\x0a\x46\xe6\xd1\xaa\x07\x37\xf5\x3d\x62\x99\x96\x95\x28\x3d\xcc\x04\x7c\x44\xfe\x85\x5f\xce\xb3\xff\x70\xb2\x63\x2e\xc9\x37\x21\xa7\x87\xd5\x51\xbe\x2d\xa9\x55\xba\x03\xdf\x53\xbf\xae\xae\x20\xfc\x0d\xb1\xdd\x3e\xb0\x44\x48\x3d\x69\x33\x29\x14\xc2\x0a\xb4\x7b\xa4\x5d\xa4\xfc\xe7\x69\x9f\xa3\x19\x85\x3b\x9c\x78\x6e\x43\x45\x33\x27\x93\x53\x30\x70\x51\xf6\xa1\xe2\x86\xb4\xeb\x89\x99\x6d\x55\xc3\xfa\xc3\x8a\xef\x21\xe2\xe1\x1e\x74\xa0\x23\xfc\xbc\xa4\x63\xe0\xe9\x06\xa5\xcd\x82\x7e\xb7\x05\xae\x28\x34\x5e\x0d\x3c\x9b\x8f\xbb\xb0\x73\x8a\x2a\x8c\x7b\x82\x16\x87\x41\x11\xc8\x11\xb0\xdf\x7d\xf6\x0f\xd9\x43\xbf\xaf\xae\xc0\x28\x56\x01\xc0\x55\x72\x2e\x41\xc4\xd7\xd5\x95\x0e\xbe\x09\xda\x46\xd6\xb4\xbc\x23\x47\x80\x27\x9b\xa7\xfd\x9c\x31\x04\xe5\x61\x9d\x41\xcf\x85\x80\xa3\x29\xcc\x9d\xae\x5f\x3c\xd3\x85\x5b\xd9\x13\xa6\xb6\x42\xba\x73\x2f\x6d\x3b\xbf\xa8\xe7\xe9\xdd\x93\xcd\x53\xf4\xf0\x6a\x1d\xda\xef\x92\x2f\xf0\xd4\x21\x48\x92\xb4\x20\x93\x28\x09\x79\xbf\xa2\x64\xca\x1b\x7a\xa6\x9a\x1f\xa
7\x49\x9e\xc6\xb4\x7f\x1e\x64\x49\xa7\x8d\x4b\x70\x67\x39\x8c\x15\xc7\xe9\xb4\x8d\x4c\x5f\x45\x8f\x19\x2a\x1c\x87\x4b\x54\xb0\x21\x1c\x98\x0b\xe6\xae\xe3\x5b\x9d\x3d\xde\xad\x9e\x49\x10\xe6\x11\x0a\x6a\x94\xbe\x0e\x61\x8a\x1b\x2c\xc7\x0b\x3a\x66\x12\x80\x67\x3d\xf6\xc0\x21\xd3\x28\x18\x7f\x56\x21\x44\xc1\x13\x81\x38\xec\xca\xeb\xd6\x4e\x90\x4d\x97\xf0\x12\xe4\x44\xfd\x42\xce\x78\x4c\x2b\x74\x59\x23\x84\x7e\xae\x2c\x86\xdd\xc6\x75\x1c\x08\x36\xf1\x5b\xa6\x1b\x0b\xcd\x36\x92\x65\x1c\x3b\xe8\x4e\x25\xa5\x09\xe7\x77\xfa\x00\x2c\x21\x26\x28\xc8\x1a\xd7\xcc\x02\x26\xfb\xa3\xc8\x54\x1a\x22\xf1\x9b\x73\xf6\x4e\xda\x83\x83\x52\xbb\xe7\x65\xac\x3d\xc9\xde\xd9\x61\xab\xd3\xed\xe9\x86\x10\x86\xeb\x67\x2a\x28\x8a\x60\x3c\xfb\x98\x1e\x48\x3f\x58\x78\xca\xa4\x73\x2c\x7c\xe6\xd6\x53\xcb\xc7\xcd\x3f\x9d\xe1\xc8\xa2\xfd\x20\x8e\xd5\x7e\x22\x80\x4b\xce\x14\x4e\x37\xd5\x01\xc3\x73\xc2\xf0\x1e\x31\x80\x54\x5b\xc3\x6d\x90\xee\xf9\xaa\x6f\x0d\xb7\x41\x76\xc7\x21\xdb\x76\x00\xd8\xda\x08\x5b\xc3\xc7\x3b\x4c\x64\x7e\x7c\x2f\x32\xdf\x8b\xcc\x7f\x6d\x91\x19\x45\x7b\x81\xb3\xf7\x5d\x85\x7b\xf9\x7b\x9e\x26\xd9\x62\x6c\xca\x9b\xbf\xf0\x44\x75\x75\x98\x65\xa9\x2d\x02\xf3\x34\x25\x89\xba\x2a\x0a\x36\x58\x43\xc8\x74\x64\x4c\x40\xc7\xa7\x52\x49\x53\x64\xe4\x22\xae\x77\x8d\x9f\xc0\x20\x0c\xa5\x4b\x47\xc6\x8e\x45\x61\xf0\x92\x0d\x5d\x13\x09\x96\x45\x60\x10\x86\x1e\x1b\x5b\x22\xc6\xcf\x0b\x15\xda\xba\x75\xb0\x06\xe3\xc4\xac\x38\x0c\x7d\x32\xb7\x6f\xe0\x39\x0f\x0a\x2e\x21\x6a\x47\x24\x99\x76\x55\xff\x05\x8c\xb7\x6b\xbe\xfd\xdc\x74\x2e\xa0\xf0\x6b\x74\xd3\x9d\x02\x7d\x4f\x94\x84\x5c\xcd\x24\x61\x7b\xa8\x6e\x9a\x65\x3d\x21\x89\xe6\xae\x4c\xcc\xc9\x87\xff\x12\xc2\xa2\x06\x10\xf8\xc1\x1e\x26\x15\x2a\x7b\x04\x5e\xb7\x97\x3c\x60\x13\x55\x9e\x00\xcc\x29\x3e\x1e\x94\x0a\xec\xbc\x48\x49\xb5\x4c\xac\x91\xfd\x11\x95\xf6\x1d\xd9\xc7\x2e\xb0\x2e\x16\x51\x3f\xca\xff\x11\xc4\x51\xf8\x9e\xe6\x8b\x34\xc9\xa9\x68\xca\x79\x7c\xe7\x8c\xc1\xdf\x5e\x87\xaf\xb1\xfe\x61\x72\xe6\xad\x75\xd7\xa9\xf4\xda\xed\x5f\x69\xe5\xdc\x65\x93\x33\x58\xbe\xe7\x82\x6b\x08\x5f\x86\x68\xbc\x2f\xfa\x00\x4e\x23\x70\x82\x13\xc4\x5e\x4f\x85\x3a\xdf\x10\xbf\x28\x01\x94\xa5\xf5\x93\x7c\xf0\xad\xe1\x36\xe8\xd1\xc4\x8a\x6c\x0d\x77\xc0\xea\xad\x51\x90\xef\xfb\x0d\xff\x7e\xc3\xff\xf3\x6e\xf8\x7a\xbf\x57\x62\xf9\x1d\xa9\xc8\x1a\xea\xaa\xd8\x89\x27\xb3\xc0\x72\x21\xeb\x0f\x20\x73\x55\x75\x9a\x84\x43\xef\xa6\xb0\x1e\x4c\x3e\x88\x12\xd0\xfb\xe8\x10\x82\xc0\x94\xc6\xd0\x88\x38\xee\xdb\x3f\xb9\x7a\x09\x3f\x32\x83\x6d\xde\x7e\xa7\xcc\xe1\x0e\x34\xd8\x3b\x09\xa5\xe4\x02\x30\xf6\xbd\x26\xd2\x95\xb3\x99\xea\x6d\x40\x38\xfb\xf5\x57\x6d\x3e\xf5\x1c\x45\x3d\x51\xce\xba\xd5\x09\x46\x91\x47\x0d\x82\xdc\x3e\x13\xcb\xcf\x32\x8f\xef\xbd\xb7\x47\xda\xa8\x4f\x6d\xf2\xe0\x81\xe1\xc7\x19\x9d\x9b\x79\xb3\x86\xb3\xff\xeb\xae\xb5\x0d\x57\x35\xe8\xf1\x0c\x4d\x3a\x90\x58\xb2\x5d\x43\x1e\x77\x18\xed\xd9\x19\xac\x8a\x18\x58\xee\x69\x1a\x68\x4f\x1c\xde\x39\x42\x39\xa8\x42\x23\xd2\xf2\x48\xed\x55\x03\xe9\x51\xc5\xf3\x12\x9e\xa2\xf8\xd1\xda\xfb\xb2\x29\x08\x43\x49\xc3\xb9\x3e\x86\x63\xda\x90\x69\xd7\xaa\xa6\x52\x7a\xe2\xa4\xe2\xaf\xb2\xf2\x64\xaf\x8f\xeb\x37\x27\x14\xf4\x0a\x71\x95\xd9\xc7\x9a\x2a\xa5\xfd\x51\xfd\xf9\x44\x8b\x99\x54\x37\xeb\x4e\x9a\x7e\x2f\x6a\x55\xa9\x13\x47\xcd\xa1\x11\xa0\x55\xa5\x0d\xe6\x95\x73\x8b\x46\x93\xca\xf9\xcd\xdd\xcd\xa8\x5d\x5f\xbd\xa2\x46\x32\xbc\xbb\x98\x5b\xce\x7b\x2d\xb5\xb2\xe0\xac\x42\xdb\xa8\x78\xac\x39\x79\xae\xde\x8a\x77\xac\x74\x3a\xf7\xe3\xb8\x72\xba\x00\x48\x5c\xf4\xac\x4c\x60\x5c\x15\x5a\xd3\xc1\xd5\xa9\xcd\x78\x14\xe8\x2a\xd5\xca\xa8\xad\x8a\xdc\x94\x9b\x1c\xb0\xfd\x
93\x93\x3e\xa5\x45\x2e\x8c\x57\xe2\x4b\x12\xd2\x45\x9c\x5e\xd2\x50\x9a\x08\xc2\xf3\xc1\xf1\x2c\x88\x12\xfb\xb9\x1a\xd4\xf6\x63\x9a\xc9\x1e\x79\x7c\x0f\xc8\x03\xab\x8f\x24\xe5\xba\xbc\x56\xaa\xc5\x35\xc3\x43\xee\xb1\xbc\xdc\xd0\xcf\xda\x4a\x5a\xc4\x06\x0f\xb2\x25\xa4\xb0\xd4\xe4\x0b\xf1\x9a\x21\x90\x8c\xa3\xe6\xfd\x11\x82\x94\xef\xc9\x87\x65\x90\x3f\x18\x90\xf3\x20\xe2\xea\x72\x10\xb9\x16\x85\x56\xc1\xca\x9b\x32\x73\xde\xc5\x52\x50\xf1\xa2\x75\xc7\x68\xd7\x74\xbc\xbc\x4e\xe1\x69\xb2\xd1\xbe\xbd\x2b\x41\x7f\x37\x36\x76\xcd\x63\xd3\x60\x40\xf2\x22\x5d\x70\x5d\x6d\x94\x4c\x49\x30\x61\x5d\xf9\x66\x93\xcf\x55\x4e\x3a\x45\x34\xa7\xe9\xb2\xe8\x3a\x47\x47\x8e\x80\x1f\xc8\x37\x9b\xde\xc3\x22\xef\x7d\x9f\xd5\xfe\xb3\xa8\x5c\x87\x54\xe8\x92\x2f\xd7\x9e\x33\x9d\x8d\x40\xfe\x60\xcf\x7b\x0e\x55\x33\xe2\x3d\x6d\xea\x93\x9f\xf6\x0b\xac\x18\x13\xdc\x97\x04\x7c\x65\x8c\x19\x61\x83\x8f\xe0\x11\x93\x98\x97\x49\x68\x63\xa0\xed\x3b\x7c\xd2\x18\x39\x14\xc1\x7f\x8e\x37\xe2\x1b\xb7\xca\x96\x1f\xae\x59\xf9\x13\x71\xb1\x66\x50\xcd\x94\x16\x1f\x75\x53\xef\x39\xa9\x69\x8e\x82\xba\xf1\x2a\xc8\x67\x98\xa8\x7a\x92\x30\xbb\xfe\x23\x7c\x34\xe9\x08\x00\x3f\xb5\x79\x0b\x79\x3b\x08\x11\x8c\x44\x5d\xfd\xb1\xb9\x00\xcd\x1e\x41\x98\x23\x7f\x77\xe4\x5f\x99\xf3\xf6\x27\xca\x79\x7b\xd9\x5f\x34\xe9\x98\x14\x77\x75\x45\xd6\xa1\xc5\xca\x62\x44\xb1\x6e\x0f\x6d\xe2\xbf\x9b\x2c\x01\xfc\xd7\x70\x39\xd8\x43\x4a\x43\x14\x21\x7a\xa7\x72\x66\xe4\xdf\x60\xa0\xee\xf9\xe2\x74\x8a\xa8\x16\x8e\x15\x92\x8d\xaf\xb7\xbb\x35\xcd\x13\x43\x54\x53\x1c\xb5\x64\xaa\x1b\x54\x36\x18\x10\xbe\x59\x49\x71\x21\x48\x42\x22\x6e\x46\x48\x30\x0d\xa2\x44\xac\x9c\x73\x2a\x02\xfc\xd5\xfc\xf9\x65\x4f\x7b\x03\xac\xa9\xc1\x96\x75\x9c\xed\xbf\x66\x48\x63\xee\x96\x4d\x5c\x0a\xb2\x2d\x81\xed\x8e\x39\x1d\xa7\x49\x48\x18\xc3\xad\xad\x04\x91\x6e\x3d\xb1\x12\x83\x23\x82\x2e\xac\x69\x87\xbd\x5e\x8c\xee\xb8\x43\xd8\x75\x3b\x12\x25\xc4\x89\x16\x71\xca\xbc\x48\x33\x1a\x2a\x37\xee\x5c\x02\x01\x8d\xcf\x34\xc8\x49\x30\x67\x1b\x52\xdf\xcb\xaf\xed\xbf\x52\xfe\x6d\xff\x79\xbc\xcb\xdf\x45\x17\xab\x7b\x78\x5d\x9a\x5b\xc6\x31\xdc\x12\x36\x24\xd2\x4e\x36\x3d\x50\xa0\x2b\x06\x49\xe8\x3f\x06\xec\x98\x7d\xa9\x7c\x69\x58\x52\x9c\x05\x56\x73\x68\xb0\x2b\xc5\x07\x06\x38\x55\x05\xa3\xc8\xb8\x5c\xe0\x2f\x8a\xa8\x3c\xbe\x43\x5a\x30\x8a\xc8\x1e\x83\x94\x72\xd6\x43\xae\x09\xad\x1f\x93\x3e\x21\x25\x24\x40\xa2\xa9\x28\x2e\x6b\x91\x63\x4b\xe8\xb9\x4a\x92\x63\x4a\x2e\xaf\x31\x31\x58\xba\x91\x2d\x69\x53\x10\xc4\xdd\x15\x8b\x6e\x57\x14\xb5\xe5\x60\x43\xb2\x10\xbe\x4e\xa4\xa2\x38\x74\x4a\xfb\x24\x65\x01\xa1\xa4\x65\x7d\xfc\x93\x49\xaa\x2d\x3d\xf1\x50\x68\xa0\x27\x82\xa1\xd4\x77\xfd\x42\x2a\xb6\xe8\x6f\x65\x0d\xec\x4f\xfd\xe0\xd2\xb5\x3a\x45\x62\xfa\xeb\x48\x3a\xe8\xa9\xd9\xc7\x1c\x6c\x30\xe0\xa1\x15\xb5\x95\x85\x51\xa9\xb6\x95\xf8\x72\xbd\xcb\x80\x25\x96\xd6\xcd\xb6\x05\x62\x50\xc5\x70\xc6\xcd\xe0\x2d\x0e\x10\x32\x7e\x94\x10\x47\x63\x0a\x57\x0d\xda\x5e\xc3\x8a\xfe\xe7\xb3\x1d\x01\xfb\x8f\x72\x8b\x11\xe2\x58\x8d\xe4\xfd\x45\xba\x30\x1c\xcc\x99\xdd\x8b\x83\xbc\x10\x90\x4e\xd5\xfe\xee\x70\x42\xea\xb0\x82\xe0\xbc\x68\x5d\xbd\x38\x81\x38\xb4\x90\x6e\xf7\x49\xa3\xb0\xa6\x4b\xac\x21\x01\xdc\xe7\x41\x49\x7e\x20\x9b\x76\x6d\x62\xa6\x25\xed\xef\xcb\xb5\x5c\xaf\x05\x90\x7f\xb7\x52\x09\x22\x34\x59\xcc\x52\xaa\xd3\x94\xa9\x1d\x1e\xd6\xba\xd9\xe5\xfe\x22\xb8\x0c\x46\x31\xf5\x75\xcf\x3d\x0e\x70\xfb\xa9\x9c\x26\xa1\x0e\x48\x95\xa4\xc9\x23\x51\x09\x46\x87\xbd\x4d\x5c\x97\x4d\x3d\xf8\xf6\x63\x9c\xd1\xaf\x82\xed\xc8\xa5\xd2\x83\x11\xa3\x5a\xe5\x04\x81\xed\xdb\xc6\x1e\xaf\x68\xd7\x9c\xc4\xd2\x1b\x41\x7c\xa2\x35\x74\x00\x52\xee\x83\
x58\x08\xa6\x96\x20\xa4\xe4\x3c\xc8\x95\x40\xb9\x66\xe2\x8a\x2f\x6d\xb8\x7a\x45\x47\x18\x6d\x98\x65\xdd\xbf\xce\x82\x7c\xe6\x43\x3a\xeb\x35\xcd\xb2\xb2\x9b\x48\x7c\xe5\xe8\xbb\x57\xac\x92\x78\x98\x38\x1a\x86\xfc\xda\x0b\x71\x5d\xd6\x13\x7f\x5b\x25\xc7\x2e\xb2\x07\x65\x4a\x84\xaf\x52\x09\x71\x12\x65\x79\x51\x2e\x20\xae\x28\xe3\x95\x68\x40\x7c\x6a\x0f\xdf\xf5\xab\xf1\x55\xe7\xf8\x12\x02\x6d\xf2\x81\xd7\xcd\xb3\xd5\x58\x53\x94\xd7\xa2\x7a\x95\xa1\xfb\x79\x9a\xd2\xc9\x73\x20\xa1\x2b\x13\xd8\x95\x9b\x20\x3b\xdf\xbe\xe0\x76\xa5\x90\x24\x3e\x0d\x03\xb4\x1b\x0b\x5e\xb6\xd6\xac\x4e\x3b\xeb\xd9\xd4\x45\x4d\xd7\xa6\x0c\x34\x51\xf5\x0f\xd6\x06\x03\x6b\x07\x36\x2e\x70\xb4\xcb\x63\xa4\xbe\xb4\x2a\xef\xf0\x7d\x79\x30\x30\x7c\xe9\x96\x86\x9d\x1e\x8f\xc1\x2d\x6e\xca\xe3\x34\x45\xc9\xb4\x42\x36\x33\xd5\xd8\xe6\xc8\xf9\x24\x5e\xbb\x9c\x08\x8b\x43\x55\xa2\x10\xf9\x82\xa4\xae\xa6\x12\xd1\x84\x24\xa9\xae\x81\xb1\xb7\x45\x90\xe7\x34\xec\xb1\x2a\xb4\xeb\x3b\x06\x91\xa3\x25\x6d\xf2\x32\x45\x78\x30\x03\x16\x3a\x0d\x73\x48\x9f\xef\x54\xd3\x66\x95\xac\x2c\x43\x69\x4b\x79\xad\xad\x2c\x66\xc8\xb5\x24\xc4\xaa\x81\x08\x61\xd2\xa8\x40\x75\xa9\x27\x0b\x8c\xe8\x38\x58\xe6\x94\x9d\xc4\xc3\x34\x69\x17\xe4\x3c\x48\xc0\x28\x29\x5f\xa4\x51\xcc\xaf\xc3\x93\x82\x66\x93\x60\xac\xbc\x63\x37\x38\x8a\x37\x39\x6e\xdb\xfb\x54\x3d\x43\x24\x8e\x7f\x5d\xb5\xa8\xd1\xe2\xfc\x89\x16\xdc\x5d\x33\xdb\x20\x7b\xe4\x7c\x16\x8d\x67\x60\x35\xc0\xd6\x77\x91\x8a\x7d\x8c\x2c\xe2\x65\x5e\x7f\xf7\x2a\x18\x41\xcd\x04\x6b\xee\xe1\xb7\x64\xaa\x91\x61\x57\x17\x54\x55\xb1\x7a\x01\xf2\x36\xc2\x63\xb9\xe0\x88\xac\x95\x6f\x24\xc8\x54\x09\x31\xe6\x53\x87\x3e\xb7\x48\x6f\xce\x7d\x3d\xc7\x1e\xef\x79\xb7\xc1\xfd\x79\x19\x6f\x72\x4e\xc3\xde\x63\x70\xc9\x53\x16\xdf\x81\xd8\xdd\xfe\xb4\x61\x38\xc7\x9f\xfb\x7a\x85\x78\x4e\xd3\x5e\xbb\x25\x8b\x6e\x77\x95\xfd\xb3\x69\x2c\xd1\x1a\x7e\x5b\x66\x02\xad\x4c\x1a\x5a\xc3\xed\x1d\xd7\x26\x5a\x8c\xbc\x35\xdc\xd9\xba\x3e\xed\x6d\x3f\xb9\xb7\x7d\xba\xb7\x7d\xfa\x6b\xdb\x3e\x21\x63\x67\x61\x03\x79\x07\xd6\xce\x25\x7e\x2c\x85\x75\x25\x7f\x98\x75\x34\x91\x97\xce\xfb\xd9\x34\x1f\x96\xa8\x6e\x90\x90\x27\x8e\xb0\xa2\x12\x1c\xfb\x4e\x6e\x27\x8c\x7d\xca\x4a\x09\xb6\x71\x02\x3e\xdf\xf3\xf5\xe1\xfd\xbb\x03\xce\xdc\x6f\xd3\x01\x1e\x70\x09\x58\x2d\x85\x17\x8c\x45\x4a\xde\xbf\x3b\x10\x17\x05\xfe\x0e\x88\xf7\xe8\xe0\x45\x51\xb7\x3c\x4b\x73\x7c\xfd\xe5\x36\x7e\x70\xf4\xf6\xed\xcb\x83\x8f\x87\x47\x6f\xc9\xcb\xf7\xef\x8f\xde\x0f\xc9\x81\xd2\xff\x8e\x79\x95\xfc\x48\x1f\x52\xd2\xde\x20\xac\x3e\xb2\xd1\xee\xfb\xfb\xa0\x5d\xde\x34\x1d\xbb\x7a\x68\xcf\xb5\x08\x05\x5b\x3d\x11\xaf\xcc\xdf\x84\xb4\xa4\x1d\x12\xdb\x2a\x18\x0d\x13\xde\xa5\xd1\x3c\x0f\xa6\x94\xec\x91\xf5\x75\xf1\xd4\x90\x6d\xeb\xe2\x77\x9f\x87\x8c\x75\x52\xfa\xb2\xd8\x33\xe2\x4d\x1e\x12\x35\x5d\x7f\xff\x70\xf4\x16\x66\x25\x53\x5d\xf2\x84\x59\x15\x7d\x73\x1e\x93\x69\x1c\x88\xaa\xcd\xd1\xea\xd9\xfc\xc8\xef\xab\xf1\x78\xe7\x79\xd3\x29\xfd\x78\xf8\xe6\xe5\xd1\xf1\xc7\x21\x11\xb7\xde\x8c\xb8\x58\x27\xe7\x39\xd9\x20\x6d\xf6\x5f\x30\x9e\x31\x8e\xd1\x36\x62\xda\x08\x3f\x92\xdf\xde\xef\x56\xf7\xbb\xd5\x5f\x7b\xb7\x42\x9b\x15\x3c\xbb\xfc\xa3\x9a\xe9\x36\x7f\xcd\xde\xe8\x11\xfd\x1d\xbe\x65\x97\x4e\x87\xd8\xfa\x57\x87\x33\x1c\x93\x29\x37\x8e\x21\xe2\x91\x2d\xb4\xa5\x0f\x0b\xb6\x15\xf2\xd7\x7e\x08\xbf\x90\xb6\xbc\x48\x93\x8e\xf3\x79\xec\x0a\x52\xf1\x1e\x39\x4f\x93\x6e\xcd\x1b\x7a\x94\x99\xa4\xc9\xe5\x3c\x5d\xaa\x16\x55\x42\xc9\xe9\x4d\x22\x6d\x4a\x25\xae\x68\xc8\xe5\x01\x88\x62\xe0\x84\x6b\x12\x69\xea\x78\xf6\x3c\x4d\xe3\x6b\x08\xaf\x1a\x82\x0f\x72\xbe\x49\x50\x0e\x19\xa2\xd9\x81\x07\x22\x34\x34\x3c\xa6\xcb
\x13\x1f\x44\x23\x60\x8b\x52\xd4\x3e\x58\x33\xa6\x09\xbb\xdf\x62\x10\xa6\xe7\x28\x5e\xaf\x1d\x81\x01\x21\xdf\xbd\x13\x89\x3c\xa2\x42\xd4\x17\x35\xc1\x05\x87\xf8\x5d\x62\xef\xea\x2f\xaf\x0d\x96\x4b\xaf\x88\x31\xb6\x39\x7d\x86\xdc\x07\x38\xb8\x31\xb2\x70\x1d\x6a\xf7\xe0\xde\x70\x41\xde\x0a\xca\x51\x87\xaa\xab\xf2\x12\xc4\x29\xd1\xf5\x50\xde\xd1\xf4\xda\x7c\x74\xb0\x42\x3d\x43\x2b\x84\x43\xf3\x8a\x71\xe1\xa2\xd5\xf4\xb0\xd2\x88\xa4\x2b\xf5\x1b\x0d\x27\x8f\xa6\x49\x50\x2c\x33\x7b\x38\x38\xbd\x6c\x3c\x18\xa6\x7c\x3c\x0a\xaa\x6a\x40\xe0\xc1\xa0\x79\xff\xc5\x13\x07\x49\xde\x82\x23\x05\x49\xa8\x54\x4b\x45\x0a\x41\x89\x27\x51\x12\xc4\x7e\xb3\x67\x5e\x87\xcf\xa8\x14\xaf\x6b\x2b\x4b\x54\x6f\x20\x45\xe6\xd1\x33\x9a\x5d\x16\x33\xae\xb2\x9e\x8f\x22\x60\x19\x29\x8f\x12\x0d\x7d\x13\x71\x16\x2a\xb1\xe5\xf1\x0d\x22\xba\xe3\xb8\xb6\x53\x8b\x5b\xfd\x42\xaf\x00\xef\x3d\x88\x68\x7f\x1d\xca\x41\x47\x9d\x6b\x11\xa9\xd7\x5c\xb7\x76\x1e\xb7\x9f\xa2\x72\xfe\xb2\x55\x38\x17\xe4\x8e\x3a\x25\xb5\x77\xba\xae\x4a\x53\xcc\xd3\x47\xd9\xb1\xdb\xb2\x74\x14\xc3\xa2\x92\x9f\x83\xe7\x65\x11\x4c\x5b\x94\x3f\x89\x20\xc6\x94\x65\x0d\x20\x80\xf0\xfc\x31\xba\xd1\xc9\xc9\x32\x8e\x4b\x9e\xb8\x68\xcd\x22\x71\x6f\xff\x4d\x85\x30\xd4\x57\x16\x9a\x11\x32\xad\xd1\x9c\x55\x5c\xf7\x0b\xec\x3b\x8f\x63\x3a\x7c\xfb\xea\x91\x33\xfb\xea\xbc\x6b\x07\xd7\x5b\xa9\x36\xe8\x7b\x0d\xc5\x99\x44\x32\x4e\x93\x71\x50\x74\x8c\xd9\xef\x96\x3b\xb2\x29\xe5\x7a\xc2\x8b\x4d\x39\xd7\xb3\x77\x5b\x5a\xc6\xe1\x42\x7e\xf7\xe0\xf2\x30\xc1\x15\x84\xe5\x10\x9c\x10\x78\x2d\xa1\x6a\xf6\xc1\x03\xd0\x37\x98\xbd\xa8\xde\xa6\xcb\xbd\xef\x00\x0e\xee\xd0\xfd\x4e\x90\x4d\xad\xd5\xa5\xc5\xc7\x67\x46\xc9\x21\xfe\x12\xae\x79\xb6\x90\x2b\x14\x31\x3e\x71\xff\xa2\xea\xb5\x9f\x6a\xf1\xc9\x24\x5f\x94\x94\x86\xeb\xdb\xee\xee\xb2\x95\xf9\x4b\x1a\x25\x9d\x56\xcb\xad\x5c\xbd\x8a\xe3\xe4\xc6\xf1\x84\xaf\x37\x40\x36\xec\xb0\x65\xde\xed\xe1\x1e\xe1\xab\x9a\x24\x2d\x0e\x8d\xbe\x2a\x14\x7a\x1c\x0e\x69\xe0\x86\x6d\xc3\xb3\x85\x6e\xcf\x6a\x05\xb7\xaf\x36\x12\xc4\xb5\xd3\x65\xb1\x58\x16\xaf\xd3\xa9\x66\xd7\xc2\x19\x0f\x5a\x2d\xd2\xfb\x0f\x77\x34\x83\xc4\x32\x13\x4c\x73\x6b\x18\x93\xed\x07\x8a\xc3\xf0\x5b\x2e\x83\x9f\x66\x34\x5c\x8e\x29\x9a\xab\x60\x3c\xee\x11\xe1\x8b\x12\xf3\x93\x60\x3c\x3e\x11\xc9\x9c\x27\x32\xa4\x88\x6f\x49\xe5\xcf\xcc\x29\xeb\xe7\xb3\x68\x52\x74\xba\x64\xe8\x60\x54\x66\x39\x4a\xab\x60\x3c\x96\x5a\x2a\x6e\xed\xcd\x49\x9b\xc6\xb4\xa0\x72\x1c\xda\x4b\x92\x99\xce\xa9\xea\x06\x2c\x03\xdd\x5f\x89\x87\x25\x62\x69\xb3\xad\x9e\x8b\x71\xa5\x9e\x15\xee\x4a\x2e\x32\x1a\xae\x16\x7e\x3c\x9e\x1b\x6c\xe9\xe7\x8f\xee\x92\x69\xbb\xde\x25\x53\x55\xf1\xad\x72\x23\x3b\xb3\x02\x62\x48\x80\x86\xf3\x07\x5b\xec\xb0\xfd\x3e\x39\x02\xe5\x1f\xca\x01\x54\x29\x2d\x63\xdb\xff\x06\xaf\x1a\xad\x67\x6d\xde\x27\x8d\x95\xd4\xf8\xb5\xbc\x4d\x31\x50\xf3\xe4\x5a\xc6\x01\xa5\x81\x21\xb4\x74\x82\x00\x4e\x0d\xea\xf5\x01\x60\x07\x56\x9a\x28\xbc\xa0\x27\x8a\xdd\xf3\xb6\x4f\x4b\x07\x60\x58\x4d\x78\xef\x84\x0d\x5c\x22\x97\x58\x55\x57\xc2\x75\x8e\xb2\x6e\xe8\x1b\xeb\x69\x13\x05\xfc\x6d\x9d\x5d\x0e\xfc\xba\xc9\x37\x9c\x06\x3d\xfa\xbf\xea\x48\x22\x38\x88\xc8\xda\x60\x40\x3e\x1e\xbd\x38\x1a\x92\x8c\x72\x8b\xac\x1e\xc9\x53\x61\x3a\xa3\xae\xb8\xb4\x31\x4e\xc0\x35\x5d\x7d\x56\x2e\x2a\xda\x39\x49\xe8\x98\xe6\x79\x90\x5d\xb2\xc5\x02\x21\xb0\x73\x46\x6e\x6d\x70\x58\x0c\xee\xa2\xc9\x79\x9a\x7d\xe6\x52\xde\x7c\x19\x17\xd1\x22\x46\xa1\x1c\xcc\xe0\x29\x7e\xff\x46\x83\x87\xc4\x6b\xcc\xfd\x8d\xb4\xe5\xe6\x75\x98\x66\x0c\xb2\x79\xc3\x88\x54\x37\x46\x43\xbe\x71\x98\x27\x13\x55\xaa\x2f\x71\xe4\xf3\x60\xb3\xce\x3a\x77\xe2\xc2\x9e\xfa\xce\x0f\x6
5\xb0\x16\x3b\x25\x8e\x81\xa3\xd9\x4f\xe1\xd0\xc9\x57\x53\x8d\x1d\xa4\xb7\x9e\xd2\x23\x94\xae\x5f\x10\xbc\x3d\x26\x07\xc0\x73\xe4\xe6\x39\x3e\x6c\xf0\x1c\xc5\xf4\x84\x49\x8f\xd9\x45\x8f\xe9\xa7\x28\x96\xd3\xc2\x0a\x15\xe3\x73\x72\x55\x79\x10\xab\x9e\xee\x88\x56\x8c\x57\xc3\x78\x86\x5c\x46\x2f\x44\x47\x39\xb9\x5c\x79\xd8\xaa\xe0\x1d\x0c\x9c\x20\xc3\x51\x7a\xd1\x37\xd8\x91\xfe\xd8\x25\x12\x40\x72\x21\xf8\x7f\x57\xa6\x2a\x96\xc3\x7f\xa8\x74\xc4\x68\xe4\x4f\x53\x8e\xa4\x17\xe2\x7d\xb7\xcb\xcd\x39\x1a\xb4\x6b\xa2\x12\xfe\x5c\xc2\x91\x5b\xc3\x1d\x70\x61\x84\xbd\x86\x33\xc6\xfc\xdd\xfd\xcd\xe8\xfd\xcd\xe8\x5f\xfb\x66\x54\x5c\x8b\x8a\x37\xbf\xff\x15\x01\xf6\xee\xd4\x65\x38\x1c\x02\x1e\x92\x83\x34\x39\xa3\x8c\x15\x05\x22\xe6\x31\x9c\x83\xe1\x2c\x00\x81\x8b\x65\x24\x17\x46\xc0\x41\x9c\xa7\x24\x88\xe3\xf4\x3c\xe7\xf1\xd9\x41\x51\x97\xf7\xd7\x58\x45\x52\xf0\x7f\x13\x5d\xd0\xf0\x9a\x67\xad\xb9\xf7\x1a\x6b\xe2\x46\xb5\x48\xed\x28\xc7\x42\x65\xa9\x0e\x9c\x1d\x53\x25\x4a\xae\xae\x64\x84\x74\x9d\xd1\x56\x3a\xd4\x76\xd7\x56\x06\xf0\xb3\x9c\x10\x91\xb8\x62\x96\xf7\xa1\x23\xf5\x8b\x46\x43\x5c\x0f\x71\x34\x01\x55\x73\x17\x6a\xdf\x74\xea\x04\x48\xc1\xf7\xf1\x93\x56\xe3\xce\x48\x46\x51\x52\xed\xc0\x91\x8b\x89\x9a\x8c\xd3\xca\xcb\x1f\xdb\x12\x36\x55\xfa\x7d\x71\xd8\xea\xb1\x49\x38\xa3\x59\x34\x01\xc7\x1e\x19\x1d\x07\x8c\xe3\xa0\x48\x35\x0f\x1e\x90\x38\xf8\xf5\x92\xc4\x69\x10\x92\xf0\x32\x09\xe6\xd1\x98\xa4\x09\xcd\xa1\x35\x31\x21\xba\x21\x11\xcd\x3a\x55\x7a\x02\x80\x92\x06\xf6\xb2\x71\x07\x8a\xcd\xd6\x94\x16\x47\xea\x90\xec\x71\xe1\xcc\x26\x46\x0b\xac\x75\xfe\x01\xb0\x32\x41\x4c\x89\x3c\x26\x97\xdf\x7a\x18\x9a\xfe\xd2\xab\x17\x9e\x9d\x9f\x47\x10\xb0\x04\xf5\x8a\x80\x0e\x22\xa7\xfc\x04\x3d\x74\x9e\x56\x71\xe1\x7d\x99\x51\xa1\x5e\xec\xc1\x05\xde\x98\xaf\x0e\x7e\x38\x9e\xd1\x0b\x9f\xda\x40\x6b\x4d\xad\x04\xcb\x15\x65\x83\x22\x86\xe6\x53\xc4\xd5\x2e\x55\xca\x5b\x0a\x7f\x19\x85\xfb\x99\x88\x4f\xce\xaa\x12\x8b\xac\x4b\x86\x72\xbd\x09\x30\x57\x56\xf2\x5d\x13\x78\xde\xd7\x41\x37\x87\x56\xb7\x7b\x0e\x1c\x5b\x02\x1a\x8a\x7d\xb9\x30\x45\x8a\xeb\x71\xf3\x03\x19\x96\x59\x02\x05\x38\x28\xb3\xdd\x1a\xdc\x5f\x0d\x57\xba\xd6\xea\xab\x72\x5d\x5f\xef\x6e\x52\xa3\x28\x65\xea\xa7\xd0\x41\x87\x53\x60\x3e\x63\x14\xe8\x41\xb8\x45\xea\x52\x55\xb3\x1f\x86\xfc\x59\x84\x52\xa2\x05\x49\x48\x72\x5a\xe4\x64\xb9\x80\x0c\x71\x1a\x01\x96\x11\x15\x34\x63\x7b\x47\x7a\x26\x84\x2d\xe1\xc7\xb4\xbf\xb6\x86\x9e\x46\xbc\x4e\xa7\xf9\x7e\xf1\xa1\x08\xb2\x62\xcd\xd6\x34\xe6\x34\x9e\xa8\xc4\x89\xfb\x80\x59\xb0\x70\xb3\x16\x23\x50\x18\x8d\x27\x8e\x13\x1f\xf9\xca\x6e\x4a\x0b\xae\xcf\x62\x85\xad\xa7\x76\xa0\x5f\xd0\xc3\xcc\xa1\x7b\x44\x9e\x3c\x2d\x9e\xc1\x5a\xe9\xfb\x18\x07\x64\x4c\x69\xd1\xb1\x1e\xfd\x08\x4b\x46\xe7\x94\x33\x18\x88\x17\x34\xf0\x4c\x94\xf5\x51\xa0\x0d\xcc\x26\xe1\xa2\x5b\x26\x4a\xb3\x23\x70\x85\xd1\xef\xf7\xc9\x2f\x4b\xee\x09\x98\xb5\xc9\x78\xaf\x73\x5e\x2e\x79\x19\x59\xf1\x2a\xf2\xda\x7e\x02\x6b\xad\x74\x35\x0c\xff\x19\x93\x67\x7a\x0f\xa6\xdc\x90\xb3\xee\x9d\x26\x7f\xbc\x63\x9a\x7d\x1a\xfd\xab\x77\xc4\xfa\xf5\x48\x77\x91\xc6\x31\x27\x1f\x3f\xd9\x0a\xda\xd4\x60\x36\x5d\x2a\x95\x08\xa8\x6d\x93\x37\xca\x0c\xd7\x20\x96\xb4\x84\x5c\xc4\x8c\xa6\xce\x9c\x4a\x23\x0b\x46\x7a\x72\xac\xbe\x49\xf0\x3d\x9b\xf2\xd1\x44\xda\xf8\x24\xdf\x94\x3a\x6e\x46\x19\xda\x4c\x19\x86\xa6\x95\xd7\xcf\xac\x04\x5d\xc9\x50\x16\x72\x49\xe7\x56\xe8\xb9\x1d\x91\x96\xea\x03\xa0\x4f\xb6\x37\x6a\xc6\x78\xde\xa5\x71\xcc\xf8\x8c\xee\x09\xa7\xc1\x21\x2f\xc2\xce\x69\x74\x4e\x93\x02\x8e\x9c\x7d\x46\x71\x30\x34\xbd\x97\x2c\x84\xa1\xfd\x09\xc7\x14\x90\xe3\x61\x78\xda\x93\x57\x54\x46\x72\x
4f\x13\xa3\xc8\xc1\x7e\x8c\xb8\x82\x18\xe8\x97\x6d\xd6\x32\x6c\xa1\x43\xe2\x96\x4c\xd6\x23\x4e\x7c\x0f\xb9\xdc\x3c\xb7\x03\x3d\x71\x9a\x3a\xc8\x28\x8c\x09\xec\xb5\x0f\x3c\x2f\x1d\x81\xd9\x71\x0d\x36\xba\x70\x35\xf0\x81\x34\x7c\xab\xa8\xca\x4a\x75\x5d\xa5\xca\x1e\xbf\x52\xcd\xec\x0c\xb2\x25\x20\xa5\x2e\xe3\x4b\xad\x31\xb5\xb0\xa9\xc5\x60\x4b\xf4\x45\xd0\x0e\x1a\xcc\x04\x04\x29\x67\xde\x7d\x32\xa6\x56\x88\xb0\xac\x51\x19\x62\xcb\x3d\x28\xcb\xd7\x6c\xcf\xc9\xc2\xd7\x4e\xea\x77\x69\xbf\xfb\x09\x3d\x17\xb7\x4e\x18\x07\xd8\x59\x18\x67\x92\x51\x68\xf8\xc6\xf3\x33\xc7\x9a\x65\xdf\x19\x8f\x3c\x62\xee\x78\x54\xcb\x07\x89\xe0\xc8\xe2\x5c\x58\x41\xbd\x96\x47\x52\x97\xbd\x54\x94\xf5\x77\xa3\x5a\xef\x6c\x2c\x6d\x46\x04\xa1\xeb\x08\x10\xfb\x6a\xc8\x28\x5c\x32\xb0\x33\xc7\x82\x26\x21\x18\xb8\xa9\x49\x0e\x72\x50\xb4\x24\x39\xa3\x50\xe5\x0c\x46\x57\x94\x4e\x00\x98\x15\x62\x52\x4f\x97\x2b\x57\x54\xeb\xcb\x24\xc8\xf3\x68\x9a\xd0\xb0\xef\xf6\xd1\xa6\x28\x1f\x4f\xf6\xcd\x8e\x92\xb1\xc6\xa3\x9a\x09\xf2\x36\x83\x4d\xc6\xd0\x48\xb4\x3d\x31\x89\xb1\x74\x18\xc4\x19\x0d\xc2\x4b\xfd\x60\x5d\x0b\x8a\xf9\xed\x29\xcd\x14\x64\xa5\xf4\x5a\x37\xae\x68\xd2\xb1\x5a\x53\x4e\xe0\x36\x5d\x97\x5c\x7a\x65\x72\x2e\xee\xf3\x0b\xc9\xa4\xe8\x22\x15\x63\x8b\xe6\x73\x1a\x46\x41\x41\xe3\x4b\xbb\x59\x41\xee\xe3\xa6\xb4\x6d\x4a\x27\x50\x7d\xa7\xc4\xd5\x84\xcf\x6d\x15\xd6\x64\x73\x96\xcf\xb6\x1f\x3e\x18\x74\x97\x7b\xee\x84\xe9\xb0\x37\x73\x93\xb7\x71\xc3\x3e\xd4\x0f\xa9\x8e\x31\x98\x23\x1e\x8d\x35\x4f\xe2\xba\xd4\x1d\x08\xc2\x35\xba\x13\xbe\x6e\x3a\x10\xbc\xef\xd6\x8f\xc7\x91\x1c\xd2\x85\x14\x1c\xcc\x81\xd4\xf0\x77\x78\x5a\x3e\x4f\xcf\xa4\x4a\x93\x04\xf9\x65\x32\x56\x87\x1f\x9f\x60\xe4\xe3\xdb\xcb\x04\xde\x4e\x1b\x08\x40\x32\x86\x85\x2d\x87\x77\x61\x43\xf8\x55\x6a\x36\x04\x7f\x07\xa3\x53\x2b\x66\xbb\xcf\x7b\x82\x23\x53\x78\x4d\x4e\x54\x49\x5b\x28\xb7\x76\xd4\x12\x3b\xca\xc1\x80\x1c\x4e\x34\x67\x8c\x72\xf5\xae\xef\x92\x0a\xff\x2b\x24\x2a\x88\x76\xd3\xa5\xcb\x9d\xcf\x28\x18\x63\x88\xd1\x77\x09\x67\xaa\x39\x89\x0a\x93\xad\x7a\x37\x6a\x87\xd8\xd5\x32\xf3\xed\x1e\x3e\xf4\x8b\x1a\xed\x09\xc5\xfb\x31\x84\x48\xf1\xf0\xb7\xaf\xe8\xa0\xc7\x92\xc7\x33\x6a\x5b\xef\xc5\xe9\xb4\xac\x5d\x62\x31\xa6\x8a\xb3\x05\xd4\x32\x64\x7b\x42\x89\x3f\x3e\x7f\xc4\x12\x13\xc4\x39\x00\xd8\x03\x6b\x4e\x47\x8e\x9f\x29\x21\x88\x1f\xbe\xe0\x09\x43\x41\x63\x9d\x6e\x9f\xef\xc8\xe3\x40\x7a\x2c\x04\xbf\x2a\x34\x24\x6c\x75\xcf\xb2\x34\x49\x97\xb9\x72\x5f\x28\x0c\x03\xd8\x6e\x6f\xbb\x22\xe2\xd5\x08\x61\xb7\xed\x35\xaf\x05\xa7\x12\xa9\xb6\xd2\x6b\x42\x40\xae\x0d\x1d\xab\xa1\x7e\x0e\x6f\x31\x6f\xd7\x35\xfc\xd8\xb9\x22\xe5\xb8\x75\x82\xbf\x55\x5c\x90\x5e\x9f\xf6\x76\x36\x9b\x5c\x81\xb6\x97\x39\xd7\x8b\x8f\x8b\xf6\xda\xfd\x85\xe8\xfd\x85\xe8\x9f\xf8\x42\x54\x3f\x15\x45\x2a\xeb\x9b\xbc\x17\x15\xc0\x2b\xdc\x64\xfa\x82\xbf\x35\x7e\x62\x9a\x4c\xa2\xa9\x17\x8e\x67\x49\xc0\xc3\x51\x60\x05\x75\x89\x46\x41\xe2\x09\xd4\x02\xda\x64\x1e\x69\x8a\xdb\x48\xf3\xcb\xcc\x51\x34\x15\x1e\x0c\x2c\x2b\x46\x0e\xf4\x3c\x9a\x5a\x4a\x7d\x6c\xcd\xc8\x35\xce\x57\x1c\xe2\x4a\xc1\x5e\x9b\x5e\xab\x74\x3a\xb6\xc4\x05\x3d\x63\x49\x1b\x86\x54\xc4\x7b\xe7\x7d\x86\x56\xa4\xaa\xac\x04\xdb\x55\x4a\xa0\x28\x7f\x97\x51\x71\x0d\x8a\x6e\x27\x8c\xba\x47\x3a\xdd\x6a\x60\x84\x4b\xb0\x83\x84\x70\x7f\x4f\xae\xae\xdc\x3c\x71\x36\xf5\x67\xd2\x20\x8b\x23\x56\x14\x75\x2d\x59\x2c\x8b\x17\x74\x12\x2c\x63\xef\xc5\x49\x5d\x1f\xd9\x8e\x6c\xb7\xa3\xae\x7c\xbd\xe1\x5b\x18\xc9\xf4\x43\xd4\xa2\xc7\xf7\x54\xf9\x3d\x0e\xee\x82\x35\x8a\xdf\xa2\xfb\xf6\x8b\x2e\x2e\xa0\xb0\x5a\x4a\xe6\xd8\x68\xd4\x53\x21\xca\xf6\xe0\x41\xd2\xd6\x2b\x7a\xe1\x19\xb9\
[unreadable hex-escaped binary byte stream omitted — no recoverable text; likely the contents of a binary test-data file included in this diff]
5\x6b\x59\xee\x1a\x3a\x18\x9e\xa3\xda\x01\xb4\x12\x8c\x97\xa0\xce\xc0\x36\x78\x15\xc7\x40\x6d\x8b\x07\x12\xa5\x13\x7a\x43\x48\x84\xf7\x8e\x10\xca\xa0\xac\x4f\xe4\x5c\x11\x0d\xdc\xdf\x55\x29\xc1\x2c\x80\x14\x69\x41\xde\xe3\xf9\xd5\xf3\x0a\xdd\xa6\xb7\xe9\x30\x27\x69\x8d\x5d\x9e\xc1\x10\x7a\xe8\x4f\x14\x5d\x74\x51\x74\x51\xf0\xc6\x1b\xc5\xf4\x40\x99\x6f\x15\x52\x57\x89\x0b\xc5\x24\x07\x5d\x03\x20\x67\x0e\xa1\xf5\xd9\x8d\xb3\xba\x56\x2d\xb2\x87\x2e\xa1\x55\xa4\x27\xc7\x42\x7c\xa4\xa7\xfb\xa5\xa7\x2d\x7c\x5f\xf4\x24\x20\xdd\x8d\x9e\x54\x3e\x7d\x0b\x7a\xda\x8b\xa3\x3c\x0a\xc6\xd1\x1f\x38\x43\x01\x8a\xf1\xe5\xf8\x9a\x61\x18\xb2\xe1\x98\x4f\x4b\x7c\xd7\xb8\x1a\x26\xe9\x64\x3f\x09\x31\xda\xa6\xce\x6a\x10\xa7\xb9\xe0\x74\x49\x2a\xd3\x29\x58\x57\x83\x9f\x1f\xa7\x5a\xb1\xc9\xd8\xc9\xf0\xbb\x23\xd9\x7b\x23\xab\x9a\xf9\xc1\xc6\x29\x6e\x49\x70\x51\x1c\x29\x16\x36\x62\x9a\x24\x72\xb1\xa8\xa8\x37\xa7\x53\x42\x0b\x30\x5a\x3c\xdd\x74\x66\xb9\x66\x20\x43\xbc\x21\x7e\xf2\x4d\x91\xd2\xa0\x79\x2a\xce\x89\xe4\x4c\x0d\xeb\x93\x74\x42\xa7\x3d\xb0\xe9\x6e\x28\x7d\x17\x24\xb5\x51\x90\xd7\x6b\x5b\x49\x6a\x47\x03\xb6\x32\xd6\xb3\x78\x44\x09\x9d\x7a\x00\xd8\xfa\x01\xf6\x45\xb5\xca\x0b\x07\x6c\x74\x54\x3e\x0c\xc1\x1c\x32\xd1\x12\x68\xcf\xee\x48\x3e\x6c\x09\x9a\xb8\x29\x33\x9c\x56\x31\xa2\xa2\x46\x45\x61\x90\x07\xa8\x0f\xb2\x97\x5a\xc2\x21\x8f\x01\x68\x9a\xea\x82\xbb\x3b\xeb\x80\x0f\x71\x0a\x73\x39\x48\xe2\x41\x8a\x73\xfc\x92\x0d\xc7\x38\x39\x57\x98\xb2\x74\x2f\x75\xb4\xd8\x58\x43\x40\x0d\xc0\x9c\xfa\xb7\x30\x9e\x82\x43\x89\xa5\xe0\x70\x81\x4d\xef\x6b\xc6\x5c\x61\x08\x50\xa6\xec\x24\xbc\x81\xb7\xc1\x1a\x90\xc0\x57\xd8\xb9\x24\xfe\x24\x60\xd1\xa8\x59\x2c\x1a\x41\x14\x9f\xdf\x03\x37\x29\x3a\xbf\xc1\xc9\x83\xc1\xaf\x3d\x27\x6d\x3e\x57\xc9\xa4\x4a\xbd\x4b\x8e\xb9\x93\xc2\x58\xc9\xae\x16\xe6\x95\x0e\x9d\x83\x7b\xe0\x38\xb4\xcd\x7e\x00\x5f\xe4\xea\x36\x9a\xa2\xed\xa1\xe0\x22\x88\xc6\x41\x7f\x8c\xa9\x19\x62\xe6\xde\x16\x3f\xf3\xce\x54\xa6\xaa\x9d\x28\x66\x1b\x5f\xe9\x3e\xc5\xe0\xaa\xfb\xcc\xc7\x24\x67\xee\xd1\x34\x6a\x1a\x85\x54\xec\x1a\x28\xca\x10\x1e\x0e\xf1\x20\x8f\x2e\xf0\xf8\x1a\x05\x28\xc4\x59\x9e\xce\xe0\xd9\x43\x29\x0e\xc2\x97\x49\x3c\xc0\x95\xf6\x99\xaa\xd4\x0b\x68\x3c\x14\x0d\x53\xe0\x0f\x4d\xc9\x7c\x24\x6b\xd5\x89\x58\x54\x59\x94\xfa\x45\xc5\xf9\xe4\xcf\x8b\x56\xa7\xff\x9d\x62\x2e\x66\x50\x48\x2d\x11\x0d\x4b\x01\xa0\xd2\xd5\xa2\x14\xb5\x5c\x94\x2c\xc0\x90\x21\x20\x12\x41\x95\x2d\x38\x1c\xb2\x80\x99\x9c\x53\xef\x48\x13\x62\x5d\x7c\x66\xed\xb9\xca\x66\xbf\xb1\xbe\xda\x6c\xc8\x9f\xa8\x4a\xc4\xf6\x45\x93\x83\xba\xc8\x57\xbe\xaa\xf2\x6f\x17\x35\xaa\x9c\x9d\x32\xab\x2a\x3b\x98\xaf\xc8\x46\xce\xb5\xc9\x4f\x2d\x6c\xa4\x4f\x46\x58\x12\x0a\x58\xa6\xad\x00\x8d\x40\x6b\x4c\x84\xcc\x0a\x4b\x91\x8b\xb0\x9b\x31\xc7\x07\x22\x0c\xf0\x65\x4d\x84\x26\xb6\xae\x2d\x1d\xfa\x06\x87\x25\x66\xed\x6d\xaa\x3c\x35\x1d\xb9\x21\xdb\x3a\x57\x99\x52\xaf\xeb\xf4\x9b\x22\x7f\xe2\x53\x86\xc7\x78\x90\xd3\x86\x8f\xf3\x34\xc8\xf1\xf9\x75\xcd\x65\xae\x2d\x69\x9f\x41\x5c\xdc\x40\xcf\x29\x2b\x7d\xee\x34\x0f\x63\xb3\x71\x18\x64\x19\x61\x13\x6f\x83\x0c\x87\x8a\xc7\x9c\xfc\x57\x6e\x1c\xc6\x40\x1d\xe3\x14\x0e\x5c\x64\x57\x73\x43\x2a\x5f\xe4\x7a\x72\x3f\x76\x9f\x51\x62\xa3\xee\x42\x8a\x91\x93\xcc\xd8\xcc\x1b\x96\x32\xbb\xd1\x22\x0a\x98\x7d\x1e\xc4\xc5\x0d\x45\xd1\x43\xee\x0b\x1c\x7d\x0c\x3c\x87\xa5\x27\x23\xfb\xae\xd1\x7f\xed\x3e\xe7\x4e\x68\xab\x37\x45\x1e\x2a\xbd\x31\xd2\x31\xb7\x4c\xa8\xce\xb6\x65\x2e\x59\x5b\x62\x1a\x5e\xfb\xd5\x9b\xaa\xc3\xce\xf2\x14\x07\x93\x5b\xa9\xb2\x41\x86\x62\xca\x67\xd9\x06\xbf\xd9\x78\xd9\x8f\xa8\xc1\xb6\x7a\xa2\xa1\xd2\x09\xc4\xb1\x
96\x34\xd3\x3e\xaa\x35\x1b\xaa\x62\x5a\x52\xf8\x1e\x03\x7e\x9a\xda\x57\x7f\x59\xe2\x11\xb2\x63\xd9\x6b\x6d\x3b\x2c\x17\x11\xa7\x41\x0a\xc7\x2d\x9b\x80\x68\x6e\x6f\x70\xbc\x29\xac\xab\xb8\xd0\xf8\xc3\x0f\xcf\x87\xe3\x59\x36\x7a\x5e\x6d\x9b\xa3\x50\x5c\x1b\x9d\x18\xe6\x2e\xf2\xcb\xe6\x15\xce\xb5\x90\xd6\x74\x2a\xdf\x96\xca\xca\xf3\xcf\x13\x7a\xf6\xed\xad\xb0\x1f\x7f\xde\xcc\xa7\x10\xc5\x63\x07\xea\x19\x54\x22\xb5\x21\xdd\x6e\xb2\x83\xb6\xe1\x1c\xcc\xde\xcb\x4a\xef\x32\x05\xbd\xac\xa2\x9c\xf0\xec\x5c\x85\x7c\xbd\xf0\x6e\xba\xa9\xf6\xc8\xaa\x10\xd4\x53\xcb\x14\x0a\x7e\xa0\xea\x6f\xb0\x1f\xf2\x99\xe2\xdb\x1d\xe8\x61\x7b\x6f\x7b\x86\x2a\x9a\x73\x94\xe8\x82\x7a\xed\xdc\x46\xf3\x5c\xc0\x28\xd5\x15\x8a\xba\x5c\xd1\x24\xd5\xbb\x95\xc6\x59\x4c\x67\x71\x40\xfa\x9f\x39\x9d\x85\x26\x78\xc1\xe9\xb4\x2a\x7e\x2b\x4e\xa7\xa8\x7b\x87\xe9\x2c\x53\xf8\x56\xbb\x3a\xf8\xa6\xd3\x79\xe7\xe9\x2a\x59\x02\x73\xe6\x4b\xd7\x9b\x96\x4c\x12\xdd\x4c\x84\x9e\x77\x60\x13\xeb\x98\xd5\xf5\x05\xda\x40\xd1\x85\x3c\x5b\x65\x5b\x04\xdb\x31\x69\x60\xe9\xde\x28\x88\x62\xc8\x79\xe2\xba\x6b\x7d\x0b\x76\x03\x9f\x79\xe7\xd1\x86\x3b\xf8\x80\xae\x62\x53\x76\x10\x52\xd7\x20\x06\x69\x68\x8a\xc6\xb4\x5d\x42\xdc\x89\xbe\x2e\xe3\x28\x6f\x7b\x7c\x3b\xd0\x4e\x42\x52\x13\xca\xdc\x91\x5e\xbd\xed\x59\xf6\x1e\x13\x3c\x6d\xe2\x50\xc4\xff\xcc\xb9\x1a\x83\x52\x69\x90\x33\xa3\xee\x15\xbd\x8e\x01\x43\xa3\x59\x2a\x1d\x09\xad\x08\x13\x96\x12\x2e\x23\x21\x95\x13\x22\xeb\x0d\x09\xb3\xcb\x22\x40\xd8\xcf\xcb\x11\x66\xa1\xf7\x29\x7e\x10\xc9\x33\xab\x80\x9c\xb9\x30\xec\x05\xc9\x1f\x4c\x25\x13\x75\xa8\x37\x00\xe4\xc7\x83\x2e\x08\xd7\x06\x5d\x96\x95\x27\x03\x15\x2a\x40\xc3\x4c\x5e\x85\xe2\xb4\x85\xb6\x3a\xc0\x22\xfd\x86\x44\x5e\x48\x0e\xc3\xd9\x42\x88\x15\x9a\x1c\xf1\xca\x61\xce\xfa\xeb\xc1\x11\x9c\x97\x19\xd1\x99\x65\xae\x92\x14\xfa\x55\x28\xba\x3d\xa4\xf4\xcb\x2b\x9a\xb5\x09\xfd\x0c\x0f\xd9\xd7\xa5\xa6\x8f\xae\x15\xb3\x23\x3c\xc1\x20\x85\xc3\xee\x4a\x49\x80\x5d\x45\xc1\x69\x1f\x1c\xda\xe1\xb5\x59\x9d\x4b\xb0\xf8\x82\xc7\x9d\xa7\xcc\x94\x26\x94\xe7\x78\x0b\x53\x40\x67\x07\x64\xcf\x9d\xb9\xeb\x36\xc4\x15\xd6\xad\xd8\xa7\x1e\xd7\xed\xe3\xba\x45\xb7\x5f\xb7\x77\x59\x1d\x60\x21\x3c\x8a\xb2\x85\xd7\x86\x15\x13\x46\xd1\xc0\x45\x7e\x3d\x38\x72\x72\x00\xd9\x83\xcc\xe0\x00\x77\x65\x3b\x56\xcc\x4e\x8a\xa1\xe9\xe3\x41\x32\x61\x4b\x87\xb0\x85\x28\x99\x65\xd5\x99\x87\x18\xac\xaa\xec\x41\x90\x12\xef\x46\xcd\x89\xfb\x42\x1e\x50\x20\x22\x71\x69\xc9\xe6\xe1\x3f\x4a\x92\x0c\xa3\x49\x74\x45\x64\x21\x4b\xff\xc0\x13\xd4\x14\xd2\x90\x4c\x88\x4c\x0a\x73\x91\x5d\x72\x01\xd2\x29\x39\xe9\x64\xb3\x7e\x86\xff\x7b\x86\xe3\xdc\xaa\x62\x40\xaa\x68\x27\xa5\xf5\x50\x47\xd1\xa9\x1a\x94\x51\xd2\x66\x65\xbe\xaa\x9f\xec\x6c\x36\xac\x6c\x31\x92\x8a\xd5\x66\x8d\x94\x44\xfe\x60\x02\x0b\xeb\xf1\xe8\x0c\xfd\xb6\x41\xeb\x9d\x46\xa5\xa1\x4b\x8a\xdf\xdc\x04\xfa\x6d\x8f\x95\x57\x02\x9a\x48\xa2\xed\x61\x10\x86\x64\x02\xe7\x28\x40\xa6\x90\xe6\xaa\xb7\x42\xff\xb5\xab\x3f\x0e\xdf\xf7\x8e\xd1\xff\x6a\xaf\xae\xa1\x29\x03\x9a\x31\x5d\x9e\x0d\xe6\xe1\x97\x41\xb6\x06\x72\xf2\x34\x08\x57\xf8\x53\x89\x6c\x7c\x18\xf0\xeb\xe7\x59\xc6\x63\xe7\x8b\x40\x28\xcc\x5c\x19\x02\x27\x0b\x3c\x16\xb2\xbf\x02\xc8\xf2\xed\x33\x41\xcb\x5a\xc9\xae\xc7\x63\x21\xa0\xa4\xfb\x48\x00\x94\x89\x68\x96\x64\x50\x20\x9e\xe5\x03\x1f\x9b\xc5\xe1\x4b\x8c\x2b\xf9\x55\x5c\xaf\x79\x5a\xdc\x2c\xe5\x82\x39\x08\xf5\xcb\xb5\x5b\x33\x10\x51\x8d\xc6\x3a\xd9\x90\xc6\xcb\x15\x33\x64\x16\xe7\x82\x76\xc0\xaf\xc8\x84\x1a\x33\x82\x35\x80\xd2\x17\x2f\x69\xce\x69\x11\x61\xe5\x5f\x5a\x01\x5b\xb3\xf4\x5e\x88\xb7\x6b\x86\x5e\xa0\xa9\xde\xe0\x2b\xa1\x17\x88\x80\xa2\x60\
x51\xf8\xba\x18\xef\x99\x83\x8b\xf1\x1e\xdc\x5a\x94\xb7\x73\x31\x2b\x45\x2a\x2b\x0f\x5f\x50\xb0\x1f\xb5\x4d\x14\xa1\x65\x97\x5b\xbe\x0c\x9d\xc6\xb9\x97\xde\x94\x48\xaf\x1a\x76\x68\xa3\xb0\x7d\xe7\x87\x7f\x19\xb4\xa7\xa2\x64\x33\x43\xd8\x0c\x43\xfb\x20\xc0\x5c\x0f\x92\x78\x10\xe4\x1c\x66\x65\x0d\xcc\xa7\x78\x2a\x18\x0a\x2c\xd9\x51\x10\xd2\x40\x46\x6c\xa1\x7e\x1b\x2e\x33\x8b\x75\x3e\xf3\x4d\x38\x02\x34\x5b\xe1\xca\x1d\xca\xe9\x2c\xc1\xc6\x07\xde\xe1\x5c\xc9\x5c\x2c\x2d\x62\x88\x01\x8b\xc6\x41\x96\xc3\xf3\xfc\x35\x5d\x88\xd7\xa7\x35\x75\x39\xbf\x44\xfe\x12\x75\x31\x3b\x63\xce\x60\x36\x4f\x62\x2a\x38\xb8\x29\xa6\x00\xb7\xa1\xaf\x41\x69\x33\xa5\xdb\xe6\x82\x7a\xfe\xbf\xe2\x22\xc8\xe6\xa2\x60\xbf\x59\xb0\xdd\x2a\x94\xdd\x03\xdd\x9f\xd1\xff\x7e\x12\xe2\x1b\xaa\x1e\x3c\x11\xa7\x35\x7a\x29\x02\x27\x09\xa9\x3b\xbd\xb7\x3d\x17\x14\x36\x57\x37\x82\xbe\x08\x2c\x5d\xd8\x30\x21\x02\xc9\x3b\x08\x1c\xfc\x08\xd8\x00\x48\x86\x93\x1a\x81\x13\x4c\x01\x33\x4f\x3b\xd5\xd1\xb6\x8d\x26\x6e\x14\x6f\x84\x05\x0c\x03\xe9\x44\xab\x1f\x7b\x92\xf5\x61\xb9\x0d\x60\x49\x80\x33\xd5\x3e\xd4\xe2\xc7\x09\x72\x33\x19\x01\x45\x2d\x8a\x54\xc5\x2e\xf9\x3e\x01\xdb\x4f\x07\xfe\xc5\xc4\x9a\x87\x01\xc3\x96\x94\x4b\xda\xaa\x71\x89\xf3\xc4\x40\xa0\xc2\x96\x08\x1a\x0d\x38\x95\x6b\x77\x33\x76\x69\x7f\xf5\x45\x79\xf3\xaa\xf5\xca\x12\x7a\xb1\xba\x30\x06\x42\xd5\xe2\x38\xcb\xbc\xc7\x78\x8a\x82\x1c\x8d\x31\xe1\x82\x49\xcc\x57\x00\x4b\xf3\x41\x2d\x41\x61\xbf\x06\x86\x6b\xf2\x2d\x24\xce\x37\x93\x28\xa6\x46\xa2\xec\x10\x6f\x84\x4b\x54\x1f\x59\x25\x3a\x7d\x12\xfe\x94\x90\x26\x60\x7f\x4c\x8f\xbc\xd1\x05\xfa\xf1\x47\xab\x3e\x5e\x0f\xd4\x71\x78\x2b\x5d\x46\x81\x89\xaa\x4c\x71\x9e\xcf\xf5\x66\xab\x5e\x49\xbb\x45\xd2\x42\x24\x11\x86\xd2\xec\x95\x85\xa0\x79\x73\xf7\x4b\xc8\xab\xab\xe4\x20\x43\xf3\x7d\xb9\x44\x2e\x90\xd7\x99\xe9\x17\x48\xe0\xf0\x7b\xae\x0e\x82\x5f\xc5\x53\x1b\x41\xd7\x29\xf9\x56\x97\xf1\x0f\xb7\xac\x1e\x16\x6f\x6b\x7b\x20\xf9\xcd\x99\x01\x2a\x1f\xd9\xda\x9b\x67\xf9\x77\x47\x4b\x05\x30\xbd\x63\xb2\x87\xdd\x0c\x05\x0d\x92\xf1\x18\x53\xfa\x4f\x86\x5c\x34\x00\x51\x13\x43\x32\xbd\x32\xd1\x43\x12\x45\x25\x27\x6f\xb2\x8d\xa6\xc1\xa5\xf4\xca\xea\x97\x68\x77\xfd\xa0\x0e\xe8\x42\x48\xa9\x52\xbb\xb8\x78\x84\x14\x0f\x8c\x0b\xd2\xfa\x64\x7d\x1a\xe6\xb8\x2e\x40\x59\x30\xa6\xd8\xc3\x0f\x00\x06\x2a\xc9\x80\x86\x1f\xc5\x69\x74\x41\x65\x15\xce\x31\xac\x00\xf9\x55\x6a\x21\xe7\x4b\x96\x83\x66\xac\xd5\x6a\x72\xcd\x6d\x7a\x56\x2e\xdf\x0c\x46\x78\x72\x3b\xb8\x76\x81\x93\xa9\xcc\xc1\x62\x7a\x28\xc1\xb3\x82\xa0\x49\x19\x6f\x8a\xa4\x8d\xf4\x14\x43\x45\x2c\xfe\x56\x17\xc3\x06\x49\x7c\x81\xd3\x5c\x91\x61\x69\xba\x3b\x6e\x4c\x09\x16\x9f\xd4\xfa\xcf\xed\xb6\x7a\x48\xab\xa8\xce\xab\xe2\x65\x45\x7b\x98\xf9\x2e\x56\x2a\x6a\xf3\x8f\x75\xc2\xbb\x49\xc6\x47\xb3\x13\x0d\x62\x91\xc5\x6a\x9a\x64\x59\xd4\x1f\x63\xf7\x8a\xb5\x34\xb5\x98\x73\x53\x31\x50\xa6\x3d\x28\xfd\xc6\x4f\xe0\x7f\x1a\x50\x90\x50\x9f\x93\x15\xdc\x95\x7e\x17\x0e\x4f\xd6\x4a\x5f\xf0\x75\x57\xf5\x8b\xb2\x16\xd3\x3c\xa5\xec\x85\xc8\x32\xee\xc2\x7f\xe7\x14\x14\xab\xb2\x6b\xba\x73\xd9\x6b\x30\x11\x5e\xb7\x4c\xb0\x17\x16\x72\xbd\x7a\x74\x7e\xdf\x3b\x5e\xb3\x57\x90\x58\x78\xcb\x5e\x42\x2c\x1c\x09\x28\x7d\xb7\x72\x30\xc5\xf1\xf1\xf1\x07\xa3\x5a\x75\x67\x32\x79\xfa\xed\x82\xd7\x24\xba\xda\x8b\xd5\x72\x95\x4d\x8f\xe8\x2a\xce\x16\x5b\xc6\xc8\xb9\x6e\x4c\x56\xa2\xf9\x06\x3a\xb8\x09\x39\xd4\xb9\x81\x73\x03\x5b\xee\x95\x01\xbb\x02\xfc\x8e\x86\x91\xbe\xc6\x4b\xe0\x40\x16\xb0\x8c\xa6\x00\x83\xf4\x71\xb8\xf0\xa2\x2c\x30\x8e\x13\xfa\x46\x63\x80\x2c\x69\x3f\x2e\xe3\x1e\x55\x97\x34\x45\x5e\x5c\xd3\xb1\xb5\xbd\x8c\x9e\x3f\xb7\xfb
\x56\x58\xcb\xaf\xe4\x09\xcd\x37\xe4\x72\xe5\x98\x53\xcb\x41\xaa\x4e\xc2\xe4\x15\x65\xe2\x14\x63\xe3\xb2\xaa\x2a\x4a\xa0\xaf\x5f\x29\xb9\x16\x75\x56\xf8\x24\x5e\xf3\x63\xaf\xa1\xa3\xb1\xca\x49\x94\xca\xe6\xdd\x6b\xd0\x76\xe0\x6a\x43\xfc\xb4\xdf\x6e\xb0\x9e\xdb\x88\xd3\x06\x9a\x15\x17\xb9\x8c\x61\xf7\x52\x07\xb1\xfc\xba\x43\xac\xba\xc0\xbd\xe4\x62\xde\xcc\xcb\x41\x32\x99\x06\x39\x6c\x2f\x55\x97\xa1\xbc\x2d\x68\x9b\x98\x24\xfe\x54\xdd\x13\x6d\xcb\xef\x36\xc8\xdd\x97\xe1\x60\x42\xdb\x3e\xe6\xe4\xed\x20\x64\x89\xba\x5c\xbc\x51\xa1\x6f\x51\xbc\x32\xf7\x9d\xa3\x96\x91\x23\x2d\x29\x4b\xb0\xf8\x62\x0b\xd4\x48\xc4\x5d\xad\x02\x79\x67\x3b\xc6\x42\x7f\xcd\x43\x2c\x29\xee\x54\xb5\x5c\x4a\xd1\x6a\x0c\xed\xfd\x69\xfd\xaa\xdd\xec\xf8\x9d\xc1\x1a\x24\x36\xe8\xb4\x3b\xad\xf6\xb0\x3d\x3c\x5b\xe2\xaa\x78\x00\xcd\x1f\x8a\x7e\x38\xce\x91\x15\x50\x70\x8e\x85\xe3\xf0\x25\xea\x16\x8c\x8c\x86\xb5\x59\x7c\xcf\x2b\x5b\x63\xb2\xbf\xd2\xa2\xc2\x23\x5f\x27\x05\x9d\xde\x7a\xc9\xa8\x31\x1b\xf8\x82\xbe\xc5\x1a\xbe\xdf\x00\x0e\xa6\x30\xaa\x2d\xbd\x69\x90\x66\xb8\xa6\x2c\xd4\x92\x8b\xc9\x34\x53\x14\x3f\x45\x35\xab\x57\x02\x29\x8e\x68\x0c\xaf\x39\x8b\x8e\x12\x86\x81\x4c\x99\x7a\xb5\x0c\x22\xbf\x8c\x93\x0e\xc3\x2c\x29\x84\x01\xee\x04\x67\x39\xb5\x6d\x08\xc6\x96\x05\xaa\xc1\x3c\xad\x9f\xa1\x8d\x0d\x54\xac\x3d\xf4\xe3\x8f\x7a\xbb\xa7\x3e\x2b\xc3\xd7\xa4\x4b\x05\xb5\x7d\x45\x2f\x30\xcc\x96\x91\xca\x61\x8c\xc5\xaf\xb5\xc8\x4c\x79\x1a\x1e\x6a\x2d\x95\x58\xd7\x25\x17\xec\x88\x0e\x57\x41\x05\x0c\xb3\xbc\x01\x7f\x0a\x0d\xd4\xf5\x5b\x6b\xa3\xb8\x72\xab\xe3\x77\xaa\x31\x0a\xeb\xd1\xc8\x71\x0c\xf2\xa4\xd3\x89\x2a\x9a\x97\xde\x15\xf1\x45\x78\x99\x06\xd3\x29\xc8\x91\x41\xce\x9a\x97\x55\x26\x28\x20\x3b\x7d\x26\x79\xa5\x95\xae\x5e\xc5\xd5\xc7\x70\x65\x2b\x1c\x7e\x6c\x9f\xaa\x3a\x90\xdc\xfa\xb2\x47\x08\x3d\x5c\xc6\x2f\x93\xea\xb9\x8e\x40\xee\x2d\xeb\x2c\x75\x08\x8d\x43\x4a\x35\xe2\x80\x51\x5c\xec\x58\x0e\x4e\x65\x21\xa2\x74\xef\x45\x40\xa8\x6b\x88\x6a\xd2\xc4\x96\x06\x95\x62\xd7\x0e\x64\xde\x98\x37\xdd\x5d\x3c\x54\x0b\xe5\x93\xe5\xa8\x53\xe2\x7d\xce\x9a\xa6\x36\x28\xec\x77\xe1\x77\xfe\x37\x89\xe1\x62\xdf\xc2\x36\xff\xda\x0d\x8c\x2c\x4b\xbb\x46\xc5\x5c\x56\xc2\xbf\xd2\xd4\x46\x28\xae\x96\x8e\x53\xd8\xc3\x35\x58\x04\xa9\xd1\xd5\x09\xdf\xb4\x71\x4f\xac\x36\x87\x34\x50\xa2\xec\xb0\x38\xc7\xba\xbd\x58\x6f\x17\x42\x67\xa1\xe8\x39\xdb\x36\xfb\x75\x29\xba\x41\x52\x38\x9f\xd8\x02\xa0\x59\x7d\x56\x0d\xb1\xa4\xf0\xcc\x10\x01\x12\x58\x67\x6f\x23\x99\xf4\xa0\x7f\x05\x4c\xb8\x02\x36\xa0\x30\x7b\x23\xc2\x71\x85\x63\xae\x6b\x3f\xaa\xbe\x9d\x96\x6d\xda\xca\xfe\x6a\x16\xe4\xaa\x45\xcb\x27\x42\x56\xa2\x6f\x2b\xd1\x85\xa5\x88\xa4\x23\x64\xf4\x62\x96\xa1\x5a\xc1\x02\x10\x5c\x88\x9a\xc5\x84\x3e\xb0\x28\xc9\x5e\x59\x0a\x4b\xba\x40\xdd\xc2\xda\x52\x5a\xd2\x0b\x12\xd2\x1b\x5a\x8e\x6b\x37\x95\x8f\x2d\xec\x1e\x3a\x13\x13\x27\x14\x5f\xf2\xb5\x0c\x7a\xb0\xed\x49\x26\x00\xb1\x43\x69\x17\x4d\xd2\x23\xe4\xf6\xfe\x3b\xee\x53\x5a\x80\x16\x11\xe9\xf8\x1b\xec\x4d\x45\x54\xe5\xf9\x6c\x9a\x7b\xcf\x5b\xd8\x34\x27\x3b\x16\x46\x41\xf2\xa8\xbf\x35\xcb\xbe\x6f\x14\xf5\x7d\xe9\x1e\xb7\x14\x67\xec\x02\x47\x84\x81\x6f\xb0\xab\x30\x8d\x83\xa4\x5a\x90\x17\x93\x06\x58\xde\x29\xd8\xed\x37\x9c\x5f\x65\xe4\x0b\x6e\x62\x6b\x8e\x71\x0a\x73\xc3\x90\x27\x4f\xd9\xc4\x94\xa8\x8b\x74\x58\x8a\xbd\x49\x62\x32\x8a\xc2\xc7\xba\x4d\x88\x26\x16\xd6\xc6\x58\xd9\x9a\x3e\x56\xea\xfd\x0b\xe8\x98\x82\x2c\x9b\x4d\x70\xa8\xde\x27\x06\xe3\x14\x07\xe1\xb5\xb4\xdf\x29\x07\xb2\x59\x4c\xd3\x56\x56\x88\x68\xb6\x18\xdb\xb3\xf3\xaf\x85\x0e\x4d\x84\x71\x81\x89\x7a\x9a\xe1\x85\x79\xbd\x5b\x5f\x34\x8b\x17\x85\xf5\x17\x4a\xd
c\x06\xc9\x53\x15\xd2\x01\xa7\x02\x24\x88\xdf\xce\x03\x3e\x1b\x3a\x25\x79\xf5\xb0\xca\xb6\x54\xde\x2c\x76\x8d\xbc\x08\xe7\x84\xb0\xe1\x36\x21\x94\x3d\x99\x4b\x55\xbf\xd8\x40\xa5\xda\x51\x06\xad\x44\x29\x6a\x68\x26\xac\x37\x24\xef\xed\x26\x12\xf3\xae\x4c\xbe\x84\x43\xb8\x2f\xa1\xff\x96\x5f\x96\xcc\xb3\xc2\x30\x2f\x4c\xde\x53\xe8\xa4\x95\x6a\xf7\x24\x5b\x04\x3c\xdc\xe9\x93\xc6\xc8\x5a\xde\xfb\x99\x2b\x0c\xa6\x2c\x5e\x50\x75\x75\x2c\xaf\xc1\x2c\x2f\xd8\x03\xc8\x29\xa4\x19\x00\x5c\xee\x15\x52\x04\x2a\xc7\xd4\xb6\x22\x8a\x99\x25\x2f\xb3\x03\x60\x26\x33\xe7\x38\x06\x63\xde\x72\x68\x22\x4a\xb9\x03\x18\x0d\x9d\x5d\x0e\xcb\xd4\x19\x80\x0a\x4b\x12\x92\x36\x51\xa7\x05\x26\xc7\xf0\x81\xdb\xcf\xee\x0d\x51\x32\x89\x88\x8c\xe0\xa1\x80\x7e\xba\x8c\xc6\x63\xd4\xc7\xa2\xc1\x10\xa5\x41\x1c\x26\x93\xf1\xf5\x3d\x1d\xee\xa9\xd5\x04\x1b\x26\x0f\xed\xfd\xec\xc1\x94\x92\xc6\xbf\x01\x17\xa2\x93\x1c\x9a\x2c\x48\xa2\xc6\x15\x7c\x85\x07\xb3\x1c\xd7\x9e\xf3\x68\x54\xcf\x3d\x96\xb8\xc3\x63\xe6\x5b\x0e\xb1\xe8\x9e\xa0\x7b\xe8\x39\x19\x0e\xf2\xff\xcf\xdd\x67\x66\x0a\x46\xe6\x6e\x9c\x9a\x3d\x4e\xa2\x1e\xa3\x2e\xaa\xd8\xb4\x1b\xf5\xd3\x69\x66\xb3\xec\x50\x54\xff\xe0\xbc\x4a\x32\x94\xc8\x14\x4e\xad\xd3\x5a\x35\xd2\x9a\x5b\xdc\xea\xe8\xd2\x96\xd6\xb5\x29\xad\xd0\x78\xb3\x34\xf1\x40\xa1\xc0\x15\x31\xee\x8a\x34\xc8\x6c\x21\xdd\x2c\xad\xb0\x44\xde\xd2\x78\x00\xfe\xd6\x80\xb5\x84\x36\xf3\x72\x0c\xc0\x6e\xda\x50\x93\x8b\x64\xd0\x4c\x41\xce\x93\xc9\xf2\x31\x47\x2f\x4c\x7d\xb6\x92\x1a\xba\x48\xe1\x6c\x77\x96\x3a\x62\xa2\xd4\x82\x87\xf1\xe2\x48\x2d\xa4\xe8\xdb\x69\xb5\x6d\x9a\x01\x45\xc5\x1d\x32\xbe\xcc\x59\x9e\xc6\x92\x3d\x01\xcb\x21\x7e\xdd\x5e\x1f\x6e\x89\x12\x27\x14\xe2\xf6\x6f\x36\x0d\xd7\x03\xea\xc7\xdf\x6f\xed\xdc\x20\xb2\x7d\x72\x0b\x4a\xdb\x2e\x5c\x48\x79\x9c\xd9\x96\x6f\x71\x0b\x69\xc5\x2d\x1d\x76\x3b\x3f\x7c\x09\x87\x5d\x69\x7b\x96\x28\x64\x41\xf5\x38\x73\xa9\x5a\x64\x5f\xfe\x3e\xf4\xe5\xa5\xd2\xc1\x77\xa0\x8e\xf8\x9b\xa8\xcd\x2d\x8b\xaf\x92\x26\xf9\x39\x1f\x6a\x57\x58\xd9\x87\x6f\xd8\x43\x7f\x3e\xb0\x06\xbb\xd8\x8e\xbe\x91\xc2\x41\xdb\x5d\x93\xdc\xa5\xdc\xb5\xc9\x2e\x04\x3c\x11\x5b\xb8\xb8\x22\x61\x4f\x87\x57\xc8\x18\xec\x99\x6e\x7b\x2e\xef\x4e\x2a\xc6\xd2\xbe\x19\x5d\x5a\x81\x2d\x56\xc1\x60\xc5\x1a\x92\xc0\xa9\x98\x57\xf4\x25\xee\xeb\x0c\x39\x00\x84\x31\x3f\x6a\xfb\x92\x1e\xdf\x40\x63\x3f\xba\xa2\xc9\x40\xa0\x82\x75\x48\xa5\xb3\x35\x35\xcc\x54\xa0\xbb\xf4\x26\xd6\x13\xdf\x1d\xf4\xc1\x7f\x01\x3f\xbe\x67\x05\xf1\xf7\xce\x98\xbf\x47\x3d\xb1\x8d\x19\x2e\xaa\x28\xbe\x13\x63\xbc\x77\x14\x4d\x45\xf1\x7d\x31\xee\x8a\x7a\xe2\x6f\xce\xbb\xbf\xb9\xb2\xf8\xdb\x6f\x15\x9e\x62\xdb\xe3\x38\xa1\xdd\xdf\xde\x51\x49\x1f\xee\xbe\xbf\xb0\x6d\x1d\xf2\xf8\x56\xdc\x3d\xca\x14\xe4\x85\x2a\x4f\x64\xba\x94\x53\x5a\xb2\xfc\x95\x37\x67\x5e\xbb\xf9\xbd\x26\xa5\xbc\xf7\x1c\x94\x8b\xe6\x9e\x54\x72\x4e\x1a\x88\x99\xe9\x27\xb5\xb4\x93\xbc\xa2\x23\xf1\x24\xe8\x47\x0b\xe0\xe2\xa7\x9a\x7c\x72\x3f\xc8\x47\x1e\xb2\xa4\xa0\x2c\x8e\xd7\x1f\x92\x41\x30\x46\xd3\x64\x7c\x3d\x8c\xc6\x28\x19\x22\xba\x69\xb1\x53\xbc\xe5\xc8\xcb\x62\xdb\x6f\xa8\x05\xb5\x86\x15\xc6\x24\x5e\xef\x90\xf7\x37\xaf\xcd\xd8\x41\x92\xad\x65\xff\x77\x83\xa9\x81\x8d\xe0\xac\x4f\x66\x50\x27\xe2\x9d\x95\x69\x9a\xe4\x09\xf9\x84\x36\xc8\xe9\x43\x2f\xc0\xea\xa1\x0d\x14\xe3\x4b\x82\x40\x39\x84\x78\x36\x1e\x3b\x16\x8a\xc0\xa0\x58\x26\x52\xbc\x23\x5b\x24\x4f\x3e\x27\xe5\x4a\x6e\xa7\x62\xfb\x43\xd4\x4f\x83\xf4\x7a\x9e\x8e\x5c\xca\x0f\xea\x04\x05\xd9\x42\x99\xd6\x93\x08\x17\xbc\xcb\xc1\x18\x45\xf1\x08\xa7\x91\x12\xc0\x55\x89\xe8\xa0\xe7\x19\x35\x23\x8c\x9a\xd3\x59\x21\xec\x1f\x8f\x31\x0c\xee\x71\xc2\xcf\x
60\x14\xe4\x1c\x21\x16\xca\x83\x8a\x41\xc6\xa9\x12\xa1\xb2\x38\x80\x5c\xee\x4a\x2e\x70\x9a\x46\x21\xce\xd0\x21\x55\x88\x44\x38\xa3\x0c\x7c\x7a\x8d\xa2\x98\x65\x33\x2e\x10\xa8\xd0\x82\x9e\xab\xe1\x64\x51\x00\x86\xcc\xe5\x28\xb7\x48\xd4\x40\x32\x51\xfb\xd7\x27\x94\x84\x15\xe9\xa6\xc4\x24\x51\xf6\x17\x8b\xf0\x38\xec\xa2\xe7\x90\x29\xeb\xb9\x6e\x38\x62\x6f\x93\xfc\x4d\x70\x3e\x4a\xc2\x52\x1f\x79\xa9\xb4\x1e\x23\xdf\xe6\x78\x86\x90\x19\xce\x90\xa2\xaf\x18\x64\xf3\x79\x75\x06\x31\x9c\x06\x97\xb1\xf9\x45\x62\x24\x44\x58\x28\xd2\xea\xb9\xcc\x89\x37\x67\xe7\x13\x1c\x5b\x4c\x87\xc9\x8e\x52\x8e\x05\x2a\x98\x0f\x3b\x77\x15\xe5\xad\xe9\x1f\xac\x08\x30\x33\x29\xee\xfa\x15\x09\xc7\xd2\xd4\x8e\xd3\x0f\xbc\xc9\x51\x90\x1d\x5c\xc6\x8c\xec\xaf\x6b\xcf\x49\xcd\xe7\x4b\xc2\xe7\x89\x3c\xc2\x26\xc8\xcb\x93\x17\x73\xfb\x41\x6b\x95\x4e\xb7\xa5\xd6\xff\x93\xcd\xa6\x44\xd4\x8a\xa3\x7c\x25\x20\xc2\x29\xdb\xfa\x82\xf4\x7c\x46\x46\xd7\x3a\x1e\xc8\x92\x41\xa1\x64\x9c\x0a\x8f\xdb\xf4\x79\x86\x0a\x8e\x1e\x51\xa5\x30\x9f\x74\xba\x4a\x4d\x08\x72\x07\x95\xfd\xc0\xb1\xed\x20\xae\x18\x1f\xe2\x14\xc7\x03\xd2\x00\x8c\xf3\x54\x5f\xaf\xc6\x30\x30\xb9\xd8\x06\xd0\xb9\xcf\x20\x5b\x6a\x0c\x1b\x53\xdd\x86\x95\x92\xc9\x4c\x93\xaa\xbc\x67\x31\x1d\x07\x98\x40\xba\x6a\xcd\x10\xa8\x9b\x7c\x3e\x8a\x0c\x36\xb5\x25\x71\x0d\x47\x44\x69\x08\x29\x07\x40\x6a\x4b\x7f\x67\x5e\xc9\x23\x96\xa3\x0d\xc6\x36\xf9\x9d\xc5\x5c\x5e\x44\xcb\x95\x73\x3c\xb3\x11\x58\x72\x45\x9c\x6c\x73\xe5\xf2\x08\xea\xd2\x1a\xe1\xef\xd4\x75\xe2\xa4\x1a\x5e\xfc\x36\x64\x53\xe6\xae\xee\x98\x2b\x74\xc0\x98\x19\x4b\x12\x00\x24\x05\x26\xf4\x61\x88\xb2\x64\x82\x69\xea\x29\x74\x39\xc2\x31\xba\x4e\x66\xa9\x30\xb3\x0f\x88\x38\x4b\x81\xdf\x73\xec\xdc\xbb\xee\x82\xba\xa3\x73\xd9\x5e\x86\x28\x03\x58\x59\x31\x47\x46\x0c\xfd\x2d\xb7\xbb\xb9\x68\x54\x9a\xd3\x5e\x32\x25\xc2\xce\xb4\x90\x7b\x98\xbc\x73\x07\x71\x4a\x02\x06\x1a\x26\x45\xa6\x9a\x80\x26\xf2\x9e\xa7\x94\xad\x4e\xba\x7f\x56\x95\x5f\x6e\x39\xee\xd0\x88\x72\x89\x2d\xfa\x67\x5d\xe3\x22\xe2\x21\xbf\x6c\xfb\x18\x4c\xc0\x68\x62\x4e\x3d\xc4\xb6\xea\xa2\x98\xbe\x59\xcb\x00\x97\x4a\xb7\x58\x32\x9d\xa7\x72\xf1\x33\xb4\x21\xb5\xaf\x7e\x5a\x20\x75\x91\x63\x93\xdd\x46\x97\x49\xfc\x3c\xa7\xf2\x33\x77\x77\x94\x82\x17\x8e\x93\x64\x8a\x82\x7e\x72\x61\xd9\x06\xcb\xbb\xfc\x9c\x43\x7b\xee\xee\x30\x70\x51\xd1\xaa\xdc\x4f\xf1\xb6\x42\x5e\xad\x4a\x8b\x47\x1c\x4e\xa0\xa7\x60\xff\xb2\xc8\xba\xb1\x6d\x7c\x83\x71\x12\xe3\x07\xe0\x78\x00\x17\x6d\x14\x7b\x08\xbc\xa8\xb0\x93\x91\x62\x73\x37\x32\x39\x17\x89\x2a\x1c\x71\x7e\x6a\xb5\x27\xb3\x9f\x91\xad\xb7\xfb\x31\x0a\xc0\xf3\x56\x8b\x45\x58\x1a\x59\xc8\x88\xf3\x5e\x0e\xc2\x16\x9e\x46\x18\x3f\xa8\xe1\x10\xb3\xe8\x3c\x8e\x86\xd1\x20\x88\x73\x16\x50\x32\xa2\xbd\x07\x90\xb4\x1d\xdb\x31\xf9\x17\xc9\x83\x98\x9e\x95\xe5\x37\xf7\x10\x36\xc6\x6c\x5e\x27\x0b\x47\x18\x7c\xd9\xf4\x6a\xce\x58\x23\xab\x59\x98\x18\x29\xed\x06\x63\xee\xa0\xe1\x7b\x4b\xf5\x22\xfb\x67\x2b\x1b\xbb\x61\x0b\xe3\xd0\xfe\x57\x07\x70\x5a\xbf\xaa\xd7\xeb\x7e\xbd\x51\x6f\x7a\xa8\x7e\x55\x6f\xd5\xdb\xf5\x4e\x7d\xed\xec\xc1\x00\x7b\xa8\x53\x39\xf4\x0a\x0b\x5f\xc7\x67\xc4\x58\xb1\x97\xcc\x21\x18\x96\x2b\x7f\xa0\xff\x7e\xfd\x0a\x31\x7b\x35\x51\x63\x88\x6a\x62\x7a\x7f\xd8\xb0\x28\x0a\xe5\x3f\x80\x2a\x19\x0d\xf1\x9f\x95\x8d\x49\x75\x00\x94\x3c\xc6\x38\x3e\xcf\x47\xd4\xf4\xc8\xc9\x45\xaa\xc7\x8c\x29\x16\xca\x62\x91\x62\xb6\xe3\x41\x12\x12\x7a\xc7\xf4\x87\x4e\xee\xf0\xba\x3c\xf6\xa7\x20\x00\x1c\x0f\x56\x76\xf1\x95\xbb\xcd\x79\x01\x64\x2a\xad\xf6\x85\x83\xbb\x14\xc4\x5a\x21\xb2\x8b\x25\xae\xc1\xbc\xb0\x2e\x96\x2a\xca\x90\x7c\xca\x87\xeb\x0b\x45\x73\x61\x53\xe1\x8c\
xe5\xc2\xa7\xea\xeb\x57\xb4\x8b\xaf\x4a\xc3\xb7\xcc\x21\xa0\x41\x90\xe3\x98\xed\xf9\x2a\x05\x39\x98\xbf\x9b\x90\xa4\x7b\xd8\x62\xc0\x4f\x18\x37\x94\x28\x13\xd2\xfc\x2e\x7a\xaf\x5b\x15\x97\x2a\xb4\x21\xb0\xf3\x79\xfc\x0c\xf1\xa6\xe1\x4e\x69\x06\x25\x75\xa6\x44\x03\x3b\x2f\x16\x8e\x84\x0c\xec\x2f\x06\xc3\xb2\xf8\x2a\xe6\xa3\x40\x84\x3a\x28\x48\xcc\x5d\x3a\xca\x8e\x0b\x1e\xa3\xf0\x1c\x07\xf0\x63\x95\x25\x51\xf8\x45\x1d\xa3\x53\xbd\x71\x30\x99\x22\x7c\x05\x91\x24\xfb\x91\xde\x39\x7a\xaf\x4a\xca\x98\xb7\x0d\xf4\x3e\x75\x60\x0b\x92\xa2\x20\xfe\x2f\x47\xa0\x74\xa8\x4f\x44\xd2\x18\xc3\x56\x8b\x82\x1c\x05\x28\x8f\x26\x16\x89\xdb\x16\x92\x5d\xee\xae\x3b\x29\x84\x3c\x38\xa4\x28\xda\x20\xe8\xb1\x59\x38\x8d\x78\x54\x6c\xf2\x4f\xad\xd1\x42\x2f\x51\x2d\xa2\x18\xbf\x40\xeb\x4b\x4b\x22\x5a\xb6\x53\x8a\xa7\x70\xd4\x1e\x2f\xa3\x48\x84\xdb\xfe\xba\x51\x34\xfd\xe6\x0d\x6f\xc3\x52\x5e\x34\x5a\x41\xf0\x77\x6e\x4b\xf2\x98\xd2\xc5\x75\xa7\x31\x75\x47\xb9\xaf\xda\xfd\x0d\x64\x0e\x76\x95\x8c\xc1\x26\x15\x8a\xcd\x76\x79\x43\x45\xd3\x96\x63\x25\x88\xe2\xa0\xaf\x9f\x3c\xa4\x03\x40\x55\x76\x4a\x63\x70\x10\x21\x50\x11\x0c\xa3\xfc\xae\xa2\x60\xb1\x38\xc5\xea\x72\x30\x29\xf2\xb9\x6a\xe8\x5e\x0b\x6b\x32\xe5\x28\x5b\x5c\x24\x27\x93\xb1\x33\x0c\x8b\xa8\x76\x2a\x60\xf0\x38\xf3\x1b\xb0\x74\xe8\x1f\x90\x7e\xb3\x41\x48\x3f\x53\xf8\x82\x85\xe0\x15\x51\x6a\x03\xed\x07\xf9\x68\x65\x80\xa3\x71\x51\x73\x15\x2d\x10\x91\xc8\x7e\xfe\xad\xb4\xf3\x38\xcc\x91\x8c\xe3\xef\x6d\xed\x3e\xd9\x71\x57\xa6\x05\xe3\xbc\xab\xd2\xc2\xbc\x73\xae\x0c\x16\x4e\x6a\x14\x57\x39\xfa\xb9\x79\x72\x5e\x31\x69\x84\x99\xdf\xd7\x9d\x26\x75\xa4\xde\xe2\x53\x20\x89\x0d\xc3\x68\x3c\xe6\x61\x67\x99\x9b\x04\x9c\xb7\xe6\x0b\x25\xfc\x30\x17\xdb\x0e\xbd\x32\x28\xa7\x8b\x4f\xa5\x59\x66\x90\x2a\x11\xca\x7d\x19\x9f\x55\x38\x82\x31\x57\x10\xdf\x7d\xd2\xa2\x25\x64\x32\x89\xed\x47\x2c\x99\x3d\x98\x07\x2a\xf2\x35\x55\x6f\xc8\x27\x9f\x2f\xdd\x51\xe6\x3f\x5f\xa2\x0d\xf2\x5f\x47\x02\xb5\xc9\xe7\x3f\xc8\x36\x73\xd5\x0c\x42\xdc\x59\xef\xeb\xe1\xd7\x45\xb1\x20\xfb\x82\x64\xce\x51\x72\x4f\x50\xe1\xee\x8e\xb6\x5a\xab\x5f\xbd\xaa\x77\x5e\xa1\x17\xa4\x0b\x7f\xc0\x9e\xbe\xb3\xb3\xb3\xb3\x84\x96\xe9\x8b\x9f\x7e\x42\xf5\x2b\xbf\x0e\xdb\x3d\x41\xc0\xb1\xdd\xd3\x2e\xd6\xea\x57\xad\x4e\xbb\x4e\x81\x5d\xea\xc0\x2e\xab\x02\x83\xe1\xc5\xd9\x0c\x3c\x7d\x6a\x80\xc6\x9b\x37\xb4\x26\x5a\x46\x30\xd2\xa5\xf5\x59\xdd\xd5\x0d\xa8\xc3\xfe\xca\xcb\x2e\x6f\xa0\xfa\x4a\xdb\x59\x06\xc6\x94\x15\x7d\x41\xed\x6d\x38\xb5\x2d\xa1\x9f\xd0\x4a\x1b\xfd\x07\xf2\x51\x17\xbd\xf4\xab\x88\x28\x06\xe7\x50\xc5\x0d\x0f\xa5\x83\x60\x30\xc2\x2c\xbb\xce\x7c\x81\x83\xd4\xfc\x4c\xe8\x31\xad\xd5\x68\x55\x72\x54\x52\x90\x24\xbb\x89\x34\x18\xf6\x2b\x26\x5a\x75\x03\x7d\x4e\x6b\xb4\x3c\x10\xe4\x5a\x7f\xcd\xd2\xa7\xcb\x22\x87\x4f\x4d\x94\x2f\xe0\xa3\xaf\xa8\x5e\x31\xac\x79\x8c\x2f\x25\x67\x27\xb8\x75\x64\x0a\x90\x98\xa7\xef\x79\xa2\x8d\xa4\xdd\xf9\x94\x1d\xed\xe7\x19\xd2\xe0\x78\x00\x86\x34\xf4\x5f\xbb\x21\xcd\x2e\xbe\x32\x35\x01\x36\x70\xa4\xe0\x06\x05\xba\x42\x7f\x57\x8b\xbf\xa9\xab\x2f\x46\xf8\xaa\xb2\x0a\xa3\xc2\xc9\x73\xc1\xa8\x9a\x95\x5a\xbf\x2f\x46\x3e\xc2\x57\x66\x08\x4d\x36\x7e\xd2\xd1\x7e\x7e\x22\x21\x6b\xe0\xcc\xdb\x1e\x53\x2f\x2b\x9f\x3c\xb3\x45\x8f\x91\x74\xd6\x4d\x40\x23\x7c\xd5\x1b\x05\x69\xe5\x3c\x5b\xd9\xdc\x03\x1d\xe4\x48\x8b\xe8\x41\xee\xf2\x8e\x87\x38\x8e\x1d\x5b\xe3\x00\x96\x00\x69\x2d\x15\x6a\x1f\xbf\xb3\x64\xe3\x77\xb6\xaa\xa4\x9d\xfa\xb0\xbc\xae\x83\x41\x08\x70\xbf\x27\x51\x5c\x7b\xfe\xfc\x16\x11\x37\x25\x0a\xa7\xeb\x6d\x11\x4d\x0f\x5f\x29\x94\x70\xab\x2f\x18\x87\xf0\xf4\xd7\x4b\x4d\x7c\xb1\x51\x9b\x6d\xb1\x1e\xab
\x47\xca\xa4\x55\x16\x4b\x94\x42\xeb\x7c\xe0\x47\x17\xfa\xc8\x8e\x32\x8b\xac\x9a\xcb\x45\x52\xd3\xc9\x8d\xb2\x2d\xb4\x51\x92\x1f\x93\xae\x96\x26\x68\x26\xa0\xd3\x7b\x71\xce\x3a\xbb\x92\xcd\xfa\x59\x9e\xd6\x22\x0f\x35\x96\x3c\x48\xc2\x57\xa8\x2c\xc8\x8a\x5a\x5f\xb2\x39\xe0\x2e\xbc\xe7\x29\xc3\xb4\x8a\x1a\x55\xdd\x67\x3f\x04\x79\x14\xfb\xd5\x36\x2d\x56\x96\xef\x5b\xe2\xf1\x76\x5b\x17\xab\xfe\xd7\xed\x5e\x55\x11\xb8\xaf\x35\x35\x86\xf6\xec\x7b\x18\xc5\xe5\x7f\xd4\x36\x46\x87\xe3\x3b\xde\xc9\x24\x04\xe9\x8e\x44\xa7\x6e\x65\x98\x26\x13\xf2\xb6\x97\x84\x18\x36\xa9\xaa\x1b\x92\x0c\xf0\x0e\x7b\x92\x42\xb7\xb7\xdf\x96\x04\x39\x2e\xb4\x18\xbe\xeb\xcd\x89\xad\x22\xba\x3f\xc9\xcb\xad\xfa\x16\x25\x6a\x2d\xb6\x4b\x89\x6a\x62\xa3\x12\x6f\x1e\x7a\xaf\xd2\x9a\x9e\x97\xcb\x39\x92\xb4\xe8\x45\x6f\x57\x06\x8c\xa0\x37\xf3\x5a\xc4\xd7\x84\xbe\x55\xd9\x75\x8b\x0b\x6f\x55\x1a\xc2\x55\x77\xaa\x4f\x27\x3b\x2f\xd7\xab\x6d\x54\x9f\xf2\xe1\xba\xd8\xa6\xd8\xc3\xed\x36\x29\xda\xe8\x5f\xb7\x47\x55\x6c\xff\xbe\x56\xd6\x2c\x1f\xae\xdb\x37\x28\x32\x8a\x0f\xb9\x3d\xe5\xe9\x75\x89\x81\x51\x88\xc9\x11\xfd\xd3\xd1\x5e\x8f\x7b\x3a\xd5\x70\x36\x08\xa6\xb8\x56\xb2\x71\x9a\x6c\x19\x0d\x82\x7c\x30\x42\x35\x33\x7d\x34\xa0\x30\x4a\x93\x4b\xa0\x5b\xc8\xb8\x52\x7b\xbe\x1f\x8c\x87\x49\x3a\xc1\x21\x9b\x86\x30\xc8\x03\x33\x05\xdd\xe2\x0c\x5c\x9e\xd4\xdb\xf3\x6f\x36\x57\x8b\x90\xc9\x77\xcd\xbc\x81\xc2\x28\xeb\x2e\xc8\xb0\x3a\xe3\x66\x75\x5c\xc6\x00\xca\xd6\x30\x8b\x19\xf5\x50\x0b\x01\x85\xae\x38\x9c\xa5\xca\x01\x68\x44\x0a\x5e\xc8\x85\x89\x43\x96\xcd\x4c\xf2\x42\x77\x66\xe2\x95\xec\x64\xaf\xa5\x94\x68\x93\x59\x96\xa3\x3e\x46\x11\x19\xd1\x09\x8e\x73\x9a\x67\x2d\x80\xeb\xf5\x14\xe7\xc2\x63\xa1\x52\x6e\x5f\x2d\x4f\xa7\xaa\xdc\xa7\x39\x0e\xa9\x6b\x55\x91\x20\xfe\x0b\x9e\xe6\x68\x16\x4f\x79\xd2\x40\x35\x3b\xa8\x64\xd3\x52\xb7\x70\xdf\xb7\x6c\x1c\x20\xd3\xe0\xa6\x18\x05\xe1\x25\xe6\xfa\x5c\xd1\x0c\x0e\xb2\xbb\x32\x6b\x1e\x6d\xa4\x9f\xb3\x24\xda\x2c\x89\x69\x9e\xa0\x28\xcf\xb8\x57\x0c\x22\x14\x7c\xd7\x3b\xa6\xbe\x15\x79\x9a\x10\xd7\x7d\xc9\x54\x29\xeb\x2e\x33\xef\x43\x60\xa5\x6c\xb3\x19\x80\x0c\x9c\xcc\x53\x51\xdb\x59\x75\xa6\x44\xcb\xc7\x5b\x41\x1e\x70\x61\xbd\x5e\x55\xd2\xdc\x0c\xc3\x0c\xda\xe0\x79\xc1\x1d\x23\xcd\x68\xa1\xfa\xa6\x28\x82\x2c\x18\x99\xc7\x99\xb1\x0b\xa2\x6b\x9e\x39\x01\x50\x7e\x49\x7d\x4a\x02\xc9\x82\x92\xda\x13\x03\xc7\x7b\x98\xc9\xfc\x4c\xd1\xa9\x3d\x37\xf9\x7d\xa5\x7a\xf3\xf7\x46\x56\xb2\x4a\x32\x73\xd3\xbd\xbe\x48\x47\x27\x07\x14\x95\x06\x88\x05\x13\x55\x41\xc9\x3e\xce\x40\x46\x73\xe2\x44\x32\x5a\x93\x98\x32\x60\x38\x3f\x52\xda\x26\x74\xcd\x45\xbe\xdc\x94\xc8\x06\xcc\x20\xda\xe5\x0d\x35\x49\x7a\x55\x0a\xe6\xb9\x4e\x33\x14\x5c\x04\xd1\x18\x22\x76\x51\xbe\x00\xcc\xce\x4d\x35\x27\x92\xb3\x4a\x14\x5f\x24\x5f\x70\xa6\x27\x19\xae\xb1\xe4\xc0\x1e\xba\x1c\x45\x83\x91\x95\x55\xf7\xaf\x4b\x58\xb5\xd9\x2a\x5f\x28\xfd\x24\x19\xe3\x20\xbe\x41\x61\xb2\x33\x9e\x65\x23\xf4\xcb\x08\xe7\x34\x9e\x09\xcf\x45\x0b\xee\x5a\xd3\x20\x05\x46\xc1\x5e\x15\x5c\x5b\xb0\xeb\x5b\x84\x03\x11\x9c\x1e\x46\xfc\xee\xdb\xbc\x00\xb8\x45\x09\xc9\xb5\x66\x78\xaa\x5c\x57\x5c\x8e\x05\xc1\xd8\x33\x05\xab\xb1\x56\x69\x51\x65\xf1\xd1\x01\x5f\x50\x67\xc2\x96\x48\x41\xdc\x16\x6d\x09\x79\xcd\x8d\xd3\x60\x64\x5d\x6a\x15\xf2\x51\x32\x34\x73\xd1\x3d\x2f\x5e\xc8\x0a\x1b\x5a\x4a\xe6\xb2\xc2\x1c\x7a\x51\xdb\x1e\xd1\xaf\x97\xcc\xe2\x9c\xd3\x97\x85\x99\x10\xa0\x31\x4d\x24\x7c\x04\x71\x8b\x37\x54\xfc\x57\xb5\x26\x5f\x9b\xbc\xc8\x35\xe4\x0c\x83\xa3\x64\x16\x87\x68\x36\xa5\x0e\x85\x83\xf1\x2c\xc4\x1a\xdd\x9b\xd5\x34\x8c\x0a\x23\x17\xf9\x43\xf5\xd8\xb6\x02\x8b\x30\xb9\x8c\x65\x3c\x92\x7
8\x7c\x8d\x86\x33\xb1\x28\x2d\x91\xf4\x57\x57\xd1\x18\x67\xd4\xa9\xd2\x2e\x6b\x01\xdf\x48\xf1\x24\x88\x62\x55\xb8\xaa\xd6\xaf\x49\x70\x55\x53\xfa\x05\x17\xa7\xe8\xa5\x2d\x33\xbb\x37\xff\x4a\x55\xcc\x39\xd5\x3c\xb8\xa6\x1c\x28\x99\xe3\xa1\xb4\xfe\x02\x49\x04\xe8\xa2\x27\xa0\x0d\x27\x39\x91\xaf\x6a\x1f\xa3\xb8\x26\x37\xf9\x02\xb5\x3c\x85\xce\x6c\xe6\x93\x3c\x83\xb7\x8d\x48\x08\xdd\x49\x00\xcb\xdd\xb6\x28\x9f\xa7\x6a\x16\xf6\xfb\x8d\x3c\x02\xe2\xed\xb2\xb4\x9e\x9c\x46\x13\x04\x33\x9c\x92\xd3\xa4\xd8\x18\x5e\x16\x07\x04\x70\x86\xb4\x57\x64\xdc\x45\xdd\x83\x04\x57\xb1\xe5\xaa\x77\xcd\x31\x52\x52\x60\x15\x0c\x1f\xa6\xdc\x2c\xaa\x70\x5f\x99\x85\xe9\xc9\xb0\xe4\x11\xb5\xa0\xa1\x70\x32\xf4\x72\x43\x9e\xe9\xf9\x54\xc9\x63\x8b\x96\x61\xeb\x56\x38\xa9\xf8\x7b\x72\xd3\x77\x35\x76\xab\x9c\x85\xb2\xd4\xc9\xeb\x8e\x56\x6e\x8e\xdd\xf0\x2f\x32\x79\xfb\x6c\x6c\x88\x05\x26\xd6\x19\x2b\xb5\x78\x53\x79\x98\x38\x69\x3a\x32\xd1\xf3\x33\xf8\x28\xc8\x20\x43\xae\xf3\xc4\x3d\x37\x15\x79\xc1\xae\x65\x1f\x28\x3a\xe9\x0c\x3a\x0d\xbb\x86\x33\x94\xc4\xd2\x51\xd8\xef\xa0\x5a\xdb\x6f\x80\x25\xeb\x92\xe5\x58\xbc\x4b\x2b\xf3\x63\xb0\x78\xb4\x9f\x87\xef\x25\xea\x6b\x59\x06\xb2\xd2\x80\xa9\x65\xae\x66\x74\x10\x16\xc8\x49\x7e\xdb\xe8\x76\xa4\x21\x44\x43\x24\xcf\x0b\x72\x57\xd9\x86\x44\xcc\x81\x12\xba\xed\x78\x77\xb3\xd1\xee\xd8\x9d\xc4\xca\x52\x5d\xdf\x3a\xc2\x1a\x8f\xad\x56\x3d\xcc\xda\x31\x16\xe1\x3d\xdc\x1a\x02\x53\x0d\x31\xc7\x12\xbb\xd0\xa4\xf0\x85\x73\xff\x2a\x13\x46\x2f\xf7\xa1\x22\x01\x84\x65\x15\x8f\x5a\xc2\xb1\x92\x00\xb4\xc2\xbc\x4c\xa9\x41\xdf\x9b\xd9\x70\x58\x36\x66\xbe\x21\x1f\x2d\x36\xd6\x9f\xa6\x21\xb0\x0c\x79\xb0\x69\x5a\xfe\xea\x19\xfb\x9c\x11\x84\x29\x70\x3d\x8e\x70\x65\x17\x22\xca\x8a\x98\xff\xd0\xdc\xe5\xbd\xc0\x9c\xcf\x00\xaf\xda\x73\x86\x94\x4d\x97\xa2\x96\x9c\xaf\x3a\xa1\x05\x65\x42\x51\xc6\xc0\xb1\x1e\x1d\x1a\x09\xa6\xb0\x51\x21\x58\xc8\x83\x8d\x2f\x11\xd2\x09\xbe\x36\x50\xd2\x39\xd6\x14\x7f\xef\xcd\x77\x62\x87\x25\xb9\xc9\x04\x2e\x4e\x06\x89\x3e\x26\x80\x72\x90\xd3\x7c\xf1\xac\x66\x11\x33\x14\x45\x19\xc2\xc3\x21\x1e\xe4\xd1\x05\x1e\x5f\xa3\x00\x85\x38\xcb\xd3\x19\x3c\x7b\x20\xa7\xbf\x4c\xe2\x01\xae\x14\x65\xb4\x22\x85\x2a\x89\x1e\x00\xa5\x22\x20\x37\x94\x58\x5c\x73\x41\x06\xe1\x9e\x76\x06\xb4\xc1\xc9\x51\x24\x13\x72\xa8\x25\x1c\xa5\xcb\x08\xbd\xa4\xda\x7c\xaa\xe7\x45\x17\xa2\xfb\x1d\xcb\xf8\x9a\x07\xa2\x72\x30\x68\xde\x5a\x99\x27\xc0\x2f\xc0\x59\xa5\x11\xe2\x4c\x76\x47\x9a\x07\xeb\xe2\x21\xe5\x5d\x8b\x47\x4a\x7e\xd7\xf6\x1b\xab\xcd\x46\x35\x31\x3f\x63\x1a\x1f\x25\xfe\x7d\xc0\x26\xed\xb9\x08\x9c\x14\xc5\x39\x4e\x87\x92\xb5\x30\x72\xae\x0a\xce\x5f\x59\xd7\x39\xd5\xd2\xed\x96\xc5\x47\x0c\xd0\x08\x8f\xa7\x38\x25\xe2\x4f\x85\x45\xb0\xc3\x70\x63\xbe\xc1\x3a\xca\xdf\xe0\x1e\x8f\xca\x4c\xba\x53\x05\xed\xea\xca\x67\xda\xab\x5d\xe8\x52\xcd\x26\x6c\xb9\xf5\x73\x72\x55\xc5\x78\x10\x40\xbb\xee\xf7\x8c\x75\x61\x0f\x80\x8b\xd4\xf3\x22\x5b\x89\x70\x58\x54\xb3\x88\x15\x19\x2e\x55\x0a\x5f\xfc\xd8\x68\xa5\x27\xc2\x92\x77\xf7\x37\x7b\xf7\x4f\x4f\x44\x84\xe6\x41\x29\x48\x0b\x8c\xae\xfe\x16\x34\xb5\x3b\x09\x06\x95\xe8\x6a\x12\x0c\xee\x42\x5b\xa2\xfa\x9d\xe8\xeb\x0b\xb6\xab\x90\x24\xfa\xea\x7d\x06\xb4\xc8\x3c\x50\x22\xa3\x8d\xd0\xba\x8b\x11\x5b\xe9\xf1\x57\x68\x92\xe6\xf8\x30\x10\x6c\xc0\x89\x81\xfd\x28\xbc\x18\x78\xa6\x16\x08\xe9\xbb\x1f\xe4\x23\x1a\xd6\xf7\x09\x7f\xcf\x86\xf9\x75\x11\xe9\xf7\xe6\xcc\x6b\xb7\xbe\xd7\xf0\xbe\x0c\x99\x1a\x0f\x47\xbc\x74\xef\xf1\x7e\x39\xe4\x45\xe3\xfe\x0a\x0c\xe5\xf8\xbf\xae\xa0\xbf\xe2\x3b\x04\xff\xb5\x05\xd0\x35\xaf\x28\x78\xd4\xd8\x62\xca\x24\x02\x90\xa2\xc1\x4a\xef\x4b\xc2\xd3\x28\xb5\x25\x
17\x18\x57\x18\xd9\x4e\xab\x9a\x89\x16\x2b\xcb\x8d\xb4\xc4\xe3\xed\xcc\xb4\x58\xf5\xbf\xce\x4e\xab\x2a\x02\xf7\xc5\x29\xfb\xd0\x9e\xdd\x54\x8b\xe2\xf2\x0f\xb0\x25\x36\xca\x4f\x82\xa9\x10\x0e\x27\xc1\x74\xf1\xd8\x0b\x16\x17\x71\x13\x84\xcb\x2a\x93\x8e\xf9\x6d\x0d\x96\xd1\xf2\x06\x6a\xba\x6d\x96\xaf\x73\xec\x5b\x8c\x96\xe9\x9f\xcb\x74\x99\xfe\x39\x0d\x98\x39\xe0\x46\x01\xb8\x16\xa1\x65\xe4\x2f\x59\x6c\xa2\xf9\x97\x2a\x96\xd1\x1c\x70\x53\x03\xdc\x70\x02\x6e\x58\x01\xdb\x21\xe7\x69\x34\x1d\xc3\xd5\x4b\x8d\x0e\xcb\x9b\x37\xe0\x37\xf1\x95\x3e\x37\xc8\xf3\x3a\x79\x04\x14\x6c\x50\xc4\x54\xfc\x4e\xa7\xa2\xf6\x3b\x7a\x43\x5a\xff\xf1\x47\x04\xd8\xfc\x8e\x5e\xa0\xfa\xca\x5a\x5b\x9a\xa1\xa5\xd7\xe8\xf7\x92\x70\x17\xd2\xdc\x53\x5b\xf0\x49\x30\x05\x9b\xd9\xcd\xbc\x56\xe3\x08\x43\xa7\x3b\xe8\x05\xaa\x35\xd1\x4b\xf4\xfb\x12\xeb\x69\x73\x68\xf5\x76\x32\xe2\x33\x98\x8a\x8b\x30\xe4\xe9\xbe\x4d\x6a\x64\x1f\x08\x4a\x68\x03\x49\xe8\x74\x0c\x67\x12\x88\xad\x57\x14\xb7\x1b\x07\x8f\xa2\x31\x46\x35\xb9\x9f\x2c\x5c\x80\x2b\xd6\x88\x75\x58\xe4\x66\x16\xef\x33\xe3\xac\x32\xd4\x3b\xd8\xc9\x2b\x3c\xf9\xf6\x76\x96\x82\xd5\x2e\xc4\xe8\xbf\x6b\x53\x4b\xb6\x43\x50\xbb\x1e\x79\x2b\xa9\x6e\x6e\x29\x6a\x2d\xb8\x39\x88\x7a\xc2\x50\x5e\xbc\x11\x86\xf2\xf3\xf9\xbe\x51\x22\xc5\x17\x38\xcd\xf0\xbe\x54\xb0\x78\x65\x8b\x6b\xf6\x43\xf1\xd9\x49\xdd\xa5\x40\x6d\x5b\x00\xff\xd3\xf9\x0f\x61\x3f\x64\x85\xb2\x0e\x96\x72\x1a\xb5\xe1\x53\xbe\xb0\x99\x6d\xfe\xef\x4b\x67\x68\x03\xfd\x5e\x2d\x56\xa7\x85\xa5\xec\x9d\xc7\x49\x8a\xbf\x19\x57\x91\x40\xee\xc5\x21\xf8\x39\x17\xd3\x1d\x91\x37\x07\xc3\x79\x3c\x43\x6a\x87\xc2\xf8\x61\x63\x03\xbd\xf4\xe7\xf0\x24\x99\xc2\xe4\xda\xb7\x62\xc4\x56\x91\x20\x15\x69\x2f\x33\xfc\x21\x49\xa6\xc5\x92\xf0\x74\x1c\x3c\x69\x46\x15\x91\x43\xbb\xf1\x0c\xa6\x5d\xf4\x7c\xf3\x6d\x6f\x6b\x7b\xe7\xdd\xee\xde\x7f\xbd\xff\xb0\xff\xf1\xe0\xf0\x7f\x1f\x1d\x9f\x7c\xfa\xf9\x97\x5f\xff\xfd\x7f\x82\xfe\x20\xc4\xc3\xf3\x51\xf4\xfb\x97\xf1\x24\x4e\xa6\xff\x9d\x66\xf9\xec\xe2\xf2\xea\xfa\x8f\xba\xdf\x68\xb6\xda\x9d\xb5\xf5\x57\xcb\xab\x1b\x2c\xc2\xad\x38\xda\x89\x45\xbb\x30\xaa\xc5\x10\x3b\xbc\x52\x0a\xcb\x0d\xc5\xc2\xd4\x26\x0a\x69\xed\xd8\xdc\x54\xc8\x4c\x47\x8e\xfd\x86\x39\x76\x65\x44\x48\x92\x96\x47\x41\x4d\xb2\x03\x0b\x7a\x89\xfc\xa5\x33\xf0\x5e\x29\x04\xa6\x86\x49\x5c\x1c\x68\xa3\x0a\xd0\xa5\x33\xbe\xc1\xcb\x62\x98\x05\x2a\x15\x88\x62\x25\x72\xcf\x57\x22\xcc\x00\xfa\x5f\x69\x8b\xb2\x6f\x4d\x5c\x1e\xbc\x07\xb1\x21\x5e\x5e\x56\x3e\x08\xb2\x15\x3f\x18\x45\x1a\xb1\x25\xad\x61\x11\x6e\x8a\xdc\x3d\xfa\x21\x5f\xda\x23\x5e\x3b\x33\xfb\xb4\x1f\x8f\xfe\x8f\x47\x7f\x71\xf4\xff\x74\xb2\xf3\xd2\xef\xa0\xb7\xdb\x95\x1d\xb4\xfc\xce\xdb\x6d\xd9\x47\xcb\xef\xa8\x4f\xf0\xf5\xf6\x4e\x5b\x14\x99\xbf\xd6\x71\xab\x22\x0e\xf7\xe8\xbc\xe5\x77\x9c\xde\x5b\x7e\xe7\x1f\xa0\x11\xa8\x7e\x58\x87\xc1\xb8\xcb\x59\xdd\xee\xef\x0f\x96\x51\x49\x88\x0f\x93\x28\xce\x5d\x4e\xc6\x7e\xc7\xe1\x64\x6c\x3d\x4c\x17\x98\xba\xbd\x8c\x45\x93\x55\x5d\x8d\x25\xa0\x77\x38\x41\xe9\x44\x7c\x27\x67\x35\xa0\xcd\x45\xd7\xc6\x77\x7d\x8c\xa2\xab\x4a\xb8\xac\xf1\xc5\xb7\x90\xcf\x1a\x54\x5a\xcc\xd7\x98\xd7\x12\xf2\x2d\x7f\xf1\xd0\x9e\xc6\x6a\xc3\xd5\x1c\x8d\x7d\x90\x7d\x04\x86\xaa\x9b\x31\x11\x81\x8a\xc5\xd2\x20\x8b\x45\x0b\xc2\xe6\xa6\x70\x97\x94\xa3\x8d\xce\x8b\xea\xa1\x30\x18\x59\x7e\xa8\xb0\x87\x49\xfb\xd4\x87\x3b\xef\x53\x1f\xbe\x83\x7d\xaa\x0a\x0e\xf7\xbd\x4f\x59\x97\xd3\x87\xed\xc7\x6d\x4a\xfc\xdd\xdb\x36\x95\x5d\x06\xd3\xed\x38\x8c\x82\xb8\xb6\xe8\x8e\x65\x3b\x92\x7f\xff\x5b\xd6\x87\x87\xd9\xb2\xaa\x2c\x93\xef\x7f\xcb\xfa\xb0\xad\x6d\x5a\x8f\x3b\x96\xb1\x63\x49\x2b\x66\xa1\xcd\xeb\
x9b\xee\x5e\x62\x5e\x24\x6c\x09\x20\xa5\x8f\x3c\x1a\x3e\x7c\x61\x77\x27\x74\x71\xd7\xeb\xe4\xff\xe1\x62\x85\x7e\x24\xdd\x67\x5f\xe9\xb7\x62\xf9\xcf\x53\x17\x00\x61\xb9\xb5\x05\x9d\x3b\x69\x0b\x58\x8e\xda\x6f\xa9\x34\xf0\x90\xf4\x2a\x1b\x05\xbe\xf6\x6a\x34\x09\x06\x0f\xa8\x5a\xf0\x10\x6f\x16\x7e\x41\x6b\xff\x04\x75\x83\x91\x2f\xf6\x16\xaa\x08\xc5\x88\x45\xfa\xb2\xbf\xd5\x86\x9a\x60\x72\xb3\xbf\xd5\xb6\xc9\x78\x60\xe2\xfc\x05\x5f\xd3\x2c\xd8\xd4\x0e\x56\xf4\x15\x9c\x7f\x83\x38\xe7\x49\xbc\x93\x74\x42\x6d\xb4\xb7\x7f\x3e\xfc\x0c\x9b\xee\x49\xf2\x1e\x17\xc2\x20\xba\xbc\xbc\x5c\x49\xa6\x38\xce\xb2\xf1\x4a\x92\x9e\xaf\x86\xc9\x20\x5b\x85\x24\xdc\xc9\xaa\x56\x67\x94\x4f\xc6\x16\x45\xc8\xf6\xc5\xf4\xfd\xd6\x4e\x81\xb6\x78\xae\x18\x0c\x61\xbe\x0f\x88\xb6\xc7\x19\xde\x2f\x2c\xe5\x39\xec\x51\x64\x60\x32\xf2\x10\xc5\xdc\xed\x45\x0a\xf7\x5c\xb8\xba\xb4\x50\xcd\x6f\xac\x2b\x9e\x2e\x06\x7c\x87\x91\x9a\x1c\x16\x43\x4f\x90\xb2\xbf\xd5\x9e\x87\x6d\x94\x33\x5b\x64\x3d\x48\xb5\xf4\x21\x4f\xd0\x94\x5a\x9d\xca\xde\x39\x8e\x1d\xce\xf0\x8b\xd1\x76\x07\x36\x3c\x5d\xe4\x37\xd6\xc1\x84\x54\xf9\x4a\x3b\x07\x98\x6b\x5f\x0a\x7c\x94\xb6\x6f\x6e\xed\x76\xe3\x20\xda\x87\xf6\xc3\xc1\x52\xa3\xf7\x60\x66\xfd\x25\x1c\x1a\xde\x37\x94\xe6\xe7\xa4\x68\x9a\x5f\xf1\xcf\x62\xae\xd6\xb5\x7c\x7e\xb7\x05\xe3\xa9\xd3\x58\xaf\xd7\x75\xc0\x0b\x7a\x07\xcd\xf5\xfb\xa9\x26\xef\x6e\x41\x0a\x7f\x42\x23\x84\x2a\x20\x11\x76\x00\x19\x58\xc9\xa2\xbd\x8d\x95\x3e\xaf\x4b\x63\x01\xd8\x00\x95\x54\xce\x82\x71\x8e\x36\xe1\x9f\xc5\xc5\x62\xa0\x2e\x4a\xde\xf7\x41\x5e\x98\x6c\x1e\x5f\xc2\xe1\x0a\x75\x8b\xc0\x35\xde\x19\x0f\xf0\x2b\xc9\x5b\x03\xc5\x95\xfc\x8e\x6a\xcd\x85\x04\x5e\x75\x8a\x2d\xe2\x2d\x59\xe9\x8c\x7b\x98\xb5\x85\x97\x1a\x21\x0f\x66\xa2\x5c\xac\x0e\x2b\x2c\x97\x5b\x18\x84\x16\xa0\x43\xfc\x1e\xc6\xc6\x96\x12\x6d\x91\x33\x72\x01\x4c\xf8\x04\x8b\x37\xce\xe3\x32\xdf\x63\x68\x8f\xd8\x93\xa5\x9c\xc4\xc4\x69\xd1\xe2\x85\x05\xcb\x77\x6c\x63\x22\xe0\xd5\x8f\xcc\x98\x45\xc3\x95\x1b\xb4\xbc\xe1\xf8\x58\x8f\x02\x44\x8c\x03\xcf\x01\xe7\x05\xb3\xea\xb2\x44\xcb\xce\xbf\x56\x46\x72\x30\x86\xc2\x09\x84\x41\xe1\xc4\x26\x19\x05\x1b\xf4\xaa\x36\x2f\xfc\xe9\xcc\x12\x84\x26\xc4\xc0\x99\x9f\x95\x83\x92\x4f\x0f\x4a\xd2\x40\x97\xa6\xfd\xd1\xb0\x17\xc8\x3a\x47\xc1\x86\xb1\x65\xa8\xcc\x77\x12\x59\xb1\x98\x31\xd6\x36\xb4\x51\x96\x6a\x49\x3a\x1a\x4e\x7f\x96\x68\x17\x22\xc0\x1c\xaf\x57\xd5\xe6\xba\x12\x0f\x96\xfd\x8e\x6f\xc5\x7b\x17\xe4\xbb\xf7\xe8\x7d\x6b\xf1\x2b\x93\x7a\x53\x9d\x9b\x4b\x95\x14\xed\x86\xf4\x5e\xe5\xee\xc5\x07\xa4\x70\x75\xb1\x69\xd3\xfd\xda\xc5\xd9\x17\xab\xe6\x21\x87\xd8\x70\x17\x30\xa5\x62\x83\x50\x21\x17\xb2\xbe\x6b\xcf\x31\x5d\x58\xd8\xb0\xab\x12\x0b\x38\xae\x94\xef\x77\x37\xaf\x4b\x8e\xef\x14\x9a\xfd\xec\xee\xf1\xc3\x67\xb7\xbd\xee\xf1\x23\x69\x77\x6d\x8d\x9c\xe9\xd7\xfe\xd6\x67\xfa\x41\x34\x1d\xe1\xf4\xe5\x03\x9b\x08\xc0\xe9\x5d\x6e\xea\xaf\x39\xc4\x9b\x99\x3b\xef\xe5\x34\xdf\x83\x8e\x1d\x12\x8e\x93\x89\x43\xbb\xfc\xd2\x6d\x42\x20\xde\x6b\x99\x30\x94\x1a\xe4\x0c\x17\xe4\x50\x89\xfe\xe4\x8c\x98\x55\xdc\x81\x97\x39\x8b\xaa\x40\x8b\x2c\x90\x4e\x83\x9c\x6e\xe8\xdc\xe4\xf8\x2a\x27\xa7\xc8\x80\x3d\xa3\x29\xed\x13\xf3\xcd\xe2\xa9\x36\x82\x10\x0f\xa2\x49\x30\x1e\x5f\xb3\x34\xa0\x61\xe5\x9b\x1b\x79\x54\x6e\x58\x2b\x6c\xe0\x4e\x04\x1a\x6a\xb3\x8b\x27\xe3\xb8\x0d\x7e\x0f\x9a\x9e\xa3\x98\x12\xe9\x56\x47\xee\xfc\x62\x17\x3b\x4a\x4d\x87\xa3\x96\x5c\x66\xa5\x98\xdd\x22\x81\xc4\x2e\xbe\xba\x65\x26\x08\xcb\xf0\x4a\xe4\x23\xdf\x37\x2c\x38\x9d\xda\xcd\x43\x14\x4f\x67\xf9\x5d\xe6\x94\x93\x87\x4a\x74\xb7\xa0\xb3\xfb\x22\x8e\x81\xc6\x28\x2c\xf4\x71\xeb\xa4\x12\x30\x5a\xf6
\x10\x36\xc5\xe4\x6c\xa0\xa2\x0d\x5a\xe1\xb5\x95\x7a\x7a\x0a\xf5\x70\x8d\x40\x01\xa8\x2b\x03\xbd\xb1\xeb\xe6\xdd\x3b\x6d\xd1\x5d\x6d\xb7\x95\x36\x88\x6e\xbb\xe1\x69\xca\xf3\xf5\x47\x53\xbb\x7f\xba\xee\xdb\xb5\x3b\x1a\x91\xcc\xcb\x34\xe1\xe6\x21\x05\x1c\x80\x85\xc6\xd5\x9a\x88\x8a\x94\xd8\x90\x1d\x55\xef\x27\x21\x3d\xb8\xbc\xce\xe5\x78\x95\x95\xc4\x15\x55\x51\x44\x56\x07\xe7\x65\x3c\x48\x71\x7e\x4f\x4a\x25\x22\xff\xee\xda\x03\x07\x41\x2f\x19\x9b\xb0\x79\x22\x53\x47\xdf\xaa\x1a\x43\xd9\x39\xd8\x11\x20\xd8\xaa\x33\x12\xfa\x22\xea\xa3\x20\x1e\x75\x0f\xf7\x12\x6f\xb7\xfb\x8c\x2f\x0b\x07\xa6\x39\xe1\x65\xe9\xa1\x4a\x8a\x2e\xab\x8f\x93\xdd\x10\xbf\x44\x31\x45\x3b\xfa\x56\x8a\x8b\xc9\xba\x5e\x16\x19\x53\xab\xc4\xf5\x05\x3a\x2c\x7b\x94\xcc\xcd\xf1\x38\xb9\x44\x41\xda\x8f\xf2\x34\x48\xaf\x11\x53\x2f\x7d\xc1\xd7\x96\xb8\x83\x5f\x64\x8d\xc4\x4f\xd6\x86\x4b\x06\x4a\x57\xb7\x54\x1b\xad\x39\xce\x90\x04\xa5\x12\x37\x48\x88\xff\x06\xba\x8d\x24\x45\x51\x1c\xe3\x14\xa2\xcf\x26\xb3\x1c\x04\x08\x3d\x0a\x1f\xc4\x4c\xa4\x3a\x46\x4a\x86\xec\x81\xb6\x62\x04\xa4\xe3\x1a\x3f\xb9\x46\x64\xa9\xb1\x08\x09\x24\x92\x56\x32\x29\xd3\x47\x46\x52\xc1\x48\x2a\x68\x34\xf6\xeb\xc1\x11\xcc\x27\xbd\x06\x9c\x06\x21\x1a\x24\x71\x96\x07\xb1\xde\xbc\x35\x89\x94\x3a\xc7\x6e\xc5\x9a\xc0\xfb\x34\x3a\x43\xbf\x6d\xa0\xfa\x55\x7b\x40\xff\x67\x73\x87\x31\x0a\x37\x3b\xf4\x7f\xe5\x9a\xb1\x44\xd3\x89\x45\xda\xb3\x8d\x22\xff\x82\x38\x64\xb0\x03\x3d\x44\x14\x32\xc1\xc4\xef\x25\x12\x59\x49\xbe\x32\x1b\x33\xb6\x0c\x24\x74\xda\xc6\xc7\x1d\x7a\x52\x55\x5f\x5c\x2c\x98\xdb\x45\x20\x83\x61\xfe\x6e\xe2\x8f\xed\x6f\xf6\x58\xf4\x31\xc0\x2b\x82\x25\x56\x1a\x09\x65\xc1\x29\xaf\x12\x88\xcc\x28\x7d\xff\xc1\xc8\x64\x92\xe0\xad\xcc\x0d\x3e\xf6\x50\xd1\xc3\x60\xa8\xff\xa7\x47\x0f\x9b\x23\xa6\x2e\x22\x22\x12\x1e\x5a\xd0\xd0\xdc\x08\x62\xee\x1a\x73\xa3\x88\xb9\xab\x3e\x50\x24\xb1\xbb\x73\xbb\x1e\x55\x4f\xc3\x78\x5b\xf6\x63\x22\x5d\xec\xda\x83\xa3\x95\x06\x1c\x2b\xe5\x98\xf2\x58\x69\x40\x0b\x09\x85\x4b\x1a\xfc\x92\x49\xa0\xb2\xe4\x0c\x39\x36\x09\x06\xf6\x4b\x22\x71\xf0\x77\x18\xc1\xbd\xfa\x5b\x2b\xcc\xaf\x3a\xad\x97\x96\xd7\xe3\xa8\xff\x92\xa0\x12\x82\x6d\x6b\xa6\x7d\xc5\xf1\xe0\x25\xd8\x34\x5a\xde\x53\x37\x4b\xed\xc3\x24\x6c\xcf\x37\xbe\xcb\x46\x41\xa3\xad\x83\x24\x2f\x1b\x3a\xb8\x6c\x14\xb4\xfd\x86\xf9\xb2\xb9\x6e\x29\xd9\xd4\x5e\xa5\xd1\x14\x4f\x42\xbf\x53\xb7\xda\xfe\x29\xaf\xa6\xfd\x2f\xe1\x50\x6f\x07\x5f\x4c\xbf\x84\xc3\xb2\x7b\x07\xb5\xeb\x49\x88\x5f\x0e\x86\x7d\xeb\xeb\x3c\x75\xbc\x7e\x79\x3e\x0e\xc2\x49\x10\xdb\x3e\x27\x76\x60\x78\xa0\xbf\x9e\x06\xe1\xcb\x20\xce\xa2\xab\x57\x0d\x7d\x10\xc8\xa7\x28\x4b\xfc\xba\xdf\xd0\x47\x9c\x7d\x7a\xb5\xf6\x6a\x4d\x9f\x21\xf2\xe9\x0f\x9c\x26\xcc\xf5\xda\xf2\x35\x76\x7c\xa3\x3a\xb2\x97\x23\x7c\xa5\x7d\x08\xb0\x4e\x5c\x34\xee\x46\x68\xbc\x4f\x07\xfa\xe4\xa6\x41\xbf\x1f\xe5\xd6\x97\x2f\xc7\xf8\x3c\x18\x5c\x3f\xf4\x1d\x90\x58\x3d\xf0\xa4\x2f\x1a\x78\x59\xac\x15\xf1\xc8\x96\x08\x3c\x93\x95\xa1\x99\x85\xb2\x75\x20\x7e\x37\x5a\xe2\x37\xa1\x7a\xfe\x9b\x10\xbb\xf8\x4d\x7f\x15\xa4\x5d\xd8\x97\xc2\x2f\x46\xc8\x14\x03\x4a\xbf\xc6\x1d\x16\x45\x87\x53\xab\xf4\x94\xa7\xea\x93\xa0\xcd\xe2\x6d\xa2\xd4\x20\x94\x48\x9b\x95\x09\x50\xbc\x11\x74\x27\xbf\xa1\xe4\x26\xde\xc8\x54\x26\x5e\xc6\xea\x2b\x89\xa6\xe0\x99\x90\x12\xfc\x28\x28\x88\x8e\xca\x80\x0d\x14\xa3\x17\xe9\x37\x27\x93\x45\x15\x91\x8a\x02\x52\xe6\xb5\x8b\x2b\x26\xdd\xa1\xd8\x58\x97\xba\x6d\xdf\x2b\xd7\x26\x7b\x2a\x5d\x75\xdb\x2d\x4f\x21\xbc\x6e\xbb\xed\x15\x13\xdf\x6d\x77\x3c\x75\xf4\xba\xed\x35\xfd\x46\x58\x27\xe5\x6e\xa7\xee\x31\x6a\xed\x76\x00\x1f\x41\x29\xdd\x4e\xc3\x93\x69\xa5\xd
b\x69\x79\x36\x6a\xe9\x76\x9a\x9e\x4c\x21\xdd\x4e\xdb\x93\xe9\xa7\xdb\x01\xbc\x14\x9a\xe9\x76\xd6\x3c\x9d\x6a\xba\x9d\x75\x4f\xa7\x9b\x6e\xe7\x95\x67\x10\x49\x77\xad\xee\x59\xc8\xa9\xbb\x06\xf8\xb3\x25\xd1\x5d\x03\xec\x19\x69\x74\xd7\x5a\x9e\x41\x1c\xdd\x35\x40\x9c\x90\x51\x77\x0d\x70\x2e\xd6\x59\x77\xad\x23\x5f\xa0\x7b\xc5\x92\xed\xae\xf1\xab\x75\xb2\x98\xbb\x6b\xaf\x3c\xbe\x54\xbb\xeb\x75\xaf\x58\xc2\xdd\x75\xdf\x2b\x16\x77\x77\x1d\xd0\x29\x28\xb8\xbb\x0e\x8d\x0b\x46\xd3\x5d\x6f\xdd\x9c\x79\x9d\xfa\xe3\xe5\xc1\x5f\x7f\x79\xd0\x1b\xe1\xc1\x17\xd2\x29\x58\x29\xd4\x0d\x88\xa6\x39\xcb\x66\x53\x32\x30\x98\xc5\xa7\x96\xfa\x0d\x72\x3c\x0d\x69\x8e\x7e\xd8\x40\xcf\x39\xe4\xe7\x16\x8b\x10\xe1\xa4\x71\x8f\xd7\x15\xa5\xe6\xf8\xa2\x9d\x23\x3c\xc4\x29\x86\x83\x5e\x1a\x9d\xc3\x99\x2c\x8a\xa3\xbc\x00\x93\xcd\xa6\x38\x05\xd5\xf5\x86\x96\x9e\x43\x82\xb2\x39\x3b\x9f\xe0\x38\xd7\x0a\xa0\x3c\x41\xa3\x20\x0e\xc7\x58\x19\x37\x19\x76\xdf\x0a\x59\xb1\xa9\x81\xaa\xa6\x3b\xa0\xa4\xfb\xa6\xb1\xe4\xa9\x09\x54\x14\xe7\xeb\x92\x86\x7e\x28\xd7\x17\x8a\x09\x75\x76\xcc\x63\x7e\x51\x83\x2a\xe1\x3f\x11\xa8\xf0\x42\xc6\x46\x39\x44\x58\x11\x4b\x68\xfa\x2f\x80\x74\x11\xe1\x4b\x17\x8a\xce\xe6\x25\x84\xf7\x38\x0a\xe8\xeb\x57\xb5\x3c\x27\x38\xc0\x12\x74\xc6\xbc\xfa\x0f\x64\xcd\x09\xdb\x11\x58\x74\x76\xe0\x46\xd5\x25\xa3\x15\x27\x56\x7e\xc7\x8e\x96\xbb\xa5\xc5\x6a\xec\xc5\x79\xb3\xb1\x68\x13\x8b\xd5\xd8\x19\x27\xc1\x6d\xaa\x74\x5a\xf0\xbe\x28\x7f\x4b\x52\x5a\xa1\x14\xec\x21\xf9\xd5\x75\x8e\x0f\x20\x39\x90\xf1\xda\x96\x77\x59\xa1\xbf\x5d\xba\xe8\x8a\xb6\xaa\xac\x88\xa2\xf4\x62\x2a\x84\x02\xda\x5b\x81\x1b\xda\xb0\xe3\x6c\xd1\x2c\x6c\x5f\xb1\xec\xab\xd7\xb9\xcd\xf8\x79\x21\x77\x41\x1b\x2a\x8b\xe4\xd3\x2e\xea\x9f\x46\x67\xb7\x4a\x9e\x5d\x98\x73\x47\x7f\x60\xaa\xaa\x2d\x1c\x47\xd5\xa2\x82\xb1\x16\xa9\x2d\x3c\xc4\xdc\x08\x6d\x1d\x51\xe6\xdb\x9a\xf5\x8c\x8c\x26\x79\x4d\xe0\xa1\x98\x48\x7d\x32\x33\x37\xdb\x0d\xa6\xd3\xf1\x35\x6b\x38\x48\xcf\x67\x84\x85\x67\x65\xfe\x8a\x8c\x5f\xaf\x4c\xd3\x24\x4f\x08\x8e\x32\xe7\x2e\x33\x9c\xd0\x77\x1f\xbb\x82\xa5\xe3\x3f\xca\x3a\x7f\x8d\xac\x03\x01\xa3\xff\x82\xb8\x44\xd6\x9c\x4a\x15\x4c\x24\x60\x8b\xa5\xf7\x78\x28\x2f\x74\xeb\xa4\xca\x09\x63\x16\x52\x49\xaa\xba\xd4\x6e\xfe\x6c\x92\x9e\x8b\xaf\x74\x5a\x76\x2e\x72\x42\xd8\xc4\x06\x1d\xbe\x95\xa0\x9f\xd1\x1f\x59\x14\xb3\x60\xac\x84\x65\xd4\xaf\xfc\x3a\xfb\x5b\x42\x5f\xd5\x34\xbe\x6c\x79\xd5\x96\xac\x16\xea\xfb\x5b\x6d\xcd\x9a\xc2\x66\x00\xa2\x7b\x4d\xa2\x0d\x36\xaa\x16\x03\x10\x9e\xf6\xa6\xf4\x76\xac\xd0\x04\xdb\x73\x15\x9f\x9a\x9c\xb4\x7e\xd5\x59\x6b\xb5\x1b\xcd\xba\xef\xa1\xfa\x15\x1e\x0e\xc2\xa0\xbf\xfe\xca\x92\x57\xb1\x7e\xf5\x6a\xbd\x1f\x84\x83\x21\xf6\x60\x60\x9a\x8d\x76\x6b\xad\xa3\x96\x3b\x73\xde\x88\x69\x69\xf4\xe4\x5e\xec\x8b\x4c\x7a\xb6\xbd\xeb\x32\x98\x22\x0c\xee\xd5\xf3\xf7\x10\xbf\xe3\xde\x31\xdc\xd7\xd7\x7c\x36\x28\x12\x9f\x09\x3c\x9e\x5e\x10\x45\x8e\x08\xbc\xfb\x9f\xa5\xd2\xfb\xa7\xfc\xe1\xcc\xe6\x12\x22\x7d\x26\x04\x67\x16\x20\x7f\xb5\x5a\x4d\x82\x49\x3d\xc5\xd1\x57\x24\xbf\x84\xbd\xae\xb5\xa4\xf9\x88\xa3\xaf\x15\x01\x36\x5a\x4b\x16\x80\x10\xca\x58\x71\x49\x37\xc1\xdd\xcd\x38\x64\x57\xb9\xa1\xb0\x5f\xf7\x2b\x43\x5a\x47\xd2\x98\xa2\x65\x54\xd7\xc5\x07\xa5\xb4\xaf\x95\xf6\x4b\x4b\x37\xb4\xd2\x8d\xd2\xd2\x4d\xad\x74\xb3\xb4\x74\x4b\x2b\xdd\x2a\x2d\xdd\xd6\x4a\xb7\x4b\x4b\x77\xb4\xd2\x9d\xd2\xd2\x6b\x5a\xe9\xb5\xd2\xd2\xeb\x5a\xe9\xf5\xd2\xd2\xaf\xb4\xd2\xaf\xca\x67\xa7\xae\xcd\xce\x9c\xc9\xf4\xb5\xe2\xe5\xb3\xe9\x37\xb4\xe2\xe5\xd3\xe9\x37\xb5\xe2\xe5\xf3\xe9\xb7\xb4\xe2\xe5\x13\xea\xb7\xb5\xe2\x6d\x83\x1b\xac\xae\x12\x86\xfc\x25\x8a\xcf\x49\x
d5\x28\x18\xf7\x6d\x62\x73\x40\xb6\x81\x53\xeb\x40\xf5\xe1\x93\x75\x50\x06\xf0\xc9\x3a\x00\x21\x7c\x6a\xda\xd0\xe9\x15\x77\xd0\xea\x37\x82\xc4\xce\x4e\x2d\xf0\x50\xdf\x43\x03\x0f\x85\x9e\xb4\x40\x3d\x84\xd6\x3c\xb2\x85\xd6\xcf\x74\xde\x10\xd2\x7a\xa1\x87\x44\xd5\x62\x84\x3c\x84\xfc\x86\x87\x4e\x4e\x7d\xa3\xde\x80\xd6\xa3\x2d\xd1\xaa\xc5\xa2\x25\xf5\xd6\x48\xbd\x86\x51\xaf\x4f\xeb\x09\x24\x03\xa9\x5e\xd3\x43\xa8\x01\xed\x35\x8d\x7a\x65\xfd\x6b\x89\xfe\xb5\x16\xea\x5f\x5b\xf4\xaf\xbd\x50\xff\x3a\xa2\x7f\x9d\x85\xfa\xb7\x26\xfa\xb7\xb6\x50\xff\xd6\x45\xff\xd6\x17\xea\xdf\x2b\xd1\xbf\x57\x0b\xf5\xcf\xaf\x7b\xac\x7f\xbe\x49\x30\x65\x1d\xf4\x7d\x8f\x75\xd0\x37\x29\xa6\xac\x87\x04\x4b\xda\x43\xdf\x24\x99\x52\x12\x6d\x7a\x9c\x44\x4d\x9a\x29\xed\x63\x4b\xf4\xd1\x24\x9a\xd2\x3e\xb6\x45\x1f\x81\x6a\xcc\x4e\xbe\x7b\xe7\xe8\xa4\x87\x50\x9b\x76\xd2\xa4\x9b\x90\x56\xb4\x76\x92\xd0\xdb\x2b\x5a\xd1\x24\x9c\x01\xad\x68\xef\xa4\xef\x21\xd2\xd1\x93\x53\xdf\xa4\x9c\x3e\xad\x68\xed\x24\xe1\x18\x8d\x3a\x54\x34\x49\xa7\xac\x8f\x6d\xd1\xc7\x86\x9d\xd7\xb8\xfa\x48\x68\x8e\xf6\xb1\x61\x67\x36\xce\x3e\xb6\x79\x1f\x1b\x76\x6e\xe3\xea\x63\x4b\xf4\xb1\x61\x67\x37\xae\x3e\xbe\x2a\xfa\x68\xe7\x37\xce\x3e\xb6\x44\x1f\xed\x0c\xc7\xd5\x47\xc2\x18\x59\x1f\xed\x1c\xc7\xd5\xc7\xf5\xa2\x8f\x76\x96\xe3\xa4\xd5\xa6\xc7\xfb\x68\xe7\x39\xae\x3e\x36\x04\xad\x36\xec\x4c\xc7\xd5\xc7\x35\xd1\xc7\xa6\x9d\xe9\xb8\xfa\x48\x96\x3f\xed\x63\xd3\xb7\x2f\xc8\xdd\x5d\x37\xb1\xb6\x00\xd7\xa6\x9d\xeb\xec\xee\xda\x3b\x49\x86\x95\xac\xad\x93\xd3\xa6\x9d\xeb\xec\xee\x96\x2c\xc8\x0e\x54\xb4\x73\x9d\xdd\x5d\x47\x27\x5b\x1e\x6a\x34\xa1\xa2\x49\x3a\x65\x7d\xf4\x8b\x3e\xda\x99\x8e\xab\x8f\xad\xa2\x8f\x76\xa6\xe3\xea\x23\x4c\x24\xed\xa3\x9d\xe9\x38\xfb\x58\x17\x7d\xb4\x33\x1d\x67\x1f\x9b\x1e\xeb\x63\xcb\xce\x74\x5c\x7d\xac\x8b\x3e\xb6\xec\x4c\xc7\xd5\xc7\xa6\xe8\x63\xcb\xce\x74\x5c\x7d\x24\xac\x9c\xf6\xb1\x65\x67\x3a\xae\x3e\xbe\x12\xf3\xd8\xb2\x33\x1d\x57\x1f\xc9\xf2\x60\x7d\xb4\x33\x1d\x27\xad\xb6\x39\xad\xb6\xec\x4c\xc7\xd5\xc7\x46\xd1\xc7\x35\xfb\x82\xdc\xdb\x73\x0b\xaa\x1d\xda\x49\x3b\xd7\xd9\xdb\xb3\x77\x12\x68\x0e\x78\x40\xcb\xce\x75\xf6\xf6\x4a\xc4\x80\x36\x88\x80\x76\xae\xb3\xb7\x67\xef\x24\xe1\x1d\x0d\x18\xd6\xb6\x5d\xd4\x71\xf5\x91\xcc\x07\xed\x63\xdb\xce\x74\x5c\x7d\x6c\x8a\x3e\xb6\xed\x4c\xc7\xd9\xc7\xba\xe8\xa3\x9d\xe9\xb8\xfa\xe8\x17\x7d\xb4\x33\x1d\x57\x1f\xd7\xc5\x3c\xb6\xed\x4c\xc7\xd5\x47\xa0\x39\xda\x47\x3b\xd3\x71\xf5\x11\x44\x72\xda\x47\x3b\xd3\x71\xf6\xb1\xe9\xf1\x3e\xda\x99\x8e\xab\x8f\x2d\xd1\xc7\x8e\x9d\xe9\x38\xfb\xe8\xf3\x3e\x76\xec\x4c\xc7\xd5\xc7\x86\xe8\x63\xc7\xce\x74\x5c\x7d\x7c\x25\xe6\xb1\xd3\x34\x17\x24\x5c\xa3\xe4\x38\x9d\xe0\x30\x0a\x72\xe6\x54\x06\xee\x0a\x6a\x39\x72\xc4\x45\x1b\xa8\x06\xff\x2e\xa3\x40\xd7\xb0\xd2\x32\x3e\x2b\xe3\x93\x32\x7d\x7b\x99\x06\x2b\xd3\x20\x65\x06\xf6\x32\x4d\x56\xa6\x49\xca\x84\x86\x36\x57\x53\x55\xee\x58\x2c\x75\x17\x0c\x68\x0b\x99\xd2\x45\x36\xdd\x20\x0f\x6c\x07\xf3\x20\x0f\x44\x28\x9f\x20\x0f\xdc\xca\xb1\xf8\x6d\x94\x67\x27\x49\x1e\x8c\x05\xcc\x78\x2b\xc8\x03\xea\x41\xf2\x02\xad\x5b\xa0\x43\x9d\x0f\x78\x98\x73\xe8\xc2\xe3\x04\xca\x1b\x9d\x71\xa6\xbc\x12\x68\x9e\x16\x20\x7f\xfa\xe9\x27\xd4\x86\x8b\xb7\xfa\xd5\x7a\xbd\xb8\x6f\x2b\x4a\xfc\x0b\x35\x1b\x06\x71\xa8\x7d\xd9\x45\x1b\x08\xd4\xee\xc3\x71\x92\xa4\x35\xa9\x93\xab\x8a\xee\xdd\xd5\x39\x28\xfb\x01\x6d\x48\x4f\xfa\xc2\x11\xa8\xd7\x6a\xb5\x02\xb7\x65\xd4\x69\xd1\x7c\x69\xaf\x20\x98\x68\x6b\x89\x2a\x6c\xec\xfa\x59\x5e\x95\xe1\x5c\x28\x67\xe5\xb7\xd5\xb5\xb3\x26\x38\xa6\x9a\xd5\xc1\xcd\xd3\xcd\x1a\x5c\x62\x91\xce\xb6\xaa\x74\xf6\x83\xb5\xb3\
c\xf6\xc7\xbc\x0f\x07\x4e\xef\xc3\xe1\x78\x10\xa8\x95\xed\xec\x6e\x3c\x1e\x94\x30\x1d\x0f\x6e\x38\x19\x8f\x06\xb5\x39\xb8\xbc\x4c\x1a\x85\xf5\x69\x36\x16\xd6\xa4\x61\x38\xf8\xe4\x63\x11\xa7\x36\x12\xa0\xa2\x8d\xec\xf2\xa3\x5e\xd7\xe0\x29\x5b\xbf\x5f\xa3\x6a\xc6\x2a\xb4\xe6\x73\x9e\x55\x50\x8f\xe8\xe5\x77\xcf\x5e\xa3\x72\x71\x5e\x5f\x13\xd1\x54\x34\x78\xfa\xed\xcb\xde\xc5\xc5\xed\xc1\xc4\x29\x6a\x37\xfe\xc3\x05\x8a\xea\x0b\x7c\x56\x5f\xa6\x7a\x43\x4f\xfd\x2a\x01\xe4\x97\xfa\xb3\xf8\x32\xd5\xfa\xd3\xe7\x5c\xab\xaa\xf4\xfd\xc3\x97\xb2\x30\x16\x92\x85\x5f\xdc\x77\x54\x09\xe8\xe6\x82\x2a\xf9\x45\xab\x92\x72\xdd\x2b\xaa\xdc\xa5\xf5\xde\xf3\xab\xa6\x04\xd8\x7b\x7e\x65\x28\x7d\xf7\x9e\x5f\xd5\x75\xf5\xde\xf3\x2b\x73\x59\x3d\x41\x43\xaa\x28\x8c\x50\x5a\x56\x6b\xc4\xb2\x6c\xb9\xca\xcb\xc5\x29\xaa\x96\xe8\xf9\x31\x36\xe2\xfd\xae\x84\x52\x40\x6f\xfa\x35\x90\x4d\x77\x87\x84\x91\xfd\xee\x90\x16\xdd\xf3\xa5\x40\xf8\xfc\x18\xbf\x29\xdf\xa2\xbb\x08\x1b\x6a\x94\x2a\xba\xb2\x3c\xff\xa4\xee\xdd\x9b\xb6\xbd\x2a\xc7\x27\xfe\x33\xf1\x31\xba\xab\xa1\x86\x3a\x7c\x7b\xe8\xf6\x00\xb1\xa1\x60\xe9\xb7\xeb\x35\x3f\x4b\xe7\x1c\xe1\x08\xad\x2f\xd2\xf7\xfc\xca\x20\xfe\xf5\x45\xfa\x23\xbf\x5a\x37\x2a\x68\xbf\xdb\x85\xb2\x78\x09\x40\x52\x34\xf5\x97\x07\x08\x47\xcd\x37\xfb\x15\x2b\xc7\x50\x71\x4a\xf1\x63\x16\xe4\xba\xc6\xae\x78\x79\xa3\x90\xbe\x55\x4c\x19\xf1\xba\xaf\x6e\x49\xcb\xea\x25\x54\x45\x39\xd2\x8a\xa0\x34\x78\x6d\x28\xa5\x41\x05\xd4\x68\x50\x64\xd8\xc6\x64\x35\x24\xb0\x5b\x4d\x97\x4e\xb1\x5a\x9e\x41\x80\x99\xf3\xa2\x42\x84\x82\x67\x08\xca\xe6\x86\x52\x38\x6f\x26\x25\x3a\x94\x77\x43\x78\x50\xc0\xb1\x36\xae\xc9\xe4\xf9\x31\x51\x36\xb8\x87\xf6\x1b\x09\xec\xa1\xbf\x20\x42\xdf\x42\x8d\x47\xb0\xad\x12\xfd\x05\xee\xb8\xd8\x9a\xbd\x55\x79\x3a\xdb\x9e\xbf\x00\xca\x77\xb6\x4c\xee\x75\xb8\x24\x14\x1e\x4b\x5e\xd1\x3e\x22\x81\x85\xe1\x3d\x03\xc7\x03\xb2\xa6\xca\xfe\xa2\x03\xe5\x22\xe3\x88\xb3\x6c\xa6\xcc\x0e\x95\x6b\xc4\xce\xcf\xe7\x25\xcf\x85\x2e\xd9\x02\xf1\xcd\x39\x5b\xe4\x3c\xaf\xeb\x32\x42\x78\x9f\x1a\xb1\x09\x11\x28\x34\x19\x5b\xa0\x94\xa3\x74\xb5\x7c\xcf\x17\xa8\x5c\x54\x4b\x44\x65\x51\xe0\x35\x5a\x67\x6c\x2e\xd1\x4b\x94\x6b\x33\xb6\xcb\x59\x99\xcd\x10\x9b\xcf\x97\x97\x6b\x40\x2d\xf0\x56\x4b\x81\xf6\x62\xcd\x73\x74\x59\x56\xb3\xe5\x45\x25\x19\x5c\x97\xcb\xc5\x10\x8b\x12\x34\x94\xd7\x9c\xb4\x5f\x1e\x3c\x50\xd7\xca\xb4\x3f\x89\x80\xe2\x63\x93\xe4\x3a\x96\x8b\xa5\xe5\xc6\x6e\xc3\x55\x68\x21\x88\xb5\x9f\x21\x66\x4d\x4a\xa9\xc4\xaf\x91\xd0\xbe\x6f\x56\x95\xad\x1f\xb1\xde\x8f\xf8\xad\x2a\xec\xf9\xab\xfe\x13\x5c\x0a\x30\xb8\x6a\xc7\x10\x01\x8f\x65\xe1\x4b\x54\x2e\x3e\xf0\xd5\x9a\xdb\xa3\x60\xb9\xf8\xf0\xb2\x17\x08\x3b\x3f\x6d\x35\x40\x60\xc7\x00\xd1\x62\xd3\x25\xb6\x7e\x83\x43\x61\xd0\x7d\xec\x1f\x3b\x13\x0e\xed\x17\xbe\xc8\x56\x57\xe7\xd5\x0e\x57\x01\xaa\x8a\xb5\xcb\xe3\xa6\x5d\x0b\x3c\xed\x86\x7c\x6b\x09\xdd\x9c\x7f\x0e\xaa\xad\x44\x5c\xb5\x7b\x8f\xdd\x94\xa7\xb5\x20\x4d\x49\xc7\x7f\xf0\x4a\xcf\xd3\xba\xcc\xcd\x01\xa9\x76\x35\x56\x5f\x07\x12\x6c\xd5\x07\x83\x9b\xb3\x0c\xd9\xc7\x0f\x8b\xb2\x2a\xd9\x5c\x2f\x7d\xd5\x85\xe1\x9b\x6c\xc6\x16\xa7\xfc\xc9\x8b\xb6\x2c\xaa\xac\x3c\xe6\x6d\xbc\x42\xfe\xaf\x6f\xd2\xe6\x36\xf2\x7e\x6a\x78\x63\x2d\x0a\x6b\x9b\x17\x4f\xf4\x36\x04\xe8\xf8\xea\x6f\xbb\x36\x54\xf2\xe6\x15\x85\xf8\xff\x96\xbc\x41\x9b\x50\xfd\x19\x2b\xd3\xba\xae\x6a\x93\xe5\xc3\xc0\xa3\xe4\x47\xe9\x55\xf0\x79\xfc\xda\x36\xc3\x48\x64\xcc\x27\x00\x9d\xed\xda\x8b\xc6\x30\x74\x3b\xb1\xc0\xae\xba\xb0\x2b\x05\x6b\x64\xf2\x11\x2f\xd7\x15\x9f\x37\x56\x6c\xc6\x58\x40\xe7\xb7\x4b\x2d\xa8\x3b\x40\x17\x62\xa0\x95\xa5\xd6\xde\x94\x6f\xdf\x4c\x
26\x8a\xdb\x77\x6d\xb8\x16\x89\x64\xf3\xea\x02\xdf\xa1\xac\xb6\x49\x34\x86\x80\xdd\x73\xa4\x95\x4d\x52\x3d\x4f\x9a\xd7\x6c\x14\xe3\x01\xfc\xaf\x8b\x7c\x89\xd6\x97\xec\x5c\xa6\x1f\x73\xb6\xae\xa4\x31\x0c\x43\x78\xe5\x56\x59\x8f\xd9\xae\xc2\x5c\x8e\x5f\x19\x6c\x18\x2a\x8a\xef\xea\xea\x03\xd7\xb8\x31\x17\xbc\x8e\xab\x5f\x27\xa4\x8c\x84\x2e\xc3\x1b\x59\x85\x96\x17\xd5\x20\x02\x37\x21\xd7\xad\xb2\x4e\xc8\xb5\xeb\xac\x33\x64\xbc\xe7\x57\xb2\x04\x74\x14\x1c\xfa\x44\x7f\x52\x7e\xb0\x3c\xd0\xea\x46\x47\xc6\xaa\xd1\x87\xe8\xa5\xb0\x40\xf5\x12\xb0\x5a\xae\xd7\x6d\x9a\x0e\x35\x0f\x21\x21\x86\xd7\x52\xd9\xa2\x19\xa8\x5a\xc1\x4d\xea\xf1\xea\x8c\xad\xdf\x77\x5c\xb6\xb6\xdd\xc9\xa4\x63\xa2\xc2\x11\xeb\xd1\xf5\x5d\xa7\xeb\xc2\x69\x05\x16\x4d\x04\x1d\x93\x7d\x07\x36\xfb\x95\xd1\xf0\xc5\x33\x91\x51\x49\xcc\x0a\xaa\xf6\xbb\x01\xdb\x2f\x9e\x6c\xcf\xf6\xca\xce\xf6\xdc\xcd\xf6\xdc\xc1\xf6\x6a\x0b\xb6\x9d\x45\xa4\xd7\x75\x15\x69\x39\xfd\xb1\x5d\x1d\xe9\xb1\x22\xcc\x12\x57\xc5\x37\x95\x5e\x8a\xf9\xfb\x87\x2f\x0f\x54\x82\xd6\xa9\xc5\x3c\x45\x59\x71\x6a\x28\xae\x7d\x3e\x67\x82\x89\x4d\x85\xfa\x58\x54\xc2\x35\x69\xe9\x98\x10\x35\x95\x9d\x87\x13\x35\xdd\xa2\xdb\xdf\x3f\x7c\x69\xac\xb8\x7d\xb2\x2a\xcf\xe7\xfc\xee\x6e\x53\x44\xb2\x51\x67\xa2\x48\xff\xe9\x8f\x33\x5d\xa4\x26\x22\x04\xdb\x25\x54\x28\xcd\xfa\xd7\x03\xa9\x2c\x96\xaf\x31\x3a\x12\x70\x07\x52\xaa\x0f\xa5\x8e\x97\xab\x49\x7b\xcf\xba\xba\x38\xbe\x26\x7d\xb0\x9e\x97\x19\x9f\x78\x53\x44\xf6\x06\x77\x61\x34\x68\xc9\x35\xd1\x92\x29\x0a\x1c\x68\xfd\x6b\xa2\x0d\xa6\x28\xda\xb3\x5f\xa4\x71\xed\x77\x0f\xbe\xc6\x07\x7a\x63\xad\x85\x55\x32\x07\xfa\x3b\xc7\x16\x0d\xfc\x2d\x28\xdc\xcc\x3b\x8d\xa0\xb5\x23\x73\x64\xd7\xee\xe3\x2d\x28\x98\x47\x3d\x9c\x90\x1b\x1b\xf6\xfe\x49\xc2\x6a\x13\x5d\x6e\x20\xb8\xb6\xb8\x76\x0c\xb1\xb6\x10\xd7\x0d\xb4\x0d\x94\xb3\x7e\x7e\x03\xd5\x2b\xa1\xaf\x15\x66\xbf\x17\x92\x69\xaf\xaa\xbe\x56\xdc\xfd\x5e\x18\x4c\xdb\xaa\xee\xf7\xc2\x68\xaa\x8a\xbd\xdf\x8b\xf0\xc7\xb7\x53\x1a\x7c\x52\xc1\xfd\xdf\xb3\xd2\xfe\x67\xab\x87\xff\xdf\x53\xd9\x1e\x6e\x2a\x28\x17\x3c\xbf\xd9\x12\xf7\xdf\xb1\x35\x6f\xab\xd6\xb3\x35\xd7\x9e\xbd\xf6\x89\xb3\x02\xfe\xd0\x97\x37\x51\x80\x16\xec\x8c\xaf\xcf\x75\x2f\x3d\xd4\xd9\x10\x20\x82\x0d\xf9\xdf\x7f\x7c\x34\xa1\xf9\x16\x45\x41\x73\x85\x8d\x09\xcd\xeb\x28\x10\x7c\x00\x53\x9b\x28\x38\x50\x5f\x04\xff\x86\xcc\xa0\x45\x2d\xd1\xab\xe9\x94\xf2\xef\x7c\x8d\x18\x5a\xf0\xcb\xf9\x15\x92\xbe\x96\x9b\x08\xeb\x01\x05\x75\x6e\xf3\x58\x5c\x9c\xa5\x7c\xf5\x11\xc1\xad\x52\x70\xab\x8a\xf8\xe0\x13\x48\xe7\x0f\x9c\x4d\xe6\xcb\x4b\x68\x21\xfe\x6b\x6a\xd0\x6d\xdc\x8d\x6e\x43\x80\x5a\x2e\x9b\x56\x2e\x75\x44\xa8\xc5\x53\x0f\xcc\x72\xf5\xcf\x23\x9e\x0f\x6f\x65\x81\x17\x7a\x91\xd7\x9d\xef\xac\x25\x0d\x21\x7e\x51\x76\x32\x2a\xd1\xc3\xa9\xe0\xda\x3c\x86\xa9\xfb\xb5\x0c\xb7\x7a\xc2\x63\xd1\xdb\x23\xd4\xbd\x7d\x5b\x7f\x33\xef\x6b\xea\xbb\xb2\xba\x2c\xd7\x1c\xfd\xf4\xec\x64\x0d\x18\xc6\x14\x53\x5f\x94\xa2\x0c\xe4\x23\xfa\x56\xe8\x57\xc8\xe5\x2e\x08\x46\x8d\x24\xac\xa8\xf8\x0a\x2d\xf8\x29\xab\xca\xc5\xe9\x0d\x08\x1e\x50\x71\x21\x78\xa5\x82\x83\xc5\xb2\x9a\x58\xa5\x7a\x78\x88\x16\xcb\xd1\x4c\x15\xee\x64\x91\x02\xfd\xad\x91\xee\x7d\x23\x98\x14\xec\x6f\xb5\x90\x0d\x29\xa9\x92\x8c\x12\x4c\x6d\x0d\xad\x3a\xef\x77\xb8\xeb\x64\x00\x36\xad\x7c\xfb\xd3\xf7\x9a\x56\x60\x39\x01\xc6\xed\x73\xb6\x86\xe5\x85\xad\x7c\xa8\xd1\x14\xe0\x10\x2e\xd1\x28\xab\x5a\x0a\x12\x35\xde\x1b\x56\xfe\xb7\x3f\x7d\x7f\x33\xaa\x97\x6b\x3b\xad\xe2\xd9\x22\x9f\xb0\xc5\xb2\x9a\xf1\x95\x62\xc4\x65\x06\x6c\x91\xeb\x66\x20\x7a\x38\x62\x0a\xad\x9f\xdd\x96\x02\x19\xb3\x8a\xc6\xf3\x14\xfc\xef\x66\x1f\xcf\
x5e\x7c\x6e\xf3\x78\xf6\xe2\x33\x59\xc7\xb3\x17\x37\x63\x1c\xcb\x55\xc7\x36\x96\xab\x1d\x4c\x63\xb9\xba\xb6\x65\xfc\xba\xa3\x65\xfc\xfa\x3b\x5b\xc6\xeb\xcf\x6f\x1a\xaf\x3f\x9b\x6d\xbc\xbe\x29\xe3\xd8\xf4\xac\x63\xb3\x93\x79\x6c\x3e\xc1\x3e\xde\xed\x68\x1f\xef\x7e\x27\xfb\x80\x45\x79\xdd\x32\x16\x72\x66\x54\xbd\x10\xce\x79\x51\x6d\x9f\x95\x2d\xc0\x26\xe4\x37\xb4\x2c\x1a\x4c\x70\x85\xcd\x4d\x19\x03\x20\xbb\x19\x73\x00\x54\x1d\x83\x80\x5f\x9e\x4c\x48\xe8\xb2\x03\x09\xa4\x9b\xc2\xc2\x64\x07\xe2\x15\x68\x81\x1e\x20\x9f\xd8\x56\xba\x34\x4b\x99\xb4\xa6\xf2\xe0\x01\x5a\xc0\x12\x79\x63\x0c\x72\xeb\x10\x41\x77\xd1\xc2\x78\x59\xbd\xd9\x84\x04\x9e\xa1\xad\x7d\x44\xf5\xcb\x93\x9b\x21\x1d\xcd\x64\x81\xee\x1a\x6e\x0c\x1d\x90\xee\x2f\x75\x09\x72\xff\x9d\xd6\x0b\x53\xf9\xff\x76\xe6\xfb\x62\x62\x7f\xb9\xa8\xad\xf7\xc5\x0d\x59\xaf\xd4\x7b\xd7\x52\x35\xe3\xad\xed\x79\x0b\xe3\x1d\x44\x4c\x40\x75\x0d\xfb\xd5\xbc\xa0\xc1\x33\x6e\xc0\x8a\xfc\xef\x6e\xc1\x2f\x96\x15\xab\xf8\xe7\x0e\xc0\x2b\xa0\x72\x53\x26\x0c\xd8\x6e\xc6\x84\x25\x63\xba\x09\xaf\x96\xa3\xf1\x57\x80\x8c\xda\xaf\xea\x11\xd8\x81\x8a\xea\x8b\x3d\x91\x0e\xb6\xbf\xbc\x98\x44\xc1\xc0\x2c\x3f\x55\x61\x37\x14\x73\xfe\x58\x1a\x1b\x09\x39\x02\x62\x77\x85\xbd\x18\x28\xec\xc9\x75\x14\xf6\x6d\x9e\x7f\xee\xcc\x97\xe5\xf9\x67\xca\x7c\xe5\x95\xdf\x37\xf1\xce\x9c\xf7\xde\x99\xf3\x9d\xde\x99\xf3\xad\xdf\x99\xfb\x23\xc2\x7e\x93\xc8\xc2\x86\x51\x73\xf2\x9b\xb1\xd5\xea\x4a\x34\xab\xc7\x10\x79\x31\x7c\x67\x58\x69\xaf\x87\x37\xe3\x18\x26\x52\xfb\x6d\xce\x8d\xf6\x25\x0d\xc5\xc3\xa7\x46\x74\xf9\xcd\xbc\xba\xf2\xed\x42\x5d\x01\xbe\x2c\xf4\xb9\xcd\xb5\xe9\x86\xe3\xd5\xf2\x9c\xaf\xaa\x2b\xf4\x0f\x75\xc5\x30\x00\x82\x79\x35\x28\x06\xd3\x8a\xca\x40\xd6\x07\x26\x3c\x75\x58\x69\xee\x44\xef\x46\x97\x75\x79\xba\x28\x8b\x32\x63\x8b\x0a\xa5\xf0\xbc\x5c\x68\xbe\x01\x44\x1d\xb3\xbf\xed\xbc\x74\xcd\x4c\xfd\xcb\x0d\xcc\x03\x0f\x39\xb0\xbb\x63\x47\x5c\x93\x67\xe7\xc2\x2c\xd9\x7c\xaf\x23\xfb\x51\xc1\x21\x63\x40\x6e\x24\xa7\xa1\xdd\x4a\x88\xbc\xab\xe6\x4f\xf0\xd5\x4b\x5d\xd4\xfd\x5e\x74\xd6\x7c\xbb\x3e\xfb\x89\xc8\xde\x0c\xda\x8b\xbf\x5d\xa7\xb5\xa7\xbb\x62\xc1\x14\x27\x98\xe1\x14\xce\xd4\x64\x38\xc7\x1c\x17\x7b\x03\x24\x6f\xff\x8d\xba\x3a\x45\xd8\xdb\x7a\x79\x00\x8c\x6e\xda\x98\xed\x20\x2c\x5f\xaa\xcd\x13\x10\x16\xeb\x2f\xf2\xbf\xbf\xfe\x6a\x38\x80\x21\xf2\xfe\xc6\x07\xfe\x74\x84\x86\xab\x60\xfa\x9f\x1c\x9b\x6b\xf0\xa3\x86\x8d\xfe\x5e\x40\x6b\xd2\xde\x47\x20\x7d\x68\xce\x17\xa7\xd5\x0c\x7d\x8d\xe8\x96\x5b\xa9\xfb\x81\xe6\x78\xb9\xf8\xc0\x57\xf5\xab\xa1\x16\x86\x55\x7c\x10\x83\x76\x7d\x3a\x60\xab\xc0\x53\x8f\xda\x8d\x76\x3b\x2b\x73\x1f\xd1\x49\x37\x88\xde\x59\xa3\x9c\x55\x0c\xb1\xf5\x8e\x74\xb6\x9e\xc9\xea\xae\x14\x6e\xb4\x00\x7d\x50\x2d\x5f\xfb\xc4\xbe\x14\x02\x8f\x3f\x61\xcf\x8e\xa2\xd5\x35\x2a\xc3\xce\x9d\x1a\xee\x89\x54\x66\xc3\x64\xad\x5e\xd3\x2e\x1e\xa9\x36\x03\x2e\xd9\xdd\xad\x37\xef\x77\x69\xbb\x4f\x7a\xb5\x4b\x78\x75\xab\x37\x83\x2d\xfc\xe2\xaf\xe6\xe1\xe0\xfc\x62\x3d\x9b\xd4\x89\x94\xc8\x11\x4c\xef\x95\x66\xe8\x5e\x2e\x81\x0c\xfb\x64\xeb\x54\x44\x53\x70\x1d\x41\x6a\x9c\xd3\xae\xdb\x58\x37\x92\x0c\xbc\x02\xd0\x08\x93\xcc\x96\xe7\x30\x48\x5a\xc6\x7e\x34\x9a\xb6\x36\x66\xcf\x51\x36\x5f\x2e\x5c\x6f\x2a\xdb\x9a\x34\xe0\xe9\xdb\x32\xfc\x68\xb7\x65\x78\xec\xb4\x65\x1d\x33\x64\x29\x92\xdd\x66\xe7\xab\x69\xa7\xeb\x31\xc0\xff\x19\x0c\xfb\xcf\x52\x32\x43\xa4\x75\x2c\x95\xf8\x86\x61\xb6\xde\x35\x66\x27\x00\x67\x98\xea\x85\x75\x99\x9c\x58\xc8\x34\x2e\x74\xd9\xf1\x9f\x51\x37\xb8\xdc\xc6\x07\x2e\x95\xc9\xd7\xe8\xdf\x94\x6f\x4d\x62\xb7\x9b\x2a\x00\x77\xd6\x97\x9b\xf4\xd8\xba\x6f\xa6\xb7\x5b\x46\x6d
\x8d\xf9\xf8\x76\x4a\xc3\x6d\xf6\xbb\x1c\x7e\xfd\x27\x34\xab\xaa\xf3\xf5\xbd\xc3\xc3\xb3\x6a\xb6\x3e\x48\xf9\xe1\x45\x55\xd0\x5f\xd6\xe8\x03\x39\xc0\x07\x04\xa5\x57\xe8\x7f\x9e\xb1\x6a\x56\xb2\xb5\xb0\x98\x76\x83\x0c\xec\x0a\x91\x9b\x3d\x0e\x0f\xd1\xf7\xbc\x92\xc7\xe1\x38\x17\xe2\x2e\x59\x3a\xe7\x6b\xf4\x37\x45\xe9\x6f\xb7\xbe\x82\x6d\xfc\x2b\xce\x1f\x36\xfb\x5f\x06\x3b\x69\xd0\x1d\xa9\xbc\x3b\xe8\xf6\xed\xfa\xe7\xfb\x76\xf4\xe8\x6f\xb2\x3b\x1a\xf2\xa7\xf0\x43\x8b\xfb\x4c\x7d\xef\xa2\x56\xbf\xde\xbe\x6d\xd8\x9f\x73\xd4\x61\xb2\x01\x76\xb2\x71\x0a\x3b\x67\xfe\x36\x95\xbb\xf1\x7f\x5a\xe6\xfc\xe0\x97\x35\x5a\xae\xd0\x77\x72\x2b\x4d\x59\x94\x3c\x47\xd9\x32\xe7\x53\xc0\xc2\x16\x39\xba\x58\x73\x54\x56\x62\x5c\xfb\x9b\x90\xa3\xd6\x07\xb5\x0f\xa7\xe9\xc3\xa9\xfa\xde\xed\x83\xfc\xf5\xbe\xdc\x93\xd4\x36\x3b\x68\xa0\x8f\x74\x64\xbf\xfe\xaa\x7d\x3b\xb8\x2c\x17\xb9\x78\xbb\xec\xc0\xc8\xad\x43\x82\x17\xa4\xff\x0c\x9b\x7d\x6e\x7d\x75\xf8\xf5\xdd\x1b\xfb\xfb\xfa\xf0\x96\xec\xed\xba\x5a\x95\x8b\xd3\x47\xab\xe5\xd9\xf1\x8c\xad\x8e\x97\xb9\xd0\xdc\x4b\xf8\xf1\xa0\xd0\x7e\x55\xc2\x3f\x61\xef\xf9\x42\xca\xb8\x6f\xb2\xe7\x17\x8b\x2b\x21\xdf\x5b\x5f\x35\x11\xec\x22\x5b\x93\x9c\x8b\x1f\x27\x92\x8e\xec\x20\x2c\x6d\xc2\xe6\xfb\x7a\x08\x84\x9f\xb2\xe5\xc5\xa2\xe2\x2b\x35\x73\x09\x3f\xcd\xeb\x58\x21\x9b\xb7\xc1\x02\x9e\xc2\x79\xc6\xfa\x0b\xdf\x54\x2b\x26\xbe\x5c\xce\xca\x39\x47\x93\x1a\xdb\x03\x85\x44\x92\xfe\x0a\xda\xb4\x08\x33\xd5\xbd\x6f\xab\xba\xc1\xfe\xbe\x70\xf5\xaf\x40\xa7\x12\xf8\x9b\x23\xe4\x6d\xbe\xa7\x9e\x27\x74\x2e\x7f\x7a\x00\x3f\x7d\xf7\xe8\x91\xf8\xc9\x42\x49\x88\x0b\x5e\xd7\xd7\x17\xab\xd5\xf2\x94\x55\x7c\x0a\x56\x57\xcd\xf8\x8a\xc3\x39\x4f\xb4\xe0\x9b\x0a\x09\x16\x58\x56\xf1\x15\x34\x82\x6e\x6c\xc3\x1f\x30\x38\x91\xe0\xb7\x91\xb7\x79\x74\xec\x79\x7b\xc2\x42\xbd\xcd\xf7\xf0\xf1\x1f\x22\x38\xcf\x97\x97\x2d\x7d\x68\xf6\x95\x94\xbc\x1c\xca\x27\xaa\x8b\x02\x81\xff\xe8\xd1\x1e\x1c\xcd\xf4\xf6\xd0\x3e\xd2\x30\xc3\x83\xfd\xba\xe2\x90\xa2\xde\x66\xc1\xaa\xab\x17\x8b\x33\x56\x65\x33\x9e\xb7\xf4\xee\xa3\xe5\x62\x7e\x85\xd8\xf9\x39\x87\x7e\x97\x6b\x70\x40\x74\xb1\x28\xab\xa9\x78\xd1\xcc\xd8\x9a\xc3\xdb\xa6\x10\x44\x83\xa9\x81\x11\x42\xaa\xea\x7d\x51\x0d\x56\x31\xd4\x33\xed\xeb\x39\x2b\x57\xc3\x9e\x41\xbf\x14\xaf\x5f\x29\xd1\xdd\xbd\xab\x78\xbf\xd5\xef\x80\xa5\xa5\x00\x14\xff\x57\xf1\x5e\x42\xd5\xde\x78\x1d\x67\xe0\x0b\x70\x06\x18\x85\x5b\x5f\x68\xac\x5c\xe6\x2d\x5d\x23\x2f\x17\x39\xdf\xa0\x23\x74\x17\x1b\xcd\xbe\xf1\xa3\x3b\x77\x34\xe3\xdf\xdf\x97\xcd\x2c\xc6\x0f\x74\xde\x00\xc8\xdb\xbe\xb1\x0b\x53\x7a\x24\x34\x2e\x25\x23\x7f\xbd\x7b\x54\xab\xff\xbe\x26\x2f\xb4\x7f\x64\x88\x1f\x35\xa2\x6f\xbe\x41\xd8\xab\x0d\x08\xfd\xaa\x7c\x48\xa9\xa4\xe6\x44\x1a\x2b\xfa\x15\x75\xec\xb0\x11\xfe\x16\x84\x00\xa1\x4d\x49\x8d\xf0\xb3\x19\xcf\xde\xbf\xcc\xd8\x9c\xad\xfe\xb7\x68\x35\x11\x7a\x78\xbe\x2c\x17\x72\x37\x35\x08\xa0\xf9\xa9\xeb\xf1\xed\xcf\xd2\xeb\x5b\xe1\x54\xb3\xd5\xf2\x12\x3d\x5c\xad\x96\xab\x09\xf4\xea\xce\x13\x91\x0a\xb5\xa6\xf9\xd7\xfd\x3b\x68\xbf\x45\x70\x50\x2d\x65\x64\x9d\xe0\x68\xef\xa0\x5a\xfe\xf5\xfc\x9c\xaf\x8e\xd9\x9a\x4f\xf6\xd0\xbe\x44\x20\x4c\x7e\xb1\xac\x84\x81\x03\xb3\x52\x2e\x77\xc4\xc3\xba\xa3\x1f\x3f\xc3\x48\xd0\xca\x09\xb2\x6a\x91\x89\xb7\xe2\x98\xca\x65\x36\x35\x38\x49\x29\x1b\xb4\x31\xd1\x05\xf8\x4d\xdd\x46\x6a\x14\xa6\x2a\x37\xd4\xdb\xeb\xeb\x45\x3a\xc4\x71\xdd\xd0\xa4\x16\x0d\xed\x6d\x65\x9c\x8f\x1e\x51\x15\xeb\x54\x98\xc3\x77\xd3\xab\x8a\xa3\x35\xff\xaf\x0b\xbe\xc8\x20\xd0\xd9\x19\x6d\x69\xd4\xa6\x03\x03\xe1\xd5\x59\xba\x9c\x37\x8e\x64\xa3\x4c\xbd\x2e\x65\x32\xa4\xdc\x60\x1a\x17\x52\x24\x05\x84\x9
5\x80\x8e\xbd\x86\xa5\x66\xe3\xb1\x81\x09\x08\xc3\x3a\x13\xfe\x90\x09\x87\xc1\xdf\xdf\x91\x49\x4c\x24\x97\x9e\xe2\xf2\xa1\xd7\x41\xb1\x7f\x64\xb1\x9a\x68\x8b\xce\x3c\xf4\x06\x9d\x09\x3e\x49\xa2\x98\x2a\x66\x63\xc9\xec\xa3\x2d\x99\xc5\x64\xd7\x4e\xb5\x90\x26\xae\xba\x1d\xed\x7a\x40\x63\x9b\x80\xa1\xef\x12\x22\xf5\x57\xe3\x44\x3f\x69\x6a\x90\x8a\xd4\x7d\x98\x5c\x0d\xb2\xa6\x16\x7e\x74\x50\x69\x40\xeb\x1f\x84\x12\x64\xb4\xda\x72\x70\x69\x7b\xac\x13\xd6\x47\x19\x0d\xe5\xfe\x91\xc3\xf5\x7b\x11\xbd\x6d\xf6\xb9\x12\xe1\x46\xf6\x2b\xce\xf2\xe3\xe5\xa2\x2a\x17\x17\x70\x78\x16\xb4\xdf\x86\x22\xc1\xc9\x0f\xd0\xf7\x6f\x8e\x80\xad\x63\x91\x58\x18\x46\x83\x3b\x3f\x2c\x3e\xb0\x79\x99\x03\x90\x94\xf6\x1d\xd5\xad\x46\xde\x5d\x2a\x48\x22\x84\x89\x82\x37\x0d\x9d\xb7\xca\x4d\x44\xd3\xe6\xc7\xfd\x7d\x91\x8c\xd7\x11\xaa\x87\xe6\xb6\x0c\x23\x32\x11\x14\x51\xf2\x1f\x5a\x30\x34\x42\xfb\x8f\x1a\xc6\x0e\x0f\xd1\x0f\x05\xba\xe4\x48\xe4\x6b\x17\xe7\x48\x64\xaa\x53\x54\x56\xff\xef\xff\xfc\xdf\x7a\x58\xd2\x51\x00\xc7\xb7\x2c\x3d\x1f\x00\xde\x19\x04\x7f\x69\xbd\x2f\xc1\x0b\x26\xad\x95\x0b\x60\xac\x9b\x21\xd1\xbf\xf8\xfa\x97\xc0\x60\xbe\x43\x5d\x7d\x82\xaa\xba\x98\x8e\x86\x5a\x57\x92\x2d\xd8\x1c\x0e\x3f\x34\x72\x7c\xc1\x59\x8e\x8a\x72\xb5\xae\x6a\x29\x41\xb7\x76\x57\xf3\x70\x74\x43\x93\xc5\x72\x28\xde\xf5\x5e\x6d\x13\x92\xd0\x6d\xa5\x7f\x15\x59\x35\x5e\x1b\xf9\xd6\xbc\x0e\xc7\xb0\x1e\x9e\x87\xb5\x41\x1d\xd7\xa8\x40\x2d\xe8\xc8\xe2\x30\xf7\xfb\xf1\x40\x47\x86\xe5\x6b\x06\xd4\xdc\x69\xb4\x6b\x4a\xc0\x1a\xeb\x6d\xcd\x57\x8b\x51\xdd\x04\x7e\x07\x13\xac\xd3\x7a\xd9\x77\xbf\x2f\xdb\x33\x76\x85\xca\x45\x36\xbf\x80\x97\x10\xf1\x72\xa1\xbf\xd2\x98\xa4\xfc\xa8\x96\xce\xc3\x1d\xa4\x03\xa6\x7c\x3d\x01\x7a\xea\x3d\x8d\xc0\xde\x24\x49\x4b\x17\xa8\x6f\x13\xa8\x07\xc9\x8b\x14\xd8\x58\x7e\xf0\x39\x65\x3e\x1c\xe1\xfb\x12\xa5\x4a\xa2\x8f\x6e\x56\xa2\x10\x32\xae\x29\xf4\x18\x84\xee\x6d\xfa\x62\xf7\x36\xde\xf1\x1e\xfa\x15\x24\x32\x91\x3c\xc8\x5f\x1b\x7d\x04\x56\x7d\xc0\x1b\x95\xe1\x1d\x03\x7b\xfa\x2b\x98\x59\x13\xb5\x3c\x8d\x5a\xf8\xeb\xc9\xa3\xbb\x14\xe5\x30\x53\xc6\xf3\x26\xf2\xd6\x61\x53\x9d\xc0\x6a\xbe\x43\x40\xd3\xbe\x43\xfc\xb9\xdf\xcb\x49\x54\xae\xd1\x8e\xc6\x92\xbf\x06\x5f\x37\x25\xd1\xc0\xea\xa8\x06\x54\xf4\x00\xa8\x25\x25\x5a\x8c\x6d\x67\x7f\x3a\xe9\x4e\x3b\x4f\x54\x9d\x9d\x6b\xd9\xc8\xa4\x3a\x3b\x47\x47\xbd\xb1\x64\x0f\xfd\xe9\xe8\x48\x06\xe5\x7e\x76\xa2\x16\x31\xaa\xb3\xf3\x7e\x9e\xa1\xbd\xa0\xb7\xd0\x7b\x9f\x73\xf2\x4d\x88\x15\x1d\x01\x83\x77\x3e\xf0\xd5\xba\x5c\x2e\xee\xdc\x43\x77\x60\xd2\xf7\xce\x54\xfc\x2a\xf9\xb9\x73\x4f\xcb\x0a\xe1\x77\xd9\x5d\xf5\xbb\xfc\x72\xeb\xab\x8f\x6a\x92\xee\xe5\xf2\x8c\xa3\x6f\x9f\x7e\x8f\xd2\x8b\x72\x9e\xa3\xe5\x79\x55\x9e\x95\x7f\xe7\xab\xf5\x14\xcd\xcb\xf7\x1c\xad\x0e\x7e\x59\x4f\xe5\x2b\x31\xcc\xb4\xaf\xcf\x79\x56\x16\x65\x26\x9c\x37\x2f\x41\xe1\xe7\xac\xaa\xf8\x6a\xb1\x06\x7c\xd0\xa8\x9a\x71\x54\x2c\xe7\xf3\xe5\x65\xb9\x38\xbd\x27\xe7\x3c\x85\xf9\xf5\xce\x45\xa2\x3b\xb5\xd1\xdc\x91\x93\xbb\x1d\x80\x03\x76\x96\xf7\x66\x51\x9b\x23\x92\xe2\xd9\xad\xaf\xa4\xba\xd4\xa1\xc9\x66\x9a\xbb\x3b\x80\x89\x3e\x83\xee\x40\x39\xed\xdb\x45\x6f\xd6\xf8\x4f\xda\xf7\x83\xc5\x32\xe7\x27\x57\xe7\xbc\x4d\xe6\xda\xb9\x6a\xf5\xe2\x51\x2e\xf4\x79\xe3\x17\xe5\xe2\x74\xf9\xbf\x5e\xa2\x0f\xde\x01\x3d\xf0\xe0\xf5\xbc\x6d\xa1\x9d\x25\x6d\x98\x51\xa1\xb1\xc6\xc4\x56\x97\x33\x36\xef\x61\x8a\x0f\xbc\xbb\x72\x22\x66\x55\xef\x8d\x92\xa7\x18\xd5\x6f\x33\xb6\x7e\x76\xb9\x78\x5e\x6f\x81\x39\x52\x40\x07\xdd\xdf\x01\xbc\x59\x22\x81\xaa\x71\x52\x28\x75\xc4\xe8\x82\xcb\xf5\x21\xf1\x1c\x0e\x12\xef\x09\xd9\xe8\xb2\x7a\xf3\x5e\x16\x30\x14\x
10\xf0\xb9\x33\xf9\xd5\xeb\xd7\x8b\x59\xb9\x58\x8a\x5e\x31\x74\xc9\x53\xa4\x0e\xaa\xaa\x59\xeb\x03\x65\xd0\x4a\x26\x1f\x6f\xa9\x23\xaa\xb0\x6c\xf2\x71\xfa\x8f\x8f\x6f\xa7\x34\xda\x66\x49\x64\x70\x62\xf7\xf5\xd3\x27\x8f\xab\xea\xfc\x85\x18\x32\xd6\x55\x83\xed\xcf\x69\x79\x2a\x37\xb3\x1c\xfc\xb2\xfe\xf3\x36\x98\xef\x5c\xac\x39\xbc\xb0\x65\xd5\x9d\xfb\xb7\x86\x84\xbe\x2b\x4f\x7f\x02\x84\xf7\x45\x87\x7f\x59\xcf\x44\x50\x2e\x4f\x17\xcb\x15\xbf\x37\x2f\x17\xfc\x56\x43\xfa\x92\xa7\xfe\x56\x24\x85\x92\x5e\xf1\x54\x8e\x4d\xf2\x98\xf1\x9d\x83\xc3\x79\x99\x1e\x0a\x14\x22\x38\xdf\x3a\x3c\x44\xf9\x72\x71\xa7\x42\xcb\x0f\x7c\xb5\x2a\x73\x5e\xaf\x38\xd4\x0b\x1c\xb7\xb4\x33\xc8\x6a\xe9\x40\x44\xb8\x3b\xcd\x8e\x06\x58\x90\xe8\x00\x1c\x48\x9a\x5d\x28\x61\x21\xb0\x4e\xa6\x83\x00\x77\xf7\x6f\x7d\x34\x88\x43\x3e\x51\x2b\x5b\x35\xcb\x7f\xbe\x47\xc8\xc7\xb7\x42\x0c\xd3\x37\x52\x0c\x6f\xf7\x6e\xdd\xfa\xff\x01\x00\x00\xff\xff\x02\x09\x77\x52\x22\x24\x06\x00") func web3JsBytes() ([]byte, error) { return bindataRead( @@ -106,7 +106,7 @@ func web3Js() (*asset, error) { } info := bindataFileInfo{name: "web3.js", size: 0, mode: os.FileMode(0), modTime: time.Unix(0, 0)} - a := &asset{bytes: bytes, info: info, digest: [32]uint8{0x4c, 0xb2, 0xb5, 0x69, 0x9e, 0x5e, 0xe3, 0x56, 0x47, 0x6, 0x96, 0x4a, 0x7a, 0xcd, 0xdf, 0x79, 0xc5, 0xf2, 0x71, 0x64, 0x77, 0x8a, 0x53, 0x58, 0x6, 0xb1, 0xff, 0x4f, 0x0, 0xd6, 0x7f, 0x61}} + a := &asset{bytes: bytes, info: info, digest: [32]uint8{0x12, 0x22, 0x81, 0x1, 0xe2, 0x72, 0xd3, 0xd5, 0x4d, 0x2d, 0x30, 0xa5, 0x3, 0x90, 0x3a, 0xf8, 0x17, 0x2d, 0xe3, 0x5, 0x44, 0x21, 0x63, 0xba, 0x1a, 0x37, 0x3f, 0x3f, 0xa5, 0x30, 0x5e, 0x6f}} return a, nil } diff --git a/internal/jsre/deps/web3.js b/internal/jsre/deps/web3.js index cc10e20cd..dfd8f9d93 100644 --- a/internal/jsre/deps/web3.js +++ b/internal/jsre/deps/web3.js @@ -3949,10 +3949,18 @@ var outputSyncingFormatter = function(result) { result.startingBlock = utils.toDecimal(result.startingBlock); result.currentBlock = utils.toDecimal(result.currentBlock); result.highestBlock = utils.toDecimal(result.highestBlock); - if (result.knownStates) { - result.knownStates = utils.toDecimal(result.knownStates); - result.pulledStates = utils.toDecimal(result.pulledStates); - } + result.syncedAccounts = utils.toDecimal(result.syncedAccounts); + result.syncedAccountBytes = utils.toDecimal(result.syncedAccountBytes); + result.syncedBytecodes = utils.toDecimal(result.syncedBytecodes); + result.syncedBytecodeBytes = utils.toDecimal(result.syncedBytecodeBytes); + result.syncedStorage = utils.toDecimal(result.syncedStorage); + result.syncedStorageBytes = utils.toDecimal(result.syncedStorageBytes); + result.healedTrienodes = utils.toDecimal(result.healedTrienodes); + result.healedTrienodeBytes = utils.toDecimal(result.healedTrienodeBytes); + result.healedBytecodes = utils.toDecimal(result.healedBytecodes); + result.healedBytecodeBytes = utils.toDecimal(result.healedBytecodeBytes); + result.healingTrienodes = utils.toDecimal(result.healingTrienodes); + result.healingBytecode = utils.toDecimal(result.healingBytecode); return result; }; diff --git a/les/client_handler.go b/les/client_handler.go index ee4dd9407..d90f9a2fe 100644 --- a/les/client_handler.go +++ b/les/client_handler.go @@ -74,7 +74,7 @@ func newClientHandler(ulcServers []string, ulcFraction int, checkpoint *params.T height = (checkpoint.SectionIndex+1)*params.CHTFrequency - 1 } handler.fetcher = newLightFetcher(backend.blockchain, backend.engine, backend.peers, handler.ulc, backend.chainDb, 
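The formatter now decodes the granular snap-sync counters that replace the legacy knownStates/pulledStates pair. For illustration, a minimal Go sketch of how a consumer would read the new fields through ethclient (assuming the upstream ethclient.SyncProgress API; the endpoint URL is a placeholder):

	package main

	import (
		"context"
		"fmt"
		"log"

		"github.com/scroll-tech/go-ethereum/ethclient"
	)

	func main() {
		client, err := ethclient.Dial("http://localhost:8545") // placeholder endpoint
		if err != nil {
			log.Fatal(err)
		}
		progress, err := client.SyncProgress(context.Background())
		if err != nil {
			log.Fatal(err)
		}
		if progress == nil {
			fmt.Println("node is not syncing")
			return
		}
		// The legacy KnownStates/PulledStates counters are gone; snap sync
		// reports per-phase account/storage/bytecode and healing statistics.
		fmt.Printf("blocks: %d/%d\n", progress.CurrentBlock, progress.HighestBlock)
		fmt.Printf("accounts synced: %d (%d bytes)\n", progress.SyncedAccounts, progress.SyncedAccountBytes)
		fmt.Printf("trienodes healed: %d (%d bytes)\n", progress.HealedTrienodes, progress.HealedTrienodeBytes)
	}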
diff --git a/les/client_handler.go b/les/client_handler.go
index ee4dd9407..d90f9a2fe 100644
--- a/les/client_handler.go
+++ b/les/client_handler.go
@@ -74,7 +74,7 @@ func newClientHandler(ulcServers []string, ulcFraction int, checkpoint *params.T
 		height = (checkpoint.SectionIndex+1)*params.CHTFrequency - 1
 	}
 	handler.fetcher = newLightFetcher(backend.blockchain, backend.engine, backend.peers, handler.ulc, backend.chainDb, backend.reqDist, handler.synchronise)
-	handler.downloader = downloader.New(height, backend.chainDb, nil, backend.eventMux, nil, backend.blockchain, handler.removePeer)
+	handler.downloader = downloader.New(height, backend.chainDb, backend.eventMux, nil, backend.blockchain, handler.removePeer)
 	handler.backend.peers.subscribe((*downloaderPeerNotify)(handler))
 	return handler
 }
diff --git a/les/downloader/downloader.go b/les/downloader/downloader.go
index d02d91b2a..c4c0d26a8 100644
--- a/les/downloader/downloader.go
+++ b/les/downloader/downloader.go
@@ -40,7 +40,6 @@ import (
 	"github.com/scroll-tech/go-ethereum/log"
 	"github.com/scroll-tech/go-ethereum/metrics"
 	"github.com/scroll-tech/go-ethereum/params"
-	"github.com/scroll-tech/go-ethereum/trie"
 )
 
 var (
@@ -97,8 +96,7 @@ type Downloader struct {
 	queue *queue   // Scheduler for selecting the hashes to download
 	peers *peerSet // Set of active peers from which download can proceed
 
-	stateDB    ethdb.Database  // Database to state sync into (and deduplicate via)
-	stateBloom *trie.SyncBloom // Bloom filter for fast trie node and contract code existence checks
+	stateDB ethdb.Database // Database to state sync into (and deduplicate via)
 
 	// Statistics
 	syncStatsChainOrigin uint64 // Origin block number where syncing started at
@@ -207,13 +205,12 @@ type BlockChain interface {
 }
 
 // New creates a new downloader to fetch hashes and blocks from remote peers.
-func New(checkpoint uint64, stateDb ethdb.Database, stateBloom *trie.SyncBloom, mux *event.TypeMux, chain BlockChain, lightchain LightChain, dropPeer peerDropFn) *Downloader {
+func New(checkpoint uint64, stateDb ethdb.Database, mux *event.TypeMux, chain BlockChain, lightchain LightChain, dropPeer peerDropFn) *Downloader {
 	if lightchain == nil {
 		lightchain = chain
 	}
 	dl := &Downloader{
 		stateDB:    stateDb,
-		stateBloom: stateBloom,
 		mux:        mux,
 		checkpoint: checkpoint,
 		queue:      newQueue(blockCacheMaxItems, blockCacheInitialItems),
@@ -231,9 +228,9 @@ func New(checkpoint uint64, stateDb ethdb.Database, stateBloom *trie.SyncBloom,
 		stateCh:        make(chan dataPack),
 		SnapSyncer:     snap.NewSyncer(stateDb),
 		stateSyncStart: make(chan *stateSync),
-		syncStatsState: stateSyncStats{
-			processed: rawdb.ReadFastTrieProgress(stateDb),
-		},
+		//syncStatsState: stateSyncStats{
+		//	processed: rawdb.ReadFastTrieProgress(stateDb),
+		//},
 		trackStateReq: make(chan *stateReq),
 	}
 	go dl.stateFetcher()
@@ -268,8 +265,8 @@ func (d *Downloader) Progress() ethereum.SyncProgress {
 		StartingBlock: d.syncStatsChainOrigin,
 		CurrentBlock:  current,
 		HighestBlock:  d.syncStatsChainHeight,
-		PulledStates:  d.syncStatsState.processed,
-		KnownStates:   d.syncStatsState.processed + d.syncStatsState.pending,
+		//PulledStates:  d.syncStatsState.processed,
+		//KnownStates:   d.syncStatsState.processed + d.syncStatsState.pending,
 	}
 }
 
@@ -367,12 +364,6 @@ func (d *Downloader) synchronise(id string, hash common.Hash, td *big.Int, mode
 	if atomic.CompareAndSwapInt32(&d.notified, 0, 1) {
 		log.Info("Block synchronisation started")
 	}
-	// If we are already full syncing, but have a fast-sync bloom filter laying
-	// around, make sure it doesn't use memory any more. This is a special case
-	// when the user attempts to fast sync a new empty network.
-	if mode == FullSync && d.stateBloom != nil {
-		d.stateBloom.Close()
-	}
 	// If snap sync was requested, create the snap scheduler and switch to fast
 	// sync mode. Long term we could drop fast sync or merge the two together,
 	// but until snap becomes prevalent, we should support both. TODO(karalabe).
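The constructor loses its *trie.SyncBloom parameter outright. A minimal sketch of the new call shape, mirroring the test-harness change further below; the BlockChain implementation and drop callback are placeholders:

	import (
		"github.com/scroll-tech/go-ethereum/core/rawdb"
		"github.com/scroll-tech/go-ethereum/event"
	)

	// chain is assumed to implement this package's BlockChain interface.
	db := rawdb.NewMemoryDatabase()
	dl := New(0, db, new(event.TypeMux), chain, nil, func(id string) {
		// drop-peer callback; a no-op placeholder here
	})
	_ = dl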
@@ -628,9 +619,6 @@ func (d *Downloader) Terminate() {
 	default:
 		close(d.quitCh)
 	}
-	if d.stateBloom != nil {
-		d.stateBloom.Close()
-	}
 	d.quitLock.Unlock()
 
 	// Cancel any pending download requests
@@ -705,9 +693,11 @@ func (d *Downloader) fetchHead(p *peerConnection) (head *types.Header, pivot *ty
 // calculateRequestSpan calculates what headers to request from a peer when trying to determine the
 // common ancestor.
 // It returns parameters to be used for peer.RequestHeadersByNumber:
-//  from - starting block number
-//  count - number of headers to request
-//  skip - number of headers to skip
+//
+//	from - starting block number
+//	count - number of headers to request
+//	skip - number of headers to skip
+//
 // and also returns 'max', the last block which is expected to be returned by the remote peers,
 // given the (from,count,skip)
 func calculateRequestSpan(remoteHeight, localHeight uint64) (int64, int, int, uint64) {
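For orientation, a sketch of how the ancestor lookup consumes the returned tuple (names as in this package; illustrative only, not the verbatim call site):

	// Work out which headers to request when binary-searching for the common
	// ancestor, then fire a single batched header request at the peer.
	from, count, skip, max := calculateRequestSpan(remoteHeight, localHeight)
	go p.peer.RequestHeadersByNumber(uint64(from), count, skip, false)
	// 'max' is the highest block number the remote is expected to answer
	// with, letting the response be sanity-checked against the request.
	_ = max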
@@ -1322,22 +1312,22 @@ func (d *Downloader) fetchReceipts(from uint64) error {
 // various callbacks to handle the slight differences between processing them.
 //
 // The instrumentation parameters:
-//  - errCancel: error type to return if the fetch operation is cancelled (mostly makes logging nicer)
-//  - deliveryCh: channel from which to retrieve downloaded data packets (merged from all concurrent peers)
-//  - deliver: processing callback to deliver data packets into type specific download queues (usually within `queue`)
-//  - wakeCh: notification channel for waking the fetcher when new tasks are available (or sync completed)
-//  - expire: task callback method to abort requests that took too long and return the faulty peers (traffic shaping)
-//  - pending: task callback for the number of requests still needing download (detect completion/non-completability)
-//  - inFlight: task callback for the number of in-progress requests (wait for all active downloads to finish)
-//  - throttle: task callback to check if the processing queue is full and activate throttling (bound memory use)
-//  - reserve: task callback to reserve new download tasks to a particular peer (also signals partial completions)
-//  - fetchHook: tester callback to notify of new tasks being initiated (allows testing the scheduling logic)
-//  - fetch: network callback to actually send a particular download request to a physical remote peer
-//  - cancel: task callback to abort an in-flight download request and allow rescheduling it (in case of lost peer)
-//  - capacity: network callback to retrieve the estimated type-specific bandwidth capacity of a peer (traffic shaping)
-//  - idle: network callback to retrieve the currently (type specific) idle peers that can be assigned tasks
-//  - setIdle: network callback to set a peer back to idle and update its estimated capacity (traffic shaping)
-//  - kind: textual label of the type being downloaded to display in log messages
+//   - errCancel: error type to return if the fetch operation is cancelled (mostly makes logging nicer)
+//   - deliveryCh: channel from which to retrieve downloaded data packets (merged from all concurrent peers)
+//   - deliver: processing callback to deliver data packets into type specific download queues (usually within `queue`)
+//   - wakeCh: notification channel for waking the fetcher when new tasks are available (or sync completed)
+//   - expire: task callback method to abort requests that took too long and return the faulty peers (traffic shaping)
+//   - pending: task callback for the number of requests still needing download (detect completion/non-completability)
+//   - inFlight: task callback for the number of in-progress requests (wait for all active downloads to finish)
+//   - throttle: task callback to check if the processing queue is full and activate throttling (bound memory use)
+//   - reserve: task callback to reserve new download tasks to a particular peer (also signals partial completions)
+//   - fetchHook: tester callback to notify of new tasks being initiated (allows testing the scheduling logic)
+//   - fetch: network callback to actually send a particular download request to a physical remote peer
+//   - cancel: task callback to abort an in-flight download request and allow rescheduling it (in case of lost peer)
+//   - capacity: network callback to retrieve the estimated type-specific bandwidth capacity of a peer (traffic shaping)
+//   - idle: network callback to retrieve the currently (type specific) idle peers that can be assigned tasks
+//   - setIdle: network callback to set a peer back to idle and update its estimated capacity (traffic shaping)
+//   - kind: textual label of the type being downloaded to display in log messages
 func (d *Downloader) fetchParts(deliveryCh chan dataPack, deliver func(dataPack) (int, error), wakeCh chan bool,
 	expire func() map[string]int, pending func() int, inFlight func() bool, reserve func(*peerConnection, int) (*fetchRequest, bool, bool),
 	fetchHook func([]*types.Header), fetch func(*peerConnection, *fetchRequest) error, cancel func(*fetchRequest), capacity func(*peerConnection) int,
@@ -1930,15 +1920,6 @@ func (d *Downloader) commitPivotBlock(result *fetchResult) error {
 		return err
 	}
 	atomic.StoreInt32(&d.committed, 1)
-
-	// If we had a bloom filter for the state sync, deallocate it now. Note, we only
-	// deallocate internally, but keep the empty wrapper. This ensures that if we do
-	// a rollback after committing the pivot and restarting fast sync, we don't end
-	// up using a nil bloom. Empty bloom is fine, it just returns that it does not
-	// have the info we need, so reach down to the database instead.
-	if d.stateBloom != nil {
-		d.stateBloom.Close()
-	}
 	return nil
 }
diff --git a/les/downloader/downloader_test.go b/les/downloader/downloader_test.go
index e96373548..3e66934f9 100644
--- a/les/downloader/downloader_test.go
+++ b/les/downloader/downloader_test.go
@@ -89,7 +89,7 @@ func newTester() *downloadTester {
 	tester.stateDb = rawdb.NewMemoryDatabase()
 	tester.stateDb.Put(testGenesis.Root().Bytes(), []byte{0x00})
 
-	tester.downloader = New(0, tester.stateDb, trie.NewSyncBloom(1, tester.stateDb), new(event.TypeMux), tester, nil, tester.dropPeer)
+	tester.downloader = New(0, tester.stateDb, new(event.TypeMux), tester, nil, tester.dropPeer)
 	return tester
 }
 
@@ -1207,8 +1207,8 @@ func checkProgress(t *testing.T, d *Downloader, stage string, want ethereum.Sync
 	t.Helper()
 
 	p := d.Progress()
-	p.KnownStates, p.PulledStates = 0, 0
-	want.KnownStates, want.PulledStates = 0, 0
+	//p.KnownStates, p.PulledStates = 0, 0
+	//want.KnownStates, want.PulledStates = 0, 0
 	if p != want {
 		t.Fatalf("%s progress mismatch:\nhave %+v\nwant %+v", stage, p, want)
 	}
diff --git a/les/downloader/statesync.go b/les/downloader/statesync.go
index e4db59a4e..639944ea7 100644
--- a/les/downloader/statesync.go
+++ b/les/downloader/statesync.go
@@ -21,15 +21,13 @@ import (
 	"sync"
 	"time"
 
-	"golang.org/x/crypto/sha3"
-
 	"github.com/scroll-tech/go-ethereum/common"
-	"github.com/scroll-tech/go-ethereum/core/rawdb"
 	"github.com/scroll-tech/go-ethereum/core/state"
 	"github.com/scroll-tech/go-ethereum/crypto"
 	"github.com/scroll-tech/go-ethereum/ethdb"
 	"github.com/scroll-tech/go-ethereum/log"
 	"github.com/scroll-tech/go-ethereum/trie"
+	"golang.org/x/crypto/sha3"
 )
 
 // stateReq represents a batch of state fetch requests grouped together into
@@ -299,7 +297,7 @@ func newStateSync(d *Downloader, root common.Hash) *stateSync {
 	return &stateSync{
 		d:         d,
 		root:      root,
-		sched:     state.NewStateSync(root, d.stateDB, d.stateBloom, nil),
+		sched:     state.NewStateSync(root, d.stateDB, nil),
 		keccak:    sha3.NewLegacyKeccak256().(crypto.KeccakState),
 		trieTasks: make(map[common.Hash]*trieTask),
 		codeTasks: make(map[common.Hash]*codeTask),
@@ -611,6 +609,6 @@ func (s *stateSync) updateStats(written, duplicate, unexpected int, duration tim
 		log.Info("Imported new state entries", "count", written, "elapsed", common.PrettyDuration(duration), "processed", s.d.syncStatsState.processed, "pending", s.d.syncStatsState.pending, "trieretry", len(s.trieTasks), "coderetry", len(s.codeTasks), "duplicate", s.d.syncStatsState.duplicate, "unexpected", s.d.syncStatsState.unexpected)
 	}
 	if written > 0 {
-		rawdb.WriteFastTrieProgress(s.d.stateDB, s.d.syncStatsState.processed)
+		//rawdb.WriteFastTrieProgress(s.d.stateDB, s.d.syncStatsState.processed)
 	}
 }
diff --git a/les/fetcher/block_fetcher_test.go b/les/fetcher/block_fetcher_test.go
index 6553df3c2..cc19951a9 100644
--- a/les/fetcher/block_fetcher_test.go
+++ b/les/fetcher/block_fetcher_test.go
@@ -60,8 +60,8 @@ func makeChain(n int, seed byte, parent *types.Block) ([]common.Hash, map[common
 			block.AddTx(tx)
 		}
 		// If the block number is a multiple of 5, add a bonus uncle to the block
-		if i%5 == 0 {
-			block.AddUncle(&types.Header{ParentHash: block.PrevBlock(i - 1).Hash(), Number: big.NewInt(int64(i - 1))})
+		if i > 0 && i%5 == 0 {
+			block.AddUncle(&types.Header{ParentHash: block.PrevBlock(i - 2).Hash(), Number: big.NewInt(int64(i - 1))})
 		}
 	})
 	hashes := make([]common.Hash, n+1)
diff --git a/light/lightchain.go b/light/lightchain.go
index f14b1679c..fb9b17cc5 100644
--- a/light/lightchain.go
+++ b/light/lightchain.go
@@ -382,6 +382,9 @@ func (lc *LightChain) postChainEvents(events []interface{}) {
 // In the case of a light chain, InsertHeaderChain also creates and posts light
 // chain events when necessary.
 func (lc *LightChain) InsertHeaderChain(chain []*types.Header, checkFreq int) (int, error) {
+	if len(chain) == 0 {
+		return 0, nil
+	}
 	if atomic.LoadInt32(&lc.disableCheckFreq) == 1 {
 		checkFreq = 0
 	}
diff --git a/mobile/ethereum.go b/mobile/ethereum.go
index f5c154337..1385db29f 100644
--- a/mobile/ethereum.go
+++ b/mobile/ethereum.go
@@ -78,11 +78,21 @@ type SyncProgress struct {
 	progress ethereum.SyncProgress
 }
 
-func (p *SyncProgress) GetStartingBlock() int64 { return int64(p.progress.StartingBlock) }
-func (p *SyncProgress) GetCurrentBlock() int64  { return int64(p.progress.CurrentBlock) }
-func (p *SyncProgress) GetHighestBlock() int64  { return int64(p.progress.HighestBlock) }
-func (p *SyncProgress) GetPulledStates() int64  { return int64(p.progress.PulledStates) }
-func (p *SyncProgress) GetKnownStates() int64   { return int64(p.progress.KnownStates) }
+func (p *SyncProgress) GetStartingBlock() int64       { return int64(p.progress.StartingBlock) }
+func (p *SyncProgress) GetCurrentBlock() int64        { return int64(p.progress.CurrentBlock) }
+func (p *SyncProgress) GetHighestBlock() int64        { return int64(p.progress.HighestBlock) }
+func (p *SyncProgress) GetSyncedAccounts() int64      { return int64(p.progress.SyncedAccounts) }
+func (p *SyncProgress) GetSyncedAccountBytes() int64  { return int64(p.progress.SyncedAccountBytes) }
+func (p *SyncProgress) GetSyncedBytecodes() int64     { return int64(p.progress.SyncedBytecodes) }
+func (p *SyncProgress) GetSyncedBytecodeBytes() int64 { return int64(p.progress.SyncedBytecodeBytes) }
+func (p *SyncProgress) GetSyncedStorage() int64       { return int64(p.progress.SyncedStorage) }
+func (p *SyncProgress) GetSyncedStorageBytes() int64  { return int64(p.progress.SyncedStorageBytes) }
+func (p *SyncProgress) GetHealedTrienodes() int64     { return int64(p.progress.HealedTrienodes) }
+func (p *SyncProgress) GetHealedTrienodeBytes() int64 { return int64(p.progress.HealedTrienodeBytes) }
+func (p *SyncProgress) GetHealedBytecodes() int64     { return int64(p.progress.HealedBytecodes) }
+func (p *SyncProgress) GetHealedBytecodeBytes() int64 { return int64(p.progress.HealedBytecodeBytes) }
+func (p *SyncProgress) GetHealingTrienodes() int64    { return int64(p.progress.HealingTrienodes) }
+func (p *SyncProgress) GetHealingBytecode() int64     { return int64(p.progress.HealingBytecode) }
 
 // Topics is a set of topic lists to filter events with.
 type Topics struct{ topics [][]common.Hash }
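Bindings consumers migrate from the legacy GetPulledStates/GetKnownStates accessors to the granular snap-sync getters. A sketch in Go against the wrapper type; the EthereumClient/Context helpers are assumed from this mobile package and the endpoint is a placeholder:

	import (
		"log"

		geth "github.com/scroll-tech/go-ethereum/mobile"
	)

	// Hypothetical gomobile-side flow; error handling abbreviated.
	client, err := geth.NewEthereumClient("http://localhost:8545") // placeholder endpoint
	if err != nil {
		log.Fatal(err)
	}
	progress, err := client.SyncProgress(geth.NewContext())
	if err != nil {
		log.Fatal(err)
	}
	if progress != nil {
		// Granular snap-sync counters replace the old state-trie counters.
		log.Printf("accounts=%d healedTrienodes=%d",
			progress.GetSyncedAccounts(), progress.GetHealedTrienodes())
	}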
diff --git a/p2p/peer.go b/p2p/peer.go
index 1c3484fa5..c7a1390ca 100644
--- a/p2p/peer.go
+++ b/p2p/peer.go
@@ -121,10 +121,18 @@ type Peer struct {
 
 // NewPeer returns a peer for testing purposes.
 func NewPeer(id enode.ID, name string, caps []Cap) *Peer {
+	// Generate a fake set of local protocols to match as running caps. Almost
+	// no fields need to be meaningful here as we're only using it to cross-
+	// check with the "remote" caps array.
+	protos := make([]Protocol, len(caps))
+	for i, cap := range caps {
+		protos[i].Name = cap.Name
+		protos[i].Version = cap.Version
+	}
 	pipe, _ := net.Pipe()
 	node := enode.SignNull(new(enr.Record), id)
 	conn := &conn{fd: pipe, transport: nil, node: node, caps: caps, name: name}
-	peer := newPeer(log.Root(), conn, nil)
+	peer := newPeer(log.Root(), conn, protos)
 	close(peer.closed) // ensures Disconnect doesn't block
 	return peer
 }
diff --git a/tests/fuzzers/stacktrie/trie_fuzzer.go b/tests/fuzzers/stacktrie/trie_fuzzer.go
index 3fba704c7..55c1d063f 100644
--- a/tests/fuzzers/stacktrie/trie_fuzzer.go
+++ b/tests/fuzzers/stacktrie/trie_fuzzer.go
@@ -67,6 +67,8 @@ func (s *spongeDb) Has(key []byte) (bool, error) { panic("implement
 func (s *spongeDb) Get(key []byte) ([]byte, error)           { return nil, errors.New("no such elem") }
 func (s *spongeDb) Delete(key []byte) error                  { panic("implement me") }
 func (s *spongeDb) NewBatch() ethdb.Batch                    { return &spongeBatch{s} }
+func (s *spongeDb) NewBatchWithSize(size int) ethdb.Batch    { return &spongeBatch{s} }
+func (s *spongeDb) NewSnapshot() (ethdb.Snapshot, error)     { panic("implement me") }
 func (s *spongeDb) Stat(property string) (string, error)     { panic("implement me") }
 func (s *spongeDb) Compact(start []byte, limit []byte) error { panic("implement me") }
 func (s *spongeDb) Close() error                             { return nil }
diff --git a/trie/iterator_test.go b/trie/iterator_test.go
index 46920e954..7ba5133b9 100644
--- a/trie/iterator_test.go
+++ b/trie/iterator_test.go
@@ -470,10 +470,18 @@ func (l *loggingDb) NewBatch() ethdb.Batch {
 	return l.backend.NewBatch()
 }
 
+func (l *loggingDb) NewBatchWithSize(size int) ethdb.Batch {
+	return l.backend.NewBatchWithSize(size)
+}
+
 func (l *loggingDb) NewIterator(prefix []byte, start []byte) ethdb.Iterator {
-	fmt.Printf("NewIterator\n")
 	return l.backend.NewIterator(prefix, start)
 }
+
+func (l *loggingDb) NewSnapshot() (ethdb.Snapshot, error) {
+	return l.backend.NewSnapshot()
+}
+
 func (l *loggingDb) Stat(property string) (string, error) {
 	return l.backend.Stat(property)
 }
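The p2p.NewPeer change above matters for protocol tests: the running capabilities are now matched against a synthesized protocol list, so a test peer actually reports its caps as active. A minimal sketch (IDs and cap values are illustrative):

	import (
		"github.com/scroll-tech/go-ethereum/p2p"
		"github.com/scroll-tech/go-ethereum/p2p/enode"
	)

	caps := []p2p.Cap{{Name: "eth", Version: 66}, {Name: "snap", Version: 1}}
	peer := p2p.NewPeer(enode.ID{1}, "test-peer", caps)
	// With the fake protocol list installed, RunningCap should now reflect
	// the advertised caps instead of always reporting false.
	running := peer.RunningCap("snap", []uint{1}) // expected: true
	_ = running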
diff --git a/trie/sync.go b/trie/sync.go
index 3eaea7e57..3e7ac79b9 100644
--- a/trie/sync.go
+++ b/trie/sync.go
@@ -129,11 +129,10 @@ type Sync struct {
 	codeReqs map[common.Hash]*request // Pending requests pertaining to a code hash
 	queue    *prque.Prque             // Priority queue with the pending requests
 	fetches  map[int]int              // Number of active fetches per trie node depth
-	bloom    *SyncBloom               // Bloom filter for fast state existence checks
 }
 
 // NewSync creates a new trie data download scheduler.
-func NewSync(root common.Hash, database ethdb.KeyValueReader, callback LeafCallback, bloom *SyncBloom) *Sync {
+func NewSync(root common.Hash, database ethdb.KeyValueReader, callback LeafCallback) *Sync {
 	ts := &Sync{
 		database: database,
 		membatch: newSyncMemBatch(),
@@ -141,7 +140,6 @@ func NewSync(root common.Hash, database ethdb.KeyValueReader, callback LeafCallb
 		codeReqs: make(map[common.Hash]*request),
 		queue:    prque.New(nil),
 		fetches:  make(map[int]int),
-		bloom:    bloom,
 	}
 	ts.AddSubTrie(root, nil, common.Hash{}, callback)
 	return ts
@@ -156,16 +154,11 @@ func (s *Sync) AddSubTrie(root common.Hash, path []byte, parent common.Hash, cal
 	if s.membatch.hasNode(root) {
 		return
 	}
-	if s.bloom == nil || s.bloom.Contains(root[:]) {
-		// Bloom filter says this might be a duplicate, double check.
-		// If database says yes, then at least the trie node is present
-		// and we hold the assumption that it's NOT legacy contract code.
-		blob := rawdb.ReadTrieNode(s.database, root)
-		if len(blob) > 0 {
-			return
-		}
-		// False positive, bump fault meter
-		bloomFaultMeter.Mark(1)
+	// If database says this is a duplicate, then at least the trie node is
+	// present, and we hold the assumption that it's NOT legacy contract code.
+	blob := rawdb.ReadTrieNode(s.database, root)
+	if len(blob) > 0 {
+		return
 	}
 	// Assemble the new sub-trie sync request
 	req := &request{
@@ -196,18 +189,13 @@ func (s *Sync) AddCodeEntry(hash common.Hash, path []byte, parent common.Hash) {
 	if s.membatch.hasCode(hash) {
 		return
 	}
-	if s.bloom == nil || s.bloom.Contains(hash[:]) {
-		// Bloom filter says this might be a duplicate, double check.
-		// If database says yes, the blob is present for sure.
-		// Note we only check the existence with new code scheme, fast
-		// sync is expected to run with a fresh new node. Even there
-		// exists the code with legacy format, fetch and store with
-		// new scheme anyway.
-		if blob := rawdb.ReadCodeWithPrefix(s.database, hash); len(blob) > 0 {
-			return
-		}
-		// False positive, bump fault meter
-		bloomFaultMeter.Mark(1)
+	// If database says duplicate, the blob is present for sure.
+	// Note we only check the existence with new code scheme, fast
+	// sync is expected to run with a fresh new node. Even there
+	// exists the code with legacy format, fetch and store with
+	// new scheme anyway.
+	if blob := rawdb.ReadCodeWithPrefix(s.database, hash); len(blob) > 0 {
+		return
 	}
 	// Assemble the new sub-trie sync request
 	req := &request{
@@ -314,15 +302,9 @@ func (s *Sync) Commit(dbw ethdb.Batch) error {
 	// Dump the membatch into a database dbw
 	for key, value := range s.membatch.nodes {
 		rawdb.WriteTrieNode(dbw, key, value)
-		if s.bloom != nil {
-			s.bloom.Add(key[:])
-		}
 	}
 	for key, value := range s.membatch.codes {
 		rawdb.WriteCode(dbw, key, value)
-		if s.bloom != nil {
-			s.bloom.Add(key[:])
-		}
 	}
 	// Drop the membatch data and return
 	s.membatch = newSyncMemBatch()
@@ -418,15 +400,10 @@ func (s *Sync) children(req *request, object node) ([]*request, error) {
 			if s.membatch.hasNode(hash) {
 				continue
 			}
-			if s.bloom == nil || s.bloom.Contains(node) {
-				// Bloom filter says this might be a duplicate, double check.
-				// If database says yes, then at least the trie node is present
-				// and we hold the assumption that it's NOT legacy contract code.
-				if blob := rawdb.ReadTrieNode(s.database, hash); len(blob) > 0 {
-					continue
-				}
-				// False positive, bump fault meter
-				bloomFaultMeter.Mark(1)
+			// If database says duplicate, then at least the trie node is present
+			// and we hold the assumption that it's NOT legacy contract code.
+			if blob := rawdb.ReadTrieNode(s.database, hash); len(blob) > 0 {
+				continue
 			}
 			// Locally unknown node, schedule for retrieval
 			requests = append(requests, &request{
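With the bloom gone, trie.NewSync takes only the root, the database and the leaf callback, and existence checks go straight to rawdb. A minimal sketch of driving a sync against an in-memory database, mirroring the updated tests below; the root hash and the fetch/deliver loop are placeholders:

	import (
		"github.com/scroll-tech/go-ethereum/common"
		"github.com/scroll-tech/go-ethereum/ethdb/memorydb"
		"github.com/scroll-tech/go-ethereum/trie"
	)

	var root common.Hash // placeholder: root hash of the trie to sync

	diskdb := memorydb.New()
	sched := trie.NewSync(root, diskdb, nil) // bloom argument removed
	// Ask the scheduler which trie nodes and code blobs are still missing,
	// then fetch and deliver them in a loop (elided here).
	nodes, paths, codes := sched.Missing(1)
	_, _, _ = nodes, paths, codes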
diff --git a/trie/sync_bloom.go b/trie/sync_bloom.go
deleted file mode 100644
index eee37ad93..000000000
--- a/trie/sync_bloom.go
+++ /dev/null
@@ -1,192 +0,0 @@
-// Copyright 2019 The go-ethereum Authors
-// This file is part of the go-ethereum library.
-//
-// The go-ethereum library is free software: you can redistribute it and/or modify
-// it under the terms of the GNU Lesser General Public License as published by
-// the Free Software Foundation, either version 3 of the License, or
-// (at your option) any later version.
-//
-// The go-ethereum library is distributed in the hope that it will be useful,
-// but WITHOUT ANY WARRANTY; without even the implied warranty of
-// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-// GNU Lesser General Public License for more details.
-//
-// You should have received a copy of the GNU Lesser General Public License
-// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
-
-package trie
-
-import (
-	"encoding/binary"
-	"fmt"
-	"sync"
-	"sync/atomic"
-	"time"
-
-	bloomfilter "github.com/holiman/bloomfilter/v2"
-
-	"github.com/scroll-tech/go-ethereum/common"
-	"github.com/scroll-tech/go-ethereum/core/rawdb"
-	"github.com/scroll-tech/go-ethereum/ethdb"
-	"github.com/scroll-tech/go-ethereum/log"
-	"github.com/scroll-tech/go-ethereum/metrics"
-)
-
-var (
-	bloomAddMeter   = metrics.NewRegisteredMeter("trie/bloom/add", nil)
-	bloomLoadMeter  = metrics.NewRegisteredMeter("trie/bloom/load", nil)
-	bloomTestMeter  = metrics.NewRegisteredMeter("trie/bloom/test", nil)
-	bloomMissMeter  = metrics.NewRegisteredMeter("trie/bloom/miss", nil)
-	bloomFaultMeter = metrics.NewRegisteredMeter("trie/bloom/fault", nil)
-	bloomErrorGauge = metrics.NewRegisteredGauge("trie/bloom/error", nil)
-)
-
-// SyncBloom is a bloom filter used during fast sync to quickly decide if a trie
-// node or contract code already exists on disk or not. It self populates from the
-// provided disk database on creation in a background thread and will only start
-// returning live results once that's finished.
-type SyncBloom struct {
-	bloom   *bloomfilter.Filter
-	inited  uint32
-	closer  sync.Once
-	closed  uint32
-	pend    sync.WaitGroup
-	closeCh chan struct{}
-}
-
-// NewSyncBloom creates a new bloom filter of the given size (in megabytes) and
-// initializes it from the database. The bloom is hard coded to use 3 filters.
-func NewSyncBloom(memory uint64, database ethdb.Iteratee) *SyncBloom {
-	// Create the bloom filter to track known trie nodes
-	bloom, err := bloomfilter.New(memory*1024*1024*8, 4)
-	if err != nil {
-		panic(fmt.Sprintf("failed to create bloom: %v", err))
-	}
-	log.Info("Allocated fast sync bloom", "size", common.StorageSize(memory*1024*1024))
-
-	// Assemble the fast sync bloom and init it from previous sessions
-	b := &SyncBloom{
-		bloom:   bloom,
-		closeCh: make(chan struct{}),
-	}
-	b.pend.Add(2)
-	go func() {
-		defer b.pend.Done()
-		b.init(database)
-	}()
-	go func() {
-		defer b.pend.Done()
-		b.meter()
-	}()
-	return b
-}
-
-// init iterates over the database, pushing every trie hash into the bloom filter.
-func (b *SyncBloom) init(database ethdb.Iteratee) {
-	// Iterate over the database, but restart every now and again to avoid holding
-	// a persistent snapshot since fast sync can push a ton of data concurrently,
-	// bloating the disk.
-	//
-	// Note, this is fine, because everything inserted into leveldb by fast sync is
-	// also pushed into the bloom directly, so we're not missing anything when the
-	// iterator is swapped out for a new one.
-	it := database.NewIterator(nil, nil)
-
-	var (
-		start = time.Now()
-		swap  = time.Now()
-	)
-	for it.Next() && atomic.LoadUint32(&b.closed) == 0 {
-		// If the database entry is a trie node, add it to the bloom
-		key := it.Key()
-		if len(key) == common.HashLength {
-			b.bloom.AddHash(binary.BigEndian.Uint64(key))
-			bloomLoadMeter.Mark(1)
-		} else if ok, hash := rawdb.IsCodeKey(key); ok {
-			// If the database entry is a contract code, add it to the bloom
-			b.bloom.AddHash(binary.BigEndian.Uint64(hash))
-			bloomLoadMeter.Mark(1)
-		}
-		// If enough time elapsed since the last iterator swap, restart
-		if time.Since(swap) > 8*time.Second {
-			key := common.CopyBytes(it.Key())
-
-			it.Release()
-			it = database.NewIterator(nil, key)
-
-			log.Info("Initializing state bloom", "items", b.bloom.N(), "errorrate", b.bloom.FalsePosititveProbability(), "elapsed", common.PrettyDuration(time.Since(start)))
-			swap = time.Now()
-		}
-	}
-	it.Release()
-
-	// Mark the bloom filter inited and return
-	log.Info("Initialized state bloom", "items", b.bloom.N(), "errorrate", b.bloom.FalsePosititveProbability(), "elapsed", common.PrettyDuration(time.Since(start)))
-	atomic.StoreUint32(&b.inited, 1)
-}
-
-// meter periodically recalculates the false positive error rate of the bloom
-// filter and reports it in a metric.
-func (b *SyncBloom) meter() {
-	// check every second
-	tick := time.NewTicker(1 * time.Second)
-	defer tick.Stop()
-
-	for {
-		select {
-		case <-tick.C:
-			// Report the current error ration. No floats, lame, scale it up.
-			bloomErrorGauge.Update(int64(b.bloom.FalsePosititveProbability() * 100000))
-		case <-b.closeCh:
-			return
-		}
-	}
-}
-
-// Close terminates any background initializer still running and releases all the
-// memory allocated for the bloom.
-func (b *SyncBloom) Close() error {
-	b.closer.Do(func() {
-		// Ensure the initializer is stopped
-		atomic.StoreUint32(&b.closed, 1)
-		close(b.closeCh)
-		b.pend.Wait()
-
-		// Wipe the bloom, but mark it "uninited" just in case someone attempts an access
-		log.Info("Deallocated state bloom", "items", b.bloom.N(), "errorrate", b.bloom.FalsePosititveProbability())
-
-		atomic.StoreUint32(&b.inited, 0)
-		b.bloom = nil
-	})
-	return nil
-}
-
-// Add inserts a new trie node hash into the bloom filter.
-func (b *SyncBloom) Add(hash []byte) {
-	if atomic.LoadUint32(&b.closed) == 1 {
-		return
-	}
-	b.bloom.AddHash(binary.BigEndian.Uint64(hash))
-	bloomAddMeter.Mark(1)
-}
-
-// Contains tests if the bloom filter contains the given hash:
-//   - false: the bloom definitely does not contain hash
-//   - true: the bloom maybe contains hash
-//
-// While the bloom is being initialized, any query will return true.
-func (b *SyncBloom) Contains(hash []byte) bool {
-	bloomTestMeter.Mark(1)
-	if atomic.LoadUint32(&b.inited) == 0 {
-		// We didn't load all the trie nodes from the previous run of Geth yet. As
-		// such, we can't say for sure if a hash is not present for anything. Until
-		// the init is done, we're faking "possible presence" for everything.
-		return true
-	}
-	// Bloom initialized, check the real one and report any successful misses
-	maybe := b.bloom.ContainsHash(binary.BigEndian.Uint64(hash))
-	if !maybe {
-		bloomMissMeter.Mark(1)
-	}
-	return maybe
-}
diff --git a/trie/sync_test.go b/trie/sync_test.go
index 1892462d4..1f064ba67 100644
--- a/trie/sync_test.go
+++ b/trie/sync_test.go
@@ -95,7 +95,7 @@ func TestEmptySync(t *testing.T) {
 	emptyB, _ := New(emptyRoot, dbB)
 
 	for i, trie := range []*Trie{emptyA, emptyB} {
-		sync := NewSync(trie.Hash(), memorydb.New(), nil, NewSyncBloom(1, memorydb.New()))
+		sync := NewSync(trie.Hash(), memorydb.New(), nil)
 		if nodes, paths, codes := sync.Missing(1); len(nodes) != 0 || len(paths) != 0 || len(codes) != 0 {
 			t.Errorf("test %d: content requested for empty trie: %v, %v, %v", i, nodes, paths, codes)
 		}
@@ -116,7 +116,7 @@ func testIterativeSync(t *testing.T, count int, bypath bool) {
 	// Create a destination trie and sync with the scheduler
 	diskdb := memorydb.New()
 	triedb := NewDatabase(diskdb)
-	sched := NewSync(srcTrie.Hash(), diskdb, nil, NewSyncBloom(1, diskdb))
+	sched := NewSync(srcTrie.Hash(), diskdb, nil)
 	nodes, paths, codes := sched.Missing(count)
 	var (
@@ -177,7 +177,7 @@ func TestIterativeDelayedSync(t *testing.T) {
 	// Create a destination trie and sync with the scheduler
 	diskdb := memorydb.New()
 	triedb := NewDatabase(diskdb)
-	sched := NewSync(srcTrie.Hash(), diskdb, nil, NewSyncBloom(1, diskdb))
+	sched := NewSync(srcTrie.Hash(), diskdb, nil)
 	nodes, _, codes := sched.Missing(10000)
 	queue := append(append([]common.Hash{}, nodes...), codes...)
@@ -223,7 +223,7 @@ func testIterativeRandomSync(t *testing.T, count int) {
 	// Create a destination trie and sync with the scheduler
 	diskdb := memorydb.New()
 	triedb := NewDatabase(diskdb)
-	sched := NewSync(srcTrie.Hash(), diskdb, nil, NewSyncBloom(1, diskdb))
+	sched := NewSync(srcTrie.Hash(), diskdb, nil)
 	queue := make(map[common.Hash]struct{})
 	nodes, _, codes := sched.Missing(count)
@@ -271,7 +271,7 @@ func TestIterativeRandomDelayedSync(t *testing.T) {
 	// Create a destination trie and sync with the scheduler
 	diskdb := memorydb.New()
 	triedb := NewDatabase(diskdb)
-	sched := NewSync(srcTrie.Hash(), diskdb, nil, NewSyncBloom(1, diskdb))
+	sched := NewSync(srcTrie.Hash(), diskdb, nil)
 	queue := make(map[common.Hash]struct{})
 	nodes, _, codes := sched.Missing(10000)
@@ -324,7 +324,7 @@ func TestDuplicateAvoidanceSync(t *testing.T) {
 	// Create a destination trie and sync with the scheduler
 	diskdb := memorydb.New()
 	triedb := NewDatabase(diskdb)
-	sched := NewSync(srcTrie.Hash(), diskdb, nil, NewSyncBloom(1, diskdb))
+	sched := NewSync(srcTrie.Hash(), diskdb, nil)
 	nodes, _, codes := sched.Missing(0)
 	queue := append(append([]common.Hash{}, nodes...), codes...)
@@ -371,7 +371,7 @@ func TestIncompleteSync(t *testing.T) {
 	// Create a destination trie and sync with the scheduler
 	diskdb := memorydb.New()
 	triedb := NewDatabase(diskdb)
-	sched := NewSync(srcTrie.Hash(), diskdb, nil, NewSyncBloom(1, diskdb))
+	sched := NewSync(srcTrie.Hash(), diskdb, nil)
 	var added []common.Hash
@@ -431,7 +431,7 @@ func TestSyncOrdering(t *testing.T) {
 	// Create a destination trie and sync with the scheduler, tracking the requests
 	diskdb := memorydb.New()
 	triedb := NewDatabase(diskdb)
-	sched := NewSync(srcTrie.Hash(), diskdb, nil, NewSyncBloom(1, diskdb))
+	sched := NewSync(srcTrie.Hash(), diskdb, nil)
 	nodes, paths, _ := sched.Missing(1)
 	queue := append([]common.Hash{}, nodes...)
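With SyncBloom deleted, the probabilistic pre-check disappears entirely: where callers used to consult the bloom and only fall through to disk on a "maybe present" answer, every existence test is now a direct database read. A sketch of the resulting check as a hypothetical helper (the helper name is illustrative, not part of this change):

	import (
		"github.com/scroll-tech/go-ethereum/common"
		"github.com/scroll-tech/go-ethereum/core/rawdb"
		"github.com/scroll-tech/go-ethereum/ethdb"
	)

	// hasTrieNode shows the post-removal pattern: existence is answered by
	// reading the node from the database, with no bloom consulted first.
	func hasTrieNode(db ethdb.KeyValueReader, hash common.Hash) bool {
		return len(rawdb.ReadTrieNode(db, hash)) > 0
	}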
diff --git a/trie/trie_test.go b/trie/trie_test.go
index 23780a5ff..7dad190b3 100644
--- a/trie/trie_test.go
+++ b/trie/trie_test.go
@@ -685,6 +685,8 @@ func (s *spongeDb) Has(key []byte) (bool, error) { panic("implement
 func (s *spongeDb) Get(key []byte) ([]byte, error)           { return nil, errors.New("no such elem") }
 func (s *spongeDb) Delete(key []byte) error                  { panic("implement me") }
 func (s *spongeDb) NewBatch() ethdb.Batch                    { return &spongeBatch{s} }
+func (s *spongeDb) NewBatchWithSize(size int) ethdb.Batch    { return &spongeBatch{s} }
+func (s *spongeDb) NewSnapshot() (ethdb.Snapshot, error)     { panic("implement me") }
 func (s *spongeDb) Stat(property string) (string, error)     { panic("implement me") }
 func (s *spongeDb) Compact(start []byte, limit []byte) error { panic("implement me") }
 func (s *spongeDb) Close() error                             { return nil }
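The test stubs above track an ethdb interface extension: key-value stores now expose NewBatchWithSize and NewSnapshot, so every mock must implement both. A sketch of the assumed snapshot contract against the in-memory backend (method set per this branch's ethdb package; Get/Has/Release semantics assumed):

	import (
		"fmt"
		"log"

		"github.com/scroll-tech/go-ethereum/ethdb/memorydb"
	)

	db := memorydb.New()
	if err := db.Put([]byte("key"), []byte("value")); err != nil {
		log.Fatal(err)
	}
	snap, err := db.NewSnapshot() // frozen read-only view of current contents
	if err != nil {
		log.Fatal(err)
	}
	defer snap.Release() // snapshots must be released when done
	val, _ := snap.Get([]byte("key"))
	fmt.Printf("%s\n", val) // "value"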