diff --git a/go.mod b/go.mod index 53399b7a7d..1b3ce09690 100644 --- a/go.mod +++ b/go.mod @@ -10,6 +10,7 @@ require ( github.com/ReneKroon/ttlcache/v2 v2.3.0 github.com/aws/aws-sdk-go v1.37.24 github.com/c-bata/go-prompt v0.2.5 + github.com/ceph/go-ceph v0.8.0 github.com/cheggaaa/pb v1.0.29 github.com/coreos/go-oidc v2.2.1+incompatible github.com/cs3org/cato v0.0.0-20200828125504-e418fc54dd5e @@ -33,7 +34,7 @@ require ( github.com/mitchellh/copystructure v1.0.0 // indirect github.com/mitchellh/mapstructure v1.4.1 github.com/onsi/ginkgo v1.15.0 - github.com/onsi/gomega v1.10.5 + github.com/onsi/gomega v1.11.0 github.com/ory/fosite v0.38.0 github.com/pkg/errors v0.9.1 github.com/pkg/xattr v0.4.3 diff --git a/go.sum b/go.sum index 5c6c006283..80cf369da8 100644 --- a/go.sum +++ b/go.sum @@ -54,11 +54,9 @@ github.com/Shopify/sarama v1.19.0/go.mod h1:FVkBWblsNy7DGZRfXLU0O9RCGt5g3g3yEuWX github.com/Shopify/toxiproxy v2.1.4+incompatible/go.mod h1:OXgGpZ6Cli1/URJOF1DMxUHB2q5Ap20/P/eIdh4G0pI= github.com/ajg/form v0.0.0-20160822230020-523a5da1a92f/go.mod h1:uL1WgH+h2mgNtvBq0339dVnzXdBETtL2LeUXaIv25UY= github.com/ajstarks/svgo v0.0.0-20180226025133-644b8db467af/go.mod h1:K08gAheRH3/J6wwsYMMT4xOr94bZjxIelGM0+d/wbFw= -github.com/alecthomas/template v0.0.0-20160405071501-a0175ee3bccc h1:cAKDfWh5VpdgMhJosfJnn5/FoN2SRZ4p7fJNX58YPaU= github.com/alecthomas/template v0.0.0-20160405071501-a0175ee3bccc/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc= github.com/alecthomas/template v0.0.0-20190718012654-fb15b899a751 h1:JYp7IbQjafoB+tBA3gMyHYHrpOtNuDiK/uB5uXxq5wM= github.com/alecthomas/template v0.0.0-20190718012654-fb15b899a751/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc= -github.com/alecthomas/units v0.0.0-20151022065526-2efee857e7cf h1:qet1QNfXsQxTZqLG4oE62mJzwPIB8+Tee4RNCL9ulrY= github.com/alecthomas/units v0.0.0-20151022065526-2efee857e7cf/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0= github.com/alecthomas/units v0.0.0-20190717042225-c3de453c63f4 
h1:Hs82Z41s6SdL1CELW+XaDYmOH4hkBN4/N9og/AsOv7E= github.com/alecthomas/units v0.0.0-20190717042225-c3de453c63f4/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0= @@ -68,7 +66,6 @@ github.com/andrewmostello/go-tus v0.0.0-20200314041820-904a9904af9a/go.mod h1:XY github.com/armon/consul-api v0.0.0-20180202201655-eb2c6b5be1b6/go.mod h1:grANhF5doyWs3UAsr3K4I6qtAmlQcZDesFNEHPZAzj8= github.com/armon/go-radix v1.0.0/go.mod h1:ufUuZ+zHj4x4TnLV4JWEpy2hxWSpsRywHrMgIH9cCH8= github.com/asaskevich/govalidator v0.0.0-20180720115003-f9ffefc3facf/go.mod h1:lB+ZfQJz7igIIfQNfa7Ml4HSf2uFQQRzpGGRXenZAgY= -github.com/asaskevich/govalidator v0.0.0-20190424111038-f61b66f89f4a h1:idn718Q4B6AGu/h5Sxe66HYVdqdGu2l9Iebqhi/AEoA= github.com/asaskevich/govalidator v0.0.0-20190424111038-f61b66f89f4a/go.mod h1:lB+ZfQJz7igIIfQNfa7Ml4HSf2uFQQRzpGGRXenZAgY= github.com/asaskevich/govalidator v0.0.0-20200428143746-21a406dcc535 h1:4daAzAu0S6Vi7/lbWECcX0j45yZReDZ56BQsrVBOEEY= github.com/asaskevich/govalidator v0.0.0-20200428143746-21a406dcc535/go.mod h1:oGkLhpf+kjZl6xBf758TQhh5XrAeiJv/7FRz/2spLIg= @@ -77,9 +74,7 @@ github.com/aws/aws-sdk-go v1.23.19/go.mod h1:KmX6BPdI08NWTb3/sm4ZGu5ShLoqVDhKgpi github.com/aws/aws-sdk-go v1.37.24 h1:UmdPwGITvz//eFxNyuPlkq8KLlu4ZGvowsCQs+uFIp4= github.com/aws/aws-sdk-go v1.37.24/go.mod h1:hcU610XS61/+aQV88ixoOzUoG7v3b31pl2zKMmprdro= github.com/aws/aws-xray-sdk-go v0.9.4/go.mod h1:XtMKdBQfpVut+tJEwI7+dJFRxxRdxHDyVNp2tHXRq04= -github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973 h1:xJ4a3vCFaGF/jqvzLMYoU8P317H5OQ+Via4RmuPwCS0= github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q= -github.com/beorn7/perks v1.0.0 h1:HWo1m869IqiPhD389kmkxeTalrjNbbJTC8LXupb+sl0= github.com/beorn7/perks v1.0.0/go.mod h1:KWe93zE9D1o94FZ5RNwFwVgaQK1VOXiVxmqh+CedLV8= github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM= github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw= @@ 
-93,6 +88,8 @@ github.com/cenkalti/backoff v2.1.1+incompatible/go.mod h1:90ReRw6GdpyfrHakVjL/QH github.com/cenkalti/backoff v2.2.1+incompatible/go.mod h1:90ReRw6GdpyfrHakVjL/QHaoyV4aDUVVkXQJJJ3NXXM= github.com/cenkalti/backoff/v3 v3.0.0/go.mod h1:cIeZDE3IrqwwJl6VUwCN6trj1oXrTS4rc0ij+ULvLYs= github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU= +github.com/ceph/go-ceph v0.8.0 h1:d+VP0eyconBl9RrvKVUq7S0npyK969ErLkCt5pg2fp0= +github.com/ceph/go-ceph v0.8.0/go.mod h1:wd+keAOqrcsN//20VQnHBGtnBnY0KHl0PA024Ng8HfQ= github.com/cespare/xxhash v1.1.0 h1:a6HrQnmkObjyL+Gs60czilIUGqrzKutQD6XZog3p+ko= github.com/cespare/xxhash v1.1.0/go.mod h1:XrSqR1VqqWfGrhpAt58auRo0WTKS1nRRg3ghfAqPWnc= github.com/cespare/xxhash/v2 v2.1.0/go.mod h1:dgIUBU3pDso/gPgZ1osOZ0iQf77oPR28Tjxl5dIMyVM= @@ -135,7 +132,6 @@ github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSs github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/dgraph-io/ristretto v0.0.1/go.mod h1:T40EBc7CJke8TkpiYfGGKAeFjSaxuFXhuXRyumBd6RE= -github.com/dgraph-io/ristretto v0.0.2 h1:a5WaUrDa0qm0YrAAS1tUykT5El3kt62KNZZeMxQn3po= github.com/dgraph-io/ristretto v0.0.2/go.mod h1:KPxhHT9ZxKefz+PCeOGsrHpl1qZ7i70dGTu2u+Ahh6E= github.com/dgraph-io/ristretto v0.0.3 h1:jh22xisGBjrEVnRZ1DVTpBVQm0Xndu8sMl0CWDzSIBI= github.com/dgraph-io/ristretto v0.0.3/go.mod h1:KPxhHT9ZxKefz+PCeOGsrHpl1qZ7i70dGTu2u+Ahh6E= @@ -165,7 +161,6 @@ github.com/fatih/structs v1.0.0/go.mod h1:9NiDSp5zOcgEDl+j00MP/WkGVPOlPRLejGD8Ga github.com/fatih/structs v1.1.0/go.mod h1:9NiDSp5zOcgEDl+j00MP/WkGVPOlPRLejGD8Ga6PJ7M= github.com/fogleman/gg v1.2.1-0.20190220221249-0403632d5b90/go.mod h1:R/bRT+9gY/C5z7JzPU0zXsXHKM4/ayA+zqcVNZzPa1k= github.com/fogleman/gg v1.3.0/go.mod h1:R/bRT+9gY/C5z7JzPU0zXsXHKM4/ayA+zqcVNZzPa1k= -github.com/fsnotify/fsnotify v1.4.7 
h1:IXs+QLmnXW2CcXuY+8Mzv/fWEsPGWxqefPtCP5CnV9I= github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo= github.com/fsnotify/fsnotify v1.4.9 h1:hsms1Qyu0jgnwNXIxa+/V/PDsU6CfLf6CNO8H7IWoS4= github.com/fsnotify/fsnotify v1.4.9/go.mod h1:znqG4EE+3YCdAaPaxE2ZRY/06pZUdp0tY4IgpuI1SZQ= @@ -182,7 +177,6 @@ github.com/go-ldap/ldap/v3 v3.2.4 h1:PFavAq2xTgzo/loE8qNXcQaofAaqIpI4WgaLdv+1l3E github.com/go-ldap/ldap/v3 v3.2.4/go.mod h1:iYS1MdmrmceOJ1QOTnRXrIs7i3kloqtmGQjRvjKpyMg= github.com/go-logfmt/logfmt v0.3.0/go.mod h1:Qt1PoO58o5twSAckw1HlFXLmHsOX5/0LbT9GBnD5lWE= github.com/go-logfmt/logfmt v0.4.0/go.mod h1:3RMwSq7FuexP4Kalkev3ejPJsZTpXXBr9+V4qmtdjCk= -github.com/go-openapi/errors v0.19.2 h1:a2kIyV3w+OS3S97zxUndRVD46+FhGOUBDFY7nmu4CsY= github.com/go-openapi/errors v0.19.2/go.mod h1:qX0BLWsyaKfvhluLejVpVNwNRdXZhEbTA4kxxpKBC94= github.com/go-openapi/errors v0.19.6 h1:xZMThgv5SQ7SMbWtKFkCf9bBdvR2iEyw9k3zGZONuys= github.com/go-openapi/errors v0.19.6/go.mod h1:cM//ZKUKyO06HSwqAelJ5NsEMMcpa6VpXe8DOa1Mi1M= @@ -424,6 +418,7 @@ github.com/gobuffalo/validate/v3 v3.2.0/go.mod h1:PrhDOdDHxtN8KUgMvF3TDL0r1YZXV4 github.com/gobuffalo/x v0.0.0-20181003152136-452098b06085/go.mod h1:WevpGD+5YOreDJznWevcn8NTmQEW5STSBgIkpkjzqXc= github.com/gobuffalo/x v0.0.0-20181007152206-913e47c59ca7/go.mod h1:9rDPXaB3kXdKWzMc4odGQQdG2e2DIEmANy5aSJ9yesY= github.com/gofrs/uuid v3.1.0+incompatible/go.mod h1:b2aQJv3Z4Fp6yNu3cdSllBxTCLRxnplIgP/c0N/04lM= +github.com/gofrs/uuid v3.2.0+incompatible h1:y12jRkkFxsd7GpqdSZ+/KCs/fJbqpEXSGd4+jfEaewE= github.com/gofrs/uuid v3.2.0+incompatible/go.mod h1:b2aQJv3Z4Fp6yNu3cdSllBxTCLRxnplIgP/c0N/04lM= github.com/gofrs/uuid/v3 v3.1.2/go.mod h1:xPwMqoocQ1L5G6pXX5BcE7N5jlzn2o19oqAKxwZW/kI= github.com/gogo/protobuf v1.1.1/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ= @@ -444,7 +439,6 @@ github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfb github.com/golang/mock v1.2.0/go.mod 
h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A= github.com/golang/mock v1.3.1/go.mod h1:sBzyDLLjw3U8JLTeZvSv8jJB+tU5PVekmnlKIyFUx0Y= github.com/golang/mock v1.4.0/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw= -github.com/golang/mock v1.4.3 h1:GV+pQPG/EUUbkh47niozDcADz6go/dUwhVzdUQHIVRw= github.com/golang/mock v1.4.3/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw= github.com/golang/mock v1.4.4 h1:l75CXGRSwbaYNpl/Z2X1XIIAMSCquvXgpVZDhwEIJsc= github.com/golang/mock v1.4.4/go.mod h1:l3mdAwkq5BuhzHwde/uurv3sEJeZMXNpwsxVWU71h+4= @@ -470,7 +464,6 @@ github.com/google/btree v1.0.0/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M= github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= -github.com/google/go-cmp v0.4.0 h1:xsAVV57WRhGj6kEIi8ReJzQlHHqcBYCElAvkovg3B/4= github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.5.2/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.5.3 h1:x95R7cp+rSeeqAMI2knLtQ0DKlaBhv2NrtrOvafPHRo= @@ -523,7 +516,6 @@ github.com/hashicorp/golang-lru v0.5.1 h1:0hERBMJE1eitiLkihrMvRVBYAkpHzc/J3QdDN+ github.com/hashicorp/golang-lru v0.5.1/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= github.com/hashicorp/hcl v1.0.0 h1:0Anlzjpi4vEasTeNFn2mLJgTSwt0+6sfsiTG8qcWGx4= github.com/hashicorp/hcl v1.0.0/go.mod h1:E5yfLk+7swimpb2L/Alb/PJmXilQ/rhwaUYs4T20WEQ= -github.com/hpcloud/tail v1.0.0 h1:nfCOvKYfkgYP8hkirhJocXT2+zOD8yUNjXaWfTlyFKI= github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpOxQnU= github.com/huandu/xstrings v1.3.0 h1:gvV6jG9dTgFEncxo+AF7PH6MZXi/vZl25owA/8Dg8Wo= github.com/huandu/xstrings v1.3.0/go.mod h1:y5/lhBue+AyNmUVz9RLU9xbLR0o4KIIExikq4ovT0aE= @@ -598,7 +590,6 @@ github.com/kballard/go-shellquote 
v0.0.0-20180428030007-95032a82bc51/go.mod h1:C github.com/kisielk/errcheck v1.1.0/go.mod h1:EZBBE59ingxPouuu3KfxchcWSUPOHkagtvWXihfKN4Q= github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck= github.com/konsorten/go-windows-terminal-sequences v0.0.0-20180402223658-b729f2633dfe/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= -github.com/konsorten/go-windows-terminal-sequences v1.0.1 h1:mweAR1A6xJ3oS2pRaGiHgQ4OO8tzTaLawm8vnODuwDk= github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= github.com/konsorten/go-windows-terminal-sequences v1.0.2/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= github.com/konsorten/go-windows-terminal-sequences v1.0.3 h1:CE8S1cTafDpPvMhIxNJKvHsGVBgn1xWYf1NbHQhywc8= @@ -710,27 +701,24 @@ github.com/oklog/ulid v1.3.1/go.mod h1:CirwcVhetQ6Lv90oh/F+FBtV6XMibvdAFo93nm5qn github.com/oleiade/reflections v1.0.1 h1:D1XO3LVEYroYskEsoSiGItp9RUxG6jWnCVvrqH0HHQM= github.com/oleiade/reflections v1.0.1/go.mod h1:rdFxbxq4QXVZWj0F+e9jqjDkc7dbp97vkRixKo2JR60= github.com/onsi/ginkgo v1.6.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= -github.com/onsi/ginkgo v1.7.0 h1:WSHQ+IS43OoUrWtD1/bbclrwK8TTH5hzp+umCiuxHgs= github.com/onsi/ginkgo v1.7.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= github.com/onsi/ginkgo v1.8.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= github.com/onsi/ginkgo v1.9.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= github.com/onsi/ginkgo v1.10.1/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= github.com/onsi/ginkgo v1.10.3/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= -github.com/onsi/ginkgo v1.12.1 h1:mFwc4LvZ0xpSvDZ3E+k8Yte0hLOMxXUlP+yXtJqkYfQ= github.com/onsi/ginkgo v1.12.1/go.mod h1:zj2OWP4+oCPe1qIXoGWkgMRwljMUYCdkwsT2108oapk= github.com/onsi/ginkgo v1.15.0 h1:1V1NfVQR87RtWAgp1lv9JZJ5Jap+XFGKPi00andXGi4= github.com/onsi/ginkgo v1.15.0/go.mod 
h1:hF8qUzuuC8DJGygJH3726JnCZX4MYbRB8yFfISqnKUg= github.com/onsi/gomega v1.4.1/go.mod h1:C1qb7wdrVGGVU+Z6iS04AVkA3Q65CEZX59MT0QO5uiA= github.com/onsi/gomega v1.4.2/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY= -github.com/onsi/gomega v1.4.3 h1:RE1xgDvH7imwFD45h+u2SgIfERHlS2yNG4DObb5BSKU= github.com/onsi/gomega v1.4.3/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY= github.com/onsi/gomega v1.5.0/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY= github.com/onsi/gomega v1.6.0/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY= github.com/onsi/gomega v1.7.0/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY= github.com/onsi/gomega v1.7.1/go.mod h1:XdKZgCCFLUoM/7CFJVPcG8C1xQ1AJ0vpAezJrB7JYyY= github.com/onsi/gomega v1.10.1/go.mod h1:iN09h71vgCQne3DLsj+A5owkum+a2tYe+TOCB1ybHNo= -github.com/onsi/gomega v1.10.5 h1:7n6FEkpFmfCoo2t+YYqXH0evK+a9ICQz0xcAy9dYcaQ= -github.com/onsi/gomega v1.10.5/go.mod h1:gza4q3jKQJijlu05nKWRCW/GavJumGt8aNRxWg7mt48= +github.com/onsi/gomega v1.11.0 h1:+CqWgvj0OZycCaqclBD1pxKHAU+tOkHmQIWvDHq2aug= +github.com/onsi/gomega v1.11.0/go.mod h1:azGKhqFUon9Vuj0YmTfLSmx0FUwqXYSTl5re8lQLTUg= github.com/opencontainers/go-digest v1.0.0-rc1/go.mod h1:cMLVZDEM3+U2I4VmLI6N8jQYUd2OVphdqWwCJHrFt2s= github.com/opencontainers/image-spec v1.0.1/go.mod h1:BtxoFyWECRxE4U/7sNtV5W15zMzWCbyJoFRP3s7yZA0= github.com/opencontainers/runc v0.1.1/go.mod h1:qT5XzbpPznkRYVz/mWwUaVBUv2rmF59PVA73FjuZG0U= @@ -869,7 +857,6 @@ github.com/sirupsen/logrus v1.1.1/go.mod h1:zrgwTnHtNr00buQ1vSptGe8m1f/BbgsPukg8 github.com/sirupsen/logrus v1.2.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo= github.com/sirupsen/logrus v1.3.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo= github.com/sirupsen/logrus v1.4.1/go.mod h1:ni0Sbl8bgC9z8RoU9G6nDWqqs/fq4eDPysMBDgk/93Q= -github.com/sirupsen/logrus v1.4.2 h1:SPIRibHv4MatM3XXNO2BJeFLZwZ2LvZgfQ5+UNI2im4= github.com/sirupsen/logrus v1.4.2/go.mod h1:tLMulIdttU9McNUspp0xgXVQah82FyeX6MwdIuYE2rE= 
github.com/sirupsen/logrus v1.5.0/go.mod h1:+F7Ogzej0PZc/94MaYx/nvG9jOFMD2osvC3s+Squfpo= github.com/sirupsen/logrus v1.6.0 h1:UBcNElsrwanuuMsnGSlYmtmgbb23qDR5dG+6X6Oo89I= @@ -886,7 +873,6 @@ github.com/spaolacci/murmur3 v0.0.0-20180118202830-f09979ecbc72 h1:qLC7fQah7D6K1 github.com/spaolacci/murmur3 v0.0.0-20180118202830-f09979ecbc72/go.mod h1:JwIasOWyU6f++ZhiEuf87xNszmSA2myDM2Kzu9HwQUA= github.com/spf13/afero v1.1.2/go.mod h1:j4pytiNVoe2o6bmDsKpLACNPDBIoEAkihy7loJ1B0CQ= github.com/spf13/afero v1.2.0/go.mod h1:9ZxEEn6pIJ8Rxe320qSDBk6AsU0r9pR7Q4OcevTdifk= -github.com/spf13/afero v1.2.2 h1:5jhuqJyZCZf2JRofRvN/nIFgIWNzPa3/Vz8mYylgbWc= github.com/spf13/afero v1.2.2/go.mod h1:9ZxEEn6pIJ8Rxe320qSDBk6AsU0r9pR7Q4OcevTdifk= github.com/spf13/afero v1.3.2 h1:GDarE4TJQI52kYSbSAmLiId1Elfj+xgSDqrUZxFhxlU= github.com/spf13/afero v1.3.2/go.mod h1:5KUK8ByomD5Ti5Artl0RtHeI5pTF7MIDuXL3yY520V4= @@ -922,7 +908,6 @@ github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXf github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI= github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4= github.com/stretchr/testify v1.5.1/go.mod h1:5W2xD1RspED5o8YsWQXVCued0rvSQ+mT+I5cxcmMvtA= -github.com/stretchr/testify v1.6.1 h1:hDPOHmpOpP40lSULcqw7IrRb/u7w6RpDC9399XyoNd0= github.com/stretchr/testify v1.6.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= github.com/stretchr/testify v1.7.0 h1:nwc3DEeHmmLAfoZucVR881uASk0Mfjw8xYJ99tb5CcY= github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= @@ -1051,7 +1036,6 @@ golang.org/x/lint v0.0.0-20190909230951-414d861bb4ac/go.mod h1:6SW0HCj/g11FgYtHl golang.org/x/lint v0.0.0-20190930215403-16217165b5de/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= golang.org/x/lint v0.0.0-20191125180803-fdd1cda4f05f/go.mod h1:5qLYkcX4OjUUV8bRuDixDT3tpyyb+LUpUlRWLxfhWrs= golang.org/x/lint v0.0.0-20200130185559-910be7a94367/go.mod 
h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY= -golang.org/x/lint v0.0.0-20200302205851-738671d3881b h1:Wh+f8QHJXR411sJR8/vRBTZ7YapZaRvUcLFFJhusH0k= golang.org/x/lint v0.0.0-20200302205851-738671d3881b/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY= golang.org/x/lint v0.0.0-20201208152925-83fdc39ff7b5 h1:2M3HP5CCK1Si9FQhwnzYhXdG6DXeebvUHFpre8QvbyI= golang.org/x/lint v0.0.0-20201208152925-83fdc39ff7b5/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY= @@ -1101,9 +1085,7 @@ golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLL golang.org/x/net v0.0.0-20200324143707-d3edc9973b7e/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= golang.org/x/net v0.0.0-20200421231249-e086a090c8fd/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= golang.org/x/net v0.0.0-20200520004742-59133d7f0dd7/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= -golang.org/x/net v0.0.0-20200625001655-4c5254603344 h1:vGXIOMxbNfDTk/aXCmfdLgkrSV+Z2tcbze+pEc3v5W4= golang.org/x/net v0.0.0-20200625001655-4c5254603344/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= -golang.org/x/net v0.0.0-20201021035429-f5854403a974 h1:IX6qOQeG5uLjB/hjjwjedwfjND0hgjPMMyO1RoIXQNI= golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= golang.org/x/net v0.0.0-20201110031124-69a78807bb2b/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= golang.org/x/net v0.0.0-20201202161906-c7110b5ffcbb h1:eBmm0M9fYhWpKZLjQUUKka/LtIxf46G4fxeEz5KJr9U= @@ -1122,7 +1104,6 @@ golang.org/x/sync v0.0.0-20190227155943-e225da77a7e6/go.mod h1:RxMgew5VJxzue5/jJ golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20200317015054-43a5402ce75a/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.0.0-20200625203802-6e8e738ad208 
h1:qwRHBd0NqMbJxfbotnDhm2ByMI1Shq4Y6oRJo21SGJA= golang.org/x/sync v0.0.0-20200625203802-6e8e738ad208/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9 h1:SQFwaSi55rU7vdNs9Yr0Z324VNlrF+0wMqRXT4St8ck= golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= @@ -1185,14 +1166,13 @@ golang.org/x/sys v0.0.0-20200223170610-d5e6a3e2c0ae/go.mod h1:h1NjWce9XRLGQEsW7w golang.org/x/sys v0.0.0-20200323222414-85ca7c5b95cd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200331124033-c3d80250170d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200420163511-1957bb5e6d1f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200501145240-bc7a7d42d5c3/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200602225109-6fdc65e7d980/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200720211630-cb9d2d5c5666/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200909081042-eff7692f9009/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200918174421-af09f7315aff/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20201101102859-da207088b7d1 h1:a/mKvvZr9Jcc8oKfcmgzyp7OwF73JPWsQLvH1z2Kxck= golang.org/x/sys v0.0.0-20201101102859-da207088b7d1/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20210112080510-489259a85091 h1:DMyOG0U+gKfu8JZzg2UQe9MeaC1X+xQWlAKcRnjxjCw= golang.org/x/sys v0.0.0-20210112080510-489259a85091/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210218155724-8ebf48af031b h1:lAZ0/chPUDWwjqosYR0X4M490zQhMsiJ4K3DbA7o+3g= golang.org/x/sys 
v0.0.0-20210218155724-8ebf48af031b/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= @@ -1284,7 +1264,6 @@ golang.org/x/xerrors v0.0.0-20190410155217-1f06c39b4373/go.mod h1:I/5z698sn9Ka8T golang.org/x/xerrors v0.0.0-20190513163551-3ee3066db522/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= -golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543 h1:E7g+9GITq07hpfrRu66IVDexMakfv52eLZ2CXBWiKr4= golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1 h1:go1bK/D/BFZV2I8cIQd1NKEZ+0owSTG1fDTci4IqFcE= golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= @@ -1358,7 +1337,6 @@ gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15/go.mod h1:Co6ibVJAznAaIkqp8 gopkg.in/check.v1 v1.0.0-20200227125254-8fa46927fb4f h1:BLraFXnmrev5lT+xlilqcH8XK9/i0At2xKjWk4p6zsU= gopkg.in/check.v1 v1.0.0-20200227125254-8fa46927fb4f/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/errgo.v2 v2.1.0/go.mod h1:hNsd1EY+bozCKY1Ytp96fpM3vjJbqLJn88ws8XvfDNI= -gopkg.in/fsnotify.v1 v1.4.7 h1:xOHLXZwVvI9hhs+cLKq5+I5onOuwQLhQwiu63xxlHs4= gopkg.in/fsnotify.v1 v1.4.7/go.mod h1:Tz8NjZHkW78fSQdbUxIjBTcgA1z1m8ZHf0WmKUhAMys= gopkg.in/gemnasium/logrus-airbrake-hook.v2 v2.1.2/go.mod h1:Xk6kEKp8OKb+X14hQBKWaSkCsqBpgog8nAV2xsGOxlo= gopkg.in/go-playground/assert.v1 v1.2.1/go.mod h1:9RXL0bg/zibRAgZUYszZSwO/z8Y/a8bDuhia5mkpMnE= @@ -1388,6 +1366,8 @@ gopkg.in/yaml.v2 v2.2.7/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.2.8/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.3.0 h1:clyUAQHOM3G0M3f5vQj7LuJrETvjVot3Z5el9nffUtU= gopkg.in/yaml.v2 v2.3.0/go.mod 
h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.4.0 h1:D8xgwECY7CYvx+Y2n4sBz93Jn9JRvxdiyyo8CTfuKaY= +gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ= gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= gopkg.in/yaml.v3 v3.0.0-20200605160147-a5ece683394c h1:grhR+C34yXImVGp7EzNk+DTIk+323eIUWOmEevy6bDo= gopkg.in/yaml.v3 v3.0.0-20200605160147-a5ece683394c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= diff --git a/pkg/storage/fs/cephfs/cephfs.go b/pkg/storage/fs/cephfs/cephfs.go new file mode 100644 index 0000000000..b0c4cb588f --- /dev/null +++ b/pkg/storage/fs/cephfs/cephfs.go @@ -0,0 +1,594 @@ +// Copyright 2018-2021 CERN +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// +// In applying this license, CERN does not waive the privileges and immunities +// granted to it by virtue of its status as an Intergovernmental Organization +// or submit itself to any jurisdiction. 
+ +package cephfs + +import ( + "context" + cephfs2 "github.com/ceph/go-ceph/cephfs" + "github.com/ceph/go-ceph/rados" + "io" + "net/url" + "os" + "path/filepath" + "strconv" + "strings" + + userpb "github.com/cs3org/go-cs3apis/cs3/identity/user/v1beta1" + userv1beta1 "github.com/cs3org/go-cs3apis/cs3/identity/user/v1beta1" + provider "github.com/cs3org/go-cs3apis/cs3/storage/provider/v1beta1" + "github.com/ceph/go-ceph" + "github.com/cs3org/reva/pkg/appctx" + "github.com/cs3org/reva/pkg/errtypes" + "github.com/cs3org/reva/pkg/logger" + "github.com/cs3org/reva/pkg/storage" + "github.com/cs3org/reva/pkg/storage/fs/registry" + "github.com/cs3org/reva/pkg/storage/utils/chunking" + "github.com/cs3org/reva/pkg/storage/utils/templates" + "github.com/cs3org/reva/pkg/user" + "github.com/mitchellh/mapstructure" + "github.com/pkg/errors" + "github.com/pkg/xattr" +) + +const ( + // TODO the below comment is currently copied from the owncloud driver, revisit + // Currently,extended file attributes have four separated + // namespaces (user, trusted, security and system) followed by a dot. + // A non root user can only manipulate the user. namespace, which is what + // we will use to store ownCloud specific metadata. To prevent name + // collisions with other apps We are going to introduce a sub namespace + // "user.ocis." + + ocisPrefix string = "user.ocis." + parentidAttr string = ocisPrefix + "parentid" + ownerIDAttr string = ocisPrefix + "owner.id" + ownerIDPAttr string = ocisPrefix + "owner.idp" + // the base name of the node + // updated when the file is renamed or moved + nameAttr string = ocisPrefix + "name" + + // grantPrefix is the prefix for sharing related extended attributes + grantPrefix string = ocisPrefix + "grant." + metadataPrefix string = ocisPrefix + "md." + + // favorite flag, per user + favPrefix string = ocisPrefix + "fav." 
+ + // a temporary etag for a folder that is removed when the mtime propagation happens + tmpEtagAttr string = ocisPrefix + "tmp.etag" + referenceAttr string = ocisPrefix + "cs3.ref" // target of a cs3 reference + checksumPrefix string = ocisPrefix + "cs." // followed by the algorithm, eg. ocis.cs.sha1 + trashOriginAttr string = ocisPrefix + "trash.origin" // trash origin + + // we use a single attribute to enable or disable propagation of both: synctime and treesize + propagationAttr string = ocisPrefix + "propagation" + + // the tree modification time of the tree below this node, + // propagated when synctime_accounting is true and + // user.ocis.propagation=1 is set + // stored as a readable time.RFC3339Nano + treeMTimeAttr string = ocisPrefix + "tmtime" + + // the size of the tree below this node, + // propagated when treesize_accounting is true and + // user.ocis.propagation=1 is set + // stored as uint64, little endian + treesizeAttr string = ocisPrefix + "treesize" + + // the quota for the storage space / tree, regardless who accesses it + quotaAttr string = ocisPrefix + "quota" +) + +func init() { + registry.Register("cephfs", New) +} + +func parseConfig(m map[string]interface{}) (*Options, error) { + o := &Options{} + if err := mapstructure.Decode(m, o); err != nil { + err = errors.Wrap(err, "error decoding conf") + return nil, err + } + return o, nil +} + +func (o *Options) init(m map[string]interface{}) { + if o.UserLayout == "" { + o.UserLayout = "{{.Id.OpaqueId}}" + } + // ensure user layout has no starting or trailing / + o.UserLayout = strings.Trim(o.UserLayout, "/") + + if o.ShareFolder == "" { + o.ShareFolder = "/Shares" + } + // ensure share folder always starts with slash + o.ShareFolder = filepath.Join("/", o.ShareFolder) + + // c.DataDirectory should never end in / unless it is the root + o.Root = filepath.Clean(o.Root) +} + +// New returns an implementation to of the storage.FS interface that talk to +// a local filesystem. 
+func New(m map[string]interface{}) (storage.FS, error) { + o, err := parseConfig(m) + if err != nil { + return nil, err + } + o.init(m) + + // create data paths for internal layout + dataPaths := []string{ + filepath.Join(o.Root, "nodes"), + // notes contain symlinks from nodes//uploads/ to ../../uploads/ + // better to keep uploads on a fast / volatile storage before a workflow finally moves them to the nodes dir + filepath.Join(o.Root, "uploads"), + filepath.Join(o.Root, "trash"), + } + for _, v := range dataPaths { + if err := os.MkdirAll(v, 0700); err != nil { + logger.New().Error().Err(err). + Str("path", v). + Msg("could not create data dir") + } + } + + lu := &Lookup{ + Options: o, + } + + var conn *rados.Conn + var mount *cephfs2.MountInfo + if conn, err = rados.NewConn(); err == nil { + if err = conn.ReadConfigFile(o.CephConf); err == nil { + if err = conn.Connect(); err == nil { + mount, err = cephfs2.CreateFromRados(conn) + } + } + } + + if err != nil { + logger.New().Error().Err(err). + Msg("could not connect to cluster") + + return nil, err + } + + // the root node has an empty name + // the root node has no parent + if err = createNode( + &Node{lu: lu, ID: "root"}, + &userv1beta1.UserId{ + OpaqueId: o.Owner, + }, + ); err != nil { + return nil, err + } + + tp, err := NewTree(lu) + if err != nil { + return nil, err + } + + return &cephfs{ + cl: &CephClient{rados: conn, cephfs: mount}, + tp: tp, + lu: lu, + o: o, + p: &Permissions{lu: lu}, + chunkHandler: chunking.NewChunkHandler(filepath.Join(o.Root, "uploads")), + }, nil +} + +type cephfs struct { + cl *CephClient + tp TreePersistence + lu *Lookup + o *Options + p *Permissions + chunkHandler *chunking.ChunkHandler +} + +func (fs *cephfs) Shutdown(ctx context.Context) error { + return nil +} + +// TODO Document in the cs3 should we return quota or free space? 
+func (fs *cephfs) GetQuota(ctx context.Context) (uint64, uint64, error) { + var node *Node + var err error + if node, err = fs.lu.HomeOrRootNode(ctx); err != nil { + return 0, 0, err + } + + if !node.Exists { + err = errtypes.NotFound(filepath.Join(node.ParentID, node.Name)) + return 0, 0, err + } + + rp, err := fs.p.AssemblePermissions(ctx, node) + switch { + case err != nil: + return 0, 0, errtypes.InternalError(err.Error()) + case !rp.GetQuota: + return 0, 0, errtypes.PermissionDenied(node.ID) + } + + ri, err := node.AsResourceInfo(ctx, rp, []string{"treesize", "quota"}) + if err != nil { + return 0, 0, err + } + + quotaStr := _quotaUnknown + if ri.Opaque != nil && ri.Opaque.Map != nil && ri.Opaque.Map["quota"] != nil && ri.Opaque.Map["quota"].Decoder == "plain" { + quotaStr = string(ri.Opaque.Map["quota"].Value) + } + + avail, err := fs.getAvailableSize(fs.lu.toInternalPath(node.ID)) + if err != nil { + return 0, 0, err + } + total := avail + ri.Size + + switch { + case quotaStr == _quotaUncalculated, quotaStr == _quotaUnknown, quotaStr == _quotaUnlimited: + // best we can do is return current total + // TODO indicate unlimited total? -> in opaque data? 
+ default: + if quota, err := strconv.ParseUint(quotaStr, 10, 64); err == nil { + if total > quota { + total = quota + } + } + } + return total, ri.Size, nil +} + +// CreateHome creates a new root node that has no parent id +func (fs *cephfs) CreateHome(ctx context.Context) (err error) { + if !fs.o.EnableHome || fs.o.UserLayout == "" { + return errtypes.NotSupported("cephfs: CreateHome() home supported disabled") + } + + var n, h *Node + if n, err = fs.lu.RootNode(ctx); err != nil { + return + } + h, err = fs.lu.WalkPath(ctx, n, fs.lu.mustGetUserLayout(ctx), func(ctx context.Context, n *Node) error { + if !n.Exists { + if err := fs.tp.CreateDir(ctx, n); err != nil { + return err + } + } + return nil + }) + if err != nil { + return + } + + // update the owner + u := user.ContextMustGetUser(ctx) + if err = h.writeMetadata(u.Id); err != nil { + return + } + + if fs.o.TreeTimeAccounting { + homePath := h.lu.toInternalPath(h.ID) + // mark the home node as the end of propagation + if err = xattr.Set(homePath, propagationAttr, []byte("1")); err != nil { + appctx.GetLogger(ctx).Error().Err(err).Interface("node", h).Msg("could not mark home as propagation root") + return + } + } + return +} + +// GetHome is called to look up the home path for a user +// It is NOT supposed to return the internal path but the external path +func (fs *cephfs) GetHome(ctx context.Context) (string, error) { + if !fs.o.EnableHome || fs.o.UserLayout == "" { + return "", errtypes.NotSupported("cephfs: GetHome() home supported disabled") + } + u := user.ContextMustGetUser(ctx) + layout := templates.WithUser(u, fs.o.UserLayout) + return filepath.Join(fs.o.Root, layout), nil // TODO use a namespace? 
+} + +// Tree persistence + +// GetPathByID returns the fn pointed by the file id, without the internal namespace +func (fs *cephfs) GetPathByID(ctx context.Context, id *provider.ResourceId) (string, error) { + return fs.tp.GetPathByID(ctx, id) +} + +func (fs *cephfs) CreateDir(ctx context.Context, fn string) (err error) { + var n *Node + if n, err = fs.lu.NodeFromPath(ctx, fn); err != nil { + return + } + + if n.Exists { + return errtypes.AlreadyExists(fn) + } + + pn, err := n.Parent() + if err != nil { + return errors.Wrap(err, "cephfs: error getting parent "+n.ParentID) + } + ok, err := fs.p.HasPermission(ctx, pn, func(rp *provider.ResourcePermissions) bool { + return rp.CreateContainer + }) + switch { + case err != nil: + return errtypes.InternalError(err.Error()) + case !ok: + return errtypes.PermissionDenied(filepath.Join(n.ParentID, n.Name)) + } + + err = fs.tp.CreateDir(ctx, n) + + if fs.o.TreeTimeAccounting { + nodePath := n.lu.toInternalPath(n.ID) + // mark the home node as the end of propagation + if err = xattr.Set(nodePath, propagationAttr, []byte("1")); err != nil { + appctx.GetLogger(ctx).Error().Err(err).Interface("node", n).Msg("could not mark node to propagate") + return + } + } + return +} + +// CreateReference creates a reference as a node folder with the target stored in extended attributes +// There is no difference between the /Shares folder and normal nodes because the storage is not supposed to be accessible without the storage provider. +// In effect everything is a shadow namespace. +// To mimic the eos end owncloud driver we only allow references as children of the "/Shares" folder +// TODO when home support is enabled should the "/Shares" folder still be listed? 
+func (fs *cephfs) CreateReference(ctx context.Context, p string, targetURI *url.URL) (err error) { + + p = strings.Trim(p, "/") + parts := strings.Split(p, "/") + + if len(parts) != 2 { + return errtypes.PermissionDenied("cephfs: references must be a child of the share folder: share_folder=" + fs.o.ShareFolder + " path=" + p) + } + + if parts[0] != strings.Trim(fs.o.ShareFolder, "/") { + return errtypes.PermissionDenied("cephfs: cannot create references outside the share folder: share_folder=" + fs.o.ShareFolder + " path=" + p) + } + + // create Shares folder if it does not exist + var n *Node + if n, err = fs.lu.NodeFromPath(ctx, fs.o.ShareFolder); err != nil { + return errtypes.InternalError(err.Error()) + } else if !n.Exists { + if err = fs.tp.CreateDir(ctx, n); err != nil { + return + } + } + + if n, err = n.Child(parts[1]); err != nil { + return errtypes.InternalError(err.Error()) + } + + if n.Exists { + // TODO append increasing number to mountpoint name + return errtypes.AlreadyExists(p) + } + + if err = fs.tp.CreateDir(ctx, n); err != nil { + return + } + + internal := n.lu.toInternalPath(n.ID) + if err = xattr.Set(internal, referenceAttr, []byte(targetURI.String())); err != nil { + return errors.Wrapf(err, "cephfs: error setting the target %s on the reference file %s", targetURI.String(), internal) + } + return nil +} + +func (fs *cephfs) Move(ctx context.Context, oldRef, newRef *provider.Reference) (err error) { + var oldNode, newNode *Node + if oldNode, err = fs.lu.NodeFromResource(ctx, oldRef); err != nil { + return + } + + if !oldNode.Exists { + err = errtypes.NotFound(filepath.Join(oldNode.ParentID, oldNode.Name)) + return + } + + ok, err := fs.p.HasPermission(ctx, oldNode, func(rp *provider.ResourcePermissions) bool { + return rp.Move + }) + switch { + case err != nil: + return errtypes.InternalError(err.Error()) + case !ok: + return errtypes.PermissionDenied(oldNode.ID) + } + + if newNode, err = fs.lu.NodeFromResource(ctx, newRef); err != nil { + 
return + } + if newNode.Exists { + err = errtypes.AlreadyExists(filepath.Join(newNode.ParentID, newNode.Name)) + return + } + + return fs.tp.Move(ctx, oldNode, newNode) +} + +func (fs *cephfs) GetMD(ctx context.Context, ref *provider.Reference, mdKeys []string) (ri *provider.ResourceInfo, err error) { + var node *Node + if node, err = fs.lu.NodeFromResource(ctx, ref); err != nil { + return + } + + if !node.Exists { + err = errtypes.NotFound(filepath.Join(node.ParentID, node.Name)) + return + } + + rp, err := fs.p.AssemblePermissions(ctx, node) + switch { + case err != nil: + return nil, errtypes.InternalError(err.Error()) + case !rp.Stat: + return nil, errtypes.PermissionDenied(node.ID) + } + + return node.AsResourceInfo(ctx, rp, mdKeys) +} + +func (fs *cephfs) ListFolder(ctx context.Context, ref *provider.Reference, mdKeys []string) (finfos []*provider.ResourceInfo, err error) { + var node *Node + if node, err = fs.lu.NodeFromResource(ctx, ref); err != nil { + return + } + + if !node.Exists { + err = errtypes.NotFound(filepath.Join(node.ParentID, node.Name)) + return + } + + rp, err := fs.p.AssemblePermissions(ctx, node) + switch { + case err != nil: + return nil, errtypes.InternalError(err.Error()) + case !rp.ListContainer: + return nil, errtypes.PermissionDenied(node.ID) + } + + var children []*Node + children, err = fs.tp.ListFolder(ctx, node) + if err != nil { + return + } + + for i := range children { + np := rp + // add this childs permissions + addPermissions(np, node.PermissionSet(ctx)) + if ri, err := children[i].AsResourceInfo(ctx, np, mdKeys); err == nil { + finfos = append(finfos, ri) + } + } + return +} + +func (fs *cephfs) Delete(ctx context.Context, ref *provider.Reference) (err error) { + var node *Node + if node, err = fs.lu.NodeFromResource(ctx, ref); err != nil { + return + } + if !node.Exists { + err = errtypes.NotFound(filepath.Join(node.ParentID, node.Name)) + return + } + + ok, err := fs.p.HasPermission(ctx, node, func(rp 
*provider.ResourcePermissions) bool { + return rp.Delete + }) + switch { + case err != nil: + return errtypes.InternalError(err.Error()) + case !ok: + return errtypes.PermissionDenied(filepath.Join(node.ParentID, node.Name)) + } + + return fs.tp.Delete(ctx, node) +} + +// Data persistence + +func (fs *cephfs) ContentPath(n *Node) string { + return n.lu.toInternalPath(n.ID) +} + +func (fs *cephfs) Download(ctx context.Context, ref *provider.Reference) (io.ReadCloser, error) { + node, err := fs.lu.NodeFromResource(ctx, ref) + if err != nil { + return nil, errors.Wrap(err, "cephfs: error resolving ref") + } + + if !node.Exists { + err = errtypes.NotFound(filepath.Join(node.ParentID, node.Name)) + return nil, err + } + + ok, err := fs.p.HasPermission(ctx, node, func(rp *provider.ResourcePermissions) bool { + return rp.InitiateFileDownload + }) + switch { + case err != nil: + return nil, errtypes.InternalError(err.Error()) + case !ok: + return nil, errtypes.PermissionDenied(filepath.Join(node.ParentID, node.Name)) + } + + contentPath := fs.ContentPath(node) + + r, err := os.Open(contentPath) + if err != nil { + if os.IsNotExist(err) { + return nil, errtypes.NotFound(contentPath) + } + return nil, errors.Wrap(err, "cephfs: error reading "+contentPath) + } + return r, nil +} + +// arbitrary metadata persistence in metadata.go + +// Version persistence in revisions.go + +// Trash persistence in recycle.go + +// share persistence in grants.go + +func (fs *cephfs) copyMD(s string, t string) (err error) { + var attrs []string + if attrs, err = xattr.List(s); err != nil { + return err + } + for i := range attrs { + if strings.HasPrefix(attrs[i], ocisPrefix) { + var d []byte + if d, err = xattr.Get(s, attrs[i]); err != nil { + return err + } + if err = xattr.Set(t, attrs[i], d); err != nil { + return err + } + } + } + return nil +} + +func isSameUserID(i *userpb.UserId, j *userpb.UserId) bool { + switch { + case i == nil, j == nil: + return false + case i.OpaqueId == j.OpaqueId 
&& i.Idp == j.Idp: + return true + default: + return false + } +} diff --git a/pkg/storage/fs/cephfs/cephfs_test.go b/pkg/storage/fs/cephfs/cephfs_test.go new file mode 100644 index 0000000000..129387d1b5 --- /dev/null +++ b/pkg/storage/fs/cephfs/cephfs_test.go @@ -0,0 +1,174 @@ +// Copyright 2018-2021 CERN +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// +// In applying this license, CERN does not waive the privileges and immunities +// granted to it by virtue of its status as an Intergovernmental Organization +// or submit itself to any jurisdiction. + +// +build storageRace + +package cephfs + +import ( + "context" + "fmt" + "io/ioutil" + "os" + "path" + "sync" + "testing" + + userpb "github.com/cs3org/go-cs3apis/cs3/identity/user/v1beta1" + provider "github.com/cs3org/go-cs3apis/cs3/storage/provider/v1beta1" + "github.com/cs3org/reva/pkg/user" + "github.com/stretchr/testify/assert" +) + +// TestLackAdvisoryLocks demonstrates that access to a file +// is not mutually exclusive on the oCIS storage. 
+var ( + config = make(map[string]interface{}) + ctx context.Context + f, f1 *os.File + tmpDir string +) + +func TestMain(m *testing.M) { + var err error + + // prepare storage + { + tmpDir, _ = ioutil.TempDir("", "ocis_fs_unittests") + { + config["root"] = tmpDir + config["enable_home"] = false + config["user_layout"] = "{{.Id.OpaqueId}}" + config["owner"] = "f7fbf8c8-139b-4376-b307-cf0a8c2d0d9c" + } + } + + // prepare context + { + u := &userpb.User{ + Id: &userpb.UserId{ + OpaqueId: "f7fbf8c8-139b-4376-b307-cf0a8c2d0d9c", + }, + Username: "test", + Mail: "marie@example.org", + DisplayName: "Marie Curie", + Groups: []string{ + "radium-lovers", + "polonium-lovers", + "physics-lovers", + }, + } + ctx = user.ContextSetUser(context.Background(), u) + } + + // do not do this. Prepare f0 + if err = ioutil.WriteFile(fmt.Sprintf("%s/%s", tmpDir, "f.lol"), []byte("test"), 0644); err != nil { + panic(err) + } + f, err = os.Open(fmt.Sprintf("%s/%s", tmpDir, "f.lol")) + if err != nil { + panic(err) + } + + // do not do this. Prepare f1 + if err = ioutil.WriteFile(fmt.Sprintf("%s/%s", tmpDir, "f1.lol"), []byte("another run"), 0644); err != nil { + panic(err) + } + f1, err = os.Open(fmt.Sprintf("%s/%s", tmpDir, "f1.lol")) + if err != nil { + panic(err) + } + + fmt.Printf("%s\n", tmpDir) + m.Run() + + cts, err := ioutil.ReadFile(path.Join(tmpDir, "nodes", "root", "uploaded.txt")) + if err != nil { + panic(err) + } + fmt.Println(string(cts)) +} + +// Scenario: start 2 uploads, pause the first one, let the second one finish first, +// resume the first one at some point in time. Both uploads should finish. +// Needs to result in 2 versions, last finished is the most recent version. +func TestTwoUploadsVersioning(t *testing.T) { + //runtime.GOMAXPROCS(1) // uncomment to remove concurrency and see revisions working. 
+ ofs, err := New(config) + if err != nil { + t.Error(err) + } + + wg := &sync.WaitGroup{} + wg.Add(2) + + // upload file with contents: "test" + go func(wg *sync.WaitGroup) { + ofs.Upload(ctx, &provider.Reference{ + Spec: &provider.Reference_Path{Path: "uploaded.txt"}, + }, f) + wg.Done() + }(wg) + + // upload file with contents: "another run" + go func(wg *sync.WaitGroup) { + ofs.Upload(ctx, &provider.Reference{ + Spec: &provider.Reference_Path{Path: "uploaded.txt"}, + }, f1) + wg.Done() + }(wg) + + // this test, by the way the oCIS storage is implemented, is non-deterministic, and the contents + // of uploaded.txt will change on each run depending on which of the 2 routines above makes it + // first into the scheduler. In order to make it deterministic, we have to consider the Upload impl- + // ementation and we can leverage concurrency and add locks only when the destination path are the + // same for 2 uploads. + + wg.Wait() + revisions, err := ofs.ListRevisions(ctx, &provider.Reference{ + Spec: &provider.Reference_Path{Path: "uploaded.txt"}, + }) + assert.NoError(t, err) + assert.Equal(t, 1, len(revisions)) +} + +// TestParallelMkcol ensures that, on an unit level, if multiple requests fight for creating a directory (race condition) +// only the first one will create it. Note that there is little to synchronize here because if the folder is already +// created, the underlying filesystem (not the storage driver layer) will fail when attempting to create the directory. 
+func TestParallelMkcol(t *testing.T) { + ofs, err := New(config) + if err != nil { + t.Error(err) + } + + for i := 0; i < 10; i++ { + t.Run("", func(t *testing.T) { + t.Parallel() + if err := ofs.CreateDir(ctx, "fightforit"); err != nil { + rinfo, err := ofs.GetMD(ctx, &provider.Reference{ + Spec: &provider.Reference_Path{Path: "fightforit"}, + }, nil) + if err != nil { + t.Error(err) + } + + assert.NotNil(t, rinfo) + } + }) + } +} diff --git a/pkg/storage/fs/cephfs/cephfs_unix.go b/pkg/storage/fs/cephfs/cephfs_unix.go new file mode 100644 index 0000000000..52d7c729af --- /dev/null +++ b/pkg/storage/fs/cephfs/cephfs_unix.go @@ -0,0 +1,32 @@ +// Copyright 2018-2021 CERN +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// +// In applying this license, CERN does not waive the privileges and immunities +// granted to it by virtue of its status as an Intergovernmental Organization +// or submit itself to any jurisdiction. 
+ +// +build !windows + +package cephfs + +import "syscall" + +func (fs *cephfs) getAvailableSize(path string) (uint64, error) { + stat := syscall.Statfs_t{} + err := syscall.Statfs(path, &stat) + if err != nil { + return 0, err + } + return stat.Bavail * uint64(stat.Bsize), nil +} diff --git a/pkg/storage/fs/cephfs/cephfs_windows.go b/pkg/storage/fs/cephfs/cephfs_windows.go new file mode 100644 index 0000000000..3bce81f02c --- /dev/null +++ b/pkg/storage/fs/cephfs/cephfs_windows.go @@ -0,0 +1,36 @@ +// Copyright 2018-2021 CERN +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// +// In applying this license, CERN does not waive the privileges and immunities +// granted to it by virtue of its status as an Intergovernmental Organization +// or submit itself to any jurisdiction. 
+ +// +build windows + +package cephfs + +import "golang.org/x/sys/windows" + +func (fs *cephfs) getAvailableSize(path string) (uint64, error) { + var free, total, avail uint64 + pathPtr, err := windows.UTF16PtrFromString(path) + if err != nil { + return 0, err + } + err = windows.GetDiskFreeSpaceEx(pathPtr, &avail, &total, &free) + if err != nil { + return 0, err + } + return avail, nil +} diff --git a/pkg/storage/fs/cephfs/connection.go b/pkg/storage/fs/cephfs/connection.go new file mode 100644 index 0000000000..65dab13566 --- /dev/null +++ b/pkg/storage/fs/cephfs/connection.go @@ -0,0 +1,29 @@ +// Copyright 2018-2021 CERN +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// +// In applying this license, CERN does not waive the privileges and immunities +// granted to it by virtue of its status as an Intergovernmental Organization +// or submit itself to any jurisdiction. + +package cephfs + +import ( + cephfs2 "github.com/ceph/go-ceph/cephfs" + "github.com/ceph/go-ceph/rados" +) + +type CephClient struct { + rados *rados.Conn + cephfs *cephfs2.MountInfo +} diff --git a/pkg/storage/fs/cephfs/grants.go b/pkg/storage/fs/cephfs/grants.go new file mode 100644 index 0000000000..d619d03e7f --- /dev/null +++ b/pkg/storage/fs/cephfs/grants.go @@ -0,0 +1,167 @@ +// Copyright 2018-2021 CERN +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// +// In applying this license, CERN does not waive the privileges and immunities +// granted to it by virtue of its status as an Intergovernmental Organization +// or submit itself to any jurisdiction. + +package cephfs + +import ( + "context" + "path/filepath" + "strings" + + provider "github.com/cs3org/go-cs3apis/cs3/storage/provider/v1beta1" + "github.com/cs3org/reva/pkg/appctx" + "github.com/cs3org/reva/pkg/errtypes" + "github.com/cs3org/reva/pkg/storage/utils/ace" + "github.com/pkg/xattr" +) + +func (fs *cephfs) AddGrant(ctx context.Context, ref *provider.Reference, g *provider.Grant) (err error) { + log := appctx.GetLogger(ctx) + log.Debug().Interface("ref", ref).Interface("grant", g).Msg("AddGrant()") + var node *Node + if node, err = fs.lu.NodeFromResource(ctx, ref); err != nil { + return + } + if !node.Exists { + err = errtypes.NotFound(filepath.Join(node.ParentID, node.Name)) + return + } + + ok, err := fs.p.HasPermission(ctx, node, func(rp *provider.ResourcePermissions) bool { + // TODO remove AddGrant or UpdateGrant grant from CS3 api, redundant? 
tracked in https://github.com/cs3org/cs3apis/issues/92 + return rp.AddGrant || rp.UpdateGrant + }) + switch { + case err != nil: + return errtypes.InternalError(err.Error()) + case !ok: + return errtypes.PermissionDenied(filepath.Join(node.ParentID, node.Name)) + } + + np := fs.lu.toInternalPath(node.ID) + e := ace.FromGrant(g) + principal, value := e.Marshal() + if err := xattr.Set(np, grantPrefix+principal, value); err != nil { + return err + } + return fs.tp.Propagate(ctx, node) +} + +func (fs *cephfs) ListGrants(ctx context.Context, ref *provider.Reference) (grants []*provider.Grant, err error) { + var node *Node + if node, err = fs.lu.NodeFromResource(ctx, ref); err != nil { + return + } + if !node.Exists { + err = errtypes.NotFound(filepath.Join(node.ParentID, node.Name)) + return + } + + ok, err := fs.p.HasPermission(ctx, node, func(rp *provider.ResourcePermissions) bool { + return rp.ListGrants + }) + switch { + case err != nil: + return nil, errtypes.InternalError(err.Error()) + case !ok: + return nil, errtypes.PermissionDenied(filepath.Join(node.ParentID, node.Name)) + } + + log := appctx.GetLogger(ctx) + np := fs.lu.toInternalPath(node.ID) + var attrs []string + if attrs, err = xattr.List(np); err != nil { + log.Error().Err(err).Msg("error listing attributes") + return nil, err + } + + log.Debug().Interface("attrs", attrs).Msg("read attributes") + + aces := extractACEsFromAttrs(ctx, np, attrs) + + grants = make([]*provider.Grant, 0, len(aces)) + for i := range aces { + grants = append(grants, aces[i].Grant()) + } + + return grants, nil +} + +func (fs *cephfs) RemoveGrant(ctx context.Context, ref *provider.Reference, g *provider.Grant) (err error) { + var node *Node + if node, err = fs.lu.NodeFromResource(ctx, ref); err != nil { + return + } + if !node.Exists { + err = errtypes.NotFound(filepath.Join(node.ParentID, node.Name)) + return + } + + ok, err := fs.p.HasPermission(ctx, node, func(rp *provider.ResourcePermissions) bool { + return rp.RemoveGrant + 
}) + switch { + case err != nil: + return errtypes.InternalError(err.Error()) + case !ok: + return errtypes.PermissionDenied(filepath.Join(node.ParentID, node.Name)) + } + + var attr string + if g.Grantee.Type == provider.GranteeType_GRANTEE_TYPE_GROUP { + attr = grantPrefix + _groupAcePrefix + g.Grantee.GetGroupId().OpaqueId + } else { + attr = grantPrefix + _userAcePrefix + g.Grantee.GetUserId().OpaqueId + } + + np := fs.lu.toInternalPath(node.ID) + if err = xattr.Remove(np, attr); err != nil { + return + } + + return fs.tp.Propagate(ctx, node) +} + +func (fs *cephfs) UpdateGrant(ctx context.Context, ref *provider.Reference, g *provider.Grant) error { + // TODO remove AddGrant or UpdateGrant grant from CS3 api, redundant? tracked in https://github.com/cs3org/cs3apis/issues/92 + return fs.AddGrant(ctx, ref, g) +} + +// extractACEsFromAttrs reads ACEs in the list of attrs from the node +func extractACEsFromAttrs(ctx context.Context, fsfn string, attrs []string) (entries []*ace.ACE) { + log := appctx.GetLogger(ctx) + entries = []*ace.ACE{} + for i := range attrs { + if strings.HasPrefix(attrs[i], grantPrefix) { + var value []byte + var err error + if value, err = xattr.Get(fsfn, attrs[i]); err != nil { + log.Error().Err(err).Str("attr", attrs[i]).Msg("could not read attribute") + continue + } + var e *ace.ACE + principal := attrs[i][len(grantPrefix):] + if e, err = ace.Unmarshal(principal, value); err != nil { + log.Error().Err(err).Str("principal", principal).Str("attr", attrs[i]).Msg("could not unmarshal ace") + continue + } + entries = append(entries, e) + } + } + return +} diff --git a/pkg/storage/fs/cephfs/interfaces.go b/pkg/storage/fs/cephfs/interfaces.go new file mode 100644 index 0000000000..340e14abcc --- /dev/null +++ b/pkg/storage/fs/cephfs/interfaces.go @@ -0,0 +1,64 @@ +// Copyright 2018-2021 CERN +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// +// In applying this license, CERN does not waive the privileges and immunities +// granted to it by virtue of its status as an Intergovernmental Organization +// or submit itself to any jurisdiction. + +package cephfs + +import ( + "context" + "os" + + provider "github.com/cs3org/go-cs3apis/cs3/storage/provider/v1beta1" +) + +// TODO the different aspects of a storage: Tree, Lookup and Permissions should be able to be reusable +// Below is a start of Interfaces that needs to be worked out further + +// TreePersistence is used to manage a tree hierarchy +type TreePersistence interface { + GetPathByID(ctx context.Context, id *provider.ResourceId) (string, error) + GetMD(ctx context.Context, node *Node) (os.FileInfo, error) + ListFolder(ctx context.Context, node *Node) ([]*Node, error) + //CreateHome(owner *userpb.UserId) (n *Node, err error) + CreateDir(ctx context.Context, node *Node) (err error) + //CreateReference(ctx context.Context, node *Node, targetURI *url.URL) error + Move(ctx context.Context, oldNode *Node, newNode *Node) (err error) + Delete(ctx context.Context, node *Node) (err error) + + Propagate(ctx context.Context, node *Node) (err error) +} + +// Lookup is used to encapsulate path transformations +/* +type Lookup interface { + NodeFromResource(ctx context.Context, ref *provider.Reference) (node *Node, err error) + NodeFromID(ctx context.Context, id *provider.ResourceId) (node *Node, err error) + NodeFromPath(ctx context.Context, fn string) (node *Node, err error) + Path(ctx context.Context, node *Node) (path string, err error) + 
+ // HomeNode returns the currently logged in users home node + // requires EnableHome to be true + HomeNode(ctx context.Context) (node *Node, err error) + + // RootNode returns the storage root node + RootNode(ctx context.Context) (node *Node, err error) + + // HomeOrRootNode returns the users home node when home support is enabled. + // it returns the storages root node otherwise + HomeOrRootNode(ctx context.Context) (node *Node, err error) +} +*/ diff --git a/pkg/storage/fs/cephfs/lookup.go b/pkg/storage/fs/cephfs/lookup.go new file mode 100644 index 0000000000..2f4dd464ea --- /dev/null +++ b/pkg/storage/fs/cephfs/lookup.go @@ -0,0 +1,163 @@ +// Copyright 2018-2021 CERN +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// +// In applying this license, CERN does not waive the privileges and immunities +// granted to it by virtue of its status as an Intergovernmental Organization +// or submit itself to any jurisdiction. 
+ +package cephfs + +import ( + "context" + "fmt" + "path/filepath" + "strings" + + provider "github.com/cs3org/go-cs3apis/cs3/storage/provider/v1beta1" + "github.com/cs3org/reva/pkg/appctx" + "github.com/cs3org/reva/pkg/errtypes" + "github.com/cs3org/reva/pkg/storage/utils/templates" + "github.com/cs3org/reva/pkg/user" +) + +// Lookup implements transformations from filepath to node and back +type Lookup struct { + Options *Options +} + +// NodeFromResource takes in a request path or request id and converts it to a Node +func (lu *Lookup) NodeFromResource(ctx context.Context, ref *provider.Reference) (*Node, error) { + if ref.GetPath() != "" { + return lu.NodeFromPath(ctx, ref.GetPath()) + } + + if ref.GetId() != nil { + return lu.NodeFromID(ctx, ref.GetId()) + } + + // reference is invalid + return nil, fmt.Errorf("invalid reference %+v", ref) +} + +// NodeFromPath converts a filename into a Node +func (lu *Lookup) NodeFromPath(ctx context.Context, fn string) (node *Node, err error) { + log := appctx.GetLogger(ctx) + log.Debug().Interface("fn", fn).Msg("NodeFromPath()") + + if node, err = lu.HomeOrRootNode(ctx); err != nil { + return + } + + // TODO collect permissions of the current user on every segment + if fn != "/" { + node, err = lu.WalkPath(ctx, node, fn, func(ctx context.Context, n *Node) error { + log.Debug().Interface("node", n).Msg("NodeFromPath() walk") + return nil + }) + } + + return +} + +// NodeFromID returns the internal path for the id +func (lu *Lookup) NodeFromID(ctx context.Context, id *provider.ResourceId) (n *Node, err error) { + if id == nil || id.OpaqueId == "" { + return nil, fmt.Errorf("invalid resource id %+v", id) + } + return ReadNode(ctx, lu, id.OpaqueId) +} + +// Path returns the path for node +func (lu *Lookup) Path(ctx context.Context, n *Node) (p string, err error) { + var root *Node + if root, err = lu.HomeOrRootNode(ctx); err != nil { + return + } + for n.ID != root.ID { + p = filepath.Join(n.Name, p) + if n, err = n.Parent(); 
err != nil { + appctx.GetLogger(ctx). + Error().Err(err). + Str("path", p). + Interface("node", n). + Msg("Path()") + return + } + } + return +} + +// RootNode returns the root node of the storage +func (lu *Lookup) RootNode(ctx context.Context) (node *Node, err error) { + return &Node{ + lu: lu, + ID: "root", + Name: "", + ParentID: "", + Exists: true, + }, nil +} + +// HomeNode returns the home node of a user +func (lu *Lookup) HomeNode(ctx context.Context) (node *Node, err error) { + if !lu.Options.EnableHome { + return nil, errtypes.NotSupported("cephfs: home supported disabled") + } + + if node, err = lu.RootNode(ctx); err != nil { + return + } + node, err = lu.WalkPath(ctx, node, lu.mustGetUserLayout(ctx), nil) + return +} + +// WalkPath calls n.Child(segment) on every path segment in p starting at the node r +// If a function f is given it will be executed for every segment node, but not the root node r +func (lu *Lookup) WalkPath(ctx context.Context, r *Node, p string, f func(ctx context.Context, n *Node) error) (*Node, error) { + segments := strings.Split(strings.Trim(p, "/"), "/") + var err error + for i := range segments { + if r, err = r.Child(segments[i]); err != nil { + return r, err + } + // if an intermediate node is missing return not found + if !r.Exists && i < len(segments)-1 { + return r, errtypes.NotFound(segments[i]) + } + if f != nil { + if err = f(ctx, r); err != nil { + return r, err + } + } + } + return r, nil +} + +// HomeOrRootNode returns the users home node when home support is enabled. 
+// it returns the storages root node otherwise +func (lu *Lookup) HomeOrRootNode(ctx context.Context) (node *Node, err error) { + if lu.Options.EnableHome { + return lu.HomeNode(ctx) + } + return lu.RootNode(ctx) +} + +func (lu *Lookup) mustGetUserLayout(ctx context.Context) string { + u := user.ContextMustGetUser(ctx) + return templates.WithUser(u, lu.Options.UserLayout) +} + +func (lu *Lookup) toInternalPath(id string) string { + return filepath.Join(lu.Options.Root, "nodes", id) +} diff --git a/pkg/storage/fs/cephfs/metadata.go b/pkg/storage/fs/cephfs/metadata.go new file mode 100644 index 0000000000..b9817ed075 --- /dev/null +++ b/pkg/storage/fs/cephfs/metadata.go @@ -0,0 +1,212 @@ +// Copyright 2018-2021 CERN +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// +// In applying this license, CERN does not waive the privileges and immunities +// granted to it by virtue of its status as an Intergovernmental Organization +// or submit itself to any jurisdiction. 
+ +package cephfs + +import ( + "context" + "fmt" + "path/filepath" + "strconv" + "strings" + "time" + + provider "github.com/cs3org/go-cs3apis/cs3/storage/provider/v1beta1" + "github.com/cs3org/reva/pkg/appctx" + "github.com/cs3org/reva/pkg/errtypes" + "github.com/cs3org/reva/pkg/user" + "github.com/pkg/errors" + "github.com/pkg/xattr" +) + +func parseMTime(v string) (t time.Time, err error) { + p := strings.SplitN(v, ".", 2) + var sec, nsec int64 + if sec, err = strconv.ParseInt(p[0], 10, 64); err == nil { + if len(p) > 1 { + nsec, err = strconv.ParseInt(p[1], 10, 64) + } + } + return time.Unix(sec, nsec), err +} + +func (fs *cephfs) SetArbitraryMetadata(ctx context.Context, ref *provider.Reference, md *provider.ArbitraryMetadata) (err error) { + n, err := fs.lu.NodeFromResource(ctx, ref) + if err != nil { + return errors.Wrap(err, "cephfs: error resolving ref") + } + sublog := appctx.GetLogger(ctx).With().Interface("node", n).Logger() + + if !n.Exists { + err = errtypes.NotFound(filepath.Join(n.ParentID, n.Name)) + return err + } + + ok, err := fs.p.HasPermission(ctx, n, func(rp *provider.ResourcePermissions) bool { + // TODO add explicit SetArbitraryMetadata grant to CS3 api, tracked in https://github.com/cs3org/cs3apis/issues/91 + return rp.InitiateFileUpload + }) + switch { + case err != nil: + return errtypes.InternalError(err.Error()) + case !ok: + return errtypes.PermissionDenied(filepath.Join(n.ParentID, n.Name)) + } + + nodePath := n.lu.toInternalPath(n.ID) + + errs := []error{} + // TODO should we really continue updating when an error occurs? + if md.Metadata != nil { + if val, ok := md.Metadata["mtime"]; ok { + delete(md.Metadata, "mtime") + err := n.SetMtime(ctx, val) + if err != nil { + errs = append(errs, errors.Wrap(err, "could not set mtime")) + } + } + // TODO(jfd) special handling for atime? + // TODO(jfd) allow setting birth time (btime)? + // TODO(jfd) any other metadata that is interesting? fileid? 
+ // TODO unset when file is updated + // TODO unset when folder is updated or add timestamp to etag? + if val, ok := md.Metadata["etag"]; ok { + delete(md.Metadata, "etag") + err := n.SetEtag(ctx, val) + if err != nil { + errs = append(errs, errors.Wrap(err, "could not set etag")) + } + } + if val, ok := md.Metadata[_favoriteKey]; ok { + delete(md.Metadata, _favoriteKey) + if u, ok := user.ContextGetUser(ctx); ok { + if uid := u.GetId(); uid != nil { + if err := n.SetFavorite(uid, val); err != nil { + sublog.Error().Err(err). + Interface("user", u). + Msg("could not set favorite flag") + errs = append(errs, errors.Wrap(err, "could not set favorite flag")) + } + } else { + sublog.Error().Interface("user", u).Msg("user has no id") + errs = append(errs, errors.Wrap(errtypes.UserRequired("userrequired"), "user has no id")) + } + } else { + sublog.Error().Interface("user", u).Msg("error getting user from ctx") + errs = append(errs, errors.Wrap(errtypes.UserRequired("userrequired"), "error getting user from ctx")) + } + } + } + for k, v := range md.Metadata { + attrName := metadataPrefix + k + if err = xattr.Set(nodePath, attrName, []byte(v)); err != nil { + errs = append(errs, errors.Wrap(err, "cephfs: could not set metadata attribute "+attrName+" to "+k)) + } + } + + switch len(errs) { + case 0: + return fs.tp.Propagate(ctx, n) + case 1: + // TODO Propagate if anything changed + return errs[0] + default: + // TODO Propagate if anything changed + // TODO how to return multiple errors? 
+ return errors.New("multiple errors occurred, see log for details") + } +} + +func (fs *cephfs) UnsetArbitraryMetadata(ctx context.Context, ref *provider.Reference, keys []string) (err error) { + n, err := fs.lu.NodeFromResource(ctx, ref) + if err != nil { + return errors.Wrap(err, "cephfs: error resolving ref") + } + sublog := appctx.GetLogger(ctx).With().Interface("node", n).Logger() + + if !n.Exists { + err = errtypes.NotFound(filepath.Join(n.ParentID, n.Name)) + return err + } + + ok, err := fs.p.HasPermission(ctx, n, func(rp *provider.ResourcePermissions) bool { + // TODO use SetArbitraryMetadata grant to CS3 api, tracked in https://github.com/cs3org/cs3apis/issues/91 + return rp.InitiateFileUpload + }) + switch { + case err != nil: + return errtypes.InternalError(err.Error()) + case !ok: + return errtypes.PermissionDenied(filepath.Join(n.ParentID, n.Name)) + } + + nodePath := n.lu.toInternalPath(n.ID) + errs := []error{} + for _, k := range keys { + switch k { + case _favoriteKey: + if u, ok := user.ContextGetUser(ctx); ok { + // the favorite flag is specific to the user, so we need to incorporate the userid + if uid := u.GetId(); uid != nil { + fa := fmt.Sprintf("%s%s@%s", favPrefix, uid.GetOpaqueId(), uid.GetIdp()) + if err := xattr.Remove(nodePath, fa); err != nil { + sublog.Error().Err(err). + Interface("user", u). + Str("key", fa). + Msg("could not unset favorite flag") + errs = append(errs, errors.Wrap(err, "could not unset favorite flag")) + } + } else { + sublog.Error(). + Interface("user", u). + Msg("user has no id") + errs = append(errs, errors.Wrap(errtypes.UserRequired("userrequired"), "user has no id")) + } + } else { + sublog.Error(). + Interface("user", u). 
+ Msg("error getting user from ctx") + errs = append(errs, errors.Wrap(errtypes.UserRequired("userrequired"), "error getting user from ctx")) + } + default: + if err = xattr.Remove(nodePath, metadataPrefix+k); err != nil { + // a non-existing attribute will return an error, which we can ignore + // (using string compare because the error type is syscall.Errno and not wrapped/recognizable) + if e, ok := err.(*xattr.Error); !ok || !(e.Err.Error() == "no data available" || + // darwin + e.Err.Error() == "attribute not found") { + sublog.Error().Err(err). + Str("key", k). + Msg("could not unset metadata") + errs = append(errs, errors.Wrap(err, "could not unset metadata")) + } + } + } + } + switch len(errs) { + case 0: + return fs.tp.Propagate(ctx, n) + case 1: + // TODO Propagate if anything changed + return errs[0] + default: + // TODO Propagate if anything changed + // TODO how to return multiple errors? + return errors.New("multiple errors occurred, see log for details") + } +} diff --git a/pkg/storage/fs/cephfs/node.go b/pkg/storage/fs/cephfs/node.go new file mode 100644 index 0000000000..d7190a78a2 --- /dev/null +++ b/pkg/storage/fs/cephfs/node.go @@ -0,0 +1,912 @@ +// Copyright 2018-2021 CERN +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// +// In applying this license, CERN does not waive the privileges and immunities +// granted to it by virtue of its status as an Intergovernmental Organization +// or submit itself to any jurisdiction. 
+ +package cephfs + +import ( + "context" + "crypto/md5" + "encoding/hex" + "fmt" + "hash" + "io" + "os" + "path/filepath" + "strconv" + "strings" + "time" + + userpb "github.com/cs3org/go-cs3apis/cs3/identity/user/v1beta1" + provider "github.com/cs3org/go-cs3apis/cs3/storage/provider/v1beta1" + types "github.com/cs3org/go-cs3apis/cs3/types/v1beta1" + "github.com/cs3org/reva/internal/grpc/services/storageprovider" + "github.com/cs3org/reva/pkg/appctx" + "github.com/cs3org/reva/pkg/errtypes" + "github.com/cs3org/reva/pkg/mime" + "github.com/cs3org/reva/pkg/storage/utils/ace" + "github.com/cs3org/reva/pkg/user" + "github.com/pkg/errors" + "github.com/pkg/xattr" + "github.com/rs/zerolog/log" +) + +const ( + _shareTypesKey = "http://owncloud.org/ns/share-types" + _userShareType = "0" + + _favoriteKey = "http://owncloud.org/ns/favorite" + _checksumsKey = "http://owncloud.org/ns/checksums" + _quotaKey = "quota" + + _quotaUncalculated = "-1" + _quotaUnknown = "-2" + _quotaUnlimited = "-3" +) + +// Node represents a node in the tree and provides methods to get a Parent or Child instance +type Node struct { + lu *Lookup + ParentID string + ID string + Name string + owner *userpb.UserId + Exists bool +} + +func (n *Node) writeMetadata(owner *userpb.UserId) (err error) { + nodePath := n.lu.toInternalPath(n.ID) + if err = xattr.Set(nodePath, parentidAttr, []byte(n.ParentID)); err != nil { + return errors.Wrap(err, "cephfs: could not set parentid attribute") + } + if err = xattr.Set(nodePath, nameAttr, []byte(n.Name)); err != nil { + return errors.Wrap(err, "cephfs: could not set name attribute") + } + if owner == nil { + if err = xattr.Set(nodePath, ownerIDAttr, []byte("")); err != nil { + return errors.Wrap(err, "cephfs: could not set empty owner id attribute") + } + if err = xattr.Set(nodePath, ownerIDPAttr, []byte("")); err != nil { + return errors.Wrap(err, "cephfs: could not set empty owner idp attribute") + } + } else { + if err = xattr.Set(nodePath, ownerIDAttr, 
[]byte(owner.OpaqueId)); err != nil { + return errors.Wrap(err, "cephfs: could not set owner id attribute") + } + if err = xattr.Set(nodePath, ownerIDPAttr, []byte(owner.Idp)); err != nil { + return errors.Wrap(err, "cephfs: could not set owner idp attribute") + } + } + return +} + +// ReadRecycleItem reads a recycle item as a node +// TODO refactor the returned params into Node properties? would make all the path transformations go away... +func ReadRecycleItem(ctx context.Context, lu *Lookup, key string) (n *Node, trashItem string, deletedNodePath string, origin string, err error) { + + if key == "" { + return nil, "", "", "", errtypes.InternalError("key is empty") + } + + kp := strings.SplitN(key, ":", 2) + if len(kp) != 2 { + appctx.GetLogger(ctx).Error().Err(err).Str("key", key).Msg("malformed key") + return + } + trashItem = filepath.Join(lu.Options.Root, "trash", kp[0], kp[1]) + + var link string + link, err = os.Readlink(trashItem) + if err != nil { + appctx.GetLogger(ctx).Error().Err(err).Str("trashItem", trashItem).Msg("error reading trash link") + return + } + parts := strings.SplitN(filepath.Base(link), ".T.", 2) + if len(parts) != 2 { + appctx.GetLogger(ctx).Error().Err(err).Str("trashItem", trashItem).Interface("parts", parts).Msg("malformed trash link") + return + } + + n = &Node{ + lu: lu, + ID: parts[0], + } + + deletedNodePath = lu.toInternalPath(filepath.Base(link)) + + // lookup parent id in extended attributes + var attrBytes []byte + if attrBytes, err = xattr.Get(deletedNodePath, parentidAttr); err == nil { + n.ParentID = string(attrBytes) + } else { + return + } + // lookup name in extended attributes + if attrBytes, err = xattr.Get(deletedNodePath, nameAttr); err == nil { + n.Name = string(attrBytes) + } else { + return + } + // lookup ownerId in extended attributes + if attrBytes, err = xattr.Get(deletedNodePath, ownerIDAttr); err == nil { + n.owner = &userpb.UserId{} + n.owner.OpaqueId = string(attrBytes) + } else { + return + } + // 
lookup ownerIdp in extended attributes + if attrBytes, err = xattr.Get(deletedNodePath, ownerIDPAttr); err == nil { + if n.owner == nil { + n.owner = &userpb.UserId{} + } + n.owner.Idp = string(attrBytes) + } else { + return + } + + // get origin node + origin = "/" + + // lookup origin path in extended attributes + if attrBytes, err = xattr.Get(deletedNodePath, trashOriginAttr); err == nil { + origin = string(attrBytes) + } else { + log.Error().Err(err).Str("trashItem", trashItem).Str("link", link).Str("deletedNodePath", deletedNodePath).Msg("could not read origin path, restoring to /") + } + return +} + +// ReadNode creates a new instance from an id and checks if it exists +func ReadNode(ctx context.Context, lu *Lookup, id string) (n *Node, err error) { + n = &Node{ + lu: lu, + ID: id, + } + + nodePath := lu.toInternalPath(n.ID) + + // lookup parent id in extended attributes + var attrBytes []byte + attrBytes, err = xattr.Get(nodePath, parentidAttr) + switch { + case err == nil: + n.ParentID = string(attrBytes) + case isNoData(err): + return nil, errtypes.InternalError(err.Error()) + case isNotFound(err): + return n, nil // swallow not found, the node defaults to exists = false + default: + return nil, errtypes.InternalError(err.Error()) + } + // lookup name in extended attributes + if attrBytes, err = xattr.Get(nodePath, nameAttr); err == nil { + n.Name = string(attrBytes) + } else { + return + } + + var root *Node + if root, err = lu.HomeOrRootNode(ctx); err != nil { + return + } + parentID := n.ParentID + + log := appctx.GetLogger(ctx) + for parentID != root.ID { + log.Debug().Interface("node", n).Str("root.ID", root.ID).Msg("ReadNode()") + // walk to root to check node is not part of a deleted subtree + + if attrBytes, err = xattr.Get(lu.toInternalPath(parentID), parentidAttr); err == nil { + parentID = string(attrBytes) + log.Debug().Interface("node", n).Str("root.ID", root.ID).Str("parentID", parentID).Msg("ReadNode() found parent") + } else { + 
log.Error().Err(err).Interface("node", n).Str("root.ID", root.ID).Msg("ReadNode()") + if isNotFound(err) { + return nil, errtypes.NotFound(err.Error()) + } + return + } + } + + n.Exists = true + log.Debug().Interface("node", n).Msg("ReadNode() found node") + + return +} + +// Child returns the child node with the given name +func (n *Node) Child(name string) (c *Node, err error) { + c = &Node{ + lu: n.lu, + ParentID: n.ID, + Name: name, + } + var link string + if link, err = os.Readlink(filepath.Join(n.lu.toInternalPath(n.ID), name)); os.IsNotExist(err) { + err = nil // if the file does not exist we return a node that has Exists = false + return + } + if err != nil { + err = errors.Wrap(err, "cephfs: Wrap: readlink error") + return + } + if strings.HasPrefix(link, "../") { + c.Exists = true + c.ID = filepath.Base(link) + } else { + err = fmt.Errorf("cephfs: expected '../ prefix, got' %+v", link) + } + return +} + +// Parent returns the parent node +func (n *Node) Parent() (p *Node, err error) { + if n.ParentID == "" { + return nil, fmt.Errorf("cephfs: root has no parent") + } + p = &Node{ + lu: n.lu, + ID: n.ParentID, + } + + parentPath := n.lu.toInternalPath(n.ParentID) + + // lookup parent id in extended attributes + var attrBytes []byte + if attrBytes, err = xattr.Get(parentPath, parentidAttr); err == nil { + p.ParentID = string(attrBytes) + } else { + return + } + // lookup name in extended attributes + if attrBytes, err = xattr.Get(parentPath, nameAttr); err == nil { + p.Name = string(attrBytes) + } else { + return + } + + // check node exists + if _, err := os.Stat(parentPath); err == nil { + p.Exists = true + } + return +} + +// Owner returns the cached owner id or reads it from the extended attributes +// TODO can be private as only the AsResourceInfo uses it +func (n *Node) Owner() (o *userpb.UserId, err error) { + if n.owner != nil { + return n.owner, nil + } + + // FIXME ... do we return the owner of the reference or the owner of the target? 
+ // we don't really know the owner of the target ... and as the reference may point anywhere we cannot really find out + // but what are the permissions? all? none? the gateway has to fill in? + // TODO what if this is a reference? + nodePath := n.lu.toInternalPath(n.ID) + // lookup parent id in extended attributes + var attrBytes []byte + // lookup name in extended attributes + if attrBytes, err = xattr.Get(nodePath, ownerIDAttr); err == nil { + if n.owner == nil { + n.owner = &userpb.UserId{} + } + n.owner.OpaqueId = string(attrBytes) + } else { + return + } + // lookup name in extended attributes + if attrBytes, err = xattr.Get(nodePath, ownerIDPAttr); err == nil { + if n.owner == nil { + n.owner = &userpb.UserId{} + } + n.owner.Idp = string(attrBytes) + } else { + return + } + return n.owner, err +} + +// PermissionSet returns the permission set for the current user +// the parent nodes are not taken into account +func (n *Node) PermissionSet(ctx context.Context) *provider.ResourcePermissions { + u, ok := user.ContextGetUser(ctx) + if !ok { + appctx.GetLogger(ctx).Debug().Interface("node", n).Msg("no user in context, returning default permissions") + return noPermissions + } + if o, _ := n.Owner(); isSameUserID(u.Id, o) { + return ownerPermissions + } + // read the permissions for the current user from the acls of the current node + if np, err := n.ReadUserPermissions(ctx, u); err == nil { + return np + } + return noPermissions +} + +// calculateEtag returns a hash of fileid + tmtime (or mtime) +func calculateEtag(nodeID string, tmTime time.Time) (string, error) { + h := md5.New() + if _, err := io.WriteString(h, nodeID); err != nil { + return "", err + } + if tb, err := tmTime.UTC().MarshalBinary(); err == nil { + if _, err := h.Write(tb); err != nil { + return "", err + } + } else { + return "", err + } + return fmt.Sprintf(`"%x"`, h.Sum(nil)), nil +} + +// SetMtime sets the mtime and atime of a node +func (n *Node) SetMtime(ctx context.Context, mtime 
string) error { + sublog := appctx.GetLogger(ctx).With().Interface("node", n).Logger() + if mt, err := parseMTime(mtime); err == nil { + nodePath := n.lu.toInternalPath(n.ID) + // updating mtime also updates atime + if err := os.Chtimes(nodePath, mt, mt); err != nil { + sublog.Error().Err(err). + Time("mtime", mt). + Msg("could not set mtime") + return errors.Wrap(err, "could not set mtime") + } + } else { + sublog.Error().Err(err). + Str("mtime", mtime). + Msg("could not parse mtime") + return errors.Wrap(err, "could not parse mtime") + } + return nil +} + +// SetEtag sets the temporary etag of a node if it differs from the current etag +func (n *Node) SetEtag(ctx context.Context, val string) (err error) { + sublog := appctx.GetLogger(ctx).With().Interface("node", n).Logger() + nodePath := n.lu.toInternalPath(n.ID) + var tmTime time.Time + if tmTime, err = n.GetTMTime(); err != nil { + // no tmtime, use mtime + var fi os.FileInfo + if fi, err = os.Lstat(nodePath); err != nil { + return + } + tmTime = fi.ModTime() + } + var etag string + if etag, err = calculateEtag(n.ID, tmTime); err != nil { + return + } + + // sanitize etag + val = fmt.Sprintf("\"%s\"", strings.Trim(val, "\"")) + if etag == val { + sublog.Debug(). + Str("etag", val). + Msg("ignoring request to update identical etag") + return nil + } + // etag is only valid until the calculated etag changes, is part of propagation + return xattr.Set(nodePath, tmpEtagAttr, []byte(val)) +} + +// SetFavorite sets the favorite for the current user +// TODO we should not mess with the user here ... the favorites is now a user specific property for a file +// that cannot be mapped to extended attributes without leaking who has marked a file as a favorite +// it is a specific case of a tag, which is user individual as well +// TODO there are different types of tags +// 1. public that are managed by everyone +// 2. private tags that are only visible to the user +// 3. 
system tags that are only visible to the system +// 4. group tags that are only visible to a group ... +// urgh ... well this can be solved using different namespaces +// 1. public = p: +// 2. private = u:: for user specific +// 3. system = s: for system +// 4. group = g:: +// 5. app? = a:: for apps? +// obviously this only is secure when the u/s/g/a namespaces are not accessible by users in the filesystem +// public tags can be mapped to extended attributes +func (n *Node) SetFavorite(uid *userpb.UserId, val string) error { + nodePath := n.lu.toInternalPath(n.ID) + // the favorite flag is specific to the user, so we need to incorporate the userid + fa := fmt.Sprintf("%s%s@%s", favPrefix, uid.GetOpaqueId(), uid.GetIdp()) + return xattr.Set(nodePath, fa, []byte(val)) +} + +// AsResourceInfo return the node as CS3 ResourceInfo +func (n *Node) AsResourceInfo(ctx context.Context, rp *provider.ResourcePermissions, mdKeys []string) (ri *provider.ResourceInfo, err error) { + sublog := appctx.GetLogger(ctx).With().Interface("node", n).Logger() + + var fn string + nodePath := n.lu.toInternalPath(n.ID) + + var fi os.FileInfo + + nodeType := provider.ResourceType_RESOURCE_TYPE_INVALID + if fi, err = os.Lstat(nodePath); err != nil { + return + } + + var target []byte + switch { + case fi.IsDir(): + if target, err = xattr.Get(nodePath, referenceAttr); err == nil { + nodeType = provider.ResourceType_RESOURCE_TYPE_REFERENCE + } else { + nodeType = provider.ResourceType_RESOURCE_TYPE_CONTAINER + } + case fi.Mode().IsRegular(): + nodeType = provider.ResourceType_RESOURCE_TYPE_FILE + case fi.Mode()&os.ModeSymlink != 0: + nodeType = provider.ResourceType_RESOURCE_TYPE_SYMLINK + // TODO reference using ext attr on a symlink + // nodeType = provider.ResourceType_RESOURCE_TYPE_REFERENCE + } + + id := &provider.ResourceId{OpaqueId: n.ID} + + fn, err = n.lu.Path(ctx, n) + if err != nil { + return nil, err + } + + ri = &provider.ResourceInfo{ + Id: id, + Path: fn, + Type: nodeType, + 
MimeType: mime.Detect(nodeType == provider.ResourceType_RESOURCE_TYPE_CONTAINER, fn), + Size: uint64(fi.Size()), + Target: string(target), + PermissionSet: rp, + } + if nodeType == provider.ResourceType_RESOURCE_TYPE_CONTAINER { + ts, err := n.GetTreeSize() + if err == nil { + ri.Size = ts + } else { + ri.Size = 0 // make dirs always return 0 if it is unknown + sublog.Debug().Err(err).Msg("could not read treesize") + } + } + + if ri.Owner, err = n.Owner(); err != nil { + sublog.Debug().Err(err).Msg("could not determine owner") + } + + // TODO make etag of files use fileid and checksum + + var tmTime time.Time + if tmTime, err = n.GetTMTime(); err != nil { + // no tmtime, use mtime + tmTime = fi.ModTime() + } + + // use temporary etag if it is set + if b, err := xattr.Get(nodePath, tmpEtagAttr); err == nil { + ri.Etag = fmt.Sprintf(`"%x"`, string(b)) // TODO why do we convert string(b)? is the temporary etag stored as string? -> should we use bytes? use hex.EncodeToString? + } else if ri.Etag, err = calculateEtag(n.ID, tmTime); err != nil { + sublog.Debug().Err(err).Msg("could not calculate etag") + } + + // mtime uses tmtime if present + // TODO expose mtime and tmtime separately? 
+ un := tmTime.UnixNano() + ri.Mtime = &types.Timestamp{ + Seconds: uint64(un / 1000000000), + Nanos: uint32(un % 1000000000), + } + + mdKeysMap := make(map[string]struct{}) + for _, k := range mdKeys { + mdKeysMap[k] = struct{}{} + } + + var returnAllKeys bool + if _, ok := mdKeysMap["*"]; len(mdKeys) == 0 || ok { + returnAllKeys = true + } + + metadata := map[string]string{} + + // read favorite flag for the current user + if _, ok := mdKeysMap[_favoriteKey]; returnAllKeys || ok { + favorite := "" + if u, ok := user.ContextGetUser(ctx); ok { + // the favorite flag is specific to the user, so we need to incorporate the userid + if uid := u.GetId(); uid != nil { + fa := fmt.Sprintf("%s%s@%s", favPrefix, uid.GetOpaqueId(), uid.GetIdp()) + if val, err := xattr.Get(nodePath, fa); err == nil { + sublog.Debug(). + Str("favorite", fa). + Msg("found favorite flag") + favorite = string(val) + } + } else { + sublog.Error().Err(errtypes.UserRequired("userrequired")).Msg("user has no id") + } + } else { + sublog.Error().Err(errtypes.UserRequired("userrequired")).Msg("error getting user from ctx") + } + metadata[_favoriteKey] = favorite + } + + // share indicator + if _, ok := mdKeysMap[_shareTypesKey]; returnAllKeys || ok { + if n.hasUserShares(ctx) { + metadata[_shareTypesKey] = _userShareType + } + } + + // checksums + if _, ok := mdKeysMap[_checksumsKey]; (nodeType == provider.ResourceType_RESOURCE_TYPE_FILE) && returnAllKeys || ok { + // TODO which checksum was requested? sha1 adler32 or md5? for now hardcode sha1? 
+ readChecksumIntoResourceChecksum(ctx, nodePath, storageprovider.XSSHA1, ri) + readChecksumIntoOpaque(ctx, nodePath, storageprovider.XSMD5, ri) + readChecksumIntoOpaque(ctx, nodePath, storageprovider.XSAdler32, ri) + } + + // quota + if _, ok := mdKeysMap[_quotaKey]; (nodeType == provider.ResourceType_RESOURCE_TYPE_CONTAINER) && returnAllKeys || ok { + var quotaPath string + if n.lu.Options.EnableHome { + if r, err := n.lu.HomeNode(ctx); err == nil { + quotaPath = n.lu.toInternalPath(r.ID) + readQuotaIntoOpaque(ctx, quotaPath, ri) + } else { + sublog.Error().Err(err).Msg("error determining home node for quota") + } + } else { + if r, err := n.lu.RootNode(ctx); err == nil { + quotaPath = n.lu.toInternalPath(r.ID) + readQuotaIntoOpaque(ctx, quotaPath, ri) + } else { + sublog.Error().Err(err).Msg("error determining root node for quota") + } + } + } + + // only read the requested metadata attributes + attrs, err := xattr.List(nodePath) + if err != nil { + sublog.Error().Err(err).Msg("error getting list of extended attributes") + } else { + for i := range attrs { + // filter out non-custom properties + if !strings.HasPrefix(attrs[i], metadataPrefix) { + continue + } + // only read when key was requested + k := attrs[i][len(metadataPrefix):] + if _, ok := mdKeysMap[k]; returnAllKeys || ok { + if val, err := xattr.Get(nodePath, attrs[i]); err == nil { + metadata[k] = string(val) + } else { + sublog.Error().Err(err). + Str("entry", attrs[i]). + Msg("error retrieving xattr metadata") + } + } + + } + } + ri.ArbitraryMetadata = &provider.ArbitraryMetadata{ + Metadata: metadata, + } + + sublog.Debug(). + Interface("ri", ri). 
+ Msg("AsResourceInfo") + + return ri, nil +} + +func readChecksumIntoResourceChecksum(ctx context.Context, nodePath, algo string, ri *provider.ResourceInfo) { + v, err := xattr.Get(nodePath, checksumPrefix+algo) + switch { + case err == nil: + ri.Checksum = &provider.ResourceChecksum{ + Type: storageprovider.PKG2GRPCXS(algo), + Sum: hex.EncodeToString(v), + } + case isNoData(err): + appctx.GetLogger(ctx).Debug().Err(err).Str("nodepath", nodePath).Str("algorithm", algo).Msg("checksum not set") + case isNotFound(err): + appctx.GetLogger(ctx).Error().Err(err).Str("nodepath", nodePath).Str("algorithm", algo).Msg("file not fount") + default: + appctx.GetLogger(ctx).Error().Err(err).Str("nodepath", nodePath).Str("algorithm", algo).Msg("could not read checksum") + } +} +func readChecksumIntoOpaque(ctx context.Context, nodePath, algo string, ri *provider.ResourceInfo) { + v, err := xattr.Get(nodePath, checksumPrefix+algo) + switch { + case err == nil: + if ri.Opaque == nil { + ri.Opaque = &types.Opaque{ + Map: map[string]*types.OpaqueEntry{}, + } + } + ri.Opaque.Map[algo] = &types.OpaqueEntry{ + Decoder: "plain", + Value: []byte(hex.EncodeToString(v)), + } + case isNoData(err): + appctx.GetLogger(ctx).Debug().Err(err).Str("nodepath", nodePath).Str("algorithm", algo).Msg("checksum not set") + case isNotFound(err): + appctx.GetLogger(ctx).Error().Err(err).Str("nodepath", nodePath).Str("algorithm", algo).Msg("file not fount") + default: + appctx.GetLogger(ctx).Error().Err(err).Str("nodepath", nodePath).Str("algorithm", algo).Msg("could not read checksum") + } +} + +// quota is always stored on the root node +func readQuotaIntoOpaque(ctx context.Context, nodePath string, ri *provider.ResourceInfo) { + v, err := xattr.Get(nodePath, quotaAttr) + switch { + case err == nil: + // make sure we have a proper signed int + // we use the same magic numbers to indicate: + // -1 = uncalculated + // -2 = unknown + // -3 = unlimited + if _, err := strconv.ParseInt(string(v), 10, 64); err 
== nil { + if ri.Opaque == nil { + ri.Opaque = &types.Opaque{ + Map: map[string]*types.OpaqueEntry{}, + } + } + ri.Opaque.Map[_quotaKey] = &types.OpaqueEntry{ + Decoder: "plain", + Value: v, + } + } else { + appctx.GetLogger(ctx).Error().Err(err).Str("nodepath", nodePath).Str("quota", string(v)).Msg("malformed quota") + } + case isNoData(err): + appctx.GetLogger(ctx).Debug().Err(err).Str("nodepath", nodePath).Msg("quota not set") + case isNotFound(err): + appctx.GetLogger(ctx).Error().Err(err).Str("nodepath", nodePath).Msg("file not found when reading quota") + default: + appctx.GetLogger(ctx).Error().Err(err).Str("nodepath", nodePath).Msg("could not read quota") + } +} + +// CalculateTreeSize will sum up the size of all children of a node +func (n *Node) CalculateTreeSize(ctx context.Context) (uint64, error) { + var size uint64 + // TODO check if this is a dir? + nodePath := n.lu.toInternalPath(n.ID) + + f, err := os.Open(nodePath) + if err != nil { + appctx.GetLogger(ctx).Error().Err(err).Str("nodepath", nodePath).Msg("could not open dir") + return 0, err + } + defer f.Close() + + names, err := f.Readdirnames(0) + if err != nil { + appctx.GetLogger(ctx).Error().Err(err).Str("nodepath", nodePath).Msg("could not read dirnames") + return 0, err + } + for i := range names { + cPath := filepath.Join(nodePath, names[i]) + info, err := os.Stat(cPath) + if err != nil { + appctx.GetLogger(ctx).Error().Err(err).Str("childpath", cPath).Msg("could not stat child entry") + continue // continue after an error + } + if !info.IsDir() { + size += uint64(info.Size()) + } else { + // read from attr + var b []byte + // xattr.Get will follow the symlink + if b, err = xattr.Get(cPath, treesizeAttr); err != nil { + // TODO recursively descend and recalculate treesize + continue // continue after an error + } + csize, err := strconv.ParseUint(string(b), 10, 64) + if err != nil { + // TODO recursively descend and recalculate treesize + continue // continue after an error + } + size += 
csize + } + } + return size, err + +} + +// HasPropagation checks if the propagation attribute exists and is set to "1" +func (n *Node) HasPropagation() (propagation bool) { + if b, err := xattr.Get(n.lu.toInternalPath(n.ID), propagationAttr); err == nil { + return string(b) == "1" + } + return false +} + +// GetTMTime reads the tmtime from the extended attributes +func (n *Node) GetTMTime() (tmTime time.Time, err error) { + var b []byte + if b, err = xattr.Get(n.lu.toInternalPath(n.ID), treeMTimeAttr); err != nil { + return + } + return time.Parse(time.RFC3339Nano, string(b)) +} + +// SetTMTime writes the tmtime to the extended attributes +func (n *Node) SetTMTime(t time.Time) (err error) { + return xattr.Set(n.lu.toInternalPath(n.ID), treeMTimeAttr, []byte(t.UTC().Format(time.RFC3339Nano))) +} + +// GetTreeSize reads the treesize from the extended attributes +func (n *Node) GetTreeSize() (treesize uint64, err error) { + var b []byte + if b, err = xattr.Get(n.lu.toInternalPath(n.ID), treesizeAttr); err != nil { + return + } + return strconv.ParseUint(string(b), 10, 64) +} + +// SetTreeSize writes the treesize to the extended attributes +func (n *Node) SetTreeSize(ts uint64) (err error) { + return xattr.Set(n.lu.toInternalPath(n.ID), treesizeAttr, []byte(strconv.FormatUint(ts, 10))) +} + +// SetChecksum writes the checksum with the given checksum type to the extended attributes +func (n *Node) SetChecksum(csType string, h hash.Hash) (err error) { + return xattr.Set(n.lu.toInternalPath(n.ID), checksumPrefix+csType, h.Sum(nil)) +} + +// UnsetTempEtag removes the temporary etag attribute +func (n *Node) UnsetTempEtag() (err error) { + if err = xattr.Remove(n.lu.toInternalPath(n.ID), tmpEtagAttr); err != nil { + if e, ok := err.(*xattr.Error); ok && (e.Err.Error() == "no data available" || + // darwin + e.Err.Error() == "attribute not found") { + return nil + } + } + return err +} + +// ReadUserPermissions will assemble the permissions for the current user on the given 
node without parent nodes +func (n *Node) ReadUserPermissions(ctx context.Context, u *userpb.User) (ap *provider.ResourcePermissions, err error) { + // check if the current user is the owner + o, err := n.Owner() + if err != nil { + // TODO check if a parent folder has the owner set? + appctx.GetLogger(ctx).Error().Err(err).Interface("node", n).Msg("could not determine owner, returning default permissions") + return noPermissions, err + } + if o.OpaqueId == "" { + // this happens for root nodes in the storage. the extended attributes are set to emptystring to indicate: no owner + // TODO what if no owner is set but grants are present? + return noOwnerPermissions, nil + } + if isSameUserID(u.Id, o) { + appctx.GetLogger(ctx).Debug().Interface("node", n).Msg("user is owner, returning owner permissions") + return ownerPermissions, nil + } + + ap = &provider.ResourcePermissions{} + + // for an efficient group lookup convert the list of groups to a map + // groups are just strings ... groupnames ... or group ids ??? AAARGH !!! + groupsMap := make(map[string]bool, len(u.Groups)) + for i := range u.Groups { + groupsMap[u.Groups[i]] = true + } + + var g *provider.Grant + + // we read all grantees from the node + var grantees []string + if grantees, err = n.ListGrantees(ctx); err != nil { + appctx.GetLogger(ctx).Error().Err(err).Interface("node", n).Msg("error listing grantees") + return nil, err + } + + // instead of making n getxattr syscalls we are going to list the acls and filter them here + // we have two options here: + // 1. we can start iterating over the acls / grants on the node or + // 2. we can iterate over the number of groups + // The current implementation tries to be defensive for cases where users have hundreds or thousands of groups, so we iterate over the existing acls. 
+ userace := grantPrefix + _userAcePrefix + u.Id.OpaqueId + userFound := false + for i := range grantees { + switch { + // we only need to find the user once + case !userFound && grantees[i] == userace: + g, err = n.ReadGrant(ctx, grantees[i]) + case strings.HasPrefix(grantees[i], grantPrefix+_groupAcePrefix): // only check group grantees + gr := strings.TrimPrefix(grantees[i], grantPrefix+_groupAcePrefix) + if groupsMap[gr] { + g, err = n.ReadGrant(ctx, grantees[i]) + } else { + // no need to check attribute + continue + } + default: + // no need to check attribute + continue + } + + switch { + case err == nil: + addPermissions(ap, g.GetPermissions()) + case isNoData(err): + err = nil + appctx.GetLogger(ctx).Error().Interface("node", n).Str("grant", grantees[i]).Interface("grantees", grantees).Msg("grant vanished from node after listing") + // continue with next segment + default: + appctx.GetLogger(ctx).Error().Err(err).Interface("node", n).Str("grant", grantees[i]).Msg("error reading permissions") + // continue with next segment + } + } + + appctx.GetLogger(ctx).Debug().Interface("permissions", ap).Interface("node", n).Interface("user", u).Msg("returning aggregated permissions") + return ap, nil +} + +// ListGrantees lists the grantees of the current node +// We don't want to wast time and memory by creating grantee objects. 
+// The function will return a list of opaque strings that can be used to make a ReadGrant call +func (n *Node) ListGrantees(ctx context.Context) (grantees []string, err error) { + var attrs []string + if attrs, err = xattr.List(n.lu.toInternalPath(n.ID)); err != nil { + appctx.GetLogger(ctx).Error().Err(err).Interface("node", n).Msg("error listing attributes") + return nil, err + } + for i := range attrs { + if strings.HasPrefix(attrs[i], grantPrefix) { + grantees = append(grantees, attrs[i]) + } + } + return +} + +// ReadGrant reads a CS3 grant +func (n *Node) ReadGrant(ctx context.Context, grantee string) (g *provider.Grant, err error) { + var b []byte + if b, err = xattr.Get(n.lu.toInternalPath(n.ID), grantee); err != nil { + return nil, err + } + var e *ace.ACE + if e, err = ace.Unmarshal(strings.TrimPrefix(grantee, grantPrefix), b); err != nil { + return nil, err + } + return e.Grant(), nil +} + +func (n *Node) hasUserShares(ctx context.Context) bool { + g, err := n.ListGrantees(ctx) + if err != nil { + appctx.GetLogger(ctx).Error().Err(err).Msg("hasUserShares: listGrantees") + return false + } + + for i := range g { + if strings.Contains(g[i], grantPrefix+_userAcePrefix) { + return true + } + } + return false +} diff --git a/pkg/storage/fs/cephfs/option.go b/pkg/storage/fs/cephfs/option.go new file mode 100644 index 0000000000..22ac4f9e3d --- /dev/null +++ b/pkg/storage/fs/cephfs/option.go @@ -0,0 +1,104 @@ +// Copyright 2018-2021 CERN +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+// See the License for the specific language governing permissions and +// limitations under the License. +// +// In applying this license, CERN does not waive the privileges and immunities +// granted to it by virtue of its status as an Intergovernmental Organization +// or submit itself to any jurisdiction. + +package cephfs + +// Option defines a single option function. +type Option func(o *Options) + +// Options defines the available options for this package. +type Options struct { + // ocis fs works on top of a dir of uuid nodes + Root string `mapstructure:"root"` + + // UserLayout describes the relative path from the storage's root node to the users home node. + UserLayout string `mapstructure:"user_layout"` + + // TODO NodeLayout option to save nodes as eg. nodes/1d/d8/1dd84abf-9466-4e14-bb86-02fc4ea3abcf + ShareFolder string `mapstructure:"share_folder"` + + // EnableHome enables the creation of home directories. + EnableHome bool `mapstructure:"enable_home"` + + // propagate mtime changes as tmtime (tree modification time) to the parent directory when user.ocis.propagation=1 is set on a node + TreeTimeAccounting bool `mapstructure:"treetime_accounting"` + + // propagate size changes as treesize + TreeSizeAccounting bool `mapstructure:"treesize_accounting"` + + // set an owner for the root node + Owner string `mapstructure:"owner"` + + // Ceph config to connect + CephConf string `mapstructure:"ceph_conf"` +} + +// newOptions initializes the available default options. +/* for future use, commented to make linter happy +func newOptions(opts ...Option) Options { + opt := Options{} + + for _, o := range opts { + o(&opt) + } + + return opt +} +*/ + +// Root provides a function to set the root option. +func Root(val string) Option { + return func(o *Options) { + o.Root = val + } +} + +// UserLayout provides a function to set the user layout option. 
+func UserLayout(val string) Option { + return func(o *Options) { + o.UserLayout = val + } +} + +// ShareFolder provides a function to set the ShareFolder option. +func ShareFolder(val string) Option { + return func(o *Options) { + o.ShareFolder = val + } +} + +// EnableHome provides a function to set the EnableHome option. +func EnableHome(val bool) Option { + return func(o *Options) { + o.EnableHome = val + } +} + +// TreeTimeAccounting provides a function to set the TreeTimeAccounting option. +func TreeTimeAccounting(val bool) Option { + return func(o *Options) { + o.TreeTimeAccounting = val + } +} + +// TreeSizeAccounting provides a function to set the TreeSizeAccounting option. +func TreeSizeAccounting(val bool) Option { + return func(o *Options) { + o.TreeSizeAccounting = val + } +} diff --git a/pkg/storage/fs/cephfs/permissions.go b/pkg/storage/fs/cephfs/permissions.go new file mode 100644 index 0000000000..7305d832a3 --- /dev/null +++ b/pkg/storage/fs/cephfs/permissions.go @@ -0,0 +1,275 @@ +// Copyright 2018-2021 CERN +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// +// In applying this license, CERN does not waive the privileges and immunities +// granted to it by virtue of its status as an Intergovernmental Organization +// or submit itself to any jurisdiction. 
+ +package cephfs + +import ( + "context" + "strings" + "syscall" + + userv1beta1 "github.com/cs3org/go-cs3apis/cs3/identity/user/v1beta1" + provider "github.com/cs3org/go-cs3apis/cs3/storage/provider/v1beta1" + "github.com/cs3org/reva/pkg/appctx" + "github.com/cs3org/reva/pkg/user" + "github.com/pkg/errors" + "github.com/pkg/xattr" +) + +const ( + _userAcePrefix = "u:" + _groupAcePrefix = "g:" +) + +var noPermissions *provider.ResourcePermissions = &provider.ResourcePermissions{ + // no permissions +} + +// permissions for nodes that don't have an owner set, eg the root node +var noOwnerPermissions *provider.ResourcePermissions = &provider.ResourcePermissions{ + Stat: true, +} +var ownerPermissions *provider.ResourcePermissions = &provider.ResourcePermissions{ + // all permissions + AddGrant: true, + CreateContainer: true, + Delete: true, + GetPath: true, + GetQuota: true, + InitiateFileDownload: true, + InitiateFileUpload: true, + ListContainer: true, + ListFileVersions: true, + ListGrants: true, + ListRecycle: true, + Move: true, + PurgeRecycle: true, + RemoveGrant: true, + RestoreFileVersion: true, + RestoreRecycleItem: true, + Stat: true, + UpdateGrant: true, +} + +// Permissions implements permission checks +type Permissions struct { + lu *Lookup +} + +// AssemblePermissions will assemble the permissions for the current user on the given node, taking into account all parent nodes +func (p *Permissions) AssemblePermissions(ctx context.Context, n *Node) (ap *provider.ResourcePermissions, err error) { + u, ok := user.ContextGetUser(ctx) + if !ok { + appctx.GetLogger(ctx).Debug().Interface("node", n).Msg("no user in context, returning default permissions") + return noPermissions, nil + } + // check if the current user is the owner + o, err := n.Owner() + if err != nil { + // TODO check if a parent folder has the owner set? 
+ appctx.GetLogger(ctx).Error().Err(err).Interface("node", n).Msg("could not determine owner, returning default permissions") + return noPermissions, err + } + if o.OpaqueId == "" { + // this happens for root nodes in the storage. the extended attributes are set to emptystring to indicate: no owner + // TODO what if no owner is set but grants are present? + return noOwnerPermissions, nil + } + if isSameUserID(u.Id, o) { + appctx.GetLogger(ctx).Debug().Interface("node", n).Msg("user is owner, returning owner permissions") + return ownerPermissions, nil + } + + // determine root + var rn *Node + if rn, err = p.lu.RootNode(ctx); err != nil { + return nil, err + } + + cn := n + + ap = &provider.ResourcePermissions{} + + // for an efficient group lookup convert the list of groups to a map + // groups are just strings ... groupnames ... or group ids ??? AAARGH !!! + groupsMap := make(map[string]bool, len(u.Groups)) + for i := range u.Groups { + groupsMap[u.Groups[i]] = true + } + + // for all segments, starting at the leaf + for cn.ID != rn.ID { + + if np, err := cn.ReadUserPermissions(ctx, u); err == nil { + addPermissions(ap, np) + } else { + appctx.GetLogger(ctx).Error().Err(err).Interface("node", cn).Msg("error reading permissions") + // continue with next segment + } + + if cn, err = cn.Parent(); err != nil { + return ap, errors.Wrap(err, "cephfs: error getting parent "+cn.ParentID) + } + } + + appctx.GetLogger(ctx).Debug().Interface("permissions", ap).Interface("node", n).Interface("user", u).Msg("returning agregated permissions") + return ap, nil +} + +// TODO we should use a bitfield for this ... 
+func addPermissions(l *provider.ResourcePermissions, r *provider.ResourcePermissions) { + l.AddGrant = l.AddGrant || r.AddGrant + l.CreateContainer = l.CreateContainer || r.CreateContainer + l.Delete = l.Delete || r.Delete + l.GetPath = l.GetPath || r.GetPath + l.GetQuota = l.GetQuota || r.GetQuota + l.InitiateFileDownload = l.InitiateFileDownload || r.InitiateFileDownload + l.InitiateFileUpload = l.InitiateFileUpload || r.InitiateFileUpload + l.ListContainer = l.ListContainer || r.ListContainer + l.ListFileVersions = l.ListFileVersions || r.ListFileVersions + l.ListGrants = l.ListGrants || r.ListGrants + l.ListRecycle = l.ListRecycle || r.ListRecycle + l.Move = l.Move || r.Move + l.PurgeRecycle = l.PurgeRecycle || r.PurgeRecycle + l.RemoveGrant = l.RemoveGrant || r.RemoveGrant + l.RestoreFileVersion = l.RestoreFileVersion || r.RestoreFileVersion + l.RestoreRecycleItem = l.RestoreRecycleItem || r.RestoreRecycleItem + l.Stat = l.Stat || r.Stat + l.UpdateGrant = l.UpdateGrant || r.UpdateGrant +} + +// HasPermission call check() for every node up to the root until check returns true +func (p *Permissions) HasPermission(ctx context.Context, n *Node, check func(*provider.ResourcePermissions) bool) (can bool, err error) { + + var u *userv1beta1.User + var perms *provider.ResourcePermissions + if u, perms = p.getUserAndPermissions(ctx, n); perms != nil { + return check(perms), nil + } + + // determine root + var rn *Node + if rn, err = p.lu.RootNode(ctx); err != nil { + return false, err + } + + cn := n + + // for an efficient group lookup convert the list of groups to a map + // groups are just strings ... groupnames ... or group ids ??? AAARGH !!! 
+ groupsMap := make(map[string]bool, len(u.Groups)) + for i := range u.Groups { + groupsMap[u.Groups[i]] = true + } + + var g *provider.Grant + // for all segments, starting at the leaf + for cn.ID != rn.ID { + + var grantees []string + if grantees, err = cn.ListGrantees(ctx); err != nil { + appctx.GetLogger(ctx).Error().Err(err).Interface("node", cn).Msg("error listing grantees") + return false, err + } + + userace := grantPrefix + _userAcePrefix + u.Id.OpaqueId + userFound := false + for i := range grantees { + // we only need the find the user once per node + switch { + case !userFound && grantees[i] == userace: + g, err = cn.ReadGrant(ctx, grantees[i]) + case strings.HasPrefix(grantees[i], grantPrefix+_groupAcePrefix): + gr := strings.TrimPrefix(grantees[i], grantPrefix+_groupAcePrefix) + if groupsMap[gr] { + g, err = cn.ReadGrant(ctx, grantees[i]) + } else { + // no need to check attribute + continue + } + default: + // no need to check attribute + continue + } + + switch { + case err == nil: + appctx.GetLogger(ctx).Debug().Interface("node", cn).Str("grant", grantees[i]).Interface("permissions", g.GetPermissions()).Msg("checking permissions") + if check(g.GetPermissions()) { + return true, nil + } + case isNoData(err): + err = nil + appctx.GetLogger(ctx).Error().Interface("node", cn).Str("grant", grantees[i]).Interface("grantees", grantees).Msg("grant vanished from node after listing") + default: + appctx.GetLogger(ctx).Error().Err(err).Interface("node", cn).Str("grant", grantees[i]).Msg("error reading permissions") + return false, err + } + } + + if cn, err = cn.Parent(); err != nil { + return false, errors.Wrap(err, "cephfs: error getting parent "+cn.ParentID) + } + } + + appctx.GetLogger(ctx).Debug().Interface("permissions", noPermissions).Interface("node", n).Interface("user", u).Msg("no grant found, returning default permissions") + return false, nil +} + +func (p *Permissions) getUserAndPermissions(ctx context.Context, n *Node) (*userv1beta1.User, 
*provider.ResourcePermissions) { + u, ok := user.ContextGetUser(ctx) + if !ok { + appctx.GetLogger(ctx).Debug().Interface("node", n).Msg("no user in context, returning default permissions") + return nil, noPermissions + } + // check if the current user is the owner + o, err := n.Owner() + if err != nil { + appctx.GetLogger(ctx).Error().Err(err).Interface("node", n).Msg("could not determine owner, returning default permissions") + return nil, noPermissions + } + if o.OpaqueId == "" { + // this happens for root nodes in the storage. the extended attributes are set to emptystring to indicate: no owner + // TODO what if no owner is set but grants are present? + return nil, noOwnerPermissions + } + if isSameUserID(u.Id, o) { + appctx.GetLogger(ctx).Debug().Interface("node", n).Msg("user is owner, returning owner permissions") + return u, ownerPermissions + } + return u, nil +} +func isNoData(err error) bool { + if xerr, ok := err.(*xattr.Error); ok { + if serr, ok2 := xerr.Err.(syscall.Errno); ok2 { + return serr == syscall.ENODATA + } + } + return false +} + +// The os not exists error is buried inside the xattr error, +// so we cannot just use os.IsNotExists(). +func isNotFound(err error) bool { + if xerr, ok := err.(*xattr.Error); ok { + if serr, ok2 := xerr.Err.(syscall.Errno); ok2 { + return serr == syscall.ENOENT + } + } + return false +} diff --git a/pkg/storage/fs/cephfs/recycle.go b/pkg/storage/fs/cephfs/recycle.go new file mode 100644 index 0000000000..9b3de0d726 --- /dev/null +++ b/pkg/storage/fs/cephfs/recycle.go @@ -0,0 +1,261 @@ +// Copyright 2018-2021 CERN +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// +// In applying this license, CERN does not waive the privileges and immunities +// granted to it by virtue of its status as an Intergovernmental Organization +// or submit itself to any jurisdiction. + +package cephfs + +import ( + "context" + "os" + "path/filepath" + "strings" + "time" + + provider "github.com/cs3org/go-cs3apis/cs3/storage/provider/v1beta1" + types "github.com/cs3org/go-cs3apis/cs3/types/v1beta1" + "github.com/cs3org/reva/pkg/appctx" + "github.com/cs3org/reva/pkg/errtypes" + "github.com/cs3org/reva/pkg/user" + "github.com/pkg/errors" + "github.com/pkg/xattr" +) + +// Recycle items are stored inside the node folder and start with the uuid of the deleted node. +// The `.T.` indicates it is a trash item and what follows is the timestamp of the deletion. +// The deleted file is kept in the same location/dir as the original node. This prevents deletes +// from triggering cross storage moves when the trash is accidentally stored on another partition, +// because the admin mounted a different partition there. +// TODO For an efficient listing of deleted nodes the ocis storages trash folder should have +// contain a directory with symlinks to trash files for every userid/"root" + +func (fs *cephfs) ListRecycle(ctx context.Context) (items []*provider.RecycleItem, err error) { + log := appctx.GetLogger(ctx) + + trashRoot := fs.getRecycleRoot(ctx) + + items = make([]*provider.RecycleItem, 0) + + // TODO how do we check if the storage allows listing the recycle for the current user? check owner of the root of the storage? 
+ // use permissions ReadUserPermissions? + if fs.o.EnableHome { + if !ownerPermissions.ListContainer { + log.Debug().Msg("owner not allowed to list trash") + return items, errtypes.PermissionDenied("owner not allowed to list trash") + } + } else { + if !noPermissions.ListContainer { + log.Debug().Msg("default permissions prevent listing trash") + return items, errtypes.PermissionDenied("default permissions prevent listing trash") + } + } + + f, err := os.Open(trashRoot) + if err != nil { + if os.IsNotExist(err) { + return items, nil + } + return nil, errors.Wrap(err, "tree: error listing "+trashRoot) + } + defer f.Close() + + names, err := f.Readdirnames(0) + if err != nil { + return nil, err + } + for i := range names { + var trashnode string + trashnode, err = os.Readlink(filepath.Join(trashRoot, names[i])) + if err != nil { + log.Error().Err(err).Str("trashRoot", trashRoot).Str("name", names[i]).Msg("error reading trash link, skipping") + err = nil + continue + } + parts := strings.SplitN(filepath.Base(trashnode), ".T.", 2) + if len(parts) != 2 { + log.Error().Err(err).Str("trashRoot", trashRoot).Str("name", names[i]).Str("trashnode", trashnode).Interface("parts", parts).Msg("malformed trash link, skipping") + continue + } + + nodePath := fs.lu.toInternalPath(filepath.Base(trashnode)) + md, err := os.Stat(nodePath) + if err != nil { + log.Error().Err(err).Str("trashRoot", trashRoot).Str("name", names[i]).Str("trashnode", trashnode).Interface("parts", parts).Msg("could not stat trash item, skipping") + continue + } + + item := &provider.RecycleItem{ + Type: getResourceType(md.IsDir()), + Size: uint64(md.Size()), + Key: filepath.Base(trashRoot) + ":" + parts[0], // glue using :, a / is interpreted as a path and only the node id will reach the other methods + } + if deletionTime, err := time.Parse(time.RFC3339Nano, parts[1]); err == nil { + item.DeletionTime = &types.Timestamp{ + Seconds: uint64(deletionTime.Unix()), + // TODO nanos + } + } else { + 
log.Error().Err(err).Str("trashRoot", trashRoot).Str("name", names[i]).Str("link", trashnode).Interface("parts", parts).Msg("could parse time format, ignoring") + } + + // lookup origin path in extended attributes + var attrBytes []byte + if attrBytes, err = xattr.Get(nodePath, trashOriginAttr); err == nil { + item.Path = string(attrBytes) + } else { + log.Error().Err(err).Str("trashRoot", trashRoot).Str("name", names[i]).Str("link", trashnode).Msg("could not read origin path, skipping") + continue + } + // TODO filter results by permission ... on the original parent? or the trashed node? + // if it were on the original parent it would be possible to see files that were trashed before the current user got access + // so -> check the trash node itself + // hmm listing trash currently lists the current users trash or the 'root' trash. from ocs only the home storage is queried for trash items. + // for now we can only really check if the current user is the owner + if attrBytes, err = xattr.Get(nodePath, ownerIDAttr); err == nil { + if fs.o.EnableHome { + u := user.ContextMustGetUser(ctx) + if u.Id.OpaqueId != string(attrBytes) { + log.Warn().Str("trashRoot", trashRoot).Str("name", names[i]).Str("link", trashnode).Msg("trash item not owned by current user, skipping") + continue + } + } + } else { + log.Error().Err(err).Str("trashRoot", trashRoot).Str("name", names[i]).Str("link", trashnode).Msg("could not read owner, skipping") + continue + } + + items = append(items, item) + } + return +} + +func (fs *cephfs) RestoreRecycleItem(ctx context.Context, key string) (err error) { + log := appctx.GetLogger(ctx) + + var rn *Node + var trashItem string + var deletedNodePath string + var origin string + if rn, trashItem, deletedNodePath, origin, err = ReadRecycleItem(ctx, fs.lu, key); err != nil { + return + } + + // check permissions of deleted node + ok, err := fs.p.HasPermission(ctx, rn, func(rp *provider.ResourcePermissions) bool { + return rp.RestoreRecycleItem + }) + 
switch { + case err != nil: + return errtypes.InternalError(err.Error()) + case !ok: + return errtypes.PermissionDenied(key) + } + + // link to origin + var n *Node + n, err = fs.lu.NodeFromPath(ctx, origin) + if err != nil { + return + } + + if n.Exists { + return errtypes.AlreadyExists("origin already exists") + } + + // add the entry for the parent dir + err = os.Symlink("../"+rn.ID, filepath.Join(fs.lu.toInternalPath(n.ParentID), n.Name)) + if err != nil { + return + } + + // rename to node only name, so it is picked up by id + nodePath := fs.lu.toInternalPath(rn.ID) + err = os.Rename(deletedNodePath, nodePath) + if err != nil { + return + } + + n.Exists = true + + // delete item link in trash + if err = os.Remove(trashItem); err != nil { + log.Error().Err(err).Str("trashItem", trashItem).Msg("error deleting trashitem") + } + return fs.tp.Propagate(ctx, n) + +} + +func (fs *cephfs) PurgeRecycleItem(ctx context.Context, key string) (err error) { + log := appctx.GetLogger(ctx) + + var rn *Node + var trashItem string + var deletedNodePath string + if rn, trashItem, deletedNodePath, _, err = ReadRecycleItem(ctx, fs.lu, key); err != nil { + return + } + + // check permissions of deleted node + ok, err := fs.p.HasPermission(ctx, rn, func(rp *provider.ResourcePermissions) bool { + return rp.PurgeRecycle + }) + switch { + case err != nil: + return errtypes.InternalError(err.Error()) + case !ok: + return errtypes.PermissionDenied(key) + } + + if err = os.RemoveAll(deletedNodePath); err != nil { + log.Error().Err(err).Str("deletedNodePath", deletedNodePath).Msg("error deleting trash node") + return + } + + // delete item link in trash + if err = os.Remove(trashItem); err != nil { + log.Error().Err(err).Str("trashItem", trashItem).Msg("error deleting trash item") + } + // TODO recursively delete all children + return +} + +func (fs *cephfs) EmptyRecycle(ctx context.Context) error { + u, ok := user.ContextGetUser(ctx) + // TODO what permission should we check? 
we could check the root node of the user? or the owner permissions on his home root node? + // The current impl will wipe your own trash. or when no user provided the trash of 'root' + if !ok { + return os.RemoveAll(fs.getRecycleRoot(ctx)) + } + + // TODO use layout, see Tree.Delete() for problem + return os.RemoveAll(filepath.Join(fs.o.Root, "trash", u.Id.OpaqueId)) +} + +func getResourceType(isDir bool) provider.ResourceType { + if isDir { + return provider.ResourceType_RESOURCE_TYPE_CONTAINER + } + return provider.ResourceType_RESOURCE_TYPE_FILE +} + +func (fs *cephfs) getRecycleRoot(ctx context.Context) string { + if fs.o.EnableHome { + u := user.ContextMustGetUser(ctx) + // TODO use layout, see Tree.Delete() for problem + return filepath.Join(fs.o.Root, "trash", u.Id.OpaqueId) + } + return filepath.Join(fs.o.Root, "trash", "root") +} diff --git a/pkg/storage/fs/cephfs/revisions.go b/pkg/storage/fs/cephfs/revisions.go new file mode 100644 index 0000000000..03a69c05e5 --- /dev/null +++ b/pkg/storage/fs/cephfs/revisions.go @@ -0,0 +1,191 @@ +// Copyright 2018-2021 CERN +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// +// In applying this license, CERN does not waive the privileges and immunities +// granted to it by virtue of its status as an Intergovernmental Organization +// or submit itself to any jurisdiction. 
+ +package cephfs + +import ( + "context" + "io" + "os" + "path/filepath" + "strings" + "time" + + provider "github.com/cs3org/go-cs3apis/cs3/storage/provider/v1beta1" + "github.com/cs3org/reva/pkg/appctx" + "github.com/cs3org/reva/pkg/errtypes" + "github.com/pkg/errors" +) + +// Revision entries are stored inside the node folder and start with the same uuid as the current version. +// The `.REV.` indicates it is a revision and what follows is a timestamp, so multiple versions +// can be kept in the same location as the current file content. This prevents new fileuploads +// to trigger cross storage moves when revisions accidentally are stored on another partition, +// because the admin mounted a different partition there. +// We can add a background process to move old revisions to a slower storage +// and replace the revision file with a symbolic link in the future, if necessary. + +func (fs *cephfs) ListRevisions(ctx context.Context, ref *provider.Reference) (revisions []*provider.FileVersion, err error) { + var n *Node + if n, err = fs.lu.NodeFromResource(ctx, ref); err != nil { + return + } + if !n.Exists { + err = errtypes.NotFound(filepath.Join(n.ParentID, n.Name)) + return + } + + ok, err := fs.p.HasPermission(ctx, n, func(rp *provider.ResourcePermissions) bool { + return rp.ListFileVersions + }) + switch { + case err != nil: + return nil, errtypes.InternalError(err.Error()) + case !ok: + return nil, errtypes.PermissionDenied(filepath.Join(n.ParentID, n.Name)) + } + + revisions = []*provider.FileVersion{} + np := fs.lu.toInternalPath(n.ID) + if items, err := filepath.Glob(np + ".REV.*"); err == nil { + for i := range items { + if fi, err := os.Stat(items[i]); err == nil { + rev := &provider.FileVersion{ + Key: filepath.Base(items[i]), + Size: uint64(fi.Size()), + Mtime: uint64(fi.ModTime().Unix()), + } + revisions = append(revisions, rev) + } + } + } + return +} + +func (fs *cephfs) DownloadRevision(ctx context.Context, ref *provider.Reference, revisionKey 
string) (io.ReadCloser, error) { + log := appctx.GetLogger(ctx) + + // verify revision key format + kp := strings.SplitN(revisionKey, ".REV.", 2) + if len(kp) != 2 { + log.Error().Str("revisionKey", revisionKey).Msg("malformed revisionKey") + return nil, errtypes.NotFound(revisionKey) + } + log.Debug().Str("revisionKey", revisionKey).Msg("DownloadRevision") + + // check if the node is available and has not been deleted + n, err := ReadNode(ctx, fs.lu, kp[0]) + if err != nil { + return nil, err + } + if !n.Exists { + err = errtypes.NotFound(filepath.Join(n.ParentID, n.Name)) + return nil, err + } + + ok, err := fs.p.HasPermission(ctx, n, func(rp *provider.ResourcePermissions) bool { + // TODO add explicit permission in the CS3 api? + return rp.ListFileVersions && rp.RestoreFileVersion && rp.InitiateFileDownload + }) + switch { + case err != nil: + return nil, errtypes.InternalError(err.Error()) + case !ok: + return nil, errtypes.PermissionDenied(filepath.Join(n.ParentID, n.Name)) + } + + contentPath := fs.lu.toInternalPath(revisionKey) + + r, err := os.Open(contentPath) + if err != nil { + if os.IsNotExist(err) { + return nil, errtypes.NotFound(contentPath) + } + return nil, errors.Wrap(err, "cephfs: error opening revision "+revisionKey) + } + return r, nil +} + +func (fs *cephfs) RestoreRevision(ctx context.Context, ref *provider.Reference, revisionKey string) (err error) { + log := appctx.GetLogger(ctx) + + // verify revision key format + kp := strings.SplitN(revisionKey, ".REV.", 2) + if len(kp) != 2 { + log.Error().Str("revisionKey", revisionKey).Msg("malformed revisionKey") + return errtypes.NotFound(revisionKey) + } + + // check if the node is available and has not been deleted + n, err := ReadNode(ctx, fs.lu, kp[0]) + if err != nil { + return err + } + if !n.Exists { + err = errtypes.NotFound(filepath.Join(n.ParentID, n.Name)) + return err + } + + ok, err := fs.p.HasPermission(ctx, n, func(rp *provider.ResourcePermissions) bool { + return 
rp.RestoreFileVersion + }) + switch { + case err != nil: + return errtypes.InternalError(err.Error()) + case !ok: + return errtypes.PermissionDenied(filepath.Join(n.ParentID, n.Name)) + } + + // move current version to new revision + nodePath := fs.lu.toInternalPath(kp[0]) + var fi os.FileInfo + if fi, err = os.Stat(nodePath); err == nil { + // versions are stored alongside the actual file, so a rename can be efficient and does not cross storage / partition boundaries + versionsPath := fs.lu.toInternalPath(kp[0] + ".REV." + fi.ModTime().UTC().Format(time.RFC3339Nano)) + + err = os.Rename(nodePath, versionsPath) + if err != nil { + return + } + + // copy old revision to current location + + revisionPath := fs.lu.toInternalPath(revisionKey) + var revision, destination *os.File + revision, err = os.Open(revisionPath) + if err != nil { + return + } + defer revision.Close() + + destination, err = os.OpenFile(nodePath, os.O_CREATE|os.O_WRONLY, defaultFilePerm) + if err != nil { + return + } + defer destination.Close() + _, err = io.Copy(destination, revision) + if err != nil { + return + } + + return fs.copyMD(revisionPath, nodePath) + } + + log.Error().Err(err).Interface("ref", ref).Str("originalnode", kp[0]).Str("revisionKey", revisionKey).Msg("original node does not exist") + return +} diff --git a/pkg/storage/fs/cephfs/tree.go b/pkg/storage/fs/cephfs/tree.go new file mode 100644 index 0000000000..1b696fea61 --- /dev/null +++ b/pkg/storage/fs/cephfs/tree.go @@ -0,0 +1,425 @@ +// Copyright 2018-2021 CERN +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+// See the License for the specific language governing permissions and +// limitations under the License. +// +// In applying this license, CERN does not waive the privileges and immunities +// granted to it by virtue of its status as an Intergovernmental Organization +// or submit itself to any jurisdiction. + +package cephfs + +import ( + "context" + cephfs2 "github.com/ceph/go-ceph/cephfs" + "os" + "path/filepath" + "time" + + userpb "github.com/cs3org/go-cs3apis/cs3/identity/user/v1beta1" + provider "github.com/cs3org/go-cs3apis/cs3/storage/provider/v1beta1" + "github.com/cs3org/reva/pkg/appctx" + "github.com/cs3org/reva/pkg/errtypes" + "github.com/google/uuid" + "github.com/pkg/errors" + "github.com/pkg/xattr" +) + +// Tree manages a hierarchical tree +type Tree struct { + lu *Lookup +} + +// NewTree creates a new Tree instance +func NewTree(lu *Lookup) (TreePersistence, error) { + return &Tree{ + lu: lu, + }, nil +} + +// GetMD returns the metadata of a node in the tree +func (t *Tree) GetMD(ctx context.Context, node *Node) (os.FileInfo, error) { + md, err := os.Stat(t.lu.toInternalPath(node.ID)) + if err != nil { + if os.IsNotExist(err) { + return nil, errtypes.NotFound(node.ID) + } + return nil, errors.Wrap(err, "tree: error stating "+node.ID) + } + + return md, nil +} + +// GetPathByID returns the fn pointed by the file id, without the internal namespace +func (t *Tree) GetPathByID(ctx context.Context, id *provider.ResourceId) (relativeExternalPath string, err error) { + var node *Node + node, err = t.lu.NodeFromID(ctx, id) + if err != nil { + return + } + + relativeExternalPath, err = t.lu.Path(ctx, node) + return +} + +// does not take care of linking back to parent +// TODO check if node exists? 
+func createNode(n *Node, owner *userpb.UserId) (err error) { + // create a directory node + nodePath := n.lu.toInternalPath(n.ID) + var mount, error = cephfs2.CreateMount() + mount.MakeDir(nodePath, 0700) + if err = os.MkdirAll(nodePath, 0700); err != nil { + return errors.Wrap(err, "cephfs: error creating node") + } + + return n.writeMetadata(owner) +} + +// CreateDir creates a new directory entry in the tree +func (t *Tree) CreateDir(ctx context.Context, node *Node) (err error) { + + if node.Exists || node.ID != "" { + return errtypes.AlreadyExists(node.ID) // path? + } + + // create a directory node + node.ID = uuid.New().String() + + // who will become the owner? the owner of the parent node, not the current user + var p *Node + p, err = node.Parent() + if err != nil { + return + } + var owner *userpb.UserId + owner, err = p.Owner() + if err != nil { + return + } + + err = createNode(node, owner) + if err != nil { + return nil + } + + // make child appear in listings + err = os.Symlink("../"+node.ID, filepath.Join(t.lu.toInternalPath(node.ParentID), node.Name)) + if err != nil { + return + } + return t.Propagate(ctx, node) +} + +// Move replaces the target with the source +func (t *Tree) Move(ctx context.Context, oldNode *Node, newNode *Node) (err error) { + // if target exists delete it without trashing it + if newNode.Exists { + // TODO make sure all children are deleted + if err := os.RemoveAll(t.lu.toInternalPath(newNode.ID)); err != nil { + return errors.Wrap(err, "cephfs: Move: error deleting target node "+newNode.ID) + } + } + + // Always target the old node ID for xattr updates. + // The new node id is empty if the target does not exist + // and we need to overwrite the new one when overwriting an existing path. + tgtPath := t.lu.toInternalPath(oldNode.ID) + + // are we just renaming (parent stays the same)? 
+ if oldNode.ParentID == newNode.ParentID { + + parentPath := t.lu.toInternalPath(oldNode.ParentID) + + // rename child + err = os.Rename( + filepath.Join(parentPath, oldNode.Name), + filepath.Join(parentPath, newNode.Name), + ) + if err != nil { + return errors.Wrap(err, "cephfs: could not rename child") + } + + // update name attribute + if err := xattr.Set(tgtPath, nameAttr, []byte(newNode.Name)); err != nil { + return errors.Wrap(err, "cephfs: could not set name attribute") + } + + return t.Propagate(ctx, newNode) + } + + // we are moving the node to a new parent, any target has been removed + // bring old node to the new parent + + // rename child + err = os.Rename( + filepath.Join(t.lu.toInternalPath(oldNode.ParentID), oldNode.Name), + filepath.Join(t.lu.toInternalPath(newNode.ParentID), newNode.Name), + ) + if err != nil { + return errors.Wrap(err, "cephfs: could not move child") + } + + // update target parentid and name + if err := xattr.Set(tgtPath, parentidAttr, []byte(newNode.ParentID)); err != nil { + return errors.Wrap(err, "cephfs: could not set parentid attribute") + } + if err := xattr.Set(tgtPath, nameAttr, []byte(newNode.Name)); err != nil { + return errors.Wrap(err, "cephfs: could not set name attribute") + } + + // TODO inefficient because we might update several nodes twice, only propagate unchanged nodes? + // collect in a list, then only stat each node once + // also do this in a go routine ... 
webdav should check the etag async + + err = t.Propagate(ctx, oldNode) + if err != nil { + return errors.Wrap(err, "cephfs: Move: could not propagate old node") + } + err = t.Propagate(ctx, newNode) + if err != nil { + return errors.Wrap(err, "cephfs: Move: could not propagate new node") + } + return nil +} + +// ListFolder lists the content of a folder node +func (t *Tree) ListFolder(ctx context.Context, node *Node) ([]*Node, error) { + dir := t.lu.toInternalPath(node.ID) + f, err := os.Open(dir) + if err != nil { + if os.IsNotExist(err) { + return nil, errtypes.NotFound(dir) + } + return nil, errors.Wrap(err, "tree: error listing "+dir) + } + defer f.Close() + + names, err := f.Readdirnames(0) + if err != nil { + return nil, err + } + nodes := []*Node{} + for i := range names { + link, err := os.Readlink(filepath.Join(dir, names[i])) + if err != nil { + // TODO log + continue + } + n := &Node{ + lu: t.lu, + ParentID: node.ID, + ID: filepath.Base(link), + Name: names[i], + Exists: true, // TODO + } + + nodes = append(nodes, n) + } + return nodes, nil +} + +// Delete deletes a node in the tree +func (t *Tree) Delete(ctx context.Context, n *Node) (err error) { + + // Prepare the trash + // TODO use layout?, but it requires resolving the owners user if the username is used instead of the id. 
+ // the node knows the owner id so we use that for now + o, err := n.Owner() + if err != nil { + return + } + if o.OpaqueId == "" { + // fall back to root trash + o.OpaqueId = "root" + } + err = os.MkdirAll(filepath.Join(t.lu.Options.Root, "trash", o.OpaqueId), 0700) + if err != nil { + return + } + + // get the original path + origin, err := t.lu.Path(ctx, n) + if err != nil { + return + } + + // set origin location in metadata + nodePath := t.lu.toInternalPath(n.ID) + if err := xattr.Set(nodePath, trashOriginAttr, []byte(origin)); err != nil { + return err + } + + deletionTime := time.Now().UTC().Format(time.RFC3339Nano) + + // first make node appear in the owners (or root) trash + // parent id and name are stored as extended attributes in the node itself + trashLink := filepath.Join(t.lu.Options.Root, "trash", o.OpaqueId, n.ID) + err = os.Symlink("../../nodes/"+n.ID+".T."+deletionTime, trashLink) + if err != nil { + // To roll back changes + // TODO unset trashOriginAttr + return + } + + // at this point we have a symlink pointing to a non existing destination, which is fine + + // rename the trashed node so it is not picked up when traversing up the tree and matches the symlink + trashPath := nodePath + ".T." 
+ deletionTime + err = os.Rename(nodePath, trashPath) + if err != nil { + // To roll back changes + // TODO remove symlink + // TODO unset trashOriginAttr + return + } + + // finally remove the entry from the parent dir + src := filepath.Join(t.lu.toInternalPath(n.ParentID), n.Name) + err = os.Remove(src) + if err != nil { + // To roll back changes + // TODO revert the rename + // TODO remove symlink + // TODO unset trashOriginAttr + return + } + + p, err := n.Parent() + if err != nil { + return errors.Wrap(err, "cephfs: error getting parent "+n.ParentID) + } + return t.Propagate(ctx, p) +} + +// Propagate propagates changes to the root of the tree +func (t *Tree) Propagate(ctx context.Context, n *Node) (err error) { + sublog := appctx.GetLogger(ctx).With().Interface("node", n).Logger() + if !t.lu.Options.TreeTimeAccounting && !t.lu.Options.TreeSizeAccounting { + // no propagation enabled + sublog.Debug().Msg("propagation disabled") + return + } + + // is propagation enabled for the parent node? + + var root *Node + if root, err = t.lu.HomeOrRootNode(ctx); err != nil { + return + } + + // use a sync time and don't rely on the mtime of the current node, as the stat might not change when a rename happened too quickly + sTime := time.Now().UTC() + + // we loop until we reach the root + for err == nil && n.ID != root.ID { + sublog.Debug().Msg("propagating") + + // make n the parent or break the loop + if n, err = n.Parent(); err != nil { + break + } + + sublog = sublog.With().Interface("node", n).Logger() + + // TODO none, sync and async? 
+ if !n.HasPropagation() { + sublog.Debug().Str("attr", propagationAttr).Msg("propagation attribute not set or unreadable, not propagating") + // if the attribute is not set treat it as false / none / no propagation + return nil + } + + if t.lu.Options.TreeTimeAccounting { + // update the parent tree time if it is older than the nodes mtime + updateSyncTime := false + + var tmTime time.Time + tmTime, err = n.GetTMTime() + switch { + case err != nil: + // missing attribute, or invalid format, overwrite + sublog.Debug().Err(err).Msg("could not read tmtime attribute, overwriting") + updateSyncTime = true + case tmTime.Before(sTime): + sublog.Debug(). + Time("tmtime", tmTime). + Time("stime", sTime). + Msg("parent tmtime is older than node mtime, updating") + updateSyncTime = true + default: + sublog.Debug(). + Time("tmtime", tmTime). + Time("stime", sTime). + Dur("delta", sTime.Sub(tmTime)). + Msg("parent tmtime is younger than node mtime, not updating") + } + + if updateSyncTime { + // update the tree time of the parent node + if err = n.SetTMTime(sTime); err != nil { + sublog.Error().Err(err).Time("tmtime", sTime).Msg("could not update tmtime of parent node") + } else { + sublog.Debug().Time("tmtime", sTime).Msg("updated tmtime of parent node") + } + } + + if err := n.UnsetTempEtag(); err != nil { + sublog.Error().Err(err).Msg("could not remove temporary etag attribute") + } + + } + + // size accounting + if t.lu.Options.TreeSizeAccounting { + // update the treesize if it differs from the current size + updateTreeSize := false + + var treeSize, calculatedTreeSize uint64 + calculatedTreeSize, err = n.CalculateTreeSize(ctx) + if err != nil { + continue + } + + treeSize, err = n.GetTreeSize() + switch { + case err != nil: + // missing attribute, or invalid format, overwrite + sublog.Debug().Err(err).Msg("could not read treesize attribute, overwriting") + updateTreeSize = true + case treeSize != calculatedTreeSize: + sublog.Debug(). + Uint64("treesize", treeSize). 
+ Uint64("calculatedTreeSize", calculatedTreeSize). + Msg("parent treesize is different then calculated treesize, updating") + updateTreeSize = true + default: + sublog.Debug(). + Uint64("treesize", treeSize). + Uint64("calculatedTreeSize", calculatedTreeSize). + Msg("parent size matches calculated size, not updating") + } + + if updateTreeSize { + // update the tree time of the parent node + if err = n.SetTreeSize(calculatedTreeSize); err != nil { + sublog.Error().Err(err).Uint64("calculatedTreeSize", calculatedTreeSize).Msg("could not update treesize of parent node") + } else { + sublog.Debug().Uint64("calculatedTreeSize", calculatedTreeSize).Msg("updated treesize of parent node") + } + } + } + } + if err != nil { + sublog.Error().Err(err).Msg("error propagating") + } + return +} diff --git a/pkg/storage/fs/cephfs/upload.go b/pkg/storage/fs/cephfs/upload.go new file mode 100644 index 0000000000..95bf9db09e --- /dev/null +++ b/pkg/storage/fs/cephfs/upload.go @@ -0,0 +1,655 @@ +// Copyright 2018-2021 CERN +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// +// In applying this license, CERN does not waive the privileges and immunities +// granted to it by virtue of its status as an Intergovernmental Organization +// or submit itself to any jurisdiction. 
+ +package cephfs + +import ( + "context" + "crypto/md5" + "crypto/sha1" + "encoding/hex" + "encoding/json" + "fmt" + "hash" + "hash/adler32" + "io" + "io/ioutil" + "os" + "path/filepath" + "strings" + "time" + + userpb "github.com/cs3org/go-cs3apis/cs3/identity/user/v1beta1" + provider "github.com/cs3org/go-cs3apis/cs3/storage/provider/v1beta1" + "github.com/cs3org/reva/pkg/appctx" + "github.com/cs3org/reva/pkg/errtypes" + "github.com/cs3org/reva/pkg/logger" + "github.com/cs3org/reva/pkg/storage/utils/chunking" + "github.com/cs3org/reva/pkg/user" + "github.com/google/uuid" + "github.com/pkg/errors" + "github.com/rs/zerolog" + tusd "github.com/tus/tusd/pkg/handler" +) + +var defaultFilePerm = os.FileMode(0664) + +// TODO Upload (and InitiateUpload) needs a way to receive the expected checksum. +// Maybe in metadata as 'checksum' => 'sha1 aeosvp45w5xaeoe' = lowercase, space separated? +func (fs *cephfs) Upload(ctx context.Context, ref *provider.Reference, r io.ReadCloser) (err error) { + upload, err := fs.GetUpload(ctx, ref.GetPath()) + if err != nil { + // Upload corresponding to this ID was not found. + // Assume that this corresponds to the resource path to which the file has to be uploaded. 
+ + // Set the length to 0 and set SizeIsDeferred to true + metadata := map[string]string{"sizedeferred": "true"} + uploadIDs, err := fs.InitiateUpload(ctx, ref, 0, metadata) + if err != nil { + return err + } + if upload, err = fs.GetUpload(ctx, uploadIDs["simple"]); err != nil { + return errors.Wrap(err, "cephfs: error retrieving upload") + } + } + + uploadInfo := upload.(*fileUpload) + + p := uploadInfo.info.Storage["NodeName"] + ok, err := chunking.IsChunked(p) // check chunking v1 + if err != nil { + return errors.Wrap(err, "cephfs: error checking path") + } + if ok { + var assembledFile string + p, assembledFile, err = fs.chunkHandler.WriteChunk(p, r) + if err != nil { + return err + } + if p == "" { + if err = uploadInfo.Terminate(ctx); err != nil { + return errors.Wrap(err, "ocfs: error removing auxiliary files") + } + return errtypes.PartialContent(ref.String()) + } + uploadInfo.info.Storage["NodeName"] = p + fd, err := os.Open(assembledFile) + if err != nil { + return errors.Wrap(err, "cephfs: error opening assembled file") + } + defer fd.Close() + defer os.RemoveAll(assembledFile) + r = fd + } + + if _, err := uploadInfo.WriteChunk(ctx, 0, r); err != nil { + return errors.Wrap(err, "cephfs: error writing to binary file") + } + + return uploadInfo.FinishUpload(ctx) +} + +// InitiateUpload returns upload ids corresponding to different protocols it supports +// TODO read optional content for small files in this request +// TODO InitiateUpload (and Upload) needs a way to receive the expected checksum. Maybe in metadata as 'checksum' => 'sha1 aeosvp45w5xaeoe' = lowercase, space separated? 
+func (fs *cephfs) InitiateUpload(ctx context.Context, ref *provider.Reference, uploadLength int64, metadata map[string]string) (map[string]string, error) { + + log := appctx.GetLogger(ctx) + + var relative string // the internal path of the file node + + n, err := fs.lu.NodeFromResource(ctx, ref) + if err != nil { + return nil, err + } + + // permissions are checked in NewUpload below + + relative, err = fs.lu.Path(ctx, n) + if err != nil { + return nil, err + } + + info := tusd.FileInfo{ + MetaData: tusd.MetaData{ + "filename": filepath.Base(relative), + "dir": filepath.Dir(relative), + }, + Size: uploadLength, + } + + if metadata != nil { + if metadata["mtime"] != "" { + info.MetaData["mtime"] = metadata["mtime"] + } + if _, ok := metadata["sizedeferred"]; ok { + info.SizeIsDeferred = true + } + if metadata["checksum"] != "" { + parts := strings.SplitN(metadata["checksum"], " ", 2) + if len(parts) != 2 { + return nil, errtypes.BadRequest("invalid checksum format. must be '[algorithm] [checksum]'") + } + switch parts[0] { + case "sha1", "md5", "adler32": + info.MetaData["checksum"] = metadata["checksum"] + default: + return nil, errtypes.BadRequest("unsupported checksum algorithm: " + parts[0]) + } + } + } + + log.Debug().Interface("info", info).Interface("node", n).Interface("metadata", metadata).Msg("cephfs: resolved filename") + + upload, err := fs.NewUpload(ctx, info) + if err != nil { + return nil, err + } + + info, _ = upload.GetInfo(ctx) + + return map[string]string{ + "simple": info.ID, + "tus": info.ID, + }, nil +} + +// UseIn tells the tus upload middleware which extensions it supports. 
+func (fs *cephfs) UseIn(composer *tusd.StoreComposer) { + composer.UseCore(fs) + composer.UseTerminater(fs) + composer.UseConcater(fs) + composer.UseLengthDeferrer(fs) +} + +// To implement the core tus.io protocol as specified in https://tus.io/protocols/resumable-upload.html#core-protocol +// - the storage needs to implement NewUpload and GetUpload +// - the upload needs to implement the tusd.Upload interface: WriteChunk, GetInfo, GetReader and FinishUpload + +func (fs *cephfs) NewUpload(ctx context.Context, info tusd.FileInfo) (upload tusd.Upload, err error) { + + log := appctx.GetLogger(ctx) + log.Debug().Interface("info", info).Msg("cephfs: NewUpload") + + fn := info.MetaData["filename"] + if fn == "" { + return nil, errors.New("cephfs: missing filename in metadata") + } + info.MetaData["filename"] = filepath.Clean(info.MetaData["filename"]) + + dir := info.MetaData["dir"] + if dir == "" { + return nil, errors.New("cephfs: missing dir in metadata") + } + info.MetaData["dir"] = filepath.Clean(info.MetaData["dir"]) + + n, err := fs.lu.NodeFromPath(ctx, filepath.Join(info.MetaData["dir"], info.MetaData["filename"])) + if err != nil { + return nil, errors.Wrap(err, "cephfs: error wrapping filename") + } + + log.Debug().Interface("info", info).Interface("node", n).Msg("cephfs: resolved filename") + + // the parent owner will become the new owner + p, perr := n.Parent() + if perr != nil { + return nil, errors.Wrap(perr, "cephfs: error getting parent "+n.ParentID) + } + + // check permissions + var ok bool + if n.Exists { + // check permissions of file to be overwritten + ok, err = fs.p.HasPermission(ctx, n, func(rp *provider.ResourcePermissions) bool { + return rp.InitiateFileUpload + }) + } else { + // check permissions of parent + ok, err = fs.p.HasPermission(ctx, p, func(rp *provider.ResourcePermissions) bool { + return rp.InitiateFileUpload + }) + } + switch { + case err != nil: + return nil, errtypes.InternalError(err.Error()) + case !ok: + return nil, 
errtypes.PermissionDenied(filepath.Join(n.ParentID, n.Name)) + } + + info.ID = uuid.New().String() + + binPath, err := fs.getUploadPath(ctx, info.ID) + if err != nil { + return nil, errors.Wrap(err, "cephfs: error resolving upload path") + } + usr := user.ContextMustGetUser(ctx) + + owner, err := p.Owner() + if err != nil { + return nil, errors.Wrap(err, "cephfs: error determining owner") + } + + info.Storage = map[string]string{ + "Type": "OCISStore", + "BinPath": binPath, + + "NodeId": n.ID, + "NodeParentId": n.ParentID, + "NodeName": n.Name, + + "Idp": usr.Id.Idp, + "UserId": usr.Id.OpaqueId, + "UserName": usr.Username, + + "OwnerIdp": owner.Idp, + "OwnerId": owner.OpaqueId, + + "LogLevel": log.GetLevel().String(), + } + // Create binary file in the upload folder with no content + log.Debug().Interface("info", info).Msg("cephfs: built storage info") + file, err := os.OpenFile(binPath, os.O_CREATE|os.O_WRONLY, defaultFilePerm) + if err != nil { + return nil, err + } + defer file.Close() + + u := &fileUpload{ + info: info, + binPath: binPath, + infoPath: filepath.Join(fs.o.Root, "uploads", info.ID+".info"), + fs: fs, + ctx: ctx, + } + + if !info.SizeIsDeferred && info.Size == 0 { + log.Debug().Interface("info", info).Msg("cephfs: finishing upload for empty file") + // no need to create info file and finish directly + err := u.FinishUpload(ctx) + if err != nil { + return nil, err + } + return u, nil + } + + // writeInfo creates the file by itself if necessary + err = u.writeInfo() + if err != nil { + return nil, err + } + + return u, nil +} + +func (fs *cephfs) getUploadPath(ctx context.Context, uploadID string) (string, error) { + return filepath.Join(fs.o.Root, "uploads", uploadID), nil +} + +// GetUpload returns the Upload for the given upload id +func (fs *cephfs) GetUpload(ctx context.Context, id string) (tusd.Upload, error) { + infoPath := filepath.Join(fs.o.Root, "uploads", id+".info") + + info := tusd.FileInfo{} + data, err := ioutil.ReadFile(infoPath) + if 
err != nil { + return nil, err + } + if err := json.Unmarshal(data, &info); err != nil { + return nil, err + } + + stat, err := os.Stat(info.Storage["BinPath"]) + if err != nil { + return nil, err + } + + info.Offset = stat.Size() + + u := &userpb.User{ + Id: &userpb.UserId{ + Idp: info.Storage["Idp"], + OpaqueId: info.Storage["UserId"], + }, + Username: info.Storage["UserName"], + } + + ctx = user.ContextSetUser(ctx, u) + // TODO configure the logger the same way ... store and add traceid in file info + + var opts []logger.Option + opts = append(opts, logger.WithLevel(info.Storage["LogLevel"])) + opts = append(opts, logger.WithWriter(os.Stderr, logger.ConsoleMode)) + l := logger.New(opts...) + + sub := l.With().Int("pid", os.Getpid()).Logger() + + ctx = appctx.WithLogger(ctx, &sub) + + return &fileUpload{ + info: info, + binPath: info.Storage["BinPath"], + infoPath: infoPath, + fs: fs, + ctx: ctx, + }, nil +} + +type fileUpload struct { + // info stores the current information about the upload + info tusd.FileInfo + // infoPath is the path to the .info file + infoPath string + // binPath is the path to the binary file (which has no extension) + binPath string + // only fs knows how to handle metadata and versions + fs *cephfs + // a context with a user + // TODO add logger as well? + ctx context.Context +} + +// GetInfo returns the FileInfo +func (upload *fileUpload) GetInfo(ctx context.Context) (tusd.FileInfo, error) { + return upload.info, nil +} + +// WriteChunk writes the stream from the reader to the given offset of the upload +func (upload *fileUpload) WriteChunk(ctx context.Context, offset int64, src io.Reader) (int64, error) { + file, err := os.OpenFile(upload.binPath, os.O_WRONLY|os.O_APPEND, defaultFilePerm) + if err != nil { + return 0, err + } + defer file.Close() + + // calculate cheksum here? needed for the TUS checksum extension. https://tus.io/protocols/resumable-upload.html#checksum + // TODO but how do we get the `Upload-Checksum`? 
WriteChunk() only has a context, offset and the reader ... + // It is sent with the PATCH request, well or in the POST when the creation-with-upload extension is used + // but the tus handler uses a context.Background() so we cannot really check the header and put it in the context ... + n, err := io.Copy(file, src) + + // If the HTTP PATCH request gets interrupted in the middle (e.g. because + // the user wants to pause the upload), Go's net/http returns an io.ErrUnexpectedEOF. + // However, for the ocis driver it's not important whether the stream has ended + // on purpose or accidentally. + if err != nil { + if err != io.ErrUnexpectedEOF { + return n, err + } + } + + upload.info.Offset += n + err = upload.writeInfo() // TODO info is written here ... we need to truncate in DiscardChunk + + return n, err +} + +// GetReader returns an io.Reader for the upload +func (upload *fileUpload) GetReader(ctx context.Context) (io.Reader, error) { + return os.Open(upload.binPath) +} + +// writeInfo updates the entire information. Everything will be overwritten. 
+func (upload *fileUpload) writeInfo() error {
+	data, err := json.Marshal(upload.info)
+	if err != nil {
+		return err
+	}
+	return ioutil.WriteFile(upload.infoPath, data, defaultFilePerm)
+}
+
+// FinishUpload finishes an upload and moves the file to the internal destination
+func (upload *fileUpload) FinishUpload(ctx context.Context) (err error) {
+
+	n := &Node{
+		lu:       upload.fs.lu,
+		ID:       upload.info.Storage["NodeId"],
+		ParentID: upload.info.Storage["NodeParentId"],
+		Name:     upload.info.Storage["NodeName"],
+	}
+
+	if n.ID == "" {
+		n.ID = uuid.New().String()
+	}
+	targetPath := upload.fs.lu.toInternalPath(n.ID)
+
+	sublog := appctx.GetLogger(upload.ctx).With().Interface("info", upload.info).Str("binPath", upload.binPath).Str("targetPath", targetPath).Logger()
+
+	// calculate the checksum of the written bytes
+	// they will all be written to the metadata later, so we cannot omit any of them
+	// TODO only calculate the checksum in sync that was requested to match, the rest could be async ... but the tests currently expect all to be present
+	// TODO the hashes all implement BinaryMarshaler so we could try to persist the state for resumable upload. we would neet do keep track of the copied bytes ...
+	sha1h := sha1.New()
+	md5h := md5.New()
+	adler32h := adler32.New()
+	{
+		f, err := os.Open(upload.binPath)
+		if err != nil {
+			sublog.Err(err).Msg("cephfs: could not open file for checksumming")
+			// we can continue if no oc checksum header is set
+		} else {
+			// only close and read the file when the open succeeded; f is nil on error
+			defer f.Close()
+			r1 := io.TeeReader(f, sha1h)
+			r2 := io.TeeReader(r1, md5h)
+			if _, err := io.Copy(adler32h, r2); err != nil {
+				sublog.Err(err).Msg("cephfs: could not copy bytes for checksumming")
+			}
+		}
+	}
+	// compare if they match the sent checksum
+	// TODO the tus checksum extension would do this on every chunk, but I currently don't see an easy way to pass in the requested checksum.
for now we do it in FinishUpload which is also called for chunked uploads + if upload.info.MetaData["checksum"] != "" { + parts := strings.SplitN(upload.info.MetaData["checksum"], " ", 2) + if len(parts) != 2 { + return errtypes.BadRequest("invalid checksum format. must be '[algorithm] [checksum]'") + } + switch parts[0] { + case "sha1": + err = upload.checkHash(parts[1], sha1h) + case "md5": + err = upload.checkHash(parts[1], md5h) + case "adler32": + err = upload.checkHash(parts[1], adler32h) + default: + err = errtypes.BadRequest("unsupported checksum algorithm: " + parts[0]) + } + if err != nil { + return err + } + } + + // defer writing the checksums until the node is in place + + // if target exists create new version + var fi os.FileInfo + if fi, err = os.Stat(targetPath); err == nil { + // versions are stored alongside the actual file, so a rename can be efficient and does not cross storage / partition boundaries + versionsPath := upload.fs.lu.toInternalPath(n.ID + ".REV." + fi.ModTime().UTC().Format(time.RFC3339Nano)) + + if err = os.Rename(targetPath, versionsPath); err != nil { + sublog.Err(err). + Str("versionsPath", versionsPath). + Msg("cephfs: could not create version") + return + } + } + + // now rename the upload to the target path + // TODO put uploads on the same underlying storage as the destination dir? + // TODO trigger a workflow as the final rename might eg involve antivirus scanning + if err = os.Rename(upload.binPath, targetPath); err != nil { + sublog.Err(err). + Msg("cephfs: could not rename") + return + } + + // now try write all checksums + tryWritingChecksum(&sublog, n, "sha1", sha1h) + tryWritingChecksum(&sublog, n, "md5", md5h) + tryWritingChecksum(&sublog, n, "adler32", adler32h) + + // who will become the owner? the owner of the parent actually ... 
not the currently logged in user + err = n.writeMetadata(&userpb.UserId{ + Idp: upload.info.Storage["OwnerIdp"], + OpaqueId: upload.info.Storage["OwnerId"], + }) + if err != nil { + return errors.Wrap(err, "cephfs: could not write metadata") + } + + // link child name to parent if it is new + childNameLink := filepath.Join(upload.fs.lu.toInternalPath(n.ParentID), n.Name) + var link string + link, err = os.Readlink(childNameLink) + if err == nil && link != "../"+n.ID { + sublog.Err(err). + Interface("node", n). + Str("childNameLink", childNameLink). + Str("link", link). + Msg("cephfs: child name link has wrong target id, repairing") + + if err = os.Remove(childNameLink); err != nil { + return errors.Wrap(err, "cephfs: could not remove symlink child entry") + } + } + if os.IsNotExist(err) || link != "../"+n.ID { + if err = os.Symlink("../"+n.ID, childNameLink); err != nil { + return errors.Wrap(err, "cephfs: could not symlink child entry") + } + } + + // only delete the upload if it was successfully written to the storage + if err = os.Remove(upload.infoPath); err != nil { + if !os.IsNotExist(err) { + sublog.Err(err).Msg("cephfs: could not delete upload info") + return + } + } + // use set arbitrary metadata? + /*if upload.info.MetaData["mtime"] != "" { + err := upload.fs.SetMtime(ctx, np, upload.info.MetaData["mtime"]) + if err != nil { + log.Err(err).Interface("info", upload.info).Msg("cephfs: could not set mtime metadata") + return err + } + }*/ + + n.Exists = true + + return upload.fs.tp.Propagate(upload.ctx, n) +} + +func (upload *fileUpload) checkHash(expected string, h hash.Hash) error { + if expected != hex.EncodeToString(h.Sum(nil)) { + upload.discardChunk() + return errtypes.ChecksumMismatch(fmt.Sprintf("invalid checksum: expected %s got %x", upload.info.MetaData["checksum"], h.Sum(nil))) + } + return nil +} +func tryWritingChecksum(log *zerolog.Logger, n *Node, algo string, h hash.Hash) { + if err := n.SetChecksum(algo, h); err != nil { + log.Err(err). 
+ Str("csType", algo). + Bytes("hash", h.Sum(nil)). + Msg("cephfs: could not write checksum") + // this is not critical, the bytes are there so we will continue + } +} + +func (upload *fileUpload) discardChunk() { + if err := os.Remove(upload.binPath); err != nil { + if !os.IsNotExist(err) { + appctx.GetLogger(upload.ctx).Err(err).Interface("info", upload.info).Str("binPath", upload.binPath).Interface("info", upload.info).Msg("cephfs: could not discard chunk") + return + } + } +} + +// To implement the termination extension as specified in https://tus.io/protocols/resumable-upload.html#termination +// - the storage needs to implement AsTerminatableUpload +// - the upload needs to implement Terminate + +// AsTerminatableUpload returns a TerminatableUpload +func (fs *cephfs) AsTerminatableUpload(upload tusd.Upload) tusd.TerminatableUpload { + return upload.(*fileUpload) +} + +// Terminate terminates the upload +func (upload *fileUpload) Terminate(ctx context.Context) error { + if err := os.Remove(upload.infoPath); err != nil { + if !os.IsNotExist(err) { + return err + } + } + if err := os.Remove(upload.binPath); err != nil { + if !os.IsNotExist(err) { + return err + } + } + return nil +} + +// To implement the creation-defer-length extension as specified in https://tus.io/protocols/resumable-upload.html#creation +// - the storage needs to implement AsLengthDeclarableUpload +// - the upload needs to implement DeclareLength + +// AsLengthDeclarableUpload returns a LengthDeclarableUpload +func (fs *cephfs) AsLengthDeclarableUpload(upload tusd.Upload) tusd.LengthDeclarableUpload { + return upload.(*fileUpload) +} + +// DeclareLength updates the upload length information +func (upload *fileUpload) DeclareLength(ctx context.Context, length int64) error { + upload.info.Size = length + upload.info.SizeIsDeferred = false + return upload.writeInfo() +} + +// To implement the concatenation extension as specified in https://tus.io/protocols/resumable-upload.html#concatenation 
+// - the storage needs to implement AsConcatableUpload +// - the upload needs to implement ConcatUploads + +// AsConcatableUpload returns a ConcatableUpload +func (fs *cephfs) AsConcatableUpload(upload tusd.Upload) tusd.ConcatableUpload { + return upload.(*fileUpload) +} + +// ConcatUploads concatenates multiple uploads +func (upload *fileUpload) ConcatUploads(ctx context.Context, uploads []tusd.Upload) (err error) { + file, err := os.OpenFile(upload.binPath, os.O_WRONLY|os.O_APPEND, defaultFilePerm) + if err != nil { + return err + } + defer file.Close() + + for _, partialUpload := range uploads { + fileUpload := partialUpload.(*fileUpload) + + src, err := os.Open(fileUpload.binPath) + if err != nil { + return err + } + defer src.Close() + + if _, err := io.Copy(file, src); err != nil { + return err + } + } + + return +}