diff --git a/.github/workflows/lint.yaml b/.github/workflows/lint.yaml index 3c92c8e2e1..7a0fcfa365 100644 --- a/.github/workflows/lint.yaml +++ b/.github/workflows/lint.yaml @@ -15,17 +15,12 @@ jobs: - uses: "actions/setup-go@v2" with: go-version: "^1.17" - - name: "Install linting tools" - run: | - # This is done before checking out, as to not modify go.mod - go install mvdan.cc/gofumpt/gofumports@v0.1.1 - go install golang.org/x/tools/cmd/stringer@latest - uses: "actions/checkout@v2" - name: "Go Mod Tidy" run: "go mod tidy && bash -c '[ $(git status --porcelain | tee /dev/fd/2 | wc -c) -eq 0 ]'" - name: "Formatting (gofumpt)" run: | - GOFUMPT_OUTPUT="$(find . -iname '*.go' -type f | grep -v pb.validate.go | grep -v pb.go | xargs gofumports -d)" + GOFUMPT_OUTPUT="$(find . -iname '*.go' -type f | grep -v pb.validate.go | grep -v pb.go | xargs go run mvdan.cc/gofumpt -d)" if [ -n "$GOFUMPT_OUTPUT" ]; then echo "All the following files are not correctly formatted" echo "${GOFUMPT_OUTPUT}" diff --git a/.golangci.yaml b/.golangci.yaml index 972d36eb35..edee215a6a 100644 --- a/.golangci.yaml +++ b/.golangci.yaml @@ -10,57 +10,50 @@ linters-settings: packages: - "github.com/jmoiron/sqlx" - "github.com/jackc/pgx" + gosec: + excludes: + - "G404" # Allow the usage of math/rand linters: enable: + - "bidichk" + - "bodyclose" - "deadcode" - "errcheck" + - "errname" + - "errorlint" - "gofumpt" - "goimports" + - "goprintffuncname" + - "gosec" - "gosimple" - "govet" + - "ifshort" + - "importas" - "ineffassign" + - "makezero" + - "prealloc" + - "predeclared" + - "promlinter" - "revive" - "rowserrcheck" - "staticcheck" - "structcheck" + - "stylecheck" + - "tenv" - "typecheck" + - "unconvert" - "unused" - "varcheck" + - "wastedassign" + - "whitespace" issues: exclude-rules: - - path: "internal/dispatch" - linters: - - "revive" - - path: "internal/graph" - linters: - - "revive" - path: "internal/namespace" linters: - "revive" - - path: "internal/testfixtures" - linters: - - "revive" 
- - path: "pkg/consistent" - linters: - - "revive" - - path: "pkg/graph" - linters: - - "revive" - - path: "pkg/membership" - linters: - - "revive" - - path: "pkg/migrate" - linters: - - "revive" - path: "pkg/namespace" linters: - "revive" - - path: "pkg/schemadsl" - linters: - - "revive" - - path: "pkg/validation" - linters: - - "revive" - text: "tx.Rollback()" linters: - "errcheck" diff --git a/go.mod b/go.mod index 7e7d4cb0a1..8546a617b4 100644 --- a/go.mod +++ b/go.mod @@ -66,12 +66,13 @@ require ( golang.org/x/net v0.0.0-20211104170005-ce137452f963 // indirect golang.org/x/sync v0.0.0-20210220032951-036812b2e83c golang.org/x/sys v0.0.0-20211216021012-1d35b9e2eb4e // indirect - golang.org/x/tools v0.1.8 // indirect + golang.org/x/tools v0.1.8 google.golang.org/genproto v0.0.0-20211118181313-81c1377c94b1 google.golang.org/grpc v1.42.0 google.golang.org/protobuf v1.27.1 gopkg.in/yaml.v2 v2.4.0 gopkg.in/yaml.v3 v3.0.0-20210107192922-496545a6307b + mvdan.cc/gofumpt v0.2.1 ) // TODO(jschorr): Remove once https://github.com/dgraph-io/ristretto/pull/286 is merged diff --git a/go.sum b/go.sum index c004abbec0..d68d3182cb 100644 --- a/go.sum +++ b/go.sum @@ -133,6 +133,7 @@ github.com/coreos/pkg v0.0.0-20180928190104-399ea9e2e55f/go.mod h1:E3G3o1h8I7cfc github.com/cpuguy83/go-md2man/v2 v2.0.0-20190314233015-f79a8a8ca69d/go.mod h1:maD7wRr/U5Z6m/iR4s+kqSMx2CaBsrgA7czyZG/E6dU= github.com/cpuguy83/go-md2man/v2 v2.0.0/go.mod h1:maD7wRr/U5Z6m/iR4s+kqSMx2CaBsrgA7czyZG/E6dU= github.com/creack/pty v1.1.7/go.mod h1:lj5s0c3V2DBrqTV7llrYr5NG6My20zk30Fl46Y7DoTY= +github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E= github.com/creack/pty v1.1.11 h1:07n33Z8lZxZ2qwegKbObQohDhXDQxiMMz1NOUGYlesw= github.com/creack/pty v1.1.11/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E= github.com/cyphar/filepath-securejoin v0.2.2/go.mod h1:FpkQEhXnPnOthhzymB7CGsFk2G9VLXONKD9G7QGMM+4= @@ -184,6 +185,8 @@ github.com/fatih/structs v1.1.0/go.mod 
h1:9NiDSp5zOcgEDl+j00MP/WkGVPOlPRLejGD8Ga github.com/felixge/httpsnoop v1.0.2 h1:+nS9g82KMXccJ/wp0zyRW9ZBHFETmMGtkk+2CTTrW4o= github.com/felixge/httpsnoop v1.0.2/go.mod h1:m8KPJKqk1gH5J9DgRY2ASl2lWCfGKXixSwevea8zH2U= github.com/frankban/quicktest v1.11.3/go.mod h1:wRf/ReqHper53s+kmmSZizM8NamnL3IM0I9ntUbOk+k= +github.com/frankban/quicktest v1.14.0 h1:+cqqvzZV87b4adx/5ayVOaYZ2CrvM4ejQvUdBzPPUss= +github.com/frankban/quicktest v1.14.0/go.mod h1:NeW+ay9A/U67EYXNFA1nPE8e/tnQv/09mUdL/ijj8og= github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo= github.com/fsnotify/fsnotify v1.4.9/go.mod h1:znqG4EE+3YCdAaPaxE2ZRY/06pZUdp0tY4IgpuI1SZQ= github.com/fsnotify/fsnotify v1.5.1 h1:mZcQUHVQUQWoPXXtuf9yuEXKudkV2sx1E06UadKWpgI= @@ -440,12 +443,14 @@ github.com/kr/fs v0.1.0/go.mod h1:FFnZGqtBN9Gxj7eW1uZ42v5BccTP0vu6NEaFoC2HwRg= github.com/kr/logfmt v0.0.0-20140226030751-b84e30acd515/go.mod h1:+0opPa2QZZtGFBFZlji/RkVcI2GknAs/DXo4wKdlNEc= github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo= github.com/kr/pretty v0.2.0/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI= -github.com/kr/pretty v0.2.1 h1:Fmg33tUaq4/8ym9TJN1x7sLJnHVwhP33CNkpYV/7rwI= github.com/kr/pretty v0.2.1/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI= +github.com/kr/pretty v0.3.0 h1:WgNl7dwNpEZ6jJ9k1snq4pZsg7DOEN8hP9Xw0Tsjwk0= +github.com/kr/pretty v0.3.0/go.mod h1:640gp4NfQd8pI5XOwp5fnNeVWj67G7CFk/SaSQn7NBk= github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= github.com/kr/pty v1.1.8/go.mod h1:O1sed60cT9XZ5uDucP5qwvh+TE3NnUj51EiZO/lmSfw= -github.com/kr/text v0.1.0 h1:45sCR5RtlFHMR4UwH9sdQ5TC8v0qDQCHnXt+kaKSTVE= github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI= +github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY= +github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE= github.com/lann/builder v0.0.0-20180802200727-47ae307949d0 
h1:SOEGU9fKiNWd/HOJuq6+3iTQz8KNCLtVX6idSoTLdUw= github.com/lann/builder v0.0.0-20180802200727-47ae307949d0/go.mod h1:dXGbAdH5GtBTC4WfIxhKZfyBF/HBFgRZSWwZ9g/He9o= github.com/lann/ps v0.0.0-20150810152359-62de8c46ede0 h1:P6pPBnrTSX3DEVR4fDembhRWSsG5rVo6hYhAB/ADZrk= @@ -529,6 +534,8 @@ github.com/pelletier/go-toml v1.2.0/go.mod h1:5z9KED0ma1S8pY6P1sdut58dfprrGBbd/9 github.com/pelletier/go-toml v1.9.3/go.mod h1:u1nR/EPcESfeI/szUZKdtJ0xRNbUoANCkoOuaOx1Y+c= github.com/pelletier/go-toml v1.9.4 h1:tjENF6MfZAg8e4ZmZTeWaWiT2vXtsoO6+iuOjFhECwM= github.com/pelletier/go-toml v1.9.4/go.mod h1:u1nR/EPcESfeI/szUZKdtJ0xRNbUoANCkoOuaOx1Y+c= +github.com/pkg/diff v0.0.0-20210226163009-20ebb0f2a09e h1:aoZm08cpOy4WuID//EZDgcC4zIxODThtZNPirFr42+A= +github.com/pkg/diff v0.0.0-20210226163009-20ebb0f2a09e/go.mod h1:pJLUxLENpZxwdsKMEsNbx1VGcRFpLqf3715MtcvvzbA= github.com/pkg/errors v0.8.0/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4= @@ -567,6 +574,9 @@ github.com/prometheus/tsdb v0.7.1/go.mod h1:qhTCs0VvXwvX/y3TZrWD7rabWM+ijKTux40T github.com/rogpeppe/fastuuid v0.0.0-20150106093220-6724a57986af/go.mod h1:XWv6SoW27p1b0cqNHllgS5HIMJraePCO15w5zCzIWYg= github.com/rogpeppe/fastuuid v1.2.0/go.mod h1:jVj6XXZzXRy/MSR5jhDC/2q6DgLz+nrA6LYCDYWNEvQ= github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4= +github.com/rogpeppe/go-internal v1.6.1/go.mod h1:xXDCJY+GAPziupqXw64V24skbSoqbTEfhy4qGm1nDQc= +github.com/rogpeppe/go-internal v1.8.1 h1:geMPLpDpQOgVyCg5z5GoRwLHepNdb71NXb67XFkP+Eg= +github.com/rogpeppe/go-internal v1.8.1/go.mod h1:JeRgkft04UBgHMgCIwADu4Pn6Mtm5d4nPKWu0nJ5d+o= github.com/rs/xid v1.2.1/go.mod h1:+uKXf+4Djp6Md1KODXJxgGQPKngRmWyn10oCKFzNHOQ= github.com/rs/xid v1.3.0/go.mod h1:trrq9SKmegXys3aeAKXMUTdJsYXVwGY3RLcfgqegfbg= github.com/rs/zerolog v1.13.0/go.mod 
h1:YbFCdg8HfsridGWAh22vktObvhZbQsZXe4/zB0OKkWU= @@ -942,6 +952,7 @@ golang.org/x/sys v0.0.0-20210823070655-63515b42dcdf/go.mod h1:oPkhp1MJrh7nUepCBc golang.org/x/sys v0.0.0-20210910150752-751e447fb3d0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20210927094055-39ccf1dd6fa6/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20211019181941-9d821ace8654/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20211213223007-03aa0b5f6827/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20211216021012-1d35b9e2eb4e h1:fLOSk5Q00efkSvAm+4xcoXD+RRmLmmulPn5I3Y9F2EM= golang.org/x/sys v0.0.0-20211216021012-1d35b9e2eb4e/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/term v0.0.0-20201117132131-f5c789dd3221/go.mod h1:Nr5EML6q2oocZ2LXRh80K7BxOlk5/8JxuGnuhpl+muw= @@ -1183,6 +1194,7 @@ gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8 gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15 h1:YR8cESwS4TdDjEe65xsg0ogRM/Nc3DYOhEAlW+xobZo= gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/errgo.v2 v2.1.0 h1:0vLT13EuvQ0hNvakwLuFZ/jYrLp5F3kcWHXdRggjCE8= gopkg.in/errgo.v2 v2.1.0/go.mod h1:hNsd1EY+bozCKY1Ytp96fpM3vjJbqLJn88ws8XvfDNI= gopkg.in/inconshreveable/log15.v2 v2.0.0-20180818164646-67afb5ed74ec/go.mod h1:aPpfJ7XW+gOuirDoZ8gHhLh3kZ1B08FtV2bbmy7Jv3s= gopkg.in/ini.v1 v1.51.0/go.mod h1:pNLf8WUiyNEtQjuu5G5vTm06TEv9tsIgeAvK8hOrP4k= @@ -1214,6 +1226,8 @@ honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWh honnef.co/go/tools v0.0.1-2019.2.3/go.mod h1:a3bituU0lyd329TUQxRnasdCoJDkEUEAqEt0JzvZhAg= honnef.co/go/tools v0.0.1-2020.1.3/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k= honnef.co/go/tools v0.0.1-2020.1.4/go.mod 
h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k= +mvdan.cc/gofumpt v0.2.1 h1:7jakRGkQcLAJdT+C8Bwc9d0BANkVPSkHZkzNv07pJAs= +mvdan.cc/gofumpt v0.2.1/go.mod h1:a/rvZPhsNaedOJBzqRD9omnwVwHZsBdJirXHa9Gh9Ig= rsc.io/binaryregexp v0.2.0/go.mod h1:qTv7/COck+e2FymRvadv62gMdZztPaShugOCi3I+8D8= rsc.io/quote/v3 v3.1.0/go.mod h1:yEA65RcK8LyAZtP9Kv3t0HmxON59tX3rD+tICJqUlj0= rsc.io/sampler v1.3.0/go.mod h1:T1hPZKmBbMNahiBKFy5HrXp6adAjACjK9JXDnKaTXpA= diff --git a/internal/auth/presharedkey.go b/internal/auth/presharedkey.go index be4bd20cd8..6d5d4e4f09 100644 --- a/internal/auth/presharedkey.go +++ b/internal/auth/presharedkey.go @@ -14,7 +14,7 @@ const errInvalidPresharedKey = "invalid preshared key: %w" var errInvalidToken = errors.New("invalid token") // RequirePresharedKey requires that gRPC requests have a Bearer Token value -// equivalant to the provided preshared key. +// equivalent to the provided preshared key. func RequirePresharedKey(presharedKey string) grpcauth.AuthFunc { return func(ctx context.Context) (context.Context, error) { token, err := grpcauth.AuthFromMD(ctx, "bearer") diff --git a/internal/dashboard/dashboard.go b/internal/dashboard/dashboard.go index b7dea2c08a..3076818e94 100644 --- a/internal/dashboard/dashboard.go +++ b/internal/dashboard/dashboard.go @@ -41,7 +41,7 @@ const rootTemplate = `
{{if .IsReady }} {{if .IsEmpty}} -
To begin making API requests to SpiceDB, you'll first need to load in a Schema
that defines the permissions system.
@@ -111,14 +111,14 @@ func NewHandler(grpcAddr string, grpcTLSEnabled bool, datastoreEngine string, ds
return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
tmpl, err := template.New("root").Parse(rootTemplate)
if err != nil {
- log.Ctx(r.Context()).Error().AnErr("template-error", err).Msg("Got error when parsing template")
+ log.Ctx(r.Context()).Error().AnErr("templateError", err).Msg("Got error when parsing template")
fmt.Fprintf(w, "Internal Error")
return
}
isReady, err := ds.IsReady(r.Context())
if err != nil {
- log.Ctx(r.Context()).Error().AnErr("template-error", err).Msg("Got error when checking database")
+ log.Ctx(r.Context()).Error().AnErr("templateError", err).Msg("Got error when checking database")
fmt.Fprintf(w, "Internal Error")
return
}
@@ -140,7 +140,7 @@ func NewHandler(grpcAddr string, grpcTLSEnabled bool, datastoreEngine string, ds
nsDefs, err := ds.ListNamespaces(r.Context(), headRevision)
if err != nil {
- log.Ctx(r.Context()).Error().AnErr("datastore-error", err).Msg("Got error when trying to load namespaces")
+ log.Ctx(r.Context()).Error().AnErr("datastoreError", err).Msg("Got error when trying to load namespaces")
fmt.Fprintf(w, "Internal Error")
return
}
@@ -179,7 +179,7 @@ func NewHandler(grpcAddr string, grpcTLSEnabled bool, datastoreEngine string, ds
HasSampleSchema: hasSampleSchema,
})
if err != nil {
- log.Ctx(r.Context()).Error().AnErr("template-error", err).Msg("Got error when executing template")
+ log.Ctx(r.Context()).Error().AnErr("templateError", err).Msg("Got error when executing template")
fmt.Fprintf(w, "Internal Error")
return
}
diff --git a/internal/datastore/common/validation.go b/internal/datastore/common/validation.go
index 7e18cbdc56..a085a8f44d 100644
--- a/internal/datastore/common/validation.go
+++ b/internal/datastore/common/validation.go
@@ -18,7 +18,7 @@ func ValidateUpdatesToWrite(updates []*v1.RelationshipUpdate) error {
if update.Relationship.Subject.Object.ObjectId == tuple.PublicWildcard && update.Relationship.Subject.OptionalRelation != "" {
return fmt.Errorf(
- "Attempt to write a wildcard relationship (`%s`) with a non-empty relation. Please report this bug",
+ "attempt to write a wildcard relationship (`%s`) with a non-empty relation. Please report this bug",
tuple.RelString(update.Relationship),
)
}
diff --git a/internal/datastore/crdb/migrations/driver.go b/internal/datastore/crdb/migrations/driver.go
index e00e5a2dd3..fda21032b5 100644
--- a/internal/datastore/crdb/migrations/driver.go
+++ b/internal/datastore/crdb/migrations/driver.go
@@ -2,6 +2,7 @@ package migrations
import (
"context"
+ "errors"
"fmt"
"github.com/jackc/pgconn"
@@ -49,7 +50,8 @@ func (apd *CRDBDriver) Version() (string, error) {
var loaded string
if err := apd.db.QueryRow(context.Background(), queryLoadVersion).Scan(&loaded); err != nil {
- if pgErr, ok := err.(*pgconn.PgError); ok && pgErr.Code == postgresMissingTableErrorCode {
+ var pgErr *pgconn.PgError
+ if errors.As(err, &pgErr) && pgErr.Code == postgresMissingTableErrorCode {
return "", nil
}
return "", fmt.Errorf("unable to load alembic revision: %w", err)
diff --git a/internal/datastore/crdb/namespace.go b/internal/datastore/crdb/namespace.go
index ca4643704b..07a543295c 100644
--- a/internal/datastore/crdb/namespace.go
+++ b/internal/datastore/crdb/namespace.go
@@ -148,7 +148,7 @@ func loadNamespace(ctx context.Context, tx pgx.Tx, nsName string) (*v0.Namespace
var config []byte
var timestamp time.Time
if err := tx.QueryRow(ctx, sql, args...).Scan(&config, ×tamp); err != nil {
- if err == pgx.ErrNoRows {
+ if errors.Is(err, pgx.ErrNoRows) {
err = datastore.NewNamespaceNotFoundErr(nsName)
}
return nil, time.Time{}, err
diff --git a/internal/datastore/crdb/tuple.go b/internal/datastore/crdb/tuple.go
index 3a5e253f19..c241aed364 100644
--- a/internal/datastore/crdb/tuple.go
+++ b/internal/datastore/crdb/tuple.go
@@ -279,7 +279,7 @@ func (cds *crdbDatastore) DeleteRelationships(ctx context.Context, preconditions
}
if err := tx.QueryRow(ctx, sql, args...).Scan(&nowRevision); err != nil {
- if err == pgx.ErrNoRows {
+ if errors.Is(err, pgx.ErrNoRows) {
// CRDB doesn't return the cluster_logical_timestamp if no rows were deleted
// so we have to read it manually in the same transaction.
nowRevision, err = readCRDBNow(ctx, tx)
diff --git a/internal/datastore/crdb/watch.go b/internal/datastore/crdb/watch.go
index c7ad79e4fd..2b00d1c88b 100644
--- a/internal/datastore/crdb/watch.go
+++ b/internal/datastore/crdb/watch.go
@@ -3,6 +3,7 @@ package crdb
import (
"context"
"encoding/json"
+ "errors"
"fmt"
"sort"
@@ -16,22 +17,22 @@ const queryChangefeed = "EXPERIMENTAL CHANGEFEED FOR %s WITH updated, cursor = '
func (cds *crdbDatastore) Watch(ctx context.Context, afterRevision datastore.Revision) (<-chan *datastore.RevisionChanges, <-chan error) {
updates := make(chan *datastore.RevisionChanges, cds.watchBufferLength)
- errors := make(chan error, 1)
+ errs := make(chan error, 1)
interpolated := fmt.Sprintf(queryChangefeed, tableTuple, afterRevision)
go func() {
defer close(updates)
- defer close(errors)
+ defer close(errs)
pendingChanges := make(map[string]*datastore.RevisionChanges)
changes, err := cds.conn.Query(ctx, interpolated)
if err != nil {
- if ctx.Err() == context.Canceled {
- errors <- datastore.NewWatchCanceledErr()
+ if errors.Is(ctx.Err(), context.Canceled) {
+ errs <- datastore.NewWatchCanceledErr()
} else {
- errors <- err
+ errs <- err
}
return
}
@@ -46,10 +47,10 @@ func (cds *crdbDatastore) Watch(ctx context.Context, afterRevision datastore.Rev
var primaryKeyValuesJSON []byte
if err := changes.Scan(&unused, &primaryKeyValuesJSON, &changeJSON); err != nil {
- if ctx.Err() == context.Canceled {
- errors <- datastore.NewWatchCanceledErr()
+ if errors.Is(ctx.Err(), context.Canceled) {
+ errs <- datastore.NewWatchCanceledErr()
} else {
- errors <- err
+ errs <- err
}
return
}
@@ -60,7 +61,7 @@ func (cds *crdbDatastore) Watch(ctx context.Context, afterRevision datastore.Rev
After interface{}
}
if err := json.Unmarshal(changeJSON, &changeDetails); err != nil {
- errors <- err
+ errs <- err
return
}
@@ -68,7 +69,7 @@ func (cds *crdbDatastore) Watch(ctx context.Context, afterRevision datastore.Rev
// This entry indicates that we are ready to potentially emit some changes
resolved, err := decimal.NewFromString(changeDetails.Resolved)
if err != nil {
- errors <- err
+ errs <- err
return
}
@@ -89,7 +90,7 @@ func (cds *crdbDatastore) Watch(ctx context.Context, afterRevision datastore.Rev
select {
case updates <- change:
default:
- errors <- datastore.NewWatchDisconnectedErr()
+ errs <- datastore.NewWatchDisconnectedErr()
return
}
}
@@ -99,13 +100,13 @@ func (cds *crdbDatastore) Watch(ctx context.Context, afterRevision datastore.Rev
var pkValues [6]string
if err := json.Unmarshal(primaryKeyValuesJSON, &pkValues); err != nil {
- errors <- err
+ errs <- err
return
}
revision, err := decimal.NewFromString(changeDetails.Updated)
if err != nil {
- errors <- fmt.Errorf("malformed update timestamp: %w", err)
+ errs <- fmt.Errorf("malformed update timestamp: %w", err)
return
}
@@ -144,13 +145,13 @@ func (cds *crdbDatastore) Watch(ctx context.Context, afterRevision datastore.Rev
pending.Changes = append(pending.Changes, oneChange)
}
if changes.Err() != nil {
- if ctx.Err() == context.Canceled {
- errors <- datastore.NewWatchCanceledErr()
+ if errors.Is(ctx.Err(), context.Canceled) {
+ errs <- datastore.NewWatchCanceledErr()
} else {
- errors <- changes.Err()
+ errs <- changes.Err()
}
return
}
}()
- return updates, errors
+ return updates, errs
}
diff --git a/internal/datastore/memdb/namespace.go b/internal/datastore/memdb/namespace.go
index f2d323f769..058b374a4a 100644
--- a/internal/datastore/memdb/namespace.go
+++ b/internal/datastore/memdb/namespace.go
@@ -46,7 +46,7 @@ func (mds *memdbDatastore) WriteNamespace(
if foundRaw != nil {
// Mark the old one as deleted
- var toDelete namespace = *(foundRaw.(*namespace))
+ toDelete := *(foundRaw.(*namespace))
toDelete.deletedTxn = newVersion
if err := txn.Insert(tableNamespace, &toDelete); err != nil {
return datastore.NoRevision, fmt.Errorf(errUnableToWriteConfig, err)
@@ -144,7 +144,7 @@ func (mds *memdbDatastore) DeleteNamespace(ctx context.Context, nsName string) (
// Mark the namespace as deleted
time.Sleep(mds.simulatedLatency)
- var markedDeleted namespace = *found
+ markedDeleted := *found
markedDeleted.deletedTxn = newChangelogID
err = txn.Insert(tableNamespace, &markedDeleted)
if err != nil {
diff --git a/internal/datastore/memdb/query.go b/internal/datastore/memdb/query.go
index bff01e4b4d..826d3d633c 100644
--- a/internal/datastore/memdb/query.go
+++ b/internal/datastore/memdb/query.go
@@ -102,8 +102,8 @@ type memdbTupleIterator struct {
}
func filterFuncForFilters(optionalObjectType, optionalObjectID, optionalRelation string,
- optionalSubjectFilter *v1.SubjectFilter, usersets []*v0.ObjectAndRelation) memdb.FilterFunc {
-
+ optionalSubjectFilter *v1.SubjectFilter, usersets []*v0.ObjectAndRelation,
+) memdb.FilterFunc {
return func(tupleRaw interface{}) bool {
tuple := tupleRaw.(*relationship)
diff --git a/internal/datastore/memdb/watch.go b/internal/datastore/memdb/watch.go
index 18ef192784..17ca164c3a 100644
--- a/internal/datastore/memdb/watch.go
+++ b/internal/datastore/memdb/watch.go
@@ -2,6 +2,7 @@ package memdb
import (
"context"
+ "errors"
"fmt"
v0 "github.com/authzed/authzed-go/proto/authzed/api/v0"
@@ -15,11 +16,11 @@ const errWatchError = "watch error: %w"
func (mds *memdbDatastore) Watch(ctx context.Context, afterRevision datastore.Revision) (<-chan *datastore.RevisionChanges, <-chan error) {
updates := make(chan *datastore.RevisionChanges, mds.watchBufferLength)
- errors := make(chan error, 1)
+ errs := make(chan error, 1)
go func() {
defer close(updates)
- defer close(errors)
+ defer close(errs)
currentTxn := uint64(afterRevision.IntPart())
@@ -29,7 +30,7 @@ func (mds *memdbDatastore) Watch(ctx context.Context, afterRevision datastore.Re
var err error
stagedUpdates, currentTxn, watchChan, err = mds.loadChanges(ctx, currentTxn)
if err != nil {
- errors <- err
+ errs <- err
return
}
@@ -38,7 +39,7 @@ func (mds *memdbDatastore) Watch(ctx context.Context, afterRevision datastore.Re
select {
case updates <- changeToWrite:
default:
- errors <- datastore.NewWatchDisconnectedErr()
+ errs <- datastore.NewWatchDisconnectedErr()
return
}
}
@@ -49,18 +50,18 @@ func (mds *memdbDatastore) Watch(ctx context.Context, afterRevision datastore.Re
err = ws.WatchCtx(ctx)
if err != nil {
- switch err {
- case context.Canceled:
- errors <- datastore.NewWatchCanceledErr()
+ switch {
+ case errors.Is(err, context.Canceled):
+ errs <- datastore.NewWatchCanceledErr()
default:
- errors <- fmt.Errorf(errWatchError, err)
+ errs <- fmt.Errorf(errWatchError, err)
}
return
}
}
}()
- return updates, errors
+ return updates, errs
}
func (mds *memdbDatastore) loadChanges(ctx context.Context, currentTxn uint64) ([]*datastore.RevisionChanges, uint64, <-chan struct{}, error) {
diff --git a/internal/datastore/postgres/migrations/driver.go b/internal/datastore/postgres/migrations/driver.go
index 81406515b9..3f6f1cb7fa 100644
--- a/internal/datastore/postgres/migrations/driver.go
+++ b/internal/datastore/postgres/migrations/driver.go
@@ -1,6 +1,7 @@
package migrations
import (
+ "errors"
"fmt"
"github.com/jmoiron/sqlx"
@@ -40,7 +41,8 @@ func (apd *AlembicPostgresDriver) Version() (string, error) {
var loaded string
if err := apd.db.QueryRowx("SELECT version_num from alembic_version").Scan(&loaded); err != nil {
- if pqErr, ok := err.(*pq.Error); ok && pqErr.Code == postgresMissingTableErrorCode {
+ var pqErr *pq.Error
+ if errors.As(err, &pqErr) && pqErr.Code == postgresMissingTableErrorCode {
return "", nil
}
return "", fmt.Errorf("unable to load alembic revision: %w", err)
diff --git a/internal/datastore/postgres/namespace.go b/internal/datastore/postgres/namespace.go
index 7b4d52263a..258b4449bf 100644
--- a/internal/datastore/postgres/namespace.go
+++ b/internal/datastore/postgres/namespace.go
@@ -193,7 +193,7 @@ func loadNamespace(ctx context.Context, namespace string, tx pgx.Tx, baseQuery s
var version datastore.Revision
err = tx.QueryRow(ctx, sql, args...).Scan(&config, &version)
if err != nil {
- if err == pgx.ErrNoRows {
+ if errors.Is(err, pgx.ErrNoRows) {
err = datastore.NewNamespaceNotFoundErr(namespace)
}
return nil, datastore.NoRevision, err
diff --git a/internal/datastore/postgres/postgres.go b/internal/datastore/postgres/postgres.go
index 2740f63f3d..51651c691e 100644
--- a/internal/datastore/postgres/postgres.go
+++ b/internal/datastore/postgres/postgres.go
@@ -3,6 +3,7 @@ package postgres
import (
"context"
dbsql "database/sql"
+ "errors"
"fmt"
"math/rand"
"time"
@@ -259,7 +260,7 @@ func (pgd *pgDatastore) getNow(ctx context.Context) (time.Time, error) {
func (pgd *pgDatastore) collectGarbage() error {
startTime := time.Now()
defer func() {
- gcDurationHistogram.Observe(float64(time.Since(startTime).Seconds()))
+ gcDurationHistogram.Observe(time.Since(startTime).Seconds())
}()
ctx, cancel := context.WithTimeout(context.Background(), pgd.gcMaxOperationTime)
@@ -313,7 +314,7 @@ func (pgd *pgDatastore) collectGarbageBefore(ctx context.Context, before time.Ti
return 0, 0, err
}
- log.Ctx(ctx).Trace().Uint64("highest_transaction_id", highest).Msg("retrieved transaction ID for GC")
+ log.Ctx(ctx).Trace().Uint64("highestTransactionId", highest).Msg("retrieved transaction ID for GC")
return pgd.collectGarbageForTransaction(ctx, highest)
}
@@ -325,7 +326,7 @@ func (pgd *pgDatastore) collectGarbageForTransaction(ctx context.Context, highes
return 0, 0, err
}
- log.Ctx(ctx).Trace().Uint64("highest_transaction_id", highest).Int64("relationships_deleted", relCount).Msg("deleted stale relationships")
+ log.Ctx(ctx).Trace().Uint64("highestTransactionId", highest).Int64("relationshipsDeleted", relCount).Msg("deleted stale relationships")
gcRelationshipsClearedGauge.Set(float64(relCount))
// Delete all transaction rows with ID < the transaction ID. We don't delete the transaction
@@ -335,7 +336,7 @@ func (pgd *pgDatastore) collectGarbageForTransaction(ctx context.Context, highes
return relCount, 0, err
}
- log.Ctx(ctx).Trace().Uint64("highest_transaction_id", highest).Int64("transactions_deleted", transactionCount).Msg("deleted stale transactions")
+ log.Ctx(ctx).Trace().Uint64("highestTransactionId", highest).Int64("transactionsDeleted", transactionCount).Msg("deleted stale transactions")
gcTransactionsClearedGauge.Set(float64(transactionCount))
return relCount, transactionCount, nil
}
@@ -405,11 +406,11 @@ func (pgd *pgDatastore) OptimizedRevision(ctx context.Context) (datastore.Revisi
defer span.End()
lower, upper, err := pgd.computeRevisionRange(ctx, -1*pgd.revisionFuzzingTimedelta)
- if err != nil && err != pgx.ErrNoRows {
+ if err != nil && !errors.Is(err, pgx.ErrNoRows) {
return datastore.NoRevision, fmt.Errorf(errRevision, err)
}
- if err == pgx.ErrNoRows {
+ if errors.Is(err, pgx.ErrNoRows) {
revision, err := pgd.loadRevision(ctx)
if err != nil {
return datastore.NoRevision, err
@@ -442,7 +443,7 @@ func (pgd *pgDatastore) CheckRevision(ctx context.Context, revision datastore.Re
return nil
}
- if err != pgx.ErrNoRows {
+ if !errors.Is(err, pgx.ErrNoRows) {
return fmt.Errorf(errCheckRevision, err)
}
@@ -456,7 +457,7 @@ func (pgd *pgDatastore) CheckRevision(ctx context.Context, revision datastore.Re
err = pgd.dbpool.QueryRow(
datastore.SeparateContextWithTracing(ctx), sql, args...,
).Scan(&highest)
- if err == pgx.ErrNoRows {
+ if errors.Is(err, pgx.ErrNoRows) {
return datastore.NewInvalidRevisionErr(revision, datastore.CouldNotDetermineRevision)
}
if err != nil {
@@ -484,7 +485,7 @@ func (pgd *pgDatastore) loadRevision(ctx context.Context) (uint64, error) {
var revision uint64
err = pgd.dbpool.QueryRow(datastore.SeparateContextWithTracing(ctx), sql, args...).Scan(&revision)
if err != nil {
- if err == pgx.ErrNoRows {
+ if errors.Is(err, pgx.ErrNoRows) {
return 0, nil
}
return 0, fmt.Errorf(errRevision, err)
diff --git a/internal/datastore/postgres/watch.go b/internal/datastore/postgres/watch.go
index fe6073f18f..bbd7844e47 100644
--- a/internal/datastore/postgres/watch.go
+++ b/internal/datastore/postgres/watch.go
@@ -29,11 +29,11 @@ var queryChanged = psql.Select(
func (pgd *pgDatastore) Watch(ctx context.Context, afterRevision datastore.Revision) (<-chan *datastore.RevisionChanges, <-chan error) {
updates := make(chan *datastore.RevisionChanges, pgd.watchBufferLength)
- errors := make(chan error, 1)
+ errs := make(chan error, 1)
go func() {
defer close(updates)
- defer close(errors)
+ defer close(errs)
currentTxn := transactionFromRevision(afterRevision)
@@ -42,10 +42,10 @@ func (pgd *pgDatastore) Watch(ctx context.Context, afterRevision datastore.Revis
var err error
stagedUpdates, currentTxn, err = pgd.loadChanges(ctx, currentTxn)
if err != nil {
- if ctx.Err() == context.Canceled {
- errors <- datastore.NewWatchCanceledErr()
+ if errors.Is(ctx.Err(), context.Canceled) {
+ errs <- datastore.NewWatchCanceledErr()
} else {
- errors <- err
+ errs <- err
}
return
}
@@ -55,7 +55,7 @@ func (pgd *pgDatastore) Watch(ctx context.Context, afterRevision datastore.Revis
select {
case updates <- changeToWrite:
default:
- errors <- datastore.NewWatchDisconnectedErr()
+ errs <- datastore.NewWatchDisconnectedErr()
return
}
}
@@ -68,21 +68,20 @@ func (pgd *pgDatastore) Watch(ctx context.Context, afterRevision datastore.Revis
case <-sleep.C:
break
case <-ctx.Done():
- errors <- datastore.NewWatchCanceledErr()
+ errs <- datastore.NewWatchCanceledErr()
return
}
}
}
}()
- return updates, errors
+ return updates, errs
}
func (pgd *pgDatastore) loadChanges(
ctx context.Context,
afterRevision uint64,
) (changes []*datastore.RevisionChanges, newRevision uint64, err error) {
-
newRevision, err = pgd.loadRevision(ctx)
if err != nil {
return
diff --git a/internal/dispatch/caching/caching.go b/internal/dispatch/caching/caching.go
index e909980529..9fa4e7d1f6 100644
--- a/internal/dispatch/caching/caching.go
+++ b/internal/dispatch/caching/caching.go
@@ -20,7 +20,7 @@ const (
prometheusNamespace = "spicedb"
)
-type CachingDispatcher struct {
+type Dispatcher struct {
d dispatch.Dispatcher
c *ristretto.Cache
@@ -48,7 +48,7 @@ var (
func NewCachingDispatcher(
cacheConfig *ristretto.Config,
prometheusSubsystem string,
-) (*CachingDispatcher, error) {
+) (*Dispatcher, error) {
if cacheConfig == nil {
cacheConfig = &ristretto.Config{
NumCounters: 1e4, // number of keys to track frequency of (10k).
@@ -128,7 +128,7 @@ func NewCachingDispatcher(
}
}
- return &CachingDispatcher{fakeDelegate{}, cache, checkTotalCounter, checkFromCacheCounter, lookupTotalCounter, lookupFromCacheCounter}, nil
+ return &Dispatcher{fakeDelegate{}, cache, checkTotalCounter, checkFromCacheCounter, lookupTotalCounter, lookupFromCacheCounter}, nil
}
func registerMetricsFunc(name string, subsystem string, metricsFunc func() uint64) error {
@@ -142,12 +142,12 @@ func registerMetricsFunc(name string, subsystem string, metricsFunc func() uint6
}
// SetDelegate sets the internal delegate to the specific dispatcher instance.
-func (cd *CachingDispatcher) SetDelegate(delegate dispatch.Dispatcher) {
+func (cd *Dispatcher) SetDelegate(delegate dispatch.Dispatcher) {
cd.d = delegate
}
// DispatchCheck implements dispatch.Check interface
-func (cd *CachingDispatcher) DispatchCheck(ctx context.Context, req *v1.DispatchCheckRequest) (*v1.DispatchCheckResponse, error) {
+func (cd *Dispatcher) DispatchCheck(ctx context.Context, req *v1.DispatchCheckRequest) (*v1.DispatchCheckResponse, error) {
cd.checkTotalCounter.Inc()
requestKey := dispatch.CheckRequestToKey(req)
@@ -177,20 +177,20 @@ func (cd *CachingDispatcher) DispatchCheck(ctx context.Context, req *v1.Dispatch
}
// DispatchExpand implements dispatch.Expand interface and does not do any caching yet.
-func (cd *CachingDispatcher) DispatchExpand(ctx context.Context, req *v1.DispatchExpandRequest) (*v1.DispatchExpandResponse, error) {
+func (cd *Dispatcher) DispatchExpand(ctx context.Context, req *v1.DispatchExpandRequest) (*v1.DispatchExpandResponse, error) {
resp, err := cd.d.DispatchExpand(ctx, req)
return resp, err
}
// DispatchLookup implements dispatch.Lookup interface and does not do any caching yet.
-func (cd *CachingDispatcher) DispatchLookup(ctx context.Context, req *v1.DispatchLookupRequest) (*v1.DispatchLookupResponse, error) {
+func (cd *Dispatcher) DispatchLookup(ctx context.Context, req *v1.DispatchLookupRequest) (*v1.DispatchLookupResponse, error) {
cd.lookupTotalCounter.Inc()
requestKey := dispatch.LookupRequestToKey(req)
if cachedResultRaw, found := cd.c.Get(requestKey); found {
cachedResult := cachedResultRaw.(lookupResultEntry)
if req.Metadata.DepthRemaining >= cachedResult.response.Metadata.DepthRequired {
- log.Trace().Object("using cached lookup", req).Int("result count", len(cachedResult.response.ResolvedOnrs)).Send()
+ log.Trace().Object("cachedLookup", req).Int("resultCount", len(cachedResult.response.ResolvedOnrs)).Send()
cd.lookupFromCacheCounter.Inc()
return cachedResult.response, nil
}
@@ -200,7 +200,7 @@ func (cd *CachingDispatcher) DispatchLookup(ctx context.Context, req *v1.Dispatc
// We only want to cache the result if there was no error and nothing was excluded.
if err == nil && len(computed.Metadata.LookupExcludedDirect) == 0 && len(computed.Metadata.LookupExcludedTtu) == 0 {
- log.Trace().Object("caching lookup", req).Int("result count", len(computed.ResolvedOnrs)).Send()
+ log.Trace().Object("cachingLookup", req).Int("resultCount", len(computed.ResolvedOnrs)).Send()
adjustedComputed := proto.Clone(computed).(*v1.DispatchLookupResponse)
adjustedComputed.Metadata.CachedDispatchCount = adjustedComputed.Metadata.DispatchCount
@@ -224,9 +224,8 @@ func (cd *CachingDispatcher) DispatchLookup(ctx context.Context, req *v1.Dispatc
return computed, err
}
-func (cd *CachingDispatcher) Close() error {
- cache := cd.c
- if cache != nil {
+func (cd *Dispatcher) Close() error {
+ if cache := cd.c; cache != nil {
cache.Close()
}
diff --git a/internal/dispatch/dispatch.go b/internal/dispatch/dispatch.go
index b15bdeed88..f3dee739f3 100644
--- a/internal/dispatch/dispatch.go
+++ b/internal/dispatch/dispatch.go
@@ -55,7 +55,7 @@ type HasMetadata interface {
func CheckDepth(ctx context.Context, req HasMetadata) error {
metadata := req.GetMetadata()
if metadata == nil {
- log.Ctx(ctx).Warn().Object("req", req).Msg("request missing metadata")
+ log.Ctx(ctx).Warn().Object("request", req).Msg("request missing metadata")
return fmt.Errorf("request missing metadata")
}
diff --git a/internal/dispatch/graph/graph.go b/internal/dispatch/graph/graph.go
index e995bc9d80..9124cc67db 100644
--- a/internal/dispatch/graph/graph.go
+++ b/internal/dispatch/graph/graph.go
@@ -44,7 +44,6 @@ func NewDispatcher(
nsm namespace.Manager,
ds datastore.Datastore,
) dispatch.Dispatcher {
-
checker := graph.NewConcurrentChecker(redispatcher, ds, nsm)
expander := graph.NewConcurrentExpander(redispatcher, ds, nsm)
lookupHandler := graph.NewConcurrentLookup(redispatcher, ds, nsm)
diff --git a/internal/graph/check.go b/internal/graph/check.go
index a3d3904a28..560dc97e1e 100644
--- a/internal/graph/check.go
+++ b/internal/graph/check.go
@@ -143,7 +143,7 @@ func (cc *ConcurrentChecker) checkSetOperation(ctx context.Context, req Validate
}
}
return func(ctx context.Context, resultChan chan<- CheckResult) {
- log.Ctx(ctx).Trace().Object("set operation", req).Stringer("operation", so).Send()
+ log.Ctx(ctx).Trace().Object("setOperation", req).Stringer("operation", so).Send()
resultChan <- reducer(ctx, requests)
}
}
@@ -296,7 +296,7 @@ func any(ctx context.Context, requests []ReduceableCheckFunc) CheckResult {
for i := 0; i < len(requests); i++ {
select {
case result := <-resultChan:
- log.Ctx(ctx).Trace().Object("any result", result.Resp).Send()
+ log.Ctx(ctx).Trace().Object("anyResult", result.Resp).Send()
responseMetadata = combineResponseMetadata(responseMetadata, result.Resp.Metadata)
if result.Err == nil && result.Resp.Membership == v1.DispatchCheckResponse_MEMBER {
@@ -306,7 +306,7 @@ func any(ctx context.Context, requests []ReduceableCheckFunc) CheckResult {
return checkResultError(result.Err, result.Resp.Metadata)
}
case <-ctx.Done():
- log.Ctx(ctx).Trace().Msg("any canceled")
+ log.Ctx(ctx).Trace().Msg("anyCanceled")
return checkResultError(NewRequestCanceledErr(), responseMetadata)
}
}
diff --git a/internal/graph/expand.go b/internal/graph/expand.go
index 0917df980c..4e240ea88c 100644
--- a/internal/graph/expand.go
+++ b/internal/graph/expand.go
@@ -63,7 +63,6 @@ func (ce *ConcurrentExpander) expandDirect(
req ValidatedExpandRequest,
startBehavior startInclusion,
) ReduceableExpandFunc {
-
log.Ctx(ctx).Trace().Object("direct", req).Send()
return func(ctx context.Context, resultChan chan<- ExpandResult) {
it, err := ce.ds.QueryTuples(ctx, &v1_proto.RelationshipFilter{
@@ -184,7 +183,7 @@ func (ce *ConcurrentExpander) expandSetOperation(ctx context.Context, req Valida
func (ce *ConcurrentExpander) dispatch(req ValidatedExpandRequest) ReduceableExpandFunc {
return func(ctx context.Context, resultChan chan<- ExpandResult) {
- log.Ctx(ctx).Trace().Object("dispatch expand", req).Send()
+ log.Ctx(ctx).Trace().Object("dispatchExpand", req).Send()
result, err := ce.d.DispatchExpand(ctx, req.DispatchExpandRequest)
resultChan <- ExpandResult{result, err}
}
@@ -219,7 +218,6 @@ func (ce *ConcurrentExpander) expandComputedUserset(ctx context.Context, req Val
}
return ce.dispatch(ValidatedExpandRequest{
-
&v1.DispatchExpandRequest{
ObjectAndRelation: &v0.ObjectAndRelation{
Namespace: start.Namespace,
diff --git a/internal/graph/lookup.go b/internal/graph/lookup.go
index f9eb817fb9..91d08b602f 100644
--- a/internal/graph/lookup.go
+++ b/internal/graph/lookup.go
@@ -340,7 +340,7 @@ func (cl *ConcurrentLookup) processSetOperation(ctx context.Context, req Validat
}
}
return func(ctx context.Context, resultChan chan<- LookupResult) {
- log.Ctx(ctx).Trace().Object("set operation", req).Stringer("operation", so).Send()
+ log.Ctx(ctx).Trace().Object("setOperation", req).Stringer("operation", so).Send()
resultChan <- reducer(ctx, req, req.Limit, requests)
}
}
@@ -525,7 +525,6 @@ func (cl *ConcurrentLookup) processTupleToUserset(ctx context.Context, req Valid
func (cl *ConcurrentLookup) lookupComputed(ctx context.Context, req ValidatedLookupRequest, cu *v0.ComputedUserset) ReduceableLookupFunc {
result := lookupOne(ctx, req, cl.dispatch(ValidatedLookupRequest{
-
&v1.DispatchLookupRequest{
Subject: req.Subject,
ObjectRelation: &v0.RelationReference{
@@ -574,7 +573,7 @@ func (cl *ConcurrentLookup) lookupComputed(ctx context.Context, req ValidatedLoo
func (cl *ConcurrentLookup) dispatch(req ValidatedLookupRequest) ReduceableLookupFunc {
return func(ctx context.Context, resultChan chan<- LookupResult) {
- log.Ctx(ctx).Trace().Object("dispatch lookup", req).Send()
+ log.Ctx(ctx).Trace().Object("dispatchLookup", req).Send()
result, err := cl.d.DispatchLookup(ctx, req.DispatchLookupRequest)
resultChan <- LookupResult{result, err}
}
diff --git a/internal/middleware/consistency/consistency_test.go b/internal/middleware/consistency/consistency_test.go
index 90ec25277e..47af049ab3 100644
--- a/internal/middleware/consistency/consistency_test.go
+++ b/internal/middleware/consistency/consistency_test.go
@@ -2,6 +2,7 @@ package consistency
import (
"context"
+ "errors"
"io"
"testing"
@@ -163,9 +164,9 @@ func (s *ConsistencyTestSuite) TestValidPasses_ServerStream() {
require.NoError(err)
for {
_, err := stream.Recv()
- if err == io.EOF {
+ if errors.Is(err, io.EOF) {
break
}
- assert.NoError(s.T(), err, "no error on messages sent occured")
+ assert.NoError(s.T(), err, "no error on messages sent occurred")
}
}
diff --git a/internal/middleware/usagemetrics/usagemetrics.go b/internal/middleware/usagemetrics/usagemetrics.go
index 59f156a32d..6e5926d32f 100644
--- a/internal/middleware/usagemetrics/usagemetrics.go
+++ b/internal/middleware/usagemetrics/usagemetrics.go
@@ -20,7 +20,7 @@ var dispatchBuckets = []float64{1, 5, 10, 25, 50, 100, 250}
var dispatchedCountHistogram = promauto.NewHistogramVec(prometheus.HistogramOpts{
Namespace: "spicedb",
Subsystem: "services",
- Name: "dispatched_count_histogram",
+ Name: "dispatched_count",
- Help: "dispatch count per api call distribution in seconds.",
+ Help: "distribution of dispatch counts per API call.",
Buckets: dispatchBuckets,
}, []string{"method"})
@@ -28,7 +28,7 @@ var dispatchedCountHistogram = promauto.NewHistogramVec(prometheus.HistogramOpts
var cachedCountHistogram = promauto.NewHistogramVec(prometheus.HistogramOpts{
Namespace: "spicedb",
Subsystem: "services",
- Name: "cached_count_histogram",
+ Name: "cached_count",
- Help: "dispatches avoid by caching per api call in seconds.",
+ Help: "distribution of dispatches avoided by caching, per API call.",
Buckets: dispatchBuckets,
}, []string{"method"})
@@ -36,14 +36,14 @@ var cachedCountHistogram = promauto.NewHistogramVec(prometheus.HistogramOpts{
var dispatchedCounter = promauto.NewCounterVec(prometheus.CounterOpts{
Namespace: "spicedb",
Subsystem: "services",
- Name: "dispatched_counter",
+ Name: "dispatched_total",
Help: "dispatch counts.",
}, []string{"method"})
var cachedCounter = promauto.NewCounterVec(prometheus.CounterOpts{
Namespace: "spicedb",
Subsystem: "services",
- Name: "cached_counter",
+ Name: "cached_total",
- Help: "dispatches avoid by caching.",
+ Help: "dispatches avoided by caching.",
}, []string{"method"})
diff --git a/internal/namespace/typesystem.go b/internal/namespace/typesystem.go
index 0b73053add..dc2ca92791 100644
--- a/internal/namespace/typesystem.go
+++ b/internal/namespace/typesystem.go
@@ -264,8 +264,7 @@ func (nts *NamespaceTypeSystem) referencesWildcardType(ctx context.Context, rela
func (nts *NamespaceTypeSystem) computeReferencesWildcardType(ctx context.Context, relationName string, encountered map[string]bool) (*WildcardTypeReference, error) {
relString := fmt.Sprintf("%s#%s", nts.nsDef.Name, relationName)
- _, ok := encountered[relString]
- if ok {
+ if _, ok := encountered[relString]; ok {
return nil, nil
}
encountered[relString] = true
diff --git a/internal/services/consistency_test.go b/internal/services/consistency_test.go
index 18e0b55f89..ee07ff9193 100644
--- a/internal/services/consistency_test.go
+++ b/internal/services/consistency_test.go
@@ -29,6 +29,7 @@ import (
v1svc "github.com/authzed/spicedb/internal/services/v1"
"github.com/authzed/spicedb/internal/testfixtures"
graphpkg "github.com/authzed/spicedb/pkg/graph"
+ "github.com/authzed/spicedb/pkg/testutil"
"github.com/authzed/spicedb/pkg/tuple"
"github.com/authzed/spicedb/pkg/validationfile"
)
@@ -153,8 +154,8 @@ func runCrossVersionTests(t *testing.T,
dispatch dispatch.Dispatcher,
fullyResolved *validationfile.FullyParsedValidationFile,
tuplesPerNamespace *slicemultimap.MultiMap,
- revision decimal.Decimal) {
-
+ revision decimal.Decimal,
+) {
for _, nsDef := range fullyResolved.NamespaceDefinitions {
for _, relation := range nsDef.Relation {
verifyCrossVersion(t, "read", testers, func(tester serviceTester) (interface{}, error) {
@@ -200,7 +201,7 @@ func verifyCrossVersion(t *testing.T, name string, testers []serviceTester, runA
if result == nil {
result = value
} else {
- require.Equal(t, result, value, "Found mismatch between versions")
+ testutil.RequireEqualEmptyNil(t, result, value, "found mismatch between versions")
}
}
})
diff --git a/internal/services/dispatch/server.go b/internal/services/dispatch/server.go
index 16cdb0877f..980960a4f7 100644
--- a/internal/services/dispatch/server.go
+++ b/internal/services/dispatch/server.go
@@ -8,7 +8,6 @@ import (
"github.com/authzed/spicedb/internal/dispatch"
dispatch_v1 "github.com/authzed/spicedb/internal/services/dispatch/v1"
- v1svc "github.com/authzed/spicedb/internal/services/dispatch/v1"
)
// RegisterGrpcServices registers an internal dispatch service with the specified server.
@@ -18,7 +17,7 @@ func RegisterGrpcServices(
) {
healthSrv := grpcutil.NewAuthlessHealthServer()
healthSrv.SetServicesHealthy(
- v1svc.RegisterDispatchServer(srv, dispatch_v1.NewDispatchServer(d)),
+ dispatch_v1.RegisterDispatchServer(srv, dispatch_v1.NewDispatchServer(d)),
)
healthpb.RegisterHealthServer(srv, healthSrv)
reflection.Register(srv)
diff --git a/internal/services/servicetester.go b/internal/services/servicetester.go
index 9c320cdb4a..2ce4dc4961 100644
--- a/internal/services/servicetester.go
+++ b/internal/services/servicetester.go
@@ -2,6 +2,7 @@ package services
import (
"context"
+ "errors"
"io"
"sort"
@@ -203,7 +204,7 @@ func (v1st v1ServiceTester) Read(ctx context.Context, namespaceName string, atRe
var tuples []*v0.RelationTuple
for {
resp, err := readResp.Recv()
- if err == io.EOF {
+ if errors.Is(err, io.EOF) {
break
}
@@ -256,7 +257,7 @@ func (v1st v1ServiceTester) Lookup(ctx context.Context, resourceRelation *v0.Rel
var objectIds []string
for {
resp, err := lookupResp.Recv()
- if err == io.EOF {
+ if errors.Is(err, io.EOF) {
break
}
diff --git a/internal/services/shared/schema.go b/internal/services/shared/schema.go
index 22d29e69bf..15523331c3 100644
--- a/internal/services/shared/schema.go
+++ b/internal/services/shared/schema.go
@@ -147,8 +147,7 @@ func ErrorIfTupleIteratorReturnsTuples(ctx context.Context, qy datastore.TupleIt
}
defer qy.Close()
- rt := qy.Next()
- if rt != nil {
+ if rt := qy.Next(); rt != nil {
if qy.Err() != nil {
return qy.Err()
}
diff --git a/internal/services/v0/acl.go b/internal/services/v0/acl.go
index 158e274cdb..f39e8994bf 100644
--- a/internal/services/v0/acl.go
+++ b/internal/services/v0/acl.go
@@ -185,8 +185,7 @@ func (as *aclServer) Read(ctx context.Context, req *v0.ReadRequest) (*v0.ReadRes
return nil, rewriteACLError(ctx, err)
}
- var allTuplesetResults []*v0.ReadResponse_Tupleset
-
+ allTuplesetResults := make([]*v0.ReadResponse_Tupleset, 0, len(req.Tuplesets))
for _, tuplesetFilter := range req.Tuplesets {
queryFilter := &v1_api.RelationshipFilter{
ResourceType: tuplesetFilter.Namespace,
@@ -376,7 +375,7 @@ func (as *aclServer) Lookup(ctx context.Context, req *v0.LookupRequest) (*v0.Loo
return nil, rewriteACLError(ctx, err)
}
- var resolvedObjectIDs []string
+ resolvedObjectIDs := make([]string, 0, len(resp.ResolvedOnrs))
for _, found := range resp.ResolvedOnrs {
if found.Namespace != req.ObjectRelation.Namespace {
return nil, rewriteACLError(
@@ -421,7 +420,7 @@ func rewriteACLError(ctx context.Context, err error) error {
var relNotFoundError sharederrors.UnknownRelationError
switch {
- case err == errInvalidZookie:
+ case errors.Is(err, errInvalidZookie):
return status.Errorf(codes.InvalidArgument, "invalid argument: %s", err)
case errors.As(err, &nsNotFoundError):
@@ -448,8 +447,8 @@ func rewriteACLError(ctx context.Context, err error) error {
return status.Errorf(codes.Internal, "internal error: %s", err)
default:
- if _, ok := err.(invalidRelationError); ok {
- return status.Errorf(codes.InvalidArgument, "%s", err.Error())
+ if errors.As(err, &invalidRelationError{}) {
+ return status.Errorf(codes.InvalidArgument, "%s", err)
}
log.Ctx(ctx).Err(err)
diff --git a/internal/services/v0/acl_test.go b/internal/services/v0/acl_test.go
index 43609aaf57..fdfd115088 100644
--- a/internal/services/v0/acl_test.go
+++ b/internal/services/v0/acl_test.go
@@ -25,7 +25,6 @@ import (
"github.com/authzed/spicedb/internal/datastore/memdb"
"github.com/authzed/spicedb/internal/dispatch/graph"
"github.com/authzed/spicedb/internal/namespace"
- "github.com/authzed/spicedb/internal/testfixtures"
tf "github.com/authzed/spicedb/internal/testfixtures"
g "github.com/authzed/spicedb/pkg/graph"
ns "github.com/authzed/spicedb/pkg/namespace"
@@ -876,7 +875,7 @@ func newACLServicer(
dispatch := graph.NewLocalOnlyDispatcher(ns, ds)
lis := bufconn.Listen(1024 * 1024)
- s := testfixtures.NewTestServer()
+ s := tf.NewTestServer()
v0.RegisterACLServiceServer(s, NewACLServer(ds, ns, dispatch, 50))
go func() {
if err := s.Serve(lis); err != nil {
diff --git a/internal/services/v0/devcontext.go b/internal/services/v0/devcontext.go
index fd6a402ca2..e8a334ec07 100644
--- a/internal/services/v0/devcontext.go
+++ b/internal/services/v0/devcontext.go
@@ -139,7 +139,7 @@ func compile(schema string) ([]*v0.NamespaceDefinition, *v0.DeveloperError, erro
empty := ""
namespaces, err := compiler.Compile([]compiler.InputSchema{
{
- Source: input.InputSource("schema"),
+ Source: input.Source("schema"),
SchemaString: schema,
},
}, &empty)
@@ -168,8 +168,8 @@ func compile(schema string) ([]*v0.NamespaceDefinition, *v0.DeveloperError, erro
}
func loadTuples(ctx context.Context, tuples []*v0.RelationTuple, nsm namespace.Manager, ds datastore.Datastore, revision decimal.Decimal) (decimal.Decimal, []*v0.DeveloperError, error) {
- var errors []*v0.DeveloperError
- var updates []*v1.RelationshipUpdate
+ errors := make([]*v0.DeveloperError, 0, len(tuples))
+ updates := make([]*v1.RelationshipUpdate, 0, len(tuples))
for _, tpl := range tuples {
verr := tpl.Validate()
if verr != nil {
@@ -209,7 +209,7 @@ func loadNamespaces(
nsm namespace.Manager,
ds datastore.Datastore,
) ([]*v0.DeveloperError, decimal.Decimal, error) {
- var errors []*v0.DeveloperError
+ errors := make([]*v0.DeveloperError, 0, len(namespaces))
var lastRevision decimal.Decimal
for _, nsDef := range namespaces {
ts, terr := namespace.BuildNamespaceTypeSystemForDefs(nsDef, namespaces)
diff --git a/internal/services/v0/developer.go b/internal/services/v0/developer.go
index 84747234b4..18d9b1c8b3 100644
--- a/internal/services/v0/developer.go
+++ b/internal/services/v0/developer.go
@@ -152,7 +152,7 @@ func (ds *devServer) EditCheck(ctx context.Context, req *v0.EditCheckRequest) (*
defer devContext.dispose()
// Run the checks and store their output.
- var results []*v0.EditCheckResult
+ results := make([]*v0.EditCheckResult, 0, len(req.CheckRelationships))
for _, checkTpl := range req.CheckRelationships {
cr, err := devContext.Dispatcher.DispatchCheck(ctx, &v1.DispatchCheckRequest{
ObjectAndRelation: checkTpl.ObjectAndRelation,
@@ -313,11 +313,11 @@ func runAssertions(ctx context.Context, devContext *DevContext, assertions []val
return failures, nil
}
-func generateValidation(membershipSet *membership.MembershipSet) (string, error) {
+func generateValidation(membershipSet *membership.Set) (string, error) {
validationMap := validationfile.ValidationMap{}
subjectsByONR := membershipSet.SubjectsByONR()
- var onrStrings []string
+ onrStrings := make([]string, 0, len(subjectsByONR))
for onrString := range subjectsByONR {
onrStrings = append(onrStrings, onrString)
}
@@ -350,7 +350,7 @@ func generateValidation(membershipSet *membership.MembershipSet) (string, error)
return validationMap.AsYAML()
}
-func runValidation(ctx context.Context, devContext *DevContext, validation validationfile.ValidationMap) (*membership.MembershipSet, []*v0.DeveloperError, error) {
+func runValidation(ctx context.Context, devContext *DevContext, validation validationfile.ValidationMap) (*membership.Set, []*v0.DeveloperError, error) {
var failures []*v0.DeveloperError
membershipSet := membership.NewMembershipSet()
@@ -409,7 +409,7 @@ func runValidation(ctx context.Context, devContext *DevContext, validation valid
}
func wrapRelationships(onrStrings []string) []string {
- var wrapped []string
+ wrapped := make([]string, 0, len(onrStrings))
for _, str := range onrStrings {
wrapped = append(wrapped, fmt.Sprintf("<%s>", str))
}
diff --git a/internal/services/v0/developer_test.go b/internal/services/v0/developer_test.go
index 2d1388bca6..3b6a58a38d 100644
--- a/internal/services/v0/developer_test.go
+++ b/internal/services/v0/developer_test.go
@@ -8,6 +8,7 @@ import (
"github.com/stretchr/testify/require"
"go.uber.org/goleak"
+ "github.com/authzed/spicedb/pkg/testutil"
"github.com/authzed/spicedb/pkg/tuple"
)
@@ -212,8 +213,6 @@ func TestEditCheck(t *testing.T) {
for _, tc := range tests {
t.Run(tc.name, func(t *testing.T) {
- require := require.New(t)
-
store := NewInMemoryShareStore("flavored")
srv := NewDeveloperServer(store)
@@ -224,15 +223,14 @@ func TestEditCheck(t *testing.T) {
},
CheckRelationships: tc.checkRelationships,
})
- require.NoError(err)
+ require.NoError(t, err)
if tc.expectedError != nil {
- require.Equal(tc.expectedError, resp.RequestErrors[0])
- require.Equal(tc.expectedResults, resp.CheckResults)
+ require.Equal(t, tc.expectedError, resp.RequestErrors[0])
} else {
- require.Equal(0, len(resp.RequestErrors), "Found error(s): %v", resp.RequestErrors)
- require.Equal(tc.expectedResults, resp.CheckResults)
+ require.Len(t, resp.RequestErrors, 0, "found error(s): %v", resp.RequestErrors)
}
+ testutil.RequireEqualEmptyNil(t, tc.expectedResults, resp.CheckResults)
})
}
}
diff --git a/internal/services/v0/sharestore.go b/internal/services/v0/sharestore.go
index 5275dc47e5..b081314baa 100644
--- a/internal/services/v0/sharestore.go
+++ b/internal/services/v0/sharestore.go
@@ -6,6 +6,7 @@ import (
"crypto/sha256"
"encoding/base64"
"encoding/json"
+ "errors"
"fmt"
"io"
"io/ioutil"
@@ -163,8 +164,8 @@ func (s3s *s3ShareStore) LookupSharedByReference(reference string) (SharedDataV2
Key: aws.String(key),
})
if err != nil {
- aerr, ok := err.(awserr.Error)
- if ok && aerr.Code() == s3.ErrCodeNoSuchKey {
+ var aerr awserr.Error
+ if errors.As(err, &aerr) && aerr.Code() == s3.ErrCodeNoSuchKey {
return SharedDataV2{}, LookupNotFound, nil
}
- return SharedDataV2{}, LookupError, aerr
+ return SharedDataV2{}, LookupError, err
diff --git a/internal/services/v0/validation.go b/internal/services/v0/validation.go
index c36d6a9123..75d30bdbe1 100644
--- a/internal/services/v0/validation.go
+++ b/internal/services/v0/validation.go
@@ -23,7 +23,6 @@ func validateTupleWrite(
nsm namespace.Manager,
revision decimal.Decimal,
) error {
-
err := tuple.ValidateResourceID(tpl.ObjectAndRelation.ObjectId)
if err != nil {
return err
diff --git a/internal/services/v1/permissions.go b/internal/services/v1/permissions.go
index 4acd169ac9..67cf424b9b 100644
--- a/internal/services/v1/permissions.go
+++ b/internal/services/v1/permissions.go
@@ -125,7 +125,7 @@ func TranslateRelationshipTree(tree *v1.PermissionRelationshipTree) *v0.Relation
switch t := tree.TreeType.(type) {
case *v1.PermissionRelationshipTree_Intermediate:
- operation := v0.SetOperationUserset_INVALID
+ var operation v0.SetOperationUserset_Operation
switch t.Intermediate.Operation {
case v1.AlgebraicSubjectSet_OPERATION_EXCLUSION:
operation = v0.SetOperationUserset_EXCLUSION
@@ -134,7 +134,7 @@ func TranslateRelationshipTree(tree *v1.PermissionRelationshipTree) *v0.Relation
case v1.AlgebraicSubjectSet_OPERATION_UNION:
operation = v0.SetOperationUserset_UNION
default:
- panic("Unknown set operation")
+ panic("unknown set operation")
}
children := []*v0.RelationTupleTreeNode{}
@@ -174,15 +174,14 @@ func TranslateRelationshipTree(tree *v1.PermissionRelationshipTree) *v0.Relation
}
default:
- panic("Unknown type of expansion tree node")
+ panic("unknown type of expansion tree node")
}
}
func translateExpansionTree(node *v0.RelationTupleTreeNode) *v1.PermissionRelationshipTree {
switch t := node.NodeType.(type) {
case *v0.RelationTupleTreeNode_IntermediateNode:
- operation := v1.AlgebraicSubjectSet_OPERATION_UNSPECIFIED
-
+ var operation v1.AlgebraicSubjectSet_Operation
switch t.IntermediateNode.Operation {
case v0.SetOperationUserset_EXCLUSION:
operation = v1.AlgebraicSubjectSet_OPERATION_EXCLUSION
@@ -191,7 +190,7 @@ func translateExpansionTree(node *v0.RelationTupleTreeNode) *v1.PermissionRelati
case v0.SetOperationUserset_UNION:
operation = v1.AlgebraicSubjectSet_OPERATION_UNION
default:
- panic("Unknown set operation")
+ panic("unknown set operation")
}
var children []*v1.PermissionRelationshipTree
@@ -256,7 +255,7 @@ func translateExpansionTree(node *v0.RelationTupleTreeNode) *v1.PermissionRelati
}
default:
- panic("Unknown type of expansion tree node")
+ panic("unknown type of expansion tree node")
}
}
@@ -319,7 +318,6 @@ func (ps *permissionServer) LookupResources(req *v1.LookupResourcesRequest, resp
if err != nil {
return err
}
-
}
return nil
}
diff --git a/internal/services/v1/permissions_test.go b/internal/services/v1/permissions_test.go
index 2df1db8c46..4ec4ecc1fa 100644
--- a/internal/services/v1/permissions_test.go
+++ b/internal/services/v1/permissions_test.go
@@ -2,6 +2,7 @@ package v1
import (
"context"
+ "errors"
"fmt"
"io"
"net"
@@ -416,7 +417,7 @@ func TestLookupResources(t *testing.T) {
var resolvedObjectIds []string
for {
resp, err := lookupClient.Recv()
- if err == io.EOF {
+ if errors.Is(err, io.EOF) {
break
}
diff --git a/internal/services/v1/relationships_test.go b/internal/services/v1/relationships_test.go
index b4c5a54cbf..c032e444a7 100644
--- a/internal/services/v1/relationships_test.go
+++ b/internal/services/v1/relationships_test.go
@@ -2,6 +2,7 @@ package v1
import (
"context"
+ "errors"
"fmt"
"io"
"strings"
@@ -221,7 +222,7 @@ func TestReadRelationships(t *testing.T) {
for {
rel, err := stream.Recv()
- if err == io.EOF {
+ if errors.Is(err, io.EOF) {
break
}
@@ -817,7 +818,7 @@ func readAll(require *require.Assertions, client v1.PermissionsServiceClient, to
for {
rel, err := stream.Recv()
- if err == io.EOF {
+ if errors.Is(err, io.EOF) {
break
}
require.NoError(err)
diff --git a/internal/services/v1/schema.go b/internal/services/v1/schema.go
index 64814111e4..0726f330fc 100644
--- a/internal/services/v1/schema.go
+++ b/internal/services/v1/schema.go
@@ -54,7 +54,7 @@ func (ss *schemaServer) ReadSchema(ctx context.Context, in *v1.ReadSchemaRequest
return nil, status.Errorf(codes.NotFound, "No schema has been defined; please call WriteSchema to start")
}
- var objectDefs []string
+ objectDefs := make([]string, 0, len(nsDefs))
for _, nsDef := range nsDefs {
objectDef, _ := generator.GenerateSource(nsDef)
objectDefs = append(objectDefs, objectDef)
@@ -74,7 +74,7 @@ func (ss *schemaServer) WriteSchema(ctx context.Context, in *v1.WriteSchemaReque
}
inputSchema := compiler.InputSchema{
- Source: input.InputSource("schema"),
+ Source: input.Source("schema"),
SchemaString: in.GetSchema(),
}
@@ -95,7 +95,7 @@ func (ss *schemaServer) WriteSchema(ctx context.Context, in *v1.WriteSchemaReque
if err != nil {
return nil, rewriteSchemaError(ctx, err)
}
- log.Ctx(ctx).Trace().Interface("namespace definitions", nsdefs).Msg("compiled namespace definitions")
+ log.Ctx(ctx).Trace().Interface("namespaceDefinitions", nsdefs).Msg("compiled namespace definitions")
// For each definition, perform a diff and ensure the changes will not result in any
// relationships left without associated schema.
@@ -115,7 +115,7 @@ func (ss *schemaServer) WriteSchema(ctx context.Context, in *v1.WriteSchemaReque
existingDefMap[nsdef.Name] = false
}
- log.Ctx(ctx).Trace().Interface("namespace definitions", nsdefs).Msg("validated namespace definitions")
+ log.Ctx(ctx).Trace().Interface("namespaceDefinitions", nsdefs).Msg("validated namespace definitions")
// Ensure that deleting namespaces will not result in any relationships left without associated
// schema.
@@ -131,7 +131,7 @@ func (ss *schemaServer) WriteSchema(ctx context.Context, in *v1.WriteSchemaReque
}
// Write the new namespaces.
- var names []string
+ names := make([]string, 0, len(nsdefs))
for _, nsdef := range nsdefs {
if _, err := ss.ds.WriteNamespace(ctx, nsdef); err != nil {
return nil, rewriteSchemaError(ctx, err)
@@ -141,7 +141,7 @@ func (ss *schemaServer) WriteSchema(ctx context.Context, in *v1.WriteSchemaReque
}
// Delete the removed namespaces.
- var removedNames []string
+ removedNames := make([]string, 0, len(existingDefMap))
for nsdefName, removed := range existingDefMap {
if !removed {
continue
@@ -152,7 +152,7 @@ func (ss *schemaServer) WriteSchema(ctx context.Context, in *v1.WriteSchemaReque
removedNames = append(removedNames, nsdefName)
}
- log.Ctx(ctx).Trace().Interface("namespace definitions", nsdefs).Strs("added/changed", names).Strs("removed", removedNames).Msg("wrote namespace definitions")
+ log.Ctx(ctx).Trace().Interface("namespaceDefinitions", nsdefs).Strs("addedOrChanged", names).Strs("removed", removedNames).Msg("wrote namespace definitions")
return &v1.WriteSchemaResponse{}, nil
}
diff --git a/internal/services/v1/watch_test.go b/internal/services/v1/watch_test.go
index 529484eb47..3ddf961a86 100644
--- a/internal/services/v1/watch_test.go
+++ b/internal/services/v1/watch_test.go
@@ -2,6 +2,7 @@ package v1
import (
"context"
+ "errors"
"fmt"
"io"
"net"
@@ -130,7 +131,6 @@ func TestWatch(t *testing.T) {
require.NoError(err)
if tc.expectedCode == codes.OK {
-
updatesChan := make(chan []*v1.RelationshipUpdate, len(tc.mutations))
go func() {
@@ -146,11 +146,11 @@ func TestWatch(t *testing.T) {
resp, err := stream.Recv()
if err != nil {
errStatus, ok := status.FromError(err)
- if (ok && (errStatus.Code() == codes.Canceled || errStatus.Code() == codes.Unavailable)) || err == io.EOF {
+ if (ok && (errStatus.Code() == codes.Canceled || errStatus.Code() == codes.Unavailable)) || errors.Is(err, io.EOF) {
break
}
- panic(fmt.Errorf("received a stream read error: %v", err))
+ panic(fmt.Errorf("received a stream read error: %w", err))
}
updatesChan <- resp.Updates
@@ -186,7 +186,6 @@ func newWatchServicer(
require *require.Assertions,
ds datastore.Datastore,
) (v1.WatchServiceClient, func()) {
-
lis := bufconn.Listen(1024 * 1024)
s := testfixtures.NewTestServer()
diff --git a/internal/services/v1alpha1/schema.go b/internal/services/v1alpha1/schema.go
index 7c90d3d57e..6ec50ba5ba 100644
--- a/internal/services/v1alpha1/schema.go
+++ b/internal/services/v1alpha1/schema.go
@@ -64,7 +64,7 @@ func (ss *schemaServiceServer) ReadSchema(ctx context.Context, in *v1alpha1.Read
return nil, rewriteError(ctx, err)
}
- var objectDefs []string
+ objectDefs := make([]string, 0, len(in.GetObjectDefinitionsNames()))
createdRevisions := make(map[string]datastore.Revision, len(in.GetObjectDefinitionsNames()))
for _, objectDefName := range in.GetObjectDefinitionsNames() {
found, createdAt, err := ss.ds.ReadNamespace(ctx, objectDefName, headRevision)
@@ -97,7 +97,7 @@ func (ss *schemaServiceServer) WriteSchema(ctx context.Context, in *v1alpha1.Wri
}
inputSchema := compiler.InputSchema{
- Source: input.InputSource("schema"),
+ Source: input.Source("schema"),
SchemaString: in.GetSchema(),
}
@@ -117,7 +117,7 @@ func (ss *schemaServiceServer) WriteSchema(ctx context.Context, in *v1alpha1.Wri
return nil, rewriteError(ctx, err)
}
- log.Ctx(ctx).Trace().Interface("namespace definitions", nsdefs).Msg("compiled namespace definitions")
+ log.Ctx(ctx).Trace().Interface("namespaceDefinitions", nsdefs).Msg("compiled namespace definitions")
for _, nsdef := range nsdefs {
ts, err := namespace.BuildNamespaceTypeSystemWithFallback(nsdef, nsm, nsdefs, headRevision)
@@ -133,7 +133,7 @@ func (ss *schemaServiceServer) WriteSchema(ctx context.Context, in *v1alpha1.Wri
return nil, rewriteError(ctx, err)
}
}
- log.Ctx(ctx).Trace().Interface("namespace definitions", nsdefs).Msg("validated namespace definitions")
+ log.Ctx(ctx).Trace().Interface("namespaceDefinitions", nsdefs).Msg("validated namespace definitions")
// If a precondition was given, decode it, and verify that none of the namespaces specified
// have changed in any way.
@@ -163,10 +163,10 @@ func (ss *schemaServiceServer) WriteSchema(ctx context.Context, in *v1alpha1.Wri
}
}
- log.Trace().Interface("namespace definitions", nsdefs).Msg("checked schema revision")
+ log.Trace().Interface("namespaceDefinitions", nsdefs).Msg("checked schema revision")
}
- var names []string
+ names := make([]string, 0, len(nsdefs))
revisions := make(map[string]datastore.Revision, len(nsdefs))
for _, nsdef := range nsdefs {
revision, err := ss.ds.WriteNamespace(ctx, nsdef)
@@ -183,7 +183,7 @@ func (ss *schemaServiceServer) WriteSchema(ctx context.Context, in *v1alpha1.Wri
return nil, rewriteError(ctx, err)
}
- log.Ctx(ctx).Trace().Interface("namespace definitions", nsdefs).Str("computed revision", computedRevision).Msg("wrote namespace definitions")
+ log.Ctx(ctx).Trace().Interface("namespaceDefinitions", nsdefs).Str("computedRevision", computedRevision).Msg("wrote namespace definitions")
return &v1alpha1.WriteSchemaResponse{
ObjectDefinitionsNames: names,
diff --git a/internal/services/v1alpha1/schema_test.go b/internal/services/v1alpha1/schema_test.go
index e66bee9ebe..95e0d570cd 100644
--- a/internal/services/v1alpha1/schema_test.go
+++ b/internal/services/v1alpha1/schema_test.go
@@ -179,7 +179,7 @@ func upgrade(t *testing.T, nsdefs []*v0.NamespaceDefinition) (*v1alpha1.ReadSche
})
require.NoError(t, err)
- var nsdefNames []string
+ nsdefNames := make([]string, 0, len(nsdefs))
for _, nsdef := range nsdefs {
nsdefNames = append(nsdefNames, nsdef.Name)
}
diff --git a/internal/testfixtures/datastore.go b/internal/testfixtures/datastore.go
index 988c2252d1..0dd077adf2 100644
--- a/internal/testfixtures/datastore.go
+++ b/internal/testfixtures/datastore.go
@@ -149,7 +149,7 @@ func (tc TupleChecker) VerifyIteratorCount(iter datastore.TupleIterator, count i
foundCount := 0
for found := iter.Next(); found != nil; found = iter.Next() {
- foundCount += 1
+ foundCount++
}
tc.Require.NoError(iter.Err())
tc.Require.Equal(count, foundCount)
diff --git a/internal/testfixtures/validating.go b/internal/testfixtures/validating.go
index 96aa9a18d3..34ee7bf37f 100644
--- a/internal/testfixtures/validating.go
+++ b/internal/testfixtures/validating.go
@@ -38,8 +38,7 @@ func (vd validatingDatastore) DeleteRelationships(ctx context.Context, precondit
}
}
- err := filter.Validate()
- if err != nil {
+ if err := filter.Validate(); err != nil {
return datastore.NoRevision, err
}
@@ -82,8 +81,7 @@ func (vd validatingDatastore) Watch(ctx context.Context, afterRevision datastore
}
func (vd validatingDatastore) WriteNamespace(ctx context.Context, newConfig *v0.NamespaceDefinition) (datastore.Revision, error) {
- err := newConfig.Validate()
- if err != nil {
+ if err := newConfig.Validate(); err != nil {
return datastore.NoRevision, err
}
return vd.delegate.WriteNamespace(ctx, newConfig)
diff --git a/pkg/cmd/migrate/migrate.go b/pkg/cmd/migrate/migrate.go
index 632c77850f..759a36914b 100644
--- a/pkg/cmd/migrate/migrate.go
+++ b/pkg/cmd/migrate/migrate.go
@@ -71,7 +71,7 @@ func migrateRun(cmd *cobra.Command, args []string) error {
}
func RegisterHeadFlags(cmd *cobra.Command) {
- cmd.Flags().String("datastore-engine", "postgres", "type of datastore to initialize (e.g. postgres, cockroachdb, memory")
+ cmd.Flags().String("engine", "postgres", "type of datastore to initialize (e.g. postgres, cockroachdb, memory)")
}
func NewHeadCommand(programName string) *cobra.Command {
diff --git a/pkg/cmd/serve/devtools.go b/pkg/cmd/serve/devtools.go
index 1154500e07..530dbf9efc 100644
--- a/pkg/cmd/serve/devtools.go
+++ b/pkg/cmd/serve/devtools.go
@@ -143,7 +143,7 @@ func shareStoreFromCmd(cmd *cobra.Command) (v0svc.ShareStore, error) {
return nil, fmt.Errorf("failed to create S3 share store: %w", err)
}
- event = event.Str("endpoint", endpoint).Str("region", region).Str("bucket-name", bucketName).Str("access-key", accessKey)
+ event = event.Str("endpoint", endpoint).Str("region", region).Str("bucketName", bucketName).Str("accessKey", accessKey)
default:
return nil, errors.New("unknown share store")
diff --git a/pkg/cmd/serve/testing.go b/pkg/cmd/serve/testing.go
index 6250fc771d..780c990347 100644
--- a/pkg/cmd/serve/testing.go
+++ b/pkg/cmd/serve/testing.go
@@ -2,6 +2,7 @@ package serve
import (
"context"
+ "errors"
"fmt"
"io"
"net"
@@ -339,7 +340,7 @@ func copyStream(in grpc.ClientStream, out grpc.ServerStream) error {
// It appears that it doesn't matter what kind of proto this actually is
message := &v1.CheckPermissionResponse{}
err := in.RecvMsg(message)
- if err == io.EOF {
+ if errors.Is(err, io.EOF) {
return nil
}
diff --git a/pkg/membership/membership.go b/pkg/membership/membership.go
index 3a85379df9..2c3bb30d4d 100644
--- a/pkg/membership/membership.go
+++ b/pkg/membership/membership.go
@@ -8,15 +8,15 @@ import (
"github.com/authzed/spicedb/pkg/tuple"
)
-// MembershipSet represents the set of membership for one or more ONRs, based on expansion
+// Set represents the set of membership for one or more ONRs, based on expansion
// trees.
-type MembershipSet struct {
+type Set struct {
// objectsAndRelations is a map from an ONR (as a string) to the subjects found for that ONR.
objectsAndRelations map[string]FoundSubjects
}
// SubjectsByONR returns a map from ONR (as a string) to the FoundSubjects for that ONR.
-func (ms *MembershipSet) SubjectsByONR() map[string]FoundSubjects {
+func (ms *Set) SubjectsByONR() map[string]FoundSubjects {
return ms.objectsAndRelations
}
@@ -67,8 +67,8 @@ func (fs FoundSubject) Relationships() []*v0.ObjectAndRelation {
//
// NOTE: This is designed solely for the developer API and should *not* be used in any performance
// sensitive code.
-func NewMembershipSet() *MembershipSet {
- return &MembershipSet{
+func NewMembershipSet() *Set {
+ return &Set{
objectsAndRelations: map[string]FoundSubjects{},
}
}
@@ -76,7 +76,7 @@ func NewMembershipSet() *MembershipSet {
// AddExpansion adds the expansion of an ONR to the membership set. Returns false if the ONR was already added.
//
// NOTE: The expansion tree *should* be the fully recursive expansion.
-func (ms *MembershipSet) AddExpansion(onr *v0.ObjectAndRelation, expansion *v0.RelationTupleTreeNode) (FoundSubjects, bool, error) {
+func (ms *Set) AddExpansion(onr *v0.ObjectAndRelation, expansion *v0.RelationTupleTreeNode) (FoundSubjects, bool, error) {
onrString := tuple.StringONR(onr)
existing, ok := ms.objectsAndRelations[onrString]
if ok {
@@ -115,7 +115,7 @@ func populateFoundSubjects(foundSubjectsMap map[string]FoundSubject, rootONR *v0
case v0.SetOperationUserset_INTERSECTION:
if len(typed.IntermediateNode.ChildNodes) == 0 {
- return fmt.Errorf("Found intersection with no children")
+ return fmt.Errorf("found intersection with no children")
}
fsm := map[string]FoundSubject{}
@@ -139,7 +139,7 @@ func populateFoundSubjects(foundSubjectsMap map[string]FoundSubject, rootONR *v0
case v0.SetOperationUserset_EXCLUSION:
if len(typed.IntermediateNode.ChildNodes) == 0 {
- return fmt.Errorf("Found exclusion with no children")
+ return fmt.Errorf("found exclusion with no children")
}
fsm := map[string]FoundSubject{}
diff --git a/pkg/membership/membership_test.go b/pkg/membership/membership_test.go
index 524a14a7f5..bea4f36261 100644
--- a/pkg/membership/membership_test.go
+++ b/pkg/membership/membership_test.go
@@ -79,16 +79,15 @@ func TestMembershipSetIntersection(t *testing.T) {
require := require.New(t)
ms := NewMembershipSet()
- intersection :=
- graph.Intersection(ONR("folder", "company", "viewer"),
- graph.Leaf(_this,
- tuple.User(ONR("user", "legal", "...")),
- ),
- graph.Leaf(_this,
- tuple.User(ONR("user", "owner", "...")),
- tuple.User(ONR("user", "legal", "...")),
- ),
- )
+ intersection := graph.Intersection(ONR("folder", "company", "viewer"),
+ graph.Leaf(_this,
+ tuple.User(ONR("user", "legal", "...")),
+ ),
+ graph.Leaf(_this,
+ tuple.User(ONR("user", "owner", "...")),
+ tuple.User(ONR("user", "legal", "...")),
+ ),
+ )
fso, ok, err := ms.AddExpansion(ONR("folder", "company", "viewer"), intersection)
require.True(ok)
@@ -100,16 +99,15 @@ func TestMembershipSetExclusion(t *testing.T) {
require := require.New(t)
ms := NewMembershipSet()
- intersection :=
- graph.Exclusion(ONR("folder", "company", "viewer"),
- graph.Leaf(_this,
- tuple.User(ONR("user", "owner", "...")),
- tuple.User(ONR("user", "legal", "...")),
- ),
- graph.Leaf(_this,
- tuple.User(ONR("user", "legal", "...")),
- ),
- )
+ intersection := graph.Exclusion(ONR("folder", "company", "viewer"),
+ graph.Leaf(_this,
+ tuple.User(ONR("user", "owner", "...")),
+ tuple.User(ONR("user", "legal", "...")),
+ ),
+ graph.Leaf(_this,
+ tuple.User(ONR("user", "legal", "...")),
+ ),
+ )
fso, ok, err := ms.AddExpansion(ONR("folder", "company", "viewer"), intersection)
require.True(ok)
diff --git a/pkg/middleware/logging/context.go b/pkg/middleware/logging/context.go
index 01a1bb9bb3..1f1df67e03 100644
--- a/pkg/middleware/logging/context.go
+++ b/pkg/middleware/logging/context.go
@@ -30,7 +30,6 @@ type extractMetadata struct {
func (r *extractMetadata) ServerReporter(ctx context.Context, _ interceptors.CallMeta) (interceptors.Reporter, context.Context) {
md, ok := metadata.FromIncomingContext(ctx)
if ok {
-
fields := []string{}
logContext := log.With()
for _, field := range r.fields {
diff --git a/pkg/migrate/migrate.go b/pkg/migrate/migrate.go
index ae5ee0560b..43044123ce 100644
--- a/pkg/migrate/migrate.go
+++ b/pkg/migrate/migrate.go
@@ -147,7 +147,7 @@ func (m *Manager) HeadRevision() (string, error) {
delete(candidates, eliminateReplaces.replaces)
}
- var allHeads []string
+ allHeads := make([]string, 0, len(candidates))
for headRevision := range candidates {
allHeads = append(allHeads, headRevision)
}
diff --git a/pkg/namespace/metadata_test.go b/pkg/namespace/metadata_test.go
index de315e940b..a6074e6a0b 100644
--- a/pkg/namespace/metadata_test.go
+++ b/pkg/namespace/metadata_test.go
@@ -18,7 +18,7 @@ func TestMetadata(t *testing.T) {
})
require.Nil(err)
- marshalled_kind, err := anypb.New(&iv1.RelationMetadata{
+ marshalledKind, err := anypb.New(&iv1.RelationMetadata{
Kind: iv1.RelationMetadata_PERMISSION,
})
require.Nil(err)
@@ -30,7 +30,7 @@ func TestMetadata(t *testing.T) {
Name: "somerelation",
Metadata: &v0.Metadata{
MetadataMessage: []*anypb.Any{
- marshalled_kind, marshalled,
+ marshalledKind, marshalled,
},
},
},
diff --git a/pkg/schemadsl/compiler/compiler.go b/pkg/schemadsl/compiler/compiler.go
index c7bd0c5215..e190da57ce 100644
--- a/pkg/schemadsl/compiler/compiler.go
+++ b/pkg/schemadsl/compiler/compiler.go
@@ -14,7 +14,7 @@ import (
// InputSchema defines the input for a Compile.
type InputSchema struct {
// Source is the source of the schema being compiled.
- Source input.InputSource
+ Source input.Source
// Schema is the contents being compiled.
SchemaString string
@@ -24,7 +24,7 @@ type InputSchema struct {
type ErrorWithContext struct {
error
SourceRange input.SourceRange
- Source input.InputSource
+ Source input.Source
}
type errorWithNode struct {
@@ -96,7 +96,7 @@ func toContextError(errMessage string, node *dslNode, mapper input.PositionMappe
return ErrorWithContext{
error: fmt.Errorf("parse error in %s: %s", formattedRange, errMessage),
SourceRange: sourceRange,
- Source: input.InputSource(source),
+ Source: input.Source(source),
}
}
diff --git a/pkg/schemadsl/compiler/compiler_test.go b/pkg/schemadsl/compiler/compiler_test.go
index 4b97215a77..8b4c93ecf1 100644
--- a/pkg/schemadsl/compiler/compiler_test.go
+++ b/pkg/schemadsl/compiler/compiler_test.go
@@ -485,7 +485,7 @@ func TestCompile(t *testing.T) {
t.Run(test.name, func(t *testing.T) {
require := require.New(t)
defs, err := Compile([]InputSchema{
- {input.InputSource(test.name), test.input},
+ {input.Source(test.name), test.input},
}, test.implicitTenant)
if test.expectedError != "" {
diff --git a/pkg/schemadsl/compiler/node.go b/pkg/schemadsl/compiler/node.go
index 63cfdba04b..bba56d9b6d 100644
--- a/pkg/schemadsl/compiler/node.go
+++ b/pkg/schemadsl/compiler/node.go
@@ -15,7 +15,7 @@ type dslNode struct {
children map[string]*list.List
}
-func createAstNode(source input.InputSource, kind dslshape.NodeType) parser.AstNode {
+func createAstNode(source input.Source, kind dslshape.NodeType) parser.AstNode {
return &dslNode{
nodeType: kind,
properties: make(map[string]interface{}),
@@ -60,7 +60,7 @@ func (tn *dslNode) Range(mapper input.PositionMapper) (input.SourceRange, error)
return nil, err
}
- source := input.InputSource(sourceStr)
+ source := input.Source(sourceStr)
startRune, err := tn.GetInt(dslshape.NodePredicateStartRune)
if err != nil {
diff --git a/pkg/schemadsl/compiler/positionmapper.go b/pkg/schemadsl/compiler/positionmapper.go
index 21e6cfc408..9be7d59407 100644
--- a/pkg/schemadsl/compiler/positionmapper.go
+++ b/pkg/schemadsl/compiler/positionmapper.go
@@ -9,11 +9,11 @@ import (
type positionMapper struct {
schemas []InputSchema
- mappersBySource map[input.InputSource]input.SourcePositionMapper
+ mappersBySource map[input.Source]input.SourcePositionMapper
}
func newPositionMapper(schemas []InputSchema) input.PositionMapper {
- mappersBySource := map[input.InputSource]input.SourcePositionMapper{}
+ mappersBySource := map[input.Source]input.SourcePositionMapper{}
for _, schema := range schemas {
mappersBySource[schema.Source] = input.CreateSourcePositionMapper([]byte(schema.SchemaString))
}
@@ -24,17 +24,17 @@ func newPositionMapper(schemas []InputSchema) input.PositionMapper {
}
}
-func (pm *positionMapper) RunePositionToLineAndCol(runePosition int, source input.InputSource) (int, int, error) {
+func (pm *positionMapper) RunePositionToLineAndCol(runePosition int, source input.Source) (int, int, error) {
sourceMapper := pm.mappersBySource[source]
return sourceMapper.RunePositionToLineAndCol(runePosition)
}
-func (pm *positionMapper) LineAndColToRunePosition(lineNumber int, colPosition int, source input.InputSource) (int, error) {
+func (pm *positionMapper) LineAndColToRunePosition(lineNumber int, colPosition int, source input.Source) (int, error) {
sourceMapper := pm.mappersBySource[source]
return sourceMapper.LineAndColToRunePosition(lineNumber, colPosition)
}
-func (pm *positionMapper) TextForLine(lineNumber int, source input.InputSource) (string, error) {
+func (pm *positionMapper) TextForLine(lineNumber int, source input.Source) (string, error) {
for _, schema := range pm.schemas {
if schema.Source == source {
lines := strings.Split(schema.SchemaString, "\n")
diff --git a/pkg/schemadsl/dslshape/dslshape.go b/pkg/schemadsl/dslshape/dslshape.go
index 53657bc7d7..9474a6d9a9 100644
--- a/pkg/schemadsl/dslshape/dslshape.go
+++ b/pkg/schemadsl/dslshape/dslshape.go
@@ -1,4 +1,4 @@
-//go:generate stringer -type=NodeType -output zz_generated.nodetype_string.go
+//go:generate go run golang.org/x/tools/cmd/stringer -type=NodeType -output zz_generated.nodetype_string.go
// Package dslshape defines the types representing the structure of schema DSL.
package dslshape
diff --git a/pkg/schemadsl/generator/generator_impl.go b/pkg/schemadsl/generator/generator_impl.go
index 42f082e4a0..5bc11c0ba5 100644
--- a/pkg/schemadsl/generator/generator_impl.go
+++ b/pkg/schemadsl/generator/generator_impl.go
@@ -16,63 +16,63 @@ type sourceGenerator struct {
// ensureBlankLineOrNewScope ensures that there is a blank line or new scope at the tail of the buffer. If not,
// a new line is added.
-func (sf *sourceGenerator) ensureBlankLineOrNewScope() {
- if !sf.hasBlankline && !sf.hasNewScope {
- sf.appendLine()
+func (sg *sourceGenerator) ensureBlankLineOrNewScope() {
+ if !sg.hasBlankline && !sg.hasNewScope {
+ sg.appendLine()
}
}
// indent increases the current indentation.
-func (sf *sourceGenerator) indent() {
- sf.indentationLevel = sf.indentationLevel + 1
+func (sg *sourceGenerator) indent() {
+ sg.indentationLevel = sg.indentationLevel + 1
}
// dedent decreases the current indentation.
-func (sf *sourceGenerator) dedent() {
- sf.indentationLevel = sf.indentationLevel - 1
+func (sg *sourceGenerator) dedent() {
+ sg.indentationLevel = sg.indentationLevel - 1
}
// appendIssue adds an issue found in generation.
-func (sf *sourceGenerator) appendIssue(description string) {
- sf.append("/* ")
- sf.append(description)
- sf.append(" */")
- sf.hasIssue = true
+func (sg *sourceGenerator) appendIssue(description string) {
+ sg.append("/* ")
+ sg.append(description)
+ sg.append(" */")
+ sg.hasIssue = true
}
// append adds the given value to the buffer, indenting as necessary.
-func (sf *sourceGenerator) append(value string) {
+func (sg *sourceGenerator) append(value string) {
for _, currentRune := range value {
if currentRune == '\n' {
- if sf.hasNewline {
- sf.hasBlankline = true
+ if sg.hasNewline {
+ sg.hasBlankline = true
}
- sf.buf.WriteRune('\n')
- sf.hasNewline = true
- sf.existingLineLength = 0
+ sg.buf.WriteRune('\n')
+ sg.hasNewline = true
+ sg.existingLineLength = 0
continue
}
- sf.hasBlankline = false
- sf.hasNewScope = false
+ sg.hasBlankline = false
+ sg.hasNewScope = false
- if sf.hasNewline {
- sf.buf.WriteString(strings.Repeat("\t", sf.indentationLevel))
- sf.hasNewline = false
- sf.existingLineLength += sf.indentationLevel
+ if sg.hasNewline {
+ sg.buf.WriteString(strings.Repeat("\t", sg.indentationLevel))
+ sg.hasNewline = false
+ sg.existingLineLength += sg.indentationLevel
}
- sf.existingLineLength++
- sf.buf.WriteRune(currentRune)
+ sg.existingLineLength++
+ sg.buf.WriteRune(currentRune)
}
}
// appendLine adds a newline.
-func (sf *sourceGenerator) appendLine() {
- sf.append("\n")
+func (sg *sourceGenerator) appendLine() {
+ sg.append("\n")
}
-func (sf *sourceGenerator) markNewScope() {
- sf.hasNewScope = true
+func (sg *sourceGenerator) markNewScope() {
+ sg.hasNewScope = true
}
diff --git a/pkg/schemadsl/generator/generator_test.go b/pkg/schemadsl/generator/generator_test.go
index feb3780f7d..a2bdf62806 100644
--- a/pkg/schemadsl/generator/generator_test.go
+++ b/pkg/schemadsl/generator/generator_test.go
@@ -253,7 +253,7 @@ definition foos/document {
t.Run(test.name, func(t *testing.T) {
require := require.New(t)
defs, err := compiler.Compile([]compiler.InputSchema{{
- Source: input.InputSource(test.name),
+ Source: input.Source(test.name),
SchemaString: test.input,
}}, nil)
require.NoError(err)
diff --git a/pkg/schemadsl/input/inputsource.go b/pkg/schemadsl/input/inputsource.go
index 3acd596653..3bd043730c 100644
--- a/pkg/schemadsl/input/inputsource.go
+++ b/pkg/schemadsl/input/inputsource.go
@@ -16,31 +16,31 @@ type Position struct {
ColumnPosition int
}
-// InputSource represents the path of a source file.
-type InputSource string
+// Source represents the path of a source file.
+type Source string
// RangeForRunePosition returns a source range over this source file.
-func (is InputSource) RangeForRunePosition(runePosition int, mapper PositionMapper) SourceRange {
+func (is Source) RangeForRunePosition(runePosition int, mapper PositionMapper) SourceRange {
return is.RangeForRunePositions(runePosition, runePosition, mapper)
}
// PositionForRunePosition returns a source position over this source file.
-func (is InputSource) PositionForRunePosition(runePosition int, mapper PositionMapper) SourcePosition {
+func (is Source) PositionForRunePosition(runePosition int, mapper PositionMapper) SourcePosition {
return runeIndexedPosition{is, mapper, runePosition}
}
// PositionFromLineAndColumn returns a source position at the given line and column in this source file.
-func (is InputSource) PositionFromLineAndColumn(lineNumber int, columnPosition int, mapper PositionMapper) SourcePosition {
+func (is Source) PositionFromLineAndColumn(lineNumber int, columnPosition int, mapper PositionMapper) SourcePosition {
return lcIndexedPosition{is, mapper, Position{lineNumber, columnPosition}}
}
// RangeForRunePositions returns a source range over this source file.
-func (is InputSource) RangeForRunePositions(startRune int, endRune int, mapper PositionMapper) SourceRange {
+func (is Source) RangeForRunePositions(startRune int, endRune int, mapper PositionMapper) SourceRange {
return sourceRange{is, runeIndexedPosition{is, mapper, startRune}, runeIndexedPosition{is, mapper, endRune}}
}
// RangeForLineAndColPositions returns a source range over this source file.
-func (is InputSource) RangeForLineAndColPositions(start Position, end Position, mapper PositionMapper) SourceRange {
+func (is Source) RangeForLineAndColPositions(start Position, end Position, mapper PositionMapper) SourceRange {
return sourceRange{is, lcIndexedPosition{is, mapper, start}, lcIndexedPosition{is, mapper, end}}
}
@@ -49,20 +49,20 @@ func (is InputSource) RangeForLineAndColPositions(start Position, end Position,
type PositionMapper interface {
// RunePositionToLineAndCol converts the given 0-indexed rune position under the given source file
// into a 0-indexed line number and column position.
- RunePositionToLineAndCol(runePosition int, path InputSource) (int, int, error)
+ RunePositionToLineAndCol(runePosition int, path Source) (int, int, error)
// LineAndColToRunePosition converts the given 0-indexed line number and column position under the
// given source file into a 0-indexed rune position.
- LineAndColToRunePosition(lineNumber int, colPosition int, path InputSource) (int, error)
+ LineAndColToRunePosition(lineNumber int, colPosition int, path Source) (int, error)
// TextForLine returns the text for the specified line number.
- TextForLine(lineNumber int, path InputSource) (string, error)
+ TextForLine(lineNumber int, path Source) (string, error)
}
// SourceRange represents a range inside a source file.
type SourceRange interface {
// Source is the input source for this range.
- Source() InputSource
+ Source() Source
// Start is the starting position of the source range.
Start() SourcePosition
@@ -84,7 +84,7 @@ type SourceRange interface {
// SourcePosition represents a single position in a source file.
type SourcePosition interface {
// Source is the input source for this position.
- Source() InputSource
+ Source() Source
// RunePosition returns the 0-indexed rune position in the source file.
RunePosition() (int, error)
@@ -101,12 +101,12 @@ type SourcePosition interface {
// sourceRange implements the SourceRange interface.
type sourceRange struct {
- source InputSource
+ source Source
start SourcePosition
end SourcePosition
}
-func (sr sourceRange) Source() InputSource {
+func (sr sourceRange) Source() Source {
return sr.source
}
@@ -151,12 +151,12 @@ func (sr sourceRange) String() string {
// runeIndexedPosition implements the SourcePosition interface over a rune position.
type runeIndexedPosition struct {
- source InputSource
+ source Source
mapper PositionMapper
runePosition int
}
-func (ris runeIndexedPosition) Source() InputSource {
+func (ris runeIndexedPosition) Source() Source {
return ris.source
}
@@ -189,12 +189,12 @@ func (ris runeIndexedPosition) LineText() (string, error) {
// lcIndexedPosition implements the SourcePosition interface over a line and colu,n position.
type lcIndexedPosition struct {
- source InputSource
+ source Source
mapper PositionMapper
lcPosition Position
}
-func (lcip lcIndexedPosition) Source() InputSource {
+func (lcip lcIndexedPosition) Source() Source {
return lcip.source
}
diff --git a/pkg/schemadsl/input/sourcepositionmapper.go b/pkg/schemadsl/input/sourcepositionmapper.go
index 885288d0f9..1bca03c81a 100644
--- a/pkg/schemadsl/input/sourcepositionmapper.go
+++ b/pkg/schemadsl/input/sourcepositionmapper.go
@@ -31,9 +31,9 @@ func CreateSourcePositionMapper(contents []byte) SourcePositionMapper {
currentStart := int(0)
for index, line := range lines {
- lineEnd := currentStart + int(len(line))
- rangeTree.Put(inclusiveRange{currentStart, lineEnd}, lineAndStart{int(index), currentStart})
- lineMap[int(index)] = inclusiveRange{currentStart, lineEnd}
+ lineEnd := currentStart + len(line)
+ rangeTree.Put(inclusiveRange{currentStart, lineEnd}, lineAndStart{index, currentStart})
+ lineMap[index] = inclusiveRange{currentStart, lineEnd}
currentStart = lineEnd + 1
}
@@ -73,7 +73,7 @@ func inclusiveComparator(a, b interface{}) int {
func (spm SourcePositionMapper) RunePositionToLineAndCol(runePosition int) (int, int, error) {
ls, found := spm.rangeTree.Get(inclusiveRange{runePosition, runePosition})
if !found {
- return 0, 0, fmt.Errorf("Unknown rune position %v in source file", runePosition)
+ return 0, 0, fmt.Errorf("unknown rune position %v in source file", runePosition)
}
las := ls.(lineAndStart)
@@ -84,11 +84,11 @@ func (spm SourcePositionMapper) RunePositionToLineAndCol(runePosition int) (int,
func (spm SourcePositionMapper) LineAndColToRunePosition(lineNumber int, colPosition int) (int, error) {
lineRuneInfo, hasLine := spm.lineMap[lineNumber]
if !hasLine {
- return 0, fmt.Errorf("Unknown line %v in source file", lineNumber)
+ return 0, fmt.Errorf("unknown line %v in source file", lineNumber)
}
if colPosition > lineRuneInfo.end-lineRuneInfo.start {
- return 0, fmt.Errorf("Column position %v not found on line %v in source file", colPosition, lineNumber)
+ return 0, fmt.Errorf("column position %v not found on line %v in source file", colPosition, lineNumber)
}
return lineRuneInfo.start + colPosition, nil
diff --git a/pkg/schemadsl/lexer/lex.go b/pkg/schemadsl/lexer/lex.go
index 1ec1678a5d..a49a5b0b7d 100644
--- a/pkg/schemadsl/lexer/lex.go
+++ b/pkg/schemadsl/lexer/lex.go
@@ -15,7 +15,7 @@ import (
const EOFRUNE = -1
// createLexer creates a new scanner for the input string.
-func createLexer(source input.InputSource, input string) *Lexer {
+func createLexer(source input.Source, input string) *Lexer {
l := &Lexer{
source: source,
input: input,
@@ -84,7 +84,7 @@ type stateFn func(*Lexer) stateFn
// Lexer holds the state of the scanner.
type Lexer struct {
sync.RWMutex
- source input.InputSource // the name of the input; used only for error reports
+ source input.Source // the name of the input; used only for error reports
input string // the string being scanned
state stateFn // the next lexing function to enter
pos input.BytePosition // current position in the input
@@ -184,8 +184,7 @@ func (l *Lexer) peekValue(value string) bool {
// accept consumes the next rune if it's from the valid set.
func (l *Lexer) accept(valid string) bool {
- nextRune := l.next()
- if strings.ContainsRune(valid, nextRune) {
+ if nextRune := l.next(); strings.ContainsRune(valid, nextRune) {
return true
}
l.backup()
diff --git a/pkg/schemadsl/lexer/lex_def.go b/pkg/schemadsl/lexer/lex_def.go
index 117291b65b..13b132f646 100644
--- a/pkg/schemadsl/lexer/lex_def.go
+++ b/pkg/schemadsl/lexer/lex_def.go
@@ -1,4 +1,4 @@
-//go:generate stringer -type=TokenType
+//go:generate go run golang.org/x/tools/cmd/stringer -type=TokenType
package lexer
@@ -9,7 +9,7 @@ import (
)
// Lex creates a new scanner for the input string.
-func Lex(source input.InputSource, input string) *Lexer {
+func Lex(source input.Source, input string) *Lexer {
return createLexer(source, input)
}
@@ -207,10 +207,10 @@ func lexIdentifierOrKeyword(l *Lexer) stateFn {
l.next()
}
- _, is_keyword := keywords[l.value()]
+ _, isKeyword := keywords[l.value()]
switch {
- case is_keyword:
+ case isKeyword:
l.emit(TokenTypeKeyword)
default:
diff --git a/pkg/schemadsl/lexer/lex_test.go b/pkg/schemadsl/lexer/lex_test.go
index 15049a4ea1..5efec33110 100644
--- a/pkg/schemadsl/lexer/lex_test.go
+++ b/pkg/schemadsl/lexer/lex_test.go
@@ -139,6 +139,7 @@ var lexerTests = []lexerTest{
func TestLexer(t *testing.T) {
for _, test := range lexerTests {
t.Run(test.name, func(t *testing.T) {
+ test := test // Close over test and not the pointer that is reused.
tokens := performLex(&test)
if !equal(tokens, test.tokens) {
t.Errorf("%s: got\n\t%+v\nexpected\n\t%v", test.name, tokens, test.tokens)
@@ -148,7 +149,7 @@ func TestLexer(t *testing.T) {
}
func performLex(t *lexerTest) (tokens []Lexeme) {
- l := Lex(input.InputSource(t.name), t.input)
+ l := Lex(input.Source(t.name), t.input)
for {
token := l.nextToken()
tokens = append(tokens, token)
diff --git a/pkg/schemadsl/parser/parser.go b/pkg/schemadsl/parser/parser.go
index d661c976f4..c63c0dae8e 100644
--- a/pkg/schemadsl/parser/parser.go
+++ b/pkg/schemadsl/parser/parser.go
@@ -10,7 +10,7 @@ import (
)
// Parse parses the given Schema DSL source into a parse tree.
-func Parse(builder NodeBuilder, source input.InputSource, input string) AstNode {
+func Parse(builder NodeBuilder, source input.Source, input string) AstNode {
lx := lexer.Lex(source, input)
parser := buildParser(lx, builder, source, input)
defer parser.close()
@@ -34,7 +34,7 @@ func (p *sourceParser) consumeTopLevel() AstNode {
p.consumeToken()
if p.currentToken.Kind == lexer.TokenTypeError {
- p.emitError("%s", p.currentToken.Value)
+ p.emitErrorf("%s", p.currentToken.Value)
return rootNode
}
@@ -59,7 +59,7 @@ Loop:
rootNode.Connect(dslshape.NodePredicateChild, p.consumeDefinition())
default:
- p.emitError("Unexpected token at root level: %v", p.currentToken.Kind)
+ p.emitErrorf("Unexpected token at root level: %v", p.currentToken.Kind)
break Loop
}
}
@@ -254,7 +254,7 @@ func (p *sourceParser) consumeComputeExpression() AstNode {
binaryParser := p.buildBinaryOperatorExpressionFnTree(ComputeExpressionOperators)
found, ok := binaryParser()
if !ok {
- return p.createErrorNode("Expected compute expression for permission")
+ return p.createErrorNodef("Expected compute expression for permission")
}
return found
}
@@ -316,13 +316,14 @@ func (p *sourceParser) tryConsumeBaseExpression() (AstNode, bool) {
// Identifier.
case p.isToken(lexer.TokenTypeIdentifier):
return p.tryConsumeIdentifierLiteral()
-
}
return nil, false
}
-// tryConsumeIdentifierLiteral attempts to consume an identifer as a literal expression.
+// tryConsumeIdentifierLiteral attempts to consume an identifier as a literal
+// expression.
+//
/// ```foo```
func (p *sourceParser) tryConsumeIdentifierLiteral() (AstNode, bool) {
if !p.isToken(lexer.TokenTypeIdentifier) {
diff --git a/pkg/schemadsl/parser/parser_impl.go b/pkg/schemadsl/parser/parser_impl.go
index e01674a974..a1c18ce125 100644
--- a/pkg/schemadsl/parser/parser_impl.go
+++ b/pkg/schemadsl/parser/parser_impl.go
@@ -24,7 +24,7 @@ type AstNode interface {
}
// NodeBuilder is a function for building AST nodes.
-type NodeBuilder func(source input.InputSource, kind dslshape.NodeType) AstNode
+type NodeBuilder func(source input.Source, kind dslshape.NodeType) AstNode
// tryParserFn is a function that attempts to build an AST node.
type tryParserFn func() (AstNode, bool)
@@ -45,7 +45,7 @@ type commentedLexeme struct {
// sourceParser holds the state of the parser.
type sourceParser struct {
- source input.InputSource // the name of the input; used only for error reports
+ source input.Source // the name of the input; used only for error reports
lex *lexer.PeekableLexer // a reference to the lexer used for tokenization
builder NodeBuilder // the builder function for creating AstNode instances
nodes *nodeStack // the stack of the current nodes
@@ -54,7 +54,7 @@ type sourceParser struct {
}
// buildParser returns a new sourceParser instance.
-func buildParser(lx *lexer.Lexer, builder NodeBuilder, source input.InputSource, input string) *sourceParser {
+func buildParser(lx *lexer.Lexer, builder NodeBuilder, source input.Source, input string) *sourceParser {
l := lexer.NewPeekableLexer(lx)
return &sourceParser{
source: source,
@@ -75,8 +75,8 @@ func (p *sourceParser) createNode(kind dslshape.NodeType) AstNode {
return p.builder(p.source, kind)
}
-// createErrorNode creates a new error node and returns it.
-func (p *sourceParser) createErrorNode(format string, args ...interface{}) AstNode {
+// createErrorNodef creates a new error node and returns it.
+func (p *sourceParser) createErrorNodef(format string, args ...interface{}) AstNode {
message := fmt.Sprintf(format, args...)
node := p.startNode(dslshape.NodeTypeError).Decorate(dslshape.NodePredicateErrorMessage, message)
p.finishNode()
@@ -167,17 +167,17 @@ func (p *sourceParser) isKeyword(keyword string) bool {
return p.isToken(lexer.TokenTypeKeyword) && p.currentToken.Value == keyword
}
-// emitError creates a new error node and attachs it as a child of the current
+// emitErrorf creates a new error node and attaches it as a child of the current
// node.
-func (p *sourceParser) emitError(format string, args ...interface{}) {
- errorNode := p.createErrorNode(format, args...)
+func (p *sourceParser) emitErrorf(format string, args ...interface{}) {
+ errorNode := p.createErrorNodef(format, args...)
p.currentNode().Connect(dslshape.NodePredicateChild, errorNode)
}
// consumeKeyword consumes an expected keyword token or adds an error node.
func (p *sourceParser) consumeKeyword(keyword string) bool {
if !p.tryConsumeKeyword(keyword) {
- p.emitError("Expected keyword %s, found token %v", keyword, p.currentToken.Kind)
+ p.emitErrorf("Expected keyword %s, found token %v", keyword, p.currentToken.Kind)
return false
}
return true
@@ -197,7 +197,7 @@ func (p *sourceParser) tryConsumeKeyword(keyword string) bool {
func (p *sourceParser) consumeIdentifier() (string, bool) {
token, ok := p.tryConsume(lexer.TokenTypeIdentifier)
if !ok {
- p.emitError("Expected identifier, found token %v", p.currentToken.Kind)
+ p.emitErrorf("Expected identifier, found token %v", p.currentToken.Kind)
return "", false
}
return token.Value, true
@@ -208,7 +208,7 @@ func (p *sourceParser) consumeIdentifier() (string, bool) {
func (p *sourceParser) consume(types ...lexer.TokenType) (lexer.Lexeme, bool) {
token, ok := p.tryConsume(types...)
if !ok {
- p.emitError("Expected one of: %v, found: %v", types, p.currentToken.Kind)
+ p.emitErrorf("Expected one of: %v, found: %v", types, p.currentToken.Kind)
}
return token, ok
}
@@ -282,7 +282,7 @@ func (p *sourceParser) performLeftRecursiveParsing(subTryExprFn tryParserFn, rig
// Consume the right hand expression and build an expression node (if applicable).
exprNode, ok := rightNodeBuilder(currentLeftNode, operatorToken.Lexeme)
if !ok {
- p.emitError("Expected right hand expression, found: %v", p.currentToken.Kind)
+ p.emitErrorf("Expected right hand expression, found: %v", p.currentToken.Kind)
return currentLeftNode, true
}
@@ -308,7 +308,7 @@ func (p *sourceParser) consumeStatementTerminator() bool {
return true
}
- p.emitError("Expected end of statement or definition, found: %s", p.currentToken.Kind)
+ p.emitErrorf("Expected end of statement or definition, found: %s", p.currentToken.Kind)
return false
}
diff --git a/pkg/schemadsl/parser/parser_test.go b/pkg/schemadsl/parser/parser_test.go
index c9f2089a79..dc6167c380 100644
--- a/pkg/schemadsl/parser/parser_test.go
+++ b/pkg/schemadsl/parser/parser_test.go
@@ -45,13 +45,13 @@ func (pt *parserTest) tree() string {
}
func (pt *parserTest) writeTree(value string) {
- err := ioutil.WriteFile(fmt.Sprintf("tests/%s.zed.expected", pt.filename), []byte(value), 0o644)
+ err := ioutil.WriteFile(fmt.Sprintf("tests/%s.zed.expected", pt.filename), []byte(value), 0o600)
if err != nil {
panic(err)
}
}
-func createAstNode(source input.InputSource, kind dslshape.NodeType) AstNode {
+func createAstNode(source input.Source, kind dslshape.NodeType) AstNode {
return &testNode{
nodeType: kind,
properties: make(map[string]interface{}),
@@ -112,7 +112,7 @@ func TestParser(t *testing.T) {
for _, test := range parserTests {
t.Run(test.name, func(t *testing.T) {
- root := Parse(createAstNode, input.InputSource(test.name), test.input())
+ root := Parse(createAstNode, input.Source(test.name), test.input())
parseTree := getParseTree((root).(*testNode), 0)
assert := assert.New(t)
diff --git a/pkg/testutil/require.go b/pkg/testutil/require.go
new file mode 100644
index 0000000000..6059d3b996
--- /dev/null
+++ b/pkg/testutil/require.go
@@ -0,0 +1,30 @@
+// Package testutil implements various utilities to reduce boilerplate in unit
+// tests a la testify.
+package testutil
+
+import (
+ "reflect"
+ "testing"
+
+ "github.com/stretchr/testify/require"
+)
+
+// RequireEqualEmptyNil is a version of require.Equal, but considers nil
+// slices/maps to be equal to empty slices/maps.
+func RequireEqualEmptyNil(t *testing.T, expected, actual interface{}, msgAndArgs ...interface{}) {
+ expectedVal := reflect.ValueOf(expected)
+ actualVal := reflect.ValueOf(actual)
+
+ if hasLength(expectedVal) && hasLength(actualVal) && expectedVal.Len() == 0 && actualVal.Len() == 0 {
+ return
+ }
+ require.Equal(t, expected, actual, msgAndArgs...)
+}
+
+func hasLength(v reflect.Value) bool {
+ switch v.Kind() {
+ case reflect.Array, reflect.Slice, reflect.Map:
+ return true
+ }
+ return false
+}
diff --git a/pkg/tuple/onr.go b/pkg/tuple/onr.go
index 8031a12a5c..d60b3aeabc 100644
--- a/pkg/tuple/onr.go
+++ b/pkg/tuple/onr.go
@@ -92,7 +92,7 @@ func StringONR(onr *v0.ObjectAndRelation) string {
// StringsONRs converts ONR objects to a string slice, sorted.
func StringsONRs(onrs []*v0.ObjectAndRelation) []string {
- var onrstrings []string
+ onrstrings := make([]string, 0, len(onrs))
for _, onr := range onrs {
onrstrings = append(onrstrings, StringONR(onr))
}
diff --git a/pkg/tuple/onrset.go b/pkg/tuple/onrset.go
index d3fa7a1bec..22caf53f42 100644
--- a/pkg/tuple/onrset.go
+++ b/pkg/tuple/onrset.go
@@ -37,8 +37,7 @@ func (ons *ONRSet) Has(onr *v0.ObjectAndRelation) bool {
// Add adds the given ONR to the set. Returns true if the object was not in the set before this
// call and false otherwise.
func (ons *ONRSet) Add(onr *v0.ObjectAndRelation) bool {
- _, ok := ons.onrs[StringONR(onr)]
- if ok {
+ if _, ok := ons.onrs[StringONR(onr)]; ok {
return false
}
diff --git a/pkg/validationfile/fileformat.go b/pkg/validationfile/fileformat.go
index 7b4c68960c..7b0f86bcd0 100644
--- a/pkg/validationfile/fileformat.go
+++ b/pkg/validationfile/fileformat.go
@@ -18,29 +18,32 @@ import (
// NOTE: This struct does not contain the `validation` block produced
// by the playground, as it is currently unused in Go-side code.
//
-// Parsing for those blocks' *contents* can be found in this module, since they are parsed
-// by the developer API.
+// Parsing for those blocks' *contents* can be found in this module, since they
+// are parsed by the developer API.
type ValidationFile struct {
- // Schema is the defined schema, in DSL format. Optional if at least one NamespaceConfig is specified.
+ // Schema is the defined schema, in DSL format. Optional if at least one
+ // NamespaceConfig is specified.
Schema string `yaml:"schema"`
- // Relationships are the validation relationships, as a single string of newline separated tuple
- // string syntax. Optional if ValidationTuples is specified.
+ // Relationships are the validation relationships, as a single string of
+ // newline separated tuple string syntax.
+ // Optional if ValidationTuples is specified.
Relationships string `yaml:"relationships"`
- // NamespaceConfigs are the namespace configuration protos, in text format. Optional if Schema
- // is specified.
+ // NamespaceConfigs are the namespace configuration protos, in text format.
+ // Optional if Schema is specified.
NamespaceConfigs []string `yaml:"namespace_configs"`
- // ValidationTuples are the validation tuples, in tuple string syntax. Optional if Relationships
- // are specified.
+ // ValidationTuples are the validation tuples, in tuple string syntax.
+ // Optional if Relationships are specified.
ValidationTuples []string `yaml:"validation_tuples"`
// Assertions are the (optional) assertions for the validation file.
Assertions SimpleAssertions `yaml:"assertions"`
}
-// ErrorWithSource is an error that includes the source text and position information.
+// ErrorWithSource is an error that includes the source text and position
+// information.
type ErrorWithSource struct {
error
@@ -50,7 +53,8 @@ type ErrorWithSource struct {
// LineNumber is the (1-indexed) line number of the error, or 0 if unknown.
LineNumber uint32
- // ColumnPosition is the (1-indexed) column position of the error, or 0 if unknown.
+ // ColumnPosition is the (1-indexed) column position of the error, or 0 if
+ // unknown.
ColumnPosition uint32
}
@@ -70,7 +74,8 @@ func ParseValidationBlock(contents []byte) (ValidationMap, error) {
return block, err
}
-// ParseAssertionsBlock attempts to parse the given contents as a YAML assertions block.
+// ParseAssertionsBlock attempts to parse the given contents as a YAML
+// assertions block.
func ParseAssertionsBlock(contents []byte) (Assertions, error) {
var node yamlv3.Node
err := yamlv3.Unmarshal(contents, &node)
@@ -119,8 +124,8 @@ func ParseAssertionsBlock(contents []byte) (Assertions, error) {
return parsed, nil
}
-// ValidationMap is a map from an Object Relation (as a Relationship) to the validation strings containing
-// the Subjects for that Object Relation.
+// ValidationMap is a map from an Object Relation (as a Relationship) to the
+// validation strings containing the Subjects for that Object Relation.
type ValidationMap map[ObjectRelationString][]ValidationString
// AsYAML returns the ValidationMap in its YAML form.
@@ -129,11 +134,12 @@ func (vm ValidationMap) AsYAML() (string, error) {
return string(data), err
}
-// ObjectRelationString represents an ONR defined as a string in the key for the ValidationMap.
+// ObjectRelationString represents an ONR defined as a string in the key for
+// the ValidationMap.
type ObjectRelationString string
-// ONR returns the ObjectAndRelation parsed from this string, if valid, or an error on failure
-// to parse.
+// ONR returns the ObjectAndRelation parsed from this string, if valid, or an
+// error on failure to parse.
func (ors ObjectRelationString) ONR() (*v0.ObjectAndRelation, *ErrorWithSource) {
parsed := tuple.ParseONR(string(ors))
if parsed == nil {
@@ -147,8 +153,8 @@ var (
vsObjectAndRelationRegex = regexp.MustCompile(`(.*?)<(?P