
model datastore cli options
ecordell committed Dec 6, 2021
1 parent 1015f37 commit 3dbe72f
Showing 6 changed files with 272 additions and 84 deletions.
105 changes: 22 additions & 83 deletions cmd/spicedb/serve.go
@@ -8,7 +8,6 @@ import (
"syscall"
"time"

"github.com/alecthomas/units"
"github.com/fatih/color"
grpcauth "github.com/grpc-ecosystem/go-grpc-middleware/auth"
grpczerolog "github.com/grpc-ecosystem/go-grpc-middleware/providers/zerolog/v2"
@@ -24,17 +23,14 @@ import (
"github.com/authzed/spicedb/internal/auth"
"github.com/authzed/spicedb/internal/dashboard"
"github.com/authzed/spicedb/internal/datastore"
"github.com/authzed/spicedb/internal/datastore/common"
"github.com/authzed/spicedb/internal/datastore/crdb"
"github.com/authzed/spicedb/internal/datastore/memdb"
"github.com/authzed/spicedb/internal/datastore/postgres"
"github.com/authzed/spicedb/internal/datastore/proxy"
combineddispatch "github.com/authzed/spicedb/internal/dispatch/combined"
"github.com/authzed/spicedb/internal/gateway"
"github.com/authzed/spicedb/internal/middleware/servicespecific"
"github.com/authzed/spicedb/internal/namespace"
"github.com/authzed/spicedb/internal/services"
v1alpha1svc "github.com/authzed/spicedb/internal/services/v1alpha1"
"github.com/authzed/spicedb/pkg/cmd/serve"
logmw "github.com/authzed/spicedb/pkg/middleware/logging"
"github.com/authzed/spicedb/pkg/middleware/requestid"
"github.com/authzed/spicedb/pkg/validationfile"
@@ -66,26 +62,11 @@ func registerServeCmd(rootCmd *cobra.Command) {
}

// Flags for the datastore
serveCmd.Flags().String("datastore-engine", "memory", `type of datastore to initialize ("memory", "postgres", "cockroachdb")`)
serveCmd.Flags().String("datastore-conn-uri", "", `connection string used by remote datastores (e.g. "postgres://postgres:password@localhost:5432/spicedb")`)
var datastoreOptions serve.Options
serve.RegisterDatastoreFlags(serveCmd, &datastoreOptions)
serveCmd.Flags().Bool("datastore-readonly", false, "set the service to read-only mode")
serveCmd.Flags().Int("datastore-conn-max-open", 20, "number of concurrent connections open in a remote datastore's connection pool")
serveCmd.Flags().Int("datastore-conn-min-open", 10, "number of minimum concurrent connections open in a remote datastore's connection pool")
serveCmd.Flags().Duration("datastore-conn-max-lifetime", 30*time.Minute, "maximum amount of time a connection can live in a remote datastore's connection pool")
serveCmd.Flags().Duration("datastore-conn-max-idletime", 30*time.Minute, "maximum amount of time a connection can idle in a remote datastore's connection pool")
serveCmd.Flags().Duration("datastore-conn-healthcheck-interval", 30*time.Second, "time between a remote datastore's connection pool health checks")
serveCmd.Flags().Duration("datastore-gc-window", 24*time.Hour, "amount of time before revisions are garbage collected")
serveCmd.Flags().Duration("datastore-gc-interval", 3*time.Minute, "amount of time between passes of garbage collection (postgres driver only)")
serveCmd.Flags().Duration("datastore-gc-max-operation-time", 1*time.Minute, "maximum amount of time a garbage collection pass can operate before timing out (postgres driver only)")
serveCmd.Flags().Duration("datastore-revision-fuzzing-duration", 5*time.Second, "amount of time to advertize stale revisions")
// See crdb doc for info about follower reads and how it is configured: https://www.cockroachlabs.com/docs/stable/follower-reads.html
serveCmd.Flags().Duration("datastore-follower-read-delay-duration", 4_800*time.Millisecond, "amount of time to subtract from non-sync revision timestamps to ensure they are sufficiently in the past to enable follower reads (cockroach driver only)")
serveCmd.Flags().String("datastore-query-split-size", common.DefaultSplitAtEstimatedQuerySize.String(), "estimated number of bytes at which a query is split when using a remote datastore")
serveCmd.Flags().StringSlice("datastore-bootstrap-files", []string{}, "bootstrap data yaml files to load")
serveCmd.Flags().Bool("datastore-bootstrap-overwrite", false, "overwrite any existing data with bootstrap data")
serveCmd.Flags().Int("datastore-max-tx-retries", 50, "number of times a retriable transaction should be retried (cockroach driver only)")
serveCmd.Flags().String("datastore-tx-overlap-strategy", "static", `strategy to generate transaction overlap keys ("prefix", "static", "insecure") (cockroach driver only)`)
serveCmd.Flags().String("datastore-tx-overlap-key", "key", "static key to touch when writing to ensure transactions overlap (only used if --datastore-tx-overlap-strategy=static is set; cockroach driver only)")

serveCmd.Flags().Bool("datastore-request-hedging", true, "enable request hedging")
serveCmd.Flags().Duration("datastore-request-hedging-initial-slow-value", 10*time.Millisecond, "initial value to use for slow datastore requests, before statistics have been collected")
@@ -128,68 +109,26 @@ func serveRun(cmd *cobra.Command, args []string) {
}

datastoreEngine := cobrautil.MustGetStringExpanded(cmd, "datastore-engine")
datastoreURI := cobrautil.MustGetStringExpanded(cmd, "datastore-conn-uri")

revisionFuzzingTimedelta := cobrautil.MustGetDuration(cmd, "datastore-revision-fuzzing-duration")
gcWindow := cobrautil.MustGetDuration(cmd, "datastore-gc-window")
maxRetries := cobrautil.MustGetInt(cmd, "datastore-max-tx-retries")
overlapKey := cobrautil.MustGetStringExpanded(cmd, "datastore-tx-overlap-key")
overlapStrategy := cobrautil.MustGetStringExpanded(cmd, "datastore-tx-overlap-strategy")

splitQuerySize, err := units.ParseBase2Bytes(cobrautil.MustGetStringExpanded(cmd, "datastore-query-split-size"))
ds, err := serve.NewDatastore(
datastore.Engine(datastoreEngine),
serve.WithRevisionQuantization(cobrautil.MustGetDuration(cmd, "datastore-revision-fuzzing-duration")),
serve.WithGCWindow(cobrautil.MustGetDuration(cmd, "datastore-gc-window")),
serve.WithURI(cobrautil.MustGetStringExpanded(cmd, "datastore-conn-uri")),
serve.WithMaxIdleTime(cobrautil.MustGetDuration(cmd, "datastore-conn-max-idletime")),
serve.WithMaxLifetime(cobrautil.MustGetDuration(cmd, "datastore-conn-max-lifetime")),
serve.WithMaxOpenConns(cobrautil.MustGetInt(cmd, "datastore-conn-max-open")),
serve.WithMinOpenConns(cobrautil.MustGetInt(cmd, "datastore-conn-min-open")),
serve.WithSplitQuerySize(cobrautil.MustGetStringExpanded(cmd, "datastore-query-split-size")),
serve.WithFollowerReadDelay(cobrautil.MustGetDuration(cmd, "datastore-follower-read-delay-duration")),
serve.WithMaxRetries(cobrautil.MustGetInt(cmd, "datastore-max-tx-retries")),
serve.WithOverlapKey(cobrautil.MustGetStringExpanded(cmd, "datastore-tx-overlap-key")),
serve.WithOverlapStrategy(cobrautil.MustGetStringExpanded(cmd, "datastore-tx-overlap-strategy")),
serve.WithHealthCheckPeriod(cobrautil.MustGetDuration(cmd, "datastore-conn-healthcheck-interval")),
serve.WithGCInterval(cobrautil.MustGetDuration(cmd, "datastore-gc-interval")),
serve.WithGCMaxOperationTime(cobrautil.MustGetDuration(cmd, "datastore-gc-max-operation-time")),
)
if err != nil {
log.Fatal().Err(err).Msg("failed to parse datastore-query-split-size")
}

var ds datastore.Datastore
if datastoreEngine == "memory" {
log.Info().Msg("using in-memory datastore")
log.Warn().Msg("in-memory datastore is not persistent and not feasible to run in a high availability fashion")
ds, err = memdb.NewMemdbDatastore(0, revisionFuzzingTimedelta, gcWindow, 0)
if err != nil {
log.Fatal().Err(err).Msg("failed to init datastore")
}
} else if datastoreEngine == "cockroachdb" {
log.Info().Msg("using cockroachdb datastore")
ds, err = crdb.NewCRDBDatastore(
datastoreURI,
crdb.ConnMaxIdleTime(cobrautil.MustGetDuration(cmd, "datastore-conn-max-idletime")),
crdb.ConnMaxLifetime(cobrautil.MustGetDuration(cmd, "datastore-conn-max-lifetime")),
crdb.MaxOpenConns(cobrautil.MustGetInt(cmd, "datastore-conn-max-open")),
crdb.MinOpenConns(cobrautil.MustGetInt(cmd, "datastore-conn-min-open")),
crdb.RevisionQuantization(revisionFuzzingTimedelta),
crdb.FollowerReadDelay(cobrautil.MustGetDuration(cmd, "datastore-follower-read-delay-duration")),
crdb.GCWindow(gcWindow),
crdb.MaxRetries(maxRetries),
crdb.SplitAtEstimatedQuerySize(splitQuerySize),
crdb.OverlapKey(overlapKey),
crdb.OverlapStrategy(overlapStrategy),
)
if err != nil {
log.Fatal().Err(err).Msg("failed to init datastore")
}
} else if datastoreEngine == "postgres" {
log.Info().Msg("using postgres datastore")
ds, err = postgres.NewPostgresDatastore(
datastoreURI,
postgres.ConnMaxIdleTime(cobrautil.MustGetDuration(cmd, "datastore-conn-max-idletime")),
postgres.ConnMaxLifetime(cobrautil.MustGetDuration(cmd, "datastore-conn-max-lifetime")),
postgres.HealthCheckPeriod(cobrautil.MustGetDuration(cmd, "datastore-conn-healthcheck-interval")),
postgres.MaxOpenConns(cobrautil.MustGetInt(cmd, "datastore-conn-max-open")),
postgres.MinOpenConns(cobrautil.MustGetInt(cmd, "datastore-conn-min-open")),
postgres.RevisionFuzzingTimedelta(revisionFuzzingTimedelta),
postgres.GCInterval(cobrautil.MustGetDuration(cmd, "datastore-gc-interval")),
postgres.GCMaxOperationTime(cobrautil.MustGetDuration(cmd, "datastore-gc-max-operation-time")),
postgres.GCWindow(gcWindow),
postgres.EnablePrometheusStats(),
postgres.EnableTracing(),
postgres.SplitAtEstimatedQuerySize(splitQuerySize),
)
if err != nil {
log.Fatal().Err(err).Msg("failed to init datastore")
}
} else {
log.Fatal().Str("datastore-engine", datastoreEngine).Msg("unknown datastore engine type")
log.Fatal().Err(err).Msg("failed to init datastore")
}

bootstrapFilePaths := cobrautil.MustGetStringSlice(cmd, "datastore-bootstrap-files")
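
The pkg/cmd/serve package that provides RegisterDatastoreFlags, Options, RegisterEngine, and NewDatastore is not among the hunks shown here (it is presumably the sixth changed file). Below is a minimal sketch of the registry those calls imply; the Options fields, the EngineBuilder type, the builders map, and the Option/WithURI helpers are assumptions inferred from the call sites, not the actual implementation.

package serve

import (
    "fmt"
    "time"

    "github.com/authzed/spicedb/internal/datastore"
)

// Options collects the values of the datastore-* flags; only a few fields are
// shown, inferred from the With* calls in serve.go and the opts.* reads in the
// engine packages.
type Options struct {
    URI                  string
    GCWindow             time.Duration
    RevisionQuantization time.Duration
    SplitQuerySize       string
    // ... remaining connection-pool, GC, retry, and overlap settings
}

// Option applies one flag value to the shared Options struct.
type Option func(*Options)

// WithURI is one example of the With* helpers used by cmd/spicedb/serve.go.
func WithURI(uri string) Option {
    return func(o *Options) { o.URI = uri }
}

// EngineBuilder constructs a datastore from the shared option set.
type EngineBuilder func(opts Options) (datastore.Datastore, error)

var builders = map[datastore.Engine]EngineBuilder{}

// RegisterEngine is called from each engine package's init() so the CLI no
// longer needs to import and switch over every driver itself.
func RegisterEngine(engine datastore.Engine, builder EngineBuilder) {
    builders[engine] = builder
}

// NewDatastore applies the options and delegates to the builder registered
// for the requested engine.
func NewDatastore(engine datastore.Engine, opts ...Option) (datastore.Datastore, error) {
    builder, ok := builders[engine]
    if !ok {
        return nil, fmt.Errorf("unknown datastore engine type: %q", engine)
    }
    var options Options
    for _, opt := range opts {
        opt(&options)
    }
    return builder(options)
}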
28 changes: 28 additions & 0 deletions internal/datastore/crdb/crdb.go
@@ -18,6 +18,7 @@ import (

"github.com/authzed/spicedb/internal/datastore"
"github.com/authzed/spicedb/internal/datastore/crdb/migrations"
"github.com/authzed/spicedb/pkg/cmd/serve"
)

var (
@@ -49,8 +50,35 @@ const (
querySelectNow = "SELECT cluster_logical_timestamp()"
queryReturningTimestamp = "RETURNING cluster_logical_timestamp()"
queryShowZoneConfig = "SHOW ZONE CONFIGURATION FOR RANGE default;"

cockroachEngine datastore.Engine = "cockroach"
)

func init() {
serve.RegisterEngine(cockroachEngine, newFromDatastoreOptions)
}

func newFromDatastoreOptions(opts serve.Options) (datastore.Datastore, error) {
splitQuerySize, err := units.ParseBase2Bytes(opts.SplitQuerySize)
if err != nil {
return nil, fmt.Errorf("failed to parse split query size: %w", err)
}
return NewCRDBDatastore(
opts.URI,
GCWindow(opts.GCWindow),
RevisionQuantization(opts.RevisionQuantization),
ConnMaxIdleTime(opts.MaxIdleTime),
ConnMaxLifetime(opts.MaxLifetime),
MaxOpenConns(opts.MaxOpenConns),
MinOpenConns(opts.MinOpenConns),
SplitAtEstimatedQuerySize(splitQuerySize),
FollowerReadDelay(opts.FollowerReadDelay),
MaxRetries(opts.MaxRetries),
OverlapKey(opts.OverlapKey),
OverlapStrategy(opts.OverlapStrategy),
)
}

// NewCRDBDatastore initializes a SpiceDB datastore that uses a CockroachDB
// database while leveraging its AOST functionality.
func NewCRDBDatastore(url string, options ...Option) (datastore.Datastore, error) {
5 changes: 4 additions & 1 deletion internal/datastore/datastore.go
@@ -80,7 +80,7 @@ type GraphDatastore interface {
// from a subject relation onward from the datastore.
ReverseQueryTuplesFromSubjectRelation(subjectNamespace, subjectRelation string, revision Revision) ReverseTupleQuery

// ReverseQueryTuplesFromSubjectNamespace creates a builder for reading
//  ReverseQueryTuplesFromSubjectNamespace creates a builder for reading
// tuples from a subject namespace onward from the datastore.
ReverseQueryTuplesFromSubjectNamespace(subjectNamespace string, revision Revision) ReverseTupleQuery

@@ -150,3 +150,6 @@ type Revision = decimal.Decimal
// revision type in the future a bit easier if necessary. Implementations
// should use any time they want to signal an empty/error revision.
var NoRevision Revision

// Engine represents the type of a datastore engine, e.g. Cockroach or Postgres.
type Engine string
13 changes: 13 additions & 0 deletions internal/datastore/memdb/memdb.go
@@ -11,11 +11,24 @@ import (
v1 "github.com/authzed/authzed-go/proto/authzed/api/v1"
"github.com/hashicorp/go-memdb"
"github.com/jzelinskie/stringz"
"github.com/rs/zerolog/log"
"github.com/shopspring/decimal"

"github.com/authzed/spicedb/internal/datastore"
"github.com/authzed/spicedb/pkg/cmd/serve"
)

const memoryEngine datastore.Engine = "memory"

func init() {
serve.RegisterEngine(memoryEngine, newFromDatastoreOptions)
}

func newFromDatastoreOptions(opts serve.Options) (datastore.Datastore, error) {
log.Warn().Msg("in-memory datastore is not persistent and not feasible to run in a high availability fashion")
return NewMemdbDatastore(0, opts.RevisionQuantization, opts.GCWindow, 0)
}

// DisableGC is a convenient constant for setting the garbage collection
// interval high enough that it will never run.
const DisableGC = time.Duration(math.MaxInt64)
26 changes: 26 additions & 0 deletions internal/datastore/postgres/postgres.go
@@ -24,6 +24,7 @@ import (

"github.com/authzed/spicedb/internal/datastore"
"github.com/authzed/spicedb/internal/datastore/postgres/migrations"
"github.com/authzed/spicedb/pkg/cmd/serve"
)

const (
@@ -55,6 +56,8 @@ const (
tracingDriverName = "postgres-tracing"

batchDeleteSize = 1000

postgresEngine datastore.Engine = "postgres"
)

var (
@@ -83,6 +86,29 @@ var (

func init() {
dbsql.Register(tracingDriverName, sqlmw.Driver(stdlib.GetDefaultDriver(), new(traceInterceptor)))
serve.RegisterEngine(postgresEngine, newFromDatastoreOptions)
}

func newFromDatastoreOptions(opts serve.Options) (datastore.Datastore, error) {
splitQuerySize, err := units.ParseBase2Bytes(opts.SplitQuerySize)
if err != nil {
return nil, fmt.Errorf("failed to parse split query size: %w", err)
}
return NewPostgresDatastore(
opts.URI,
GCWindow(opts.GCWindow),
RevisionFuzzingTimedelta(opts.RevisionQuantization),
ConnMaxIdleTime(opts.MaxIdleTime),
ConnMaxLifetime(opts.MaxLifetime),
MaxOpenConns(opts.MaxOpenConns),
MinOpenConns(opts.MinOpenConns),
SplitAtEstimatedQuerySize(splitQuerySize),
HealthCheckPeriod(opts.HealthCheckPeriod),
GCInterval(opts.GCInterval),
GCMaxOperationTime(opts.GCMaxOperationTime),
EnablePrometheusStats(),
EnableTracing(),
)
}

var (
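
With each engine registering itself at init time, adding a new datastore no longer requires touching cmd/spicedb/serve.go. As an illustration only, a hypothetical mysql engine would follow the same pattern as the crdb, memdb, and postgres changes above; the package, engine name, and NewMySQLDatastore constructor below are invented for this sketch and are not part of this commit.

package mysql // hypothetical engine package, not part of this commit

import (
    "fmt"

    "github.com/authzed/spicedb/internal/datastore"
    "github.com/authzed/spicedb/pkg/cmd/serve"
)

const mysqlEngine datastore.Engine = "mysql"

func init() {
    // Self-register so the serve command can construct this engine from the
    // shared CLI options without importing the package directly.
    serve.RegisterEngine(mysqlEngine, newFromDatastoreOptions)
}

func newFromDatastoreOptions(opts serve.Options) (datastore.Datastore, error) {
    // A real driver would map the relevant opts fields onto its own
    // functional options here, as the crdb and postgres packages do above.
    return NewMySQLDatastore(opts.URI)
}

// NewMySQLDatastore is a placeholder; a real implementation would return a
// type that satisfies datastore.Datastore.
func NewMySQLDatastore(uri string) (datastore.Datastore, error) {
    return nil, fmt.Errorf("mysql datastore not implemented (uri: %q)", uri)
}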
