-
Notifications
You must be signed in to change notification settings - Fork 135
/
store.go
227 lines (187 loc) · 6.88 KB
/
store.go
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
// Code generated by pg-bindings generator. DO NOT EDIT.
package postgres
import (
"context"
"strings"
"time"
"github.com/jackc/pgx/v4"
"github.com/pkg/errors"
"github.com/stackrox/rox/central/metrics"
v1 "github.com/stackrox/rox/generated/api/v1"
"github.com/stackrox/rox/generated/storage"
"github.com/stackrox/rox/pkg/logging"
ops "github.com/stackrox/rox/pkg/metrics"
"github.com/stackrox/rox/pkg/postgres"
"github.com/stackrox/rox/pkg/postgres/pgutils"
pkgSchema "github.com/stackrox/rox/pkg/postgres/schema"
"github.com/stackrox/rox/pkg/sac"
"github.com/stackrox/rox/pkg/sac/resources"
pgSearch "github.com/stackrox/rox/pkg/search/postgres"
"github.com/stackrox/rox/pkg/sync"
"gorm.io/gorm"
)
const (
	// baseTable is the Postgres table backing this store.
	baseTable = "service_accounts"
	// storeName labels the metrics emitted by this store.
	storeName = "ServiceAccount"
	// using copyFrom, we may not even want to batch. It would probably be simpler
	// to deal with failures if we just sent it all. Something to think about as we
	// proceed and move into more e2e and larger performance testing
	batchSize = 10000
)
var (
	// log is the module-scoped logger for this package.
	log = logging.LoggerForModule()
	// schema describes the service_accounts table layout for the generic store.
	schema = pkgSchema.ServiceAccountsSchema
	// targetResource is the SAC resource that gates access to this store.
	targetResource = resources.ServiceAccount
)
// storeType is the concrete proto type persisted by this store.
type storeType = storage.ServiceAccount

// Store is the interface to interact with the storage for storage.ServiceAccount
type Store interface {
	// Upsert inserts or updates a single object.
	Upsert(ctx context.Context, obj *storeType) error
	// UpsertMany inserts or updates a batch of objects.
	UpsertMany(ctx context.Context, objs []*storeType) error
	// Delete removes the object with the given ID, if present.
	Delete(ctx context.Context, id string) error
	// DeleteByQuery removes all objects matching the query.
	DeleteByQuery(ctx context.Context, q *v1.Query) error
	// DeleteMany removes all objects with the given identifiers.
	DeleteMany(ctx context.Context, identifiers []string) error
	// Count returns the number of stored objects.
	Count(ctx context.Context) (int, error)
	// Exists reports whether an object with the given ID is stored.
	Exists(ctx context.Context, id string) (bool, error)
	// Get returns the object with the given ID and whether it was found.
	Get(ctx context.Context, id string) (*storeType, bool, error)
	// GetByQuery returns all objects matching the query.
	GetByQuery(ctx context.Context, query *v1.Query) ([]*storeType, error)
	// GetMany returns found objects plus the indices (into identifiers) that were missing.
	GetMany(ctx context.Context, identifiers []string) ([]*storeType, []int, error)
	// GetIDs returns the identifiers of all stored objects.
	GetIDs(ctx context.Context) ([]string, error)
	// Walk invokes fn on every stored object; a non-nil fn error stops the walk.
	Walk(ctx context.Context, fn func(obj *storeType) error) error
}
// storeImpl implements Store by embedding the generic Postgres store.
type storeImpl struct {
	*pgSearch.GenericStore[storeType, *storeType]
	// NOTE(review): mutex is not locked by any code visible in this file —
	// presumably used by generated plugins elsewhere; confirm before removing.
	mutex sync.RWMutex
}
// New returns a new Store instance using the provided sql instance.
// It wires the generated helpers (PK extraction, insert, copy-from, metrics,
// and SAC upsert checks) into the generic Postgres store.
func New(db postgres.DB) Store {
	genericStore := pgSearch.NewGenericStore[storeType, *storeType](
		db,
		schema,
		pkGetter,
		insertIntoServiceAccounts,
		copyFromServiceAccounts,
		metricsSetAcquireDBConnDuration,
		metricsSetPostgresOperationDurationTime,
		isUpsertAllowed,
		targetResource,
	)
	return &storeImpl{GenericStore: genericStore}
}
// region Helper functions
// pkGetter extracts the primary key (Id) from the given object.
func pkGetter(obj *storeType) string {
	id := obj.GetId()
	return id
}
// metricsSetPostgresOperationDurationTime records how long a Postgres operation
// took for this store, labeled with the store name.
func metricsSetPostgresOperationDurationTime(start time.Time, op ops.Op) {
	metrics.SetPostgresOperationDurationTime(start, op, storeName)
}
// metricsSetAcquireDBConnDuration records how long acquiring a DB connection
// took for this store, labeled with the store name.
func metricsSetAcquireDBConnDuration(start time.Time, op ops.Op) {
	metrics.SetAcquireDBConnDuration(start, op, storeName)
}
// isUpsertAllowed verifies the caller has READ_WRITE access on the target
// resource for every object, scoped by the object's cluster and namespace.
// It returns nil when all objects are allowed, otherwise an error listing
// the IDs that were denied.
func isUpsertAllowed(ctx context.Context, objs ...*storeType) error {
	scopeChecker := sac.GlobalAccessScopeChecker(ctx).AccessMode(storage.Access_READ_WRITE_ACCESS).Resource(targetResource)
	// Global access short-circuits the per-object checks.
	if scopeChecker.IsAllowed() {
		return nil
	}
	var deniedIDs []string
	for _, obj := range objs {
		if scopeChecker.ClusterID(obj.GetClusterId()).Namespace(obj.GetNamespace()).IsAllowed() {
			continue
		}
		deniedIDs = append(deniedIDs, obj.GetId())
	}
	if len(deniedIDs) > 0 {
		return errors.Wrapf(sac.ErrResourceAccessDenied, "modifying serviceAccounts with IDs [%s] was denied", strings.Join(deniedIDs, ", "))
	}
	return nil
}
// insertIntoServiceAccounts queues an upsert of obj onto the pgx batch.
// The row stores the indexed columns alongside the full serialized proto.
func insertIntoServiceAccounts(_ context.Context, batch *pgx.Batch, obj *storage.ServiceAccount) error {
	serialized, err := obj.Marshal()
	if err != nil {
		return err
	}
	args := []any{
		// parent primary keys start
		pgutils.NilOrUUID(obj.GetId()),
		obj.GetName(),
		obj.GetNamespace(),
		obj.GetClusterName(),
		pgutils.NilOrUUID(obj.GetClusterId()),
		pgutils.EmptyOrMap(obj.GetLabels()),
		pgutils.EmptyOrMap(obj.GetAnnotations()),
		serialized,
	}
	const query = "INSERT INTO service_accounts (Id, Name, Namespace, ClusterName, ClusterId, Labels, Annotations, serialized) VALUES($1, $2, $3, $4, $5, $6, $7, $8) ON CONFLICT(Id) DO UPDATE SET Id = EXCLUDED.Id, Name = EXCLUDED.Name, Namespace = EXCLUDED.Namespace, ClusterName = EXCLUDED.ClusterName, ClusterId = EXCLUDED.ClusterId, Labels = EXCLUDED.Labels, Annotations = EXCLUDED.Annotations, serialized = EXCLUDED.serialized"
	batch.Queue(query, args...)
	return nil
}
// copyFromServiceAccounts bulk-loads objs into service_accounts using the
// Postgres COPY protocol. COPY cannot upsert, so each batch first deletes the
// affected rows by ID and then re-inserts them, which together behaves like
// an upsert. Rows are flushed every batchSize objects and once at the end.
func copyFromServiceAccounts(ctx context.Context, s pgSearch.Deleter, tx *postgres.Tx, objs ...*storage.ServiceAccount) error {
	copyCols := []string{
		"id",
		"name",
		"namespace",
		"clustername",
		"clusterid",
		"labels",
		"annotations",
		"serialized",
	}
	rows := make([][]interface{}, 0, batchSize)
	idsToDelete := make([]string, 0, batchSize)
	for i, obj := range objs {
		// Todo: ROX-9499 Figure out how to more cleanly template around this issue.
		log.Debugf("This is here for now because there is an issue with pods_TerminatedInstances where the obj "+
			"in the loop is not used as it only consists of the parent ID and the index. Putting this here as a stop gap "+
			"to simply use the object. %s", obj)
		serialized, err := obj.Marshal()
		if err != nil {
			return err
		}
		// NOTE(review): the insert path wraps Labels/Annotations in
		// pgutils.EmptyOrMap while the copy path passes the raw maps —
		// confirm this asymmetry is intended by the generator.
		rows = append(rows, []interface{}{
			pgutils.NilOrUUID(obj.GetId()),
			obj.GetName(),
			obj.GetNamespace(),
			obj.GetClusterName(),
			pgutils.NilOrUUID(obj.GetClusterId()),
			obj.GetLabels(),
			obj.GetAnnotations(),
			serialized,
		})
		idsToDelete = append(idsToDelete, obj.GetId())
		// Flush on a full batch or after the final object.
		if (i+1)%batchSize == 0 || i == len(objs)-1 {
			// Delete first since COPY cannot upsert; parent deletion cascades,
			// so only the top-level parent needs deleting.
			if err := s.DeleteMany(ctx, idsToDelete); err != nil {
				return err
			}
			idsToDelete = idsToDelete[:0]
			if _, err := tx.CopyFrom(ctx, pgx.Identifier{"service_accounts"}, copyCols, pgx.CopyFromRows(rows)); err != nil {
				return err
			}
			// Reset the row buffer (capacity retained) for the next batch.
			rows = rows[:0]
		}
	}
	return nil
}
// endregion Helper functions
// region Used for testing
// CreateTableAndNewStore returns a new Store instance for testing.
// It applies the table schema through the provided gorm handle, then builds
// the store on top of db.
func CreateTableAndNewStore(ctx context.Context, db postgres.DB, gormDB *gorm.DB) Store {
	pkgSchema.ApplySchemaForTable(ctx, gormDB, baseTable)
	return New(db)
}
// Destroy drops the tables associated with the target object type.
// Intended for test teardown only.
func Destroy(ctx context.Context, db postgres.DB) {
	dropTableServiceAccounts(ctx, db)
}
// dropTableServiceAccounts drops the service_accounts table and any dependent
// objects (CASCADE). Errors are deliberately ignored: this is a best-effort
// test-only teardown helper.
func dropTableServiceAccounts(ctx context.Context, db postgres.DB) {
	const dropStmt = "DROP TABLE IF EXISTS service_accounts CASCADE"
	_, _ = db.Exec(ctx, dropStmt)
}
// endregion Used for testing