-
Notifications
You must be signed in to change notification settings - Fork 135
/
store.go
179 lines (140 loc) · 5.46 KB
/
store.go
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
// Code generated by pg-bindings generator. DO NOT EDIT.
package postgres
import (
"context"
"time"
"github.com/jackc/pgx/v5"
"github.com/stackrox/rox/central/metrics"
v1 "github.com/stackrox/rox/generated/api/v1"
"github.com/stackrox/rox/generated/storage"
"github.com/stackrox/rox/pkg/logging"
ops "github.com/stackrox/rox/pkg/metrics"
"github.com/stackrox/rox/pkg/postgres"
pkgSchema "github.com/stackrox/rox/pkg/postgres/schema"
"github.com/stackrox/rox/pkg/sac/resources"
pgSearch "github.com/stackrox/rox/pkg/search/postgres"
"gorm.io/gorm"
)
const (
	// baseTable is the Postgres table this store reads from and writes to.
	baseTable = "compliance_operator_check_results"
	// storeName labels the metrics emitted by this store.
	storeName = "ComplianceOperatorCheckResult"
	// using copyFrom, we may not even want to batch. It would probably be simpler
	// to deal with failures if we just sent it all. Something to think about as we
	// proceed and move into more e2e and larger performance testing
	batchSize = 10000
)

var (
	// log is the module-scoped logger.
	log = logging.LoggerForModule()
	// schema describes the table layout used by the generic store.
	schema = pkgSchema.ComplianceOperatorCheckResultsSchema
	// targetResource is the SAC resource used for access checks on this store.
	targetResource = resources.ComplianceOperator
)

// storeType aliases the proto message type persisted by this store.
type storeType = storage.ComplianceOperatorCheckResult
// Store is the interface to interact with the storage for storage.ComplianceOperatorCheckResult
type Store interface {
	// Upsert inserts or updates a single object.
	Upsert(ctx context.Context, obj *storeType) error
	// UpsertMany inserts or updates a batch of objects.
	UpsertMany(ctx context.Context, objs []*storeType) error
	// Delete removes the object with the given id.
	Delete(ctx context.Context, id string) error
	// DeleteByQuery removes all objects matching the query.
	DeleteByQuery(ctx context.Context, q *v1.Query) error
	// DeleteMany removes all objects with the given identifiers.
	DeleteMany(ctx context.Context, identifiers []string) error
	// Count returns the number of stored objects.
	Count(ctx context.Context) (int, error)
	// Exists reports whether an object with the given id is stored.
	Exists(ctx context.Context, id string) (bool, error)
	// Get returns the object with the given id, and whether it was found.
	Get(ctx context.Context, id string) (*storeType, bool, error)
	// GetMany returns the objects for the given identifiers, plus the indices
	// of identifiers that were not found.
	GetMany(ctx context.Context, identifiers []string) ([]*storeType, []int, error)
	// GetIDs returns the identifiers of all stored objects.
	GetIDs(ctx context.Context) ([]string, error)
	// Walk invokes fn on every stored object; a non-nil error stops iteration.
	Walk(ctx context.Context, fn func(obj *storeType) error) error
}
// New returns a new Store instance using the provided sql instance.
// It wires the generated helpers (pk extraction, insert, copy-from, metrics)
// into the shared generic Postgres store implementation.
func New(db postgres.DB) Store {
	// Globally scoped check: the ComplianceOperator resource has no namespace/cluster scoping.
	upsertChecker := pgSearch.GloballyScopedUpsertChecker[storeType, *storeType](targetResource)
	return pgSearch.NewGenericStore[storeType, *storeType](
		db,
		schema,
		pkGetter,
		insertIntoComplianceOperatorCheckResults,
		copyFromComplianceOperatorCheckResults,
		metricsSetAcquireDBConnDuration,
		metricsSetPostgresOperationDurationTime,
		upsertChecker,
		targetResource,
	)
}
// region Helper functions

// pkGetter returns the primary key (Id) of the given object.
func pkGetter(obj *storeType) string {
	return obj.GetId()
}
// metricsSetPostgresOperationDurationTime records the duration of a Postgres
// operation for this store, labeled with storeName.
func metricsSetPostgresOperationDurationTime(start time.Time, op ops.Op) {
	metrics.SetPostgresOperationDurationTime(start, op, storeName)
}
// metricsSetAcquireDBConnDuration records how long it took to acquire a DB
// connection for this store, labeled with storeName.
func metricsSetAcquireDBConnDuration(start time.Time, op ops.Op) {
	metrics.SetAcquireDBConnDuration(start, op, storeName)
}
// insertIntoComplianceOperatorCheckResults queues an upsert of obj onto the
// given pgx batch. The object is stored as its serialized proto bytes keyed
// by Id; on conflict the existing row is overwritten.
func insertIntoComplianceOperatorCheckResults(batch *pgx.Batch, obj *storage.ComplianceOperatorCheckResult) error {
	data, err := obj.Marshal()
	if err != nil {
		return err
	}
	finalStr := "INSERT INTO compliance_operator_check_results (Id, serialized) VALUES($1, $2) ON CONFLICT(Id) DO UPDATE SET Id = EXCLUDED.Id, serialized = EXCLUDED.serialized"
	// $1 = primary key, $2 = serialized proto bytes.
	batch.Queue(finalStr, obj.GetId(), data)
	return nil
}
// copyFromComplianceOperatorCheckResults bulk-loads objs into the table via
// Postgres COPY inside the given transaction. COPY cannot upsert, so for each
// batch the rows with matching IDs are deleted first and then re-inserted,
// which together behaves like an upsert. Work is flushed every batchSize
// objects and once more for the final partial batch.
func copyFromComplianceOperatorCheckResults(ctx context.Context, s pgSearch.Deleter, tx *postgres.Tx, objs ...*storage.ComplianceOperatorCheckResult) error {
	inputRows := make([][]interface{}, 0, batchSize)
	// This is a copy so first we must delete the rows and re-add them
	// Which is essentially the desired behaviour of an upsert.
	deletes := make([]string, 0, batchSize)
	// Column order here must match the per-row values appended below.
	copyCols := []string{
		"id",
		"serialized",
	}
	for idx, obj := range objs {
		// Todo: ROX-9499 Figure out how to more cleanly template around this issue.
		log.Debugf("This is here for now because there is an issue with pods_TerminatedInstances where the obj "+
			"in the loop is not used as it only consists of the parent ID and the index. Putting this here as a stop gap "+
			"to simply use the object. %s", obj)
		serialized, marshalErr := obj.Marshal()
		if marshalErr != nil {
			return marshalErr
		}
		inputRows = append(inputRows, []interface{}{
			obj.GetId(),
			serialized,
		})
		// Add the ID to be deleted.
		deletes = append(deletes, obj.GetId())
		// if we hit our batch size we need to push the data
		if (idx+1)%batchSize == 0 || idx == len(objs)-1 {
			// copy does not upsert so have to delete first. parent deletion cascades so only need to
			// delete for the top level parent
			if err := s.DeleteMany(ctx, deletes); err != nil {
				return err
			}
			// clear the inserts and vals for the next batch
			deletes = deletes[:0]
			if _, err := tx.CopyFrom(ctx, pgx.Identifier{"compliance_operator_check_results"}, copyCols, pgx.CopyFromRows(inputRows)); err != nil {
				return err
			}
			// clear the input rows for the next batch
			inputRows = inputRows[:0]
		}
	}
	return nil
}
// endregion Helper functions
// region Used for testing

// CreateTableAndNewStore returns a new Store instance for testing.
// It first applies the table schema for baseTable via gorm, then constructs
// the store over the provided db.
func CreateTableAndNewStore(ctx context.Context, db postgres.DB, gormDB *gorm.DB) Store {
	pkgSchema.ApplySchemaForTable(ctx, gormDB, baseTable)
	return New(db)
}
// Destroy drops the tables associated with the target object type.
// Intended for tests only; errors from the drop are ignored.
func Destroy(ctx context.Context, db postgres.DB) {
	dropTableComplianceOperatorCheckResults(ctx, db)
}
// dropTableComplianceOperatorCheckResults drops the backing table (and any
// dependents, via CASCADE) if it exists. The result is deliberately ignored —
// this is best-effort test teardown.
func dropTableComplianceOperatorCheckResults(ctx context.Context, db postgres.DB) {
	_, _ = db.Exec(ctx, "DROP TABLE IF EXISTS compliance_operator_check_results CASCADE")
}
// endregion Used for testing