From efad2cebac75ae7ed2a3b9e4f881d72b91496477 Mon Sep 17 00:00:00 2001 From: priyawadhwa Date: Thu, 10 Nov 2022 07:24:39 -0800 Subject: [PATCH] fix SearchLogQuery behavior to conform to openapi spec (#1145) (#1191) --- openapi.yaml | 6 + pkg/api/entries.go | 71 ++- pkg/api/error.go | 28 +- .../entries/search_log_query_responses.go | 69 +++ pkg/generated/restapi/embedded_spec.go | 21 + .../entries/search_log_query_responses.go | 45 ++ tests/e2e_test.go | 502 +++++++++++++++++- 7 files changed, 677 insertions(+), 65 deletions(-) diff --git a/openapi.yaml b/openapi.yaml index 0b89b10c4..ab873d25f 100644 --- a/openapi.yaml +++ b/openapi.yaml @@ -236,6 +236,8 @@ paths: $ref: '#/definitions/LogEntry' 400: $ref: '#/responses/BadContent' + 422: + $ref: '#/responses/UnprocessableEntity' default: $ref: '#/responses/InternalServerError' @@ -643,3 +645,7 @@ responses: description: There was an internal error in the server while processing the request schema: $ref: "#/definitions/Error" + UnprocessableEntity: + description: The server understood the request but is unable to process the contained instructions + schema: + $ref: "#/definitions/Error" diff --git a/pkg/api/entries.go b/pkg/api/entries.go index 600c04637..81a00af56 100644 --- a/pkg/api/entries.go +++ b/pkg/api/entries.go @@ -361,27 +361,29 @@ func SearchLogQueryHandler(params entries.SearchLogQueryParams) middleware.Respo g, _ := errgroup.WithContext(httpReqCtx) var searchHashes [][]byte - code := http.StatusBadRequest for _, entryID := range params.Entry.EntryUUIDs { - if sharding.ValidateEntryID(entryID) == nil { + // if we got this far, then entryID is either a 64 or 80 character hex string + err := sharding.ValidateEntryID(entryID) + if err == nil { logEntry, err := retrieveLogEntry(httpReqCtx, entryID) - if errors.Is(err, ErrNotFound) { - code = http.StatusNotFound + if err != nil && !errors.Is(err, ErrNotFound) { + return handleRekorAPIError(params, http.StatusInternalServerError, err, fmt.Sprintf("error getting log entry for %s", entryID)) + } else if err == nil { + resultPayload = append(resultPayload, logEntry) } - if err != nil { - return handleRekorAPIError(params, code, err, fmt.Sprintf("error getting log entry for %s", entryID)) - } - resultPayload = append(resultPayload, logEntry) continue + } else if len(entryID) == sharding.EntryIDHexStringLen { + // if ValidateEntryID failed and this is a full length entryID, then we can't search for it + return handleRekorAPIError(params, http.StatusBadRequest, err, fmt.Sprintf("invalid entryID %s", entryID)) } // At this point, check if we got a uuid instead of an EntryID, so search for the hash later uuid := entryID if err := sharding.ValidateUUID(uuid); err != nil { - return handleRekorAPIError(params, code, err, fmt.Sprintf("validating uuid %s", uuid)) + return handleRekorAPIError(params, http.StatusBadRequest, err, fmt.Sprintf("invalid uuid %s", uuid)) } hash, err := hex.DecodeString(uuid) if err != nil { - return handleRekorAPIError(params, code, err, malformedUUID) + return handleRekorAPIError(params, http.StatusBadRequest, err, malformedUUID) } searchHashes = append(searchHashes, hash) } @@ -408,7 +410,7 @@ func SearchLogQueryHandler(params entries.SearchLogQueryParams) middleware.Respo } if err := g.Wait(); err != nil { - return handleRekorAPIError(params, code, err, err.Error()) + return handleRekorAPIError(params, http.StatusBadRequest, err, err.Error()) } close(searchHashesChan) for hash := range searchHashesChan { @@ -424,31 +426,30 @@ func SearchLogQueryHandler(params 
entries.SearchLogQueryParams) middleware.Respo for _, shard := range api.logRanges.AllShards() { tcs := NewTrillianClientFromTreeID(httpReqCtx, shard) resp := tcs.getLeafAndProofByHash(hash) - if resp.status != codes.OK { - continue - } - if resp.err != nil { - continue - } - leafResult := resp.getLeafAndProofResult - if leafResult != nil && leafResult.Leaf != nil { - if results == nil { - results = map[int64]*trillian.GetEntryAndProofResponse{} + switch resp.status { + case codes.OK: + leafResult := resp.getLeafAndProofResult + if leafResult != nil && leafResult.Leaf != nil { + if results == nil { + results = map[int64]*trillian.GetEntryAndProofResponse{} + } + results[shard] = resp.getLeafAndProofResult } - results[shard] = resp.getLeafAndProofResult + case codes.NotFound: + // do nothing here, do not throw 404 error + continue + default: + log.ContextLogger(httpReqCtx).Errorf("error getLeafAndProofByHash(%s): code: %v, msg %v", hex.EncodeToString(hash), resp.status, resp.err) + return fmt.Errorf(trillianCommunicationError) } } - if results == nil { - code = http.StatusNotFound - return fmt.Errorf("no responses found") - } searchByHashResults[i] = results return nil }) } if err := g.Wait(); err != nil { - return handleRekorAPIError(params, code, err, err.Error()) + return handleRekorAPIError(params, http.StatusInternalServerError, err, err.Error()) } for _, hashMap := range searchByHashResults { @@ -459,8 +460,7 @@ func SearchLogQueryHandler(params entries.SearchLogQueryParams) middleware.Respo tcs := NewTrillianClientFromTreeID(httpReqCtx, shard) logEntry, err := logEntryFromLeaf(httpReqCtx, api.signer, tcs, leafResp.Leaf, leafResp.SignedLogRoot, leafResp.Proof, shard, api.logRanges) if err != nil { - code = http.StatusInternalServerError - return handleRekorAPIError(params, code, err, err.Error()) + return handleRekorAPIError(params, http.StatusInternalServerError, err, err.Error()) } resultPayload = append(resultPayload, logEntry) } @@ -471,26 +471,21 @@ func SearchLogQueryHandler(params entries.SearchLogQueryParams) middleware.Respo g, _ := errgroup.WithContext(httpReqCtx) resultPayloadChan := make(chan models.LogEntry, len(params.Entry.LogIndexes)) - code := http.StatusInternalServerError for _, logIndex := range params.Entry.LogIndexes { logIndex := logIndex // https://golang.org/doc/faq#closures_and_goroutines g.Go(func() error { logEntry, err := retrieveLogEntryByIndex(httpReqCtx, int(swag.Int64Value(logIndex))) - if err != nil { - switch { - case errors.Is(err, ErrNotFound): - code = http.StatusNotFound - default: - } + if err != nil && !errors.Is(err, ErrNotFound) { return err + } else if err == nil { + resultPayloadChan <- logEntry } - resultPayloadChan <- logEntry return nil }) } if err := g.Wait(); err != nil { - return handleRekorAPIError(params, code, err, err.Error()) + return handleRekorAPIError(params, http.StatusInternalServerError, err, err.Error()) } close(resultPayloadChan) for result := range resultPayloadChan { diff --git a/pkg/api/error.go b/pkg/api/error.go index 765ddcd8f..c84cdddba 100644 --- a/pkg/api/error.go +++ b/pkg/api/error.go @@ -33,21 +33,21 @@ import ( ) const ( - trillianCommunicationError = "Unexpected error communicating with transparency log" - trillianUnexpectedResult = "Unexpected result from transparency log" - validationError = "Error processing entry: %v" - failedToGenerateCanonicalEntry = "Error generating canonicalized entry" - entryAlreadyExists = "An equivalent entry already exists in the transparency log with UUID %v" + 
trillianCommunicationError = "unexpected error communicating with transparency log" + trillianUnexpectedResult = "unexpected result from transparency log" + validationError = "error processing entry: %v" + failedToGenerateCanonicalEntry = "error generating canonicalized entry" + entryAlreadyExists = "an equivalent entry already exists in the transparency log with UUID %v" firstSizeLessThanLastSize = "firstSize(%d) must be less than lastSize(%d)" malformedUUID = "UUID must be a 64-character hexadecimal string" - malformedPublicKey = "Public key provided could not be parsed" - failedToGenerateCanonicalKey = "Error generating canonicalized public key" - redisUnexpectedResult = "Unexpected result from searching index" - lastSizeGreaterThanKnown = "The tree size requested(%d) was greater than what is currently observable(%d)" - signingError = "Error signing" - sthGenerateError = "Error generating signed tree head" - unsupportedPKIFormat = "The PKI format requested is not supported by this server" - unexpectedInactiveShardError = "Unexpected error communicating with inactive shard" + malformedPublicKey = "public key provided could not be parsed" + failedToGenerateCanonicalKey = "error generating canonicalized public key" + redisUnexpectedResult = "unexpected result from searching index" + lastSizeGreaterThanKnown = "the tree size requested(%d) was greater than what is currently observable(%d)" + signingError = "error signing" + sthGenerateError = "error generating signed tree head" + unsupportedPKIFormat = "the PKI format requested is not supported by this server" + unexpectedInactiveShardError = "unexpected error communicating with inactive shard" maxSearchQueryLimit = "more than max allowed %d entries in request" ) @@ -122,6 +122,8 @@ func handleRekorAPIError(params interface{}, code int, err error, message string switch code { case http.StatusBadRequest: return entries.NewSearchLogQueryBadRequest().WithPayload(errorMsg(message, code)) + case http.StatusUnprocessableEntity: + return entries.NewSearchLogQueryUnprocessableEntity().WithPayload(errorMsg(message, code)) default: return entries.NewSearchLogQueryDefault(code).WithPayload(errorMsg(message, code)) } diff --git a/pkg/generated/client/entries/search_log_query_responses.go b/pkg/generated/client/entries/search_log_query_responses.go index 803754e86..34013fd69 100644 --- a/pkg/generated/client/entries/search_log_query_responses.go +++ b/pkg/generated/client/entries/search_log_query_responses.go @@ -51,6 +51,12 @@ func (o *SearchLogQueryReader) ReadResponse(response runtime.ClientResponse, con return nil, err } return nil, result + case 422: + result := NewSearchLogQueryUnprocessableEntity() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return nil, result default: result := NewSearchLogQueryDefault(response.Code()) if err := result.readResponse(response, consumer, o.formats); err != nil { @@ -187,6 +193,69 @@ func (o *SearchLogQueryBadRequest) readResponse(response runtime.ClientResponse, return nil } +// NewSearchLogQueryUnprocessableEntity creates a SearchLogQueryUnprocessableEntity with default headers values +func NewSearchLogQueryUnprocessableEntity() *SearchLogQueryUnprocessableEntity { + return &SearchLogQueryUnprocessableEntity{} +} + +/* +SearchLogQueryUnprocessableEntity describes a response with status code 422, with default header values. 
+ +The server understood the request but is unable to process the contained instructions +*/ +type SearchLogQueryUnprocessableEntity struct { + Payload *models.Error +} + +// IsSuccess returns true when this search log query unprocessable entity response has a 2xx status code +func (o *SearchLogQueryUnprocessableEntity) IsSuccess() bool { + return false +} + +// IsRedirect returns true when this search log query unprocessable entity response has a 3xx status code +func (o *SearchLogQueryUnprocessableEntity) IsRedirect() bool { + return false +} + +// IsClientError returns true when this search log query unprocessable entity response has a 4xx status code +func (o *SearchLogQueryUnprocessableEntity) IsClientError() bool { + return true +} + +// IsServerError returns true when this search log query unprocessable entity response has a 5xx status code +func (o *SearchLogQueryUnprocessableEntity) IsServerError() bool { + return false +} + +// IsCode returns true when this search log query unprocessable entity response a status code equal to that given +func (o *SearchLogQueryUnprocessableEntity) IsCode(code int) bool { + return code == 422 +} + +func (o *SearchLogQueryUnprocessableEntity) Error() string { + return fmt.Sprintf("[POST /api/v1/log/entries/retrieve][%d] searchLogQueryUnprocessableEntity %+v", 422, o.Payload) +} + +func (o *SearchLogQueryUnprocessableEntity) String() string { + return fmt.Sprintf("[POST /api/v1/log/entries/retrieve][%d] searchLogQueryUnprocessableEntity %+v", 422, o.Payload) +} + +func (o *SearchLogQueryUnprocessableEntity) GetPayload() *models.Error { + return o.Payload +} + +func (o *SearchLogQueryUnprocessableEntity) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.Error) + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + // NewSearchLogQueryDefault creates a SearchLogQueryDefault with default headers values func NewSearchLogQueryDefault(code int) *SearchLogQueryDefault { return &SearchLogQueryDefault{ diff --git a/pkg/generated/restapi/embedded_spec.go b/pkg/generated/restapi/embedded_spec.go index 964abab3b..d391b2645 100644 --- a/pkg/generated/restapi/embedded_spec.go +++ b/pkg/generated/restapi/embedded_spec.go @@ -220,6 +220,9 @@ func init() { "400": { "$ref": "#/responses/BadContent" }, + "422": { + "$ref": "#/responses/UnprocessableEntity" + }, "default": { "$ref": "#/responses/InternalServerError" } @@ -911,6 +914,12 @@ func init() { }, "NotFound": { "description": "The content requested could not be found" + }, + "UnprocessableEntity": { + "description": "The server understood the request but is unable to process the contained instructions", + "schema": { + "$ref": "#/definitions/Error" + } } } }`)) @@ -1132,6 +1141,12 @@ func init() { "$ref": "#/definitions/Error" } }, + "422": { + "description": "The server understood the request but is unable to process the contained instructions", + "schema": { + "$ref": "#/definitions/Error" + } + }, "default": { "description": "There was an internal error in the server while processing the request", "schema": { @@ -3892,6 +3907,12 @@ func init() { }, "NotFound": { "description": "The content requested could not be found" + }, + "UnprocessableEntity": { + "description": "The server understood the request but is unable to process the contained instructions", + "schema": { + "$ref": "#/definitions/Error" + } } } }`)) diff --git 
a/pkg/generated/restapi/operations/entries/search_log_query_responses.go b/pkg/generated/restapi/operations/entries/search_log_query_responses.go index 10d09ff2b..65336a114 100644 --- a/pkg/generated/restapi/operations/entries/search_log_query_responses.go +++ b/pkg/generated/restapi/operations/entries/search_log_query_responses.go @@ -122,6 +122,51 @@ func (o *SearchLogQueryBadRequest) WriteResponse(rw http.ResponseWriter, produce } } +// SearchLogQueryUnprocessableEntityCode is the HTTP code returned for type SearchLogQueryUnprocessableEntity +const SearchLogQueryUnprocessableEntityCode int = 422 + +/* +SearchLogQueryUnprocessableEntity The server understood the request but is unable to process the contained instructions + +swagger:response searchLogQueryUnprocessableEntity +*/ +type SearchLogQueryUnprocessableEntity struct { + + /* + In: Body + */ + Payload *models.Error `json:"body,omitempty"` +} + +// NewSearchLogQueryUnprocessableEntity creates SearchLogQueryUnprocessableEntity with default headers values +func NewSearchLogQueryUnprocessableEntity() *SearchLogQueryUnprocessableEntity { + + return &SearchLogQueryUnprocessableEntity{} +} + +// WithPayload adds the payload to the search log query unprocessable entity response +func (o *SearchLogQueryUnprocessableEntity) WithPayload(payload *models.Error) *SearchLogQueryUnprocessableEntity { + o.Payload = payload + return o +} + +// SetPayload sets the payload to the search log query unprocessable entity response +func (o *SearchLogQueryUnprocessableEntity) SetPayload(payload *models.Error) { + o.Payload = payload +} + +// WriteResponse to the client +func (o *SearchLogQueryUnprocessableEntity) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) { + + rw.WriteHeader(422) + if o.Payload != nil { + payload := o.Payload + if err := producer.Produce(rw, payload); err != nil { + panic(err) // let the recovery middleware deal with this + } + } +} + /* SearchLogQueryDefault There was an internal error in the server while processing the request diff --git a/tests/e2e_test.go b/tests/e2e_test.go index ff5b7f0ac..4f96bbbc6 100644 --- a/tests/e2e_test.go +++ b/tests/e2e_test.go @@ -50,6 +50,7 @@ import ( "github.com/go-openapi/strfmt" "github.com/go-openapi/swag" "github.com/google/go-cmp/cmp" + "github.com/google/go-cmp/cmp/cmpopts" "github.com/in-toto/in-toto-golang/in_toto" slsa "github.com/in-toto/in-toto-golang/in_toto/slsa_provenance/v0.2" "github.com/secure-systems-lab/go-securesystemslib/dsse" @@ -145,7 +146,7 @@ func TestUploadVerifyRekord(t *testing.T) { // Verify should fail initially out := runCliErr(t, "verify", "--artifact", artifactPath, "--signature", sigPath, "--public-key", pubPath) - outputContains(t, out, "404") + outputContains(t, out, "entry in log cannot be located") // It should upload successfully. 
out = runCli(t, "upload", "--artifact", artifactPath, "--signature", sigPath, "--public-key", pubPath) @@ -981,7 +982,7 @@ func TestGetNonExistantIndex(t *testing.T) { func TestVerifyNonExistantIndex(t *testing.T) { // this index is extremely likely to not exist out := runCliErr(t, "verify", "--log-index", "100000000") - outputContains(t, out, "404") + outputContains(t, out, "entry in log cannot be located") } func TestGetNonExistantUUID(t *testing.T) { @@ -993,7 +994,7 @@ func TestGetNonExistantUUID(t *testing.T) { func TestVerifyNonExistantUUID(t *testing.T) { // this uuid is extremely likely to not exist out := runCliErr(t, "verify", "--uuid", "ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff") - outputContains(t, out, "404") + outputContains(t, out, "entry in log cannot be located") // Check response code tid := getTreeID(t) @@ -1011,9 +1012,11 @@ func TestVerifyNonExistantUUID(t *testing.T) { t.Fatal(err) } c, _ := ioutil.ReadAll(resp.Body) - t.Log(string(c)) - if resp.StatusCode != 404 { - t.Fatal("expected 404 status") + if resp.StatusCode != 200 { + t.Fatalf("expected status 200, got %d instead", resp.StatusCode) + } + if strings.TrimSpace(string(c)) != "[]" { + t.Fatalf("expected empty JSON array as response, got %s instead", string(c)) } } @@ -1224,10 +1227,8 @@ func TestSearchQueryMalformedEntry(t *testing.T) { if err != nil { t.Fatal(err) } - c, _ := ioutil.ReadAll(resp.Body) - t.Log(string(c)) if resp.StatusCode != 400 { - t.Fatal("expected status 400") + t.Fatalf("expected status 400, got %d instead", resp.StatusCode) } } @@ -1250,9 +1251,11 @@ func TestSearchQueryNonExistentEntry(t *testing.T) { t.Fatal(err) } c, _ := ioutil.ReadAll(resp.Body) - t.Log(string(c)) - if resp.StatusCode != 404 { - t.Fatal("expected 404 status") + if resp.StatusCode != 200 { + t.Fatalf("expected status 200, got %d instead", resp.StatusCode) + } + if strings.TrimSpace(string(c)) != "[]" { + t.Fatalf("expected empty JSON array as response, got %s instead", string(c)) } } @@ -1324,8 +1327,12 @@ func TestSearchValidateTreeID(t *testing.T) { t.Fatal(err) } // Not Found because currently we don't detect that an unused random tree ID is invalid. 
- if resp.StatusCode != 404 { - t.Fatalf("expected 404 status code but got %d", resp.StatusCode) + c, _ := ioutil.ReadAll(resp.Body) + if resp.StatusCode != 200 { + t.Fatalf("expected status 200, got %d instead", resp.StatusCode) + } + if strings.TrimSpace(string(c)) != "[]" { + t.Fatalf("expected empty JSON array as response, got %s instead", string(c)) } } @@ -1400,3 +1407,470 @@ func TestMetricsCounts(t *testing.T) { t.Error("rekor_qps_by_api did not increment") } } + +// TestSearchLogQuerySingleShard provides coverage testing on the searchLogQuery endpoint within a single shard +func TestSearchLogQuerySingleShard(t *testing.T) { + + // Write the shared public key to a file + pubPath := filepath.Join(t.TempDir(), "logQuery_pubKey.asc") + pubKeyBytes := []byte(publicKey) + if err := ioutil.WriteFile(pubPath, pubKeyBytes, 0644); err != nil { + t.Fatal(err) + } + + // Create two valid log entries to use for the test cases + firstArtifactPath := filepath.Join(t.TempDir(), "artifact1") + firstSigPath := filepath.Join(t.TempDir(), "signature1.asc") + createdPGPSignedArtifact(t, firstArtifactPath, firstSigPath) + firstArtifactBytes, _ := ioutil.ReadFile(firstArtifactPath) + firstSigBytes, _ := ioutil.ReadFile(firstSigPath) + + firstRekord := rekord.V001Entry{ + RekordObj: models.RekordV001Schema{ + Data: &models.RekordV001SchemaData{ + Content: strfmt.Base64(firstArtifactBytes), + }, + Signature: &models.RekordV001SchemaSignature{ + Content: (*strfmt.Base64)(&firstSigBytes), + Format: swag.String(models.RekordV001SchemaSignatureFormatPgp), + PublicKey: &models.RekordV001SchemaSignaturePublicKey{ + Content: (*strfmt.Base64)(&pubKeyBytes), + }, + }, + }, + } + firstEntry := &models.Rekord{ + APIVersion: swag.String(firstRekord.APIVersion()), + Spec: firstRekord.RekordObj, + } + + secondArtifactPath := filepath.Join(t.TempDir(), "artifact2") + secondSigPath := filepath.Join(t.TempDir(), "signature2.asc") + createdPGPSignedArtifact(t, secondArtifactPath, secondSigPath) + secondArtifactBytes, _ := ioutil.ReadFile(secondArtifactPath) + secondSigBytes, _ := ioutil.ReadFile(secondSigPath) + + secondRekord := rekord.V001Entry{ + RekordObj: models.RekordV001Schema{ + Data: &models.RekordV001SchemaData{ + Content: strfmt.Base64(secondArtifactBytes), + }, + Signature: &models.RekordV001SchemaSignature{ + Content: (*strfmt.Base64)(&secondSigBytes), + Format: swag.String(models.RekordV001SchemaSignatureFormatPgp), + PublicKey: &models.RekordV001SchemaSignaturePublicKey{ + Content: (*strfmt.Base64)(&pubKeyBytes), + }, + }, + }, + } + secondEntry := &models.Rekord{ + APIVersion: swag.String(secondRekord.APIVersion()), + Spec: secondRekord.RekordObj, + } + + // Now upload them to rekor! 
+ firstOut := runCli(t, "upload", "--artifact", firstArtifactPath, "--signature", firstSigPath, "--public-key", pubPath) + secondOut := runCli(t, "upload", "--artifact", secondArtifactPath, "--signature", secondSigPath, "--public-key", pubPath) + + firstEntryID := getUUIDFromUploadOutput(t, firstOut) + firstUUID, _ := sharding.GetUUIDFromIDString(firstEntryID) + firstIndex := int64(getLogIndexFromUploadOutput(t, firstOut)) + secondEntryID := getUUIDFromUploadOutput(t, secondOut) + secondUUID, _ := sharding.GetUUIDFromIDString(secondEntryID) + secondIndex := int64(getLogIndexFromUploadOutput(t, secondOut)) + + // this is invalid because treeID is > int64 + invalidEntryID := "ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffeeefff" + invalidIndex := int64(-1) + invalidEntry := &models.Rekord{ + APIVersion: swag.String(secondRekord.APIVersion()), + } + + nonexistentArtifactPath := filepath.Join(t.TempDir(), "artifact3") + nonexistentSigPath := filepath.Join(t.TempDir(), "signature3.asc") + createdPGPSignedArtifact(t, nonexistentArtifactPath, nonexistentSigPath) + nonexistentArtifactBytes, _ := ioutil.ReadFile(nonexistentArtifactPath) + nonexistentSigBytes, _ := ioutil.ReadFile(nonexistentSigPath) + + nonexistentEntryID := "0000ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffeeefff" + nonexistentUUID := "ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffeeefff" + nonexistentIndex := int64(999999999) // assuming we don't put that many entries in the log + nonexistentRekord := rekord.V001Entry{ + RekordObj: models.RekordV001Schema{ + Data: &models.RekordV001SchemaData{ + Content: strfmt.Base64(nonexistentArtifactBytes), + }, + Signature: &models.RekordV001SchemaSignature{ + Content: (*strfmt.Base64)(&nonexistentSigBytes), + Format: swag.String(models.RekordV001SchemaSignatureFormatPgp), + PublicKey: &models.RekordV001SchemaSignaturePublicKey{ + Content: (*strfmt.Base64)(&pubKeyBytes), + }, + }, + }, + } + nonexistentEntry := &models.Rekord{ + APIVersion: swag.String("0.0.1"), + Spec: nonexistentRekord.RekordObj, + } + + type testCase struct { + name string + expectSuccess bool + expectedErrorResponseCode int64 + expectedEntryIDs []string + entryUUIDs []string + logIndexes []*int64 + entries []models.ProposedEntry + } + + testCases := []testCase{ + { + name: "empty entryUUIDs", + expectSuccess: true, + expectedEntryIDs: []string{}, + entryUUIDs: []string{}, + }, + { + name: "first in log (using entryUUIDs)", + expectSuccess: true, + expectedEntryIDs: []string{firstEntryID}, + entryUUIDs: []string{firstEntryID}, + }, + { + name: "first in log (using UUID in entryUUIDs)", + expectSuccess: true, + expectedEntryIDs: []string{firstEntryID}, + entryUUIDs: []string{firstUUID}, + }, + { + name: "second in log (using entryUUIDs)", + expectSuccess: true, + expectedEntryIDs: []string{secondEntryID}, + entryUUIDs: []string{secondEntryID}, + }, + { + name: "invalid entryID (using entryUUIDs)", + expectSuccess: false, + expectedErrorResponseCode: http.StatusBadRequest, + entryUUIDs: []string{invalidEntryID}, + }, + { + name: "valid entryID not in log (using entryUUIDs)", + expectSuccess: true, + expectedEntryIDs: []string{}, + entryUUIDs: []string{nonexistentEntryID}, + }, + { + name: "valid UUID not in log (using entryUUIDs)", + expectSuccess: true, + expectedEntryIDs: []string{}, + entryUUIDs: []string{nonexistentUUID}, + }, + { + name: "both valid entries in log (using entryUUIDs)", + expectSuccess: true, + expectedEntryIDs: []string{firstEntryID, 
secondEntryID}, + entryUUIDs: []string{firstEntryID, secondEntryID}, + }, + { + name: "both valid entries in log (one with UUID, other with entryID) (using entryUUIDs)", + expectSuccess: true, + expectedEntryIDs: []string{firstEntryID, secondEntryID}, + entryUUIDs: []string{firstEntryID, secondUUID}, + }, + { + name: "one valid entry in log, one malformed (using entryUUIDs)", + expectSuccess: false, + expectedErrorResponseCode: http.StatusBadRequest, + entryUUIDs: []string{firstEntryID, invalidEntryID}, + }, + { + name: "one existing, one valid entryID but not in log (using entryUUIDs)", + expectSuccess: true, + expectedEntryIDs: []string{firstEntryID}, + entryUUIDs: []string{firstEntryID, nonexistentEntryID}, + }, + { + name: "two existing, one valid entryID but not in log (using entryUUIDs)", + expectSuccess: true, + expectedEntryIDs: []string{firstEntryID, secondEntryID}, + entryUUIDs: []string{firstEntryID, secondEntryID, nonexistentEntryID}, + }, + { + name: "two existing, one valid entryID but not in log (different ordering 1) (using entryUUIDs)", + expectSuccess: true, + expectedEntryIDs: []string{firstEntryID, secondEntryID}, + entryUUIDs: []string{firstEntryID, nonexistentEntryID, secondEntryID}, + }, + { + name: "two existing, one valid entryID but not in log (different ordering 2) (using entryUUIDs)", + expectSuccess: true, + expectedEntryIDs: []string{firstEntryID, secondEntryID}, + entryUUIDs: []string{nonexistentEntryID, firstEntryID, secondEntryID}, + }, + { + name: "two existing, one valid entryID but not in log (different ordering 3) (using entryUUIDs)", + expectSuccess: true, + expectedEntryIDs: []string{firstEntryID, secondEntryID}, + entryUUIDs: []string{nonexistentUUID, firstEntryID, secondEntryID}, + }, + { + name: "two existing, one valid entryID but not in log (different ordering 4) (using entryUUIDs)", + expectSuccess: true, + expectedEntryIDs: []string{firstEntryID, secondEntryID}, + entryUUIDs: []string{nonexistentEntryID, firstUUID, secondEntryID}, + }, + { + name: "two existing, one valid entryID but not in log (different ordering 5) (using entryUUIDs)", + expectSuccess: true, + expectedEntryIDs: []string{firstEntryID, secondEntryID}, + entryUUIDs: []string{nonexistentEntryID, firstEntryID, secondUUID}, + }, + { + name: "two existing, one valid entryID but not in log (different ordering 6) (using entryUUIDs)", + expectSuccess: true, + expectedEntryIDs: []string{firstEntryID, secondEntryID}, + entryUUIDs: []string{nonexistentUUID, firstEntryID, secondUUID}, + }, + { + name: "two existing, one valid entryID but not in log (different ordering 7) (using entryUUIDs)", + expectSuccess: true, + expectedEntryIDs: []string{firstEntryID, secondEntryID}, + entryUUIDs: []string{nonexistentEntryID, firstUUID, secondUUID}, + }, + { + name: "request more than 10 entries (using entryUUIDs)", + expectSuccess: false, + expectedErrorResponseCode: http.StatusUnprocessableEntity, + entryUUIDs: []string{firstEntryID, firstEntryID, firstEntryID, firstEntryID, firstEntryID, firstEntryID, firstEntryID, firstEntryID, firstEntryID, firstEntryID, firstEntryID}, + }, + { + name: "empty logIndexes", + expectSuccess: true, + expectedEntryIDs: []string{}, + logIndexes: []*int64{}, + }, + { + name: "first in log (using logIndexes)", + expectSuccess: true, + expectedEntryIDs: []string{firstEntryID}, + logIndexes: []*int64{&firstIndex}, + }, + { + name: "second in log (using logIndexes)", + expectSuccess: true, + expectedEntryIDs: []string{secondEntryID}, + logIndexes: []*int64{&secondIndex}, + 
}, + { + name: "invalid logIndex (using logIndexes)", + expectSuccess: false, + expectedErrorResponseCode: http.StatusUnprocessableEntity, + logIndexes: []*int64{&invalidIndex}, + }, + { + name: "valid index not in log (using logIndexes)", + expectSuccess: true, + expectedEntryIDs: []string{}, + logIndexes: []*int64{&nonexistentIndex}, + }, + { + name: "both valid entries in log (using logIndexes)", + expectSuccess: true, + expectedEntryIDs: []string{firstEntryID, secondEntryID}, + logIndexes: []*int64{&firstIndex, &secondIndex}, + }, + { + name: "one valid entry in log, one malformed (using logIndexes)", + expectSuccess: false, + expectedErrorResponseCode: http.StatusUnprocessableEntity, + logIndexes: []*int64{&firstIndex, &invalidIndex}, + }, + { + name: "one existing, one valid Index but not in log (using logIndexes)", + expectSuccess: true, + expectedEntryIDs: []string{firstEntryID}, + logIndexes: []*int64{&firstIndex, &nonexistentIndex}, + }, + { + name: "two existing, one valid Index but not in log (using logIndexes)", + expectSuccess: true, + expectedEntryIDs: []string{firstEntryID, secondEntryID}, + logIndexes: []*int64{&firstIndex, &secondIndex, &nonexistentIndex}, + }, + { + name: "two existing, one valid Index but not in log (different ordering 1) (using logIndexes)", + expectSuccess: true, + expectedEntryIDs: []string{firstEntryID, secondEntryID}, + logIndexes: []*int64{&firstIndex, &nonexistentIndex, &secondIndex}, + }, + { + name: "two existing, one valid Index but not in log (different ordering 2) (using logIndexes)", + expectSuccess: true, + expectedEntryIDs: []string{firstEntryID, secondEntryID}, + logIndexes: []*int64{&nonexistentIndex, &firstIndex, &secondIndex}, + }, + { + name: "request more than 10 entries (using logIndexes)", + expectSuccess: false, + expectedErrorResponseCode: http.StatusUnprocessableEntity, + logIndexes: []*int64{&firstIndex, &firstIndex, &firstIndex, &firstIndex, &firstIndex, &firstIndex, &firstIndex, &firstIndex, &firstIndex, &firstIndex, &firstIndex}, + }, + { + name: "empty entries", + expectSuccess: true, + expectedEntryIDs: []string{}, + entries: []models.ProposedEntry{}, + }, + { + name: "first in log (using entries)", + expectSuccess: true, + expectedEntryIDs: []string{firstEntryID}, + entries: []models.ProposedEntry{firstEntry}, + }, + { + name: "second in log (using entries)", + expectSuccess: true, + expectedEntryIDs: []string{secondEntryID}, + entries: []models.ProposedEntry{secondEntry}, + }, + { + name: "invalid entry (using entries)", + expectSuccess: false, + expectedErrorResponseCode: http.StatusUnprocessableEntity, + entries: []models.ProposedEntry{invalidEntry}, + }, + { + name: "valid entry not in log (using entries)", + expectSuccess: true, + expectedEntryIDs: []string{}, + entries: []models.ProposedEntry{nonexistentEntry}, + }, + { + name: "both valid entries in log (using entries)", + expectSuccess: true, + expectedEntryIDs: []string{firstEntryID, secondEntryID}, + entries: []models.ProposedEntry{firstEntry, secondEntry}, + }, + { + name: "one valid entry in log, one malformed (using entries)", + expectSuccess: false, + expectedErrorResponseCode: http.StatusUnprocessableEntity, + entries: []models.ProposedEntry{firstEntry, invalidEntry}, + }, + { + name: "one existing, one valid Index but not in log (using entries)", + expectSuccess: true, + expectedEntryIDs: []string{firstEntryID}, + entries: []models.ProposedEntry{firstEntry, nonexistentEntry}, + }, + { + name: "two existing, one valid Index but not in log (using entries)", 
+ expectSuccess: true, + expectedEntryIDs: []string{firstEntryID, secondEntryID}, + entries: []models.ProposedEntry{firstEntry, secondEntry, nonexistentEntry}, + }, + { + name: "two existing, one valid Index but not in log (different ordering 1) (using entries)", + expectSuccess: true, + expectedEntryIDs: []string{firstEntryID, secondEntryID}, + entries: []models.ProposedEntry{firstEntry, nonexistentEntry, secondEntry}, + }, + { + name: "two existing, one valid Index but not in log (different ordering 2) (using entries)", + expectSuccess: true, + expectedEntryIDs: []string{firstEntryID, secondEntryID}, + entries: []models.ProposedEntry{nonexistentEntry, firstEntry, secondEntry}, + }, + { + name: "request more than 10 entries (using entries)", + expectSuccess: false, + expectedErrorResponseCode: http.StatusUnprocessableEntity, + entries: []models.ProposedEntry{firstEntry, firstEntry, firstEntry, firstEntry, firstEntry, firstEntry, firstEntry, firstEntry, firstEntry, firstEntry, firstEntry}, + }, + { + name: "request more than 10 entries (using mixture)", + expectSuccess: false, + expectedErrorResponseCode: http.StatusUnprocessableEntity, + entryUUIDs: []string{firstEntryID, firstEntryID, firstEntryID, firstEntryID}, + logIndexes: []*int64{&firstIndex, &firstIndex, &firstIndex}, + entries: []models.ProposedEntry{firstEntry, firstEntry, firstEntry, firstEntry}, + }, + { + name: "request valid and invalid (using mixture)", + expectSuccess: false, + expectedErrorResponseCode: http.StatusUnprocessableEntity, + entryUUIDs: []string{firstEntryID, firstEntryID, firstEntryID, firstEntryID}, + logIndexes: []*int64{&invalidIndex, &invalidIndex, &invalidIndex}, + entries: []models.ProposedEntry{firstEntry, firstEntry, firstEntry}, + }, + { + name: "request valid and nonexistent (using mixture)", + expectSuccess: true, + expectedEntryIDs: []string{firstEntryID, secondEntryID, firstEntryID, secondEntryID, firstEntryID, secondEntryID}, + entryUUIDs: []string{firstEntryID, secondEntryID, nonexistentEntryID}, + logIndexes: []*int64{&firstIndex, &secondIndex, &nonexistentIndex}, + entries: []models.ProposedEntry{firstEntry, secondEntry, nonexistentEntry}, + }, + } + + for _, test := range testCases { + rekorClient, err := client.GetRekorClient("http://localhost:3000", client.WithRetryCount(0)) + if err != nil { + t.Fatal(err) + } + t.Run(test.name, func(t *testing.T) { + params := entries.NewSearchLogQueryParams() + entry := &models.SearchLogQuery{} + if len(test.entryUUIDs) > 0 { + t.Log("trying with entryUUIDs: ", test.entryUUIDs) + entry.EntryUUIDs = test.entryUUIDs + } + if len(test.logIndexes) > 0 { + entry.LogIndexes = test.logIndexes + } + if len(test.entries) > 0 { + entry.SetEntries(test.entries) + } + params.SetEntry(entry) + + resp, err := rekorClient.Entries.SearchLogQuery(params) + if err != nil { + if !test.expectSuccess { + if _, ok := err.(*entries.SearchLogQueryBadRequest); ok { + if test.expectedErrorResponseCode != http.StatusBadRequest { + t.Fatalf("unexpected error code received: expected %d, got %d: %v", test.expectedErrorResponseCode, http.StatusBadRequest, err) + } + } else if _, ok := err.(*entries.SearchLogQueryUnprocessableEntity); ok { + if test.expectedErrorResponseCode != http.StatusUnprocessableEntity { + t.Fatalf("unexpected error code received: expected %d, got %d: %v", test.expectedErrorResponseCode, http.StatusUnprocessableEntity, err) + } + } else if e, ok := err.(*entries.SearchLogQueryDefault); ok { + t.Fatalf("unexpected error: %v", e) + } + } else { + 
t.Fatalf("unexpected error: %v", err) + } + } else { + if len(resp.Payload) != len(test.expectedEntryIDs) { + t.Fatalf("unexpected number of responses received: expected %d, got %d", len(test.expectedEntryIDs), len(resp.Payload)) + } + // walk responses, build up list of returned entry IDs + returnedEntryIDs := []string{} + for _, entry := range resp.Payload { + // do this for dynamic keyed entries + for entryID, _ := range entry { + t.Log("received entry: ", entryID) + returnedEntryIDs = append(returnedEntryIDs, entryID) + } + } + // we have the expected number of responses, let's ensure they're the ones we expected + if out := cmp.Diff(returnedEntryIDs, test.expectedEntryIDs, cmpopts.SortSlices(func(a, b string) bool { return a < b })); out != "" { + t.Fatalf("unexpected responses: %v", out) + } + } + }) + } +}