add support for Kind attribute of LogpushJob
sbfaulkner committed Jun 17, 2022
1 parent d07e8d6 commit d70275c
Showing 2 changed files with 166 additions and 45 deletions.
1 change: 1 addition & 0 deletions logpush.go
@@ -15,6 +15,7 @@ type LogpushJob struct {
ID int `json:"id,omitempty"`
Dataset string `json:"dataset"`
Enabled bool `json:"enabled"`
Kind string `json:"kind,omitempty"`
Name string `json:"name"`
LogpullOptions string `json:"logpull_options"`
DestinationConf string `json:"destination_conf"`
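For illustration, a minimal sketch of how the new Kind field might be used from calling code (not part of this commit; the API token and zone ID are placeholders, and CreateZoneLogpushJob is the same call exercised by the tests below):

package main

import (
	"context"
	"fmt"
	"log"

	cloudflare "github.com/cloudflare/cloudflare-go"
)

func main() {
	// Placeholder credentials; substitute a real API token and zone ID.
	api, err := cloudflare.NewWithAPIToken("YOUR_API_TOKEN")
	if err != nil {
		log.Fatal(err)
	}

	job, err := api.CreateZoneLogpushJob(context.Background(), "YOUR_ZONE_ID", cloudflare.LogpushJob{
		Dataset: "http_requests",
		Enabled: true,
		Name:    "example.com",
		// Kind is the field added by this commit; leaving it empty omits it
		// from the payload entirely (json:"kind,omitempty").
		Kind:            "edge",
		LogpullOptions:  "fields=RayID,ClientIP,EdgeStartTimestamp&timestamps=rfc3339",
		DestinationConf: "s3://mybucket/logs?region=us-west-2",
	})
	if err != nil {
		log.Fatal(err)
	}
	fmt.Printf("created logpush job %d\n", job.ID)
}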
210 changes: 165 additions & 45 deletions logpush_test.go
@@ -4,6 +4,7 @@ import (
"context"
"encoding/json"
"fmt"
"io/ioutil"
"log"
"net/http"
"strconv"
@@ -19,9 +20,24 @@ const (
serverLogpushJobDescription = `{
"id": %d,
"dataset": "http_requests",
"enabled": false,
"kind": "",
"enabled": false,
"name": "example.com",
"logpull_options": "fields=RayID,ClientIP,EdgeStartTimestamp&timestamps=rfc3339",
"logpull_options": "fields=RayID,ClientIP,EdgeStartTimestamp&timestamps=rfc3339",
"destination_conf": "s3://mybucket/logs?region=us-west-2",
"last_complete": "%[2]s",
"last_error": "%[2]s",
"error_message": "test",
"frequency": "high"
}
`
serverEdgeLogpushJobDescription = `{
"id": %d,
"dataset": "http_requests",
"kind": "edge",
"enabled": true,
"name": "example.com",
"logpull_options": "fields=RayID,ClientIP,EdgeStartTimestamp&timestamps=rfc3339",
"destination_conf": "s3://mybucket/logs?region=us-west-2",
"last_complete": "%[2]s",
"last_error": "%[2]s",
"error_message": "test",
"frequency": "high"
@@ -30,13 +46,13 @@
}
`
serverLogpushGetOwnershipChallengeDescription = `{
"filename": "logs/challenge-filename.txt",
"filename": "logs/challenge-filename.txt",
"valid": true,
"message": ""
}
`
serverLogpushGetOwnershipChallengeInvalidResponseDescription = `{
"filename": "logs/challenge-filename.txt",
"filename": "logs/challenge-filename.txt",
"valid": false,
"message": "destination is invalid"
}
@@ -57,6 +73,19 @@ var (
ErrorMessage: "test",
Frequency: "high",
}
expectedEdgeLogpushJobStruct = LogpushJob{
ID: jobID,
Dataset: "http_requests",
Kind: "edge",
Enabled: true,
Name: "example.com",
LogpullOptions: "fields=RayID,ClientIP,EdgeStartTimestamp&timestamps=rfc3339",
DestinationConf: "s3://mybucket/logs?region=us-west-2",
LastComplete: &testLogpushTimestamp,
LastError: &testLogpushTimestamp,
ErrorMessage: "test",
Frequency: "high",
}
expectedLogpushGetOwnershipChallengeStruct = LogpushGetOwnershipChallenge{
Filename: "logs/challenge-filename.txt",
Valid: true,
@@ -98,58 +127,149 @@ func TestLogpushJobs(t *testing.T) {
}

func TestGetLogpushJob(t *testing.T) {
setup()
defer teardown()

handler := func(w http.ResponseWriter, r *http.Request) {
assert.Equal(t, http.MethodGet, r.Method, "Expected method 'GET', got %s", r.Method)
w.Header().Set("content-type", "application/json")
fmt.Fprintf(w, `{
"result": %s,
"success": true,
"errors": null,
"messages": null
}
`, fmt.Sprintf(serverLogpushJobDescription, jobID, testLogpushTimestamp.Format(time.RFC3339Nano)))
testCases := map[string]struct {
result string
want LogpushJob
}{
"core logpush job": {
result: serverLogpushJobDescription,
want: expectedLogpushJobStruct,
},
"edge logpush job": {
result: serverEdgeLogpushJobDescription,
want: expectedEdgeLogpushJobStruct,
},
}

mux.HandleFunc("/zones/"+testZoneID+"/logpush/jobs/"+strconv.Itoa(jobID), handler)
want := expectedLogpushJobStruct
for name, tc := range testCases {
t.Run(name, func(t *testing.T) {
setup()
defer teardown()

actual, err := client.GetZoneLogpushJob(context.Background(), testZoneID, jobID)
if assert.NoError(t, err) {
assert.Equal(t, want, actual)
handler := func(w http.ResponseWriter, r *http.Request) {
assert.Equal(t, http.MethodGet, r.Method, "Expected method 'GET', got %s", r.Method)
w.Header().Set("content-type", "application/json")
fmt.Fprintf(w, `{
"result": %s,
"success": true,
"errors": null,
"messages": null
}
`, fmt.Sprintf(tc.result, jobID, testLogpushTimestamp.Format(time.RFC3339Nano)))
}

mux.HandleFunc("/zones/"+testZoneID+"/logpush/jobs/"+strconv.Itoa(jobID), handler)

actual, err := client.GetZoneLogpushJob(context.Background(), testZoneID, jobID)
if assert.NoError(t, err) {
assert.Equal(t, tc.want, actual)
}
})
}
}

func TestCreateLogpushJob(t *testing.T) {
setup()
defer teardown()
newJob := LogpushJob{
Enabled: false,
Name: "example.com",
LogpullOptions: "fields=RayID,ClientIP,EdgeStartTimestamp&timestamps=rfc3339",
DestinationConf: "s3://mybucket/logs?region=us-west-2",
testCases := map[string]struct {
newJob LogpushJob
payload string
result string
want LogpushJob
}{
"core logpush job": {
newJob: LogpushJob{
Dataset: "http_requests",
Enabled: false,
Name: "example.com",
LogpullOptions: "fields=RayID,ClientIP,EdgeStartTimestamp&timestamps=rfc3339",
DestinationConf: "s3://mybucket/logs?region=us-west-2",
},
payload: `{
"dataset": "http_requests",
"enabled":false,
"name":"example.com",
"logpull_options":"fields=RayID,ClientIP,EdgeStartTimestamp&timestamps=rfc3339",
"destination_conf":"s3://mybucket/logs?region=us-west-2"
}`,
result: serverLogpushJobDescription,
want: expectedLogpushJobStruct,
},
"edge logpush job": {
newJob: LogpushJob{
Dataset: "http_requests",
Enabled: true,
Name: "example.com",
Kind: "edge",
LogpullOptions: "fields=RayID,ClientIP,EdgeStartTimestamp&timestamps=rfc3339",
DestinationConf: "s3://mybucket/logs?region=us-west-2",
},
payload: `{
"dataset": "http_requests",
"enabled":true,
"name":"example.com",
"kind":"edge",
"logpull_options":"fields=RayID,ClientIP,EdgeStartTimestamp&timestamps=rfc3339",
"destination_conf":"s3://mybucket/logs?region=us-west-2"
}`,
result: serverEdgeLogpushJobDescription,
want: expectedEdgeLogpushJobStruct,
},
"filtered edge logpush job": {
newJob: LogpushJob{
Dataset: "http_requests",
Enabled: true,
Name: "example.com",
Kind: "edge",
LogpullOptions: "fields=RayID,ClientIP,EdgeStartTimestamp&timestamps=rfc3339",
DestinationConf: "s3://mybucket/logs?region=us-west-2",
Filter: &LogpushJobFilters{
Where: LogpushJobFilter{Key: "ClientRequestHost", Operator: "eq", Value: "example.com"},
},
},
payload: `{
"dataset": "http_requests",
"enabled":true,
"name":"example.com",
"kind":"edge",
"logpull_options":"fields=RayID,ClientIP,EdgeStartTimestamp&timestamps=rfc3339",
"destination_conf":"s3://mybucket/logs?region=us-west-2",
"filter":"{\"where\":{\"key\":\"ClientRequestHost\",\"operator\":\"eq\",\"value\":\"example.com\"}}"
}`,
result: serverEdgeLogpushJobDescription,
want: expectedEdgeLogpushJobStruct,
},
}

handler := func(w http.ResponseWriter, r *http.Request) {
assert.Equal(t, http.MethodPost, r.Method, "Expected method 'POST', got %s", r.Method)
w.Header().Set("content-type", "application/json")
fmt.Fprintf(w, `{
"result": %s,
"success": true,
"errors": null,
"messages": null
}
`, fmt.Sprintf(serverLogpushJobDescription, jobID, testLogpushTimestamp.Format(time.RFC3339Nano)))
}
for name, tc := range testCases {
t.Run(name, func(t *testing.T) {
setup()
defer teardown()

mux.HandleFunc("/zones/"+testZoneID+"/logpush/jobs", handler)
want := &expectedLogpushJobStruct
handler := func(w http.ResponseWriter, r *http.Request) {
assert.Equal(t, http.MethodPost, r.Method, "Expected method 'POST', got %s", r.Method)
b, err := ioutil.ReadAll(r.Body)
defer r.Body.Close()

actual, err := client.CreateZoneLogpushJob(context.Background(), testZoneID, newJob)
if assert.NoError(t, err) {
assert.Equal(t, want, actual)
if assert.NoError(t, err) {
assert.JSONEq(t, tc.payload, string(b), "JSON payload not equal")
}

w.Header().Set("content-type", "application/json")
fmt.Fprintf(w, `{
"result": %s,
"success": true,
"errors": null,
"messages": null
}
`, fmt.Sprintf(tc.result, jobID, testLogpushTimestamp.Format(time.RFC3339Nano)))
}

mux.HandleFunc("/zones/"+testZoneID+"/logpush/jobs", handler)

actual, err := client.CreateZoneLogpushJob(context.Background(), testZoneID, tc.newJob)
if assert.NoError(t, err) {
assert.Equal(t, tc.want, *actual)
}
})
}
}

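And a sketch of the wire-format consequence of the kind,omitempty tag (again illustrative, assuming the struct's JSON tags drive marshaling): an empty Kind is dropped from the serialized job, which is why the "core logpush job" payload above carries no kind key while the edge cases send "kind":"edge".

package main

import (
	"encoding/json"
	"fmt"

	cloudflare "github.com/cloudflare/cloudflare-go"
)

func main() {
	core := cloudflare.LogpushJob{Dataset: "http_requests", Name: "example.com"}
	edge := cloudflare.LogpushJob{Dataset: "http_requests", Name: "example.com", Kind: "edge"}

	coreJSON, _ := json.Marshal(core)
	edgeJSON, _ := json.Marshal(edge)

	// Expected (per the test payloads above): the first line has no "kind"
	// key at all; the second includes "kind":"edge".
	fmt.Println(string(coreJSON))
	fmt.Println(string(edgeJSON))
}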
