Skip to content

Commit

Permalink
GODRIVER-2093 Add more parallelization to test suite. (#1131)
Browse files Browse the repository at this point in the history
  • Loading branch information
benjirewis committed Nov 16, 2022
1 parent 5edaf2b commit 48b0701
Show file tree
Hide file tree
Showing 9 changed files with 54 additions and 83 deletions.
42 changes: 11 additions & 31 deletions Makefile
@@ -1,12 +1,3 @@
# We list packages with shell scripts and loop through them to avoid testing with ./...
# Running go test ./... will run tests in all packages concurrently which can lead to
# unexpected errors.
#
# TODO(GODRIVER-2093): Use ./... to run tests in all packages with parallelism and remove
# these PKG variables and loops from all make targets.
PKGS = $(shell etc/list_pkgs.sh)
TEST_PKGS = $(shell etc/list_test_pkgs.sh)

ATLAS_URIS = "$(ATLAS_FREE)" "$(ATLAS_REPLSET)" "$(ATLAS_SHARD)" "$(ATLAS_TLS11)" "$(ATLAS_TLS12)" "$(ATLAS_FREE_SRV)" "$(ATLAS_REPLSET_SRV)" "$(ATLAS_SHARD_SRV)" "$(ATLAS_TLS11_SRV)" "$(ATLAS_TLS12_SRV)" "$(ATLAS_SERVERLESS)" "$(ATLAS_SERVERLESS_SRV)"
GODISTS=linux/amd64 linux/386 linux/arm64 linux/arm linux/s390x
TEST_TIMEOUT = 1800
Expand All @@ -25,25 +16,22 @@ add-license:

.PHONY: build
build:
go build $(BUILD_TAGS) $(PKGS)
go build $(BUILD_TAGS) ./...

.PHONY: build-examples
build-examples:
go build $(BUILD_TAGS) ./examples/...

.PHONY: build-no-tags
build-no-tags:
go build $(PKGS)
go build ./...

.PHONY: build-tests
build-tests:
for TEST in $(TEST_PKGS); do \
go test $(BUILD_TAGS) -c $$TEST ; \
if [ $$? -ne 0 ]; \
then \
exit 1; \
fi \
done
# Use ^$ to match no tests so that no tests are actually run but all tests are
# compiled. Run with -short to ensure none of the TestMain functions try to
# connect to a server.
go test -short $(BUILD_TAGS) -run ^$$ ./...

.PHONY: check-fmt
check-fmt:
Expand Down Expand Up @@ -88,25 +76,19 @@ update-notices:
### Local testing targets. ###
.PHONY: test
test:
for TEST in $(TEST_PKGS) ; do \
go test $(BUILD_TAGS) -timeout $(TEST_TIMEOUT)s $$TEST ; \
done
go test $(BUILD_TAGS) -timeout $(TEST_TIMEOUT)s -p 1 ./...

.PHONY: test-cover
test-cover:
for TEST in $(TEST_PKGS) ; do \
go test $(BUILD_TAGS) -timeout $(TEST_TIMEOUT)s -cover $(COVER_ARGS) $$TEST ; \
done
go test $(BUILD_TAGS) -timeout $(TEST_TIMEOUT)s -cover $(COVER_ARGS) -p 1 ./...

.PHONY: test-race
test-race:
for TEST in $(TEST_PKGS) ; do \
go test $(BUILD_TAGS) -timeout $(TEST_TIMEOUT)s -race $$TEST ; \
done
go test $(BUILD_TAGS) -timeout $(TEST_TIMEOUT)s -race -p 1 ./...

.PHONY: test-short
test-short:
go test $(BUILD_TAGS) -timeout 60s -short $(TEST_PKGS)
go test $(BUILD_TAGS) -timeout 60s -short -p 1 ./...

### Evergreen specific targets. ###
.PHONY: build-aws-ecs-test
Expand All @@ -115,9 +97,7 @@ build-aws-ecs-test:

.PHONY: evg-test
evg-test:
for TEST in $(TEST_PKGS); do \
go test -exec "env PKG_CONFIG_PATH=$(PKG_CONFIG_PATH) LD_LIBRARY_PATH=$(LD_LIBRARY_PATH)" $(BUILD_TAGS) -v -timeout $(TEST_TIMEOUT)s $$TEST >> test.suite ; \
done
go test -exec "env PKG_CONFIG_PATH=$(PKG_CONFIG_PATH) LD_LIBRARY_PATH=$(LD_LIBRARY_PATH)" $(BUILD_TAGS) -v -timeout $(TEST_TIMEOUT)s -p 1 ./... >> test.suite

.PHONY: evg-test-atlas
evg-test-atlas:
Expand Down
9 changes: 0 additions & 9 deletions etc/list_pkgs.sh

This file was deleted.

14 changes: 0 additions & 14 deletions etc/list_test_pkgs.sh

This file was deleted.

8 changes: 8 additions & 0 deletions internal/uuid/uuid_test.go
Expand Up @@ -18,7 +18,11 @@ import (
// Test that initializing many package-global UUID sources concurrently never leads to any duplicate
// UUIDs being generated.
func TestGlobalSource(t *testing.T) {
t.Parallel()

t.Run("exp rand 1 UUID x 1,000,000 goroutines using a global source", func(t *testing.T) {
t.Parallel()

if israce.Enabled {
t.Skip("skipping as race detector is enabled and test exceeds 8128 goroutine limit")
}
Expand All @@ -41,6 +45,8 @@ func TestGlobalSource(t *testing.T) {
wg.Wait()
})
t.Run("exp rand 1 UUID x 1,000,000 goroutines each initializing a new source", func(t *testing.T) {
t.Parallel()

if israce.Enabled {
t.Skip("skipping as race detector is enabled and test exceeds 8128 goroutine limit")
}
Expand All @@ -66,6 +72,8 @@ func TestGlobalSource(t *testing.T) {
wg.Wait()
})
t.Run("exp rand 1,000 UUIDs x 1,000 goroutines each initializing a new source", func(t *testing.T) {
t.Parallel()

// Read 1,000 UUIDs from each goroutine and assert that there is never a duplicate value, either
// from the same goroutine or from separate goroutines.
const iterations = 1000
Expand Down
9 changes: 8 additions & 1 deletion mongo/integration/client_side_encryption_prose_test.go
Expand Up @@ -53,6 +53,8 @@ const (
)

func TestClientSideEncryptionProse(t *testing.T) {
t.Parallel()

verifyClientSideEncryptionVarsSet(t)
mt := mtest.New(t, mtest.NewOptions().MinServerVersion("4.2").Enterprise(true).CreateClient(false))
defer mt.Close()
Expand Down Expand Up @@ -476,6 +478,8 @@ func TestClientSideEncryptionProse(t *testing.T) {
assert.NotNil(mt, err, "expected InsertOne error for document over 16MiB, got nil")
})
mt.Run("5. views are prohibited", func(mt *mtest.T) {
mt.Parallel()

kmsProviders := map[string]map[string]interface{}{
"local": {
"key": localMasterKey,
Expand Down Expand Up @@ -933,6 +937,8 @@ func TestClientSideEncryptionProse(t *testing.T) {
}
})
mt.RunOpts("8. bypass mongocryptd spawning", noClientOpts, func(mt *mtest.T) {
mt.Parallel()

kmsProviders := map[string]map[string]interface{}{
"local": {
"key": localMasterKey,
Expand Down Expand Up @@ -1081,6 +1087,8 @@ func TestClientSideEncryptionProse(t *testing.T) {
{"collection", mongo.CollectionStream},
}
mt.RunOpts("auto encryption errors", noClientOpts, func(mt *mtest.T) {
mt.Parallel()

for _, tc := range testCases {
mt.Run(tc.name, func(mt *mtest.T) {
autoEncryptionOpts := options.AutoEncryption().
Expand Down Expand Up @@ -1993,7 +2001,6 @@ func TestClientSideEncryptionProse(t *testing.T) {

mt.RunOpts("20. Bypass creating mongocryptd client when shared library is loaded",
noClientOpts, func(mt *mtest.T) {

cryptSharedLibPath := os.Getenv("CRYPT_SHARED_LIB_PATH")
if cryptSharedLibPath == "" {
mt.Skip("CRYPT_SHARED_LIB_PATH not set, skipping")
Expand Down
18 changes: 12 additions & 6 deletions mongo/integration/client_test.go
Expand Up @@ -515,6 +515,8 @@ func TestClient(t *testing.T) {
})

mt.Run("minimum RTT is monitored", func(mt *mtest.T) {
mt.Parallel()

// Reset the client with a dialer that delays all network round trips by 300ms and set the
// heartbeat interval to 100ms to reduce the time it takes to collect RTT samples.
mt.ResetClient(options.Client().
Expand Down Expand Up @@ -553,6 +555,8 @@ func TestClient(t *testing.T) {
// Test that if the minimum RTT is greater than the remaining timeout for an operation, the
// operation is not sent to the server and no connections are closed.
mt.Run("minimum RTT used to prevent sending requests", func(mt *mtest.T) {
mt.Parallel()

// Assert that we can call Ping with a 250ms timeout.
ctx, cancel := context.WithTimeout(context.Background(), 250*time.Millisecond)
defer cancel()
Expand Down Expand Up @@ -610,9 +614,7 @@ func TestClient(t *testing.T) {
})

mt.Run("RTT90 is monitored", func(mt *mtest.T) {
if testing.Short() {
t.Skip("skipping integration test in short mode")
}
mt.Parallel()

// Reset the client with a dialer that delays all network round trips by 300ms and set the
// heartbeat interval to 100ms to reduce the time it takes to collect RTT samples.
Expand Down Expand Up @@ -652,9 +654,7 @@ func TestClient(t *testing.T) {
// Test that if Timeout is set and the RTT90 is greater than the remaining timeout for an operation, the
// operation is not sent to the server, fails with a timeout error, and no connections are closed.
mt.Run("RTT90 used to prevent sending requests", func(mt *mtest.T) {
if testing.Short() {
t.Skip("skipping integration test in short mode")
}
mt.Parallel()

// Assert that we can call Ping with a 250ms timeout.
ctx, cancel := context.WithTimeout(context.Background(), 250*time.Millisecond)
Expand Down Expand Up @@ -792,12 +792,16 @@ func TestClient(t *testing.T) {
}

func TestClientStress(t *testing.T) {
t.Parallel()

mtOpts := mtest.NewOptions().CreateClient(false)
mt := mtest.New(t, mtOpts)
defer mt.Close()

// Test that a Client can recover from a massive traffic spike after the traffic spike is over.
mt.Run("Client recovers from traffic spike", func(mt *mtest.T) {
mt.Parallel()

oid := primitive.NewObjectID()
doc := bson.D{{Key: "_id", Value: oid}, {Key: "key", Value: "value"}}
_, err := mt.Coll.InsertOne(context.Background(), doc)
Expand Down Expand Up @@ -852,6 +856,8 @@ func TestClientStress(t *testing.T) {
SetPoolMonitor(tpm.PoolMonitor).
SetMaxPoolSize(maxPoolSize))
mt.RunOpts(fmt.Sprintf("maxPoolSize %d", maxPoolSize), maxPoolSizeOpt, func(mt *mtest.T) {
mt.Parallel()

// Print the count of connection created, connection closed, and pool clear events
// collected during the test to help with debugging.
defer func() {
Expand Down
29 changes: 12 additions & 17 deletions mongo/integration/collection_test.go
Expand Up @@ -8,7 +8,6 @@ package integration

import (
"context"
"os"
"strings"
"testing"
"time"
Expand Down Expand Up @@ -120,7 +119,11 @@ func TestCollection(t *testing.T) {
})
})
mt.RunOpts("insert many", noClientOpts, func(mt *mtest.T) {
mt.Parallel()

mt.Run("success", func(mt *mtest.T) {
mt.Parallel()

want1 := int32(11)
want2 := int32(12)
docs := []interface{}{
Expand All @@ -137,11 +140,7 @@ func TestCollection(t *testing.T) {
assert.Equal(mt, want2, res.InsertedIDs[2], "expected inserted ID %v, got %v", want2, res.InsertedIDs[2])
})
mt.Run("batches", func(mt *mtest.T) {
// TODO(GODRIVER-425): remove this as part of a larger project to refactor integration and other
// long-running TODO tasks.
if os.Getenv("EVR_TASK_ID") == "" {
mt.Skip("skipping long running integration test outside of evergreen")
}
mt.Parallel()

const (
megabyte = 10 * 10 * 10 * 10 * 10 * 10
Expand All @@ -167,11 +166,7 @@ func TestCollection(t *testing.T) {
assert.Equal(mt, numDocs, len(res.InsertedIDs), "expected %v inserted IDs, got %v", numDocs, len(res.InsertedIDs))
})
mt.Run("large document batches", func(mt *mtest.T) {
// TODO(GODRIVER-425): remove this as part of a larger project to refactor integration and other
// long-running TODO tasks.
if os.Getenv("EVR_TASK_ID") == "" {
mt.Skip("skipping long running integration test outside of evergreen")
}
mt.Parallel()

docs := []interface{}{create16MBDocument(mt), create16MBDocument(mt)}
_, err := mt.Coll.InsertMany(context.Background(), docs)
Expand All @@ -182,6 +177,8 @@ func TestCollection(t *testing.T) {
assert.Equal(mt, "insert", evt.CommandName, "expected 'insert' event, got '%v'", evt.CommandName)
})
mt.RunOpts("write error", noClientOpts, func(mt *mtest.T) {
mt.Parallel()

docs := []interface{}{
bson.D{{"_id", primitive.NewObjectID()}},
bson.D{{"_id", primitive.NewObjectID()}},
Expand Down Expand Up @@ -212,7 +209,9 @@ func TestCollection(t *testing.T) {
}
})
mt.Run("return only inserted ids", func(mt *mtest.T) {
id := int32(11)
mt.Parallel()

id := int32(15)
docs := []interface{}{
bson.D{{"_id", id}},
bson.D{{"_id", id}},
Expand Down Expand Up @@ -249,11 +248,7 @@ func TestCollection(t *testing.T) {
}
})
mt.Run("writeError index", func(mt *mtest.T) {
// TODO(GODRIVER-425): remove this as part of a larger project to refactor integration and other
// long-running TODO tasks.
if os.Getenv("EVR_TASK_ID") == "" {
mt.Skip("skipping long running integration test outside of evergreen")
}
mt.Parallel()

// force multiple batches
numDocs := 700000
Expand Down
4 changes: 0 additions & 4 deletions mongo/integration/csot_cse_prose_test.go
Expand Up @@ -30,10 +30,6 @@ func TestCSOTClientSideEncryptionProse(t *testing.T) {

mt.RunOpts("2. maxTimeMS is not set for commands sent to mongocryptd",
noClientOpts, func(mt *mtest.T) {
if testing.Short() {
mt.Skip("skipping integration test in short mode")
}

kmsProviders := map[string]map[string]interface{}{
"local": {
"key": localMasterKey,
Expand Down
4 changes: 3 additions & 1 deletion x/mongo/driver/topology/pool_test.go
Expand Up @@ -513,7 +513,9 @@ func TestPool(t *testing.T) {
time.Sleep(50 * time.Millisecond)
c2, err := p.checkOut(context.Background())
noerr(t, err)
assert.NotEqualf(t, c1, c2, "expected a new connection on 2nd check out after idle timeout expires")
// Assert that the connection pointers are not equal. Don't use "assert.NotEqual" because it asserts
// non-equality of fields, possibly accessing some fields non-atomically and causing a race condition.
assert.True(t, c1 != c2, "expected a new connection on 2nd check out after idle timeout expires")
assert.Equalf(t, 2, d.lenopened(), "should have opened 2 connections")
assert.Equalf(t, 1, p.totalConnectionCount(), "pool should have 1 total connection")

Expand Down

0 comments on commit 48b0701

Please sign in to comment.