Commit
This commit does not belong to any branch on this repository, and may belong to a fork outside of the repository.
- Loading branch information
1 parent
19e857f
commit 170e690
Showing
4 changed files
with
251 additions
and
0 deletions.
There are no files selected for viewing
This file contains bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
Original file line number | Diff line number | Diff line change |
---|---|---|
@@ -0,0 +1,144 @@ | ||
// inspired by Java Kafka testcontainers' module | ||
//https://github.com/testcontainers/testcontainers-java/blob/master/modules/kafka/src/main/java/org/testcontainers/containers/KafkaContainer.java | ||
|
||
package canned | ||
|
||
import ( | ||
"context" | ||
"github.com/testcontainers/testcontainers-go" | ||
"io/ioutil" | ||
"os" | ||
) | ||
|
||
const (
	// clusterName is the name of the docker network shared by both containers.
	clusterName = "kafka-cluster"
	// zookeeperPort is the zookeeper client port (exposed and set as
	// ZOOKEEPER_CLIENT_PORT on the zookeeper container).
	zookeeperPort = "2181"
	// kafkaBrokerPort is the BROKER listener used for inter-broker traffic
	// inside the docker network.
	kafkaBrokerPort = "9092"
	// kafkaClientPort is the PLAINTEXT listener exposed to external clients.
	kafkaClientPort = "9093"
	// Pinned confluent platform images for zookeeper and kafka.
	zookeeperImage = "confluentinc/cp-zookeeper:5.2.1"
	kafkaImage     = "confluentinc/cp-kafka:5.2.1"
)
|
||
// KafkaCluster groups the kafka broker container and its backing zookeeper
// container so they can be started and addressed as a single unit.
type KafkaCluster struct {
	kafkaContainer     testcontainers.Container
	zookeeperContainer testcontainers.Container
}
|
||
// StartCluster starts kafka cluster | ||
func (kc *KafkaCluster) StartCluster() { | ||
ctx := context.Background() | ||
|
||
kc.zookeeperContainer.Start(ctx) | ||
kc.kafkaContainer.Start(ctx) | ||
kc.startKafka() | ||
} | ||
|
||
// GetKafkaHost gets the kafka host:port so it can be accessed from outside the container | ||
func (kc *KafkaCluster) GetKafkaHost() string { | ||
ctx := context.Background() | ||
host, err := kc.kafkaContainer.Host(ctx) | ||
if err != nil { | ||
panic(err) | ||
} | ||
port, err := kc.kafkaContainer.MappedPort(ctx, kafkaClientPort) | ||
if err != nil { | ||
panic(err) | ||
} | ||
|
||
// returns the exposed kafka host:port | ||
return host + ":" + port.Port() | ||
} | ||
|
||
func (kc *KafkaCluster) startKafka() { | ||
ctx := context.Background() | ||
|
||
kafkaStartFile, err := ioutil.TempFile("", "testcontainers_start.sh") | ||
if err != nil { | ||
panic(err) | ||
} | ||
defer os.Remove(kafkaStartFile.Name()) | ||
|
||
// needs to set KAFKA_ADVERTISED_LISTENERS with the exposed kafka port | ||
exposedHost := kc.GetKafkaHost() | ||
kafkaStartFile.WriteString("#!/bin/bash \n") | ||
kafkaStartFile.WriteString("export KAFKA_ADVERTISED_LISTENERS='PLAINTEXT://" + exposedHost + ",BROKER://kafka:" + kafkaBrokerPort + "'\n") | ||
kafkaStartFile.WriteString(". /etc/confluent/docker/bash-config \n") | ||
kafkaStartFile.WriteString("/etc/confluent/docker/configure \n") | ||
kafkaStartFile.WriteString("/etc/confluent/docker/launch \n") | ||
|
||
err = kc.kafkaContainer.CopyFileToContainer(ctx, kafkaStartFile.Name(), "testcontainers_start.sh", 0700) | ||
if err != nil { | ||
panic(err) | ||
} | ||
} | ||
|
||
func NewKafkaCluster() *KafkaCluster { | ||
ctx := context.Background() | ||
|
||
// creates a network, so kafka and zookeeper can communicate directly | ||
network, err := testcontainers.GenericNetwork(ctx, testcontainers.GenericNetworkRequest{ | ||
NetworkRequest: testcontainers.NetworkRequest{Name: clusterName}, | ||
}) | ||
if err != nil { | ||
panic(err) | ||
} | ||
|
||
dockerNetwork := network.(*testcontainers.DockerNetwork) | ||
|
||
zookeeperContainer := createZookeeperContainer(dockerNetwork) | ||
kafkaContainer := createKafkaContainer(dockerNetwork) | ||
|
||
return &KafkaCluster{ | ||
zookeeperContainer: zookeeperContainer, | ||
kafkaContainer: kafkaContainer, | ||
} | ||
} | ||
|
||
func createZookeeperContainer(network *testcontainers.DockerNetwork) testcontainers.Container { | ||
ctx := context.Background() | ||
|
||
// creates the zookeeper container, but do not start it yet | ||
zookeeperContainer, err := testcontainers.GenericContainer(ctx, testcontainers.GenericContainerRequest{ | ||
ContainerRequest: testcontainers.ContainerRequest{ | ||
Image: zookeeperImage, | ||
ExposedPorts: []string{zookeeperPort}, | ||
Env: map[string]string{"ZOOKEEPER_CLIENT_PORT": zookeeperPort, "ZOOKEEPER_TICK_TIME": "2000"}, | ||
Networks: []string{network.Name}, | ||
NetworkAliases: map[string][]string{network.Name: {"zookeeper"}}, | ||
}, | ||
}) | ||
if err != nil { | ||
panic(err) | ||
} | ||
|
||
return zookeeperContainer | ||
} | ||
|
||
func createKafkaContainer(network *testcontainers.DockerNetwork) testcontainers.Container { | ||
ctx := context.Background() | ||
|
||
// creates the kafka container, but do not start it yet | ||
kafkaContainer, err := testcontainers.GenericContainer(ctx, testcontainers.GenericContainerRequest{ | ||
ContainerRequest: testcontainers.ContainerRequest{ | ||
Image: kafkaImage, | ||
ExposedPorts: []string{kafkaClientPort}, | ||
Env: map[string]string{ | ||
"KAFKA_BROKER_ID": "1", | ||
"KAFKA_ZOOKEEPER_CONNECT": "zookeeper:" + zookeeperPort, | ||
"KAFKA_LISTENERS": "PLAINTEXT://0.0.0.0:" + kafkaClientPort + ",BROKER://0.0.0.0:" + kafkaBrokerPort, | ||
"KAFKA_LISTENER_SECURITY_PROTOCOL_MAP": "BROKER:PLAINTEXT,PLAINTEXT:PLAINTEXT", | ||
"KAFKA_INTER_BROKER_LISTENER_NAME": "BROKER", | ||
"KAFKA_OFFSETS_TOPIC_REPLICATION_FACTOR": "1", | ||
}, | ||
Networks: []string{network.Name}, | ||
NetworkAliases: map[string][]string{network.Name: {"kafka"}}, | ||
// the container only starts when it finds and run /testcontainers_start.sh | ||
Cmd: []string{"sh", "-c", "while [ ! -f /testcontainers_start.sh ]; do sleep 0.1; done; /testcontainers_start.sh"}, | ||
}, | ||
}) | ||
if err != nil { | ||
panic(err) | ||
} | ||
|
||
return kafkaContainer | ||
} |
This file contains bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
Original file line number | Diff line number | Diff line change |
---|---|---|
@@ -0,0 +1,101 @@ | ||
package canned_test | ||
|
||
import ( | ||
"fmt" | ||
"github.com/testcontainers/testcontainers-go/canned" | ||
"gopkg.in/confluentinc/confluent-kafka-go.v1/kafka" | ||
"reflect" | ||
"testing" | ||
) | ||
|
||
const (
	// KAFKA_TOPIC is the topic used by both the producer and the consumer
	// in this test.
	// NOTE(review): Go convention prefers mixedCaps (kafkaTopic); renaming
	// would need to touch every use in this file.
	KAFKA_TOPIC = "myTopic"
)
|
||
func TestKafkaConsumerAndProducerUsingTestContainer(t *testing.T) { | ||
kafkaCluster := canned.NewKafkaCluster() | ||
kafkaCluster.StartCluster() | ||
kafkaServer := kafkaCluster.GetKafkaHost() | ||
producedMessages := []string{"Trying", "out", "kafka", "with", "test", "containers"} | ||
|
||
produceKafkaMessages(kafkaServer, producedMessages) | ||
consumedMessages := consumeKafkaMessages(kafkaServer) | ||
|
||
if !reflect.DeepEqual(producedMessages, consumedMessages) { | ||
t.Fatalf("Consumed messages are not equal to produced messages. [%s] != [%s]", consumedMessages, producedMessages) | ||
} | ||
} | ||
|
||
func produceKafkaMessages(kafkaServer string, messages []string) { | ||
kafkaProducer, err := kafka.NewProducer(&kafka.ConfigMap{ | ||
"bootstrap.servers": kafkaServer, | ||
}) | ||
if err != nil { | ||
panic(err) | ||
} | ||
defer kafkaProducer.Close() | ||
|
||
fmt.Printf("Producing messages into kafka...\n") | ||
|
||
topic := KAFKA_TOPIC | ||
for _, word := range messages { | ||
deliveryChan := make(chan kafka.Event) | ||
|
||
kafkaProducer.Produce(&kafka.Message{ | ||
TopicPartition: kafka.TopicPartition{Topic: &topic, Partition: kafka.PartitionAny}, | ||
Value: []byte(word), | ||
Key: []byte("key"), | ||
}, deliveryChan) | ||
|
||
e := <-deliveryChan | ||
m := e.(*kafka.Message) | ||
|
||
if m.TopicPartition.Error != nil { | ||
fmt.Printf("Delivery failed: %v\n", m.TopicPartition.Error) | ||
} else { | ||
fmt.Printf("Delivered message [%s] to topic %s [%d] at offset %v\n", | ||
word, *m.TopicPartition.Topic, m.TopicPartition.Partition, m.TopicPartition.Offset) | ||
} | ||
} | ||
} | ||
|
||
func consumeKafkaMessages(kafkaServer string) []string { | ||
kafkaConsumer, err := kafka.NewConsumer(&kafka.ConfigMap{ | ||
"bootstrap.servers": kafkaServer, | ||
"group.id": "myGroup", | ||
"auto.offset.reset": "earliest", | ||
}) | ||
if err != nil { | ||
panic(err) | ||
} | ||
err = kafkaConsumer.SubscribeTopics([]string{KAFKA_TOPIC}, nil) | ||
if err != nil { | ||
panic(err) | ||
} | ||
defer kafkaConsumer.Close() | ||
|
||
fmt.Printf("Consuming messages from kafka...\n") | ||
|
||
var consumedMessages []string | ||
run := true | ||
for run == true { | ||
select { | ||
default: | ||
ev := kafkaConsumer.Poll(100) | ||
if ev == nil { | ||
continue | ||
} | ||
|
||
switch e := ev.(type) { | ||
case *kafka.Message: | ||
fmt.Printf("Message on %s: %s\n", e.TopicPartition, string(e.Value)) | ||
consumedMessages = append(consumedMessages, string(e.Value)) | ||
default: | ||
fmt.Printf("Consumed all messges. Stopping consumer.\n") | ||
run = false | ||
} | ||
} | ||
} | ||
|
||
return consumedMessages | ||
} |
This file contains bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
This file contains bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters