Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

change zookeeper image #2147

Merged
merged 2 commits into from May 13, 2024
Merged

change zookeeper image #2147

merged 2 commits into from May 13, 2024

Conversation

Sotatek-TinnNguyen
Copy link
Contributor

Proposed changes

  • Change zookeeper to official

Types of changes

Please put an x in the boxes related to your change.

  • Bugfix
  • New feature or enhancement
  • Others

Checklist

Put an x in the boxes that apply. You can also fill these out after creating the PR. If you're unsure about any of them, don't hesitate to ask. We're here to help! This is simply a reminder of what we are going to look for before merging your code.

  • I have read the CONTRIBUTING GUIDELINES doc
  • I have signed the CLA
  • Lint and unit tests pass locally with my changes ($ make test)
  • I have added tests that prove my fix is effective or that my feature works
  • I have added necessary documentation (if appropriate)
  • Any dependent changes have been merged and published in downstream modules

Related issues

  • Please leave the issue numbers or links related to this PR here.

Further comments

If this is a relatively large or complex change, kick off the discussion by explaining why you chose the solution you did and what alternatives you considered, etc.

blukat29
blukat29 previously approved these changes May 7, 2024
@blukat29
Copy link
Contributor

blukat29 commented May 7, 2024

The official zookeeper image seems working. But there's no official Kafka image. Could you try https://hub.docker.com/r/bitnami/kafka instead?

JayChoi1736
JayChoi1736 previously approved these changes May 7, 2024
@blukat29
Copy link
Contributor

@Sotatek-TinnNguyen Found the solution thanks to uber/cadence#5975.
Please rebase this PR against the latest dev and apply the below fixes.

See diff
diff --git a/.circleci/config.yml b/.circleci/config.yml
index c8aad0d5..f74c875d 100644
--- a/.circleci/config.yml
+++ b/.circleci/config.yml
@@ -43,22 +43,20 @@ executors:
         auth:
           username: $DOCKER_LOGIN
           password: $DOCKER_PASSWORD
-      - name: zookeeper
-        image: wurstmeister/zookeeper
-        auth:
-          username: $DOCKER_LOGIN
-          password: $DOCKER_PASSWORD
       - name: kafka
-        image: wurstmeister/kafka
+        image: bitnami/kafka:3.7
         auth:
           username: $DOCKER_LOGIN
           password: $DOCKER_PASSWORD
         environment:
-          KAFKA_ZOOKEEPER_CONNECT: zookeeper:2181
-          KAFKA_ADVERTISED_LISTENERS: INSIDE://:9092,OUTSIDE://kafka:9094
-          KAFKA_LISTENERS: INSIDE://:9092,OUTSIDE://kafka:9094
-          KAFKA_LISTENER_SECURITY_PROTOCOL_MAP: INSIDE:PLAINTEXT,OUTSIDE:PLAINTEXT
-          KAFKA_INTER_BROKER_LISTENER_NAME: INSIDE
+          KAFKA_CFG_NODE_ID: 0
+          KAFKA_CFG_PROCESS_ROLES: controller,broker
+          KAFKA_CFG_CONTROLLER_QUORUM_VOTERS: 0@kafka:9093
+          KAFKA_CFG_LISTENERS: PLAINTEXT://:9092,CONTROLLER://:9093
+          KAFKA_CFG_ADVERTISED_LISTENERS: PLAINTEXT://kafka:9092
+          KAFKA_CFG_LISTENER_SECURITY_PROTOCOL_MAP: CONTROLLER:PLAINTEXT,PLAINTEXT:PLAINTEXT
+          KAFKA_CFG_CONTROLLER_LISTENER_NAMES: CONTROLLER
+          KAFKA_CFG_INTER_BROKER_LISTENER_NAME: PLAINTEXT
   darwin-executor: # this executor is for packaging darwin binaries
     working_directory: ~/go/src/github.com/klaytn/klaytn
     macos:
@@ -280,16 +278,6 @@ commands:
               sleep 1
             done
             echo Failed waiting for Redis && exit 1
-      - run:
-          name: "Waiting for zookeeper to be ready"
-          command: |
-            for i in `seq 1 10`;
-            do
-              nc -z zookeeper 2181 && echo Success && exit 0
-              echo -n .
-              sleep 1
-            done
-            echo Failed waiting for zookeeper && exit 1
       - run:
           name: "Waiting for Kafka to be ready"
           command: |
diff --git a/datasync/chaindatafetcher/kafka/kafka_test.go b/datasync/chaindatafetcher/kafka/kafka_test.go
index f27611a9..a5cd385f 100644
--- a/datasync/chaindatafetcher/kafka/kafka_test.go
+++ b/datasync/chaindatafetcher/kafka/kafka_test.go
@@ -45,11 +45,11 @@ type KafkaSuite struct {
 	topic    string
 }
 
-// In order to test KafkaSuite, any available kafka broker must be connectable with "kafka:9094".
+// In order to test KafkaSuite, any available kafka broker must be connectable with "kafka:9092".
 // If no kafka broker is available, the KafkaSuite tests are skipped.
 func (s *KafkaSuite) SetupTest() {
 	s.conf = GetDefaultKafkaConfig()
-	s.conf.Brokers = []string{"kafka:9094"}
+	s.conf.Brokers = []string{"kafka:9092"}
 	kfk, err := NewKafka(s.conf)
 	if err == sarama.ErrOutOfBrokers {
 		s.T().Log("Failed connecting to brokers", s.conf.Brokers)
@@ -159,27 +159,34 @@ func (s *KafkaSuite) TestKafka_setupTopicConcurrency() {
 }
 
 func (s *KafkaSuite) TestKafka_CreateAndDeleteTopic() {
+	topicName := "test-create-delete-topic"
+
 	// no topic to be deleted
-	err := s.kfk.DeleteTopic(s.topic)
+	err := s.kfk.DeleteTopic(topicName)
 	s.Error(err)
 	s.True(strings.Contains(err.Error(), sarama.ErrUnknownTopicOrPartition.Error()))
 
 	// created a topic successfully
-	err = s.kfk.CreateTopic(s.topic)
+	err = s.kfk.CreateTopic(topicName)
 	s.NoError(err)
 
 	// failed to create a duplicated topic
-	err = s.kfk.CreateTopic(s.topic)
+	err = s.kfk.CreateTopic(topicName)
 	s.Error(err)
 	s.True(strings.Contains(err.Error(), sarama.ErrTopicAlreadyExists.Error()))
 
 	// deleted a topic successfully
-	s.Nil(s.kfk.DeleteTopic(s.topic))
+	s.Nil(s.kfk.DeleteTopic(topicName))
 
-	topics, err := s.kfk.ListTopics()
-	if _, exist := topics[s.topic]; exist {
-		s.Fail("topic must not exist")
+	for i := 0; i < 10; i++ {
+		topics, err := s.kfk.ListTopics()
+		s.NoError(err)
+		if _, exist := topics[topicName]; !exist {
+			return // success
+		}
+		time.Sleep(time.Second)
 	}
+	s.Fail("topic must not exist")
 }
 
 type kafkaData struct {
@@ -225,7 +232,7 @@ func (s *KafkaSuite) subscribeData(topic, groupId string, numTests int, handler
 	}()
 
 	// wait for all data to be consumed
-	timeout := time.NewTimer(5 * time.Second)
+	timeout := time.NewTimer(10 * time.Second)
 	for i := 0; i < numTests; i++ {
 		select {
 		case <-numCheckCh:
@@ -466,7 +473,7 @@ func (s *KafkaSuite) TestKafka_PubSubWithSegements_BufferOverflow() {
 	}()
 
 	// checkout the returned error is buffer overflow error
-	timeout := time.NewTimer(3 * time.Second)
+	timeout := time.NewTimer(10 * time.Second)
 	select {
 	case <-timeout.C:
 		s.Fail("timeout")
@@ -501,7 +508,7 @@ func (s *KafkaSuite) TestKafka_PubSubWithSegments_ErrCallBack() {
 	}()
 
 	// checkout the returned error is callback error
-	timeout := time.NewTimer(3 * time.Second)
+	timeout := time.NewTimer(10 * time.Second)
 	select {
 	case <-timeout.C:
 		s.Fail("timeout")
@@ -543,7 +550,7 @@ func (s *KafkaSuite) TestKafka_PubSubWithSegments_MessageTimeout() {
 	}()
 
 	// checkout the returned error is callback error
-	timeout := time.NewTimer(3 * time.Second)
+	timeout := time.NewTimer(10 * time.Second)
 	select {
 	case <-timeout.C:
 		s.Fail("timeout")
@@ -580,6 +587,5 @@ func (s *KafkaSuite) TestKafka_Consumer_AddTopicAndHandler_Error() {
 }

 func TestKafkaSuite(t *testing.T) {
-       // TODO: revive after CircleCI image fix
-       // suite.Run(t, new(KafkaSuite))
+       suite.Run(t, new(KafkaSuite))
 }

@blukat29 blukat29 merged commit ed19158 into dev May 13, 2024
11 checks passed
Sign up for free to join this conversation on GitHub. Already have an account? Sign in to comment
Labels
None yet
Projects
None yet
Development

Successfully merging this pull request may close these issues.

None yet

4 participants