From 2d2326efcf8993a79d13a7207ed2fcf4ea1367e2 Mon Sep 17 00:00:00 2001 From: Leonid Koftun Date: Mon, 3 Feb 2020 23:20:59 +0100 Subject: [PATCH 01/41] feat: Add alter and list partition reassignments Implementation of KIP-455. Also includes work to make the Sarama protocol support the new optional tagged fields functionality from KIP-482 - add headerVersion for all requests (Ref: KIP-482) - implement AlterPartitionReassignmentsRequest/Response protocol - add tests for alter_partition_reassignments - pretty print partition reassignment errors - add ListPartitionReassignmentsRequest/Response protocol - decode empty tagged fields in response header v1 - make sure mockbroker can handle different response header versions - make sure partition reassignment can be aborted - add Alter/ListPartitionReassignments to admin client API https://cwiki.apache.org/confluence/display/KAFKA/KIP-455%3A+Create+an+Administrative+API+for+Replica+Reassignment https://cwiki.apache.org/confluence/display/KAFKA/KIP-482%3A+The+Kafka+Protocol+should+Support+Optional+Tagged+Fields Co-authored-by: Dirk Wilden Co-authored-by: Leonid Koftun Co-authored-by: iyacontrol --- acl_create_request.go | 4 + acl_create_response.go | 4 + acl_delete_request.go | 4 + acl_delete_response.go | 4 + acl_describe_request.go | 4 + acl_describe_response.go | 4 + add_offsets_to_txn_request.go | 4 + add_offsets_to_txn_response.go | 4 + add_partitions_to_txn_request.go | 4 + add_partitions_to_txn_response.go | 4 + admin.go | 84 +++++++++ admin_test.go | 161 +++++++++++++++++ alter_configs_request.go | 4 + alter_configs_response.go | 4 + alter_partition_reassignments_request.go | 130 ++++++++++++++ alter_partition_reassignments_request_test.go | 56 ++++++ alter_partition_reassignments_response.go | 157 ++++++++++++++++ ...r_partition_reassignments_response_test.go | 45 +++++ api_versions_request.go | 4 + api_versions_response.go | 4 + async_producer_test.go | 2 +- broker.go | 61 ++++++- broker_test.go | 4 + consumer_metadata_request.go | 4 + consumer_metadata_response.go | 4 + create_partitions_request.go | 4 + create_partitions_response.go | 4 + create_topics_request.go | 4 + create_topics_response.go | 4 + delete_groups_request.go | 4 + delete_groups_response.go | 4 + delete_records_request.go | 4 + delete_records_response.go | 4 + delete_topics_request.go | 4 + delete_topics_response.go | 4 + describe_configs_request.go | 4 + describe_configs_response.go | 4 + describe_groups_request.go | 4 + describe_groups_response.go | 4 + describe_log_dirs_request.go | 4 + describe_log_dirs_response.go | 4 + encoder_decoder.go | 5 + end_txn_request.go | 4 + end_txn_response.go | 4 + errors.go | 16 ++ fetch_request.go | 4 + fetch_response.go | 4 + find_coordinator_request.go | 4 + find_coordinator_response.go | 4 + heartbeat_request.go | 4 + heartbeat_response.go | 4 + init_producer_id_request.go | 4 + init_producer_id_response.go | 4 + join_group_request.go | 4 + join_group_response.go | 4 + leave_group_request.go | 4 + leave_group_response.go | 4 + list_groups_request.go | 4 + list_groups_response.go | 4 + list_partition_reassignments_request.go | 98 ++++++++++ list_partition_reassignments_request_test.go | 31 ++++ list_partition_reassignments_response.go | 169 ++++++++++++++++++ list_partition_reassignments_response_test.go | 32 ++++ metadata_request.go | 4 + metadata_response.go | 4 + mockbroker.go | 34 +++- mockresponses.go | 93 +++++++--- offset_commit_request.go | 4 + offset_commit_response.go | 4 + offset_fetch_request.go | 4 + offset_fetch_response.go | 
4 + offset_manager_test.go | 6 +- offset_request.go | 4 + offset_response.go | 4 + packet_decoder.go | 6 + packet_encoder.go | 7 + prep_encoder.go | 49 +++++ produce_request.go | 4 + produce_response.go | 16 ++ real_decoder.go | 96 ++++++++++ real_encoder.go | 52 ++++++ request.go | 28 ++- request_test.go | 23 ++- response_header.go | 9 +- response_header_test.go | 22 ++- sasl_authenticate_request.go | 4 + sasl_authenticate_response.go | 4 + sasl_handshake_request.go | 4 + sasl_handshake_response.go | 4 + sync_group_request.go | 4 + sync_group_response.go | 4 + txn_offset_commit_request.go | 4 + txn_offset_commit_response.go | 4 + 93 files changed, 1692 insertions(+), 60 deletions(-) create mode 100644 alter_partition_reassignments_request.go create mode 100644 alter_partition_reassignments_request_test.go create mode 100644 alter_partition_reassignments_response.go create mode 100644 alter_partition_reassignments_response_test.go create mode 100644 list_partition_reassignments_request.go create mode 100644 list_partition_reassignments_request_test.go create mode 100644 list_partition_reassignments_response.go create mode 100644 list_partition_reassignments_response_test.go diff --git a/acl_create_request.go b/acl_create_request.go index da1cdefc3..6d8a70e1a 100644 --- a/acl_create_request.go +++ b/acl_create_request.go @@ -47,6 +47,10 @@ func (c *CreateAclsRequest) version() int16 { return c.Version } +func (c *CreateAclsRequest) headerVersion() int16 { + return 1 +} + func (c *CreateAclsRequest) requiredVersion() KafkaVersion { switch c.Version { case 1: diff --git a/acl_create_response.go b/acl_create_response.go index f5a5e9a64..bc018ed00 100644 --- a/acl_create_response.go +++ b/acl_create_response.go @@ -55,6 +55,10 @@ func (c *CreateAclsResponse) version() int16 { return 0 } +func (c *CreateAclsResponse) headerVersion() int16 { + return 0 +} + func (c *CreateAclsResponse) requiredVersion() KafkaVersion { return V0_11_0_0 } diff --git a/acl_delete_request.go b/acl_delete_request.go index 15908eac9..415252259 100644 --- a/acl_delete_request.go +++ b/acl_delete_request.go @@ -48,6 +48,10 @@ func (d *DeleteAclsRequest) version() int16 { return int16(d.Version) } +func (c *DeleteAclsRequest) headerVersion() int16 { + return 1 +} + func (d *DeleteAclsRequest) requiredVersion() KafkaVersion { switch d.Version { case 1: diff --git a/acl_delete_response.go b/acl_delete_response.go index 3754faeba..cb6308826 100644 --- a/acl_delete_response.go +++ b/acl_delete_response.go @@ -56,6 +56,10 @@ func (d *DeleteAclsResponse) version() int16 { return d.Version } +func (d *DeleteAclsResponse) headerVersion() int16 { + return 0 +} + func (d *DeleteAclsResponse) requiredVersion() KafkaVersion { return V0_11_0_0 } diff --git a/acl_describe_request.go b/acl_describe_request.go index 5222d46ee..29841a5ce 100644 --- a/acl_describe_request.go +++ b/acl_describe_request.go @@ -25,6 +25,10 @@ func (d *DescribeAclsRequest) version() int16 { return int16(d.Version) } +func (d *DescribeAclsRequest) headerVersion() int16 { + return 1 +} + func (d *DescribeAclsRequest) requiredVersion() KafkaVersion { switch d.Version { case 1: diff --git a/acl_describe_response.go b/acl_describe_response.go index a60d39f35..c43408b24 100644 --- a/acl_describe_response.go +++ b/acl_describe_response.go @@ -77,6 +77,10 @@ func (d *DescribeAclsResponse) version() int16 { return d.Version } +func (d *DescribeAclsResponse) headerVersion() int16 { + return 0 +} + func (d *DescribeAclsResponse) requiredVersion() KafkaVersion { switch 
d.Version { case 1: diff --git a/add_offsets_to_txn_request.go b/add_offsets_to_txn_request.go index fc227ab86..95586f9a1 100644 --- a/add_offsets_to_txn_request.go +++ b/add_offsets_to_txn_request.go @@ -48,6 +48,10 @@ func (a *AddOffsetsToTxnRequest) version() int16 { return 0 } +func (a *AddOffsetsToTxnRequest) headerVersion() int16 { + return 1 +} + func (a *AddOffsetsToTxnRequest) requiredVersion() KafkaVersion { return V0_11_0_0 } diff --git a/add_offsets_to_txn_response.go b/add_offsets_to_txn_response.go index c88c1f89f..bdb184419 100644 --- a/add_offsets_to_txn_response.go +++ b/add_offsets_to_txn_response.go @@ -40,6 +40,10 @@ func (a *AddOffsetsToTxnResponse) version() int16 { return 0 } +func (a *AddOffsetsToTxnResponse) headerVersion() int16 { + return 0 +} + func (a *AddOffsetsToTxnResponse) requiredVersion() KafkaVersion { return V0_11_0_0 } diff --git a/add_partitions_to_txn_request.go b/add_partitions_to_txn_request.go index 8d4b42e34..6289f4514 100644 --- a/add_partitions_to_txn_request.go +++ b/add_partitions_to_txn_request.go @@ -72,6 +72,10 @@ func (a *AddPartitionsToTxnRequest) version() int16 { return 0 } +func (a *AddPartitionsToTxnRequest) headerVersion() int16 { + return 1 +} + func (a *AddPartitionsToTxnRequest) requiredVersion() KafkaVersion { return V0_11_0_0 } diff --git a/add_partitions_to_txn_response.go b/add_partitions_to_txn_response.go index eb4f23eca..73b73b07f 100644 --- a/add_partitions_to_txn_response.go +++ b/add_partitions_to_txn_response.go @@ -79,6 +79,10 @@ func (a *AddPartitionsToTxnResponse) version() int16 { return 0 } +func (a *AddPartitionsToTxnResponse) headerVersion() int16 { + return 0 +} + func (a *AddPartitionsToTxnResponse) requiredVersion() KafkaVersion { return V0_11_0_0 } diff --git a/admin.go b/admin.go index 7dd725064..a7d733da1 100644 --- a/admin.go +++ b/admin.go @@ -42,6 +42,14 @@ type ClusterAdmin interface { // new partitions. This operation is supported by brokers with version 1.0.0 or higher. CreatePartitions(topic string, count int32, assignment [][]int32, validateOnly bool) error + // Alter the replica assignment for partitions. + // This operation is supported by brokers with version 2.4.0 or higher. + AlterPartitionReassignments(topic string, assignment [][]int32) error + + // Provides info on ongoing partition replica reassignments. + // This operation is supported by brokers with version 2.4.0 or higher. + ListPartitionReassignments(topic string, partitions []int32) (topicStatus map[string]map[int32]*PartitionReplicaReassignmentsStatus, err error) + + // Delete records whose offset is smaller than the given offset of the corresponding partition. + // This operation is supported by brokers with version 0.11.0.0 or higher. 
DeleteRecords(topic string, partitionOffsets map[int32]int64) error @@ -452,6 +460,82 @@ func (ca *clusterAdmin) CreatePartitions(topic string, count int32, assignment [ }) } +func (ca *clusterAdmin) AlterPartitionReassignments(topic string, assignment [][]int32) error { + if topic == "" { + return ErrInvalidTopic + } + + request := &AlterPartitionReassignmentsRequest{ + TimeoutMs: int32(60000), + Version: int16(0), + } + + for i := 0; i < len(assignment); i++ { + request.AddBlock(topic, int32(i), assignment[i]) + } + + return ca.retryOnError(isErrNoController, func() error { + b, err := ca.Controller() + if err != nil { + return err + } + + errs := make([]error, 0) + + rsp, err := b.AlterPartitionReassignments(request) + + if err != nil { + errs = append(errs, err) + } else { + if rsp.ErrorCode > 0 { + errs = append(errs, errors.New(rsp.ErrorCode.Error())) + } + + for topic, topicErrors := range rsp.Errors { + for partition, partitionError := range topicErrors { + if partitionError.errorCode != ErrNoError { + errStr := fmt.Sprintf("[%s-%d]: %s", topic, partition, partitionError.errorCode.Error()) + errs = append(errs, errors.New(errStr)) + } + } + } + } + + if len(errs) > 0 { + return ErrReassignPartitions{MultiError{&errs}} + } + + return nil + }) +} + +func (ca *clusterAdmin) ListPartitionReassignments(topic string, partitions []int32) (topicStatus map[string]map[int32]*PartitionReplicaReassignmentsStatus, err error) { + if topic == "" { + return nil, ErrInvalidTopic + } + + request := &ListPartitionReassignmentsRequest{ + TimeoutMs: int32(60000), + Version: int16(0), + } + + request.AddBlock(topic, partitions) + + b, err := ca.Controller() + if err != nil { + return nil, err + } + _ = b.Open(ca.client.Config()) + + rsp, err := b.ListPartitionReassignments(request) + + if err == nil && rsp != nil { + return rsp.TopicStatus, nil + } else { + return nil, err + } +} + func (ca *clusterAdmin) DeleteRecords(topic string, partitionOffsets map[int32]int64) error { if topic == "" { return ErrInvalidTopic diff --git a/admin_test.go b/admin_test.go index fcbe539b5..bcec47f61 100644 --- a/admin_test.go +++ b/admin_test.go @@ -332,6 +332,167 @@ func TestClusterAdminCreatePartitionsWithoutAuthorization(t *testing.T) { } } +func TestClusterAdminAlterPartitionReassignments(t *testing.T) { + seedBroker := NewMockBroker(t, 1) + defer seedBroker.Close() + + secondBroker := NewMockBroker(t, 2) + defer secondBroker.Close() + + seedBroker.SetHandlerByMap(map[string]MockResponse{ + "MetadataRequest": NewMockMetadataResponse(t). + SetController(secondBroker.BrokerID()). + SetBroker(seedBroker.Addr(), seedBroker.BrokerID()). 
+ SetBroker(secondBroker.Addr(), secondBroker.BrokerID()), + }) + + secondBroker.SetHandlerByMap(map[string]MockResponse{ + "AlterPartitionReassignmentsRequest": NewMockAlterPartitionReassignmentsResponse(t), + }) + + config := NewConfig() + config.Version = V2_4_0_0 + admin, err := NewClusterAdmin([]string{seedBroker.Addr()}, config) + if err != nil { + t.Fatal(err) + } + + var topicAssignment = make([][]int32, 0, 3) + + err = admin.AlterPartitionReassignments("my_topic", topicAssignment) + if err != nil { + t.Fatal(err) + } + + err = admin.Close() + if err != nil { + t.Fatal(err) + } +} + +func TestClusterAdminAlterPartitionReassignmentsWithDiffVersion(t *testing.T) { + seedBroker := NewMockBroker(t, 1) + defer seedBroker.Close() + + secondBroker := NewMockBroker(t, 2) + defer secondBroker.Close() + + seedBroker.SetHandlerByMap(map[string]MockResponse{ + "MetadataRequest": NewMockMetadataResponse(t). + SetController(secondBroker.BrokerID()). + SetBroker(seedBroker.Addr(), seedBroker.BrokerID()). + SetBroker(secondBroker.Addr(), secondBroker.BrokerID()), + }) + + secondBroker.SetHandlerByMap(map[string]MockResponse{ + "AlterPartitionReassignmentsRequest": NewMockAlterPartitionReassignmentsResponse(t), + }) + + config := NewConfig() + config.Version = V2_3_0_0 + admin, err := NewClusterAdmin([]string{seedBroker.Addr()}, config) + if err != nil { + t.Fatal(err) + } + + var topicAssignment = make([][]int32, 0, 3) + + err = admin.AlterPartitionReassignments("my_topic", topicAssignment) + + if !strings.Contains(err.Error(), ErrUnsupportedVersion.Error()) { + t.Fatal(err) + } + + err = admin.Close() + if err != nil { + t.Fatal(err) + } +} + +func TestClusterAdminListPartitionReassignments(t *testing.T) { + seedBroker := NewMockBroker(t, 1) + defer seedBroker.Close() + + secondBroker := NewMockBroker(t, 2) + defer secondBroker.Close() + + seedBroker.SetHandlerByMap(map[string]MockResponse{ + "MetadataRequest": NewMockMetadataResponse(t). + SetController(secondBroker.BrokerID()). + SetBroker(seedBroker.Addr(), seedBroker.BrokerID()). + SetBroker(secondBroker.Addr(), secondBroker.BrokerID()), + }) + + secondBroker.SetHandlerByMap(map[string]MockResponse{ + "ListPartitionReassignmentsRequest": NewMockListPartitionReassignmentsResponse(t), + }) + + config := NewConfig() + config.Version = V2_4_0_0 + admin, err := NewClusterAdmin([]string{seedBroker.Addr()}, config) + if err != nil { + t.Fatal(err) + } + + response, err := admin.ListPartitionReassignments("my_topic", []int32{0, 1}) + if err != nil { + t.Fatal(err) + } + + partitionStatus, ok := response["my_topic"] + if !ok { + t.Fatalf("topic missing in response") + } else { + if len(partitionStatus) != 2 { + t.Fatalf("partition missing in response") + } + } + + err = admin.Close() + if err != nil { + t.Fatal(err) + } +} + +func TestClusterAdminListPartitionReassignmentsWithDiffVersion(t *testing.T) { + seedBroker := NewMockBroker(t, 1) + defer seedBroker.Close() + + secondBroker := NewMockBroker(t, 2) + defer secondBroker.Close() + + seedBroker.SetHandlerByMap(map[string]MockResponse{ + "MetadataRequest": NewMockMetadataResponse(t). + SetController(secondBroker.BrokerID()). + SetBroker(seedBroker.Addr(), seedBroker.BrokerID()). 
+ SetBroker(secondBroker.Addr(), secondBroker.BrokerID()), + }) + + secondBroker.SetHandlerByMap(map[string]MockResponse{ + "ListPartitionReassignmentsRequest": NewMockListPartitionReassignmentsResponse(t), + }) + + config := NewConfig() + config.Version = V2_3_0_0 + admin, err := NewClusterAdmin([]string{seedBroker.Addr()}, config) + if err != nil { + t.Fatal(err) + } + + var partitions = make([]int32, 0) + + _, err = admin.ListPartitionReassignments("my_topic", partitions) + + if !strings.Contains(err.Error(), ErrUnsupportedVersion.Error()) { + t.Fatal(err) + } + + err = admin.Close() + if err != nil { + t.Fatal(err) + } +} + func TestClusterAdminDeleteRecords(t *testing.T) { topicName := "my_topic" seedBroker := NewMockBroker(t, 1) diff --git a/alter_configs_request.go b/alter_configs_request.go index 26c275b83..c88bb604a 100644 --- a/alter_configs_request.go +++ b/alter_configs_request.go @@ -117,6 +117,10 @@ func (a *AlterConfigsRequest) version() int16 { return 0 } +func (a *AlterConfigsRequest) headerVersion() int16 { + return 1 +} + func (a *AlterConfigsRequest) requiredVersion() KafkaVersion { return V0_11_0_0 } diff --git a/alter_configs_response.go b/alter_configs_response.go index 3893663cf..99a526005 100644 --- a/alter_configs_response.go +++ b/alter_configs_response.go @@ -92,6 +92,10 @@ func (a *AlterConfigsResponse) version() int16 { return 0 } +func (a *AlterConfigsResponse) headerVersion() int16 { + return 0 +} + func (a *AlterConfigsResponse) requiredVersion() KafkaVersion { return V0_11_0_0 } diff --git a/alter_partition_reassignments_request.go b/alter_partition_reassignments_request.go new file mode 100644 index 000000000..f0a2f9dd5 --- /dev/null +++ b/alter_partition_reassignments_request.go @@ -0,0 +1,130 @@ +package sarama + +type alterPartitionReassignmentsBlock struct { + replicas []int32 +} + +func (b *alterPartitionReassignmentsBlock) encode(pe packetEncoder) error { + if err := pe.putNullableCompactInt32Array(b.replicas); err != nil { + return err + } + + pe.putEmptyTaggedFieldArray() + return nil +} + +func (b *alterPartitionReassignmentsBlock) decode(pd packetDecoder) (err error) { + if b.replicas, err = pd.getCompactInt32Array(); err != nil { + return err + } + return nil +} + +type AlterPartitionReassignmentsRequest struct { + TimeoutMs int32 + blocks map[string]map[int32]*alterPartitionReassignmentsBlock + Version int16 +} + +func (r *AlterPartitionReassignmentsRequest) encode(pe packetEncoder) error { + pe.putInt32(r.TimeoutMs) + + pe.putCompactArrayLength(len(r.blocks)) + + for topic, partitions := range r.blocks { + if err := pe.putCompactString(topic); err != nil { + return err + } + pe.putCompactArrayLength(len(partitions)) + for partition, block := range partitions { + pe.putInt32(partition) + if err := block.encode(pe); err != nil { + return err + } + } + pe.putEmptyTaggedFieldArray() + } + + pe.putEmptyTaggedFieldArray() + + return nil +} + +func (r *AlterPartitionReassignmentsRequest) decode(pd packetDecoder, version int16) (err error) { + r.Version = version + + if r.TimeoutMs, err = pd.getInt32(); err != nil { + return err + } + + topicCount, err := pd.getCompactArrayLength() + if err != nil { + return err + } + if topicCount > 0 { + r.blocks = make(map[string]map[int32]*alterPartitionReassignmentsBlock) + for i := 0; i < topicCount; i++ { + topic, err := pd.getCompactString() + if err != nil { + return err + } + partitionCount, err := pd.getCompactArrayLength() + if err != nil { + return err + } + r.blocks[topic] = 
make(map[int32]*alterPartitionReassignmentsBlock) + for j := 0; j < partitionCount; j++ { + partition, err := pd.getInt32() + if err != nil { + return err + } + block := &alterPartitionReassignmentsBlock{} + if err := block.decode(pd); err != nil { + return err + } + r.blocks[topic][partition] = block + + if _, err := pd.getEmptyTaggedFieldArray(); err != nil { + return err + } + } + if _, err := pd.getEmptyTaggedFieldArray(); err != nil { + return err + } + } + } + + if _, err := pd.getEmptyTaggedFieldArray(); err != nil { + return err + } + + return +} + +func (r *AlterPartitionReassignmentsRequest) key() int16 { + return 45 +} + +func (r *AlterPartitionReassignmentsRequest) version() int16 { + return r.Version +} + +func (r *AlterPartitionReassignmentsRequest) headerVersion() int16 { + return 2 +} + +func (r *AlterPartitionReassignmentsRequest) requiredVersion() KafkaVersion { + return V2_4_0_0 +} + +func (r *AlterPartitionReassignmentsRequest) AddBlock(topic string, partitionID int32, replicas []int32) { + if r.blocks == nil { + r.blocks = make(map[string]map[int32]*alterPartitionReassignmentsBlock) + } + + if r.blocks[topic] == nil { + r.blocks[topic] = make(map[int32]*alterPartitionReassignmentsBlock) + } + + r.blocks[topic][partitionID] = &alterPartitionReassignmentsBlock{replicas} +} diff --git a/alter_partition_reassignments_request_test.go b/alter_partition_reassignments_request_test.go new file mode 100644 index 000000000..8d282729d --- /dev/null +++ b/alter_partition_reassignments_request_test.go @@ -0,0 +1,56 @@ +package sarama + +import "testing" + +var ( + alterPartitionReassignmentsRequestNoBlock = []byte{ + 0, 0, 39, 16, // timeout 10000 + 1, // 1-1=0 blocks + 0, // empty tagged fields + } + + alterPartitionReassignmentsRequestOneBlock = []byte{ + 0, 0, 39, 16, // timeout 10000 + 2, // 2-1=1 block + 6, 116, 111, 112, 105, 99, // topic name "topic" as compact string + 2, // 2-1=1 partitions + 0, 0, 0, 0, // partitionId + 3, // 3-1=2 replica array size + 0, 0, 3, 232, // replica 1000 + 0, 0, 3, 233, // replica 1001 + 0, 0, 0, // empty tagged fields + } + + alterPartitionReassignmentsAbortRequest = []byte{ + 0, 0, 39, 16, // timeout 10000 + 2, // 2-1=1 block + 6, 116, 111, 112, 105, 99, // topic name "topic" as compact string + 2, // 2-1=1 partitions + 0, 0, 0, 0, // partitionId + 0, // replica array is null (indicates that a pending reassignment should be aborted) + 0, 0, 0, // empty tagged fields + } +) + +func TestAlterPartitionReassignmentRequest(t *testing.T) { + var request *AlterPartitionReassignmentsRequest + + request = &AlterPartitionReassignmentsRequest{ + TimeoutMs: int32(10000), + Version: int16(0), + } + + testRequest(t, "no block", request, alterPartitionReassignmentsRequestNoBlock) + + request.AddBlock("topic", 0, []int32{1000, 1001}) + + testRequest(t, "one block", request, alterPartitionReassignmentsRequestOneBlock) + + request = &AlterPartitionReassignmentsRequest{ + TimeoutMs: int32(10000), + Version: int16(0), + } + request.AddBlock("topic", 0, nil) + + testRequest(t, "abort assignment", request, alterPartitionReassignmentsAbortRequest) +} diff --git a/alter_partition_reassignments_response.go b/alter_partition_reassignments_response.go new file mode 100644 index 000000000..b3f9a15fe --- /dev/null +++ b/alter_partition_reassignments_response.go @@ -0,0 +1,157 @@ +package sarama + +type alterPartitionReassignmentsErrorBlock struct { + errorCode KError + errorMessage *string +} + +func (b *alterPartitionReassignmentsErrorBlock) encode(pe packetEncoder) error { 
+ pe.putInt16(int16(b.errorCode)) + if err := pe.putNullableCompactString(b.errorMessage); err != nil { + return err + } + pe.putEmptyTaggedFieldArray() + + return nil +} + +func (b *alterPartitionReassignmentsErrorBlock) decode(pd packetDecoder) (err error) { + errorCode, err := pd.getInt16() + if err != nil { + return err + } + b.errorCode = KError(errorCode) + b.errorMessage, err = pd.getCompactNullableString() + + if _, err := pd.getEmptyTaggedFieldArray(); err != nil { + return err + } + return err +} + +type AlterPartitionReassignmentsResponse struct { + Version int16 + ThrottleTimeMs int32 + ErrorCode KError + ErrorMessage *string + Errors map[string]map[int32]*alterPartitionReassignmentsErrorBlock +} + +func (r *AlterPartitionReassignmentsResponse) AddError(topic string, partition int32, kerror KError, message *string) { + if r.Errors == nil { + r.Errors = make(map[string]map[int32]*alterPartitionReassignmentsErrorBlock) + } + partitions := r.Errors[topic] + if partitions == nil { + partitions = make(map[int32]*alterPartitionReassignmentsErrorBlock) + r.Errors[topic] = partitions + } + + partitions[partition] = &alterPartitionReassignmentsErrorBlock{errorCode: kerror, errorMessage: message} +} + +func (r *AlterPartitionReassignmentsResponse) encode(pe packetEncoder) error { + pe.putInt32(r.ThrottleTimeMs) + pe.putInt16(int16(r.ErrorCode)) + if err := pe.putNullableCompactString(r.ErrorMessage); err != nil { + return err + } + + pe.putCompactArrayLength(len(r.Errors)) + for topic, partitions := range r.Errors { + if err := pe.putCompactString(topic); err != nil { + return err + } + pe.putCompactArrayLength(len(partitions)) + for partition, block := range partitions { + pe.putInt32(partition) + + if err := block.encode(pe); err != nil { + return err + } + } + pe.putEmptyTaggedFieldArray() + } + + pe.putEmptyTaggedFieldArray() + return nil +} + +func (r *AlterPartitionReassignmentsResponse) decode(pd packetDecoder, version int16) (err error) { + r.Version = version + + if r.ThrottleTimeMs, err = pd.getInt32(); err != nil { + return err + } + + kerr, err := pd.getInt16() + if err != nil { + return err + } + + r.ErrorCode = KError(kerr) + + if r.ErrorMessage, err = pd.getCompactNullableString(); err != nil { + return err + } + + numTopics, err := pd.getCompactArrayLength() + if err != nil { + return err + } + + if numTopics > 0 { + r.Errors = make(map[string]map[int32]*alterPartitionReassignmentsErrorBlock, numTopics) + for i := 0; i < numTopics; i++ { + topic, err := pd.getCompactString() + if err != nil { + return err + } + + ongoingPartitionReassignments, err := pd.getCompactArrayLength() + if err != nil { + return err + } + + r.Errors[topic] = make(map[int32]*alterPartitionReassignmentsErrorBlock, ongoingPartitionReassignments) + + for j := 0; j < ongoingPartitionReassignments; j++ { + partition, err := pd.getInt32() + if err != nil { + return err + } + block := &alterPartitionReassignmentsErrorBlock{} + if err := block.decode(pd); err != nil { + return err + } + + r.Errors[topic][partition] = block + } + if _, err = pd.getEmptyTaggedFieldArray(); err != nil { + return err + } + } + } + + if _, err = pd.getEmptyTaggedFieldArray(); err != nil { + return err + } + + return nil +} + +func (r *AlterPartitionReassignmentsResponse) key() int16 { + return 45 +} + +func (r *AlterPartitionReassignmentsResponse) version() int16 { + return r.Version +} + +func (r *AlterPartitionReassignmentsResponse) headerVersion() int16 { + return 1 +} + +func (r *AlterPartitionReassignmentsResponse) 
requiredVersion() KafkaVersion { + return V2_4_0_0 +} diff --git a/alter_partition_reassignments_response_test.go b/alter_partition_reassignments_response_test.go new file mode 100644 index 000000000..614b571b3 --- /dev/null +++ b/alter_partition_reassignments_response_test.go @@ -0,0 +1,45 @@ +package sarama + +import "testing" + +var ( + alterPartitionReassignmentsResponseNoError = []byte{ + 0, 0, 39, 16, // ThrottleTimeMs 10000 + 0, 0, // errorcode + 0, // null string + 1, // empty errors array + 0, // empty tagged fields + } + + alterPartitionReassignmentsResponseWithError = []byte{ + 0, 0, 39, 16, // ThrottleTimeMs 10000 + 0, 12, // errorcode + 6, 101, 114, 114, 111, 114, // error string "error" + 2, // errors array length 1 + 6, 116, 111, 112, 105, 99, // topic name "topic" + 2, // partition array length 1 + 0, 0, 0, 1, // partitionId + 0, 3, // kerror + 7, 101, 114, 114, 111, 114, 50, // error string "error2" + 0, 0, 0, // empty tagged fields + } +) + +func TestAlterPartitionReassignmentResponse(t *testing.T) { + var response *AlterPartitionReassignmentsResponse + + response = &AlterPartitionReassignmentsResponse{ + ThrottleTimeMs: int32(10000), + Version: int16(0), + } + + testResponse(t, "no error", response, alterPartitionReassignmentsResponseNoError) + + errorMessage := "error" + partitionError := "error2" + response.ErrorCode = 12 + response.ErrorMessage = &errorMessage + response.AddError("topic", 1, 3, &partitionError) + + testResponse(t, "with error", response, alterPartitionReassignmentsResponseWithError) +} diff --git a/api_versions_request.go b/api_versions_request.go index b33167c0b..d67c5e1e5 100644 --- a/api_versions_request.go +++ b/api_versions_request.go @@ -20,6 +20,10 @@ func (a *ApiVersionsRequest) version() int16 { return 0 } +func (a *ApiVersionsRequest) headerVersion() int16 { + return 1 +} + func (a *ApiVersionsRequest) requiredVersion() KafkaVersion { return V0_10_0_0 } diff --git a/api_versions_response.go b/api_versions_response.go index bb1f0b31a..582e29db4 100644 --- a/api_versions_response.go +++ b/api_versions_response.go @@ -84,6 +84,10 @@ func (r *ApiVersionsResponse) version() int16 { return 0 } +func (a *ApiVersionsResponse) headerVersion() int16 { + return 0 +} + func (r *ApiVersionsResponse) requiredVersion() KafkaVersion { return V0_10_0_0 } diff --git a/async_producer_test.go b/async_producer_test.go index 3de308680..d0f012d24 100644 --- a/async_producer_test.go +++ b/async_producer_test.go @@ -988,7 +988,7 @@ func TestAsyncProducerIdempotentRetryCheckBatch(t *testing.T) { lastBatchFirstSeq := -1 lastBatchSize := -1 lastSequenceWrittenToDisk := -1 - handlerFailBeforeWrite := func(req *request) (res encoder) { + handlerFailBeforeWrite := func(req *request) (res encoderWithHeader) { switch req.body.key() { case 3: return metadataResponse diff --git a/broker.go b/broker.go index 9ca41c91e..4f3991af7 100644 --- a/broker.go +++ b/broker.go @@ -119,6 +119,7 @@ type SCRAMClient interface { type responsePromise struct { requestTime time.Time correlationID int32 + headerVersion int16 packets chan []byte errors chan error } @@ -513,6 +514,32 @@ func (b *Broker) CreatePartitions(request *CreatePartitionsRequest) (*CreatePart return response, nil } +//AlterPartitionReassignments sends an alter partition reassignments request and +//returns alter partition reassignments response +func (b *Broker) AlterPartitionReassignments(request *AlterPartitionReassignmentsRequest) (*AlterPartitionReassignmentsResponse, error) { + response := 
new(AlterPartitionReassignmentsResponse) + + err := b.sendAndReceive(request, response) + if err != nil { + return nil, err + } + + return response, nil +} + +//ListPartitionReassignments sends a list partition reassignments request and +//returns list partition reassignments response +func (b *Broker) ListPartitionReassignments(request *ListPartitionReassignmentsRequest) (*ListPartitionReassignmentsResponse, error) { + response := new(ListPartitionReassignmentsResponse) + + err := b.sendAndReceive(request, response) + if err != nil { + return nil, err + } + + return response, nil +} + //DeleteRecords send a request to delete records and return delete record //response or error func (b *Broker) DeleteRecords(request *DeleteRecordsRequest) (*DeleteRecordsResponse, error) { @@ -693,7 +720,7 @@ func (b *Broker) write(buf []byte) (n int, err error) { return b.conn.Write(buf) } -func (b *Broker) send(rb protocolBody, promiseResponse bool) (*responsePromise, error) { +func (b *Broker) send(rb protocolBody, promiseResponse bool, responseHeaderVersion int16) (*responsePromise, error) { b.lock.Lock() defer b.lock.Unlock() @@ -731,14 +758,19 @@ func (b *Broker) send(rb protocolBody, promiseResponse bool) (*responsePromise, return nil, nil } - promise := responsePromise{requestTime, req.correlationID, make(chan []byte), make(chan error)} + promise := responsePromise{requestTime, req.correlationID, responseHeaderVersion, make(chan []byte), make(chan error)} b.responses <- promise return &promise, nil } -func (b *Broker) sendAndReceive(req protocolBody, res versionedDecoder) error { - promise, err := b.send(req, res != nil) +func (b *Broker) sendAndReceive(req protocolBody, res protocolBody) error { + responseHeaderVersion := int16(-1) + if res != nil { + responseHeaderVersion = res.headerVersion() + } + + promise, err := b.send(req, res != nil, responseHeaderVersion) if err != nil { return err } @@ -818,7 +850,6 @@ func (b *Broker) encode(pe packetEncoder, version int16) (err error) { func (b *Broker) responseReceiver() { var dead error - header := make([]byte, 8) for response := range b.responses { if dead != nil { @@ -829,6 +860,9 @@ func (b *Broker) responseReceiver() { continue } + var headerLength = getHeaderLength(response.headerVersion) + header := make([]byte, headerLength) + bytesReadHeader, err := b.readFull(header) requestLatency := time.Since(response.requestTime) if err != nil { @@ -839,7 +873,7 @@ func (b *Broker) responseReceiver() { } decodedHeader := responseHeader{} - err = decode(header, &decodedHeader) + err = versionedDecode(header, &decodedHeader, response.headerVersion) if err != nil { b.updateIncomingCommunicationMetrics(bytesReadHeader, requestLatency) dead = err @@ -855,7 +889,7 @@ func (b *Broker) responseReceiver() { continue } - buf := make([]byte, decodedHeader.length-4) + buf := make([]byte, decodedHeader.length-int32(headerLength)+4) bytesReadBody, err := b.readFull(buf) b.updateIncomingCommunicationMetrics(bytesReadHeader+bytesReadBody, requestLatency) if err != nil { @@ -869,6 +903,15 @@ func (b *Broker) responseReceiver() { close(b.done) } +func getHeaderLength(headerVersion int16) int8 { + if headerVersion < 1 { + return 8 + } else { + // header contains additional tagged field length (0), we don't support actual tags yet. 
+ return 9 + } +} + func (b *Broker) authenticateViaSASL() error { switch b.conf.Net.SASL.Mechanism { case SASLTypeOAuth: @@ -1180,7 +1223,7 @@ func (b *Broker) receiveSaslAuthenticateResponse(correlationID int32) ([]byte, e } header := responseHeader{} - err = decode(buf, &header) + err = versionedDecode(buf, &header, 0) if err != nil { return nil, err } @@ -1269,7 +1312,7 @@ func (b *Broker) receiveSASLServerResponse(res *SaslAuthenticateResponse, correl } header := responseHeader{} - err = decode(buf, &header) + err = versionedDecode(buf, &header, 0) if err != nil { return bytesRead, err } diff --git a/broker_test.go b/broker_test.go index 387dae952..e2b17462c 100644 --- a/broker_test.go +++ b/broker_test.go @@ -42,6 +42,10 @@ func (m mockEncoder) encode(pe packetEncoder) error { return pe.putRawBytes(m.bytes) } +func (m mockEncoder) headerVersion() int16 { + return 0 +} + type brokerMetrics struct { bytesRead int bytesWritten int diff --git a/consumer_metadata_request.go b/consumer_metadata_request.go index a8dcaefe8..e5ebdaef5 100644 --- a/consumer_metadata_request.go +++ b/consumer_metadata_request.go @@ -29,6 +29,10 @@ func (r *ConsumerMetadataRequest) version() int16 { return 0 } +func (r *ConsumerMetadataRequest) headerVersion() int16 { + return 1 +} + func (r *ConsumerMetadataRequest) requiredVersion() KafkaVersion { return V0_8_2_0 } diff --git a/consumer_metadata_response.go b/consumer_metadata_response.go index f39a8711c..1b5d00d22 100644 --- a/consumer_metadata_response.go +++ b/consumer_metadata_response.go @@ -73,6 +73,10 @@ func (r *ConsumerMetadataResponse) version() int16 { return 0 } +func (r *ConsumerMetadataResponse) headerVersion() int16 { + return 0 +} + func (r *ConsumerMetadataResponse) requiredVersion() KafkaVersion { return V0_8_2_0 } diff --git a/create_partitions_request.go b/create_partitions_request.go index af321e994..46fb04402 100644 --- a/create_partitions_request.go +++ b/create_partitions_request.go @@ -67,6 +67,10 @@ func (r *CreatePartitionsRequest) version() int16 { return 0 } +func (r *CreatePartitionsRequest) headerVersion() int16 { + return 1 +} + func (r *CreatePartitionsRequest) requiredVersion() KafkaVersion { return V1_0_0_0 } diff --git a/create_partitions_response.go b/create_partitions_response.go index bb18204a7..12ce78857 100644 --- a/create_partitions_response.go +++ b/create_partitions_response.go @@ -63,6 +63,10 @@ func (r *CreatePartitionsResponse) version() int16 { return 0 } +func (r *CreatePartitionsResponse) headerVersion() int16 { + return 0 +} + func (r *CreatePartitionsResponse) requiredVersion() KafkaVersion { return V1_0_0_0 } diff --git a/create_topics_request.go b/create_topics_request.go index 709c0a44e..287acd069 100644 --- a/create_topics_request.go +++ b/create_topics_request.go @@ -79,6 +79,10 @@ func (c *CreateTopicsRequest) version() int16 { return c.Version } +func (r *CreateTopicsRequest) headerVersion() int16 { + return 1 +} + func (c *CreateTopicsRequest) requiredVersion() KafkaVersion { switch c.Version { case 2: diff --git a/create_topics_response.go b/create_topics_response.go index a493e02ac..7e1448a66 100644 --- a/create_topics_response.go +++ b/create_topics_response.go @@ -70,6 +70,10 @@ func (c *CreateTopicsResponse) version() int16 { return c.Version } +func (c *CreateTopicsResponse) headerVersion() int16 { + return 0 +} + func (c *CreateTopicsResponse) requiredVersion() KafkaVersion { switch c.Version { case 2: diff --git a/delete_groups_request.go b/delete_groups_request.go index 305a324ac..4ac8bbee4 
100644 --- a/delete_groups_request.go +++ b/delete_groups_request.go @@ -21,6 +21,10 @@ func (r *DeleteGroupsRequest) version() int16 { return 0 } +func (r *DeleteGroupsRequest) headerVersion() int16 { + return 1 +} + func (r *DeleteGroupsRequest) requiredVersion() KafkaVersion { return V1_1_0_0 } diff --git a/delete_groups_response.go b/delete_groups_response.go index c067ebb42..5e7b1ed36 100644 --- a/delete_groups_response.go +++ b/delete_groups_response.go @@ -65,6 +65,10 @@ func (r *DeleteGroupsResponse) version() int16 { return 0 } +func (r *DeleteGroupsResponse) headerVersion() int16 { + return 0 +} + func (r *DeleteGroupsResponse) requiredVersion() KafkaVersion { return V1_1_0_0 } diff --git a/delete_records_request.go b/delete_records_request.go index 93efafd4d..dc106b17d 100644 --- a/delete_records_request.go +++ b/delete_records_request.go @@ -77,6 +77,10 @@ func (d *DeleteRecordsRequest) version() int16 { return 0 } +func (d *DeleteRecordsRequest) headerVersion() int16 { + return 1 +} + func (d *DeleteRecordsRequest) requiredVersion() KafkaVersion { return V0_11_0_0 } diff --git a/delete_records_response.go b/delete_records_response.go index 733a58b6b..d530b4c7e 100644 --- a/delete_records_response.go +++ b/delete_records_response.go @@ -80,6 +80,10 @@ func (d *DeleteRecordsResponse) version() int16 { return 0 } +func (d *DeleteRecordsResponse) headerVersion() int16 { + return 0 +} + func (d *DeleteRecordsResponse) requiredVersion() KafkaVersion { return V0_11_0_0 } diff --git a/delete_topics_request.go b/delete_topics_request.go index 911f67d31..ba6780a8e 100644 --- a/delete_topics_request.go +++ b/delete_topics_request.go @@ -38,6 +38,10 @@ func (d *DeleteTopicsRequest) version() int16 { return d.Version } +func (d *DeleteTopicsRequest) headerVersion() int16 { + return 1 +} + func (d *DeleteTopicsRequest) requiredVersion() KafkaVersion { switch d.Version { case 1: diff --git a/delete_topics_response.go b/delete_topics_response.go index 34225460a..733961a89 100644 --- a/delete_topics_response.go +++ b/delete_topics_response.go @@ -68,6 +68,10 @@ func (d *DeleteTopicsResponse) version() int16 { return d.Version } +func (d *DeleteTopicsResponse) headerVersion() int16 { + return 0 +} + func (d *DeleteTopicsResponse) requiredVersion() KafkaVersion { switch d.Version { case 1: diff --git a/describe_configs_request.go b/describe_configs_request.go index ccb587b35..d0c735280 100644 --- a/describe_configs_request.go +++ b/describe_configs_request.go @@ -100,6 +100,10 @@ func (r *DescribeConfigsRequest) version() int16 { return r.Version } +func (r *DescribeConfigsRequest) headerVersion() int16 { + return 1 +} + func (r *DescribeConfigsRequest) requiredVersion() KafkaVersion { switch r.Version { case 1: diff --git a/describe_configs_response.go b/describe_configs_response.go index dd919f127..063ae9112 100644 --- a/describe_configs_response.go +++ b/describe_configs_response.go @@ -112,6 +112,10 @@ func (r *DescribeConfigsResponse) version() int16 { return r.Version } +func (r *DescribeConfigsResponse) headerVersion() int16 { + return 0 +} + func (r *DescribeConfigsResponse) requiredVersion() KafkaVersion { switch r.Version { case 1: diff --git a/describe_groups_request.go b/describe_groups_request.go index 1fb356777..f8962da58 100644 --- a/describe_groups_request.go +++ b/describe_groups_request.go @@ -21,6 +21,10 @@ func (r *DescribeGroupsRequest) version() int16 { return 0 } +func (r *DescribeGroupsRequest) headerVersion() int16 { + return 1 +} + func (r *DescribeGroupsRequest) 
requiredVersion() KafkaVersion { return V0_9_0_0 } diff --git a/describe_groups_response.go b/describe_groups_response.go index 542b3a971..bc242e421 100644 --- a/describe_groups_response.go +++ b/describe_groups_response.go @@ -43,6 +43,10 @@ func (r *DescribeGroupsResponse) version() int16 { return 0 } +func (r *DescribeGroupsResponse) headerVersion() int16 { + return 0 +} + func (r *DescribeGroupsResponse) requiredVersion() KafkaVersion { return V0_9_0_0 } diff --git a/describe_log_dirs_request.go b/describe_log_dirs_request.go index cb1e78152..c0bf04e04 100644 --- a/describe_log_dirs_request.go +++ b/describe_log_dirs_request.go @@ -78,6 +78,10 @@ func (r *DescribeLogDirsRequest) version() int16 { return r.Version } +func (r *DescribeLogDirsRequest) headerVersion() int16 { + return 1 +} + func (r *DescribeLogDirsRequest) requiredVersion() KafkaVersion { return V1_0_0_0 } diff --git a/describe_log_dirs_response.go b/describe_log_dirs_response.go index d207312ef..a9a747615 100644 --- a/describe_log_dirs_response.go +++ b/describe_log_dirs_response.go @@ -61,6 +61,10 @@ func (r *DescribeLogDirsResponse) version() int16 { return r.Version } +func (r *DescribeLogDirsResponse) headerVersion() int16 { + return 0 +} + func (r *DescribeLogDirsResponse) requiredVersion() KafkaVersion { return V1_0_0_0 } diff --git a/encoder_decoder.go b/encoder_decoder.go index 7ce3bc0f6..025bad61f 100644 --- a/encoder_decoder.go +++ b/encoder_decoder.go @@ -12,6 +12,11 @@ type encoder interface { encode(pe packetEncoder) error } +type encoderWithHeader interface { + encoder + headerVersion() int16 +} + // Encode takes an Encoder and turns it into bytes while potentially recording metrics. func encode(e encoder, metricRegistry metrics.Registry) ([]byte, error) { if e == nil { diff --git a/end_txn_request.go b/end_txn_request.go index 2cd9b506d..6635425dd 100644 --- a/end_txn_request.go +++ b/end_txn_request.go @@ -45,6 +45,10 @@ func (a *EndTxnRequest) version() int16 { return 0 } +func (r *EndTxnRequest) headerVersion() int16 { + return 1 +} + func (a *EndTxnRequest) requiredVersion() KafkaVersion { return V0_11_0_0 } diff --git a/end_txn_response.go b/end_txn_response.go index 33b27e33d..763976726 100644 --- a/end_txn_response.go +++ b/end_txn_response.go @@ -39,6 +39,10 @@ func (e *EndTxnResponse) version() int16 { return 0 } +func (r *EndTxnResponse) headerVersion() int16 { + return 0 +} + func (e *EndTxnResponse) requiredVersion() KafkaVersion { return V0_11_0_0 } diff --git a/errors.go b/errors.go index 97be3c0f1..ca621b092 100644 --- a/errors.go +++ b/errors.go @@ -94,6 +94,14 @@ func (mErr MultiError) Error() string { return errString } +func (mErr MultiError) PrettyError() string { + var errString = "" + for _, err := range *mErr.Errors { + errString += err.Error() + "\n" + } + return errString +} + // ErrDeleteRecords is the type of error returned when fail to delete the required records type ErrDeleteRecords struct { MultiError @@ -103,6 +111,14 @@ func (err ErrDeleteRecords) Error() string { return "kafka server: failed to delete records " + err.MultiError.Error() } +type ErrReassignPartitions struct { + MultiError +} + +func (err ErrReassignPartitions) Error() string { + return fmt.Sprintf("failed to reassign partitions for topic: \n%s", err.MultiError.PrettyError()) +} + // Numeric error codes returned by the Kafka server. 
const ( ErrNoError KError = 0 diff --git a/fetch_request.go b/fetch_request.go index 9a3e8dd79..f893aeff7 100644 --- a/fetch_request.go +++ b/fetch_request.go @@ -239,6 +239,10 @@ func (r *FetchRequest) version() int16 { return r.Version } +func (r *FetchRequest) headerVersion() int16 { + return 1 +} + func (r *FetchRequest) requiredVersion() KafkaVersion { switch r.Version { case 0: diff --git a/fetch_response.go b/fetch_response.go index dc0aeed2e..ca6d78832 100644 --- a/fetch_response.go +++ b/fetch_response.go @@ -335,6 +335,10 @@ func (r *FetchResponse) version() int16 { return r.Version } +func (r *FetchResponse) headerVersion() int16 { + return 0 +} + func (r *FetchResponse) requiredVersion() KafkaVersion { switch r.Version { case 0: diff --git a/find_coordinator_request.go b/find_coordinator_request.go index ff2ad206c..597bcbf78 100644 --- a/find_coordinator_request.go +++ b/find_coordinator_request.go @@ -51,6 +51,10 @@ func (f *FindCoordinatorRequest) version() int16 { return f.Version } +func (r *FindCoordinatorRequest) headerVersion() int16 { + return 1 +} + func (f *FindCoordinatorRequest) requiredVersion() KafkaVersion { switch f.Version { case 1: diff --git a/find_coordinator_response.go b/find_coordinator_response.go index 9c900e8b7..83a648ad4 100644 --- a/find_coordinator_response.go +++ b/find_coordinator_response.go @@ -82,6 +82,10 @@ func (f *FindCoordinatorResponse) version() int16 { return f.Version } +func (r *FindCoordinatorResponse) headerVersion() int16 { + return 0 +} + func (f *FindCoordinatorResponse) requiredVersion() KafkaVersion { switch f.Version { case 1: diff --git a/heartbeat_request.go b/heartbeat_request.go index ce49c4739..e9d9af191 100644 --- a/heartbeat_request.go +++ b/heartbeat_request.go @@ -42,6 +42,10 @@ func (r *HeartbeatRequest) version() int16 { return 0 } +func (r *HeartbeatRequest) headerVersion() int16 { + return 1 +} + func (r *HeartbeatRequest) requiredVersion() KafkaVersion { return V0_9_0_0 } diff --git a/heartbeat_response.go b/heartbeat_response.go index 766f5fdec..577ab72e5 100644 --- a/heartbeat_response.go +++ b/heartbeat_response.go @@ -27,6 +27,10 @@ func (r *HeartbeatResponse) version() int16 { return 0 } +func (r *HeartbeatResponse) headerVersion() int16 { + return 0 +} + func (r *HeartbeatResponse) requiredVersion() KafkaVersion { return V0_9_0_0 } diff --git a/init_producer_id_request.go b/init_producer_id_request.go index 8ceb6c232..689444397 100644 --- a/init_producer_id_request.go +++ b/init_producer_id_request.go @@ -38,6 +38,10 @@ func (i *InitProducerIDRequest) version() int16 { return 0 } +func (i *InitProducerIDRequest) headerVersion() int16 { + return 1 +} + func (i *InitProducerIDRequest) requiredVersion() KafkaVersion { return V0_11_0_0 } diff --git a/init_producer_id_response.go b/init_producer_id_response.go index 1b32eb085..3e1242bf6 100644 --- a/init_producer_id_response.go +++ b/init_producer_id_response.go @@ -50,6 +50,10 @@ func (i *InitProducerIDResponse) version() int16 { return 0 } +func (i *InitProducerIDResponse) headerVersion() int16 { + return 0 +} + func (i *InitProducerIDResponse) requiredVersion() KafkaVersion { return V0_11_0_0 } diff --git a/join_group_request.go b/join_group_request.go index 97e9299ea..3734e82e4 100644 --- a/join_group_request.go +++ b/join_group_request.go @@ -134,6 +134,10 @@ func (r *JoinGroupRequest) version() int16 { return r.Version } +func (r *JoinGroupRequest) headerVersion() int16 { + return 1 +} + func (r *JoinGroupRequest) requiredVersion() KafkaVersion { switch 
r.Version { case 2: diff --git a/join_group_response.go b/join_group_response.go index 5752acc8a..54b0a45c2 100644 --- a/join_group_response.go +++ b/join_group_response.go @@ -123,6 +123,10 @@ func (r *JoinGroupResponse) version() int16 { return r.Version } +func (r *JoinGroupResponse) headerVersion() int16 { + return 0 +} + func (r *JoinGroupResponse) requiredVersion() KafkaVersion { switch r.Version { case 2: diff --git a/leave_group_request.go b/leave_group_request.go index e17742748..d7789b68d 100644 --- a/leave_group_request.go +++ b/leave_group_request.go @@ -35,6 +35,10 @@ func (r *LeaveGroupRequest) version() int16 { return 0 } +func (r *LeaveGroupRequest) headerVersion() int16 { + return 1 +} + func (r *LeaveGroupRequest) requiredVersion() KafkaVersion { return V0_9_0_0 } diff --git a/leave_group_response.go b/leave_group_response.go index d60c626da..25f8d5eb3 100644 --- a/leave_group_response.go +++ b/leave_group_response.go @@ -27,6 +27,10 @@ func (r *LeaveGroupResponse) version() int16 { return 0 } +func (r *LeaveGroupResponse) headerVersion() int16 { + return 0 +} + func (r *LeaveGroupResponse) requiredVersion() KafkaVersion { return V0_9_0_0 } diff --git a/list_groups_request.go b/list_groups_request.go index 3b16abf7f..ed44cc27e 100644 --- a/list_groups_request.go +++ b/list_groups_request.go @@ -19,6 +19,10 @@ func (r *ListGroupsRequest) version() int16 { return 0 } +func (r *ListGroupsRequest) headerVersion() int16 { + return 1 +} + func (r *ListGroupsRequest) requiredVersion() KafkaVersion { return V0_9_0_0 } diff --git a/list_groups_response.go b/list_groups_response.go index 56115d4c7..777bae7e6 100644 --- a/list_groups_response.go +++ b/list_groups_response.go @@ -64,6 +64,10 @@ func (r *ListGroupsResponse) version() int16 { return 0 } +func (r *ListGroupsResponse) headerVersion() int16 { + return 0 +} + func (r *ListGroupsResponse) requiredVersion() KafkaVersion { return V0_9_0_0 } diff --git a/list_partition_reassignments_request.go b/list_partition_reassignments_request.go new file mode 100644 index 000000000..c1ffa9ba0 --- /dev/null +++ b/list_partition_reassignments_request.go @@ -0,0 +1,98 @@ +package sarama + +type ListPartitionReassignmentsRequest struct { + TimeoutMs int32 + blocks map[string][]int32 + Version int16 +} + +func (r *ListPartitionReassignmentsRequest) encode(pe packetEncoder) error { + pe.putInt32(r.TimeoutMs) + + pe.putCompactArrayLength(len(r.blocks)) + + for topic, partitions := range r.blocks { + if err := pe.putCompactString(topic); err != nil { + return err + } + + if err := pe.putCompactInt32Array(partitions); err != nil { + return err + } + + pe.putEmptyTaggedFieldArray() + } + + pe.putEmptyTaggedFieldArray() + + return nil +} + +func (r *ListPartitionReassignmentsRequest) decode(pd packetDecoder, version int16) (err error) { + r.Version = version + + if r.TimeoutMs, err = pd.getInt32(); err != nil { + return err + } + + topicCount, err := pd.getCompactArrayLength() + if err != nil { + return err + } + if topicCount > 0 { + r.blocks = make(map[string][]int32) + for i := 0; i < topicCount; i++ { + topic, err := pd.getCompactString() + if err != nil { + return err + } + partitionCount, err := pd.getCompactArrayLength() + if err != nil { + return err + } + r.blocks[topic] = make([]int32, partitionCount) + for j := 0; j < partitionCount; j++ { + partition, err := pd.getInt32() + if err != nil { + return err + } + r.blocks[topic][j] = partition + } + if _, err := pd.getEmptyTaggedFieldArray(); err != nil { + return err + } + } + } + + if _, 
err := pd.getEmptyTaggedFieldArray(); err != nil { + return err + } + + return +} + +func (r *ListPartitionReassignmentsRequest) key() int16 { + return 46 +} + +func (r *ListPartitionReassignmentsRequest) version() int16 { + return r.Version +} + +func (r *ListPartitionReassignmentsRequest) headerVersion() int16 { + return 2 +} + +func (r *ListPartitionReassignmentsRequest) requiredVersion() KafkaVersion { + return V2_4_0_0 +} + +func (r *ListPartitionReassignmentsRequest) AddBlock(topic string, partitionIDs []int32) { + if r.blocks == nil { + r.blocks = make(map[string][]int32) + } + + if r.blocks[topic] == nil { + r.blocks[topic] = partitionIDs + } +} diff --git a/list_partition_reassignments_request_test.go b/list_partition_reassignments_request_test.go new file mode 100644 index 000000000..d9f9f92ca --- /dev/null +++ b/list_partition_reassignments_request_test.go @@ -0,0 +1,31 @@ +package sarama + +import "testing" + +var ( + listPartitionReassignmentsRequestOneBlock = []byte{ + 0, 0, 39, 16, // timeout 10000 + 2, // 2-1=1 block + 6, 116, 111, 112, 105, 99, // topic name "topic" as compact string + 2, // 2-1=1 partitions + 0, 0, 0, 0, // partitionId + 0, 0, // empty tagged fields + } +) + +func TestListPartitionReassignmentRequest(t *testing.T) { + var request *ListPartitionReassignmentsRequest + + request = &ListPartitionReassignmentsRequest{ + TimeoutMs: int32(10000), + Version: int16(0), + } + + request.AddBlock("topic", []int32{0}) + + testRequest(t, "one block", request, listPartitionReassignmentsRequestOneBlock) + + request.AddBlock("topic2", []int32{1, 2}) + + testRequestWithoutByteComparison(t, "two blocks", request) +} diff --git a/list_partition_reassignments_response.go b/list_partition_reassignments_response.go new file mode 100644 index 000000000..a5786ee7f --- /dev/null +++ b/list_partition_reassignments_response.go @@ -0,0 +1,169 @@ +package sarama + +type PartitionReplicaReassignmentsStatus struct { + replicas []int32 + addingReplicas []int32 + removingReplicas []int32 +} + +func (b *PartitionReplicaReassignmentsStatus) encode(pe packetEncoder) error { + if err := pe.putCompactInt32Array(b.replicas); err != nil { + return err + } + if err := pe.putCompactInt32Array(b.addingReplicas); err != nil { + return err + } + if err := pe.putCompactInt32Array(b.removingReplicas); err != nil { + return err + } + + pe.putEmptyTaggedFieldArray() + + return nil +} + +func (b *PartitionReplicaReassignmentsStatus) decode(pd packetDecoder) (err error) { + if b.replicas, err = pd.getCompactInt32Array(); err != nil { + return err + } + + if b.addingReplicas, err = pd.getCompactInt32Array(); err != nil { + return err + } + + if b.removingReplicas, err = pd.getCompactInt32Array(); err != nil { + return err + } + + if _, err := pd.getEmptyTaggedFieldArray(); err != nil { + return err + } + + return err +} + +type ListPartitionReassignmentsResponse struct { + Version int16 + ThrottleTimeMs int32 + ErrorCode KError + ErrorMessage *string + TopicStatus map[string]map[int32]*PartitionReplicaReassignmentsStatus +} + +func (r *ListPartitionReassignmentsResponse) AddBlock(topic string, partition int32, replicas, addingReplicas, removingReplicas []int32) { + if r.TopicStatus == nil { + r.TopicStatus = make(map[string]map[int32]*PartitionReplicaReassignmentsStatus) + } + partitions := r.TopicStatus[topic] + if partitions == nil { + partitions = make(map[int32]*PartitionReplicaReassignmentsStatus) + r.TopicStatus[topic] = partitions + } + + partitions[partition] = 
&PartitionReplicaReassignmentsStatus{replicas: replicas, addingReplicas: addingReplicas, removingReplicas: removingReplicas} +} + +func (r *ListPartitionReassignmentsResponse) encode(pe packetEncoder) error { + pe.putInt32(r.ThrottleTimeMs) + pe.putInt16(int16(r.ErrorCode)) + if err := pe.putNullableCompactString(r.ErrorMessage); err != nil { + return err + } + + pe.putCompactArrayLength(len(r.TopicStatus)) + for topic, partitions := range r.TopicStatus { + if err := pe.putCompactString(topic); err != nil { + return err + } + pe.putCompactArrayLength(len(partitions)) + for partition, block := range partitions { + pe.putInt32(partition) + + if err := block.encode(pe); err != nil { + return err + } + } + pe.putEmptyTaggedFieldArray() + } + + pe.putEmptyTaggedFieldArray() + + return nil +} + +func (r *ListPartitionReassignmentsResponse) decode(pd packetDecoder, version int16) (err error) { + r.Version = version + + if r.ThrottleTimeMs, err = pd.getInt32(); err != nil { + return err + } + + kerr, err := pd.getInt16() + if err != nil { + return err + } + + r.ErrorCode = KError(kerr) + + if r.ErrorMessage, err = pd.getCompactNullableString(); err != nil { + return err + } + + numTopics, err := pd.getCompactArrayLength() + if err != nil || numTopics == 0 { + return err + } + + r.TopicStatus = make(map[string]map[int32]*PartitionReplicaReassignmentsStatus, numTopics) + for i := 0; i < numTopics; i++ { + topic, err := pd.getCompactString() + if err != nil { + return err + } + + ongoingPartitionReassignments, err := pd.getCompactArrayLength() + if err != nil { + return err + } + + r.TopicStatus[topic] = make(map[int32]*PartitionReplicaReassignmentsStatus, ongoingPartitionReassignments) + + for j := 0; j < ongoingPartitionReassignments; j++ { + partition, err := pd.getInt32() + if err != nil { + return err + } + + block := &PartitionReplicaReassignmentsStatus{} + if err := block.decode(pd); err != nil { + return err + } + r.TopicStatus[topic][partition] = block + } + + if _, err := pd.getEmptyTaggedFieldArray(); err != nil { + return err + } + } + if _, err := pd.getEmptyTaggedFieldArray(); err != nil { + return err + } + + return nil +} + +func (r *ListPartitionReassignmentsResponse) key() int16 { + return 46 +} + +func (r *ListPartitionReassignmentsResponse) version() int16 { + return r.Version +} + +func (r *ListPartitionReassignmentsResponse) headerVersion() int16 { + return 1 +} + +func (r *ListPartitionReassignmentsResponse) requiredVersion() KafkaVersion { + return V2_4_0_0 +} diff --git a/list_partition_reassignments_response_test.go b/list_partition_reassignments_response_test.go new file mode 100644 index 000000000..ba6ca5c5b --- /dev/null +++ b/list_partition_reassignments_response_test.go @@ -0,0 +1,32 @@ +package sarama + +import "testing" + +var ( + listPartitionReassignmentsResponse = []byte{ + 0, 0, 39, 16, // ThrottleTimeMs 10000 + 0, 0, // errorcode + 0, // null string + 2, // block array length 1 + 6, 116, 111, 112, 105, 99, // topic name "topic" + 2, // partition array length 1 + 0, 0, 0, 1, // partitionId + 3, 0, 0, 3, 232, 0, 0, 3, 233, // replicas [1000, 1001] + 3, 0, 0, 3, 234, 0, 0, 3, 235, // addingReplicas [1002, 1003] + 3, 0, 0, 3, 236, 0, 0, 3, 237, // removingReplicas [1004, 1005] + 0, 0, 0, // empty tagged fields + } +) + +func TestListPartitionReassignmentResponse(t *testing.T) { + var response *ListPartitionReassignmentsResponse + + response = &ListPartitionReassignmentsResponse{ + ThrottleTimeMs: int32(10000), + Version: int16(0), + } + + response.AddBlock("topic", 1, 
[]int32{1000, 1001}, []int32{1002, 1003}, []int32{1004, 1005}) + + testResponse(t, "one topic", response, listPartitionReassignmentsResponse) +} diff --git a/metadata_request.go b/metadata_request.go index 1b590d368..e835f5a9c 100644 --- a/metadata_request.go +++ b/metadata_request.go @@ -65,6 +65,10 @@ func (r *MetadataRequest) version() int16 { return r.Version } +func (r *MetadataRequest) headerVersion() int16 { + return 1 +} + func (r *MetadataRequest) requiredVersion() KafkaVersion { switch r.Version { case 1: diff --git a/metadata_response.go b/metadata_response.go index 916992d24..0bb8702cc 100644 --- a/metadata_response.go +++ b/metadata_response.go @@ -255,6 +255,10 @@ func (r *MetadataResponse) version() int16 { return r.Version } +func (r *MetadataResponse) headerVersion() int16 { + return 0 +} + func (r *MetadataResponse) requiredVersion() KafkaVersion { switch r.Version { case 1: diff --git a/mockbroker.go b/mockbroker.go index 56e3436ef..ff5a68ae7 100644 --- a/mockbroker.go +++ b/mockbroker.go @@ -20,7 +20,7 @@ const ( type GSSApiHandlerFunc func([]byte) []byte -type requestHandlerFunc func(req *request) (res encoder) +type requestHandlerFunc func(req *request) (res encoderWithHeader) // RequestNotifierFunc is invoked when a mock broker processes a request successfully // and will provides the number of bytes read and written. @@ -55,7 +55,7 @@ type MockBroker struct { port int32 closing chan none stopper chan none - expectations chan encoder + expectations chan encoderWithHeader listener net.Listener t TestReporter latency time.Duration @@ -83,7 +83,7 @@ func (b *MockBroker) SetLatency(latency time.Duration) { // and uses the found MockResponse instance to generate an appropriate reply. // If the request type is not found in the map then nothing is sent. 
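// As an illustrative sketch (using mocks defined in mockresponses.go; the
// exact wiring in any given test may differ), a test typically maps request
// type names to mock responses like so:
//
//	broker := NewMockBroker(t, 1)
//	broker.SetHandlerByMap(map[string]MockResponse{
//		"MetadataRequest": NewMockMetadataResponse(t).
//			SetBroker(broker.Addr(), broker.BrokerID()),
//	})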
func (b *MockBroker) SetHandlerByMap(handlerMap map[string]MockResponse) { - b.setHandler(func(req *request) (res encoder) { + b.setHandler(func(req *request) (res encoderWithHeader) { reqTypeName := reflect.TypeOf(req.body).Elem().Name() mockResponse := handlerMap[reqTypeName] if mockResponse == nil { @@ -231,7 +231,6 @@ func (b *MockBroker) handleRequests(conn io.ReadWriteCloser, idx int, wg *sync.W } }() - resHeader := make([]byte, 8) var bytesWritten int var bytesRead int for { @@ -281,8 +280,7 @@ func (b *MockBroker) handleRequests(conn io.ReadWriteCloser, idx int, wg *sync.W continue } - binary.BigEndian.PutUint32(resHeader, uint32(len(encodedRes)+4)) - binary.BigEndian.PutUint32(resHeader[4:], uint32(req.correlationID)) + resHeader := b.encodeHeader(res.headerVersion(), req.correlationID, uint32(len(encodedRes))) if _, err = conn.Write(resHeader); err != nil { b.serverError(err) break @@ -318,7 +316,25 @@ func (b *MockBroker) handleRequests(conn io.ReadWriteCloser, idx int, wg *sync.W Logger.Printf("*** mockbroker/%d/%d: connection closed, err=%v", b.BrokerID(), idx, err) } -func (b *MockBroker) defaultRequestHandler(req *request) (res encoder) { +func (b *MockBroker) encodeHeader(headerVersion int16, correlationId int32, payloadLength uint32) []byte { + headerLength := uint32(8) + + if headerVersion >= 1 { + headerLength = 9 + } + + resHeader := make([]byte, headerLength) + binary.BigEndian.PutUint32(resHeader, payloadLength+headerLength-4) + binary.BigEndian.PutUint32(resHeader[4:], uint32(correlationId)) + + if headerVersion >= 1 { + binary.PutUvarint(resHeader[8:], 0) + } + + return resHeader +} + +func (b *MockBroker) defaultRequestHandler(req *request) (res encoderWithHeader) { select { case res, ok := <-b.expectations: if !ok { @@ -373,7 +389,7 @@ func NewMockBrokerListener(t TestReporter, brokerID int32, listener net.Listener stopper: make(chan none), t: t, brokerID: brokerID, - expectations: make(chan encoder, 512), + expectations: make(chan encoderWithHeader, 512), listener: listener, } broker.handler = broker.defaultRequestHandler @@ -394,6 +410,6 @@ func NewMockBrokerListener(t TestReporter, brokerID int32, listener net.Listener return broker } -func (b *MockBroker) Returns(e encoder) { +func (b *MockBroker) Returns(e encoderWithHeader) { b.expectations <- e } diff --git a/mockresponses.go b/mockresponses.go index 6fa72ebb0..e77463a58 100644 --- a/mockresponses.go +++ b/mockresponses.go @@ -18,20 +18,20 @@ type TestReporter interface { // allows generating a response based on a request body. MockResponses are used // to program behavior of MockBroker in tests. type MockResponse interface { - For(reqBody versionedDecoder) (res encoder) + For(reqBody versionedDecoder) (res encoderWithHeader) } // MockWrapper is a mock response builder that returns a particular concrete // response regardless of the actual request passed to the `For` method. 
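// For example (illustrative), NewMockWrapper(&OffsetCommitResponse{}) yields a
// MockResponse that replays that same response no matter which request arrives.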
type MockWrapper struct { - res encoder + res encoderWithHeader } -func (mw *MockWrapper) For(reqBody versionedDecoder) (res encoder) { +func (mw *MockWrapper) For(reqBody versionedDecoder) (res encoderWithHeader) { return mw.res } -func NewMockWrapper(res encoder) *MockWrapper { +func NewMockWrapper(res encoderWithHeader) *MockWrapper { return &MockWrapper{res: res} } @@ -50,7 +50,7 @@ func NewMockSequence(responses ...interface{}) *MockSequence { switch res := res.(type) { case MockResponse: ms.responses[i] = res - case encoder: + case encoderWithHeader: ms.responses[i] = NewMockWrapper(res) default: panic(fmt.Sprintf("Unexpected response type: %T", res)) @@ -59,7 +59,7 @@ func NewMockSequence(responses ...interface{}) *MockSequence { return ms } -func (mc *MockSequence) For(reqBody versionedDecoder) (res encoder) { +func (mc *MockSequence) For(reqBody versionedDecoder) (res encoderWithHeader) { res = mc.responses[0].For(reqBody) if len(mc.responses) > 1 { mc.responses = mc.responses[1:] @@ -79,7 +79,7 @@ func NewMockListGroupsResponse(t TestReporter) *MockListGroupsResponse { } } -func (m *MockListGroupsResponse) For(reqBody versionedDecoder) encoder { +func (m *MockListGroupsResponse) For(reqBody versionedDecoder) encoderWithHeader { request := reqBody.(*ListGroupsRequest) _ = request response := &ListGroupsResponse{ @@ -110,7 +110,7 @@ func (m *MockDescribeGroupsResponse) AddGroupDescription(groupID string, descrip return m } -func (m *MockDescribeGroupsResponse) For(reqBody versionedDecoder) encoder { +func (m *MockDescribeGroupsResponse) For(reqBody versionedDecoder) encoderWithHeader { request := reqBody.(*DescribeGroupsRequest) response := &DescribeGroupsResponse{} @@ -166,7 +166,7 @@ func (mmr *MockMetadataResponse) SetController(brokerID int32) *MockMetadataResp return mmr } -func (mmr *MockMetadataResponse) For(reqBody versionedDecoder) encoder { +func (mmr *MockMetadataResponse) For(reqBody versionedDecoder) encoderWithHeader { metadataRequest := reqBody.(*MetadataRequest) metadataResponse := &MetadataResponse{ Version: metadataRequest.version(), @@ -233,7 +233,7 @@ func (mor *MockOffsetResponse) SetOffset(topic string, partition int32, time, of return mor } -func (mor *MockOffsetResponse) For(reqBody versionedDecoder) encoder { +func (mor *MockOffsetResponse) For(reqBody versionedDecoder) encoderWithHeader { offsetRequest := reqBody.(*OffsetRequest) offsetResponse := &OffsetResponse{Version: mor.version} for topic, partitions := range offsetRequest.blocks { @@ -309,7 +309,7 @@ func (mfr *MockFetchResponse) SetHighWaterMark(topic string, partition int32, of return mfr } -func (mfr *MockFetchResponse) For(reqBody versionedDecoder) encoder { +func (mfr *MockFetchResponse) For(reqBody versionedDecoder) encoderWithHeader { fetchRequest := reqBody.(*FetchRequest) res := &FetchResponse{ Version: mfr.version, @@ -393,7 +393,7 @@ func (mr *MockConsumerMetadataResponse) SetError(group string, kerror KError) *M return mr } -func (mr *MockConsumerMetadataResponse) For(reqBody versionedDecoder) encoder { +func (mr *MockConsumerMetadataResponse) For(reqBody versionedDecoder) encoderWithHeader { req := reqBody.(*ConsumerMetadataRequest) group := req.ConsumerGroup res := &ConsumerMetadataResponse{} @@ -442,7 +442,7 @@ func (mr *MockFindCoordinatorResponse) SetError(coordinatorType CoordinatorType, return mr } -func (mr *MockFindCoordinatorResponse) For(reqBody versionedDecoder) encoder { +func (mr *MockFindCoordinatorResponse) For(reqBody versionedDecoder) encoderWithHeader { req := 
reqBody.(*FindCoordinatorRequest) res := &FindCoordinatorResponse{} var v interface{} @@ -489,7 +489,7 @@ func (mr *MockOffsetCommitResponse) SetError(group, topic string, partition int3 return mr } -func (mr *MockOffsetCommitResponse) For(reqBody versionedDecoder) encoder { +func (mr *MockOffsetCommitResponse) For(reqBody versionedDecoder) encoderWithHeader { req := reqBody.(*OffsetCommitRequest) group := req.ConsumerGroup res := &OffsetCommitResponse{} @@ -546,7 +546,7 @@ func (mr *MockProduceResponse) SetError(topic string, partition int32, kerror KE return mr } -func (mr *MockProduceResponse) For(reqBody versionedDecoder) encoder { +func (mr *MockProduceResponse) For(reqBody versionedDecoder) encoderWithHeader { req := reqBody.(*ProduceRequest) res := &ProduceResponse{ Version: mr.version, @@ -605,7 +605,7 @@ func (mr *MockOffsetFetchResponse) SetError(kerror KError) *MockOffsetFetchRespo return mr } -func (mr *MockOffsetFetchResponse) For(reqBody versionedDecoder) encoder { +func (mr *MockOffsetFetchResponse) For(reqBody versionedDecoder) encoderWithHeader { req := reqBody.(*OffsetFetchRequest) group := req.ConsumerGroup res := &OffsetFetchResponse{Version: req.Version} @@ -630,7 +630,7 @@ func NewMockCreateTopicsResponse(t TestReporter) *MockCreateTopicsResponse { return &MockCreateTopicsResponse{t: t} } -func (mr *MockCreateTopicsResponse) For(reqBody versionedDecoder) encoder { +func (mr *MockCreateTopicsResponse) For(reqBody versionedDecoder) encoderWithHeader { req := reqBody.(*CreateTopicsRequest) res := &CreateTopicsResponse{ Version: req.Version, @@ -659,7 +659,7 @@ func NewMockDeleteTopicsResponse(t TestReporter) *MockDeleteTopicsResponse { return &MockDeleteTopicsResponse{t: t} } -func (mr *MockDeleteTopicsResponse) For(reqBody versionedDecoder) encoder { +func (mr *MockDeleteTopicsResponse) For(reqBody versionedDecoder) encoderWithHeader { req := reqBody.(*DeleteTopicsRequest) res := &DeleteTopicsResponse{} res.TopicErrorCodes = make(map[string]KError) @@ -679,7 +679,7 @@ func NewMockCreatePartitionsResponse(t TestReporter) *MockCreatePartitionsRespon return &MockCreatePartitionsResponse{t: t} } -func (mr *MockCreatePartitionsResponse) For(reqBody versionedDecoder) encoder { +func (mr *MockCreatePartitionsResponse) For(reqBody versionedDecoder) encoderWithHeader { req := reqBody.(*CreatePartitionsRequest) res := &CreatePartitionsResponse{} res.TopicPartitionErrors = make(map[string]*TopicPartitionError) @@ -698,6 +698,43 @@ func (mr *MockCreatePartitionsResponse) For(reqBody versionedDecoder) encoder { return res } +type MockAlterPartitionReassignmentsResponse struct { + t TestReporter +} + +func NewMockAlterPartitionReassignmentsResponse(t TestReporter) *MockAlterPartitionReassignmentsResponse { + return &MockAlterPartitionReassignmentsResponse{t: t} +} + +func (mr *MockAlterPartitionReassignmentsResponse) For(reqBody versionedDecoder) encoderWithHeader { + req := reqBody.(*AlterPartitionReassignmentsRequest) + _ = req + res := &AlterPartitionReassignmentsResponse{} + return res +} + +type MockListPartitionReassignmentsResponse struct { + t TestReporter +} + +func NewMockListPartitionReassignmentsResponse(t TestReporter) *MockListPartitionReassignmentsResponse { + return &MockListPartitionReassignmentsResponse{t: t} +} + +func (mr *MockListPartitionReassignmentsResponse) For(reqBody versionedDecoder) encoderWithHeader { + req := reqBody.(*ListPartitionReassignmentsRequest) + _ = req + res := &ListPartitionReassignmentsResponse{} + + for topic, partitions := range 
req.blocks { + for _, partition := range partitions { + res.AddBlock(topic, partition, []int32{0}, []int32{1}, []int32{2}) + } + } + + return res +} + type MockDeleteRecordsResponse struct { t TestReporter } @@ -706,7 +743,7 @@ func NewMockDeleteRecordsResponse(t TestReporter) *MockDeleteRecordsResponse { return &MockDeleteRecordsResponse{t: t} } -func (mr *MockDeleteRecordsResponse) For(reqBody versionedDecoder) encoder { +func (mr *MockDeleteRecordsResponse) For(reqBody versionedDecoder) encoderWithHeader { req := reqBody.(*DeleteRecordsRequest) res := &DeleteRecordsResponse{} res.Topics = make(map[string]*DeleteRecordsResponseTopic) @@ -729,7 +766,7 @@ func NewMockDescribeConfigsResponse(t TestReporter) *MockDescribeConfigsResponse return &MockDescribeConfigsResponse{t: t} } -func (mr *MockDescribeConfigsResponse) For(reqBody versionedDecoder) encoder { +func (mr *MockDescribeConfigsResponse) For(reqBody versionedDecoder) encoderWithHeader { req := reqBody.(*DescribeConfigsRequest) res := &DescribeConfigsResponse{ Version: req.Version, @@ -824,7 +861,7 @@ func NewMockAlterConfigsResponse(t TestReporter) *MockAlterConfigsResponse { return &MockAlterConfigsResponse{t: t} } -func (mr *MockAlterConfigsResponse) For(reqBody versionedDecoder) encoder { +func (mr *MockAlterConfigsResponse) For(reqBody versionedDecoder) encoderWithHeader { req := reqBody.(*AlterConfigsRequest) res := &AlterConfigsResponse{} @@ -845,7 +882,7 @@ func NewMockCreateAclsResponse(t TestReporter) *MockCreateAclsResponse { return &MockCreateAclsResponse{t: t} } -func (mr *MockCreateAclsResponse) For(reqBody versionedDecoder) encoder { +func (mr *MockCreateAclsResponse) For(reqBody versionedDecoder) encoderWithHeader { req := reqBody.(*CreateAclsRequest) res := &CreateAclsResponse{} @@ -863,7 +900,7 @@ func NewMockListAclsResponse(t TestReporter) *MockListAclsResponse { return &MockListAclsResponse{t: t} } -func (mr *MockListAclsResponse) For(reqBody versionedDecoder) encoder { +func (mr *MockListAclsResponse) For(reqBody versionedDecoder) encoderWithHeader { req := reqBody.(*DescribeAclsRequest) res := &DescribeAclsResponse{} res.Err = ErrNoError @@ -905,7 +942,7 @@ func NewMockSaslAuthenticateResponse(t TestReporter) *MockSaslAuthenticateRespon return &MockSaslAuthenticateResponse{t: t} } -func (msar *MockSaslAuthenticateResponse) For(reqBody versionedDecoder) encoder { +func (msar *MockSaslAuthenticateResponse) For(reqBody versionedDecoder) encoderWithHeader { res := &SaslAuthenticateResponse{} res.Err = msar.kerror res.SaslAuthBytes = msar.saslAuthBytes @@ -936,7 +973,7 @@ func NewMockSaslHandshakeResponse(t TestReporter) *MockSaslHandshakeResponse { return &MockSaslHandshakeResponse{t: t} } -func (mshr *MockSaslHandshakeResponse) For(reqBody versionedDecoder) encoder { +func (mshr *MockSaslHandshakeResponse) For(reqBody versionedDecoder) encoderWithHeader { res := &SaslHandshakeResponse{} res.Err = mshr.kerror res.EnabledMechanisms = mshr.enabledMechanisms @@ -957,7 +994,7 @@ func NewMockDeleteAclsResponse(t TestReporter) *MockDeleteAclsResponse { return &MockDeleteAclsResponse{t: t} } -func (mr *MockDeleteAclsResponse) For(reqBody versionedDecoder) encoder { +func (mr *MockDeleteAclsResponse) For(reqBody versionedDecoder) encoderWithHeader { req := reqBody.(*DeleteAclsRequest) res := &DeleteAclsResponse{} @@ -983,7 +1020,7 @@ func (m *MockDeleteGroupsResponse) SetDeletedGroups(groups []string) *MockDelete return m } -func (m *MockDeleteGroupsResponse) For(reqBody versionedDecoder) encoder { +func (m 
*MockDeleteGroupsResponse) For(reqBody versionedDecoder) encoderWithHeader { resp := &DeleteGroupsResponse{ GroupErrorCodes: map[string]KError{}, } diff --git a/offset_commit_request.go b/offset_commit_request.go index 5732ed95c..9931cade5 100644 --- a/offset_commit_request.go +++ b/offset_commit_request.go @@ -170,6 +170,10 @@ func (r *OffsetCommitRequest) version() int16 { return r.Version } +func (r *OffsetCommitRequest) headerVersion() int16 { + return 1 +} + func (r *OffsetCommitRequest) requiredVersion() KafkaVersion { switch r.Version { case 1: diff --git a/offset_commit_response.go b/offset_commit_response.go index e842298db..342260ef5 100644 --- a/offset_commit_response.go +++ b/offset_commit_response.go @@ -94,6 +94,10 @@ func (r *OffsetCommitResponse) version() int16 { return r.Version } +func (r *OffsetCommitResponse) headerVersion() int16 { + return 0 +} + func (r *OffsetCommitResponse) requiredVersion() KafkaVersion { switch r.Version { case 1: diff --git a/offset_fetch_request.go b/offset_fetch_request.go index 68608241f..51e9faa3f 100644 --- a/offset_fetch_request.go +++ b/offset_fetch_request.go @@ -68,6 +68,10 @@ func (r *OffsetFetchRequest) version() int16 { return r.Version } +func (r *OffsetFetchRequest) headerVersion() int16 { + return 1 +} + func (r *OffsetFetchRequest) requiredVersion() KafkaVersion { switch r.Version { case 1: diff --git a/offset_fetch_response.go b/offset_fetch_response.go index 9e2570280..9c64e0708 100644 --- a/offset_fetch_response.go +++ b/offset_fetch_response.go @@ -155,6 +155,10 @@ func (r *OffsetFetchResponse) version() int16 { return r.Version } +func (r *OffsetFetchResponse) headerVersion() int16 { + return 0 +} + func (r *OffsetFetchResponse) requiredVersion() KafkaVersion { switch r.Version { case 1: diff --git a/offset_manager_test.go b/offset_manager_test.go index eca926d52..f1baa9cdb 100644 --- a/offset_manager_test.go +++ b/offset_manager_test.go @@ -134,7 +134,7 @@ func TestNewOffsetManagerOffsetsAutoCommit(t *testing.T) { ocResponse := new(OffsetCommitResponse) ocResponse.AddError("my_topic", 0, ErrNoError) - handler := func(req *request) (res encoder) { + handler := func(req *request) (res encoderWithHeader) { close(called) return ocResponse } @@ -329,7 +329,7 @@ func TestPartitionOffsetManagerResetOffsetWithRetention(t *testing.T) { ocResponse := new(OffsetCommitResponse) ocResponse.AddError("my_topic", 0, ErrNoError) - handler := func(req *request) (res encoder) { + handler := func(req *request) (res encoderWithHeader) { if req.body.version() != 2 { t.Errorf("Expected to be using version 2. Actual: %v", req.body.version()) } @@ -390,7 +390,7 @@ func TestPartitionOffsetManagerMarkOffsetWithRetention(t *testing.T) { ocResponse := new(OffsetCommitResponse) ocResponse.AddError("my_topic", 0, ErrNoError) - handler := func(req *request) (res encoder) { + handler := func(req *request) (res encoderWithHeader) { if req.body.version() != 2 { t.Errorf("Expected to be using version 2. 
Actual: %v", req.body.version()) } diff --git a/offset_request.go b/offset_request.go index 58e223762..c0b3305f6 100644 --- a/offset_request.go +++ b/offset_request.go @@ -116,6 +116,10 @@ func (r *OffsetRequest) version() int16 { return r.Version } +func (r *OffsetRequest) headerVersion() int16 { + return 1 +} + func (r *OffsetRequest) requiredVersion() KafkaVersion { switch r.Version { case 1: diff --git a/offset_response.go b/offset_response.go index 8b2193f9a..ead3ebbcc 100644 --- a/offset_response.go +++ b/offset_response.go @@ -150,6 +150,10 @@ func (r *OffsetResponse) version() int16 { return r.Version } +func (r *OffsetResponse) headerVersion() int16 { + return 0 +} + func (r *OffsetResponse) requiredVersion() KafkaVersion { switch r.Version { case 1: diff --git a/packet_decoder.go b/packet_decoder.go index 9be854c07..ed00ba350 100644 --- a/packet_decoder.go +++ b/packet_decoder.go @@ -10,8 +10,11 @@ type packetDecoder interface { getInt32() (int32, error) getInt64() (int64, error) getVarint() (int64, error) + getUVarint() (uint64, error) getArrayLength() (int, error) + getCompactArrayLength() (int, error) getBool() (bool, error) + getEmptyTaggedFieldArray() (int, error) // Collections getBytes() ([]byte, error) @@ -19,6 +22,9 @@ type packetDecoder interface { getRawBytes(length int) ([]byte, error) getString() (string, error) getNullableString() (*string, error) + getCompactString() (string, error) + getCompactNullableString() (*string, error) + getCompactInt32Array() ([]int32, error) getInt32Array() ([]int32, error) getInt64Array() ([]int64, error) getStringArray() ([]string, error) diff --git a/packet_encoder.go b/packet_encoder.go index 67b8daed8..50c735c04 100644 --- a/packet_encoder.go +++ b/packet_encoder.go @@ -12,6 +12,8 @@ type packetEncoder interface { putInt32(in int32) putInt64(in int64) putVarint(in int64) + putUVarint(in uint64) + putCompactArrayLength(in int) putArrayLength(in int) error putBool(in bool) @@ -19,11 +21,16 @@ type packetEncoder interface { putBytes(in []byte) error putVarintBytes(in []byte) error putRawBytes(in []byte) error + putCompactString(in string) error + putNullableCompactString(in *string) error putString(in string) error putNullableString(in *string) error putStringArray(in []string) error + putCompactInt32Array(in []int32) error + putNullableCompactInt32Array(in []int32) error putInt32Array(in []int32) error putInt64Array(in []int64) error + putEmptyTaggedFieldArray() // Provide the current offset to record the batch size metric offset() int diff --git a/prep_encoder.go b/prep_encoder.go index b633cd151..827542c50 100644 --- a/prep_encoder.go +++ b/prep_encoder.go @@ -2,6 +2,7 @@ package sarama import ( "encoding/binary" + "errors" "fmt" "math" @@ -36,6 +37,11 @@ func (pe *prepEncoder) putVarint(in int64) { pe.length += binary.PutVarint(buf[:], in) } +func (pe *prepEncoder) putUVarint(in uint64) { + var buf [binary.MaxVarintLen64]byte + pe.length += binary.PutUvarint(buf[:], in) +} + func (pe *prepEncoder) putArrayLength(in int) error { if in > math.MaxInt32 { return PacketEncodingError{fmt.Sprintf("array too long (%d)", in)} @@ -44,6 +50,10 @@ func (pe *prepEncoder) putArrayLength(in int) error { return nil } +func (pe *prepEncoder) putCompactArrayLength(in int) { + pe.putUVarint(uint64(in + 1)) +} + func (pe *prepEncoder) putBool(in bool) { pe.length++ } @@ -67,6 +77,20 @@ func (pe *prepEncoder) putVarintBytes(in []byte) error { return pe.putRawBytes(in) } +func (pe *prepEncoder) putCompactString(in string) error { + 
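+	// KIP-482 compact strings are length-prefixed with an unsigned varint of
+	// len+1 (0 is reserved for null). As a worked example, the 5-byte string
+	// "topic" is encoded as 0x06 't' 'o' 'p' 'i' 'c', which matches the bytes
+	// asserted in list_partition_reassignments_response_test.go.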
pe.putCompactArrayLength(len(in)) + return pe.putRawBytes([]byte(in)) +} + +func (pe *prepEncoder) putNullableCompactString(in *string) error { + if in == nil { + pe.putUVarint(0) + return nil + } else { + return pe.putCompactString(*in) + } +} + func (pe *prepEncoder) putRawBytes(in []byte) error { if len(in) > math.MaxInt32 { return PacketEncodingError{fmt.Sprintf("byteslice too long (%d)", len(in))} @@ -107,6 +131,27 @@ func (pe *prepEncoder) putStringArray(in []string) error { return nil } +func (pe *prepEncoder) putCompactInt32Array(in []int32) error { + if in == nil { + return errors.New("expected int32 array to be non null") + } + + pe.putUVarint(uint64(len(in)) + 1) + pe.length += 4 * len(in) + return nil +} + +func (pe *prepEncoder) putNullableCompactInt32Array(in []int32) error { + if in == nil { + pe.putUVarint(0) + return nil + } + + pe.putUVarint(uint64(len(in)) + 1) + pe.length += 4 * len(in) + return nil +} + func (pe *prepEncoder) putInt32Array(in []int32) error { err := pe.putArrayLength(len(in)) if err != nil { @@ -125,6 +170,10 @@ func (pe *prepEncoder) putInt64Array(in []int64) error { return nil } +func (pe *prepEncoder) putEmptyTaggedFieldArray() { + pe.putUVarint(0) +} + func (pe *prepEncoder) offset() int { return pe.length } diff --git a/produce_request.go b/produce_request.go index 178972a0f..0034651e2 100644 --- a/produce_request.go +++ b/produce_request.go @@ -206,6 +206,10 @@ func (r *ProduceRequest) version() int16 { return r.Version } +func (r *ProduceRequest) headerVersion() int16 { + return 1 +} + func (r *ProduceRequest) requiredVersion() KafkaVersion { switch r.Version { case 1: diff --git a/produce_response.go b/produce_response.go index 7fc2fffb3..edf978790 100644 --- a/produce_response.go +++ b/produce_response.go @@ -163,6 +163,22 @@ func (r *ProduceResponse) encode(pe packetEncoder) error { return nil } +func (r *ProduceResponse) key() int16 { + return 0 +} + +func (r *ProduceResponse) version() int16 { + return r.Version +} + +func (r *ProduceResponse) headerVersion() int16 { + return 0 +} + +func (r *ProduceResponse) requiredVersion() KafkaVersion { + return MinVersion +} + func (r *ProduceResponse) GetBlock(topic string, partition int32) *ProduceResponseBlock { if r.Blocks == nil { return nil diff --git a/real_decoder.go b/real_decoder.go index 6c5a1b9b0..8ac576db2 100644 --- a/real_decoder.go +++ b/real_decoder.go @@ -9,7 +9,9 @@ var errInvalidArrayLength = PacketDecodingError{"invalid array length"} var errInvalidByteSliceLength = PacketDecodingError{"invalid byteslice length"} var errInvalidStringLength = PacketDecodingError{"invalid string length"} var errVarintOverflow = PacketDecodingError{"varint overflow"} +var errUVarintOverflow = PacketDecodingError{"uvarint overflow"} var errInvalidBool = PacketDecodingError{"invalid bool"} +var errUnsupportedTaggedFields = PacketDecodingError{"non-empty tagged fields are not supported yet"} type realDecoder struct { raw []byte @@ -73,6 +75,22 @@ func (rd *realDecoder) getVarint() (int64, error) { return tmp, nil } +func (rd *realDecoder) getUVarint() (uint64, error) { + tmp, n := binary.Uvarint(rd.raw[rd.off:]) + if n == 0 { + rd.off = len(rd.raw) + return 0, ErrInsufficientData + } + + if n < 0 { + rd.off -= n + return 0, errUVarintOverflow + } + + rd.off += n + return tmp, nil +} + func (rd *realDecoder) getArrayLength() (int, error) { if rd.remaining() < 4 { rd.off = len(rd.raw) @@ -89,6 +107,19 @@ func (rd *realDecoder) getArrayLength() (int, error) { return tmp, nil } +func (rd *realDecoder) 
getCompactArrayLength() (int, error) { + n, err := rd.getUVarint() + if err != nil { + return 0, err + } + + if n == 0 { + return 0, nil + } + + return int(n) - 1, nil +} + func (rd *realDecoder) getBool() (bool, error) { b, err := rd.getInt8() if err != nil || b == 0 { @@ -100,6 +131,19 @@ func (rd *realDecoder) getBool() (bool, error) { return true, nil } +func (rd *realDecoder) getEmptyTaggedFieldArray() (int, error) { + tagCount, err := rd.getUVarint() + if err != nil { + return 0, err + } + + if tagCount != 0 { + return 0, errUnsupportedTaggedFields + } + + return 0, nil +} + // collections func (rd *realDecoder) getBytes() ([]byte, error) { @@ -167,6 +211,58 @@ func (rd *realDecoder) getNullableString() (*string, error) { return &tmpStr, err } +func (rd *realDecoder) getCompactString() (string, error) { + n, err := rd.getUVarint() + if err != nil { + return "", err + } + + var length = int(n - 1) + + tmpStr := string(rd.raw[rd.off : rd.off+length]) + rd.off += length + return tmpStr, nil +} + +func (rd *realDecoder) getCompactNullableString() (*string, error) { + n, err := rd.getUVarint() + + if err != nil { + return nil, err + } + + var length = int(n - 1) + + if length < 0 { + return nil, err + } + + tmpStr := string(rd.raw[rd.off : rd.off+length]) + rd.off += length + return &tmpStr, err +} + +func (rd *realDecoder) getCompactInt32Array() ([]int32, error) { + n, err := rd.getUVarint() + if err != nil { + return nil, err + } + + if n == 0 { + return nil, nil + } + + arrayLength := int(n) - 1 + + ret := make([]int32, arrayLength) + + for i := range ret { + ret[i] = int32(binary.BigEndian.Uint32(rd.raw[rd.off:])) + rd.off += 4 + } + return ret, nil +} + func (rd *realDecoder) getInt32Array() ([]int32, error) { if rd.remaining() < 4 { rd.off = len(rd.raw) diff --git a/real_encoder.go b/real_encoder.go index 3c75387f7..ba073f7d3 100644 --- a/real_encoder.go +++ b/real_encoder.go @@ -2,6 +2,7 @@ package sarama import ( "encoding/binary" + "errors" "github.com/rcrowley/go-metrics" ) @@ -39,11 +40,20 @@ func (re *realEncoder) putVarint(in int64) { re.off += binary.PutVarint(re.raw[re.off:], in) } +func (re *realEncoder) putUVarint(in uint64) { + re.off += binary.PutUvarint(re.raw[re.off:], in) +} + func (re *realEncoder) putArrayLength(in int) error { re.putInt32(int32(in)) return nil } +func (re *realEncoder) putCompactArrayLength(in int) { + // 0 represents a null array, so +1 has to be added + re.putUVarint(uint64(in + 1)) +} + func (re *realEncoder) putBool(in bool) { if in { re.putInt8(1) @@ -78,6 +88,19 @@ func (re *realEncoder) putVarintBytes(in []byte) error { return re.putRawBytes(in) } +func (re *realEncoder) putCompactString(in string) error { + re.putCompactArrayLength(len(in)) + return re.putRawBytes([]byte(in)) +} + +func (re *realEncoder) putNullableCompactString(in *string) error { + if in == nil { + re.putInt8(0) + return nil + } + return re.putCompactString(*in) +} + func (re *realEncoder) putString(in string) error { re.putInt16(int16(len(in))) copy(re.raw[re.off:], in) @@ -108,6 +131,31 @@ func (re *realEncoder) putStringArray(in []string) error { return nil } +func (re *realEncoder) putCompactInt32Array(in []int32) error { + if in == nil { + return errors.New("expected int32 array to be non null") + } + // 0 represents a null array, so +1 has to be added + re.putUVarint(uint64(len(in)) + 1) + for _, val := range in { + re.putInt32(val) + } + return nil +} + +func (re *realEncoder) putNullableCompactInt32Array(in []int32) error { + if in == nil { + re.putUVarint(0) + 
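+		// nil is the protocol's null array: under the len+1 scheme the
+		// reserved length value 0 is written, i.e. a single zero byte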
return nil + } + // 0 represents a null array, so +1 has to be added + re.putUVarint(uint64(len(in)) + 1) + for _, val := range in { + re.putInt32(val) + } + return nil +} + func (re *realEncoder) putInt32Array(in []int32) error { err := re.putArrayLength(len(in)) if err != nil { @@ -130,6 +178,10 @@ func (re *realEncoder) putInt64Array(in []int64) error { return nil } +func (re *realEncoder) putEmptyTaggedFieldArray() { + re.putUVarint(0) +} + func (re *realEncoder) offset() int { return re.off } diff --git a/request.go b/request.go index 6e4ad8731..dcfd3946c 100644 --- a/request.go +++ b/request.go @@ -11,6 +11,7 @@ type protocolBody interface { versionedDecoder key() int16 version() int16 + headerVersion() int16 requiredVersion() KafkaVersion } @@ -26,12 +27,19 @@ func (r *request) encode(pe packetEncoder) error { pe.putInt16(r.body.version()) pe.putInt32(r.correlationID) - err := pe.putString(r.clientID) - if err != nil { - return err + if r.body.headerVersion() >= 1 { + err := pe.putString(r.clientID) + if err != nil { + return err + } + } + + if r.body.headerVersion() >= 2 { + // we don't use tag headers at the moment so we just put an array length of 0 + pe.putUVarint(0) } - err = r.body.encode(pe) + err := r.body.encode(pe) if err != nil { return err } @@ -65,6 +73,14 @@ func (r *request) decode(pd packetDecoder) (err error) { return PacketDecodingError{fmt.Sprintf("unknown request key (%d)", key)} } + if r.body.headerVersion() >= 2 { + // tagged field + _, err = pd.getUVarint() + if err != nil { + return err + } + } + return r.body.decode(pd, version) } @@ -166,6 +182,10 @@ func allocateBody(key, version int16) protocolBody { return &CreatePartitionsRequest{} case 42: return &DeleteGroupsRequest{} + case 45: + return &AlterPartitionReassignmentsRequest{} + case 46: + return &ListPartitionReassignmentsRequest{} } return nil } diff --git a/request_test.go b/request_test.go index fec190795..95cd6bb32 100644 --- a/request_test.go +++ b/request_test.go @@ -42,13 +42,32 @@ func testRequest(t *testing.T, name string, rb protocolBody, expected []byte) { testRequestDecode(t, name, rb, packet) } +func testRequestWithoutByteComparison(t *testing.T, name string, rb protocolBody) { + if !rb.requiredVersion().IsAtLeast(MinVersion) { + t.Errorf("Request %s has invalid required version", name) + } + packet := testRequestEncode(t, name, rb, nil) + testRequestDecode(t, name, rb, packet) +} + func testRequestEncode(t *testing.T, name string, rb protocolBody, expected []byte) []byte { req := &request{correlationID: 123, clientID: "foo", body: rb} packet, err := encode(req, nil) - headerSize := 14 + len("foo") + + headerSize := 0 + + switch rb.headerVersion() { + case 1: + headerSize = 14 + len("foo") + case 2: + headerSize = 14 + len("foo") + 1 + default: + t.Error("Encoding", name, "failed\nheaderVersion", rb.headerVersion(), "not implemented") + } + if err != nil { t.Error(err) - } else if !bytes.Equal(packet[headerSize:], expected) { + } else if expected != nil && !bytes.Equal(packet[headerSize:], expected) { t.Error("Encoding", name, "failed\ngot ", packet[headerSize:], "\nwant", expected) } return packet diff --git a/response_header.go b/response_header.go index 7a7591851..5dffb75be 100644 --- a/response_header.go +++ b/response_header.go @@ -10,7 +10,7 @@ type responseHeader struct { correlationID int32 } -func (r *responseHeader) decode(pd packetDecoder) (err error) { +func (r *responseHeader) decode(pd packetDecoder, version int16) (err error) { r.length, err = pd.getInt32() if err != nil { 
return err @@ -20,5 +20,12 @@ func (r *responseHeader) decode(pd packetDecoder) (err error) { } r.correlationID, err = pd.getInt32() + + if version >= 1 { + if _, err := pd.getEmptyTaggedFieldArray(); err != nil { + return err + } + } + return err } diff --git a/response_header_test.go b/response_header_test.go index 8f9fdb80c..31c35ae6a 100644 --- a/response_header_test.go +++ b/response_header_test.go @@ -3,15 +3,31 @@ package sarama import "testing" var ( - responseHeaderBytes = []byte{ + responseHeaderBytesV0 = []byte{ 0x00, 0x00, 0x0f, 0x00, 0x0a, 0xbb, 0xcc, 0xff} + + responseHeaderBytesV1 = []byte{ + 0x00, 0x00, 0x0f, 0x00, + 0x0a, 0xbb, 0xcc, 0xff, 0x00} ) -func TestResponseHeader(t *testing.T) { +func TestResponseHeaderV0(t *testing.T) { + header := responseHeader{} + + testVersionDecodable(t, "response header", &header, responseHeaderBytesV0, 0) + if header.length != 0xf00 { + t.Error("Decoding header length failed, got", header.length) + } + if header.correlationID != 0x0abbccff { + t.Error("Decoding header correlation id failed, got", header.correlationID) + } +} + +func TestResponseHeaderV1(t *testing.T) { header := responseHeader{} - testDecodable(t, "response header", &header, responseHeaderBytes) + testVersionDecodable(t, "response header", &header, responseHeaderBytesV1, 1) if header.length != 0xf00 { t.Error("Decoding header length failed, got", header.length) } diff --git a/sasl_authenticate_request.go b/sasl_authenticate_request.go index 54c8b0992..90504df6f 100644 --- a/sasl_authenticate_request.go +++ b/sasl_authenticate_request.go @@ -24,6 +24,10 @@ func (r *SaslAuthenticateRequest) version() int16 { return 0 } +func (r *SaslAuthenticateRequest) headerVersion() int16 { + return 1 +} + func (r *SaslAuthenticateRequest) requiredVersion() KafkaVersion { return V1_0_0_0 } diff --git a/sasl_authenticate_response.go b/sasl_authenticate_response.go index 0038c3f36..3ef57b5af 100644 --- a/sasl_authenticate_response.go +++ b/sasl_authenticate_response.go @@ -39,6 +39,10 @@ func (r *SaslAuthenticateResponse) version() int16 { return 0 } +func (r *SaslAuthenticateResponse) headerVersion() int16 { + return 0 +} + func (r *SaslAuthenticateResponse) requiredVersion() KafkaVersion { return V1_0_0_0 } diff --git a/sasl_handshake_request.go b/sasl_handshake_request.go index fe5ba0504..74dc3072f 100644 --- a/sasl_handshake_request.go +++ b/sasl_handshake_request.go @@ -29,6 +29,10 @@ func (r *SaslHandshakeRequest) version() int16 { return r.Version } +func (r *SaslHandshakeRequest) headerVersion() int16 { + return 1 +} + func (r *SaslHandshakeRequest) requiredVersion() KafkaVersion { return V0_10_0_0 } diff --git a/sasl_handshake_response.go b/sasl_handshake_response.go index ef290d4bc..69dfc3178 100644 --- a/sasl_handshake_response.go +++ b/sasl_handshake_response.go @@ -33,6 +33,10 @@ func (r *SaslHandshakeResponse) version() int16 { return 0 } +func (r *SaslHandshakeResponse) headerVersion() int16 { + return 0 +} + func (r *SaslHandshakeResponse) requiredVersion() KafkaVersion { return V0_10_0_0 } diff --git a/sync_group_request.go b/sync_group_request.go index fe207080e..ac6ecb13e 100644 --- a/sync_group_request.go +++ b/sync_group_request.go @@ -77,6 +77,10 @@ func (r *SyncGroupRequest) version() int16 { return 0 } +func (r *SyncGroupRequest) headerVersion() int16 { + return 1 +} + func (r *SyncGroupRequest) requiredVersion() KafkaVersion { return V0_9_0_0 } diff --git a/sync_group_response.go b/sync_group_response.go index 194b382b4..af019c42f 100644 --- a/sync_group_response.go 
+++ b/sync_group_response.go @@ -36,6 +36,10 @@ func (r *SyncGroupResponse) version() int16 { return 0 } +func (r *SyncGroupResponse) headerVersion() int16 { + return 0 +} + func (r *SyncGroupResponse) requiredVersion() KafkaVersion { return V0_9_0_0 } diff --git a/txn_offset_commit_request.go b/txn_offset_commit_request.go index 71e95b814..c4043a335 100644 --- a/txn_offset_commit_request.go +++ b/txn_offset_commit_request.go @@ -91,6 +91,10 @@ func (a *TxnOffsetCommitRequest) version() int16 { return 0 } +func (a *TxnOffsetCommitRequest) headerVersion() int16 { + return 1 +} + func (a *TxnOffsetCommitRequest) requiredVersion() KafkaVersion { return V0_11_0_0 } diff --git a/txn_offset_commit_response.go b/txn_offset_commit_response.go index 6c980f406..94d8029da 100644 --- a/txn_offset_commit_response.go +++ b/txn_offset_commit_response.go @@ -78,6 +78,10 @@ func (a *TxnOffsetCommitResponse) version() int16 { return 0 } +func (a *TxnOffsetCommitResponse) headerVersion() int16 { + return 0 +} + func (a *TxnOffsetCommitResponse) requiredVersion() KafkaVersion { return V0_11_0_0 } From a68bc327ae5cfe8714b029eb691a9aaf81917177 Mon Sep 17 00:00:00 2001 From: Dirk Wilden Date: Thu, 12 Mar 2020 20:19:33 +0100 Subject: [PATCH 02/41] make fields in PartitionReplicaReassignmentsStatus public --- list_partition_reassignments_response.go | 20 ++++++++++---------- 1 file changed, 10 insertions(+), 10 deletions(-) diff --git a/list_partition_reassignments_response.go b/list_partition_reassignments_response.go index a5786ee7f..bd7c9f767 100644 --- a/list_partition_reassignments_response.go +++ b/list_partition_reassignments_response.go @@ -1,19 +1,19 @@ package sarama type PartitionReplicaReassignmentsStatus struct { - replicas []int32 - addingReplicas []int32 - removingReplicas []int32 + Replicas []int32 + AddingReplicas []int32 + RemovingReplicas []int32 } func (b *PartitionReplicaReassignmentsStatus) encode(pe packetEncoder) error { - if err := pe.putCompactInt32Array(b.replicas); err != nil { + if err := pe.putCompactInt32Array(b.Replicas); err != nil { return err } - if err := pe.putCompactInt32Array(b.addingReplicas); err != nil { + if err := pe.putCompactInt32Array(b.AddingReplicas); err != nil { return err } - if err := pe.putCompactInt32Array(b.removingReplicas); err != nil { + if err := pe.putCompactInt32Array(b.RemovingReplicas); err != nil { return err } @@ -23,15 +23,15 @@ func (b *PartitionReplicaReassignmentsStatus) encode(pe packetEncoder) error { } func (b *PartitionReplicaReassignmentsStatus) decode(pd packetDecoder) (err error) { - if b.replicas, err = pd.getCompactInt32Array(); err != nil { + if b.Replicas, err = pd.getCompactInt32Array(); err != nil { return err } - if b.addingReplicas, err = pd.getCompactInt32Array(); err != nil { + if b.AddingReplicas, err = pd.getCompactInt32Array(); err != nil { return err } - if b.removingReplicas, err = pd.getCompactInt32Array(); err != nil { + if b.RemovingReplicas, err = pd.getCompactInt32Array(); err != nil { return err } @@ -60,7 +60,7 @@ func (r *ListPartitionReassignmentsResponse) AddBlock(topic string, partition in r.TopicStatus[topic] = partitions } - partitions[partition] = &PartitionReplicaReassignmentsStatus{replicas: replicas, addingReplicas: addingReplicas, removingReplicas: removingReplicas} + partitions[partition] = &PartitionReplicaReassignmentsStatus{Replicas: replicas, AddingReplicas: addingReplicas, RemovingReplicas: removingReplicas} } func (r *ListPartitionReassignmentsResponse) encode(pe packetEncoder) error { From 
89f624030147812861a5640b85c2bf9000f2b377 Mon Sep 17 00:00:00 2001 From: Dirk Wilden Date: Thu, 12 Mar 2020 20:24:39 +0100 Subject: [PATCH 03/41] fix decoding response with empty topic list --- list_partition_reassignments_response.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/list_partition_reassignments_response.go b/list_partition_reassignments_response.go index bd7c9f767..4baa6a08e 100644 --- a/list_partition_reassignments_response.go +++ b/list_partition_reassignments_response.go @@ -110,7 +110,7 @@ func (r *ListPartitionReassignmentsResponse) decode(pd packetDecoder, version in } numTopics, err := pd.getCompactArrayLength() - if err != nil || numTopics == 0 { + if err != nil { return err } From ae549510a36d52beca8e9aa4c728f72ae0033cfd Mon Sep 17 00:00:00 2001 From: kzing Date: Tue, 17 Mar 2020 15:26:02 +0800 Subject: [PATCH 04/41] fix config.net.keepalive document --- config.go | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/config.go b/config.go index 08d50cebc..fb7e5513e 100644 --- a/config.go +++ b/config.go @@ -96,8 +96,9 @@ type Config struct { GSSAPI GSSAPIConfig } - // KeepAlive specifies the keep-alive period for an active network connection. - // If zero, keep-alives are disabled. (default is 0: disabled). + // KeepAlive specifies the keep-alive period for an active network connection (defaults to 0). + // If zero or positive, keep-alives are enabled. + // If negative, keep-alives are disabled. KeepAlive time.Duration // LocalAddr is the local address to use when dialing an From 159fd52ba14fe1da64fb598544bd58b0d1959a3e Mon Sep 17 00:00:00 2001 From: kzinglzy Date: Tue, 17 Mar 2020 16:11:36 +0800 Subject: [PATCH 05/41] rm validation and the corresponding test --- config.go | 2 -- config_test.go | 5 ----- 2 files changed, 7 deletions(-) diff --git a/config.go b/config.go index fb7e5513e..e899820cb 100644 --- a/config.go +++ b/config.go @@ -538,8 +538,6 @@ func (c *Config) Validate() error { return ConfigurationError("Net.ReadTimeout must be > 0") case c.Net.WriteTimeout <= 0: return ConfigurationError("Net.WriteTimeout must be > 0") - case c.Net.KeepAlive < 0: - return ConfigurationError("Net.KeepAlive must be >= 0") case c.Net.SASL.Enable: if c.Net.SASL.Mechanism == "" { c.Net.SASL.Mechanism = SASLTypePlaintext diff --git a/config_test.go b/config_test.go index a7267b12e..6da9dbe5f 100644 --- a/config_test.go +++ b/config_test.go @@ -67,11 +67,6 @@ func TestNetConfigValidates(t *testing.T) { cfg.Net.WriteTimeout = 0 }, "Net.WriteTimeout must be > 0"}, - {"KeepAlive", - func(cfg *Config) { - cfg.Net.KeepAlive = -1 - }, - "Net.KeepAlive must be >= 0"}, {"SASL.User", func(cfg *Config) { cfg.Net.SASL.Enable = true From d89fa9dc141e2984bba433976f75eb7cb8825b37 Mon Sep 17 00:00:00 2001 From: Mickael Maison Date: Thu, 19 Mar 2020 11:27:48 +0000 Subject: [PATCH 06/41] Add DescribeLogDirs to admin client --- admin.go | 48 +++++++++++++++++++++++++++++++++++ admin_test.go | 48 +++++++++++++++++++++++++++++++++++ describe_log_dirs_response.go | 6 +++++ mockresponses.go | 42 ++++++++++++++++++++++++++++++ 4 files changed, 144 insertions(+) diff --git a/admin.go b/admin.go index a7d733da1..44e7780b0 100644 --- a/admin.go +++ b/admin.go @@ -101,6 +101,9 @@ type ClusterAdmin interface { // Get information about the nodes in the cluster DescribeCluster() (brokers []*Broker, controllerID int32, err error) + // Get information about all log directories on the given set of brokers + DescribeLogDirs(brokers []int32) 
(map[int32][]DescribeLogDirsResponseDirMetadata, error)
+
 	// Close shuts down the admin and closes underlying client.
 	Close() error
 }
@@ -878,3 +881,48 @@ func (ca *clusterAdmin) DeleteConsumerGroup(group string) error {
 
 	return nil
 }
+
+func (ca *clusterAdmin) DescribeLogDirs(brokerIds []int32) (allLogDirs map[int32][]DescribeLogDirsResponseDirMetadata, err error) {
+	allLogDirs = make(map[int32][]DescribeLogDirsResponseDirMetadata)
+
+	// Query brokers in parallel, since we may have to query multiple brokers
+	logDirsMaps := make(chan map[int32][]DescribeLogDirsResponseDirMetadata, len(brokerIds))
+	errors := make(chan error, len(brokerIds))
+	wg := sync.WaitGroup{}
+
+	for _, b := range brokerIds {
+		broker, err := ca.findBroker(b)
+		if err != nil {
+			Logger.Printf("Unable to find broker with ID = %v\n", b)
+			continue
+		}
+		wg.Add(1)
+		go func(b *Broker, conf *Config) {
+			defer wg.Done()
+			_ = b.Open(conf) // Ensure that broker is opened
+
+			response, err := b.DescribeLogDirs(&DescribeLogDirsRequest{})
+			if err != nil {
+				errors <- err
+				return
+			}
+			logDirs := make(map[int32][]DescribeLogDirsResponseDirMetadata)
+			logDirs[b.ID()] = response.LogDirs
+			logDirsMaps <- logDirs
+		}(broker, ca.conf)
+	}
+
+	wg.Wait()
+	close(logDirsMaps)
+	close(errors)
+
+	for logDirsMap := range logDirsMaps {
+		for id, logDirs := range logDirsMap {
+			allLogDirs[id] = logDirs
+		}
+	}
+
+	// Intentionally return only the first error for simplicity
+	err = <-errors
+	return
+}
diff --git a/admin_test.go b/admin_test.go
index bcec47f61..ce7e46f58 100644
--- a/admin_test.go
+++ b/admin_test.go
@@ -1309,3 +1309,51 @@ func TestRefreshMetaDataWithDifferentController(t *testing.T) {
 			seedBroker2.BrokerID(), b.ID())
 	}
 }
+
+func TestDescribeLogDirs(t *testing.T) {
+	seedBroker := NewMockBroker(t, 1)
+	defer seedBroker.Close()
+
+	seedBroker.SetHandlerByMap(map[string]MockResponse{
+		"MetadataRequest": NewMockMetadataResponse(t).
+			SetController(seedBroker.BrokerID()).
+			SetBroker(seedBroker.Addr(), seedBroker.BrokerID()),
+		"DescribeLogDirsRequest": NewMockDescribeLogDirsResponse(t).
+ SetLogDirs("/tmp/logs", map[string]int{"topic1": 2, "topic2": 2}), + }) + + config := NewConfig() + config.Version = V1_0_0_0 + + admin, err := NewClusterAdmin([]string{seedBroker.Addr()}, config) + if err != nil { + t.Fatal(err) + } + + logDirsPerBroker, err := admin.DescribeLogDirs([]int32{seedBroker.BrokerID()}) + if err != nil { + t.Fatal(err) + } + + if len(logDirsPerBroker) != 1 { + t.Fatalf("Expected %v results, got %v", 1, len(logDirsPerBroker)) + } + logDirs := logDirsPerBroker[seedBroker.BrokerID()] + if len(logDirs) != 1 { + t.Fatalf("Expected log dirs for broker %v to be returned, but it did not, got %v", seedBroker.BrokerID(), len(logDirs)) + } + logDirsBroker := logDirs[0] + if logDirsBroker.ErrorCode != ErrNoError { + t.Fatalf("Expected no error for broker %v, but it was %v", seedBroker.BrokerID(), logDirsBroker.ErrorCode) + } + if logDirsBroker.Path != "/tmp/logs" { + t.Fatalf("Expected log dirs for broker %v to be '/tmp/logs', but it was %v", seedBroker.BrokerID(), logDirsBroker.Path) + } + if len(logDirsBroker.Topics) != 2 { + t.Fatalf("Expected log dirs for broker %v to have 2 topics, but it had %v", seedBroker.BrokerID(), len(logDirsBroker.Topics)) + } + err = admin.Close() + if err != nil { + t.Fatal(err) + } +} diff --git a/describe_log_dirs_response.go b/describe_log_dirs_response.go index a9a747615..411da38ad 100644 --- a/describe_log_dirs_response.go +++ b/describe_log_dirs_response.go @@ -84,6 +84,9 @@ func (r *DescribeLogDirsResponseDirMetadata) encode(pe packetEncoder) error { return err } + if err := pe.putArrayLength(len(r.Topics)); err != nil { + return err + } for _, topic := range r.Topics { if err := topic.encode(pe); err != nil { return err @@ -137,6 +140,9 @@ func (r *DescribeLogDirsResponseTopic) encode(pe packetEncoder) error { return err } + if err := pe.putArrayLength(len(r.Partitions)); err != nil { + return err + } for _, partition := range r.Partitions { if err := partition.encode(pe); err != nil { return err diff --git a/mockresponses.go b/mockresponses.go index e77463a58..dcb809dae 100644 --- a/mockresponses.go +++ b/mockresponses.go @@ -1029,3 +1029,45 @@ func (m *MockDeleteGroupsResponse) For(reqBody versionedDecoder) encoderWithHead } return resp } + +type MockDescribeLogDirsResponse struct { + t TestReporter + logDirs []DescribeLogDirsResponseDirMetadata +} + +func NewMockDescribeLogDirsResponse(t TestReporter) *MockDescribeLogDirsResponse { + return &MockDescribeLogDirsResponse{t: t} +} + +func (m *MockDescribeLogDirsResponse) SetLogDirs(logDirPath string, topicPartitions map[string]int) *MockDescribeLogDirsResponse { + topics := []DescribeLogDirsResponseTopic{} + for topic := range topicPartitions { + partitions := []DescribeLogDirsResponsePartition{} + for i := 0; i < topicPartitions[topic]; i++ { + partitions = append(partitions, DescribeLogDirsResponsePartition{ + PartitionID: int32(i), + IsTemporary: false, + OffsetLag: int64(0), + Size: int64(1234), + }) + } + topics = append(topics, DescribeLogDirsResponseTopic{ + Topic: topic, + Partitions: partitions, + }) + } + logDir := DescribeLogDirsResponseDirMetadata{ + ErrorCode: ErrNoError, + Path: logDirPath, + Topics: topics, + } + m.logDirs = []DescribeLogDirsResponseDirMetadata{logDir} + return m +} + +func (m *MockDescribeLogDirsResponse) For(reqBody versionedDecoder) encoderWithHeader { + resp := &DescribeLogDirsResponse{ + LogDirs: m.logDirs, + } + return resp +} From a74c07e4d6980c01dd56e92914c52c14af7c548e Mon Sep 17 00:00:00 2001 From: Sebastien Lavoie Date: Mon, 23 Mar 2020 
13:35:52 -0400
Subject: [PATCH 07/41] Return the response error in heartbeatLoop

`err` is always `nil`; it is checked above already.
---
 consumer_group.go | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/consumer_group.go b/consumer_group.go
index 0bc32b976..056b9e387 100644
--- a/consumer_group.go
+++ b/consumer_group.go
@@ -760,7 +760,7 @@ func (s *consumerGroupSession) heartbeatLoop() {
 		case ErrRebalanceInProgress, ErrUnknownMemberId, ErrIllegalGeneration:
 			return
 		default:
-			s.parent.handleError(err, "", -1)
+			s.parent.handleError(resp.Err, "", -1)
 			return
 		}

From 26bbb0417c682a0e38fd753f21254c66befc5c2a Mon Sep 17 00:00:00 2001
From: zoushengfu
Date: Thu, 19 Mar 2020 11:38:01 +0800
Subject: [PATCH 08/41] bugfix: remove brokers which no longer exist in
 metadata

---
 client.go        | 29 ++++++++++++++++++++++++++---
 client_test.go   | 35 +++++++++++++++++++++++++++++++++--
 consumer_test.go | 11 +++++++++--
 3 files changed, 68 insertions(+), 7 deletions(-)

diff --git a/client.go b/client.go
index 81a3ca5b5..c3392f961 100644
--- a/client.go
+++ b/client.go
@@ -562,6 +562,30 @@ func (client *client) RefreshCoordinator(consumerGroup string) error {

 // private broker management helpers

+func (client *client) updateBroker(brokers []*Broker) {
+	var currentBroker = make(map[int32]*Broker, len(brokers))
+
+	for _, broker := range brokers {
+		currentBroker[broker.ID()] = broker
+		if client.brokers[broker.ID()] == nil { // add new broker
+			client.brokers[broker.ID()] = broker
+			Logger.Printf("client/brokers registered new broker #%d at %s", broker.ID(), broker.Addr())
+		} else if broker.Addr() != client.brokers[broker.ID()].Addr() { // replace broker with new address
+			safeAsyncClose(client.brokers[broker.ID()])
+			client.brokers[broker.ID()] = broker
+			Logger.Printf("client/brokers replaced registered broker #%d with %s", broker.ID(), broker.Addr())
+		}
+	}
+
+	for id, broker := range client.brokers {
+		if _, exist := currentBroker[id]; !exist { // remove old broker
+			safeAsyncClose(broker)
+			delete(client.brokers, id)
+			Logger.Printf("client/brokers removed invalid broker #%d at %s", broker.ID(), broker.Addr())
+		}
+	}
+}
+
 // registerBroker makes sure a broker received by a Metadata or Coordinator request is registered
 // in the brokers map. It returns the broker that is registered, which may be the provided broker,
 // or a previously registered Broker instance. You must hold the write lock before calling this function.
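// To illustrate updateBroker above with hypothetical IDs: if client.brokers
// holds brokers {1, 2, 3} and a metadata refresh returns only {1, 2} with
// broker 2 moved to a new address, updateBroker keeps 1 untouched, closes the
// stale connection for 2 and registers it at the new address, and closes and
// deletes 3.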
@@ -885,10 +909,9 @@ func (client *client) updateMetadata(data *MetadataResponse, allKnownMetaData bo
 	// For all the brokers we received:
 	//   - if it is a new ID, save it
 	//   - if it is an existing ID, but the address we have is stale, discard the old one and save it
+	//   - if a broker we know about is missing from it, remove the old broker
 	//   - otherwise ignore it, replacing our existing one would just bounce the connection
-	for _, broker := range data.Brokers {
-		client.registerBroker(broker)
-	}
+	client.updateBroker(data.Brokers)

 	client.controllerID = data.ControllerID
diff --git a/client_test.go b/client_test.go
index 2beef6cfa..cd47a500b 100644
--- a/client_test.go
+++ b/client_test.go
@@ -441,6 +441,8 @@ func TestClientReceivingPartialMetadata(t *testing.T) {
 	replicas := []int32{leader.BrokerID(), seedBroker.BrokerID()}

 	metadataPartial := new(MetadataResponse)
+	metadataPartial.AddBroker(seedBroker.Addr(), 1)
+	metadataPartial.AddBroker(leader.Addr(), 5)
 	metadataPartial.AddTopic("new_topic", ErrLeaderNotAvailable)
 	metadataPartial.AddTopicPartition("new_topic", 0, leader.BrokerID(), replicas, replicas, []int32{}, ErrNoError)
 	metadataPartial.AddTopicPartition("new_topic", 1, -1, replicas, []int32{}, []int32{}, ErrLeaderNotAvailable)
@@ -485,6 +487,7 @@ func TestClientRefreshBehaviour(t *testing.T) {
 	seedBroker.Returns(metadataResponse1)

 	metadataResponse2 := new(MetadataResponse)
+	metadataResponse2.AddBroker(leader.Addr(), leader.BrokerID())
 	metadataResponse2.AddTopicPartition("my_topic", 0xb, leader.BrokerID(), nil, nil, nil, ErrNoError)
 	seedBroker.Returns(metadataResponse2)

@@ -512,6 +515,36 @@ func TestClientRefreshBehaviour(t *testing.T) {
 	safeClose(t, client)
 }

+func TestClientRefreshMetadataBrokerOffline(t *testing.T) {
+	seedBroker := NewMockBroker(t, 1)
+	leader := NewMockBroker(t, 5)
+
+	metadataResponse1 := new(MetadataResponse)
+	metadataResponse1.AddBroker(leader.Addr(), leader.BrokerID())
+	metadataResponse1.AddBroker(seedBroker.Addr(), seedBroker.BrokerID())
+	seedBroker.Returns(metadataResponse1)
+
+	client, err := NewClient([]string{seedBroker.Addr()}, nil)
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	if len(client.Brokers()) != 2 {
+		t.Error("Expected 2 brokers in metadata")
+	}
+
+	metadataResponse2 := new(MetadataResponse)
+	metadataResponse2.AddBroker(leader.Addr(), leader.BrokerID())
+	seedBroker.Returns(metadataResponse2)
+
+	if err := client.RefreshMetadata(); err != nil {
+		t.Error(err)
+	}
+	if len(client.Brokers()) != 1 {
+		t.Error("Expected 1 broker in metadata")
+	}
+}
+
 func TestClientResurrectDeadSeeds(t *testing.T) {
 	initialSeed := NewMockBroker(t, 0)
 	emptyMetadata := new(MetadataResponse)
@@ -656,7 +689,6 @@ func TestClientMetadataTimeout(t *testing.T) {

 			// Start refreshing metadata in the background
 			errChan := make(chan error)
-			start := time.Now()
 			go func() {
 				errChan <- c.RefreshMetadata()
 			}()
@@ -666,7 +698,6 @@ func TestClientMetadataTimeout(t *testing.T) {
 			maxRefreshDuration := 2 * timeout
 			select {
 			case err := <-errChan:
-				t.Logf("Got err: %v after waiting for: %v", err, time.Since(start))
 				if err == nil {
 					t.Fatal("Expected failed RefreshMetadata, got nil")
 				}
diff --git a/consumer_test.go b/consumer_test.go
index 7b29ebce5..20ee8ddbd 100644
--- a/consumer_test.go
+++ b/consumer_test.go
@@ -640,6 +640,7 @@ func TestConsumerRebalancingMultiplePartitions(t *testing.T) {
 		"MetadataRequest": NewMockMetadataResponse(t).
 			SetBroker(leader0.Addr(), leader0.BrokerID()).
 			SetBroker(leader1.Addr(), leader1.BrokerID()).
+			SetBroker(seedBroker.Addr(), seedBroker.BrokerID()).
SetLeader("my_topic", 0, leader0.BrokerID()).
 			SetLeader("my_topic", 1, leader1.BrokerID()),
 	})
@@ -720,7 +721,10 @@ func TestConsumerRebalancingMultiplePartitions(t *testing.T) {
 	seedBroker.SetHandlerByMap(map[string]MockResponse{
 		"MetadataRequest": NewMockMetadataResponse(t).
 			SetLeader("my_topic", 0, leader1.BrokerID()).
-			SetLeader("my_topic", 1, leader1.BrokerID()),
+			SetLeader("my_topic", 1, leader1.BrokerID()).
+			SetBroker(leader0.Addr(), leader0.BrokerID()).
+			SetBroker(leader1.Addr(), leader1.BrokerID()).
+			SetBroker(seedBroker.Addr(), seedBroker.BrokerID()),
 	})

 	// leader0 says no longer leader of partition 0
@@ -759,7 +763,10 @@ func TestConsumerRebalancingMultiplePartitions(t *testing.T) {
 	seedBroker.SetHandlerByMap(map[string]MockResponse{
 		"MetadataRequest": NewMockMetadataResponse(t).
 			SetLeader("my_topic", 0, leader1.BrokerID()).
-			SetLeader("my_topic", 1, leader0.BrokerID()),
+			SetLeader("my_topic", 1, leader0.BrokerID()).
+			SetBroker(leader0.Addr(), leader0.BrokerID()).
+			SetBroker(leader1.Addr(), leader1.BrokerID()).
+			SetBroker(seedBroker.Addr(), seedBroker.BrokerID()),
 	})

 	// leader1 provides three more messages on partition0, says no longer leader of partition1
From b9b3d095f504c13f89b1773c6ac3eada2e67a776 Mon Sep 17 00:00:00 2001
From: KJ Tsanaktsidis
Date: Wed, 18 Mar 2020 16:01:15 +1100
Subject: [PATCH 09/41] Fix brokers continually allocating new Session IDs

KIP-227 says that if a client provides a session ID of 0 and a session
epoch of 0 in a FetchRequest, the broker should allocate a new sessionID.
Then, the consumer can use this ID to avoid repeating the list of
partitions to consume on every call.

Sarama is currently sending 0 for both values, but totally ignoring the
session ID generated by the broker. This means that the broker is
allocating a new session ID on every request, filling its session ID
cache and leading to FETCH_SESSION_ID_NOT_FOUND logs being written by
the broker because other _actual_ session IDs are evicted.

Instead, we should set the epoch to -1, which instructs the broker not
to allocate a new session ID that's just going to be ignored anyway.
---
 consumer.go      |  8 ++++++++
 consumer_test.go | 51 +++++++++++++++++++++++++++++++++++++++++++++++-
 2 files changed, 58 insertions(+), 1 deletion(-)

diff --git a/consumer.go b/consumer.go
index e311c7e89..e16d08aa9 100644
--- a/consumer.go
+++ b/consumer.go
@@ -887,6 +887,14 @@ func (bc *brokerConsumer) fetchNewMessages() (*FetchResponse, error) {
 		request.Version = 4
 		request.Isolation = bc.consumer.conf.Consumer.IsolationLevel
 	}
+	if bc.consumer.conf.Version.IsAtLeast(V1_1_0_0) {
+		request.Version = 7
+		// We do not currently implement KIP-227 FetchSessions. Setting the id to 0
+		// and the epoch to -1 tells the broker not to generate a session ID we're going
+		// to just ignore anyway.
+		request.SessionID = 0
+		request.SessionEpoch = -1
+	}
 	if bc.consumer.conf.Version.IsAtLeast(V2_1_0_0) {
 		request.Version = 10
 	}
diff --git a/consumer_test.go b/consumer_test.go
index 7b29ebce5..ae2306526 100644
--- a/consumer_test.go
+++ b/consumer_test.go
@@ -488,7 +488,7 @@ func TestConsumerReceivingFetchResponseWithTooOldRecords(t *testing.T) {
 	cfg := NewConfig()
 	cfg.Consumer.Return.Errors = true
-	cfg.Version = V1_1_0_0
+	cfg.Version = V0_11_0_0
 
 	broker0 := NewMockBroker(t, 0)
 
@@ -569,6 +569,55 @@ func TestConsumeMessageWithNewerFetchAPIVersion(t *testing.T) {
 	broker0.Close()
 }
 
+func TestConsumeMessageWithSessionIDs(t *testing.T) {
+	// Given
+	fetchResponse1 := &FetchResponse{Version: 7}
+	fetchResponse1.AddMessage("my_topic", 0, nil, testMsg, 1)
+	fetchResponse1.AddMessage("my_topic", 0, nil, testMsg, 2)
+
+	cfg := NewConfig()
+	cfg.Version = V1_1_0_0
+
+	broker0 := NewMockBroker(t, 0)
+	fetchResponse2 := &FetchResponse{}
+	fetchResponse2.Version = 7
+	fetchResponse2.AddError("my_topic", 0, ErrNoError)
+
+	broker0.SetHandlerByMap(map[string]MockResponse{
+		"MetadataRequest": NewMockMetadataResponse(t).
+			SetBroker(broker0.Addr(), broker0.BrokerID()).
+			SetLeader("my_topic", 0, broker0.BrokerID()),
+		"OffsetRequest": NewMockOffsetResponse(t).
+			SetVersion(1).
+			SetOffset("my_topic", 0, OffsetNewest, 1234).
+			SetOffset("my_topic", 0, OffsetOldest, 0),
+		"FetchRequest": NewMockSequence(fetchResponse1, fetchResponse2),
+	})
+
+	master, err := NewConsumer([]string{broker0.Addr()}, cfg)
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	// When
+	consumer, err := master.ConsumePartition("my_topic", 0, 1)
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	assertMessageOffset(t, <-consumer.Messages(), 1)
+	assertMessageOffset(t, <-consumer.Messages(), 2)
+
+	safeClose(t, consumer)
+	safeClose(t, master)
+	broker0.Close()
+
+	fetchReq := broker0.History()[3].Request.(*FetchRequest)
+	if fetchReq.SessionID != 0 || fetchReq.SessionEpoch != -1 {
+		t.Error("Expected session ID to be zero & Epoch to be -1")
+	}
+}
+
 // It is fine if offsets of fetched messages are not sequential (although
 // strictly increasing!).
 func TestConsumerNonSequentialOffsets(t *testing.T) {

From 9df3038ad4f9579970464498bc8b349f4169e5a5 Mon Sep 17 00:00:00 2001
From: KJ Tsanaktsidis
Date: Thu, 9 Apr 2020 16:48:09 +1000
Subject: [PATCH 10/41] Fix "broker received out of order sequence" when
 brokers die

When the following three conditions are satisfied, the producer code can
skip message sequence numbers and cause the broker to complain that the
sequences are out of order:

* config.Producer.Idempotent is set
* The producer loses, and then regains, its connection to a broker
* The client code continues to attempt to produce messages whilst the
  broker is unavailable.

For every message the client attempted to send while the broker was
unavailable, the transaction manager sequence number will be incremented;
however, these messages will eventually fail and return an error to the
caller. When the broker re-appears and another message is published, its
sequence number is higher than the last one the broker remembered - the
values that were attempted while it was down were never seen. Thus, from
the broker's perspective, it's seeing out-of-order sequence numbers.
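To make the failure mode concrete, here is a toy model (an editorial
sketch, not Sarama code): a per-partition counter that is consumed by
every publish attempt, including attempts that never reach the broker.

    package main

    import "fmt"

    func main() {
    	next := int32(0)      // producer-side sequence counter for one partition
    	var brokerSaw []int32 // sequences the broker actually received

    	attempt := func(brokerUp bool) {
    		seq := next
    		next++ // consumed even when the send fails
    		if brokerUp {
    			brokerSaw = append(brokerSaw, seq)
    		}
    	}

    	attempt(true)  // broker records 0
    	attempt(false) // broker down: 1 is consumed but never seen
    	attempt(false) // broker down: 2 is consumed but never seen
    	attempt(true)  // broker records 3 right after 0: "out of order sequence"

    	fmt.Println(brokerSaw) // [0 3]
    }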
The fix to this has a few parts:

* Don't obtain a sequence number from the transaction manager until
  we're sure we want to try publishing the message
* Affix the producer ID and epoch to the message once the sequence is
  generated
* Increment the transaction manager epoch (and reset all sequence
  numbers to zero) when we permanently fail to publish a message. That
  represents a sequence that the broker will never see, so the only safe
  thing to do is to roll over the epoch number.
* Ensure we don't publish message sets that contain messages from
  multiple transaction manager epochs.
---
 async_producer.go      | 60 +++++++++++++++++++++++++++++------
 async_producer_test.go | 69 ++++++++++++++++++++++++++++++++++++++++++
 produce_set.go         | 24 ++++++++++-----
 3 files changed, 135 insertions(+), 18 deletions(-)

diff --git a/async_producer.go b/async_producer.go
index 5c57a8137..d0ce01b66 100644
--- a/async_producer.go
+++ b/async_producer.go
@@ -60,13 +60,28 @@ const (
 	noProducerEpoch = -1
 )
 
-func (t *transactionManager) getAndIncrementSequenceNumber(topic string, partition int32) int32 {
+func (t *transactionManager) getAndIncrementSequenceNumber(topic string, partition int32) (int32, int16) {
 	key := fmt.Sprintf("%s-%d", topic, partition)
 	t.mutex.Lock()
 	defer t.mutex.Unlock()
 	sequence := t.sequenceNumbers[key]
 	t.sequenceNumbers[key] = sequence + 1
-	return sequence
+	return sequence, t.producerEpoch
+}
+
+func (t *transactionManager) bumpEpoch() {
+	t.mutex.Lock()
+	defer t.mutex.Unlock()
+	t.producerEpoch++
+	for k := range t.sequenceNumbers {
+		t.sequenceNumbers[k] = 0
+	}
+}
+
+func (t *transactionManager) getProducerID() (int64, int16) {
+	t.mutex.Lock()
+	defer t.mutex.Unlock()
+	return t.producerID, t.producerEpoch
 }
 
 func newTransactionManager(conf *Config, client Client) (*transactionManager, error) {
@@ -208,6 +223,8 @@ type ProducerMessage struct {
 	flags          flagSet
 	expectation    chan *ProducerError
 	sequenceNumber int32
+	producerEpoch  int16
+	hasSequence    bool
 }
 
 const producerMessageOverhead = 26 // the metadata overhead of CRC, flags, etc.
@@ -234,6 +251,9 @@ func (m *ProducerMessage) byteSize(version int) int {
 func (m *ProducerMessage) clear() {
 	m.flags = 0
 	m.retries = 0
+	m.sequenceNumber = 0
+	m.producerEpoch = 0
+	m.hasSequence = false
 }
 
 // ProducerError is the type of error generated when the producer fails to deliver a message.
@@ -388,10 +408,6 @@ func (tp *topicProducer) dispatch() {
 				continue
 			}
 		}
-		// All messages being retried (sent or not) have already had their retry count updated
-		if tp.parent.conf.Producer.Idempotent && msg.retries == 0 {
-			msg.sequenceNumber = tp.parent.txnmgr.getAndIncrementSequenceNumber(msg.Topic, msg.Partition)
-		}
 
 		handler := tp.handlers[msg.Partition]
 		if handler == nil {
@@ -570,6 +586,15 @@ func (pp *partitionProducer) dispatch() {
 			Logger.Printf("producer/leader/%s/%d selected broker %d\n", pp.topic, pp.partition, pp.leader.ID())
 		}
 
+		// Now that we know we have a broker to actually try and send this message to, generate the sequence
+		// number for it.
+		// All messages being retried (sent or not) have already had their retry count updated
+		// Also, ignore "special" syn/fin messages used to sync the brokerProducer and the topicProducer.
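+		// (Those syn/fin messages carry non-zero flags, which is what the
+		// msg.flags == 0 check below filters out.)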
+		if pp.parent.conf.Producer.Idempotent && msg.retries == 0 && msg.flags == 0 {
+			msg.sequenceNumber, msg.producerEpoch = pp.parent.txnmgr.getAndIncrementSequenceNumber(msg.Topic, msg.Partition)
+			msg.hasSequence = true
+		}
+
 		pp.brokerProducer.input <- msg
 	}
 }
@@ -748,12 +773,21 @@ func (bp *brokerProducer) run() {
 			}
 
 			if bp.buffer.wouldOverflow(msg) {
-				if err := bp.waitForSpace(msg); err != nil {
+				Logger.Printf("producer/broker/%d maximum request accumulated, waiting for space\n", bp.broker.ID())
+				if err := bp.waitForSpace(msg, false); err != nil {
 					bp.parent.retryMessage(msg, err)
 					continue
 				}
 			}
 
+			if bp.parent.txnmgr.producerID != noProducerID && bp.buffer.producerEpoch != msg.producerEpoch {
+				// The epoch was reset, need to roll the buffer over
+				Logger.Printf("producer/broker/%d detected epoch rollover, waiting for new buffer\n", bp.broker.ID())
+				if err := bp.waitForSpace(msg, true); err != nil {
+					bp.parent.retryMessage(msg, err)
+					continue
+				}
+			}
 			if err := bp.buffer.add(msg); err != nil {
 				bp.parent.returnError(msg, err)
 				continue
@@ -809,9 +843,7 @@ func (bp *brokerProducer) needsRetry(msg *ProducerMessage) error {
 	return bp.currentRetries[msg.Topic][msg.Partition]
 }
 
-func (bp *brokerProducer) waitForSpace(msg *ProducerMessage) error {
-	Logger.Printf("producer/broker/%d maximum request accumulated, waiting for space\n", bp.broker.ID())
-
+func (bp *brokerProducer) waitForSpace(msg *ProducerMessage, forceRollover bool) error {
 	for {
 		select {
 		case response := <-bp.responses:
@@ -819,7 +851,7 @@ func (bp *brokerProducer) waitForSpace(msg *ProducerMessage) error {
 			// handling a response can change our state, so re-check some things
 			if reason := bp.needsRetry(msg); reason != nil {
 				return reason
-			} else if !bp.buffer.wouldOverflow(msg) {
+			} else if !bp.buffer.wouldOverflow(msg) && !forceRollover {
 				return nil
 			}
 		case bp.output <- bp.buffer:
@@ -1030,6 +1062,12 @@ func (p *asyncProducer) shutdown() {
 }
 
 func (p *asyncProducer) returnError(msg *ProducerMessage, err error) {
+	// We need to reset the producer ID epoch if we set a sequence number on it, because the broker
+	// will never see a message with this number, so we can never continue the sequence.
+	if msg.hasSequence {
+		Logger.Printf("producer/txnmanager rolling over epoch due to publish failure on %s/%d", msg.Topic, msg.Partition)
+		p.txnmgr.bumpEpoch()
+	}
 	msg.clear()
 	pErr := &ProducerError{Msg: msg, Err: err}
 	if p.conf.Producer.Return.Errors {
diff --git a/async_producer_test.go b/async_producer_test.go
index d0f012d24..46b97790a 100644
--- a/async_producer_test.go
+++ b/async_producer_test.go
@@ -1130,6 +1130,75 @@ func TestAsyncProducerIdempotentErrorOnOutOfSeq(t *testing.T) {
 	closeProducer(t, producer)
 }
 
+func TestAsyncProducerIdempotentEpochRollover(t *testing.T) {
+	broker := NewMockBroker(t, 1)
+	defer broker.Close()
+
+	metadataResponse := &MetadataResponse{
+		Version:      1,
+		ControllerID: 1,
+	}
+	metadataResponse.AddBroker(broker.Addr(), broker.BrokerID())
+	metadataResponse.AddTopicPartition("my_topic", 0, broker.BrokerID(), nil, nil, nil, ErrNoError)
+	broker.Returns(metadataResponse)
+
+	initProducerID := &InitProducerIDResponse{
+		ThrottleTime:  0,
+		ProducerID:    1000,
+		ProducerEpoch: 1,
+	}
+	broker.Returns(initProducerID)
+
+	config := NewConfig()
+	config.Producer.Flush.Messages = 10
+	config.Producer.Flush.Frequency = 10 * time.Millisecond
+	config.Producer.Return.Successes = true
+	config.Producer.Retry.Max = 1 // This test needs to exercise what happens when retries exhaust
+	config.Producer.RequiredAcks = WaitForAll
+	config.Producer.Retry.Backoff = 0
+	config.Producer.Idempotent = true
+	config.Net.MaxOpenRequests = 1
+	config.Version = V0_11_0_0
+
+	producer, err := NewAsyncProducer([]string{broker.Addr()}, config)
+	if err != nil {
+		t.Fatal(err)
+	}
+	defer closeProducer(t, producer)
+
+	producer.Input() <- &ProducerMessage{Topic: "my_topic", Value: StringEncoder("hello")}
+	prodError := &ProduceResponse{
+		Version:      3,
+		ThrottleTime: 0,
+	}
+	prodError.AddTopicPartition("my_topic", 0, ErrBrokerNotAvailable)
+	broker.Returns(prodError)
+	<-producer.Errors()
+
+	lastReqRes := broker.history[len(broker.history)-1]
+	lastProduceBatch := lastReqRes.Request.(*ProduceRequest).records["my_topic"][0].RecordBatch
+	if lastProduceBatch.FirstSequence != 0 {
+		t.Error("first sequence not zero")
+	}
+	if lastProduceBatch.ProducerEpoch != 1 {
+		t.Error("first epoch was not one")
+	}
+
+	// Now if we produce again, the epoch should have rolled over.
+	producer.Input() <- &ProducerMessage{Topic: "my_topic", Value: StringEncoder("hello")}
+	broker.Returns(prodError)
+	<-producer.Errors()
+
+	lastReqRes = broker.history[len(broker.history)-1]
+	lastProduceBatch = lastReqRes.Request.(*ProduceRequest).records["my_topic"][0].RecordBatch
+	if lastProduceBatch.FirstSequence != 0 {
+		t.Error("second sequence not zero")
+	}
+	if lastProduceBatch.ProducerEpoch <= 1 {
+		t.Error("second epoch was not > 1")
+	}
+}
+
 // TestBrokerProducerShutdown ensures that a call to shutdown stops the
 // brokerProducer run() loop and doesn't leak any goroutines
 func TestBrokerProducerShutdown(t *testing.T) {
diff --git a/produce_set.go b/produce_set.go
index 36c43c6a6..9c70f8180 100644
--- a/produce_set.go
+++ b/produce_set.go
@@ -13,17 +13,22 @@ type partitionSet struct {
 }
 
 type produceSet struct {
-	parent *asyncProducer
-	msgs   map[string]map[int32]*partitionSet
+	parent        *asyncProducer
+	msgs          map[string]map[int32]*partitionSet
+	producerID    int64
+	producerEpoch int16
 
 	bufferBytes int
 	bufferCount int
 }
 
 func newProduceSet(parent *asyncProducer) *produceSet {
+	pid, epoch := parent.txnmgr.getProducerID()
 	return &produceSet{
-		msgs:   make(map[string]map[int32]*partitionSet),
-		parent: parent,
+		msgs:          make(map[string]map[int32]*partitionSet),
+		parent:        parent,
+		producerID:    pid,
+		producerEpoch: epoch,
 	}
 }
 
@@ -65,8 +70,8 @@ func (ps *produceSet) add(msg *ProducerMessage) error {
 			Version:          2,
 			Codec:            ps.parent.conf.Producer.Compression,
 			CompressionLevel: ps.parent.conf.Producer.CompressionLevel,
-			ProducerID:       ps.parent.txnmgr.producerID,
-			ProducerEpoch:    ps.parent.txnmgr.producerEpoch,
+			ProducerID:       ps.producerID,
+			ProducerEpoch:    ps.producerEpoch,
 		}
 		if ps.parent.conf.Producer.Idempotent {
 			batch.FirstSequence = msg.sequenceNumber
@@ -78,12 +83,17 @@ func (ps *produceSet) add(msg *ProducerMessage) error {
 		}
 		partitions[msg.Partition] = set
 	}
-	set.msgs = append(set.msgs, msg)
 
 	if ps.parent.conf.Version.IsAtLeast(V0_11_0_0) {
 		if ps.parent.conf.Producer.Idempotent && msg.sequenceNumber < set.recordsToSend.RecordBatch.FirstSequence {
 			return errors.New("assertion failed: message out of sequence added to a batch")
 		}
+	}
+
+	// Past this point we can't return an error, because we've already added the message to the set.
+	set.msgs = append(set.msgs, msg)
+
+	if ps.parent.conf.Version.IsAtLeast(V0_11_0_0) {
 		// We are being conservative here to avoid having to prep encode the record
 		size += maximumRecordOverhead
 		rec := &Record{

From 98976f5904947bda76763ef8635324f43181294b Mon Sep 17 00:00:00 2001
From: Vlad Gorodetsky
Date: Wed, 15 Apr 2020 11:24:34 +0300
Subject: [PATCH 11/41] Add support for kafka 2.5.0

---
 .github/workflows/ci.yml | 2 +-
 utils.go                 | 4 +++-
 2 files changed, 4 insertions(+), 2 deletions(-)

diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml
index 46f015eca..f79a3274c 100644
--- a/.github/workflows/ci.yml
+++ b/.github/workflows/ci.yml
@@ -10,7 +10,7 @@ jobs:
       fail-fast: false
       matrix:
         go-version: [1.12.x, 1.13.x, 1.14.x]
-        kafka-version: [2.3.1, 2.4.0]
+        kafka-version: [2.3.1, 2.4.1, 2.5.0]
         platform: [ubuntu-latest]
 
     env:
diff --git a/utils.go b/utils.go
index 75d0f606a..d138a5eb3 100644
--- a/utils.go
+++ b/utils.go
@@ -160,6 +160,7 @@ var (
 	V2_2_0_0 = newKafkaVersion(2, 2, 0, 0)
 	V2_3_0_0 = newKafkaVersion(2, 3, 0, 0)
 	V2_4_0_0 = newKafkaVersion(2, 4, 0, 0)
+	V2_5_0_0 = newKafkaVersion(2, 5, 0, 0)
 
 	SupportedVersions = []KafkaVersion{
 		V0_8_2_0,
@@ -185,9 +186,10 @@ var (
 		V2_2_0_0,
 		V2_3_0_0,
 		V2_4_0_0,
+		V2_5_0_0,
 	}
 	MinVersion = V0_8_2_0
-	MaxVersion = V2_4_0_0
+	MaxVersion = V2_5_0_0
 )
 
 //ParseKafkaVersion parses and returns kafka version or error from a string

From ca141913f2a03afc0ca77916c2e7932f284ae950 Mon Sep 17 00:00:00 2001
From: KJ Tsanaktsidis
Date: Thu, 16 Apr 2020 17:13:44 +1000
Subject: [PATCH 12/41] Add a functional test to cover idempotent production

---
 functional_producer_test.go | 78 +++++++++++++++++++++++++++++++++++++
 1 file changed, 78 insertions(+)

diff --git a/functional_producer_test.go b/functional_producer_test.go
index 1fa0ba1c9..e589a8eb8 100644
--- a/functional_producer_test.go
+++ b/functional_producer_test.go
@@ -3,6 +3,7 @@ package sarama
 import (
 	"fmt"
 	"os"
+	"strings"
 	"sync"
 	"testing"
 	"time"
@@ -96,6 +97,83 @@ func TestFuncProducingToInvalidTopic(t *testing.T) {
 	safeClose(t, producer)
}
 
+func TestFuncProducingIdempotentWithBrokerFailure(t *testing.T) {
+	setupFunctionalTest(t)
+	defer teardownFunctionalTest(t)
+
+	config := NewConfig()
+	config.Producer.Flush.Frequency = 250 * time.Millisecond
+	config.Producer.Idempotent = true
+	config.Producer.Timeout = 500 * time.Millisecond
+	config.Producer.Retry.Max = 1
+	config.Producer.Retry.Backoff = 500 * time.Millisecond
+	config.Producer.Return.Successes = true
+	config.Producer.Return.Errors = true
+	config.Producer.RequiredAcks = WaitForAll
+	config.Net.MaxOpenRequests = 1
+	config.Version = V0_11_0_0
+
+	producer, err := NewSyncProducer(kafkaBrokers, config)
+	if err != nil {
+		t.Fatal(err)
+	}
+	defer safeClose(t, producer)
+
+	// Successfully publish a few messages
+	for i := 0; i < 10; i++ {
+		_, _, err = producer.SendMessage(&ProducerMessage{
+			Topic: "test.1",
+			Value: StringEncoder(fmt.Sprintf("%d message", i)),
+		})
+		if err != nil {
+			t.Fatal(err)
+		}
+	}
+
+	// break the brokers.
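+	// (The functional-test harness routes every broker connection through a
+	// TCP proxy; disabling the "kafka" proxies below severs those connections
+	// without stopping the brokers themselves.)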
+ for proxyName, proxy := range Proxies { + if !strings.Contains(proxyName, "kafka") { + continue + } + if err := proxy.Disable(); err != nil { + t.Fatal(err) + } + } + + // This should fail hard now + for i := 10; i < 20; i++ { + _, _, err = producer.SendMessage(&ProducerMessage{ + Topic: "test.1", + Value: StringEncoder(fmt.Sprintf("%d message", i)), + }) + if err == nil { + t.Fatal(err) + } + } + + // Now bring the proxy back up + for proxyName, proxy := range Proxies { + if !strings.Contains(proxyName, "kafka") { + continue + } + if err := proxy.Enable(); err != nil { + t.Fatal(err) + } + } + + // We should be able to publish again (once everything calms down) + // (otherwise it times out) + for { + _, _, err = producer.SendMessage(&ProducerMessage{ + Topic: "test.1", + Value: StringEncoder("comeback message"), + }) + if err == nil { + break + } + } +} + func testProducingMessages(t *testing.T, config *Config) { setupFunctionalTest(t) defer teardownFunctionalTest(t) From 31886822ab95ccf89a07ec6ce844590357ed073d Mon Sep 17 00:00:00 2001 From: Pierre Grimaud Date: Sun, 19 Apr 2020 02:45:33 +0200 Subject: [PATCH 13/41] Fix typos --- acl_create_response.go | 2 +- alter_configs_response.go | 4 ++-- alter_partition_reassignments_request_test.go | 6 +++--- api_versions_response.go | 2 +- broker.go | 6 +++--- broker_test.go | 2 +- client_test.go | 2 +- examples/consumergroup/main.go | 2 +- list_partition_reassignments_request_test.go | 2 +- record_test.go | 2 +- sasl_authenticate_response_test.go | 2 +- tools/kafka-console-producer/README.md | 2 +- 12 files changed, 17 insertions(+), 17 deletions(-) diff --git a/acl_create_response.go b/acl_create_response.go index bc018ed00..14b1b9e13 100644 --- a/acl_create_response.go +++ b/acl_create_response.go @@ -2,7 +2,7 @@ package sarama import "time" -//CreateAclsResponse is a an acl reponse creation type +//CreateAclsResponse is a an acl response creation type type CreateAclsResponse struct { ThrottleTime time.Duration AclCreationResponses []*AclCreationResponse diff --git a/alter_configs_response.go b/alter_configs_response.go index 99a526005..3266f9274 100644 --- a/alter_configs_response.go +++ b/alter_configs_response.go @@ -2,13 +2,13 @@ package sarama import "time" -//AlterConfigsResponse is a reponse type for alter config +//AlterConfigsResponse is a response type for alter config type AlterConfigsResponse struct { ThrottleTime time.Duration Resources []*AlterConfigsResourceResponse } -//AlterConfigsResourceResponse is a reponse type for alter config resource +//AlterConfigsResourceResponse is a response type for alter config resource type AlterConfigsResourceResponse struct { ErrorCode int16 ErrorMsg string diff --git a/alter_partition_reassignments_request_test.go b/alter_partition_reassignments_request_test.go index 8d282729d..c917f2d79 100644 --- a/alter_partition_reassignments_request_test.go +++ b/alter_partition_reassignments_request_test.go @@ -4,13 +4,13 @@ import "testing" var ( alterPartitionReassignmentsRequestNoBlock = []byte{ - 0, 0, 39, 16, // timout 10000 + 0, 0, 39, 16, // timeout 10000 1, // 1-1=0 blocks 0, // empty tagged fields } alterPartitionReassignmentsRequestOneBlock = []byte{ - 0, 0, 39, 16, // timout 10000 + 0, 0, 39, 16, // timeout 10000 2, // 2-1=1 block 6, 116, 111, 112, 105, 99, // topic name "topic" as compact string 2, // 2-1=1 partitions @@ -22,7 +22,7 @@ var ( } alterPartitionReassignmentsAbortRequest = []byte{ - 0, 0, 39, 16, // timout 10000 + 0, 0, 39, 16, // timeout 10000 2, // 2-1=1 block 6, 116, 111, 
112, 105, 99, // topic name "topic" as compact string 2, // 2-1=1 partitions diff --git a/api_versions_response.go b/api_versions_response.go index 582e29db4..d09e8d9e1 100644 --- a/api_versions_response.go +++ b/api_versions_response.go @@ -1,6 +1,6 @@ package sarama -//ApiVersionsResponseBlock is an api version reponse block type +//ApiVersionsResponseBlock is an api version response block type type ApiVersionsResponseBlock struct { ApiKey int16 MinVersion int16 diff --git a/broker.go b/broker.go index 4f3991af7..90a1db981 100644 --- a/broker.go +++ b/broker.go @@ -73,7 +73,7 @@ const ( // server negotiate SASL by wrapping tokens with Kafka protocol headers. SASLHandshakeV1 = int16(1) // SASLExtKeyAuth is the reserved extension key name sent as part of the - // SASL/OAUTHBEARER intial client response + // SASL/OAUTHBEARER initial client response SASLExtKeyAuth = "auth" ) @@ -369,7 +369,7 @@ func (b *Broker) Fetch(request *FetchRequest) (*FetchResponse, error) { return response, nil } -//CommitOffset return an Offset commit reponse or error +//CommitOffset return an Offset commit response or error func (b *Broker) CommitOffset(request *OffsetCommitRequest) (*OffsetCommitResponse, error) { response := new(OffsetCommitResponse) @@ -1014,7 +1014,7 @@ func (b *Broker) sendAndReceiveSASLHandshake(saslType SASLMechanism, version int // When credentials are invalid, Kafka replies with a SaslAuthenticate response // containing an error code and message detailing the authentication failure. func (b *Broker) sendAndReceiveSASLPlainAuth() error { - // default to V0 to allow for backward compatability when SASL is enabled + // default to V0 to allow for backward compatibility when SASL is enabled // but not the handshake if b.conf.Net.SASL.Handshake { handshakeErr := b.sendAndReceiveSASLHandshake(SASLTypePlaintext, b.conf.Net.SASL.Version) diff --git a/broker_test.go b/broker_test.go index e2b17462c..08fe77f7f 100644 --- a/broker_test.go +++ b/broker_test.go @@ -295,7 +295,7 @@ func TestSASLSCRAMSHAXXX(t *testing.T) { scramChallengeResp string }{ { - name: "SASL/SCRAMSHAXXX successfull authentication", + name: "SASL/SCRAMSHAXXX successful authentication", mockHandshakeErr: ErrNoError, scramClient: &MockSCRAMClient{}, scramChallengeResp: "pong", diff --git a/client_test.go b/client_test.go index cd47a500b..9eee454da 100644 --- a/client_test.go +++ b/client_test.go @@ -642,7 +642,7 @@ func TestClientController(t *testing.T) { } defer safeClose(t, client2) if _, err = client2.Controller(); err != ErrUnsupportedVersion { - t.Errorf("Expected Contoller() to return %s, found %s", ErrUnsupportedVersion, err) + t.Errorf("Expected Controller() to return %s, found %s", ErrUnsupportedVersion, err) } } diff --git a/examples/consumergroup/main.go b/examples/consumergroup/main.go index 776448f59..9a8b7cd8d 100644 --- a/examples/consumergroup/main.go +++ b/examples/consumergroup/main.go @@ -28,7 +28,7 @@ func init() { flag.StringVar(&brokers, "brokers", "", "Kafka bootstrap brokers to connect to, as a comma separated list") flag.StringVar(&group, "group", "", "Kafka consumer group definition") flag.StringVar(&version, "version", "2.1.1", "Kafka cluster version") - flag.StringVar(&topics, "topics", "", "Kafka topics to be consumed, as a comma seperated list") + flag.StringVar(&topics, "topics", "", "Kafka topics to be consumed, as a comma separated list") flag.StringVar(&assignor, "assignor", "range", "Consumer group partition assignment strategy (range, roundrobin, sticky)") flag.BoolVar(&oldest, "oldest", true, 
"Kafka consumer consume initial offset from oldest") flag.BoolVar(&verbose, "verbose", false, "Sarama logging") diff --git a/list_partition_reassignments_request_test.go b/list_partition_reassignments_request_test.go index d9f9f92ca..41a5b9cf1 100644 --- a/list_partition_reassignments_request_test.go +++ b/list_partition_reassignments_request_test.go @@ -4,7 +4,7 @@ import "testing" var ( listPartitionReassignmentsRequestOneBlock = []byte{ - 0, 0, 39, 16, // timout 10000 + 0, 0, 39, 16, // timeout 10000 2, // 2-1=1 block 6, 116, 111, 112, 105, 99, // topic name "topic" as compact string 2, // 2-1=1 partitions diff --git a/record_test.go b/record_test.go index 2756c5b25..1aceeda2c 100644 --- a/record_test.go +++ b/record_test.go @@ -283,7 +283,7 @@ func TestRecordBatchDecoding(t *testing.T) { r.length = varintLengthField{} } // The compression level is not restored on decoding. It is not needed - // anyway. We only set it here to ensure that comparision succeeds. + // anyway. We only set it here to ensure that comparison succeeds. batch.CompressionLevel = tc.batch.CompressionLevel if !reflect.DeepEqual(batch, tc.batch) { t.Errorf(spew.Sprintf("invalid decode of %s\ngot %+v\nwanted %+v", tc.name, batch, tc.batch)) diff --git a/sasl_authenticate_response_test.go b/sasl_authenticate_response_test.go index 04447966a..c555cfbfa 100644 --- a/sasl_authenticate_response_test.go +++ b/sasl_authenticate_response_test.go @@ -17,5 +17,5 @@ func TestSaslAuthenticateResponse(t *testing.T) { response.ErrorMessage = &msg response.SaslAuthBytes = []byte(`msg`) - testResponse(t, "authenticate reponse", response, saslAuthenticatResponseErr) + testResponse(t, "authenticate response", response, saslAuthenticatResponseErr) } diff --git a/tools/kafka-console-producer/README.md b/tools/kafka-console-producer/README.md index 6b3a65f21..7802b8bdf 100644 --- a/tools/kafka-console-producer/README.md +++ b/tools/kafka-console-producer/README.md @@ -25,7 +25,7 @@ A simple command line tool to produce a single message to Kafka. # Partitioning: by default, kafka-console-producer will partition as follows: # - manual partitioning if a -partition is provided # - hash partitioning by key if a -key is provided - # - random partioning otherwise. + # - random partitioning otherwise. 
# # You can override this using the -partitioner argument: echo "hello world" | kafka-console-producer -topic=test -key=key -partitioner=random From fb37eafeb7e263413f61b5fa4812fc3b25628f02 Mon Sep 17 00:00:00 2001 From: aLeX Date: Wed, 11 Mar 2020 10:41:28 +0100 Subject: [PATCH 14/41] Handle errors with no message --- admin.go | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/admin.go b/admin.go index a7d733da1..fc08ff3c5 100644 --- a/admin.go +++ b/admin.go @@ -640,6 +640,9 @@ func (ca *clusterAdmin) DescribeConfig(resource ConfigResource) ([]ConfigEntry, if rspResource.ErrorMsg != "" { return nil, errors.New(rspResource.ErrorMsg) } + if rspResource.ErrorCode != 0 { + return nil, KError(rspResource.ErrorCode) + } for _, cfgEntry := range rspResource.Configs { entries = append(entries, *cfgEntry) } @@ -688,6 +691,9 @@ func (ca *clusterAdmin) AlterConfig(resourceType ConfigResourceType, name string if rspResource.ErrorMsg != "" { return errors.New(rspResource.ErrorMsg) } + if rspResource.ErrorCode != 0 { + return KError(rspResource.ErrorCode) + } } } return nil From c55e6a91be081c404a1b4239f5f42618fd7e0162 Mon Sep 17 00:00:00 2001 From: Alexandre Griffaut Date: Sun, 19 Apr 2020 14:52:43 +0200 Subject: [PATCH 15/41] AdminClient Test Handle errors with no message --- admin_test.go | 64 ++++++++++++++++++++++++++++++++++++++++++++++++ mockresponses.go | 48 ++++++++++++++++++++++++++++++++++++ 2 files changed, 112 insertions(+) diff --git a/admin_test.go b/admin_test.go index bcec47f61..2902f6cdd 100644 --- a/admin_test.go +++ b/admin_test.go @@ -704,6 +704,39 @@ func TestClusterAdminDescribeConfig(t *testing.T) { } } +func TestClusterAdminDescribeConfigWithErrorCode(t *testing.T) { + seedBroker := NewMockBroker(t, 1) + defer seedBroker.Close() + + seedBroker.SetHandlerByMap(map[string]MockResponse{ + "MetadataRequest": NewMockMetadataResponse(t). + SetController(seedBroker.BrokerID()). + SetBroker(seedBroker.Addr(), seedBroker.BrokerID()), + "DescribeConfigsRequest": NewMockDescribeConfigsResponseWithErrorCode(t), + }) + + config := NewConfig() + config.Version = V1_1_0_0 + admin, err := NewClusterAdmin([]string{seedBroker.Addr()}, config) + if err != nil { + t.Fatal(err) + } + defer func() { + _ = admin.Close() + }() + + resource := ConfigResource{ + Name: "r1", + Type: TopicResource, + ConfigNames: []string{"my_topic"}, + } + + _, err = admin.DescribeConfig(resource) + if err == nil { + t.Fatal(errors.New("ErrorCode present but no Error returned")) + } +} + // TestClusterAdminDescribeBrokerConfig ensures that a describe broker config // is sent to the broker in the resource struct, _not_ the controller func TestClusterAdminDescribeBrokerConfig(t *testing.T) { @@ -789,6 +822,37 @@ func TestClusterAdminAlterConfig(t *testing.T) { } } +func TestClusterAdminAlterConfigWithErrorCode(t *testing.T) { + seedBroker := NewMockBroker(t, 1) + defer seedBroker.Close() + + seedBroker.SetHandlerByMap(map[string]MockResponse{ + "MetadataRequest": NewMockMetadataResponse(t). + SetController(seedBroker.BrokerID()). 
+ SetBroker(seedBroker.Addr(), seedBroker.BrokerID()), + "AlterConfigsRequest": NewMockAlterConfigsResponseWithErrorCode(t), + }) + + config := NewConfig() + config.Version = V1_0_0_0 + admin, err := NewClusterAdmin([]string{seedBroker.Addr()}, config) + if err != nil { + t.Fatal(err) + } + defer func() { + _ = admin.Close() + }() + + var value string + entries := make(map[string]*string) + value = "60000" + entries["retention.ms"] = &value + err = admin.AlterConfig(TopicResource, "my_topic", entries, false) + if err == nil { + t.Fatal(errors.New("ErrorCode present but no Error returned")) + } +} + func TestClusterAdminAlterBrokerConfig(t *testing.T) { controllerBroker := NewMockBroker(t, 1) defer controllerBroker.Close() diff --git a/mockresponses.go b/mockresponses.go index e77463a58..a9eb0376c 100644 --- a/mockresponses.go +++ b/mockresponses.go @@ -853,6 +853,31 @@ func (mr *MockDescribeConfigsResponse) For(reqBody versionedDecoder) encoderWith return res } +type MockDescribeConfigsResponseWithErrorCode struct { + t TestReporter +} + +func NewMockDescribeConfigsResponseWithErrorCode(t TestReporter) *MockDescribeConfigsResponseWithErrorCode { + return &MockDescribeConfigsResponseWithErrorCode{t: t} +} + +func (mr *MockDescribeConfigsResponseWithErrorCode) For(reqBody versionedDecoder) encoderWithHeader { + req := reqBody.(*DescribeConfigsRequest) + res := &DescribeConfigsResponse{ + Version: req.Version, + } + + for _, r := range req.Resources { + res.Resources = append(res.Resources, &ResourceResponse{ + Name: r.Name, + Type: r.Type, + ErrorCode: 83, + ErrorMsg: "", + }) + } + return res +} + type MockAlterConfigsResponse struct { t TestReporter } @@ -874,6 +899,29 @@ func (mr *MockAlterConfigsResponse) For(reqBody versionedDecoder) encoderWithHea return res } +type MockAlterConfigsResponseWithErrorCode struct { + t TestReporter +} + +func NewMockAlterConfigsResponseWithErrorCode(t TestReporter) *MockAlterConfigsResponseWithErrorCode { + return &MockAlterConfigsResponseWithErrorCode{t: t} +} + +func (mr *MockAlterConfigsResponseWithErrorCode) For(reqBody versionedDecoder) encoderWithHeader { + req := reqBody.(*AlterConfigsRequest) + res := &AlterConfigsResponse{} + + for _, r := range req.Resources { + res.Resources = append(res.Resources, &AlterConfigsResourceResponse{ + Name: r.Name, + Type: r.Type, + ErrorCode: 83, + ErrorMsg: "", + }) + } + return res +} + type MockCreateAclsResponse struct { t TestReporter } From 01ca5be4412be156c86044766287eccf96ffacf3 Mon Sep 17 00:00:00 2001 From: Ruben Vargas Date: Thu, 22 Aug 2019 09:50:27 -0500 Subject: [PATCH 16/41] Expose kerberos fast negotiation configuration Signed-off-by: Ruben --- gssapi_kerberos.go | 1 + kerberos_client.go | 4 ++-- 2 files changed, 3 insertions(+), 2 deletions(-) diff --git a/gssapi_kerberos.go b/gssapi_kerberos.go index 98be0f508..f6877c8a2 100644 --- a/gssapi_kerberos.go +++ b/gssapi_kerberos.go @@ -34,6 +34,7 @@ type GSSAPIConfig struct { Username string Password string Realm string + DisablePAFXFAST bool } type GSSAPIKerberosAuth struct { diff --git a/kerberos_client.go b/kerberos_client.go index 91b998f5d..0394182da 100644 --- a/kerberos_client.go +++ b/kerberos_client.go @@ -42,10 +42,10 @@ func createClient(config *GSSAPIConfig, cfg *krb5config.Config) (KerberosClient, if err != nil { return nil, err } - client = krb5client.NewClientWithKeytab(config.Username, config.Realm, kt, cfg) + client = krb5client.NewClientWithKeytab(config.Username, config.Realm, kt, cfg, 
krb5client.DisablePAFXFAST(config.DisablePAFXFAST))
 	} else {
 		client = krb5client.NewClientWithPassword(config.Username,
-			config.Realm, config.Password, cfg)
+			config.Realm, config.Password, cfg, krb5client.DisablePAFXFAST(config.DisablePAFXFAST))
 	}
 	return &KerberosGoKrb5Client{*client}, nil
 }

From dd2a9df207d52e83dd68b9d7ce127fb8ea0380ba Mon Sep 17 00:00:00 2001
From: Ruben Vargas
Date: Mon, 20 Apr 2020 16:46:49 -0500
Subject: [PATCH 17/41] Format NewKerberosClient comments according to golang
 standard, added test for DisablePAFXFast

Signed-off-by: Ruben

---
 kerberos_client.go      | 11 +++--------
 kerberos_client_test.go | 24 ++++++++++++++++++++++++
 2 files changed, 27 insertions(+), 8 deletions(-)

diff --git a/kerberos_client.go b/kerberos_client.go
index 0394182da..ebc114179 100644
--- a/kerberos_client.go
+++ b/kerberos_client.go
@@ -19,14 +19,9 @@ func (c *KerberosGoKrb5Client) CName() types.PrincipalName {
 	return c.Credentials.CName()
 }
 
-/*
-*
-* Create kerberos client used to obtain TGT and TGS tokens
-* used gokrb5 library, which is a pure go kerberos client with
-* some GSS-API capabilities, and SPNEGO support. Kafka does not use SPNEGO
-* it uses pure Kerberos 5 solution (RFC-4121 and RFC-4120).
-*
- */
+// NewKerberosClient creates a kerberos client used to obtain TGT and TGS tokens.
+// It uses a pure go Kerberos 5 solution (RFC-4121 and RFC-4120), built on the
+// gokrb5 library, which is a pure go kerberos client with some GSS-API capabilities.
 func NewKerberosClient(config *GSSAPIConfig) (KerberosClient, error) {
 	cfg, err := krb5config.Load(config.KerberosConfigPath)
 	if err != nil {
diff --git a/kerberos_client_test.go b/kerberos_client_test.go
index 8af0e17ca..003da6a3e 100644
--- a/kerberos_client_test.go
+++ b/kerberos_client_test.go
@@ -84,3 +84,27 @@ func TestCreateWithKeyTab(t *testing.T) {
 		t.Errorf("Expected error:%s, got:%s.", err, expectedErr)
 	}
 }
+
+func TestCreateWithDisablePAFXFAST(t *testing.T) {
+	kerberosConfig, err := krbcfg.NewConfigFromString(testdata.TEST_KRB5CONF)
+	if err != nil {
+		t.Fatal(err)
+	}
+	// Expect client creation with a missing keytab to fail with a
+	// "no such file or directory" error
+	expectedErr := errors.New("open nonexist.keytab: no such file or directory")
+	clientConfig := NewConfig()
+	clientConfig.Net.SASL.Mechanism = SASLTypeGSSAPI
+	clientConfig.Net.SASL.Enable = true
+	clientConfig.Net.SASL.GSSAPI.ServiceName = "kafka"
+	clientConfig.Net.SASL.GSSAPI.Realm = "EXAMPLE.COM"
+	clientConfig.Net.SASL.GSSAPI.Username = "client"
+	clientConfig.Net.SASL.GSSAPI.AuthType = KRB5_KEYTAB_AUTH
+	clientConfig.Net.SASL.GSSAPI.KeyTabPath = "nonexist.keytab"
+	clientConfig.Net.SASL.GSSAPI.KerberosConfigPath = "/etc/krb5.conf"
+	clientConfig.Net.SASL.GSSAPI.DisablePAFXFAST = true
+
+	_, err = createClient(&clientConfig.Net.SASL.GSSAPI, kerberosConfig)
+	if err.Error() != expectedErr.Error() {
+		t.Errorf("Expected error:%s, got:%s.", err, expectedErr)
+	}
+}

From c6eb1d42e7342efbe9f7074add2efad3d0de5bd1 Mon Sep 17 00:00:00 2001
From: Kevin Cross <7290403+KevinJCross@users.noreply.github.com>
Date: Tue, 5 May 2020 14:21:22 +0100
Subject: [PATCH 18/41] fix: Allow TLS to work over socks proxy.
(#1666) --- broker.go | 21 +- broker_test.go | 560 +++++++++++++++++++++++---------------------- client_tls_test.go | 33 +-- config.go | 13 ++ 4 files changed, 327 insertions(+), 300 deletions(-) diff --git a/broker.go b/broker.go index 90a1db981..f379dca62 100644 --- a/broker.go +++ b/broker.go @@ -154,25 +154,20 @@ func (b *Broker) Open(conf *Config) error { go withRecover(func() { defer b.lock.Unlock() - dialer := net.Dialer{ - Timeout: conf.Net.DialTimeout, - KeepAlive: conf.Net.KeepAlive, - LocalAddr: conf.Net.LocalAddr, - } - - if conf.Net.TLS.Enable { - b.conn, b.connErr = tls.DialWithDialer(&dialer, "tcp", b.addr, conf.Net.TLS.Config) - } else if conf.Net.Proxy.Enable { - b.conn, b.connErr = conf.Net.Proxy.Dialer.Dial("tcp", b.addr) - } else { - b.conn, b.connErr = dialer.Dial("tcp", b.addr) - } + dialer := conf.getDialer() + b.conn, b.connErr = dialer.Dial("tcp", b.addr) if b.connErr != nil { Logger.Printf("Failed to connect to broker %s: %s\n", b.addr, b.connErr) b.conn = nil atomic.StoreInt32(&b.opened, 0) return } + + if conf.Net.TLS.Enable { + Logger.Printf("Using tls") + b.conn = tls.Client(b.conn, conf.Net.TLS.Config) + } + b.conn = newBufConn(b.conn) b.conf = conf diff --git a/broker_test.go b/broker_test.go index 08fe77f7f..c7d548600 100644 --- a/broker_test.go +++ b/broker_test.go @@ -80,38 +80,40 @@ func TestBrokerAccessors(t *testing.T) { func TestSimpleBrokerCommunication(t *testing.T) { for _, tt := range brokerTestTable { - Logger.Printf("Testing broker communication for %s", tt.name) - mb := NewMockBroker(t, 0) - mb.Returns(&mockEncoder{tt.response}) - pendingNotify := make(chan brokerMetrics) - // Register a callback to be notified about successful requests - mb.SetNotifier(func(bytesRead, bytesWritten int) { - pendingNotify <- brokerMetrics{bytesRead, bytesWritten} + t.Run(tt.name, func(t *testing.T) { + Logger.Printf("Testing broker communication for %s", tt.name) + mb := NewMockBroker(t, 0) + mb.Returns(&mockEncoder{tt.response}) + pendingNotify := make(chan brokerMetrics) + // Register a callback to be notified about successful requests + mb.SetNotifier(func(bytesRead, bytesWritten int) { + pendingNotify <- brokerMetrics{bytesRead, bytesWritten} + }) + broker := NewBroker(mb.Addr()) + // Set the broker id in order to validate local broker metrics + broker.id = 0 + conf := NewConfig() + conf.Version = tt.version + err := broker.Open(conf) + if err != nil { + t.Fatal(err) + } + tt.runner(t, broker) + // Wait up to 500 ms for the remote broker to process the request and + // notify us about the metrics + timeout := 500 * time.Millisecond + select { + case mockBrokerMetrics := <-pendingNotify: + validateBrokerMetrics(t, broker, mockBrokerMetrics) + case <-time.After(timeout): + t.Errorf("No request received for: %s after waiting for %v", tt.name, timeout) + } + mb.Close() + err = broker.Close() + if err != nil { + t.Error(err) + } }) - broker := NewBroker(mb.Addr()) - // Set the broker id in order to validate local broker metrics - broker.id = 0 - conf := NewConfig() - conf.Version = tt.version - err := broker.Open(conf) - if err != nil { - t.Fatal(err) - } - tt.runner(t, broker) - // Wait up to 500 ms for the remote broker to process the request and - // notify us about the metrics - timeout := 500 * time.Millisecond - select { - case mockBrokerMetrics := <-pendingNotify: - validateBrokerMetrics(t, broker, mockBrokerMetrics) - case <-time.After(timeout): - t.Errorf("No request received for: %s after waiting for %v", tt.name, timeout) - } - mb.Close() - err = 
broker.Close() - if err != nil { - t.Error(err) - } } } @@ -204,58 +206,60 @@ func TestSASLOAuthBearer(t *testing.T) { } for i, test := range testTable { - // mockBroker mocks underlying network logic and broker responses - mockBroker := NewMockBroker(t, 0) + t.Run(test.name, func(t *testing.T) { + // mockBroker mocks underlying network logic and broker responses + mockBroker := NewMockBroker(t, 0) - mockBroker.SetHandlerByMap(map[string]MockResponse{ - "SaslAuthenticateRequest": test.mockSASLAuthResponse, - "SaslHandshakeRequest": test.mockSASLHandshakeResponse, - }) + mockBroker.SetHandlerByMap(map[string]MockResponse{ + "SaslAuthenticateRequest": test.mockSASLAuthResponse, + "SaslHandshakeRequest": test.mockSASLHandshakeResponse, + }) - // broker executes SASL requests against mockBroker - broker := NewBroker(mockBroker.Addr()) - broker.requestRate = metrics.NilMeter{} - broker.outgoingByteRate = metrics.NilMeter{} - broker.incomingByteRate = metrics.NilMeter{} - broker.requestSize = metrics.NilHistogram{} - broker.responseSize = metrics.NilHistogram{} - broker.responseRate = metrics.NilMeter{} - broker.requestLatency = metrics.NilHistogram{} - broker.requestsInFlight = metrics.NilCounter{} + // broker executes SASL requests against mockBroker + broker := NewBroker(mockBroker.Addr()) + broker.requestRate = metrics.NilMeter{} + broker.outgoingByteRate = metrics.NilMeter{} + broker.incomingByteRate = metrics.NilMeter{} + broker.requestSize = metrics.NilHistogram{} + broker.responseSize = metrics.NilHistogram{} + broker.responseRate = metrics.NilMeter{} + broker.requestLatency = metrics.NilHistogram{} + broker.requestsInFlight = metrics.NilCounter{} - conf := NewConfig() - conf.Net.SASL.Mechanism = SASLTypeOAuth - conf.Net.SASL.TokenProvider = test.tokProvider + conf := NewConfig() + conf.Net.SASL.Mechanism = SASLTypeOAuth + conf.Net.SASL.TokenProvider = test.tokProvider - broker.conf = conf + broker.conf = conf - dialer := net.Dialer{ - Timeout: conf.Net.DialTimeout, - KeepAlive: conf.Net.KeepAlive, - LocalAddr: conf.Net.LocalAddr, - } + dialer := net.Dialer{ + Timeout: conf.Net.DialTimeout, + KeepAlive: conf.Net.KeepAlive, + LocalAddr: conf.Net.LocalAddr, + } - conn, err := dialer.Dial("tcp", mockBroker.listener.Addr().String()) + conn, err := dialer.Dial("tcp", mockBroker.listener.Addr().String()) - if err != nil { - t.Fatal(err) - } + if err != nil { + t.Fatal(err) + } - broker.conn = conn + broker.conn = conn - err = broker.authenticateViaSASL() + err = broker.authenticateViaSASL() - if test.expectedBrokerError != ErrNoError { - if test.expectedBrokerError != err { - t.Errorf("[%d]:[%s] Expected %s auth error, got %s\n", i, test.name, test.expectedBrokerError, err) + if test.expectedBrokerError != ErrNoError { + if test.expectedBrokerError != err { + t.Errorf("[%d]:[%s] Expected %s auth error, got %s\n", i, test.name, test.expectedBrokerError, err) + } + } else if test.expectClientErr && err == nil { + t.Errorf("[%d]:[%s] Expected a client error and got none\n", i, test.name) + } else if !test.expectClientErr && err != nil { + t.Errorf("[%d]:[%s] Unexpected error, got %s\n", i, test.name, err) } - } else if test.expectClientErr && err == nil { - t.Errorf("[%d]:[%s] Expected a client error and got none\n", i, test.name) - } else if !test.expectClientErr && err != nil { - t.Errorf("[%d]:[%s] Unexpected error, got %s\n", i, test.name, err) - } - mockBroker.Close() + mockBroker.Close() + }) } } @@ -264,7 +268,7 @@ type MockSCRAMClient struct { done bool } -func (m *MockSCRAMClient) 
Begin(userName, password, authzID string) (err error) { +func (m *MockSCRAMClient) Begin(_, _, _ string) (err error) { return nil } @@ -325,70 +329,72 @@ func TestSASLSCRAMSHAXXX(t *testing.T) { } for i, test := range testTable { - // mockBroker mocks underlying network logic and broker responses - mockBroker := NewMockBroker(t, 0) - broker := NewBroker(mockBroker.Addr()) - // broker executes SASL requests against mockBroker - broker.requestRate = metrics.NilMeter{} - broker.outgoingByteRate = metrics.NilMeter{} - broker.incomingByteRate = metrics.NilMeter{} - broker.requestSize = metrics.NilHistogram{} - broker.responseSize = metrics.NilHistogram{} - broker.responseRate = metrics.NilMeter{} - broker.requestLatency = metrics.NilHistogram{} - broker.requestsInFlight = metrics.NilCounter{} + t.Run(test.name, func(t *testing.T) { + // mockBroker mocks underlying network logic and broker responses + mockBroker := NewMockBroker(t, 0) + broker := NewBroker(mockBroker.Addr()) + // broker executes SASL requests against mockBroker + broker.requestRate = metrics.NilMeter{} + broker.outgoingByteRate = metrics.NilMeter{} + broker.incomingByteRate = metrics.NilMeter{} + broker.requestSize = metrics.NilHistogram{} + broker.responseSize = metrics.NilHistogram{} + broker.responseRate = metrics.NilMeter{} + broker.requestLatency = metrics.NilHistogram{} + broker.requestsInFlight = metrics.NilCounter{} - mockSASLAuthResponse := NewMockSaslAuthenticateResponse(t).SetAuthBytes([]byte(test.scramChallengeResp)) - mockSASLHandshakeResponse := NewMockSaslHandshakeResponse(t).SetEnabledMechanisms([]string{SASLTypeSCRAMSHA256, SASLTypeSCRAMSHA512}) + mockSASLAuthResponse := NewMockSaslAuthenticateResponse(t).SetAuthBytes([]byte(test.scramChallengeResp)) + mockSASLHandshakeResponse := NewMockSaslHandshakeResponse(t).SetEnabledMechanisms([]string{SASLTypeSCRAMSHA256, SASLTypeSCRAMSHA512}) - if test.mockSASLAuthErr != ErrNoError { - mockSASLAuthResponse = mockSASLAuthResponse.SetError(test.mockSASLAuthErr) - } - if test.mockHandshakeErr != ErrNoError { - mockSASLHandshakeResponse = mockSASLHandshakeResponse.SetError(test.mockHandshakeErr) - } + if test.mockSASLAuthErr != ErrNoError { + mockSASLAuthResponse = mockSASLAuthResponse.SetError(test.mockSASLAuthErr) + } + if test.mockHandshakeErr != ErrNoError { + mockSASLHandshakeResponse = mockSASLHandshakeResponse.SetError(test.mockHandshakeErr) + } - mockBroker.SetHandlerByMap(map[string]MockResponse{ - "SaslAuthenticateRequest": mockSASLAuthResponse, - "SaslHandshakeRequest": mockSASLHandshakeResponse, - }) + mockBroker.SetHandlerByMap(map[string]MockResponse{ + "SaslAuthenticateRequest": mockSASLAuthResponse, + "SaslHandshakeRequest": mockSASLHandshakeResponse, + }) - conf := NewConfig() - conf.Net.SASL.Mechanism = SASLTypeSCRAMSHA512 - conf.Net.SASL.SCRAMClientGeneratorFunc = func() SCRAMClient { return test.scramClient } + conf := NewConfig() + conf.Net.SASL.Mechanism = SASLTypeSCRAMSHA512 + conf.Net.SASL.SCRAMClientGeneratorFunc = func() SCRAMClient { return test.scramClient } - broker.conf = conf - dialer := net.Dialer{ - Timeout: conf.Net.DialTimeout, - KeepAlive: conf.Net.KeepAlive, - LocalAddr: conf.Net.LocalAddr, - } + broker.conf = conf + dialer := net.Dialer{ + Timeout: conf.Net.DialTimeout, + KeepAlive: conf.Net.KeepAlive, + LocalAddr: conf.Net.LocalAddr, + } - conn, err := dialer.Dial("tcp", mockBroker.listener.Addr().String()) + conn, err := dialer.Dial("tcp", mockBroker.listener.Addr().String()) - if err != nil { - t.Fatal(err) - } + if err != nil { + 
t.Fatal(err) + } - broker.conn = conn + broker.conn = conn - err = broker.authenticateViaSASL() + err = broker.authenticateViaSASL() - if test.mockSASLAuthErr != ErrNoError { - if test.mockSASLAuthErr != err { - t.Errorf("[%d]:[%s] Expected %s SASL authentication error, got %s\n", i, test.name, test.mockHandshakeErr, err) - } - } else if test.mockHandshakeErr != ErrNoError { - if test.mockHandshakeErr != err { - t.Errorf("[%d]:[%s] Expected %s handshake error, got %s\n", i, test.name, test.mockHandshakeErr, err) + if test.mockSASLAuthErr != ErrNoError { + if test.mockSASLAuthErr != err { + t.Errorf("[%d]:[%s] Expected %s SASL authentication error, got %s\n", i, test.name, test.mockHandshakeErr, err) + } + } else if test.mockHandshakeErr != ErrNoError { + if test.mockHandshakeErr != err { + t.Errorf("[%d]:[%s] Expected %s handshake error, got %s\n", i, test.name, test.mockHandshakeErr, err) + } + } else if test.expectClientErr && err == nil { + t.Errorf("[%d]:[%s] Expected a client error and got none\n", i, test.name) + } else if !test.expectClientErr && err != nil { + t.Errorf("[%d]:[%s] Unexpected error, got %s\n", i, test.name, err) } - } else if test.expectClientErr && err == nil { - t.Errorf("[%d]:[%s] Expected a client error and got none\n", i, test.name) - } else if !test.expectClientErr && err != nil { - t.Errorf("[%d]:[%s] Unexpected error, got %s\n", i, test.name, err) - } - mockBroker.Close() + mockBroker.Close() + }) } } @@ -424,96 +430,98 @@ func TestSASLPlainAuth(t *testing.T) { } for i, test := range testTable { - // mockBroker mocks underlying network logic and broker responses - mockBroker := NewMockBroker(t, 0) + t.Run(test.name, func(t *testing.T) { + // mockBroker mocks underlying network logic and broker responses + mockBroker := NewMockBroker(t, 0) - mockSASLAuthResponse := NewMockSaslAuthenticateResponse(t). - SetAuthBytes([]byte(`response_payload`)) + mockSASLAuthResponse := NewMockSaslAuthenticateResponse(t). + SetAuthBytes([]byte(`response_payload`)) - if test.mockAuthErr != ErrNoError { - mockSASLAuthResponse = mockSASLAuthResponse.SetError(test.mockAuthErr) - } + if test.mockAuthErr != ErrNoError { + mockSASLAuthResponse = mockSASLAuthResponse.SetError(test.mockAuthErr) + } - mockSASLHandshakeResponse := NewMockSaslHandshakeResponse(t). - SetEnabledMechanisms([]string{SASLTypePlaintext}) + mockSASLHandshakeResponse := NewMockSaslHandshakeResponse(t). 
+ SetEnabledMechanisms([]string{SASLTypePlaintext}) - if test.mockHandshakeErr != ErrNoError { - mockSASLHandshakeResponse = mockSASLHandshakeResponse.SetError(test.mockHandshakeErr) - } + if test.mockHandshakeErr != ErrNoError { + mockSASLHandshakeResponse = mockSASLHandshakeResponse.SetError(test.mockHandshakeErr) + } - mockBroker.SetHandlerByMap(map[string]MockResponse{ - "SaslAuthenticateRequest": mockSASLAuthResponse, - "SaslHandshakeRequest": mockSASLHandshakeResponse, - }) + mockBroker.SetHandlerByMap(map[string]MockResponse{ + "SaslAuthenticateRequest": mockSASLAuthResponse, + "SaslHandshakeRequest": mockSASLHandshakeResponse, + }) - // broker executes SASL requests against mockBroker - broker := NewBroker(mockBroker.Addr()) - broker.requestRate = metrics.NilMeter{} - broker.outgoingByteRate = metrics.NilMeter{} - broker.incomingByteRate = metrics.NilMeter{} - broker.requestSize = metrics.NilHistogram{} - broker.responseSize = metrics.NilHistogram{} - broker.responseRate = metrics.NilMeter{} - broker.requestLatency = metrics.NilHistogram{} - broker.requestsInFlight = metrics.NilCounter{} + // broker executes SASL requests against mockBroker + broker := NewBroker(mockBroker.Addr()) + broker.requestRate = metrics.NilMeter{} + broker.outgoingByteRate = metrics.NilMeter{} + broker.incomingByteRate = metrics.NilMeter{} + broker.requestSize = metrics.NilHistogram{} + broker.responseSize = metrics.NilHistogram{} + broker.responseRate = metrics.NilMeter{} + broker.requestLatency = metrics.NilHistogram{} + broker.requestsInFlight = metrics.NilCounter{} - conf := NewConfig() - conf.Net.SASL.Mechanism = SASLTypePlaintext - conf.Net.SASL.AuthIdentity = test.authidentity - conf.Net.SASL.User = "token" - conf.Net.SASL.Password = "password" - conf.Net.SASL.Version = SASLHandshakeV1 + conf := NewConfig() + conf.Net.SASL.Mechanism = SASLTypePlaintext + conf.Net.SASL.AuthIdentity = test.authidentity + conf.Net.SASL.User = "token" + conf.Net.SASL.Password = "password" + conf.Net.SASL.Version = SASLHandshakeV1 - broker.conf = conf - broker.conf.Version = V1_0_0_0 - dialer := net.Dialer{ - Timeout: conf.Net.DialTimeout, - KeepAlive: conf.Net.KeepAlive, - LocalAddr: conf.Net.LocalAddr, - } - - conn, err := dialer.Dial("tcp", mockBroker.listener.Addr().String()) - - if err != nil { - t.Fatal(err) - } - - broker.conn = conn - - err = broker.authenticateViaSASL() - if err == nil { - for _, rr := range mockBroker.History() { - switch r := rr.Request.(type) { - case *SaslAuthenticateRequest: - x := bytes.SplitN(r.SaslAuthBytes, []byte("\x00"), 3) - if string(x[0]) != conf.Net.SASL.AuthIdentity { - t.Errorf("[%d]:[%s] expected %s auth identity, got %s\n", i, test.name, conf.Net.SASL.AuthIdentity, x[0]) - } - if string(x[1]) != conf.Net.SASL.User { - t.Errorf("[%d]:[%s] expected %s user, got %s\n", i, test.name, conf.Net.SASL.User, x[1]) - } - if string(x[2]) != conf.Net.SASL.Password { - t.Errorf("[%d]:[%s] expected %s password, got %s\n", i, test.name, conf.Net.SASL.Password, x[2]) + broker.conf = conf + broker.conf.Version = V1_0_0_0 + dialer := net.Dialer{ + Timeout: conf.Net.DialTimeout, + KeepAlive: conf.Net.KeepAlive, + LocalAddr: conf.Net.LocalAddr, + } + + conn, err := dialer.Dial("tcp", mockBroker.listener.Addr().String()) + + if err != nil { + t.Fatal(err) + } + + broker.conn = conn + + err = broker.authenticateViaSASL() + if err == nil { + for _, rr := range mockBroker.History() { + switch r := rr.Request.(type) { + case *SaslAuthenticateRequest: + x := bytes.SplitN(r.SaslAuthBytes, []byte("\x00"), 
3) + if string(x[0]) != conf.Net.SASL.AuthIdentity { + t.Errorf("[%d]:[%s] expected %s auth identity, got %s\n", i, test.name, conf.Net.SASL.AuthIdentity, x[0]) + } + if string(x[1]) != conf.Net.SASL.User { + t.Errorf("[%d]:[%s] expected %s user, got %s\n", i, test.name, conf.Net.SASL.User, x[1]) + } + if string(x[2]) != conf.Net.SASL.Password { + t.Errorf("[%d]:[%s] expected %s password, got %s\n", i, test.name, conf.Net.SASL.Password, x[2]) + } } } } - } - if test.mockAuthErr != ErrNoError { - if test.mockAuthErr != err { - t.Errorf("[%d]:[%s] Expected %s auth error, got %s\n", i, test.name, test.mockAuthErr, err) - } - } else if test.mockHandshakeErr != ErrNoError { - if test.mockHandshakeErr != err { - t.Errorf("[%d]:[%s] Expected %s handshake error, got %s\n", i, test.name, test.mockHandshakeErr, err) + if test.mockAuthErr != ErrNoError { + if test.mockAuthErr != err { + t.Errorf("[%d]:[%s] Expected %s auth error, got %s\n", i, test.name, test.mockAuthErr, err) + } + } else if test.mockHandshakeErr != ErrNoError { + if test.mockHandshakeErr != err { + t.Errorf("[%d]:[%s] Expected %s handshake error, got %s\n", i, test.name, test.mockHandshakeErr, err) + } + } else if test.expectClientErr && err == nil { + t.Errorf("[%d]:[%s] Expected a client error and got none\n", i, test.name) + } else if !test.expectClientErr && err != nil { + t.Errorf("[%d]:[%s] Unexpected error, got %s\n", i, test.name, err) } - } else if test.expectClientErr && err == nil { - t.Errorf("[%d]:[%s] Expected a client error and got none\n", i, test.name) - } else if !test.expectClientErr && err != nil { - t.Errorf("[%d]:[%s] Unexpected error, got %s\n", i, test.name, err) - } - mockBroker.Close() + mockBroker.Close() + }) } } @@ -616,73 +624,75 @@ func TestGSSAPIKerberosAuth_Authorize(t *testing.T) { }, } for i, test := range testTable { - mockBroker := NewMockBroker(t, 0) - // broker executes SASL requests against mockBroker + t.Run(test.name, func(t *testing.T) { + mockBroker := NewMockBroker(t, 0) + // broker executes SASL requests against mockBroker + + mockBroker.SetGSSAPIHandler(func(bytes []byte) []byte { + return nil + }) + broker := NewBroker(mockBroker.Addr()) + broker.requestRate = metrics.NilMeter{} + broker.outgoingByteRate = metrics.NilMeter{} + broker.incomingByteRate = metrics.NilMeter{} + broker.requestSize = metrics.NilHistogram{} + broker.responseSize = metrics.NilHistogram{} + broker.responseRate = metrics.NilMeter{} + broker.requestLatency = metrics.NilHistogram{} + broker.requestsInFlight = metrics.NilCounter{} + conf := NewConfig() + conf.Net.SASL.Mechanism = SASLTypeGSSAPI + conf.Net.SASL.GSSAPI.ServiceName = "kafka" + conf.Net.SASL.GSSAPI.KerberosConfigPath = "krb5.conf" + conf.Net.SASL.GSSAPI.Realm = "EXAMPLE.COM" + conf.Net.SASL.GSSAPI.Username = "kafka" + conf.Net.SASL.GSSAPI.Password = "kafka" + conf.Net.SASL.GSSAPI.KeyTabPath = "kafka.keytab" + conf.Net.SASL.GSSAPI.AuthType = KRB5_USER_AUTH + broker.conf = conf + broker.conf.Version = V1_0_0_0 + dialer := net.Dialer{ + Timeout: conf.Net.DialTimeout, + KeepAlive: conf.Net.KeepAlive, + LocalAddr: conf.Net.LocalAddr, + } + + conn, err := dialer.Dial("tcp", mockBroker.listener.Addr().String()) - mockBroker.SetGSSAPIHandler(func(bytes []byte) []byte { - return nil - }) - broker := NewBroker(mockBroker.Addr()) - broker.requestRate = metrics.NilMeter{} - broker.outgoingByteRate = metrics.NilMeter{} - broker.incomingByteRate = metrics.NilMeter{} - broker.requestSize = metrics.NilHistogram{} - broker.responseSize = metrics.NilHistogram{} - 
broker.responseRate = metrics.NilMeter{} - broker.requestLatency = metrics.NilHistogram{} - broker.requestsInFlight = metrics.NilCounter{} - conf := NewConfig() - conf.Net.SASL.Mechanism = SASLTypeGSSAPI - conf.Net.SASL.GSSAPI.ServiceName = "kafka" - conf.Net.SASL.GSSAPI.KerberosConfigPath = "krb5.conf" - conf.Net.SASL.GSSAPI.Realm = "EXAMPLE.COM" - conf.Net.SASL.GSSAPI.Username = "kafka" - conf.Net.SASL.GSSAPI.Password = "kafka" - conf.Net.SASL.GSSAPI.KeyTabPath = "kafka.keytab" - conf.Net.SASL.GSSAPI.AuthType = KRB5_USER_AUTH - broker.conf = conf - broker.conf.Version = V1_0_0_0 - dialer := net.Dialer{ - Timeout: conf.Net.DialTimeout, - KeepAlive: conf.Net.KeepAlive, - LocalAddr: conf.Net.LocalAddr, - } - - conn, err := dialer.Dial("tcp", mockBroker.listener.Addr().String()) - - if err != nil { - t.Fatal(err) - } - - gssapiHandler := KafkaGSSAPIHandler{ - client: &MockKerberosClient{}, - badResponse: test.badResponse, - badKeyChecksum: test.badKeyChecksum, - } - mockBroker.SetGSSAPIHandler(gssapiHandler.MockKafkaGSSAPI) - broker.conn = conn - if test.mockKerberosClient { - broker.kerberosAuthenticator.NewKerberosClientFunc = func(config *GSSAPIConfig) (KerberosClient, error) { - return &MockKerberosClient{ - mockError: test.error, - errorStage: test.errorStage, - }, nil - } - } else { - broker.kerberosAuthenticator.NewKerberosClientFunc = nil - } - - err = broker.authenticateViaSASL() - - if err != nil && test.error != nil { - if test.error.Error() != err.Error() { + if err != nil { + t.Fatal(err) + } + + gssapiHandler := KafkaGSSAPIHandler{ + client: &MockKerberosClient{}, + badResponse: test.badResponse, + badKeyChecksum: test.badKeyChecksum, + } + mockBroker.SetGSSAPIHandler(gssapiHandler.MockKafkaGSSAPI) + broker.conn = conn + if test.mockKerberosClient { + broker.kerberosAuthenticator.NewKerberosClientFunc = func(config *GSSAPIConfig) (KerberosClient, error) { + return &MockKerberosClient{ + mockError: test.error, + errorStage: test.errorStage, + }, nil + } + } else { + broker.kerberosAuthenticator.NewKerberosClientFunc = nil + } + + err = broker.authenticateViaSASL() + + if err != nil && test.error != nil { + if test.error.Error() != err.Error() { + t.Errorf("[%d] Expected error:%s, got:%s.", i, test.error, err) + } + } else if (err == nil && test.error != nil) || (err != nil && test.error == nil) { t.Errorf("[%d] Expected error:%s, got:%s.", i, test.error, err) } - } else if (err == nil && test.error != nil) || (err != nil && test.error == nil) { - t.Errorf("[%d] Expected error:%s, got:%s.", i, test.error, err) - } - mockBroker.Close() + mockBroker.Close() + }) } } @@ -723,17 +733,19 @@ func TestBuildClientFirstMessage(t *testing.T) { } for i, test := range testTable { - actual, err := buildClientFirstMessage(test.token) - - if !reflect.DeepEqual(test.expected, actual) { - t.Errorf("Expected %s, got %s\n", test.expected, actual) - } - if test.expectError && err == nil { - t.Errorf("[%d]:[%s] Expected an error but did not get one", i, test.name) - } - if !test.expectError && err != nil { - t.Errorf("[%d]:[%s] Expected no error but got %s\n", i, test.name, err) - } + t.Run(test.name, func(t *testing.T) { + actual, err := buildClientFirstMessage(test.token) + + if !reflect.DeepEqual(test.expected, actual) { + t.Errorf("Expected %s, got %s\n", test.expected, actual) + } + if test.expectError && err == nil { + t.Errorf("[%d]:[%s] Expected an error but did not get one", i, test.name) + } + if !test.expectError && err != nil { + t.Errorf("[%d]:[%s] Expected no error but got %s\n", i, 
test.name, err) + } + }) } } diff --git a/client_tls_test.go b/client_tls_test.go index 0e47e17c2..e36612705 100644 --- a/client_tls_test.go +++ b/client_tls_test.go @@ -1,16 +1,15 @@ package sarama import ( - "math/big" - "net" - "testing" - "time" - "crypto/rand" "crypto/rsa" "crypto/tls" "crypto/x509" "crypto/x509/pkix" + "math/big" + "net" + "testing" + "time" ) func TestTLS(t *testing.T) { @@ -95,10 +94,12 @@ func TestTLS(t *testing.T) { } for _, tc := range []struct { + name string Succeed bool Server, Client *tls.Config }{ - { // Verify client fails if wrong CA cert pool is specified + { + name: "Verify client fails if wrong CA cert pool is specified", Succeed: false, Server: serverTLSConfig, Client: &tls.Config{ @@ -109,7 +110,8 @@ func TestTLS(t *testing.T) { }}, }, }, - { // Verify client fails if wrong key is specified + { + name: "Verify client fails if wrong key is specified", Succeed: false, Server: serverTLSConfig, Client: &tls.Config{ @@ -120,7 +122,8 @@ func TestTLS(t *testing.T) { }}, }, }, - { // Verify client fails if wrong cert is specified + { + name: "Verify client fails if wrong cert is specified", Succeed: false, Server: serverTLSConfig, Client: &tls.Config{ @@ -131,7 +134,8 @@ func TestTLS(t *testing.T) { }}, }, }, - { // Verify client fails if no CAs are specified + { + name: "Verify client fails if no CAs are specified", Succeed: false, Server: serverTLSConfig, Client: &tls.Config{ @@ -141,18 +145,21 @@ func TestTLS(t *testing.T) { }}, }, }, - { // Verify client fails if no keys are specified + { + name: "Verify client fails if no keys are specified", Succeed: false, Server: serverTLSConfig, Client: &tls.Config{ RootCAs: pool, }, }, - { // Finally, verify it all works happily with client and server cert in place + { + name: "Finally, verify it all works happily with client and server cert in place", Succeed: true, Server: serverTLSConfig, Client: &tls.Config{ - RootCAs: pool, + RootCAs: pool, + ServerName: "127.0.0.1", Certificates: []tls.Certificate{{ Certificate: [][]byte{clientDer}, PrivateKey: clientkey, @@ -160,7 +167,7 @@ func TestTLS(t *testing.T) { }, }, } { - doListenerTLSTest(t, tc.Succeed, tc.Server, tc.Client) + t.Run(tc.name, func(t *testing.T) { doListenerTLSTest(t, tc.Succeed, tc.Server, tc.Client) }) } } diff --git a/config.go b/config.go index e899820cb..0ce308f80 100644 --- a/config.go +++ b/config.go @@ -734,3 +734,16 @@ func (c *Config) Validate() error { return nil } + +func (c *Config) getDialer() proxy.Dialer { + if c.Net.Proxy.Enable { + Logger.Printf("using proxy %s", c.Net.Proxy.Dialer) + return c.Net.Proxy.Dialer + } else { + return &net.Dialer{ + Timeout: c.Net.DialTimeout, + KeepAlive: c.Net.KeepAlive, + LocalAddr: c.Net.LocalAddr, + } + } +} From 73af7682287ccc5d3e7cedfad0073183f87d1ee2 Mon Sep 17 00:00:00 2001 From: Vlad Gorodetsky Date: Wed, 6 May 2020 11:06:53 +0300 Subject: [PATCH 19/41] Reduce build matrix --- .github/workflows/ci.yml | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index f79a3274c..116deb5e5 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -9,8 +9,8 @@ jobs: strategy: fail-fast: false matrix: - go-version: [1.12.x, 1.13.x, 1.14.x] - kafka-version: [2.3.1, 2.4.1, 2.5.0] + go-version: [1.14.x] + kafka-version: [2.4.1, 2.5.0] platform: [ubuntu-latest] env: From ab525ed5db3267f633504c4e3506f922986af189 Mon Sep 17 00:00:00 2001 From: Diego Alvarez Date: Thu, 7 May 2020 11:17:49 -0700 Subject: [PATCH 20/41] Set ServerName 
using tls.DialWithDialer approach --- broker.go | 11 +++++++++++ client_tls_test.go | 3 +-- 2 files changed, 12 insertions(+), 2 deletions(-) diff --git a/broker.go b/broker.go index f379dca62..3c39300c7 100644 --- a/broker.go +++ b/broker.go @@ -165,6 +165,17 @@ func (b *Broker) Open(conf *Config) error { if conf.Net.TLS.Enable { Logger.Printf("Using tls") + + // If no ServerName is set, infer the ServerName + // from the hostname we're connecting to. + if conf.Net.TLS.Config.ServerName == "" { + colonPos := strings.LastIndex(b.addr, ":") + if colonPos == -1 { + colonPos = len(b.addr) + } + hostname := b.addr[:colonPos] + conf.Net.TLS.Config.ServerName = hostname + } b.conn = tls.Client(b.conn, conf.Net.TLS.Config) } diff --git a/client_tls_test.go b/client_tls_test.go index e36612705..750145610 100644 --- a/client_tls_test.go +++ b/client_tls_test.go @@ -158,8 +158,7 @@ func TestTLS(t *testing.T) { Succeed: true, Server: serverTLSConfig, Client: &tls.Config{ - RootCAs: pool, - ServerName: "127.0.0.1", + RootCAs: pool, Certificates: []tls.Certificate{{ Certificate: [][]byte{clientDer}, PrivateKey: clientkey, From cb293024a2efb95fb03418fe1e5e7a2d41cdd05f Mon Sep 17 00:00:00 2001 From: Diego Alvarez Date: Thu, 7 May 2020 11:55:19 -0700 Subject: [PATCH 21/41] Creates conf.Net.TLS.Config if not provided --- broker.go | 12 ++++++++---- 1 file changed, 8 insertions(+), 4 deletions(-) diff --git a/broker.go b/broker.go index 3c39300c7..cda5da8f7 100644 --- a/broker.go +++ b/broker.go @@ -165,18 +165,22 @@ func (b *Broker) Open(conf *Config) error { if conf.Net.TLS.Enable { Logger.Printf("Using tls") - + cfg := conf.Net.TLS.Config + if cfg == nil { + cfg = &tls.Config{} + } // If no ServerName is set, infer the ServerName // from the hostname we're connecting to. - if conf.Net.TLS.Config.ServerName == "" { + // Gets the hostname as tls.DialWithDialer does it. + if cfg.ServerName == "" { colonPos := strings.LastIndex(b.addr, ":") if colonPos == -1 { colonPos = len(b.addr) } hostname := b.addr[:colonPos] - conf.Net.TLS.Config.ServerName = hostname + cfg.ServerName = hostname } - b.conn = tls.Client(b.conn, conf.Net.TLS.Config) + b.conn = tls.Client(b.conn, cfg) } b.conn = newBufConn(b.conn)
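Patches 20 and 21 reproduce the hostname selection that tls.DialWithDialer performs internally: everything before the last colon in the broker address becomes the TLS ServerName. A minimal standalone sketch of that inference, using made-up example addresses (not part of the patch series):

package main

import (
	"fmt"
	"strings"
)

// inferServerName extracts the host part of a broker address the way the
// patches above do: everything up to the last ':' separator, or the whole
// string when no port is present.
func inferServerName(addr string) string {
	colonPos := strings.LastIndex(addr, ":")
	if colonPos == -1 {
		colonPos = len(addr)
	}
	return addr[:colonPos]
}

func main() {
	fmt.Println(inferServerName("kafka-1.example.com:9093")) // kafka-1.example.com
	fmt.Println(inferServerName("kafka-1.example.com"))      // kafka-1.example.com
}

Patch 31 later extracts this logic into a standalone validServerNameTLS helper, and patch 32 swaps the index arithmetic for net.SplitHostPort.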
From 720500cf5980ffa9feb31003543728a58aa489e5 Mon Sep 17 00:00:00 2001 From: Dominic Evans Date: Thu, 7 May 2020 21:44:04 +0100 Subject: [PATCH 22/41] testfix: give kafka longer to start up For some reason we'd ended up with quite a short (10s) limit on waiting for Kafka to start up and open its listener, and since moving from Travis to GitHub Actions, this can periodically take longer, causing the build to abort early. Bump it up to 2m and tweak a couple of other parts of the boot script. --- vagrant/boot_cluster.sh | 13 ++++++------- 1 file changed, 6 insertions(+), 7 deletions(-) diff --git a/vagrant/boot_cluster.sh b/vagrant/boot_cluster.sh index 6ac8bd4bc..4fbd17988 100755 --- a/vagrant/boot_cluster.sh +++ b/vagrant/boot_cluster.sh @@ -4,8 +4,8 @@ set -ex # Launch and wait for toxiproxy ${REPOSITORY_ROOT}/vagrant/run_toxiproxy.sh & -while ! nc -q 1 localhost 2181 &2 - find "${KAFKA_INSTALL_ROOT}" -name "server.log" -print0 | xargs tail -256 + echo 'Error: kafka5 failed to startup' >&2 + find "${KAFKA_INSTALL_ROOT}/kafka-${KAFKA_PORT}" -name "server.log" -print0 | xargs -0 --no-run-if-empty tail -256 exit ${RC} fi From a9ef2f77668f48b4a89f2c42995bd54bff71e5ce Mon Sep 17 00:00:00 2001 From: Dominic Evans Date: Thu, 7 May 2020 22:39:42 +0100 Subject: [PATCH 23/41] testfix: set KAFKA_HEAP_OPTS for zk and kafka - set Java heap options for ZooKeeper and Kafka - disable the adminServer (:8080) in ZooKeeper 3.5.x --- vagrant/boot_cluster.sh | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/vagrant/boot_cluster.sh b/vagrant/boot_cluster.sh index 4fbd17988..c817fce91 100755 --- a/vagrant/boot_cluster.sh +++ b/vagrant/boot_cluster.sh @@ -8,6 +8,8 @@ while ! nc -q 1 localhost 2185 Date: Sat, 9 May 2020 18:11:56 +1000 Subject: [PATCH 24/41] Add docker-compose based functional test harness * Functional tests are skipped except when the `functional` build tag is passed to the go test command (i.e. go test -tags=functional) * If TOXIPROXY_ADDR is not set when the functional tests are being run, it will use docker-compose to automatically spin up a zookeeper/kafka/toxiproxy environment suitable for running the tests. This requires a working Docker and the docker-compose command line tool to be installed. * If TOXIPROXY_ADDR and KAFKA_VERSION are set, then the tests will not spin up any docker infrastructure and will instead rely on whatever kafka broker is behind the specified toxiproxy. --- .gitignore | 3 + docker-compose.yml | 134 +++++++++++ functional_client_test.go | 10 +- functional_consumer_group_test.go | 6 +- functional_consumer_test.go | 14 +- functional_offset_manager_test.go | 4 +- functional_producer_test.go | 18 +- functional_test.go | 386 ++++++++++++++++++++++++++---- 8 files changed, 504 insertions(+), 71 deletions(-) create mode 100644 docker-compose.yml diff --git a/.gitignore b/.gitignore index 6e362e4f2..eb4b19509 100644 --- a/.gitignore +++ b/.gitignore @@ -25,3 +25,6 @@ _testmain.go coverage.txt profile.out + +simplest-uncommitted-msg-0.1-jar-with-dependencies.jar + diff --git a/docker-compose.yml b/docker-compose.yml new file mode 100644 index 000000000..03425a6f9 --- /dev/null +++ b/docker-compose.yml @@ -0,0 +1,134 @@ +version: '3.7' +services: + zookeeper-1: + image: 'confluentinc/cp-zookeeper:5.5.0' + restart: always + environment: + ZOOKEEPER_SERVER_ID: '1' + ZOOKEEPER_SERVERS: 'zookeeper-1:2888:3888;zookeeper-2:2888:3888;zookeeper-3:2888:3888' + ZOOKEEPER_CLIENT_PORT: '2181' + ZOOKEEPER_PEER_PORT: '2888' + ZOOKEEPER_LEADER_PORT: '3888' + ZOOKEEPER_INIT_LIMIT: '10' + ZOOKEEPER_SYNC_LIMIT: '5' + ZOOKEEPER_MAX_CLIENT_CONNS: '0' + zookeeper-2: + image: 'confluentinc/cp-zookeeper:5.5.0' + restart: always + environment: + ZOOKEEPER_SERVER_ID: '2' + ZOOKEEPER_SERVERS: 'zookeeper-1:2888:3888;zookeeper-2:2888:3888;zookeeper-3:2888:3888' + ZOOKEEPER_CLIENT_PORT: '2181' + ZOOKEEPER_PEER_PORT: '2888' + ZOOKEEPER_LEADER_PORT: '3888' + ZOOKEEPER_INIT_LIMIT: '10' + ZOOKEEPER_SYNC_LIMIT: '5' + ZOOKEEPER_MAX_CLIENT_CONNS: '0' + zookeeper-3: + image: 'confluentinc/cp-zookeeper:5.5.0' + restart: always + environment: + ZOOKEEPER_SERVER_ID: '3' + ZOOKEEPER_SERVERS: 'zookeeper-1:2888:3888;zookeeper-2:2888:3888;zookeeper-3:2888:3888' + ZOOKEEPER_CLIENT_PORT: '2181' + ZOOKEEPER_PEER_PORT: '2888' + ZOOKEEPER_LEADER_PORT: '3888' + ZOOKEEPER_INIT_LIMIT: '10' + ZOOKEEPER_SYNC_LIMIT: '5' + ZOOKEEPER_MAX_CLIENT_CONNS: '0' + kafka-1:
+ image: 'confluentinc/cp-kafka:5.5.0' + restart: always + environment: + KAFKA_ZOOKEEPER_CONNECT: 'zookeeper-1:2181,zookeeper-2:2181,zookeeper-3:2181' + KAFKA_LISTENERS: 'LISTENER_INTERNAL://:9091,LISTENER_LOCAL://:29091' + KAFKA_ADVERTISED_LISTENERS: 'LISTENER_INTERNAL://kafka-1:9091,LISTENER_LOCAL://localhost:29091' + KAFKA_INTER_BROKER_LISTENER_NAME: 'LISTENER_INTERNAL' + KAFKA_LISTENER_SECURITY_PROTOCOL_MAP: 'LISTENER_INTERNAL:PLAINTEXT,LISTENER_LOCAL:PLAINTEXT' + KAFKA_DEFAULT_REPLICATION_FACTOR: '2' + KAFKA_BROKER_ID: '1' + KAFKA_BROKER_RACK: '1' + KAFKA_ZOOKEEPER_SESSION_TIMEOUT_MS: '3000' + KAFKA_ZOOKEEPER_CONNECTION_TIMEOUT_MS: '3000' + KAFKA_REPLICA_SELECTOR_CLASS: 'org.apache.kafka.common.replica.RackAwareReplicaSelector' + KAFKA_DELETE_TOPIC_ENABLE: 'true' + KAFKA_AUTO_CREATE_TOPICS_ENABLE: 'false' + kafka-2: + image: 'confluentinc/cp-kafka:5.5.0' + restart: always + environment: + KAFKA_ZOOKEEPER_CONNECT: 'zookeeper-1:2181,zookeeper-2:2181,zookeeper-3:2181' + KAFKA_LISTENERS: 'LISTENER_INTERNAL://:9091,LISTENER_LOCAL://:29092' + KAFKA_ADVERTISED_LISTENERS: 'LISTENER_INTERNAL://kafka-2:9091,LISTENER_LOCAL://localhost:29092' + KAFKA_INTER_BROKER_LISTENER_NAME: 'LISTENER_INTERNAL' + KAFKA_LISTENER_SECURITY_PROTOCOL_MAP: 'LISTENER_INTERNAL:PLAINTEXT,LISTENER_LOCAL:PLAINTEXT' + KAFKA_DEFAULT_REPLICATION_FACTOR: '2' + KAFKA_BROKER_ID: '2' + KAFKA_BROKER_RACK: '2' + KAFKA_ZOOKEEPER_SESSION_TIMEOUT_MS: '3000' + KAFKA_ZOOKEEPER_CONNECTION_TIMEOUT_MS: '3000' + KAFKA_REPLICA_SELECTOR_CLASS: 'org.apache.kafka.common.replica.RackAwareReplicaSelector' + KAFKA_DELETE_TOPIC_ENABLE: 'true' + KAFKA_AUTO_CREATE_TOPICS_ENABLE: 'false' + kafka-3: + image: 'confluentinc/cp-kafka:5.5.0' + restart: always + environment: + KAFKA_ZOOKEEPER_CONNECT: 'zookeeper-1:2181,zookeeper-2:2181,zookeeper-3:2181' + KAFKA_LISTENERS: 'LISTENER_INTERNAL://:9091,LISTENER_LOCAL://:29093' + KAFKA_ADVERTISED_LISTENERS: 'LISTENER_INTERNAL://kafka-3:9091,LISTENER_LOCAL://localhost:29093' + KAFKA_INTER_BROKER_LISTENER_NAME: 'LISTENER_INTERNAL' + KAFKA_LISTENER_SECURITY_PROTOCOL_MAP: 'LISTENER_INTERNAL:PLAINTEXT,LISTENER_LOCAL:PLAINTEXT' + KAFKA_DEFAULT_REPLICATION_FACTOR: '2' + KAFKA_BROKER_ID: '3' + KAFKA_BROKER_RACK: '3' + KAFKA_ZOOKEEPER_SESSION_TIMEOUT_MS: '3000' + KAFKA_ZOOKEEPER_CONNECTION_TIMEOUT_MS: '3000' + KAFKA_REPLICA_SELECTOR_CLASS: 'org.apache.kafka.common.replica.RackAwareReplicaSelector' + KAFKA_DELETE_TOPIC_ENABLE: 'true' + KAFKA_AUTO_CREATE_TOPICS_ENABLE: 'false' + kafka-4: + image: 'confluentinc/cp-kafka:5.5.0' + restart: always + environment: + KAFKA_ZOOKEEPER_CONNECT: 'zookeeper-1:2181,zookeeper-2:2181,zookeeper-3:2181' + KAFKA_LISTENERS: 'LISTENER_INTERNAL://:9091,LISTENER_LOCAL://:29094' + KAFKA_ADVERTISED_LISTENERS: 'LISTENER_INTERNAL://kafka-4:9091,LISTENER_LOCAL://localhost:29094' + KAFKA_INTER_BROKER_LISTENER_NAME: 'LISTENER_INTERNAL' + KAFKA_LISTENER_SECURITY_PROTOCOL_MAP: 'LISTENER_INTERNAL:PLAINTEXT,LISTENER_LOCAL:PLAINTEXT' + KAFKA_DEFAULT_REPLICATION_FACTOR: '2' + KAFKA_BROKER_ID: '4' + KAFKA_BROKER_RACK: '4' + KAFKA_ZOOKEEPER_SESSION_TIMEOUT_MS: '3000' + KAFKA_ZOOKEEPER_CONNECTION_TIMEOUT_MS: '3000' + KAFKA_REPLICA_SELECTOR_CLASS: 'org.apache.kafka.common.replica.RackAwareReplicaSelector' + KAFKA_DELETE_TOPIC_ENABLE: 'true' + KAFKA_AUTO_CREATE_TOPICS_ENABLE: 'false' + kafka-5: + image: 'confluentinc/cp-kafka:5.5.0' + restart: always + environment: + KAFKA_ZOOKEEPER_CONNECT: 'zookeeper-1:2181,zookeeper-2:2181,zookeeper-3:2181' + KAFKA_LISTENERS: 
'LISTENER_INTERNAL://:9091,LISTENER_LOCAL://:29095' + KAFKA_ADVERTISED_LISTENERS: 'LISTENER_INTERNAL://kafka-5:9091,LISTENER_LOCAL://localhost:29095' + KAFKA_INTER_BROKER_LISTENER_NAME: 'LISTENER_INTERNAL' + KAFKA_LISTENER_SECURITY_PROTOCOL_MAP: 'LISTENER_INTERNAL:PLAINTEXT,LISTENER_LOCAL:PLAINTEXT' + KAFKA_DEFAULT_REPLICATION_FACTOR: '2' + KAFKA_BROKER_ID: '5' + KAFKA_BROKER_RACK: '5' + KAFKA_ZOOKEEPER_SESSION_TIMEOUT_MS: '3000' + KAFKA_ZOOKEEPER_CONNECTION_TIMEOUT_MS: '3000' + KAFKA_REPLICA_SELECTOR_CLASS: 'org.apache.kafka.common.replica.RackAwareReplicaSelector' + KAFKA_DELETE_TOPIC_ENABLE: 'true' + KAFKA_AUTO_CREATE_TOPICS_ENABLE: 'false' + toxiproxy: + image: 'shopify/toxiproxy:2.1.4' + ports: + # The tests themselves actually start the proxies on these ports + - '29091:29091' + - '29092:29092' + - '29093:29093' + - '29094:29094' + - '29095:29095' + # This is the toxiproxy API port + - '8474:8474' diff --git a/functional_client_test.go b/functional_client_test.go index 2bf99d252..513b8ee9b 100644 --- a/functional_client_test.go +++ b/functional_client_test.go @@ -1,3 +1,5 @@ +//+build functional + package sarama import ( @@ -10,13 +12,13 @@ func TestFuncConnectionFailure(t *testing.T) { setupFunctionalTest(t) defer teardownFunctionalTest(t) - Proxies["kafka1"].Enabled = false + FunctionalTestEnv.Proxies["kafka1"].Enabled = false SaveProxy(t, "kafka1") config := NewConfig() config.Metadata.Retry.Max = 1 - _, err := NewClient([]string{kafkaBrokers[0]}, config) + _, err := NewClient([]string{FunctionalTestEnv.KafkaBrokerAddrs[0]}, config) if err != ErrOutOfBrokers { t.Fatal("Expected returned error to be ErrOutOfBrokers, but was: ", err) } @@ -29,7 +31,7 @@ func TestFuncClientMetadata(t *testing.T) { config := NewConfig() config.Metadata.Retry.Max = 1 config.Metadata.Retry.Backoff = 10 * time.Millisecond - client, err := NewClient(kafkaBrokers, config) + client, err := NewClient(FunctionalTestEnv.KafkaBrokerAddrs, config) if err != nil { t.Fatal(err) } @@ -70,7 +72,7 @@ func TestFuncClientCoordinator(t *testing.T) { setupFunctionalTest(t) defer teardownFunctionalTest(t) - client, err := NewClient(kafkaBrokers, nil) + client, err := NewClient(FunctionalTestEnv.KafkaBrokerAddrs, nil) if err != nil { t.Fatal(err) } diff --git a/functional_consumer_group_test.go b/functional_consumer_group_test.go index ae376086d..4d71510a8 100644 --- a/functional_consumer_group_test.go +++ b/functional_consumer_group_test.go @@ -1,4 +1,4 @@ -// +build go1.9 +//+build functional package sarama @@ -153,7 +153,7 @@ func testFuncConsumerGroupID(t *testing.T) string { } func testFuncConsumerGroupFuzzySeed(topic string) error { - client, err := NewClient(kafkaBrokers, nil) + client, err := NewClient(FunctionalTestEnv.KafkaBrokerAddrs, nil) if err != nil { return err } @@ -245,7 +245,7 @@ func runTestFuncConsumerGroupMember(t *testing.T, groupID, clientID string, maxM config.Consumer.Offsets.Initial = OffsetOldest config.Consumer.Group.Rebalance.Timeout = 10 * time.Second - group, err := NewConsumerGroup(kafkaBrokers, groupID, config) + group, err := NewConsumerGroup(FunctionalTestEnv.KafkaBrokerAddrs, groupID, config) if err != nil { t.Fatal(err) return nil } diff --git a/functional_consumer_test.go b/functional_consumer_test.go index 8b31b45c5..aca9434db 100644 --- a/functional_consumer_test.go +++ b/functional_consumer_test.go @@ -1,3 +1,5 @@ +//+build functional + package sarama import ( @@ -16,7 +18,7 @@ func TestFuncConsumerOffsetOutOfRange(t *testing.T) { setupFunctionalTest(t) defer teardownFunctionalTest(t)
- consumer, err := NewConsumer(kafkaBrokers, nil) + consumer, err := NewConsumer(FunctionalTestEnv.KafkaBrokerAddrs, nil) if err != nil { t.Fatal(err) } @@ -36,7 +38,7 @@ func TestConsumerHighWaterMarkOffset(t *testing.T) { setupFunctionalTest(t) defer teardownFunctionalTest(t) - p, err := NewSyncProducer(kafkaBrokers, nil) + p, err := NewSyncProducer(FunctionalTestEnv.KafkaBrokerAddrs, nil) if err != nil { t.Fatal(err) } @@ -47,7 +49,7 @@ func TestConsumerHighWaterMarkOffset(t *testing.T) { t.Fatal(err) } - c, err := NewConsumer(kafkaBrokers, nil) + c, err := NewConsumer(FunctionalTestEnv.KafkaBrokerAddrs, nil) if err != nil { t.Fatal(err) } @@ -143,7 +145,7 @@ func TestReadOnlyAndAllCommittedMessages(t *testing.T) { config.Consumer.IsolationLevel = ReadCommitted config.Version = V0_11_0_0 - consumer, err := NewConsumer(kafkaBrokers, config) + consumer, err := NewConsumer(FunctionalTestEnv.KafkaBrokerAddrs, config) if err != nil { t.Fatal(err) } @@ -205,7 +207,7 @@ func produceMsgs(t *testing.T, clientVersions []KafkaVersion, codecs []Compressi prodCfg.Net.MaxOpenRequests = 1 } - p, err := NewSyncProducer(kafkaBrokers, prodCfg) + p, err := NewSyncProducer(FunctionalTestEnv.KafkaBrokerAddrs, prodCfg) if err != nil { t.Errorf("Failed to create producer: version=%s, compression=%s, err=%v", prodVer, codec, err) continue @@ -251,7 +253,7 @@ consumerVersionLoop: // message. consCfg := NewConfig() consCfg.Version = consVer - c, err := NewConsumer(kafkaBrokers, consCfg) + c, err := NewConsumer(FunctionalTestEnv.KafkaBrokerAddrs, consCfg) if err != nil { t.Fatal(err) } diff --git a/functional_offset_manager_test.go b/functional_offset_manager_test.go index 436f35ef4..32e160aab 100644 --- a/functional_offset_manager_test.go +++ b/functional_offset_manager_test.go @@ -1,3 +1,5 @@ +//+build functional + package sarama import ( @@ -9,7 +11,7 @@ func TestFuncOffsetManager(t *testing.T) { setupFunctionalTest(t) defer teardownFunctionalTest(t) - client, err := NewClient(kafkaBrokers, nil) + client, err := NewClient(FunctionalTestEnv.KafkaBrokerAddrs, nil) if err != nil { t.Fatal(err) } diff --git a/functional_producer_test.go b/functional_producer_test.go index e589a8eb8..0f86368d3 100644 --- a/functional_producer_test.go +++ b/functional_producer_test.go @@ -1,3 +1,5 @@ +//+build functional + package sarama import ( @@ -53,7 +55,7 @@ func TestFuncMultiPartitionProduce(t *testing.T) { config.Producer.Flush.Frequency = 50 * time.Millisecond config.Producer.Flush.Messages = 200 config.Producer.Return.Successes = true - producer, err := NewSyncProducer(kafkaBrokers, config) + producer, err := NewSyncProducer(FunctionalTestEnv.KafkaBrokerAddrs, config) if err != nil { t.Fatal(err) } @@ -81,7 +83,7 @@ func TestFuncProducingToInvalidTopic(t *testing.T) { setupFunctionalTest(t) defer teardownFunctionalTest(t) - producer, err := NewSyncProducer(kafkaBrokers, nil) + producer, err := NewSyncProducer(FunctionalTestEnv.KafkaBrokerAddrs, nil) if err != nil { t.Fatal(err) } @@ -113,7 +115,7 @@ func TestFuncProducingIdempotentWithBrokerFailure(t *testing.T) { config.Net.MaxOpenRequests = 1 config.Version = V0_11_0_0 - producer, err := NewSyncProducer(kafkaBrokers, config) + producer, err := NewSyncProducer(FunctionalTestEnv.KafkaBrokerAddrs, config) if err != nil { t.Fatal(err) } @@ -131,7 +133,7 @@ func TestFuncProducingIdempotentWithBrokerFailure(t *testing.T) { } // break the brokers. 
- for proxyName, proxy := range Proxies { + for proxyName, proxy := range FunctionalTestEnv.Proxies { if !strings.Contains(proxyName, "kafka") { continue } @@ -152,7 +154,7 @@ func TestFuncProducingIdempotentWithBrokerFailure(t *testing.T) { } // Now bring the proxy back up - for proxyName, proxy := range Proxies { + for proxyName, proxy := range FunctionalTestEnv.Proxies { if !strings.Contains(proxyName, "kafka") { continue } @@ -179,7 +181,7 @@ func testProducingMessages(t *testing.T, config *Config) { defer teardownFunctionalTest(t) // Configure some latency in order to properly validate the request latency metric - for _, proxy := range Proxies { + for _, proxy := range FunctionalTestEnv.Proxies { if _, err := proxy.AddToxic("", "latency", "", 1, toxiproxy.Attributes{"latency": 10}); err != nil { t.Fatal("Unable to configure latency toxicity", err) } @@ -188,7 +190,7 @@ func testProducingMessages(t *testing.T, config *Config) { config.Producer.Return.Successes = true config.Consumer.Return.Errors = true - client, err := NewClient(kafkaBrokers, config) + client, err := NewClient(FunctionalTestEnv.KafkaBrokerAddrs, config) if err != nil { t.Fatal(err) } @@ -380,7 +382,7 @@ func benchmarkProducer(b *testing.B, conf *Config, topic string, value Encoder) }() } - producer, err := NewAsyncProducer(kafkaBrokers, conf) + producer, err := NewAsyncProducer(FunctionalTestEnv.KafkaBrokerAddrs, conf) if err != nil { b.Fatal(err) } diff --git a/functional_test.go b/functional_test.go index 778d9e055..e56182be0 100644 --- a/functional_test.go +++ b/functional_test.go @@ -1,79 +1,378 @@ +//+build functional + package sarama import ( + "context" + "fmt" + toxiproxy "github.com/Shopify/toxiproxy/client" + "io" "log" - "math/rand" "net" + "net/http" + "net/url" "os" + "os/exec" + "path/filepath" "strconv" "strings" "testing" "time" - - toxiproxy "github.com/Shopify/toxiproxy/client" ) const ( - VagrantToxiproxy = "http://192.168.100.67:8474" - VagrantKafkaPeers = "192.168.100.67:9091,192.168.100.67:9092,192.168.100.67:9093,192.168.100.67:9094,192.168.100.67:9095" - VagrantZookeeperPeers = "192.168.100.67:2181,192.168.100.67:2182,192.168.100.67:2183,192.168.100.67:2184,192.168.100.67:2185" + uncomittedMsgJar = "https://github.com/FrancoisPoinsot/simplest-uncommitted-msg/releases/download/0.1/simplest-uncommitted-msg-0.1-jar-with-dependencies.jar" ) var ( - kafkaAvailable, kafkaRequired bool - kafkaBrokers []string + testTopicDetails = map[string]*TopicDetail{ + "test.1": { + NumPartitions: 1, + ReplicationFactor: 3, + }, + "test.4": { + NumPartitions: 4, + ReplicationFactor: 3, + }, + "test.64": { + NumPartitions: 64, + ReplicationFactor: 3, + }, + "uncommitted-topic-test-4": { + NumPartitions: 1, + ReplicationFactor: 3, + }, + } - proxyClient *toxiproxy.Client - Proxies map[string]*toxiproxy.Proxy + FunctionalTestEnv *testEnvironment ) -func init() { +func TestMain(m *testing.M) { + // Functional tests for Sarama + // + // You can either set TOXIPROXY_ADDR, which points at a toxiproxy address + // already set up with 21801-21805 bound to zookeeper and 29091-29095 + // bound to kafka. Alternatively, if TOXIPROXY_ADDR is not set, we'll try + // and use Docker to bring up a 5-node zookeeper cluster & 5-node kafka + // cluster, with toxiproxy configured as above. 
+ // + // In either case, the following topics will be deleted (if they exist) and + // then created/pre-seeded with data for the functional test run: + // * uncommitted-topic-test-4 + // * test.1 + // * test.4 + // * test.64 + os.Exit(testMain(m)) +} + +func testMain(m *testing.M) int { + ctx := context.Background() + var env testEnvironment + if os.Getenv("DEBUG") == "true" { Logger = log.New(os.Stdout, "[sarama] ", log.LstdFlags) } - seed := time.Now().UTC().UnixNano() - if tmp := os.Getenv("TEST_SEED"); tmp != "" { - seed, _ = strconv.ParseInt(tmp, 0, 64) + usingExisting, err := existingEnvironment(ctx, &env) + if err != nil { + panic(err) + } + if !usingExisting { + err := prepareDockerTestEnvironment(ctx, &env) + if err != nil { + tearDownDockerTestEnvironment(ctx, &env) + panic(err) + } + defer tearDownDockerTestEnvironment(ctx, &env) } - Logger.Println("Using random seed:", seed) - rand.Seed(seed) + if err := prepareTestTopics(ctx, &env); err != nil { + panic(err) + } + FunctionalTestEnv = &env + return m.Run() +} + +type testEnvironment struct { + ToxiproxyClient *toxiproxy.Client + Proxies map[string]*toxiproxy.Proxy + KafkaBrokerAddrs []string + KafkaVersion string +} + +func prepareDockerTestEnvironment(ctx context.Context, env *testEnvironment) error { + Logger.Println("bringing up docker-based test environment") - proxyAddr := os.Getenv("TOXIPROXY_ADDR") - if proxyAddr == "" { - proxyAddr = VagrantToxiproxy + // Always (try to) tear down first. + if err := tearDownDockerTestEnvironment(ctx, env); err != nil { + return fmt.Errorf("failed to tear down existing env: %w", err) } - proxyClient = toxiproxy.NewClient(proxyAddr) - kafkaPeers := os.Getenv("KAFKA_PEERS") - if kafkaPeers == "" { - kafkaPeers = VagrantKafkaPeers + c := exec.Command("docker-compose", "up", "-d") + c.Stdout = os.Stdout + c.Stderr = os.Stderr + err := c.Run() + if err != nil { + return fmt.Errorf("failed to run docker-compose to start test enviroment: %w", err) } - kafkaBrokers = strings.Split(kafkaPeers, ",") - if c, err := net.DialTimeout("tcp", kafkaBrokers[0], 5*time.Second); err == nil { - if err = c.Close(); err == nil { - kafkaAvailable = true + // Set up toxiproxy Proxies + env.ToxiproxyClient = toxiproxy.NewClient("localhost:8474") + env.Proxies = map[string]*toxiproxy.Proxy{} + for i := 1; i <= 5; i++ { + proxyName := fmt.Sprintf("kafka%d", i) + proxy, err := env.ToxiproxyClient.CreateProxy( + proxyName, + fmt.Sprintf("0.0.0.0:%d", 29090 + i), + fmt.Sprintf("kafka-%d:%d", i, 29090 + i), + ) + if err != nil { + return fmt.Errorf("failed to create toxiproxy: %w", err) } + env.Proxies[proxyName] = proxy + env.KafkaBrokerAddrs = append(env.KafkaBrokerAddrs, fmt.Sprintf("127.0.0.1:%d", 29090 + i)) } - kafkaRequired = os.Getenv("CI") != "" + // the mapping of confluent platform docker image vesions -> kafka versions can be + // found here: https://docs.confluent.io/current/installation/versions-interoperability.html + // We have cp-5.5.0 in the docker-compose file, so that's kafka 2.5.0.
+ env.KafkaVersion = "2.5.0" + + // Wait for the kafka broker to come up + allBrokersUp := false + for i := 0; i < 45 && !allBrokersUp; i++ { + Logger.Println("waiting for kafka brokers to come up") + time.Sleep(1 * time.Second) + config := NewConfig() + config.Version, err = ParseKafkaVersion(env.KafkaVersion) + if err != nil { + return err + } + config.Net.DialTimeout = 1 * time.Second + config.Net.ReadTimeout = 1 * time.Second + config.Net.WriteTimeout = 1 * time.Second + config.ClientID = "sarama-tests" + brokersOk := make([]bool, len(env.KafkaBrokerAddrs)) + retryLoop: + for j, addr := range env.KafkaBrokerAddrs { + client, err := NewClient([]string{addr},config) + if err != nil { + continue + } + err = client.RefreshMetadata() + if err != nil { + continue + } + brokers := client.Brokers() + if len(brokers) < 5 { + continue + } + for _, broker := range brokers { + err := broker.Open(client.Config()) + if err != nil { + continue retryLoop + } + connected, err := broker.Connected() + if err != nil || !connected { + continue retryLoop + } + } + brokersOk[j] = true + } + allBrokersUp = true + for _, u := range brokersOk { + allBrokersUp = allBrokersUp && u + } + } + if !allBrokersUp { + return fmt.Errorf("timed out waiting for broker to come up") + } + + return nil } -func checkKafkaAvailability(t testing.TB) { - if !kafkaAvailable { - if kafkaRequired { - t.Fatalf("Kafka broker is not available on %s. Set KAFKA_PEERS to connect to Kafka on a different location.", kafkaBrokers[0]) - } else { - t.Skipf("Kafka broker is not available on %s. Set KAFKA_PEERS to connect to Kafka on a different location.", kafkaBrokers[0]) +func existingEnvironment(ctx context.Context, env *testEnvironment) (bool, error) { + toxiproxyAddr, ok := os.LookupEnv("TOXIPROXY_ADDR") + if !ok { + return false, nil + } + toxiproxyURL, err := url.Parse(toxiproxyAddr) + if err != nil { + return false, fmt.Errorf("$TOXIPROXY_ADDR not parseable as url") + } + toxiproxyHost := toxiproxyURL.Hostname() + + env.ToxiproxyClient = toxiproxy.NewClient(toxiproxyAddr) + for i := 1; i <= 5; i++ { + proxyName := fmt.Sprintf("kafka%d", i) + proxy, err := env.ToxiproxyClient.Proxy(proxyName) + if err != nil { + return false, fmt.Errorf("no proxy kafka%d on toxiproxy: %w", i, err) + } + env.Proxies[proxyName] = proxy + // get the host:port from the proxy & toxiproxy addr, so we can do "$toxiproxy_addr:$proxy_port" + _, proxyPort, err := net.SplitHostPort(proxy.Listen) + if err != nil { + return false, fmt.Errorf("proxy.Listen not a host:port combo: %w", err) + } + env.KafkaBrokerAddrs = append(env.KafkaBrokerAddrs, fmt.Sprintf("%s:%s", toxiproxyHost, proxyPort)) + } + + env.KafkaVersion, ok = os.LookupEnv("KAFKA_VERSION") + if !ok { + return false, fmt.Errorf("KAFKA_VERSION needs to be provided with TOXIPROXY_ADDR") + } + return true, nil +} + +func tearDownDockerTestEnvironment(ctx context.Context, env *testEnvironment) error { + c := exec.Command("docker-compose", "down", "--volumes") + c.Stdout = os.Stdout + c.Stderr = os.Stderr + downErr := c.Run() + + c = exec.Command("docker-compose", "rm", "-v", "--force", "--stop") + c.Stdout = os.Stdout + c.Stderr = os.Stderr + rmErr := c.Run() + if downErr != nil { + return fmt.Errorf("failed to run docker-compose to stop test enviroment: %w", downErr) + } + if rmErr != nil { + return fmt.Errorf("failed to run docker-compose to rm test enviroment: %w", rmErr) + } + return nil +} + +func prepareTestTopics(ctx context.Context, env *testEnvironment) error { + Logger.Println("creating test topics") 
+ var testTopicNames []string + for topic, _ := range testTopicDetails { + testTopicNames = append(testTopicNames, topic) + } + + Logger.Println("Creating topics") + config := NewConfig() + config.Metadata.Retry.Max = 5 + config.Metadata.Retry.Backoff = 10 * time.Second + config.ClientID = "sarama-tests" + var err error + config.Version, err = ParseKafkaVersion(env.KafkaVersion) + if err != nil { + return fmt.Errorf("failed to parse kafka version %s: %w", env.KafkaVersion, err) + } + + client, err := NewClient(env.KafkaBrokerAddrs, config) + if err != nil { + return fmt.Errorf("failed to connect to kafka: %w", err) + } + defer client.Close() + + controller, err := client.Controller() + if err != nil { + return fmt.Errorf("failed to connect to kafka controller: %w", err) + } + defer controller.Close() + + // Start by deleting the test topics (if they already exist) + deleteRes, err := controller.DeleteTopics(&DeleteTopicsRequest{ + Topics: testTopicNames, + Timeout: 30 * time.Second, + }) + if err != nil { + return fmt.Errorf("failed to delete test topics: %w", err) + } + for topic, topicErr := range deleteRes.TopicErrorCodes { + if !isTopicNotExistsErrorOrOk(topicErr) { + return fmt.Errorf("failed to delete topic %s: %w", topic, topicErr) + } + } + + // wait for the topics to _actually_ be gone - the delete is not guaranteed to be processed + // synchronously + var topicsOk bool + for i := 0; i < 20 && !topicsOk; i++ { + time.Sleep(1 * time.Second) + md, err := controller.GetMetadata(&MetadataRequest{ + Topics: testTopicNames, + }) + if err != nil { + return fmt.Errorf("failed to get metadata for test topics: %w", err) + } + + topicsOk = true + for _, topicsMd := range md.Topics { + if !isTopicNotExistsErrorOrOk(topicsMd.Err) { + topicsOk = false + } } } + if !topicsOk { + return fmt.Errorf("timed out waiting for test topics to be gone") + } + + // now create the topics empty + createRes, err := controller.CreateTopics(&CreateTopicsRequest{ + TopicDetails: testTopicDetails, + Timeout: 30 * time.Second, + }) + if err != nil { + return fmt.Errorf("failed to create test topics: %w", err) + } + for topic, topicErr := range createRes.TopicErrors { + if !isTopicExistsErrorOrOk(topicErr.Err) { + return fmt.Errorf("failed to create test topic %s: %w", topic, topicErr) + } + } + + // This is kind of gross, but we don't actually have support for doing transactional publishing + // with sarama, so we need to use a java-based tool to publish uncommitted messages to + // the uncommitted-topic-test-4 topic + jarName := filepath.Base(uncomittedMsgJar) + if _, err := os.Stat(jarName); err != nil { + Logger.Printf("Downloading %s\n", uncomittedMsgJar) + req, err := http.NewRequest("GET", uncomittedMsgJar, nil) + if err != nil { + return fmt.Errorf("failed creating request for uncomitted msg jar: %w", err) + } + res, err := http.DefaultClient.Do(req) + if err != nil { + return fmt.Errorf("failed fetching the uncommitted msg jar: %w", err) + } + defer res.Body.Close() + jarFile, err := os.OpenFile(jarName, os.O_WRONLY | os.O_TRUNC | os.O_CREATE, 0644) + if err != nil { + return fmt.Errorf("failed opening the uncomitted msg jar: %w", err) + } + defer jarFile.Close() + + _, err = io.Copy(jarFile, res.Body) + if err != nil { + return fmt.Errorf("failed writing the uncomitted msg jar: %w", err) + } + } + + c := exec.Command("java", "-jar", jarName, "-b", env.KafkaBrokerAddrs[0], "-c", "4") + c.Stdout = os.Stdout + c.Stderr = os.Stderr + err = c.Run() + if err != nil { + return fmt.Errorf("failed running uncomitted msg jar: %w", err) + } + return nil +} + +func isTopicNotExistsErrorOrOk(err KError) bool { + return err == ErrUnknownTopicOrPartition || err == ErrInvalidTopic || err == ErrNoError +} + +func isTopicExistsErrorOrOk(err KError) bool { + return err == ErrTopicAlreadyExists || err == ErrNoError } func checkKafkaVersion(t testing.TB, requiredVersion string) { - kafkaVersion := os.Getenv("KAFKA_VERSION") + kafkaVersion := FunctionalTestEnv.KafkaVersion if kafkaVersion == "" { - t.Logf("No KAFKA_VERSION set. This test requires Kafka version %s or higher. Continuing...", requiredVersion) + t.Skipf("No KAFKA_VERSION set. This test requires Kafka version %s or higher. Skipping...", requiredVersion) } else { available := parseKafkaVersion(kafkaVersion) required := parseKafkaVersion(requiredVersion) @@ -84,30 +383,19 @@ func checkKafkaVersion(t testing.TB, requiredVersion string) { } func resetProxies(t testing.TB) { - if err := proxyClient.ResetState(); err != nil { + if err := FunctionalTestEnv.ToxiproxyClient.ResetState(); err != nil { t.Error(err) } - Proxies = nil -} - -func fetchProxies(t testing.TB) { - var err error - Proxies, err = proxyClient.Proxies() - if err != nil { - t.Fatal(err) - } } func SaveProxy(t *testing.T, px string) { - if err := Proxies[px].Save(); err != nil { + if err := FunctionalTestEnv.Proxies[px].Save(); err != nil { t.Fatal(err) } } func setupFunctionalTest(t testing.TB) { - checkKafkaAvailability(t) resetProxies(t) - fetchProxies(t) } func teardownFunctionalTest(t testing.TB) {
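With the harness above in place, a functional test opts in via the `functional` build tag and the setup/teardown helpers, so plain `go test ./...` never needs Docker or a live cluster. A minimal sketch of such a test (the test name and body are illustrative, not part of the patch series):

//+build functional

package sarama

import "testing"

// TestFuncExample compiles and runs only when invoked as
// `go test -tags=functional`; setupFunctionalTest resets the toxiproxy
// state before the test body touches the cluster.
func TestFuncExample(t *testing.T) {
	setupFunctionalTest(t)
	defer teardownFunctionalTest(t)

	client, err := NewClient(FunctionalTestEnv.KafkaBrokerAddrs, nil)
	if err != nil {
		t.Fatal(err)
	}
	defer client.Close()
}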
From 75ff946b5267463981395104c6988cb6b3a764d7 Mon Sep 17 00:00:00 2001 From: KJ Tsanaktsidis Date: Sun, 10 May 2020 16:02:36 +1000 Subject: [PATCH 25/41] Map confluent platform -> kafka version --- Makefile | 4 ++++ docker-compose.yml | 16 ++++++++-------- functional_test.go | 21 +++++++++++++++++++++ 3 files changed, 33 insertions(+), 8 deletions(-) diff --git a/Makefile b/Makefile index c3b431a56..18981cec9 100644 --- a/Makefile +++ b/Makefile @@ -25,3 +25,7 @@ lint: test: $(GOTEST) ./... + +.PHONY: test_functional +test_functional: + $(GOTEST) -tags=functional ./...
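The docker-compose changes below lean on compose's standard `${VAR:-default}` substitution: the CONFLUENT_PLATFORM_VERSION exported by the test harness wins, and 5.5.0 is the fallback. A small Go sketch of the same lookup-with-default pattern (the function name is illustrative, not part of the patch):

package main

import (
	"fmt"
	"os"
)

// envOrDefault mimics docker-compose's ${VAR:-default} substitution:
// the fallback is used when the variable is unset or empty.
func envOrDefault(key, fallback string) string {
	if v, ok := os.LookupEnv(key); ok && v != "" {
		return v
	}
	return fallback
}

func main() {
	fmt.Println(envOrDefault("CONFLUENT_PLATFORM_VERSION", "5.5.0"))
}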
diff --git a/docker-compose.yml b/docker-compose.yml index 03425a6f9..25593fd3b 100644 --- a/docker-compose.yml +++ b/docker-compose.yml @@ -1,7 +1,7 @@ version: '3.7' services: zookeeper-1: - image: 'confluentinc/cp-zookeeper:5.5.0' + image: 'confluentinc/cp-zookeeper:${CONFLUENT_PLATFORM_VERSION:-5.5.0}' restart: always environment: ZOOKEEPER_SERVER_ID: '1' @@ -13,7 +13,7 @@ services: ZOOKEEPER_SYNC_LIMIT: '5' ZOOKEEPER_MAX_CLIENT_CONNS: '0' zookeeper-2: - image: 'confluentinc/cp-zookeeper:5.5.0' + image: 'confluentinc/cp-zookeeper:${CONFLUENT_PLATFORM_VERSION:-5.5.0}' restart: always environment: ZOOKEEPER_SERVER_ID: '2' @@ -25,7 +25,7 @@ services: ZOOKEEPER_SYNC_LIMIT: '5' ZOOKEEPER_MAX_CLIENT_CONNS: '0' zookeeper-3: - image: 'confluentinc/cp-zookeeper:5.5.0' + image: 'confluentinc/cp-zookeeper:${CONFLUENT_PLATFORM_VERSION:-5.5.0}' restart: always environment: ZOOKEEPER_SERVER_ID: '3' @@ -37,7 +37,7 @@ services: ZOOKEEPER_SYNC_LIMIT: '5' ZOOKEEPER_MAX_CLIENT_CONNS: '0' kafka-1: - image: 'confluentinc/cp-kafka:5.5.0' + image: 'confluentinc/cp-kafka:${CONFLUENT_PLATFORM_VERSION:-5.5.0}' restart: always environment: KAFKA_ZOOKEEPER_CONNECT: 'zookeeper-1:2181,zookeeper-2:2181,zookeeper-3:2181' @@ -54,7 +54,7 @@ services: KAFKA_DELETE_TOPIC_ENABLE: 'true' KAFKA_AUTO_CREATE_TOPICS_ENABLE: 'false' kafka-2: - image: 'confluentinc/cp-kafka:5.5.0' + image: 'confluentinc/cp-kafka:${CONFLUENT_PLATFORM_VERSION:-5.5.0}' restart: always environment: KAFKA_ZOOKEEPER_CONNECT: 'zookeeper-1:2181,zookeeper-2:2181,zookeeper-3:2181' @@ -71,7 +71,7 @@ services: KAFKA_DELETE_TOPIC_ENABLE: 'true' KAFKA_AUTO_CREATE_TOPICS_ENABLE: 'false' kafka-3: - image: 'confluentinc/cp-kafka:5.5.0' + image: 'confluentinc/cp-kafka:${CONFLUENT_PLATFORM_VERSION:-5.5.0}' restart: always environment: KAFKA_ZOOKEEPER_CONNECT: 'zookeeper-1:2181,zookeeper-2:2181,zookeeper-3:2181' @@ -88,7 +88,7 @@ services: KAFKA_DELETE_TOPIC_ENABLE: 'true' KAFKA_AUTO_CREATE_TOPICS_ENABLE: 'false' kafka-4: - image: 'confluentinc/cp-kafka:5.5.0' + image: 'confluentinc/cp-kafka:${CONFLUENT_PLATFORM_VERSION:-5.5.0}' restart: always environment: KAFKA_ZOOKEEPER_CONNECT: 'zookeeper-1:2181,zookeeper-2:2181,zookeeper-3:2181' @@ -105,7 +105,7 @@ services: KAFKA_DELETE_TOPIC_ENABLE: 'true' KAFKA_AUTO_CREATE_TOPICS_ENABLE: 'false' kafka-5: - image: 'confluentinc/cp-kafka:5.5.0' + image: 'confluentinc/cp-kafka:${CONFLUENT_PLATFORM_VERSION:-5.5.0}' restart: always environment: KAFKA_ZOOKEEPER_CONNECT: 'zookeeper-1:2181,zookeeper-2:2181,zookeeper-3:2181' diff --git a/functional_test.go b/functional_test.go index e56182be0..0e0c9e216 100644 --- a/functional_test.go +++ b/functional_test.go @@ -107,9 +107,30 @@ func prepareDockerTestEnvironment(ctx context.Context, env *testEnvironment) err return fmt.Errorf("failed to tear down existing env: %w", err) } + if version, ok := os.LookupEnv("KAFKA_VERSION"); ok { + env.KafkaVersion = version + } else { + // We have cp-5.5.0 as the default in the docker-compose file, so that's kafka 2.5.0. 
+ env.KafkaVersion = "2.5.0" + } + + // the mapping of confluent platform docker image versions -> kafka versions can be + // found here: https://docs.confluent.io/current/installation/versions-interoperability.html + var confluentPlatformVersion string + switch env.KafkaVersion { + case "2.5.0": + confluentPlatformVersion = "5.5.0" + case "2.4.1": + confluentPlatformVersion = "5.4.2" + default: + return fmt.Errorf("don't know what confluent platform version to use for kafka %s", env.KafkaVersion) + } + + c := exec.Command("docker-compose", "up", "-d") c.Stdout = os.Stdout c.Stderr = os.Stderr + c.Env = append(os.Environ(), fmt.Sprintf("CONFLUENT_PLATFORM_VERSION=%s", confluentPlatformVersion)) err := c.Run() if err != nil { return fmt.Errorf("failed to run docker-compose to start test enviroment: %w", err) From ddb342c2a4143262cfdde12885f86ea4fbf08847 Mon Sep 17 00:00:00 2001 From: KJ Tsanaktsidis Date: Sun, 10 May 2020 16:11:14 +1000 Subject: [PATCH 26/41] Use the built-in docker compose support to run CI tests --- .github/workflows/ci.yml | 14 +------------- 1 file changed, 1 insertion(+), 13 deletions(-) diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index 116deb5e5..fca6cb201 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -14,13 +14,8 @@ jobs: platform: [ubuntu-latest] env: - KAFKA_PEERS: localhost:9091,localhost:9092,localhost:9093,localhost:9094,localhost:9095 - TOXIPROXY_ADDR: http://localhost:8474 - KAFKA_INSTALL_ROOT: /home/runner/kafka - KAFKA_HOSTNAME: localhost DEBUG: true KAFKA_VERSION: ${{ matrix.kafka-version }} - KAFKA_SCALA_VERSION: 2.12 steps: - uses: actions/checkout@v1 @@ -48,16 +43,9 @@ jobs: run: | curl -sSfL https://raw.githubusercontent.com/golangci/golangci-lint/master/install.sh | sh -s -- -b $(go env GOPATH)/bin v1.23.6 export REPOSITORY_ROOT=${GITHUB_WORKSPACE} - vagrant/install_cluster.sh - vagrant/boot_cluster.sh - vagrant/create_topics.sh - vagrant/run_java_producer.sh - name: Run test suite - run: make test + run: make test_functional - name: Run linter run: make lint - - - name: Teardown - run: vagrant/halt_cluster.sh From 42bbd066c0c6758fb18cd612e360f9fa0581308f Mon Sep 17 00:00:00 2001 From: KJ Tsanaktsidis Date: Sun, 10 May 2020 16:13:20 +1000 Subject: [PATCH 27/41] Delete vagrant test harness setup --- vagrant/create_topics.sh | 9 ---- vagrant/halt_cluster.sh | 25 ----------- vagrant/install_cluster.sh | 86 ------------------------------------ vagrant/kafka.conf | 9 ---- vagrant/provision.sh | 17 ------- vagrant/run_java_producer.sh | 6 --- vagrant/run_toxiproxy.sh | 22 --------- vagrant/setup_services.sh | 29 ------------ vagrant/toxiproxy.conf | 6 --- vagrant/zookeeper.conf | 7 --- vagrant/zookeeper.properties | 36 --------------- 11 files changed, 252 deletions(-) delete mode 100755 vagrant/create_topics.sh delete mode 100755 vagrant/halt_cluster.sh delete mode 100755 vagrant/install_cluster.sh delete mode 100644 vagrant/kafka.conf delete mode 100755 vagrant/provision.sh delete mode 100755 vagrant/run_java_producer.sh delete mode 100755 vagrant/run_toxiproxy.sh delete mode 100755 vagrant/setup_services.sh delete mode 100644 vagrant/toxiproxy.conf delete mode 100644 vagrant/zookeeper.conf delete mode 100644 vagrant/zookeeper.properties diff --git a/vagrant/create_topics.sh b/vagrant/create_topics.sh deleted file mode 100755 index 959d3a053..000000000 --- a/vagrant/create_topics.sh +++ /dev/null @@ -1,9 +0,0 @@ -#!/bin/sh - -set -ex - -cd ${KAFKA_INSTALL_ROOT}/kafka-9092 -bin/kafka-topics.sh --create 
--partitions 1 --replication-factor 3 --topic test.1 --zookeeper localhost:2181 -bin/kafka-topics.sh --create --partitions 4 --replication-factor 3 --topic test.4 --zookeeper localhost:2181 -bin/kafka-topics.sh --create --partitions 64 --replication-factor 3 --topic test.64 --zookeeper localhost:2181 -bin/kafka-topics.sh --create --partitions 1 --replication-factor 3 --topic uncommitted-topic-test-4 --zookeeper localhost:2181 \ No newline at end of file diff --git a/vagrant/halt_cluster.sh b/vagrant/halt_cluster.sh deleted file mode 100755 index e671c4812..000000000 --- a/vagrant/halt_cluster.sh +++ /dev/null @@ -1,25 +0,0 @@ -#!/bin/bash - -# If the functional tests failed (or some other task) then -# we might want to look into the broker logs -if [ "$TRAVIS_TEST_RESULT" = "1" ]; then - echo "Dumping Kafka brokers server.log:" - for i in 1 2 3 4 5; do - KAFKA_PORT=`expr $i + 9090` - sed -e "s/^/kafka-${KAFKA_PORT} /" ${KAFKA_INSTALL_ROOT}/kafka-${KAFKA_PORT}/logs/server.log{.*,} - done -fi - -set -ex - -for i in 1 2 3 4 5; do - KAFKA_PORT=`expr $i + 9090` - cd ${KAFKA_INSTALL_ROOT}/kafka-${KAFKA_PORT} && bin/kafka-server-stop.sh -done - -for i in 1 2 3 4 5; do - KAFKA_PORT=`expr $i + 9090` - cd ${KAFKA_INSTALL_ROOT}/kafka-${KAFKA_PORT} && bin/zookeeper-server-stop.sh -done - -killall toxiproxy diff --git a/vagrant/install_cluster.sh b/vagrant/install_cluster.sh deleted file mode 100755 index aa22261e4..000000000 --- a/vagrant/install_cluster.sh +++ /dev/null @@ -1,86 +0,0 @@ -#!/bin/sh - -set -ex - -TOXIPROXY_VERSION=2.1.4 - -mkdir -p ${KAFKA_INSTALL_ROOT} -if [ ! -f ${KAFKA_INSTALL_ROOT}/kafka-${KAFKA_VERSION}.tgz ]; then - wget --quiet https://archive.apache.org/dist/kafka/${KAFKA_VERSION}/kafka_${KAFKA_SCALA_VERSION}-${KAFKA_VERSION}.tgz -O ${KAFKA_INSTALL_ROOT}/kafka-${KAFKA_VERSION}.tgz -fi -if [ ! -f ${KAFKA_INSTALL_ROOT}/toxiproxy-${TOXIPROXY_VERSION} ]; then - wget --quiet https://github.com/Shopify/toxiproxy/releases/download/v${TOXIPROXY_VERSION}/toxiproxy-server-linux-amd64 -O ${KAFKA_INSTALL_ROOT}/toxiproxy-${TOXIPROXY_VERSION} - chmod +x ${KAFKA_INSTALL_ROOT}/toxiproxy-${TOXIPROXY_VERSION} -fi -rm -f ${KAFKA_INSTALL_ROOT}/toxiproxy -ln -s ${KAFKA_INSTALL_ROOT}/toxiproxy-${TOXIPROXY_VERSION} ${KAFKA_INSTALL_ROOT}/toxiproxy - -for i in 1 2 3 4 5; do - ZK_PORT=$((i + 2180)) - ZK_PORT_REAL=$((i + 21800)) - KAFKA_PORT=$((i + 9090)) - KAFKA_PORT_REAL=$((i + 29090)) - - # unpack kafka - mkdir -p ${KAFKA_INSTALL_ROOT}/kafka-${KAFKA_PORT} - tar xzf ${KAFKA_INSTALL_ROOT}/kafka-${KAFKA_VERSION}.tgz -C ${KAFKA_INSTALL_ROOT}/kafka-${KAFKA_PORT} --strip-components 1 - - # broker configuration - mkdir -p "${KAFKA_INSTALL_ROOT}/kafka-${KAFKA_PORT}/data" - - # Append to default server.properties with a small number of customisations - printf "\n\n" >> "${KAFKA_INSTALL_ROOT}/kafka-${KAFKA_PORT}/config/server.properties" - cat << EOF >> "${KAFKA_INSTALL_ROOT}/kafka-${KAFKA_PORT}/config/server.properties" -############################# Sarama Test Cluster ############################# - -broker.id=${KAFKA_PORT} -broker.rack=${i} - -# Listen on "real" port -listeners=PLAINTEXT://:${KAFKA_PORT_REAL} -# Advertise Toxiproxy port -advertised.listeners=PLAINTEXT://${KAFKA_HOSTNAME}:${KAFKA_PORT} - -# Connect to Zookeeper via Toxiproxy port -zookeeper.connect=127.0.0.1:${ZK_PORT} - -# Data directory -log.dirs="${KAFKA_INSTALL_ROOT}/kafka-${KAFKA_PORT}/data" - -# Create new topics with a replication factor of 2 so failover can be tested -# more easily. 
-default.replication.factor=2 - -# Turn on log.retention.bytes to avoid filling up the VM's disk -log.retention.bytes=268435456 -log.segment.bytes=268435456 - -# Enable topic deletion and disable auto-creation -delete.topic.enable=true -auto.create.topics.enable=false - -# Lower the zookeeper timeouts so we don't have to wait forever for a node -# to die when we use toxiproxy to kill its zookeeper connection -zookeeper.session.timeout.ms=3000 -zookeeper.connection.timeout.ms=3000 - -# Disable broker ID length constraint -reserved.broker.max.id=10000 - -# Permit follower fetching (KIP-392) -replica.selector.class=org.apache.kafka.common.replica.RackAwareReplicaSelector - -############################################################################### -EOF - - # zookeeper configuration - cp ${REPOSITORY_ROOT}/vagrant/zookeeper.properties ${KAFKA_INSTALL_ROOT}/kafka-${KAFKA_PORT}/config/ - sed -i s/KAFKAID/${KAFKA_PORT}/g ${KAFKA_INSTALL_ROOT}/kafka-${KAFKA_PORT}/config/zookeeper.properties - sed -i s/ZK_PORT/${ZK_PORT_REAL}/g ${KAFKA_INSTALL_ROOT}/kafka-${KAFKA_PORT}/config/zookeeper.properties - - ZK_DATADIR="${KAFKA_INSTALL_ROOT}/zookeeper-${ZK_PORT}" - mkdir -p ${ZK_DATADIR} - sed -i s#ZK_DATADIR#${ZK_DATADIR}#g ${KAFKA_INSTALL_ROOT}/kafka-${KAFKA_PORT}/config/zookeeper.properties - - echo $i > ${KAFKA_INSTALL_ROOT}/zookeeper-${ZK_PORT}/myid -done diff --git a/vagrant/kafka.conf b/vagrant/kafka.conf deleted file mode 100644 index 25101df5a..000000000 --- a/vagrant/kafka.conf +++ /dev/null @@ -1,9 +0,0 @@ -start on started zookeeper-ZK_PORT -stop on stopping zookeeper-ZK_PORT - -# Use a script instead of exec (using env stanza leaks KAFKA_HEAP_OPTS from zookeeper) -script - sleep 2 - export KAFKA_HEAP_OPTS="-Xmx320m" - exec /opt/kafka-KAFKAID/bin/kafka-server-start.sh /opt/kafka-KAFKAID/config/server.properties -end script diff --git a/vagrant/provision.sh b/vagrant/provision.sh deleted file mode 100755 index 7f10de74a..000000000 --- a/vagrant/provision.sh +++ /dev/null @@ -1,17 +0,0 @@ -#!/bin/sh - -set -ex - -apt-get update -yes | apt-get install default-jre - -export KAFKA_INSTALL_ROOT=/opt -export KAFKA_HOSTNAME=192.168.100.67 -export KAFKA_VERSION=1.0.2 -export KAFKA_SCALA_VERSION=2.11 -export REPOSITORY_ROOT=/vagrant - -sh /vagrant/vagrant/install_cluster.sh -sh /vagrant/vagrant/setup_services.sh -sh /vagrant/vagrant/create_topics.sh -sh /vagrant/vagrant/run_java_producer.sh diff --git a/vagrant/run_java_producer.sh b/vagrant/run_java_producer.sh deleted file mode 100755 index 5851b7484..000000000 --- a/vagrant/run_java_producer.sh +++ /dev/null @@ -1,6 +0,0 @@ -#!/bin/sh - -set -ex - -wget https://github.com/FrancoisPoinsot/simplest-uncommitted-msg/releases/download/0.1/simplest-uncommitted-msg-0.1-jar-with-dependencies.jar -java -jar simplest-uncommitted-msg-0.1-jar-with-dependencies.jar -b ${KAFKA_HOSTNAME}:9092 -c 4 \ No newline at end of file diff --git a/vagrant/run_toxiproxy.sh b/vagrant/run_toxiproxy.sh deleted file mode 100755 index e52c00e7b..000000000 --- a/vagrant/run_toxiproxy.sh +++ /dev/null @@ -1,22 +0,0 @@ -#!/bin/sh - -set -ex - -${KAFKA_INSTALL_ROOT}/toxiproxy -port 8474 -host 0.0.0.0 & -PID=$! - -while ! 
nc -q 1 localhost 8474 Date: Sun, 10 May 2020 17:41:42 +1000 Subject: [PATCH 28/41] Run the linter across functional tests as well --- Makefile | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/Makefile b/Makefile index 18981cec9..a05863480 100644 --- a/Makefile +++ b/Makefile @@ -21,7 +21,7 @@ fmt: gofmt -s -l -w $(FILES) $(TESTS) lint: - golangci-lint run + GOFLAGS="-tags=functional" golangci-lint run test: $(GOTEST) ./... From a9e0b4f16371e7c1c065fcc91b0fc081157361d2 Mon Sep 17 00:00:00 2001 From: KJ Tsanaktsidis Date: Sun, 10 May 2020 18:55:21 +1000 Subject: [PATCH 29/41] Make linter pass on functional tests --- functional_test.go | 32 ++++++++++++++++---------------- 1 file changed, 16 insertions(+), 16 deletions(-) diff --git a/functional_test.go b/functional_test.go index 0e0c9e216..2391725a4 100644 --- a/functional_test.go +++ b/functional_test.go @@ -5,7 +5,6 @@ package sarama import ( "context" "fmt" - toxiproxy "github.com/Shopify/toxiproxy/client" "io" "log" "net" @@ -18,6 +17,8 @@ import ( "strings" "testing" "time" + + toxiproxy "github.com/Shopify/toxiproxy/client" ) const ( @@ -27,19 +28,19 @@ const ( var ( testTopicDetails = map[string]*TopicDetail{ "test.1": { - NumPartitions: 1, + NumPartitions: 1, ReplicationFactor: 3, }, "test.4": { - NumPartitions: 4, + NumPartitions: 4, ReplicationFactor: 3, }, "test.64": { - NumPartitions: 64, + NumPartitions: 64, ReplicationFactor: 3, }, "uncommitted-topic-test-4": { - NumPartitions: 1, + NumPartitions: 1, ReplicationFactor: 3, }, } @@ -80,10 +81,10 @@ func testMain(m *testing.M) int { if !usingExisting { err := prepareDockerTestEnvironment(ctx, &env) if err != nil { - tearDownDockerTestEnvironment(ctx, &env) + _ = tearDownDockerTestEnvironment(ctx, &env) panic(err) } - defer tearDownDockerTestEnvironment(ctx, &env) + defer tearDownDockerTestEnvironment(ctx, &env) // nolint:errcheck } if err := prepareTestTopics(ctx, &env); err != nil { panic(err) @@ -126,7 +127,6 @@ func prepareDockerTestEnvironment(ctx context.Context, env *testEnvironment) err return fmt.Errorf("don't know what confluent platform version to use for kafka %s", env.KafkaVersion) } - c := exec.Command("docker-compose", "up", "-d") c.Stdout = os.Stdout c.Stderr = os.Stderr @@ -143,14 +143,14 @@ func prepareDockerTestEnvironment(ctx context.Context, env *testEnvironment) err proxyName := fmt.Sprintf("kafka%d", i) proxy, err := env.ToxiproxyClient.CreateProxy( proxyName, - fmt.Sprintf("0.0.0.0:%d", 29090 + i), - fmt.Sprintf("kafka-%d:%d", i, 29090 + i), + fmt.Sprintf("0.0.0.0:%d", 29090+i), + fmt.Sprintf("kafka-%d:%d", i, 29090+i), ) if err != nil { return fmt.Errorf("failed to create toxiproxy: %w", err) } env.Proxies[proxyName] = proxy - env.KafkaBrokerAddrs = append(env.KafkaBrokerAddrs, fmt.Sprintf("127.0.0.1:%d", 29090 + i)) + env.KafkaBrokerAddrs = append(env.KafkaBrokerAddrs, fmt.Sprintf("127.0.0.1:%d", 29090+i)) } // the mapping of confluent platform docker image vesions -> kafka versions can be @@ -175,7 +175,7 @@ func prepareDockerTestEnvironment(ctx context.Context, env *testEnvironment) err brokersOk := make([]bool, len(env.KafkaBrokerAddrs)) retryLoop: for j, addr := range env.KafkaBrokerAddrs { - client, err := NewClient([]string{addr},config) + client, err := NewClient([]string{addr}, config) if err != nil { continue } @@ -267,7 +267,7 @@ func tearDownDockerTestEnvironment(ctx context.Context, env *testEnvironment) er func prepareTestTopics(ctx context.Context, env *testEnvironment) error { Logger.Println("creating test topics") var 
testTopicNames []string - for topic, _ := range testTopicDetails { + for topic := range testTopicDetails { testTopicNames = append(testTopicNames, topic) } @@ -296,7 +296,7 @@ func prepareTestTopics(ctx context.Context, env *testEnvironment) error { // Start by deleting the test topics (if they already exist) deleteRes, err := controller.DeleteTopics(&DeleteTopicsRequest{ - Topics: testTopicNames, + Topics: testTopicNames, Timeout: 30 * time.Second, }) if err != nil { @@ -334,7 +334,7 @@ func prepareTestTopics(ctx context.Context, env *testEnvironment) error { // now create the topics empty createRes, err := controller.CreateTopics(&CreateTopicsRequest{ TopicDetails: testTopicDetails, - Timeout: 30 * time.Second, + Timeout: 30 * time.Second, }) if err != nil { return fmt.Errorf("failed to create test topics: %w", err) @@ -360,7 +360,7 @@ func prepareTestTopics(ctx context.Context, env *testEnvironment) error { return fmt.Errorf("failed fetching the uncommitted msg jar: %w", err) } defer res.Body.Close() - jarFile, err := os.OpenFile(jarName, os.O_WRONLY | os.O_TRUNC | os.O_CREATE, 0644) + jarFile, err := os.OpenFile(jarName, os.O_WRONLY|os.O_TRUNC|os.O_CREATE, 0644) if err != nil { return fmt.Errorf("failed opening the uncomitted msg jar: %w", err) } From c0335976b29fd7132b4987269a9103cb5f364eca Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?No=C3=A9mi=20V=C3=A1nyi?= Date: Mon, 11 May 2020 13:40:07 +0200 Subject: [PATCH 30/41] Revert "removes gofork direct usage as fix is part of go" This reverts commit bfff38f6e549a57ac7209f146d3b271ebcf6e044. --- go.mod | 2 +- gssapi_kerberos.go | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/go.mod b/go.mod index 1dca1cc33..d3a93763a 100644 --- a/go.mod +++ b/go.mod @@ -13,7 +13,7 @@ require ( github.com/golang/snappy v0.0.1 // indirect github.com/google/go-cmp v0.4.0 // indirect github.com/hashicorp/go-uuid v1.0.2 // indirect - github.com/jcmturner/gofork v1.0.0 // indirect + github.com/jcmturner/gofork v1.0.0 github.com/klauspost/compress v1.9.8 github.com/kr/pretty v0.2.0 // indirect github.com/pierrec/lz4 v2.4.1+incompatible diff --git a/gssapi_kerberos.go b/gssapi_kerberos.go index 98be0f508..d585f1a2b 100644 --- a/gssapi_kerberos.go +++ b/gssapi_kerberos.go @@ -1,13 +1,13 @@ package sarama import ( - "encoding/asn1" "encoding/binary" "fmt" "io" "strings" "time" + "github.com/jcmturner/gofork/encoding/asn1" "gopkg.in/jcmturner/gokrb5.v7/asn1tools" "gopkg.in/jcmturner/gokrb5.v7/gssapi" "gopkg.in/jcmturner/gokrb5.v7/iana/chksumtype" From d871f336e630a41a3ed0e238d921d54225856b87 Mon Sep 17 00:00:00 2001 From: Diego Alvarez Date: Tue, 12 May 2020 15:22:50 -0700 Subject: [PATCH 31/41] Set server name only for the current broker Fixes https://github.com/Shopify/sarama/issues/1700 --- broker.go | 42 +++++++++++++++++++++++------------------- client_tls_test.go | 15 +++++++++++++++ 2 files changed, 38 insertions(+), 19 deletions(-) diff --git a/broker.go b/broker.go index cda5da8f7..916dd0d6e 100644 --- a/broker.go +++ b/broker.go @@ -162,29 +162,11 @@ func (b *Broker) Open(conf *Config) error { atomic.StoreInt32(&b.opened, 0) return } - if conf.Net.TLS.Enable { - Logger.Printf("Using tls") - cfg := conf.Net.TLS.Config - if cfg == nil { - cfg = &tls.Config{} - } - // If no ServerName is set, infer the ServerName - // from the hostname we're connecting to. - // Gets the hostname as tls.DialWithDialer does it. 
- if cfg.ServerName == "" { - colonPos := strings.LastIndex(b.addr, ":") - if colonPos == -1 { - colonPos = len(b.addr) - } - hostname := b.addr[:colonPos] - cfg.ServerName = hostname - } - b.conn = tls.Client(b.conn, cfg) + b.conn = tls.Client(b.conn, validServerNameTLS(b.addr, conf.Net.TLS.Config)) } b.conn = newBufConn(b.conn) - b.conf = conf // Create or reuse the global metrics shared between brokers @@ -1440,3 +1422,25 @@ func (b *Broker) registerCounter(name string) metrics.Counter { b.registeredMetrics = append(b.registeredMetrics, nameForBroker) return metrics.GetOrRegisterCounter(nameForBroker, b.conf.MetricRegistry) } + +func validServerNameTLS(addr string, conf *tls.Config) *tls.Config { + cfg := conf + if cfg == nil { + cfg = &tls.Config{} + } + // If no ServerName is set, infer the ServerName + // from the hostname we're connecting to. + // Gets the hostname as tls.DialWithDialer does it. + if cfg.ServerName == "" { + colonPos := strings.LastIndex(addr, ":") + if colonPos == -1 { + colonPos = len(addr) + } + hostname := addr[:colonPos] + // Make a copy to avoid polluting argument or default. + c := cfg.Clone() + c.ServerName = hostname + cfg = c + } + return cfg +} diff --git a/client_tls_test.go b/client_tls_test.go index 750145610..2d9d8fdd5 100644 --- a/client_tls_test.go +++ b/client_tls_test.go @@ -210,3 +210,18 @@ func doListenerTLSTest(t *testing.T, expectSuccess bool, serverConfig, clientCon } } } + +func TestSetServerName(t *testing.T) { + if validServerNameTLS("kafka-server.domain.com", nil).ServerName != "kafka-server.domain.com" { + t.Fatal("Expected kafka-server.domain.com as tls.ServerName when tls config is nil") + } + + if validServerNameTLS("kafka-server.domain.com", &tls.Config{}).ServerName != "kafka-server.domain.com" { + t.Fatal("Expected kafka-server.domain.com as tls.ServerName when tls config ServerName is not provided") + } + + c := &tls.Config{ServerName: "kafka-server-other.domain.com"} + if validServerNameTLS("", c).ServerName != "kafka-server-other.domain.com" { + t.Fatal("Expected kafka-server-other.domain.com as tls.ServerName when tls config ServerName is provided") + } +} From 1628dc1dbfa4b0676d77af43b8c0b9f17d7a59e0 Mon Sep 17 00:00:00 2001 From: Diego Alvarez Date: Thu, 14 May 2020 16:10:05 -0700 Subject: [PATCH 32/41] use net.SplitHostPort and don't separate conf var --- broker.go | 18 ++++++------------ client_tls_test.go | 8 ++++++-- 2 files changed, 12 insertions(+), 14 deletions(-) diff --git a/broker.go b/broker.go index 916dd0d6e..e8bb2ed10 100644 --- a/broker.go +++ b/broker.go @@ -1423,23 +1423,17 @@ func (b *Broker) registerCounter(name string) metrics.Counter { return metrics.GetOrRegisterCounter(nameForBroker, b.conf.MetricRegistry) } -func validServerNameTLS(addr string, conf *tls.Config) *tls.Config { - cfg := conf +func validServerNameTLS(addr string, cfg *tls.Config) *tls.Config { if cfg == nil { cfg = &tls.Config{} } - // If no ServerName is set, infer the ServerName - // from the hostname we're connecting to. - // Gets the hostname as tls.DialWithDialer does it. if cfg.ServerName == "" { - colonPos := strings.LastIndex(addr, ":") - if colonPos == -1 { - colonPos = len(addr) - } - hostname := addr[:colonPos] - // Make a copy to avoid polluting argument or default. 
 		c := cfg.Clone()
-		c.ServerName = hostname
+		sn, _, err := net.SplitHostPort(addr)
+		if err != nil {
+			Logger.Println(fmt.Errorf("failed to get ServerName from addr: %w", err))
+		}
+		c.ServerName = sn
 		cfg = c
 	}
 	return cfg
diff --git a/client_tls_test.go b/client_tls_test.go
index 2d9d8fdd5..9731e44b7 100644
--- a/client_tls_test.go
+++ b/client_tls_test.go
@@ -212,11 +212,11 @@ func doListenerTLSTest(t *testing.T, expectSuccess bool, serverConfig, clientCon
 	}
 }
 
 func TestSetServerName(t *testing.T) {
-	if validServerNameTLS("kafka-server.domain.com", nil).ServerName != "kafka-server.domain.com" {
+	if validServerNameTLS("kafka-server.domain.com:9093", nil).ServerName != "kafka-server.domain.com" {
 		t.Fatal("Expected kafka-server.domain.com as tls.ServerName when tls config is nil")
 	}
 
-	if validServerNameTLS("kafka-server.domain.com", &tls.Config{}).ServerName != "kafka-server.domain.com" {
+	if validServerNameTLS("kafka-server.domain.com:9093", &tls.Config{}).ServerName != "kafka-server.domain.com" {
 		t.Fatal("Expected kafka-server.domain.com as tls.ServerName when tls config ServerName is not provided")
 	}
 
@@ -224,4 +224,8 @@ func TestSetServerName(t *testing.T) {
 	c := &tls.Config{ServerName: "kafka-server-other.domain.com"}
 	if validServerNameTLS("", c).ServerName != "kafka-server-other.domain.com" {
 		t.Fatal("Expected kafka-server-other.domain.com as tls.ServerName when tls config ServerName is provided")
 	}
+
+	if validServerNameTLS("host-no-port", nil).ServerName != "" {
+		t.Fatal("Expected empty ServerName as the broker addr is missing the port")
+	}
 }
From 5ff9581b104a24f3b99dfa18704ada932af74a01 Mon Sep 17 00:00:00 2001
From: Diego Alvarez
Date: Thu, 14 May 2020 16:14:48 -0700
Subject: [PATCH 33/41] more go like code, returning earlier

---
 broker.go | 19 ++++++++++---------
 1 file changed, 10 insertions(+), 9 deletions(-)

diff --git a/broker.go b/broker.go
index e8bb2ed10..232559aea 100644
--- a/broker.go
+++ b/broker.go
@@ -1427,14 +1427,15 @@ func validServerNameTLS(addr string, cfg *tls.Config) *tls.Config {
 	if cfg == nil {
 		cfg = &tls.Config{}
 	}
-	if cfg.ServerName == "" {
-		c := cfg.Clone()
-		sn, _, err := net.SplitHostPort(addr)
-		if err != nil {
-			Logger.Println(fmt.Errorf("failed to get ServerName from addr: %w", err))
-		}
-		c.ServerName = sn
-		cfg = c
+	if cfg.ServerName != "" {
+		return cfg
+	}
+
+	c := cfg.Clone()
+	sn, _, err := net.SplitHostPort(addr)
+	if err != nil {
+		Logger.Println(fmt.Errorf("failed to get ServerName from addr: %w", err))
 	}
-	return cfg
+	c.ServerName = sn
+	return c
 }
From 97156e80365749a3263a84de568b112d9b02c986 Mon Sep 17 00:00:00 2001
From: Vlad Gorodetsky
Date: Tue, 2 Jun 2020 09:57:21 +0300
Subject: [PATCH 34/41] Bump Go to version 1.14.3

---
 dev.yml |  2 +-
 go.mod  | 23 ++++++++++++-----------
 go.sum  | 51 ++++++++++++++++++++++++++++-----------------------
 3 files changed, 41 insertions(+), 35 deletions(-)

diff --git a/dev.yml b/dev.yml
index 57b2d3ca8..6468f1549 100644
--- a/dev.yml
+++ b/dev.yml
@@ -2,7 +2,7 @@ name: sarama
 
 up:
   - go:
-      version: '1.14'
+      version: '1.14.3'
 
 commands:
   test:
diff --git a/go.mod b/go.mod
index d3a93763a..ee12ba1ff 100644
--- a/go.mod
+++ b/go.mod
@@ -9,26 +9,27 @@ require (
 	github.com/eapache/go-xerial-snappy v0.0.0-20180814174437-776d5712da21
 	github.com/eapache/queue v1.1.0
 	github.com/fortytw2/leaktest v1.3.0
-	github.com/frankban/quicktest v1.7.2 // indirect
+	github.com/frankban/quicktest v1.10.0 // indirect
 	github.com/golang/snappy v0.0.1 // indirect
-	github.com/google/go-cmp v0.4.0 // indirect
+	github.com/google/go-cmp v0.4.1 // indirect
 	github.com/hashicorp/go-uuid v1.0.2 // 
indirect github.com/jcmturner/gofork v1.0.0 - github.com/klauspost/compress v1.9.8 - github.com/kr/pretty v0.2.0 // indirect - github.com/pierrec/lz4 v2.4.1+incompatible - github.com/rcrowley/go-metrics v0.0.0-20190826022208-cac0b30c2563 - github.com/stretchr/testify v1.4.0 + github.com/klauspost/compress v1.10.7 + github.com/kr/text v0.2.0 // indirect + github.com/niemeyer/pretty v0.0.0-20200227124842-a10e7caefd8e // indirect + github.com/pierrec/lz4 v2.5.2+incompatible + github.com/rcrowley/go-metrics v0.0.0-20200313005456-10cdbea86bc0 + github.com/stretchr/testify v1.6.0 github.com/xdg/scram v0.0.0-20180814205039-7eeb5667e42c github.com/xdg/stringprep v1.0.0 // indirect - golang.org/x/crypto v0.0.0-20200204104054-c9f3fb736b72 // indirect - golang.org/x/net v0.0.0-20200202094626-16171245cfb2 + golang.org/x/crypto v0.0.0-20200510223506-06a226fb4e37 // indirect + golang.org/x/net v0.0.0-20200528225125-3c3fba18258b golang.org/x/text v0.3.2 // indirect - gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15 // indirect + gopkg.in/check.v1 v1.0.0-20200227125254-8fa46927fb4f // indirect gopkg.in/jcmturner/aescts.v1 v1.0.1 // indirect gopkg.in/jcmturner/dnsutils.v1 v1.0.1 // indirect gopkg.in/jcmturner/goidentity.v3 v3.0.0 // indirect gopkg.in/jcmturner/gokrb5.v7 v7.5.0 gopkg.in/jcmturner/rpc.v1 v1.1.0 // indirect - gopkg.in/yaml.v2 v2.2.8 // indirect + gopkg.in/yaml.v3 v3.0.0-20200601152816-913338de1bd2 // indirect ) diff --git a/go.sum b/go.sum index 06ec3280c..623aba373 100644 --- a/go.sum +++ b/go.sum @@ -1,5 +1,6 @@ github.com/Shopify/toxiproxy v2.1.4+incompatible h1:TKdv8HiTLgE5wdJuEML90aBgNWsokNbMijUGhmcoBJc= github.com/Shopify/toxiproxy v2.1.4+incompatible/go.mod h1:OXgGpZ6Cli1/URJOF1DMxUHB2q5Ap20/P/eIdh4G0pI= +github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E= github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= @@ -11,49 +12,53 @@ github.com/eapache/queue v1.1.0 h1:YOEu7KNc61ntiQlcEeUIoDTJ2o8mQznoNvUhiigpIqc= github.com/eapache/queue v1.1.0/go.mod h1:6eCeP0CKFpHLu8blIFXhExK/dRa7WDZfr6jVFPTqq+I= github.com/fortytw2/leaktest v1.3.0 h1:u8491cBMTQ8ft8aeV+adlcytMZylmA5nnwwkRZjI8vw= github.com/fortytw2/leaktest v1.3.0/go.mod h1:jDsjWgpAGjm2CA7WthBh/CdZYEPF31XHquHwclZch5g= -github.com/frankban/quicktest v1.7.2 h1:2QxQoC1TS09S7fhCPsrvqYdvP1H5M1P1ih5ABm3BTYk= -github.com/frankban/quicktest v1.7.2/go.mod h1:jaStnuzAqU1AJdCO0l53JDCJrVDKcS03DbaAcR7Ks/o= +github.com/frankban/quicktest v1.10.0 h1:Gfh+GAJZOAoKZsIZeZbdn2JF10kN1XHNvjsvQK8gVkE= +github.com/frankban/quicktest v1.10.0/go.mod h1:ui7WezCLWMWxVWr1GETZY3smRy0G4KWq9vcPtJmFl7Y= github.com/golang/snappy v0.0.1 h1:Qgr9rKW7uDUkrbSmQeiDsGa8SjGyCOGtuasMWwvp2P4= github.com/golang/snappy v0.0.1/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= -github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= github.com/google/go-cmp v0.4.0 h1:xsAVV57WRhGj6kEIi8ReJzQlHHqcBYCElAvkovg3B/4= github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.4.1 h1:/exdXoGamhu5ONeUJH0deniYLWYvQwW66yvlfiiKTu0= +github.com/google/go-cmp v0.4.1/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/hashicorp/go-uuid v1.0.2 h1:cfejS+Tpcp13yd5nYHWDI6qVCny6wyX2Mt5SGur2IGE= github.com/hashicorp/go-uuid v1.0.2/go.mod 
h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro= github.com/jcmturner/gofork v1.0.0 h1:J7uCkflzTEhUZ64xqKnkDxq3kzc96ajM1Gli5ktUem8= github.com/jcmturner/gofork v1.0.0/go.mod h1:MK8+TM0La+2rjBD4jE12Kj1pCCxK7d2LK/UM3ncEo0o= -github.com/klauspost/compress v1.9.8 h1:VMAMUUOh+gaxKTMk+zqbjsSjsIcUcL/LF4o63i82QyA= -github.com/klauspost/compress v1.9.8/go.mod h1:RyIbtBH6LamlWaDj8nUwkbUhJ87Yi3uG0guNDohfE1A= -github.com/kr/pretty v0.1.0 h1:L/CwN0zerZDmRFUapSPitk6f+Q3+0za1rQkzVuMiMFI= -github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo= +github.com/klauspost/compress v1.10.7 h1:7rix8v8GpI3ZBb0nSozFRgbtXKv+hOe+qfEpZqybrAg= +github.com/klauspost/compress v1.10.7/go.mod h1:aoV0uJVorq1K+umq18yTdKaF57EivdYsUV+/s2qKfXs= github.com/kr/pretty v0.2.0 h1:s5hAObm+yFO5uHYt5dYjxi2rXrsnmRpJx4OYvIWUaQs= github.com/kr/pretty v0.2.0/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI= github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= github.com/kr/text v0.1.0 h1:45sCR5RtlFHMR4UwH9sdQ5TC8v0qDQCHnXt+kaKSTVE= github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI= -github.com/pierrec/lz4 v2.4.1+incompatible h1:mFe7ttWaflA46Mhqh+jUfjp2qTbPYxLB2/OyBppH9dg= -github.com/pierrec/lz4 v2.4.1+incompatible/go.mod h1:pdkljMzZIN41W+lC3N2tnIh5sFi+IEE17M5jbnwPHcY= +github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY= +github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE= +github.com/niemeyer/pretty v0.0.0-20200227124842-a10e7caefd8e h1:fD57ERR4JtEqsWbfPhv4DMiApHyliiK5xCTNVSPiaAs= +github.com/niemeyer/pretty v0.0.0-20200227124842-a10e7caefd8e/go.mod h1:zD1mROLANZcx1PVRCS0qkT7pwLkGfwJo4zjcN/Tysno= +github.com/pierrec/lz4 v2.5.2+incompatible h1:WCjObylUIOlKy/+7Abdn34TLIkXiA4UWUMhxq9m9ZXI= +github.com/pierrec/lz4 v2.5.2+incompatible/go.mod h1:pdkljMzZIN41W+lC3N2tnIh5sFi+IEE17M5jbnwPHcY= github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= -github.com/rcrowley/go-metrics v0.0.0-20190826022208-cac0b30c2563 h1:dY6ETXrvDG7Sa4vE8ZQG4yqWg6UnOcbqTAahkV813vQ= -github.com/rcrowley/go-metrics v0.0.0-20190826022208-cac0b30c2563/go.mod h1:bCqnVzQkZxMG4s8nGwiZ5l3QUCyqpo9Y+/ZMZ9VjZe4= +github.com/rcrowley/go-metrics v0.0.0-20200313005456-10cdbea86bc0 h1:MkV+77GLUNo5oJ0jf870itWm3D0Sjh7+Za9gazKc5LQ= +github.com/rcrowley/go-metrics v0.0.0-20200313005456-10cdbea86bc0/go.mod h1:bCqnVzQkZxMG4s8nGwiZ5l3QUCyqpo9Y+/ZMZ9VjZe4= github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= -github.com/stretchr/testify v1.4.0 h1:2E4SXV/wtOkTonXsotYi4li6zVWxYlZuYNCXe9XRJyk= -github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4= +github.com/stretchr/testify v1.6.0 h1:jlIyCplCJFULU/01vCkhKuTyc3OorI3bJFuw6obfgho= +github.com/stretchr/testify v1.6.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= github.com/xdg/scram v0.0.0-20180814205039-7eeb5667e42c h1:u40Z8hqBAAQyv+vATcGgV0YCnDjqSL7/q/JyPhhJSPk= github.com/xdg/scram v0.0.0-20180814205039-7eeb5667e42c/go.mod h1:lB8K/P019DLNhemzwFU4jHLhdvlE6uDZjXFejJXr49I= github.com/xdg/stringprep v1.0.0 h1:d9X0esnoa3dFsV0FG35rAT0RIhYFlPq7MiP+DW89La0= github.com/xdg/stringprep v1.0.0/go.mod h1:Jhud4/sHMO4oL310DaZAKk9ZaJ08SJfe+sJh0HrGL1Y= golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2 h1:VklqNMn3ovrHsnt90PveolxSbWFaJdECFbxSq0Mqo2M= golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod 
h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= -golang.org/x/crypto v0.0.0-20200204104054-c9f3fb736b72 h1:+ELyKg6m8UBf0nPFSqD0mi7zUfwPyXo23HNjMnXPz7w= -golang.org/x/crypto v0.0.0-20200204104054-c9f3fb736b72/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= +golang.org/x/crypto v0.0.0-20200510223506-06a226fb4e37 h1:cg5LA/zNPRzIXIWSCxQW10Rvpy94aQh3LT/ShoCpkHw= +golang.org/x/crypto v0.0.0-20200510223506-06a226fb4e37/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3 h1:0GoQqolDA55aaLxZyTzK/Y2ePZzZTUrRacwib7cNsYQ= golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= -golang.org/x/net v0.0.0-20200202094626-16171245cfb2 h1:CCH4IOTTfewWjGOlSp+zGcjutRKlBEZQ6wTn8ozI/nI= -golang.org/x/net v0.0.0-20200202094626-16171245cfb2/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20200528225125-3c3fba18258b h1:IYiJPiJfzktmDAO1HQiwjMjwjlYKHAL7KzeD544RJPs= +golang.org/x/net v0.0.0-20200528225125-3c3fba18258b/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200323222414-85ca7c5b95cd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/text v0.3.0 h1:g61tztE5qeGQ89tm6NTjjM9VPIm088od1l6aSorWRWg= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.2 h1:tW2bmiBqwgJj/UpqtC8EpXEZVYOwU0yG4iWbprSVAcs= @@ -63,8 +68,8 @@ golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543 h1:E7g+9GITq07hpfrRu66IV golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405 h1:yhCVgyC4o1eVCa2tZl7eS0r+SDo693bJlVdllGtEeKM= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= -gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15 h1:YR8cESwS4TdDjEe65xsg0ogRM/Nc3DYOhEAlW+xobZo= -gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/check.v1 v1.0.0-20200227125254-8fa46927fb4f h1:BLraFXnmrev5lT+xlilqcH8XK9/i0At2xKjWk4p6zsU= +gopkg.in/check.v1 v1.0.0-20200227125254-8fa46927fb4f/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/jcmturner/aescts.v1 v1.0.1 h1:cVVZBK2b1zY26haWB4vbBiZrfFQnfbTVrE3xZq6hrEw= gopkg.in/jcmturner/aescts.v1 v1.0.1/go.mod h1:nsR8qBOg+OucoIW+WMhB3GspUQXq9XorLnQb9XtvcOo= gopkg.in/jcmturner/dnsutils.v1 v1.0.1 h1:cIuC1OLRGZrld+16ZJvvZxVJeKPsvd5eUIvxfoN5hSM= @@ -75,7 +80,7 @@ gopkg.in/jcmturner/gokrb5.v7 v7.5.0 h1:a9tsXlIDD9SKxotJMK3niV7rPZAJeX2aD/0yg3qlI gopkg.in/jcmturner/gokrb5.v7 v7.5.0/go.mod h1:l8VISx+WGYp+Fp7KRbsiUuXTTOnxIc3Tuvyavf11/WM= gopkg.in/jcmturner/rpc.v1 v1.1.0 h1:QHIUxTX1ISuAv9dD2wJ9HWQVuWDX/Zc0PfeC2tjc4rU= gopkg.in/jcmturner/rpc.v1 v1.1.0/go.mod h1:YIdkC4XfD6GXbzje11McwsDuOlZQSb9W4vfLvuNnlv8= -gopkg.in/yaml.v2 v2.2.2 h1:ZCJp+EgiOT7lHqUV2J862kp8Qj64Jo6az82+3Td9dZw= -gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= -gopkg.in/yaml.v2 v2.2.8 h1:obN1ZagJSUGI0Ek/LBmuj4SNLPfIny3KsKFopxRdj10= -gopkg.in/yaml.v2 v2.2.8/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c h1:dUUwHk2QECo/6vqA44rthZ8ie2QXMNeKRTHCNY2nXvo= +gopkg.in/yaml.v3 
v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= +gopkg.in/yaml.v3 v3.0.0-20200601152816-913338de1bd2 h1:VEmvx0P+GVTgkNu2EdTN988YCZPcD3lo9AoczZpucwc= +gopkg.in/yaml.v3 v3.0.0-20200601152816-913338de1bd2/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= From 535a77dcfe1da3a09b38f3da5f8a5c05eb6bc949 Mon Sep 17 00:00:00 2001 From: Vlad Gorodetsky Date: Tue, 2 Jun 2020 09:57:39 +0300 Subject: [PATCH 35/41] Update supported kafka versions in README --- README.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/README.md b/README.md index 9b7478d7c..a3dd59056 100644 --- a/README.md +++ b/README.md @@ -20,7 +20,7 @@ You might also want to look at the [Frequently Asked Questions](https://github.c Sarama provides a "2 releases + 2 months" compatibility guarantee: we support the two latest stable releases of Kafka and Go, and we provide a two month grace period for older releases. This means we currently officially support -Go 1.12 through 1.14, and Kafka 2.1 through 2.4, although older releases are +Go 1.12 through 1.14, and Kafka 2.3 through 2.5, although older releases are still likely to work. Sarama follows semantic versioning and provides API stability via the gopkg.in service. From db2dbead927333dcc1b496bc8318f6585bc1d628 Mon Sep 17 00:00:00 2001 From: Vlad Gorodetsky Date: Tue, 2 Jun 2020 09:58:13 +0300 Subject: [PATCH 36/41] Update golangci-lint to v1.27.0 --- .github/workflows/ci.yml | 2 +- .golangci.yml | 1 + 2 files changed, 2 insertions(+), 1 deletion(-) diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index 116deb5e5..b5e890c88 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -46,7 +46,7 @@ jobs: - name: Install dependencies run: | - curl -sSfL https://raw.githubusercontent.com/golangci/golangci-lint/master/install.sh | sh -s -- -b $(go env GOPATH)/bin v1.23.6 + curl -sSfL https://raw.githubusercontent.com/golangci/golangci-lint/master/install.sh | sh -s -- -b $(go env GOPATH)/bin v1.27.0 export REPOSITORY_ROOT=${GITHUB_WORKSPACE} vagrant/install_cluster.sh vagrant/boot_cluster.sh diff --git a/.golangci.yml b/.golangci.yml index 47624f3de..077ef653f 100644 --- a/.golangci.yml +++ b/.golangci.yml @@ -72,3 +72,4 @@ issues: exclude: - consider giving a name to these results - include an explanation for nolint directive + - Potential Integer overflow made by strconv.Atoi result conversion to int16/32 From bb1b6ac12b3fe0aa63c937dd56dec1585e85bab1 Mon Sep 17 00:00:00 2001 From: Diego Alvarez Date: Wed, 17 Jun 2020 16:05:31 -0700 Subject: [PATCH 37/41] include zstd on the functional tests --- functional_producer_test.go | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/functional_producer_test.go b/functional_producer_test.go index e589a8eb8..b6bac585b 100644 --- a/functional_producer_test.go +++ b/functional_producer_test.go @@ -31,6 +31,12 @@ func TestFuncProducingSnappy(t *testing.T) { testProducingMessages(t, config) } +func TestFuncProducingZstd(t *testing.T) { + config := NewConfig() + config.Producer.Compression = CompressionZSTD + testProducingMessages(t, config) +} + func TestFuncProducingNoResponse(t *testing.T) { config := NewConfig() config.Producer.RequiredAcks = NoResponse From 0af1b4d7562155751a841806bf07e9397afe6a23 Mon Sep 17 00:00:00 2001 From: Diego Alvarez Date: Thu, 18 Jun 2020 09:36:40 -0700 Subject: [PATCH 38/41] zstd needs client >= 2.1.0.0 --- functional_producer_test.go | 1 + 1 file changed, 1 insertion(+) diff --git a/functional_producer_test.go 
b/functional_producer_test.go index b6bac585b..9e153b0d9 100644 --- a/functional_producer_test.go +++ b/functional_producer_test.go @@ -33,6 +33,7 @@ func TestFuncProducingSnappy(t *testing.T) { func TestFuncProducingZstd(t *testing.T) { config := NewConfig() + config.Version = V2_1_0_0 config.Producer.Compression = CompressionZSTD testProducingMessages(t, config) } From cfd5999fa195c5c7ae785e533166b06947bae7ce Mon Sep 17 00:00:00 2001 From: Konstantinos Tsanaktsidis Date: Wed, 24 Jun 2020 13:51:24 +1000 Subject: [PATCH 39/41] Remove redundant hard-coding of env.KafkaVersion --- functional_test.go | 5 ----- 1 file changed, 5 deletions(-) diff --git a/functional_test.go b/functional_test.go index 2391725a4..b61a43daf 100644 --- a/functional_test.go +++ b/functional_test.go @@ -153,11 +153,6 @@ func prepareDockerTestEnvironment(ctx context.Context, env *testEnvironment) err env.KafkaBrokerAddrs = append(env.KafkaBrokerAddrs, fmt.Sprintf("127.0.0.1:%d", 29090+i)) } - // the mapping of confluent platform docker image vesions -> kafka versions can be - // found here: https://docs.confluent.io/current/installation/versions-interoperability.html - // We have cp-5.5.0 in the docker-compose file, so that's kafka 2.5.0. - env.KafkaVersion = "2.5.0" - // Wait for the kafka broker to come up allBrokersUp := false for i := 0; i < 45 && !allBrokersUp; i++ { From cb9d1e8b85290f42c25bc72d120397ccfec3500a Mon Sep 17 00:00:00 2001 From: Wim Claeys Date: Wed, 24 Jun 2020 09:04:25 +0100 Subject: [PATCH 40/41] Add support for manual commit to ConsumerGroup - expose a `Commit()` sync method on ConsumerGroupSession - don't create mainLoop in OffsetManager unless AutoCommit is enabled --- consumer_group.go | 9 +++++++ offset_manager.go | 27 ++++++++++++-------- offset_manager_test.go | 58 ++++++++++++++++++++++++++++++++++++++++++ 3 files changed, 84 insertions(+), 10 deletions(-) diff --git a/consumer_group.go b/consumer_group.go index 056b9e387..aae6599ca 100644 --- a/consumer_group.go +++ b/consumer_group.go @@ -513,6 +513,11 @@ type ConsumerGroupSession interface { // message twice, and your processing should ideally be idempotent. MarkOffset(topic string, partition int32, offset int64, metadata string) + // Commit the offset to the backend + // + // Note: calling Commit performs a blocking synchronous operation. + Commit() + // ResetOffset resets to the provided offset, alongside a metadata string that // represents the state of the partition consumer at that point in time. Reset // acts as a counterpart to MarkOffset, the difference being that it allows to @@ -624,6 +629,10 @@ func (s *consumerGroupSession) MarkOffset(topic string, partition int32, offset } } +func (s *consumerGroupSession) Commit() { + s.offsets.Commit() +} + func (s *consumerGroupSession) ResetOffset(topic string, partition int32, offset int64, metadata string) { if pom := s.offsets.findPOM(topic, partition); pom != nil { pom.ResetOffset(offset, metadata) diff --git a/offset_manager.go b/offset_manager.go index 19408729f..b4fea8226 100644 --- a/offset_manager.go +++ b/offset_manager.go @@ -19,6 +19,10 @@ type OffsetManager interface { // will otherwise leak memory. You must call this after all the // PartitionOffsetManagers are closed. Close() error + + // Commit commits the offsets. This method can be used if AutoCommit.Enable is + // set to false. 
+	Commit()
 }
 
 type offsetManager struct {
@@ -58,7 +62,6 @@ func newOffsetManagerFromClient(group, memberID string, generation int32, client
 		client: client,
 		conf:   conf,
 		group:  group,
-		ticker: time.NewTicker(conf.Consumer.Offsets.AutoCommit.Interval),
 
 		poms: make(map[string]map[int32]*partitionOffsetManager),
 
 		memberID:   memberID,
@@ -67,7 +70,10 @@ func newOffsetManagerFromClient(group, memberID string, generation int32, client
 		closing: make(chan none),
 		closed:  make(chan none),
 	}
-	go withRecover(om.mainLoop)
+	if conf.Consumer.Offsets.AutoCommit.Enable {
+		om.ticker = time.NewTicker(conf.Consumer.Offsets.AutoCommit.Interval)
+		go withRecover(om.mainLoop)
+	}
 
 	return om, nil
 }
@@ -99,7 +105,9 @@ func (om *offsetManager) Close() error {
 	om.closeOnce.Do(func() {
 		// exit the mainLoop
 		close(om.closing)
-		<-om.closed
+		if om.conf.Consumer.Offsets.AutoCommit.Enable {
+			<-om.closed
+		}
 
 		// mark all POMs as closed
 		om.asyncClosePOMs()
@@ -225,20 +233,19 @@ func (om *offsetManager) mainLoop() {
 	for {
 		select {
 		case <-om.ticker.C:
-			om.flushToBroker()
-			om.releasePOMs(false)
+			om.Commit()
 		case <-om.closing:
 			return
 		}
 	}
 }
 
-// flushToBroker is ignored if auto-commit offsets is disabled
-func (om *offsetManager) flushToBroker() {
-	if !om.conf.Consumer.Offsets.AutoCommit.Enable {
-		return
-	}
+func (om *offsetManager) Commit() {
+	om.flushToBroker()
+	om.releasePOMs(false)
+}
 
+func (om *offsetManager) flushToBroker() {
 	req := om.constructRequest()
 	if req == nil {
 		return
diff --git a/offset_manager_test.go b/offset_manager_test.go
index f1baa9cdb..5aa2ee0ff 100644
--- a/offset_manager_test.go
+++ b/offset_manager_test.go
@@ -169,6 +169,64 @@ func TestNewOffsetManagerOffsetsAutoCommit(t *testing.T) {
 	}
 }
 
+func TestNewOffsetManagerOffsetsManualCommit(t *testing.T) {
+	// Tests to validate configuration when `Consumer.Offsets.AutoCommit.Enable` is false
+	config := NewConfig()
+	config.Consumer.Offsets.AutoCommit.Enable = false
+
+	om, testClient, broker, coordinator := initOffsetManagerWithBackoffFunc(t, 0, nil, config)
+	pom := initPartitionOffsetManager(t, om, coordinator, 5, "original_meta")
+
+	// Wait long enough for the test not to fail.
+	timeout := 50 * config.Consumer.Offsets.AutoCommit.Interval
+
+	ocResponse := new(OffsetCommitResponse)
+	ocResponse.AddError("my_topic", 0, ErrNoError)
+	called := make(chan none)
+	handler := func(req *request) (res encoderWithHeader) {
+		close(called)
+		return ocResponse
+	}
+	coordinator.setHandler(handler)
+
+	// Should not trigger an auto-commit
+	expected := int64(1)
+	pom.ResetOffset(expected, "modified_meta")
+	_, _ = pom.NextOffset()
+
+	select {
+	case <-called:
+		// OffsetManager called on the wire.
+		t.Errorf("Received request when AutoCommit is disabled")
+	case <-time.After(timeout):
+		// Timeout waiting for OffsetManager to call on the wire.
+		// OK
+	}
+
+	// Setup again to test manual commit
+	called = make(chan none)
+
+	om.Commit()
+
+	select {
+	case <-called:
+		// OffsetManager called on the wire.
+		// OK
+	case <-time.After(timeout):
+		// Timeout waiting for OffsetManager to call on the wire.
+		t.Errorf("No request received after waiting for %v", timeout)
+	}
+
+	// Close up
+	broker.Close()
+	coordinator.Close()
+
+	// !! 
om must be closed before the pom so pom.release() is called before pom.Close() + safeClose(t, om) + safeClose(t, pom) + safeClose(t, testClient) +} + // Test recovery from ErrNotCoordinatorForConsumer // on first fetchInitialOffset call func TestOffsetManagerFetchInitialFail(t *testing.T) { From 5933302b8bc4b51c43f1bc2531146d59dc88a91d Mon Sep 17 00:00:00 2001 From: Varun Date: Mon, 6 Jul 2020 21:26:47 -0500 Subject: [PATCH 41/41] updated zstd dependency to latest (#1741) --- go.mod | 2 +- go.sum | 2 ++ 2 files changed, 3 insertions(+), 1 deletion(-) diff --git a/go.mod b/go.mod index ee12ba1ff..d775035ce 100644 --- a/go.mod +++ b/go.mod @@ -14,7 +14,7 @@ require ( github.com/google/go-cmp v0.4.1 // indirect github.com/hashicorp/go-uuid v1.0.2 // indirect github.com/jcmturner/gofork v1.0.0 - github.com/klauspost/compress v1.10.7 + github.com/klauspost/compress v1.10.10 github.com/kr/text v0.2.0 // indirect github.com/niemeyer/pretty v0.0.0-20200227124842-a10e7caefd8e // indirect github.com/pierrec/lz4 v2.5.2+incompatible diff --git a/go.sum b/go.sum index 623aba373..ad0889d49 100644 --- a/go.sum +++ b/go.sum @@ -26,6 +26,8 @@ github.com/jcmturner/gofork v1.0.0 h1:J7uCkflzTEhUZ64xqKnkDxq3kzc96ajM1Gli5ktUem github.com/jcmturner/gofork v1.0.0/go.mod h1:MK8+TM0La+2rjBD4jE12Kj1pCCxK7d2LK/UM3ncEo0o= github.com/klauspost/compress v1.10.7 h1:7rix8v8GpI3ZBb0nSozFRgbtXKv+hOe+qfEpZqybrAg= github.com/klauspost/compress v1.10.7/go.mod h1:aoV0uJVorq1K+umq18yTdKaF57EivdYsUV+/s2qKfXs= +github.com/klauspost/compress v1.10.10 h1:a/y8CglcM7gLGYmlbP/stPE5sR3hbhFRUjCBfd/0B3I= +github.com/klauspost/compress v1.10.10/go.mod h1:aoV0uJVorq1K+umq18yTdKaF57EivdYsUV+/s2qKfXs= github.com/kr/pretty v0.2.0 h1:s5hAObm+yFO5uHYt5dYjxi2rXrsnmRpJx4OYvIWUaQs= github.com/kr/pretty v0.2.0/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI= github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ=
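
A usage sketch for the validServerNameTLS change in patches 31-33: a single tls.Config can now be shared by every broker connection without pre-setting ServerName, because each connection clones the config and infers the name from its own broker address. This sketch is not part of the patches themselves; the CA file path and broker addresses are placeholders.

package main

import (
	"crypto/tls"
	"crypto/x509"
	"io/ioutil"
	"log"

	"github.com/Shopify/sarama"
)

func main() {
	// "ca.pem" is a placeholder path to the cluster's CA certificate.
	pem, err := ioutil.ReadFile("ca.pem")
	if err != nil {
		log.Fatalln(err)
	}
	caPool := x509.NewCertPool()
	caPool.AppendCertsFromPEM(pem)

	cfg := sarama.NewConfig()
	cfg.Net.TLS.Enable = true
	// ServerName is deliberately left empty: each broker connection clones
	// this config and fills in its own hostname, so the shared config is
	// no longer mutated with the first broker's name.
	cfg.Net.TLS.Config = &tls.Config{RootCAs: caPool}

	client, err := sarama.NewClient([]string{"kafka-1:9093", "kafka-2:9093"}, cfg)
	if err != nil {
		log.Fatalln(err)
	}
	defer client.Close()
}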
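A usage sketch for zstd-compressed producing per patches 37-38: CompressionZSTD only works once config.Version advertises at least V2_1_0_0, which is why TestFuncProducingZstd raises the version explicitly. This is a minimal sketch rather than part of the patches; the broker address and topic name are placeholders.

package main

import (
	"log"

	"github.com/Shopify/sarama"
)

func main() {
	cfg := sarama.NewConfig()
	// zstd depends on protocol support that only exists on Kafka 2.1.0.0
	// and newer, so the client version must be raised explicitly.
	cfg.Version = sarama.V2_1_0_0
	cfg.Producer.Compression = sarama.CompressionZSTD
	cfg.Producer.Return.Successes = true // required by the SyncProducer

	producer, err := sarama.NewSyncProducer([]string{"localhost:9092"}, cfg)
	if err != nil {
		log.Fatalln(err)
	}
	defer producer.Close()

	partition, offset, err := producer.SendMessage(&sarama.ProducerMessage{
		Topic: "example-topic",
		Value: sarama.StringEncoder("compressed with zstd"),
	})
	if err != nil {
		log.Fatalln(err)
	}
	log.Printf("delivered to partition %d at offset %d", partition, offset)
}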
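A usage sketch for the manual-commit support in patch 40: with Consumer.Offsets.AutoCommit.Enable set to false the ticker goroutine is never started, and marked offsets only reach the broker when ConsumerGroupSession.Commit() is called, which blocks until the commit round-trip completes. The broker address, group ID, and topic are placeholders, and committing after every single message is only for illustration.

package main

import (
	"context"
	"log"

	"github.com/Shopify/sarama"
)

// manualCommitHandler marks each message and then commits synchronously
// instead of relying on the auto-commit ticker.
type manualCommitHandler struct{}

func (manualCommitHandler) Setup(sarama.ConsumerGroupSession) error   { return nil }
func (manualCommitHandler) Cleanup(sarama.ConsumerGroupSession) error { return nil }

func (manualCommitHandler) ConsumeClaim(sess sarama.ConsumerGroupSession, claim sarama.ConsumerGroupClaim) error {
	for msg := range claim.Messages() {
		// ... process msg ...
		sess.MarkMessage(msg, "")
		// Blocking, synchronous flush of everything marked so far.
		sess.Commit()
	}
	return nil
}

func main() {
	cfg := sarama.NewConfig()
	cfg.Version = sarama.V2_1_0_0
	// Disable the auto-commit ticker so offsets move only on Commit().
	cfg.Consumer.Offsets.AutoCommit.Enable = false

	group, err := sarama.NewConsumerGroup([]string{"localhost:9092"}, "example-group", cfg)
	if err != nil {
		log.Fatalln(err)
	}
	defer group.Close()

	for {
		if err := group.Consume(context.Background(), []string{"example-topic"}, manualCommitHandler{}); err != nil {
			log.Fatalln(err)
		}
	}
}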