Merge branch 'master' into clientQuotasApi
bai authored Sep 13, 2021
2 parents 5ce40d4 + 5f7f224 commit e1354e2
Showing 31 changed files with 715 additions and 40 deletions.
4 changes: 2 additions & 2 deletions .github/workflows/ci.yml
@@ -9,7 +9,7 @@ jobs:
strategy:
fail-fast: false
matrix:
go-version: [1.15.x, 1.16.x]
go-version: [1.16.x, 1.17.x]
kafka-version: [2.7.1, 2.8.0]
platform: [ubuntu-latest]

@@ -34,7 +34,7 @@ jobs:
- name: Install dependencies
run: |
curl -sSfL https://raw.githubusercontent.com/golangci/golangci-lint/master/install.sh | sh -s -- -b $(go env GOPATH)/bin v1.37.1
curl -sSfL https://raw.githubusercontent.com/golangci/golangci-lint/master/install.sh | sh -s -- -b $(go env GOPATH)/bin v1.42.1
export REPOSITORY_ROOT=${GITHUB_WORKSPACE}
- name: Run test suite
4 changes: 3 additions & 1 deletion .golangci.yml
@@ -40,6 +40,7 @@ linters:
- bodyclose
- deadcode
- depguard
- exportloopref
- dogsled
# - dupl
- errcheck
@@ -55,8 +56,9 @@ linters:
# - gosimple
- govet
# - ineffassign
# - misspell
- misspell
# - nakedret
- nilerr
# - scopelint
- staticcheck
- structcheck
4 changes: 4 additions & 0 deletions CHANGELOG.md
@@ -2,6 +2,10 @@

#### Unreleased

# Improvements

- #1912 - @faillefer Support for --delete-offsets for consumer group topic

#### Version 1.28.0 (2021-02-15)

**Note that with this release we change `RoundRobinBalancer` strategy to match Java client behavior. See #1788 for details.**
2 changes: 1 addition & 1 deletion README.md
@@ -20,7 +20,7 @@ You might also want to look at the [Frequently Asked Questions](https://github.c
Sarama provides a "2 releases + 2 months" compatibility guarantee: we support
the two latest stable releases of Kafka and Go, and we provide a two month
grace period for older releases. This means we currently officially support
Go 1.15 through 1.16, and Kafka 2.6 through 2.8, although older releases are
Go 1.15 through 1.16, and Kafka 2.7 through 2.8, although older releases are
still likely to work.

Sarama follows semantic versioning and provides API stability via the gopkg.in service.
31 changes: 31 additions & 0 deletions admin.go
@@ -95,6 +95,9 @@ type ClusterAdmin interface {
// List the consumer group offsets available in the cluster.
ListConsumerGroupOffsets(group string, topicPartitions map[string][]int32) (*OffsetFetchResponse, error)

// Deletes a consumer group offset
DeleteConsumerGroupOffset(group string, topic string, partition int32) error

// Delete a consumer group.
DeleteConsumerGroup(group string) error

@@ -883,6 +886,34 @@ func (ca *clusterAdmin) ListConsumerGroupOffsets(group string, topicPartitions m
return coordinator.FetchOffset(request)
}

func (ca *clusterAdmin) DeleteConsumerGroupOffset(group string, topic string, partition int32) error {
coordinator, err := ca.client.Coordinator(group)
if err != nil {
return err
}

request := &DeleteOffsetsRequest{
Group: group,
partitions: map[string][]int32{
topic: {partition},
},
}

resp, err := coordinator.DeleteOffsets(request)
if err != nil {
return err
}

if resp.ErrorCode != ErrNoError {
return resp.ErrorCode
}

if resp.Errors[topic][partition] != ErrNoError {
return resp.Errors[topic][partition]
}
return nil
}

func (ca *clusterAdmin) DeleteConsumerGroup(group string) error {
coordinator, err := ca.client.Coordinator(group)
if err != nil {
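The new `DeleteConsumerGroupOffset` method can be exercised end-to-end through the `ClusterAdmin` interface. A minimal sketch, assuming a broker reachable at `localhost:9092` and placeholder group/topic names; the Kafka version is set to 2.4.0, the first release that supports the OffsetDelete API, matching the test below:

```go
package main

import (
	"log"

	"github.com/Shopify/sarama"
)

func main() {
	config := sarama.NewConfig()
	// DeleteOffsetsRequest requires Kafka 2.4.0 or newer.
	config.Version = sarama.V2_4_0_0

	admin, err := sarama.NewClusterAdmin([]string{"localhost:9092"}, config)
	if err != nil {
		log.Fatalf("failed to create cluster admin: %v", err)
	}
	defer admin.Close()

	// Delete the committed offset for partition 0 of "my-topic" in "my-group".
	// The group must not be actively subscribed to the topic, otherwise the
	// broker answers with ErrGroupSubscribedToTopic (see the test below).
	if err := admin.DeleteConsumerGroupOffset("my-group", "my-topic", 0); err != nil {
		log.Fatalf("failed to delete consumer group offset: %v", err)
	}
}
```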
46 changes: 46 additions & 0 deletions admin_test.go
@@ -1326,6 +1326,52 @@ func TestDeleteConsumerGroup(t *testing.T) {
}
}

func TestDeleteOffset(t *testing.T) {
seedBroker := NewMockBroker(t, 1)
defer seedBroker.Close()

group := "group-delete-offset"
topic := "topic-delete-offset"
partition := int32(0)

handlerMap := map[string]MockResponse{
"MetadataRequest": NewMockMetadataResponse(t).
SetController(seedBroker.BrokerID()).
SetBroker(seedBroker.Addr(), seedBroker.BrokerID()),
"FindCoordinatorRequest": NewMockFindCoordinatorResponse(t).SetCoordinator(CoordinatorGroup, group, seedBroker),
}
seedBroker.SetHandlerByMap(handlerMap)

config := NewTestConfig()
config.Version = V2_4_0_0

admin, err := NewClusterAdmin([]string{seedBroker.Addr()}, config)
if err != nil {
t.Fatal(err)
}

// Test NoError
handlerMap["DeleteOffsetsRequest"] = NewMockDeleteOffsetRequest(t).SetDeletedOffset(ErrNoError, topic, partition, ErrNoError)
err = admin.DeleteConsumerGroupOffset(group, topic, partition)
if err != nil {
t.Fatalf("DeleteConsumerGroupOffset failed with error %v", err)
}

// Test Error
handlerMap["DeleteOffsetsRequest"] = NewMockDeleteOffsetRequest(t).SetDeletedOffset(ErrNotCoordinatorForConsumer, topic, partition, ErrNoError)
err = admin.DeleteConsumerGroupOffset(group, topic, partition)
if err != ErrNotCoordinatorForConsumer {
t.Fatalf("DeleteConsumerGroupOffset should have failed with error %v", ErrNotCoordinatorForConsumer)
}

// Test Error for partition
handlerMap["DeleteOffsetsRequest"] = NewMockDeleteOffsetRequest(t).SetDeletedOffset(ErrNoError, topic, partition, ErrGroupSubscribedToTopic)
err = admin.DeleteConsumerGroupOffset(group, topic, partition)
if err != ErrGroupSubscribedToTopic {
t.Fatalf("DeleteConsumerGroupOffset should have failed with error %v", ErrGroupSubscribedToTopic)
}
}

// TestRefreshMetaDataWithDifferentController ensures that the cached
// controller can be forcibly updated from Metadata by the admin client
func TestRefreshMetaDataWithDifferentController(t *testing.T) {
4 changes: 2 additions & 2 deletions alter_configs_response.go
@@ -61,12 +61,12 @@ func (a *AlterConfigsResourceResponse) encode(pe packetEncoder) error {
pe.putInt16(a.ErrorCode)
err := pe.putString(a.ErrorMsg)
if err != nil {
return nil
return err
}
pe.putInt8(int8(a.Type))
err = pe.putString(a.Name)
if err != nil {
return nil
return err
}
return nil
}
9 changes: 5 additions & 4 deletions async_producer.go
@@ -14,8 +14,8 @@ import (
// to the correct broker for the provided topic-partition, refreshing metadata as appropriate,
// and parses responses for errors. You must read from the Errors() channel or the
// producer will deadlock. You must call Close() or AsyncClose() on a producer to avoid
// leaks: it will not be garbage-collected automatically when it passes out of
// scope.
// leaks and message lost: it will not be garbage-collected automatically when it passes
// out of scope and buffered messages may not be flushed.
type AsyncProducer interface {

// AsyncClose triggers a shutdown of the producer. The shutdown has completed
@@ -26,7 +26,8 @@ type AsyncProducer interface {

// Close shuts down the producer and waits for any buffered messages to be
// flushed. You must call this function before a producer object passes out of
// scope, as it may otherwise leak memory. You must call this before calling
// scope, as it may otherwise leak memory. You must call this before process
// shutting down, or you may lose messages. You must call this before calling
// Close on the underlying client.
Close() error

@@ -206,7 +207,7 @@ type ProducerMessage struct {
// Partition is the partition that the message was sent to. This is only
// guaranteed to be defined if the message was successfully delivered.
Partition int32
// Timestamp can vary in behaviour depending on broker configuration, being
// Timestamp can vary in behavior depending on broker configuration, being
// in either one of the CreateTime or LogAppendTime modes (default CreateTime),
// and requiring version at least 0.10.0.
//
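The updated AsyncProducer comments stress that Close or AsyncClose must run before process shutdown, or buffered messages can be lost. A minimal shutdown sketch along those lines; broker address, topic, and message value are placeholders:

```go
package main

import (
	"log"

	"github.com/Shopify/sarama"
)

func main() {
	producer, err := sarama.NewAsyncProducer([]string{"localhost:9092"}, sarama.NewConfig())
	if err != nil {
		log.Fatalf("failed to start producer: %v", err)
	}

	// The Errors() channel must be drained or the producer deadlocks.
	go func() {
		for prodErr := range producer.Errors() {
			log.Printf("failed to deliver message: %v", prodErr)
		}
	}()

	producer.Input() <- &sarama.ProducerMessage{
		Topic: "my-topic",
		Value: sarama.StringEncoder("hello"),
	}

	// Close flushes buffered messages and waits for outstanding requests;
	// calling it before the process exits is what prevents message loss.
	if err := producer.Close(); err != nil {
		log.Printf("failed to shut down producer cleanly: %v", err)
	}
}
```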
79 changes: 77 additions & 2 deletions broker.go
@@ -180,7 +180,7 @@ func (b *Broker) Open(conf *Config) error {
b.requestsInFlight = metrics.GetOrRegisterCounter("requests-in-flight", conf.MetricRegistry)
// Do not gather metrics for seeded broker (only used during bootstrap) because they share
// the same id (-1) and are already exposed through the global metrics above
if b.id >= 0 {
if b.id >= 0 && !metrics.UseNilMetrics {
b.registerMetrics()
}

@@ -689,6 +689,17 @@ func (b *Broker) DeleteGroups(request *DeleteGroupsRequest) (*DeleteGroupsRespon
return response, nil
}

// DeleteOffsets sends a request to delete group offsets and returns a response or error
func (b *Broker) DeleteOffsets(request *DeleteOffsetsRequest) (*DeleteOffsetsResponse, error) {
response := new(DeleteOffsetsResponse)

if err := b.sendAndReceive(request, response); err != nil {
return nil, err
}

return response, nil
}

// DescribeLogDirs sends a request to get the broker's log dir paths and sizes
func (b *Broker) DescribeLogDirs(request *DescribeLogDirsRequest) (*DescribeLogDirsResponse, error) {
response := new(DescribeLogDirsResponse)
@@ -965,7 +976,7 @@ func (b *Broker) authenticateViaSASL() error {
case SASLTypeOAuth:
return b.sendAndReceiveSASLOAuth(b.conf.Net.SASL.TokenProvider)
case SASLTypeSCRAMSHA256, SASLTypeSCRAMSHA512:
return b.sendAndReceiveSASLSCRAMv1()
return b.sendAndReceiveSASLSCRAM()
case SASLTypeGSSAPI:
return b.sendAndReceiveKerberos()
default:
@@ -1204,6 +1215,70 @@ func (b *Broker) sendClientMessage(message []byte) (bool, error) {
return isChallenge, err
}

func (b *Broker) sendAndReceiveSASLSCRAM() error {
if b.conf.Net.SASL.Version == SASLHandshakeV0 {
return b.sendAndReceiveSASLSCRAMv0()
}
return b.sendAndReceiveSASLSCRAMv1()
}

func (b *Broker) sendAndReceiveSASLSCRAMv0() error {
if err := b.sendAndReceiveSASLHandshake(b.conf.Net.SASL.Mechanism, SASLHandshakeV0); err != nil {
return err
}

scramClient := b.conf.Net.SASL.SCRAMClientGeneratorFunc()
if err := scramClient.Begin(b.conf.Net.SASL.User, b.conf.Net.SASL.Password, b.conf.Net.SASL.SCRAMAuthzID); err != nil {
return fmt.Errorf("failed to start SCRAM exchange with the server: %s", err.Error())
}

msg, err := scramClient.Step("")
if err != nil {
return fmt.Errorf("failed to advance the SCRAM exchange: %s", err.Error())
}

for !scramClient.Done() {
requestTime := time.Now()
// Will be decremented in updateIncomingCommunicationMetrics (except error)
b.addRequestInFlightMetrics(1)
length := len(msg)
authBytes := make([]byte, length+4) //4 byte length header + auth data
binary.BigEndian.PutUint32(authBytes, uint32(length))
copy(authBytes[4:], []byte(msg))
_, err := b.write(authBytes)
b.updateOutgoingCommunicationMetrics(length + 4)
if err != nil {
b.addRequestInFlightMetrics(-1)
Logger.Printf("Failed to write SASL auth header to broker %s: %s\n", b.addr, err.Error())
return err
}
b.correlationID++
header := make([]byte, 4)
_, err = b.readFull(header)
if err != nil {
b.addRequestInFlightMetrics(-1)
Logger.Printf("Failed to read response header while authenticating with SASL to broker %s: %s\n", b.addr, err.Error())
return err
}
payload := make([]byte, int32(binary.BigEndian.Uint32(header)))
n, err := b.readFull(payload)
if err != nil {
b.addRequestInFlightMetrics(-1)
Logger.Printf("Failed to read response payload while authenticating with SASL to broker %s: %s\n", b.addr, err.Error())
return err
}
b.updateIncomingCommunicationMetrics(n+4, time.Since(requestTime))
msg, err = scramClient.Step(string(payload))
if err != nil {
Logger.Println("SASL authentication failed", err)
return err
}
}

Logger.Println("SASL authentication succeeded")
return nil
}

func (b *Broker) sendAndReceiveSASLSCRAMv1() error {
if err := b.sendAndReceiveSASLHandshake(b.conf.Net.SASL.Mechanism, SASLHandshakeV1); err != nil {
return err
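The SCRAM path now branches on `Net.SASL.Version`, routing `SASLHandshakeV0` through the new `sendAndReceiveSASLSCRAMv0` flow and everything else through the existing v1 flow. A hedged configuration sketch: the SCRAMClient generator passed in here stands for a caller-provided implementation (for example one built on the xdg-go/scram package), which is not part of the library; user and password are placeholders.

```go
package kafkaconfig

import "github.com/Shopify/sarama"

// newSCRAMConfig sketches the client-side settings for SASL/SCRAM.
func newSCRAMConfig(newClient func() sarama.SCRAMClient) *sarama.Config {
	config := sarama.NewConfig()
	config.Net.SASL.Enable = true
	config.Net.SASL.Mechanism = sarama.SASLTypeSCRAMSHA512
	config.Net.SASL.User = "user"
	config.Net.SASL.Password = "strong_password"
	// Handshake version picks the wire flow: SASLHandshakeV1 is the usual
	// choice, while SASLHandshakeV0 now uses sendAndReceiveSASLSCRAMv0 for
	// brokers that only speak the older handshake.
	config.Net.SASL.Version = sarama.SASLHandshakeV1
	// A generator must be supplied for SCRAM mechanisms (see config_test.go).
	config.Net.SASL.SCRAMClientGeneratorFunc = newClient
	return config
}
```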
51 changes: 51 additions & 0 deletions broker_test.go
@@ -359,9 +359,11 @@ func TestSASLSCRAMSHAXXX(t *testing.T) {

conf := NewTestConfig()
conf.Net.SASL.Mechanism = SASLTypeSCRAMSHA512
conf.Net.SASL.Version = SASLHandshakeV1
conf.Net.SASL.SCRAMClientGeneratorFunc = func() SCRAMClient { return test.scramClient }

broker.conf = conf
broker.conf.Version = V1_0_0_0
dialer := net.Dialer{
Timeout: conf.Net.DialTimeout,
KeepAlive: conf.Net.KeepAlive,
@@ -1009,6 +1011,22 @@ var brokerTestTable = []struct {
}
},
},

{
V2_4_0_0,
"DeleteOffsetsRequest",
[]byte{0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00},
func(t *testing.T, broker *Broker) {
request := DeleteOffsetsRequest{}
response, err := broker.DeleteOffsets(&request)
if err != nil {
t.Error(err)
}
if response == nil {
t.Error("DeleteGroups request got no response!")
}
},
},
}

func validateBrokerMetrics(t *testing.T, broker *Broker, mockBrokerMetrics brokerMetrics) {
@@ -1041,3 +1059,36 @@ func validateBrokerMetrics(t *testing.T, broker *Broker, mockBrokerMetrics broke
// Run the validators
metricValidators.run(t, broker.conf.MetricRegistry)
}

func BenchmarkBroker_Open(b *testing.B) {
mb := NewMockBroker(nil, 0)
broker := NewBroker(mb.Addr())
// Set the broker id in order to validate local broker metrics
broker.id = 0
metrics.UseNilMetrics = false
conf := NewTestConfig()
conf.Version = V1_0_0_0
for i := 0; i < b.N; i++ {
err := broker.Open(conf)
if err != nil {
b.Fatal(err)
}
broker.Close()
}
}

func BenchmarkBroker_No_Metrics_Open(b *testing.B) {
mb := NewMockBroker(nil, 0)
broker := NewBroker(mb.Addr())
broker.id = 0
metrics.UseNilMetrics = true
conf := NewTestConfig()
conf.Version = V1_0_0_0
for i := 0; i < b.N; i++ {
err := broker.Open(conf)
if err != nil {
b.Fatal(err)
}
broker.Close()
}
}
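The benchmarks above toggle go-metrics' global `UseNilMetrics` switch, which the `Broker.Open` change also consults so that per-broker metric registration is skipped when metrics are disabled. A hedged sketch of turning metric collection off for a client; `newNoMetricsConfig` is a hypothetical helper, and note that `UseNilMetrics` is a package-level global, so flipping it affects the whole process:

```go
package kafkaconfig

import (
	"github.com/Shopify/sarama"
	metrics "github.com/rcrowley/go-metrics"
)

// newNoMetricsConfig builds a config with metric collection disabled.
// With UseNilMetrics set, go-metrics hands out no-op metric instances,
// and Broker.Open (per the change above) skips per-broker registration.
func newNoMetricsConfig() *sarama.Config {
	metrics.UseNilMetrics = true
	return sarama.NewConfig()
}
```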
4 changes: 2 additions & 2 deletions config_test.go
@@ -115,7 +115,7 @@ func TestNetConfigValidates(t *testing.T) {
cfg.Net.SASL.Mechanism = SASLTypeSCRAMSHA256
cfg.Net.SASL.SCRAMClientGeneratorFunc = nil
cfg.Net.SASL.User = "user"
cfg.Net.SASL.Password = "stong_password"
cfg.Net.SASL.Password = "strong_password"
},
"A SCRAMClientGeneratorFunc function must be provided to Net.SASL.SCRAMClientGeneratorFunc",
},
@@ -126,7 +126,7 @@
cfg.Net.SASL.Mechanism = SASLTypeSCRAMSHA512
cfg.Net.SASL.SCRAMClientGeneratorFunc = nil
cfg.Net.SASL.User = "user"
cfg.Net.SASL.Password = "stong_password"
cfg.Net.SASL.Password = "strong_password"
},
"A SCRAMClientGeneratorFunc function must be provided to Net.SASL.SCRAMClientGeneratorFunc",
},
