This topic describes how to use the Go SDK to connect to ApsaraMQ for Kafka over an endpoint and send and receive messages.
Environment preparation
You have installed Go. For more information, see Install Go.
The kafka-confluent-go-demo does not support Windows.
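You can verify the installation on the Linux host where you will run the demo by printing the installed Go version:
go version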
Prepare configurations
Optional: Download the SSL root certificate. This certificate is required if you use an SSL endpoint.
Go to aliware-kafka-demos, click the icon, select Download ZIP from the drop-down list, and then download and decompress the demo project.
In the decompressed demo project, find the kafka-confluent-go-demo folder and upload it to the /home directory of the Linux system.
Log on to the Linux system, go to the /home/kafka-confluent-go-demo directory, and modify the conf/kafka.json configuration file.
{ "topic": "XXX", "group.id": "XXX", "bootstrap.servers" : "XXX:XX,XXX:XX,XXX:XX", "security.protocol" : "plaintext", "sasl.mechanism" : "XXX", "sasl.username" : "XXX", "sasl.password" : "XXX" }
Parameter
Description
topic
The name of the topic on the instance. You can obtain it on the Topic Management page in the ApsaraMQ for Kafka console.
group.id
The group ID on the instance. You can obtain it on the Group Management page in the ApsaraMQ for Kafka console.
Note: If the application runs producer.go to send messages, this parameter is optional. If the application runs consumer.go to subscribe to messages, this parameter is required.
bootstrap.servers
The IP addresses and port numbers of the endpoint. You can obtain them in the Endpoint Information section of the Instance Details page in the ApsaraMQ for Kafka console.
security.protocol
The protocol used for SASL authentication. Default value: plaintext. Valid values for each endpoint type:
Default endpoint: plaintext.
SSL endpoint: sasl_ssl.
SASL endpoint: sasl_plaintext.
sasl.mechanism
The mechanism used to send and receive messages. Valid values for each endpoint type:
Default endpoint: not applicable; leave this parameter unconfigured.
SSL endpoint: PLAIN.
SASL endpoint: PLAIN for the PLAIN mechanism, or SCRAM-SHA-256 for the SCRAM mechanism.
sasl.username
The SASL username. This parameter is required if you use an SSL endpoint or a SASL endpoint.
Note:
- If the access control list (ACL) feature is not enabled for the instance, you can obtain the default username and password in the Configuration Information section of the Instance Details page in the ApsaraMQ for Kafka console.
- If the ACL feature is enabled for the instance, make sure that the SASL user is authorized to send and receive messages by using the instance. For more information, see Grant permissions to SASL users.
sasl.password
The SASL user password. This parameter is required if you use an SSL endpoint or a SASL endpoint.
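For reference, a kafka.json for an SSL endpoint might look like the following. All values here are placeholders; replace them with the topic, group, endpoint addresses, and credentials of your own instance:
{
    "topic": "your-topic",
    "group.id": "your-group",
    "bootstrap.servers": "broker-1.example.com:9093,broker-2.example.com:9093,broker-3.example.com:9093",
    "security.protocol": "sasl_ssl",
    "sasl.mechanism": "PLAIN",
    "sasl.username": "your-username",
    "sasl.password": "your-password"
}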
Send messages
Run the following command to run producer.go and send messages:
go run -mod=vendor producer/producer.go
Sample code of the message program producer.go:
package main
import (
"encoding/json"
"fmt"
"github.com/confluentinc/confluent-kafka-go/kafka"
"os"
"path/filepath"
)
type KafkaConfig struct {
Topic string `json:"topic"`
GroupId string `json:"group.id"`
BootstrapServers string `json:"bootstrap.servers"`
SecurityProtocol string `json:"security.protocol"`
SslCaLocation string `json:"ssl.ca.location"`
SaslMechanism string `json:"sasl.mechanism"`
SaslUsername string `json:"sasl.username"`
SaslPassword string `json:"sasl.password"`
}
// config should be a pointer to structure, if not, panic
func loadJsonConfig() *KafkaConfig {
workPath, err := os.Getwd()
if err != nil {
panic(err)
}
configPath := filepath.Join(workPath, "conf")
fullPath := filepath.Join(configPath, "kafka.json")
file, err := os.Open(fullPath);
if (err != nil) {
msg := fmt.Sprintf("Can not load config at %s. Error: %v", fullPath, err)
panic(msg)
}
defer file.Close()
decoder := json.NewDecoder(file)
var config = &KafkaConfig{}
err = decoder.Decode(config);
if (err != nil) {
msg := fmt.Sprintf("Decode json fail for config file at %s. Error: %v", fullPath, err)
panic(msg)
}
json.Marshal(config)
return config
}
func doInitProducer(cfg *KafkaConfig) *kafka.Producer {
fmt.Print("init kafka producer, it may take a few seconds to init the connection\n")
//common arguments
var kafkaconf = &kafka.ConfigMap{
"api.version.request": "true",
"message.max.bytes": 1000000,
"linger.ms": 10,
"retries": 30,
"retry.backoff.ms": 1000,
"acks": "1"}
kafkaconf.SetKey("bootstrap.servers", cfg.BootstrapServers)
switch cfg.SecurityProtocol {
case "plaintext" :
kafkaconf.SetKey("security.protocol", "plaintext");
case "sasl_ssl":
kafkaconf.SetKey("security.protocol", "sasl_ssl");
kafkaconf.SetKey("ssl.ca.location", "conf/ca-cert.pem");
kafkaconf.SetKey("sasl.username", cfg.SaslUsername);
kafkaconf.SetKey("sasl.password", cfg.SaslPassword);
kafkaconf.SetKey("sasl.mechanism", cfg.SaslMechanism);
kafkaconf.SetKey("enable.ssl.certificate.verification", "false");
kafkaconf.SetKey("ssl.endpoint.identification.algorithm", "None")
case "sasl_plaintext":
kafkaconf.SetKey("sasl.mechanism", "PLAIN")
kafkaconf.SetKey("security.protocol", "sasl_plaintext");
kafkaconf.SetKey("sasl.username", cfg.SaslUsername);
kafkaconf.SetKey("sasl.password", cfg.SaslPassword);
kafkaconf.SetKey("sasl.mechanism", cfg.SaslMechanism)
default:
panic(kafka.NewError(kafka.ErrUnknownProtocol, "unknown protocol", true))
}
producer, err := kafka.NewProducer(kafkaconf)
if err != nil {
panic(err)
}
fmt.Print("init kafka producer success\n")
return producer;
}
func main() {
// Choose the correct protocol
// 9092 for PLAINTEXT
// 9093 for SASL_SSL, need to provide sasl.username and sasl.password
// 9094 for SASL_PLAINTEXT, need to provide sasl.username and sasl.password
cfg := loadJsonConfig();
producer := doInitProducer(cfg)
defer producer.Close()
// Delivery report handler for produced messages
go func() {
for e := range producer.Events() {
switch ev := e.(type) {
case *kafka.Message:
if ev.TopicPartition.Error != nil {
fmt.Printf("Delivery failed: %v\n", ev.TopicPartition)
} else {
fmt.Printf("Delivered message to %v\n", ev.TopicPartition)
}
}
}
}()
// Produce messages to topic (asynchronously)
topic := cfg.Topic
for _, word := range []string{"Welcome", "to", "the", "Confluent", "Kafka", "Golang", "client"} {
producer.Produce(&kafka.Message{
TopicPartition: kafka.TopicPartition{Topic: &topic, Partition: kafka.PartitionAny},
Value: []byte(word),
}, nil)
}
// Wait for message deliveries before shutting down
producer.Flush(15 * 1000)
}
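In the demo, the error returned by Produce is ignored and delivery is confirmed only through the shared Events() channel. If you need per-message confirmation, you can pass a dedicated delivery channel as the second argument of Produce. The following is a minimal sketch; produceAndWait is a hypothetical helper and is not part of the demo project:
// produceAndWait enqueues a single message and blocks until its delivery
// report arrives on a dedicated delivery channel, so the caller can check
// both the Produce error and the per-message delivery result.
func produceAndWait(producer *kafka.Producer, topic string, value []byte) error {
    deliveryChan := make(chan kafka.Event, 1)
    defer close(deliveryChan)

    err := producer.Produce(&kafka.Message{
        TopicPartition: kafka.TopicPartition{Topic: &topic, Partition: kafka.PartitionAny},
        Value:          value,
    }, deliveryChan)
    if err != nil {
        // Produce fails immediately if, for example, the local queue is full.
        return err
    }

    e := <-deliveryChan // block until the broker acknowledges or the message fails
    m := e.(*kafka.Message)
    if m.TopicPartition.Error != nil {
        return m.TopicPartition.Error
    }
    fmt.Printf("Delivered message to %v\n", m.TopicPartition)
    return nil
}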
Subscribe to messages
Run the following command to run consumer.go and subscribe to messages:
go run -mod=vendor consumer/consumer.go
Sample code of the message program consumer.go:
package main
import (
"encoding/json"
"fmt"
"github.com/confluentinc/confluent-kafka-go/kafka"
"os"
"path/filepath"
)
type KafkaConfig struct {
Topic string `json:"topic"`
GroupId string `json:"group.id"`
BootstrapServers string `json:"bootstrap.servers"`
SecurityProtocol string `json:"security.protocol"`
SaslMechanism string `json:"sasl.mechanism"`
SaslUsername string `json:"sasl.username"`
SaslPassword string `json:"sasl.password"`
}
// config should be a pointer to structure, if not, panic
func loadJsonConfig() *KafkaConfig {
workPath, err := os.Getwd()
if err != nil {
panic(err)
}
configPath := filepath.Join(workPath, "conf")
fullPath := filepath.Join(configPath, "kafka.json")
file, err := os.Open(fullPath);
if (err != nil) {
msg := fmt.Sprintf("Can not load config at %s. Error: %v", fullPath, err)
panic(msg)
}
defer file.Close()
decoder := json.NewDecoder(file)
var config = &KafkaConfig{}
err = decoder.Decode(config);
if (err != nil) {
msg := fmt.Sprintf("Decode json fail for config file at %s. Error: %v", fullPath, err)
panic(msg)
}
json.Marshal(config)
return config
}
func doInitConsumer(cfg *KafkaConfig) *kafka.Consumer {
fmt.Print("init kafka consumer, it may take a few seconds to init the connection\n")
//common arguments
var kafkaconf = &kafka.ConfigMap{
"api.version.request": "true",
"auto.offset.reset": "latest",
"heartbeat.interval.ms": 3000,
"session.timeout.ms": 30000,
"max.poll.interval.ms": 120000,
"fetch.max.bytes": 1024000,
"max.partition.fetch.bytes": 256000}
kafkaconf.SetKey("bootstrap.servers", cfg.BootstrapServers);
kafkaconf.SetKey("group.id", cfg.GroupId)
switch cfg.SecurityProtocol {
case "plaintext" :
kafkaconf.SetKey("security.protocol", "plaintext");
case "sasl_ssl":
kafkaconf.SetKey("security.protocol", "sasl_ssl");
kafkaconf.SetKey("ssl.ca.location", "./conf/ca-cert.pem");
kafkaconf.SetKey("sasl.username", cfg.SaslUsername);
kafkaconf.SetKey("sasl.password", cfg.SaslPassword);
kafkaconf.SetKey("sasl.mechanism", cfg.SaslMechanism);
kafkaconf.SetKey("ssl.endpoint.identification.algorithm", "None");
kafkaconf.SetKey("enable.ssl.certificate.verification", "false")
case "sasl_plaintext":
kafkaconf.SetKey("security.protocol", "sasl_plaintext");
kafkaconf.SetKey("sasl.username", cfg.SaslUsername);
kafkaconf.SetKey("sasl.password", cfg.SaslPassword);
kafkaconf.SetKey("sasl.mechanism", cfg.SaslMechanism)
default:
panic(kafka.NewError(kafka.ErrUnknownProtocol, "unknown protocol", true))
}
consumer, err := kafka.NewConsumer(kafkaconf)
if err != nil {
panic(err)
}
fmt.Print("init kafka consumer success\n")
return consumer;
}
func main() {
// Choose the correct protocol
// 9092 for PLAINTEXT
// 9093 for SASL_SSL, need to provide sasl.username and sasl.password
// 9094 for SASL_PLAINTEXT, need to provide sasl.username and sasl.password
cfg := loadJsonConfig();
consumer := doInitConsumer(cfg)
consumer.SubscribeTopics([]string{cfg.Topic}, nil)
for {
msg, err := consumer.ReadMessage(-1)
if err == nil {
fmt.Printf("Message on %s: %s\n", msg.TopicPartition, string(msg.Value))
} else {
// The client will automatically try to recover from all errors.
fmt.Printf("Consumer error: %v (%v)\n", err, msg)
}
}
consumer.Close()
}
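In the demo, the for loop never returns, so the consumer.Close() call at the end is never reached. If you want the consumer to leave the group cleanly, a variant along the following lines polls with a timeout and stops on SIGINT or SIGTERM. This is an illustrative sketch, not part of the demo project; it reuses loadJsonConfig and doInitConsumer from the code above and additionally imports os/signal, syscall, and time:
// Alternative main() that exits cleanly on Ctrl+C (SIGINT) or SIGTERM so that
// consumer.Close() is actually reached and the group can rebalance promptly.
func main() {
    cfg := loadJsonConfig()
    consumer := doInitConsumer(cfg)
    defer consumer.Close()

    consumer.SubscribeTopics([]string{cfg.Topic}, nil)

    sigchan := make(chan os.Signal, 1)
    signal.Notify(sigchan, syscall.SIGINT, syscall.SIGTERM)

    for {
        select {
        case sig := <-sigchan:
            fmt.Printf("Caught signal %v: terminating\n", sig)
            return
        default:
            // Poll with a short timeout instead of blocking forever so signals are noticed.
            msg, err := consumer.ReadMessage(100 * time.Millisecond)
            if err == nil {
                fmt.Printf("Message on %s: %s\n", msg.TopicPartition, string(msg.Value))
            } else if kerr, ok := err.(kafka.Error); !ok || kerr.Code() != kafka.ErrTimedOut {
                // Timeouts are expected while idle; report everything else.
                fmt.Printf("Consumer error: %v (%v)\n", err, msg)
            }
        }
    }
}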