// Package kafconn connects to the log pipeline Kafka cluster over mutual
// TLS and hands out pre-configured readers and writers.
package kafconn

import (
	"context"
	"crypto/tls"
	"crypto/x509"
	"errors"
	"fmt"
	"log"
	"os"
	"time"

	"github.com/abh/certman"
	"github.com/segmentio/kafka-go"

	"go.ntppool.org/common/logger"
)

const (
	// bootstrapAddress = "logs-kafka-bootstrap:9093"
	bootstrapAddress = "logs-kafka-0.logs-kafka-brokers.ntppipeline.svc.cluster.local:9093"

	kafkaGroup = "logferry-api"

	// kafkaMaxBatchSize = 250000
	// kafkaMinBatchSize = 1000
)

// TLSSetup holds the file paths for the CA bundle and the client
// certificate/key pair used to authenticate to Kafka.
type TLSSetup struct {
	CA   string
	Key  string
	Cert string
}

// Kafka wraps a TLS-authenticated connection to the Kafka cluster, along
// with the dialer, transport, and broker list needed to build readers and
// writers.
type Kafka struct {
	tls TLSSetup

	transport *kafka.Transport
	brokers   []kafka.Broker

	dialer *kafka.Dialer
	conn   *kafka.Conn

	l *log.Logger

	// wr *kafka.Writer
}

// tlsConfig builds the client TLS configuration, using certman to watch the
// certificate/key pair on disk and reload it when it changes.
func (k *Kafka) tlsConfig() (*tls.Config, error) {
	cm, err := certman.New(k.tls.Cert, k.tls.Key)
	if err != nil {
		return nil, err
	}
	cm.Logger(k.l)

	err = cm.Watch()
	if err != nil {
		return nil, err
	}

	capool, err := k.caPool()
	if err != nil {
		return nil, err
	}

	tlsConfig := &tls.Config{
		InsecureSkipVerify: true,
		// MinVersion: tls.VersionTLS12,
		RootCAs:              capool,
		GetClientCertificate: cm.GetClientCertificate,
	}
	return tlsConfig, nil
}

// caPool reads the CA bundle from disk and returns it as a certificate pool.
func (k *Kafka) caPool() (*x509.CertPool, error) {
	capool := x509.NewCertPool()

	pem, err := os.ReadFile(k.tls.CA)
	if err != nil {
		return nil, err
	}

	if !capool.AppendCertsFromPEM(pem) {
		return nil, errors.New("credentials: failed to append certificates")
	}

	return capool, nil
}

// kafkaDialer returns a kafka.Dialer configured with the client TLS setup.
func (k *Kafka) kafkaDialer() (*kafka.Dialer, error) {
	tlsConfig, err := k.tlsConfig()
	if err != nil {
		return nil, err
	}

	dialer := &kafka.Dialer{
		Timeout:   10 * time.Second,
		DualStack: false,
		TLS:       tlsConfig,
	}

	return dialer, nil
}

// kafkaTransport returns a kafka.Transport configured with the client TLS
// setup, for use with kafka.Writer.
func (k *Kafka) kafkaTransport(ctx context.Context) (*kafka.Transport, error) {
	tlsConfig, err := k.tlsConfig()
	if err != nil {
		return nil, err
	}

	transport := &kafka.Transport{
		DialTimeout: 10 * time.Second,
		IdleTimeout: 60 * time.Second,
		TLS:         tlsConfig,
		Context:     ctx,
	}

	return transport, nil
}

// NewKafka dials the Kafka bootstrap address with the given TLS setup,
// fetches the broker list, and returns a Kafka ready to create readers and
// writers.
func NewKafka(ctx context.Context, tls TLSSetup) (*Kafka, error) {
	l := log.New(os.Stdout, "kafka: ", log.Ldate|log.Ltime|log.LUTC|log.Lmsgprefix|log.Lmicroseconds)

	k := &Kafka{
		l:   l,
		tls: tls,
	}

	if len(k.tls.CA) == 0 || len(k.tls.Cert) == 0 {
		return nil, fmt.Errorf("tls setup missing")
	}

	dialer, err := k.kafkaDialer()
	if err != nil {
		return nil, err
	}
	k.dialer = dialer

	transport, err := k.kafkaTransport(ctx)
	if err != nil {
		return nil, err
	}
	k.transport = transport

	conn, err := dialer.DialContext(ctx, "tcp", bootstrapAddress)
	if err != nil {
		return nil, fmt.Errorf("kafka conn error: %s", err)
	}

	k.conn = conn

	brokers, err := conn.Brokers()
	if err != nil {
		return nil, fmt.Errorf("could not get brokers: %s", err)
	}
	k.brokers = brokers

	for _, b := range brokers {
		l.Printf("broker host: %s", b.Host)
	}

	// wr := kafka.NewWriter(
	// 	kafka.WriterConfig{
	// 		Brokers: []string{brokerAddress},
	// 		Topic:   kafkaTopic,
	// 		Dialer:  dialer,
	// 		Logger:  l,
	// 	},
	// )

	// k.wr = wr

	return k, nil
}

// NewReader returns a kafka.Reader using the brokers discovered at
// connection time and the package's TLS dialer; the remaining ReaderConfig
// fields are taken from the caller.
func (k *Kafka) NewReader(config kafka.ReaderConfig) *kafka.Reader {
	config.Brokers = k.brokerAddrs()
	config.Dialer = k.dialer

	return kafka.NewReader(config)
}

// brokerAddrs returns the discovered brokers as host:port strings.
func (k *Kafka) brokerAddrs() []string {
	addrs := []string{}
	for _, b := range k.brokers {
		addrs = append(addrs, fmt.Sprintf("%s:%d", b.Host, b.Port))
	}
	return addrs
}

// NewWriter returns a kafka.Writer for the given topic, using the package's
// TLS transport, least-bytes balancing, and LZ4 compression.
func (k *Kafka) NewWriter(topic string) (*kafka.Writer, error) {
	// https://pkg.go.dev/github.com/segmentio/kafka-go#Writer
	w := &kafka.Writer{
		Addr:        kafka.TCP(k.brokerAddrs()...),
		Transport:   k.transport,
		Topic:       topic,
		Balancer:    &kafka.LeastBytes{},
		BatchSize:   2000,
		Compression: kafka.Lz4,
		// Logger:      k.l,
		ErrorLogger: k.l,
	}

	return w, nil
}

// CheckPartitions reads the partition list from the bootstrap connection and
// logs a note if it is empty.
func (k *Kafka) CheckPartitions() error {
	partitions, err := k.conn.ReadPartitions()
	if err != nil {
		return err
	}

	// should this result in an error?
	if len(partitions) == 0 {
		log := logger.Setup()
		log.Info("kafka connection has no partitions")
	}
	return nil
}
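
// exampleUsage is an illustrative sketch (not called anywhere) of how this
// package is expected to be wired together. The topic name and TLS file
// paths are hypothetical placeholders, not values defined elsewhere.
func exampleUsage(ctx context.Context) error {
	k, err := NewKafka(ctx, TLSSetup{
		CA:   "/etc/kafka-tls/ca.crt",  // hypothetical path
		Cert: "/etc/kafka-tls/tls.crt", // hypothetical path
		Key:  "/etc/kafka-tls/tls.key", // hypothetical path
	})
	if err != nil {
		return err
	}

	// Produce a message on a hypothetical topic.
	w, err := k.NewWriter("logs")
	if err != nil {
		return err
	}
	defer w.Close()
	if err := w.WriteMessages(ctx, kafka.Message{Value: []byte("hello")}); err != nil {
		return err
	}

	// Consume from the same topic using the package's consumer group.
	r := k.NewReader(kafka.ReaderConfig{
		Topic:   "logs",
		GroupID: kafkaGroup,
	})
	defer r.Close()

	msg, err := r.ReadMessage(ctx)
	if err != nil {
		return err
	}
	k.l.Printf("read message at offset %d: %s", msg.Offset, msg.Value)

	return nil
}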