5 Commits

Author SHA1 Message Date
ca190b0085 docs: add v0.5.2 release notes
Add changelog entries for recent commits:
- Health package: Kubernetes health probes
- Logger package: runtime level control and fixes
- Database package: config file override support
2025-09-21 12:10:05 -07:00
10864363e2 feat(health): enhance server with probe-specific handlers
- Add separate handlers for liveness (/healthz), readiness (/readyz),
  and startup (/startupz) probes
- Implement WithLivenessHandler, WithReadinessHandler, WithStartupHandler,
  and WithServiceName options
- Add probe-specific JSON response formats
- Add comprehensive package documentation with usage examples
- Maintain backward compatibility for /__health and / endpoints
- Add tests for all probe types and fallback scenarios

Enables proper Kubernetes health monitoring with different probe types.
2025-09-21 10:52:29 -07:00
66b51df2af feat(logger): add runtime log level control API
Add independent log level control for stderr and OTLP loggers.
Both can be configured via environment variables or programmatically
at runtime.

- Add SetLevel() and SetOTLPLevel() for runtime control
- Add ParseLevel() to convert strings to slog.Level
- Support LOG_LEVEL and OTLP_LOG_LEVEL env vars
- Maintain backward compatibility with DEBUG env var
- Add comprehensive test coverage
2025-09-06 05:21:33 -07:00
28d05d1d0e feat(database): add DATABASE_CONFIG_FILE env override
Allow overriding default database.yaml paths via DATABASE_CONFIG_FILE
environment variable. When set, uses single specified file instead of
default ["database.yaml", "/vault/secrets/database.yaml"] search paths.

Maintains backward compatibility when env var not set.
2025-08-03 12:20:35 -07:00
a774f92bf7 fix(logger): prevent mutex crash in bufferingExporter
Remove sync.Once reset that caused "unlock of unlocked mutex" panic.
Redesign initialization to use only checkReadiness goroutine for
retry attempts, eliminating race condition while preserving retry
functionality for TLS/tracing setup delays.
2025-08-02 22:55:57 -07:00
8 changed files with 810 additions and 57 deletions

View File

@@ -1,3 +1,22 @@
# Release Notes - v0.5.2
## Health Package
- **Kubernetes-native health probes** - Added dedicated handlers for liveness (`/healthz`), readiness (`/readyz`), and startup (`/startupz`) probes
- **Flexible configuration options** - New `WithLivenessHandler`, `WithReadinessHandler`, `WithStartupHandler`, and `WithServiceName` options
- **JSON response formats** - Structured probe responses with service identification
- **Backward compatibility** - Maintains existing `/__health` and `/` endpoints
## Logger Package
- **Runtime log level control** - Independent level management for stderr and OTLP loggers via `SetLevel()` and `SetOTLPLevel()`
- **Environment variable support** - Configure levels with `LOG_LEVEL` and `OTLP_LOG_LEVEL` env vars
- **String parsing utility** - New `ParseLevel()` function for converting string levels to `slog.Level`
- **Buffering exporter fix** - Resolved "unlock of unlocked mutex" panic in `bufferingExporter`
- **Initialization redesign** - Eliminated race conditions in TLS/tracing setup retry logic
## Database Package
- **Configuration file override** - Added `DATABASE_CONFIG_FILE` environment variable to specify custom database configuration file paths
- **Flexible path configuration** - Override default `["database.yaml", "/vault/secrets/database.yaml"]` search paths when needed
# Release Notes - v0.5.1
## Observability Enhancements

View File

@@ -1,6 +1,7 @@
package database
import (
"os"
"time"
"github.com/prometheus/client_golang/prometheus"
@@ -36,10 +37,20 @@ type ConfigOptions struct {
ConnMaxLifetime time.Duration
}
// getConfigFiles determines which configuration files should be searched
// for database settings.
//
// When the DATABASE_CONFIG_FILE environment variable is non-empty, its value
// is used as the sole candidate file. Otherwise the standard search paths
// (working-directory database.yaml, then the Vault secrets path) are returned.
func getConfigFiles() []string {
	override := os.Getenv("DATABASE_CONFIG_FILE")
	if override == "" {
		return []string{"database.yaml", "/vault/secrets/database.yaml"}
	}
	return []string{override}
}
// DefaultConfigOptions returns the standard configuration options used by API package
func DefaultConfigOptions() ConfigOptions {
return ConfigOptions{
ConfigFiles: []string{"database.yaml", "/vault/secrets/database.yaml"},
ConfigFiles: getConfigFiles(),
EnablePoolMonitoring: true,
PrometheusRegisterer: prometheus.DefaultRegisterer,
MaxOpenConns: 25,
@@ -51,7 +62,7 @@ func DefaultConfigOptions() ConfigOptions {
// MonitorConfigOptions returns configuration options optimized for Monitor package
func MonitorConfigOptions() ConfigOptions {
return ConfigOptions{
ConfigFiles: []string{"database.yaml", "/vault/secrets/database.yaml"},
ConfigFiles: getConfigFiles(),
EnablePoolMonitoring: false, // Monitor doesn't need metrics
PrometheusRegisterer: nil, // No Prometheus dependency
MaxOpenConns: 10,

View File

@@ -1,13 +1,71 @@
// Package health provides a standalone HTTP server for health checks.
//
// This package implements a simple health check server that can be used
// to expose health status endpoints for monitoring and load balancing.
// It supports custom health check handlers and provides structured logging
// with graceful shutdown capabilities.
// This package implements a flexible health check server that supports
// different handlers for Kubernetes probe types (liveness, readiness, startup).
// It provides structured logging, graceful shutdown, and standard HTTP endpoints
// for monitoring and load balancing.
//
// # Kubernetes Probe Types
//
// Liveness Probe: Detects when a container is "dead" and needs restarting.
// Should be a lightweight check that verifies the process is still running
// and not in an unrecoverable state.
//
// Readiness Probe: Determines when a container is ready to accept traffic.
// Controls which Pods are used as backends for Services. Should verify
// the application can handle requests properly.
//
// Startup Probe: Verifies when a container application has successfully started.
// Delays liveness and readiness probes until startup succeeds. Useful for
// slow-starting applications.
//
// # Usage Examples
//
// Basic usage with a single handler for all probes:
//
// srv := health.NewServer(myHealthHandler)
// srv.Listen(ctx, 9091)
//
// Advanced usage with separate handlers for each probe type:
//
// srv := health.NewServer(nil,
// health.WithLivenessHandler(func(w http.ResponseWriter, r *http.Request) {
// // Simple alive check
// w.WriteHeader(http.StatusOK)
// }),
// health.WithReadinessHandler(func(w http.ResponseWriter, r *http.Request) {
// // Check if ready to serve traffic
// if err := checkDatabase(); err != nil {
// w.WriteHeader(http.StatusServiceUnavailable)
// return
// }
// w.WriteHeader(http.StatusOK)
// }),
// health.WithStartupHandler(func(w http.ResponseWriter, r *http.Request) {
// // Check if startup is complete
// if !applicationReady() {
// w.WriteHeader(http.StatusServiceUnavailable)
// return
// }
// w.WriteHeader(http.StatusOK)
// }),
// health.WithServiceName("my-service"),
// )
// srv.Listen(ctx, 9091)
//
// # Standard Endpoints
//
// The server exposes these endpoints:
// - /healthz - liveness probe (or general health if no specific handler)
// - /readyz - readiness probe (or general health if no specific handler)
// - /startupz - startup probe (or general health if no specific handler)
// - /__health - general health endpoint (backward compatibility)
// - / - general health endpoint (root path)
package health
import (
"context"
"encoding/json"
"log/slog"
"net/http"
"strconv"
@@ -21,23 +79,74 @@ import (
// It runs separately from the main application server to ensure health
// checks remain available even if the main server is experiencing issues.
//
// The server includes built-in timeouts, graceful shutdown, and structured
// logging for monitoring and debugging health check behavior.
// The server supports separate handlers for different Kubernetes probe types
// (liveness, readiness, startup) and includes built-in timeouts, graceful
// shutdown, and structured logging.
type Server struct {
	log *slog.Logger

	// healthFn is the handler passed to NewServer. NewServer also copies it
	// into generalHandler; healthFn itself is not read elsewhere in the
	// visible code — presumably kept for backward compatibility (TODO confirm).
	healthFn http.HandlerFunc

	// Probe-specific handlers; any that is nil falls back to generalHandler.
	livenessHandler  http.HandlerFunc
	readinessHandler http.HandlerFunc
	startupHandler   http.HandlerFunc

	generalHandler http.HandlerFunc // fallback for /__health and / paths
	serviceName    string           // included in JSON responses when non-empty
}
// NewServer creates a new health check server with the specified health handler.
// If healthFn is nil, a default handler that returns HTTP 200 "ok" is used.
func NewServer(healthFn http.HandlerFunc) *Server {
// Option represents a configuration option for the health server.
// Options are applied to the Server in order during NewServer construction.
type Option func(*Server)
// WithLivenessHandler installs a dedicated handler for the /healthz endpoint.
// Kubernetes uses the liveness probe to decide whether a container must be
// restarted.
func WithLivenessHandler(handler http.HandlerFunc) Option {
	return func(srv *Server) {
		srv.livenessHandler = handler
	}
}
// WithReadinessHandler installs a dedicated handler for the /readyz endpoint.
// Kubernetes uses the readiness probe to decide whether a container may
// receive traffic.
func WithReadinessHandler(handler http.HandlerFunc) Option {
	return func(srv *Server) {
		srv.readinessHandler = handler
	}
}
// WithStartupHandler installs a dedicated handler for the /startupz endpoint.
// Kubernetes uses the startup probe to decide whether a container has
// finished initializing.
func WithStartupHandler(handler http.HandlerFunc) Option {
	return func(srv *Server) {
		srv.startupHandler = handler
	}
}
// WithServiceName records the service name that is echoed back in JSON
// responses and used for logging.
func WithServiceName(serviceName string) Option {
	return func(srv *Server) {
		srv.serviceName = serviceName
	}
}
// NewServer creates a new health check server with optional probe-specific
// handlers.
//
// The healthFn argument acts as the fallback for any probe endpoint that has
// no dedicated handler configured. When healthFn is nil, a default handler
// that answers HTTP 200 "ok" is used as the fallback instead.
//
// Probe-specific behavior is supplied through the With* option functions
// (liveness, readiness, startup, service name).
func NewServer(healthFn http.HandlerFunc, opts ...Option) *Server {
	fallback := healthFn
	if fallback == nil {
		fallback = basicHealth
	}

	srv := &Server{
		log:            logger.Setup(),
		healthFn:       fallback,
		generalHandler: fallback,
	}

	for _, applyOpt := range opts {
		applyOpt(srv)
	}

	return srv
}
@@ -47,13 +156,27 @@ func (srv *Server) SetLogger(log *slog.Logger) {
}
// Listen starts the health server on the specified port and blocks until ctx is cancelled.
// The server exposes the health handler at "/__health" with graceful shutdown support.
// The server exposes health check endpoints with graceful shutdown support.
//
// Standard endpoints exposed:
// - /healthz - liveness probe (uses livenessHandler or falls back to generalHandler)
// - /readyz - readiness probe (uses readinessHandler or falls back to generalHandler)
// - /startupz - startup probe (uses startupHandler or falls back to generalHandler)
// - /__health - general health endpoint (uses generalHandler)
// - / - root health endpoint (uses generalHandler)
func (srv *Server) Listen(ctx context.Context, port int) error {
srv.log.Info("starting health listener", "port", port)
serveMux := http.NewServeMux()
serveMux.HandleFunc("/__health", srv.healthFn)
// Register probe-specific handlers
serveMux.HandleFunc("/healthz", srv.createProbeHandler("liveness"))
serveMux.HandleFunc("/readyz", srv.createProbeHandler("readiness"))
serveMux.HandleFunc("/startupz", srv.createProbeHandler("startup"))
// Register general health endpoints for backward compatibility
serveMux.HandleFunc("/__health", srv.createGeneralHandler())
serveMux.HandleFunc("/", srv.createGeneralHandler())
hsrv := &http.Server{
Addr: ":" + strconv.Itoa(port),
@@ -89,6 +212,121 @@ func (srv *Server) Listen(ctx context.Context, port int) error {
return g.Wait()
}
// createProbeHandler creates an HTTP handler for one probe type ("liveness",
// "readiness", or "startup"). The configured probe handler (or the general
// fallback) is invoked first; if it did not write a body of its own, a small
// probe-specific JSON document is emitted based on the status code it set.
func (srv *Server) createProbeHandler(probeType string) http.HandlerFunc {
	return func(w http.ResponseWriter, r *http.Request) {
		var handler http.HandlerFunc

		// Select the handler registered for this probe type (may be nil).
		switch probeType {
		case "liveness":
			handler = srv.livenessHandler
		case "readiness":
			handler = srv.readinessHandler
		case "startup":
			handler = srv.startupHandler
		}

		// Fall back to general handler if no specific handler is configured.
		if handler == nil {
			handler = srv.generalHandler
		}

		// Wrap the real writer so we can observe the status code and whether
		// the handler produced its own body; writes still pass through to w.
		recorder := &statusRecorder{ResponseWriter: w, statusCode: 200}
		handler(recorder, r)

		// If the handler already wrote a response body, leave it untouched.
		if recorder.written {
			return
		}

		// Otherwise, provide a standard JSON response based on the status code.
		// NOTE(review): if the handler called WriteHeader, the header block has
		// already been flushed to the client, so this Content-Type set is a
		// no-op in that case — confirm whether that is acceptable.
		// Encode errors are ignored below; once the status line is out there
		// is no useful recovery.
		w.Header().Set("Content-Type", "application/json")
		if recorder.statusCode >= 400 {
			// Handler indicated unhealthy
			switch probeType {
			case "liveness":
				json.NewEncoder(w).Encode(map[string]string{"status": "unhealthy"})
			case "readiness":
				json.NewEncoder(w).Encode(map[string]bool{"ready": false})
			case "startup":
				json.NewEncoder(w).Encode(map[string]bool{"started": false})
			}
		} else {
			// Handler indicated healthy
			switch probeType {
			case "liveness":
				json.NewEncoder(w).Encode(map[string]string{"status": "alive"})
			case "readiness":
				json.NewEncoder(w).Encode(map[string]bool{"ready": true})
			case "startup":
				json.NewEncoder(w).Encode(map[string]bool{"started": true})
			}
		}
	}
}
// createGeneralHandler builds the handler used for the general health
// endpoints (/__health and /). The configured general handler is executed
// against a throwaway writer so that only its status code is observed; the
// JSON document produced here is always the real response body.
func (srv *Server) createGeneralHandler() http.HandlerFunc {
	return func(w http.ResponseWriter, r *http.Request) {
		// Capture the status code without letting the handler write output.
		probe := &statusRecorder{ResponseWriter: &discardWriter{}, statusCode: 200}
		srv.generalHandler(probe, r)

		status := "unhealthy"
		if probe.statusCode < 400 {
			status = "healthy"
		}

		// Always answer general endpoints with a comprehensive JSON document.
		w.Header().Set("Content-Type", "application/json")
		w.WriteHeader(probe.statusCode)

		response := map[string]interface{}{
			"status": status,
		}
		if srv.serviceName != "" {
			response["service"] = srv.serviceName
		}
		json.NewEncoder(w).Encode(response)
	}
}
// statusRecorder captures the response status code from handlers while allowing
// them to write their own response content if needed. Both the status line and
// body bytes are forwarded to the wrapped ResponseWriter.
//
// Note: written is set only by Write, not by WriteHeader — callers use it to
// detect whether the handler produced a body of its own.
type statusRecorder struct {
	http.ResponseWriter
	statusCode int  // last code passed to WriteHeader; callers preset the default (200)
	written    bool // true once the handler has written any body bytes
}

// WriteHeader records the status code and forwards it to the underlying writer.
func (r *statusRecorder) WriteHeader(code int) {
	r.statusCode = code
	r.ResponseWriter.WriteHeader(code)
}

// Write marks that the handler wrote its own body and forwards the bytes.
func (r *statusRecorder) Write(data []byte) (int, error) {
	r.written = true
	return r.ResponseWriter.Write(data)
}
// discardWriter implements http.ResponseWriter but discards all writes.
// Used to capture status codes without writing response content.
type discardWriter struct{}

// Header returns a fresh empty header map on every call, so any headers set
// by the wrapped handler are silently discarded.
func (d *discardWriter) Header() http.Header {
	return make(http.Header)
}

// Write discards the data. It reports the full length as written so callers
// that check for short writes (per the io.Writer contract, n < len(p) with a
// nil error is invalid) do not misbehave; the previous version returned 0.
func (d *discardWriter) Write(p []byte) (int, error) {
	return len(p), nil
}

// WriteHeader ignores the status code.
func (d *discardWriter) WriteHeader(int) {}
// HealthCheckListener runs a simple HTTP server on the specified port for health check probes.
func HealthCheckListener(ctx context.Context, port int, log *slog.Logger) error {
srv := NewServer(nil)

View File

@@ -1,13 +1,14 @@
package health
import (
"fmt"
"io"
"net/http"
"net/http/httptest"
"testing"
)
func TestHealthHandler(t *testing.T) {
func TestBasicHealthHandler(t *testing.T) {
req := httptest.NewRequest(http.MethodGet, "/__health", nil)
w := httptest.NewRecorder()
@@ -24,3 +25,129 @@ func TestHealthHandler(t *testing.T) {
t.Errorf("expected ok got %q", string(data))
}
}
// TestProbeHandlers verifies that a server configured with explicit healthy
// handlers for every probe type returns the expected status code and JSON body
// for each probe endpoint, and that the general handler includes the service
// name in its response.
func TestProbeHandlers(t *testing.T) {
	// Test with separate handlers for each probe type
	srv := NewServer(nil,
		WithLivenessHandler(func(w http.ResponseWriter, r *http.Request) {
			w.WriteHeader(http.StatusOK)
		}),
		WithReadinessHandler(func(w http.ResponseWriter, r *http.Request) {
			w.WriteHeader(http.StatusOK)
		}),
		WithStartupHandler(func(w http.ResponseWriter, r *http.Request) {
			w.WriteHeader(http.StatusOK)
		}),
		WithServiceName("test-service"),
	)

	tests := []struct {
		handler        http.HandlerFunc
		expectedStatus int
		expectedBody   string
	}{
		{srv.createProbeHandler("liveness"), 200, `{"status":"alive"}`},
		{srv.createProbeHandler("readiness"), 200, `{"ready":true}`},
		{srv.createProbeHandler("startup"), 200, `{"started":true}`},
		{srv.createGeneralHandler(), 200, `{"service":"test-service","status":"healthy"}`},
	}

	for i, tt := range tests {
		t.Run(fmt.Sprintf("test_%d", i), func(t *testing.T) {
			req := httptest.NewRequest(http.MethodGet, "/", nil)
			w := httptest.NewRecorder()

			tt.handler(w, req)

			if w.Code != tt.expectedStatus {
				t.Errorf("expected status %d, got %d", tt.expectedStatus, w.Code)
			}

			body := w.Body.String()
			if body != tt.expectedBody+"\n" { // json.Encoder adds newline
				t.Errorf("expected body %q, got %q", tt.expectedBody, body)
			}
		})
	}
}
// TestProbeHandlerFallback verifies that probe endpoints fall back to the
// general handler when no probe-specific handlers are configured, and that
// the standard probe-specific JSON bodies are still produced.
func TestProbeHandlerFallback(t *testing.T) {
	// Test fallback to general handler when no specific handler is configured
	generalHandler := func(w http.ResponseWriter, r *http.Request) {
		w.WriteHeader(http.StatusOK)
	}

	srv := NewServer(generalHandler, WithServiceName("test-service"))

	tests := []struct {
		handler        http.HandlerFunc
		expectedStatus int
		expectedBody   string
	}{
		{srv.createProbeHandler("liveness"), 200, `{"status":"alive"}`},
		{srv.createProbeHandler("readiness"), 200, `{"ready":true}`},
		{srv.createProbeHandler("startup"), 200, `{"started":true}`},
	}

	for i, tt := range tests {
		t.Run(fmt.Sprintf("fallback_%d", i), func(t *testing.T) {
			req := httptest.NewRequest(http.MethodGet, "/", nil)
			w := httptest.NewRecorder()

			tt.handler(w, req)

			if w.Code != tt.expectedStatus {
				t.Errorf("expected status %d, got %d", tt.expectedStatus, w.Code)
			}

			body := w.Body.String()
			if body != tt.expectedBody+"\n" { // json.Encoder adds newline
				t.Errorf("expected body %q, got %q", tt.expectedBody, body)
			}
		})
	}
}
// TestUnhealthyProbeHandlers verifies that probe handlers reporting HTTP 503
// produce the corresponding "unhealthy"/false JSON bodies with the handler's
// status code passed through.
func TestUnhealthyProbeHandlers(t *testing.T) {
	// Test with handlers that return unhealthy status
	srv := NewServer(nil,
		WithLivenessHandler(func(w http.ResponseWriter, r *http.Request) {
			w.WriteHeader(http.StatusServiceUnavailable)
		}),
		WithReadinessHandler(func(w http.ResponseWriter, r *http.Request) {
			w.WriteHeader(http.StatusServiceUnavailable)
		}),
		WithStartupHandler(func(w http.ResponseWriter, r *http.Request) {
			w.WriteHeader(http.StatusServiceUnavailable)
		}),
		WithServiceName("test-service"),
	)

	tests := []struct {
		handler        http.HandlerFunc
		expectedStatus int
		expectedBody   string
	}{
		{srv.createProbeHandler("liveness"), 503, `{"status":"unhealthy"}`},
		{srv.createProbeHandler("readiness"), 503, `{"ready":false}`},
		{srv.createProbeHandler("startup"), 503, `{"started":false}`},
	}

	for i, tt := range tests {
		t.Run(fmt.Sprintf("unhealthy_%d", i), func(t *testing.T) {
			req := httptest.NewRequest(http.MethodGet, "/", nil)
			w := httptest.NewRecorder()

			tt.handler(w, req)

			if w.Code != tt.expectedStatus {
				t.Errorf("expected status %d, got %d", tt.expectedStatus, w.Code)
			}

			body := w.Body.String()
			if body != tt.expectedBody+"\n" { // json.Encoder adds newline
				t.Errorf("expected body %q, got %q", tt.expectedBody, body)
			}
		})
	}
}

View File

@@ -23,8 +23,7 @@ type bufferingExporter struct {
// Real exporter (created when tracing is configured)
exporter otellog.Exporter
// Thread-safe initialization
initOnce sync.Once
// Thread-safe initialization state (managed only by checkReadiness)
initErr error
// Background checker
@@ -48,13 +47,7 @@ func newBufferingExporter() *bufferingExporter {
// Export implements otellog.Exporter
func (e *bufferingExporter) Export(ctx context.Context, records []otellog.Record) error {
// Try initialization once
e.initOnce.Do(func() {
e.initErr = e.initialize()
})
// If initialization succeeded, use the exporter
if e.initErr == nil {
// Check if exporter is ready (initialization handled by checkReadiness goroutine)
e.mu.RLock()
exporter := e.exporter
e.mu.RUnlock()
@@ -62,7 +55,6 @@ func (e *bufferingExporter) Export(ctx context.Context, records []otellog.Record
if exporter != nil {
return exporter.Export(ctx, records)
}
}
// Not ready yet, buffer the records
return e.bufferRecords(records)
@@ -117,24 +109,31 @@ func (e *bufferingExporter) bufferRecords(records []otellog.Record) error {
return nil
}
// checkReadiness periodically checks if tracing is configured
// checkReadiness periodically attempts initialization until successful
func (e *bufferingExporter) checkReadiness() {
defer close(e.checkerDone)
ticker := time.NewTicker(1 * time.Second) // Reduced frequency since OTLP handles retries
ticker := time.NewTicker(1 * time.Second)
defer ticker.Stop()
for {
select {
case <-ticker.C:
// If initialization failed, reset sync.Once to allow retry
// The OTLP exporter will handle its own retry logic
if e.initErr != nil {
e.initOnce = sync.Once{}
} else if e.exporter != nil {
// Check if we already have a working exporter
e.mu.RLock()
hasExporter := e.exporter != nil
e.mu.RUnlock()
if hasExporter {
return // Exporter ready, checker no longer needed
}
// Try to initialize
err := e.initialize()
e.mu.Lock()
e.initErr = err
e.mu.Unlock()
case <-e.stopChecker:
return
}
@@ -180,14 +179,21 @@ func (e *bufferingExporter) Shutdown(ctx context.Context) error {
// Stop the readiness checker from continuing
close(e.stopChecker)
// Give one final chance for TLS/tracing to become ready before fully shutting down
e.initOnce.Do(func() {
e.initErr = e.initialize()
})
// Wait for readiness checker goroutine to complete
<-e.checkerDone
// Give one final chance for TLS/tracing to become ready for buffer flushing
e.mu.RLock()
hasExporter := e.exporter != nil
e.mu.RUnlock()
if !hasExporter {
err := e.initialize()
e.mu.Lock()
e.initErr = err
e.mu.Unlock()
}
e.mu.Lock()
defer e.mu.Unlock()

235
logger/level_test.go Normal file
View File

@@ -0,0 +1,235 @@
package logger
import (
"context"
"log/slog"
"os"
"testing"
"time"
)
// TestParseLevel exercises ParseLevel across the supported upper- and
// lower-case level names, the empty-string default (Info), and an invalid
// input that must return an error.
func TestParseLevel(t *testing.T) {
	tests := []struct {
		name        string
		input       string
		expected    slog.Level
		expectError bool
	}{
		{"empty string", "", slog.LevelInfo, false},
		{"DEBUG upper", "DEBUG", slog.LevelDebug, false},
		{"debug lower", "debug", slog.LevelDebug, false},
		{"INFO upper", "INFO", slog.LevelInfo, false},
		{"info lower", "info", slog.LevelInfo, false},
		{"WARN upper", "WARN", slog.LevelWarn, false},
		{"warn lower", "warn", slog.LevelWarn, false},
		{"ERROR upper", "ERROR", slog.LevelError, false},
		{"error lower", "error", slog.LevelError, false},
		{"invalid level", "invalid", slog.LevelInfo, true},
	}

	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			level, err := ParseLevel(tt.input)

			if tt.expectError {
				if err == nil {
					t.Errorf("expected error for input %q, got nil", tt.input)
				}
			} else {
				if err != nil {
					t.Errorf("unexpected error for input %q: %v", tt.input, err)
				}
				if level != tt.expected {
					t.Errorf("expected level %v for input %q, got %v", tt.expected, tt.input, level)
				}
			}
		})
	}
}
// TestSetLevel verifies that SetLevel updates the package-level stderr log
// level, restoring the original level when the test finishes.
func TestSetLevel(t *testing.T) {
	// Store original level to restore later
	originalLevel := Level.Level()
	defer Level.Set(originalLevel)

	SetLevel(slog.LevelDebug)
	if Level.Level() != slog.LevelDebug {
		t.Errorf("expected Level to be Debug, got %v", Level.Level())
	}

	SetLevel(slog.LevelError)
	if Level.Level() != slog.LevelError {
		t.Errorf("expected Level to be Error, got %v", Level.Level())
	}
}
// TestSetOTLPLevel verifies that SetOTLPLevel updates the package-level OTLP
// log level independently, restoring the original level when the test finishes.
func TestSetOTLPLevel(t *testing.T) {
	// Store original level to restore later
	originalLevel := OTLPLevel.Level()
	defer OTLPLevel.Set(originalLevel)

	SetOTLPLevel(slog.LevelWarn)
	if OTLPLevel.Level() != slog.LevelWarn {
		t.Errorf("expected OTLPLevel to be Warn, got %v", OTLPLevel.Level())
	}

	SetOTLPLevel(slog.LevelDebug)
	if OTLPLevel.Level() != slog.LevelDebug {
		t.Errorf("expected OTLPLevel to be Debug, got %v", OTLPLevel.Level())
	}
}
// TestOTLPLevelHandler verifies the OTLP level wrapper: with OTLPLevel set to
// Warn, Enabled must reject Debug/Info and accept Warn/Error, and Handle must
// forward only records at or above the threshold to the wrapped handler.
func TestOTLPLevelHandler(t *testing.T) {
	// Create a mock handler that counts calls
	callCount := 0
	mockHandler := &mockHandler{
		handleFunc: func(ctx context.Context, r slog.Record) error {
			callCount++
			return nil
		},
	}

	// Set OTLP level to Warn (restored on exit)
	originalLevel := OTLPLevel.Level()
	defer OTLPLevel.Set(originalLevel)
	OTLPLevel.Set(slog.LevelWarn)

	// Create OTLP level handler
	handler := newOTLPLevelHandler(mockHandler)
	ctx := context.Background()

	// Test that Debug and Info are filtered out
	if handler.Enabled(ctx, slog.LevelDebug) {
		t.Error("Debug level should be disabled when OTLP level is Warn")
	}
	if handler.Enabled(ctx, slog.LevelInfo) {
		t.Error("Info level should be disabled when OTLP level is Warn")
	}

	// Test that Warn and Error are enabled
	if !handler.Enabled(ctx, slog.LevelWarn) {
		t.Error("Warn level should be enabled when OTLP level is Warn")
	}
	if !handler.Enabled(ctx, slog.LevelError) {
		t.Error("Error level should be enabled when OTLP level is Warn")
	}

	// Test that Handle respects level filtering
	now := time.Now()
	debugRecord := slog.NewRecord(now, slog.LevelDebug, "debug message", 0)
	warnRecord := slog.NewRecord(now, slog.LevelWarn, "warn message", 0)

	// Handle errors are ignored here; the mock never fails and the
	// assertions below are on the call count.
	handler.Handle(ctx, debugRecord)
	if callCount != 0 {
		t.Error("Debug record should not be passed to underlying handler")
	}

	handler.Handle(ctx, warnRecord)
	if callCount != 1 {
		t.Error("Warn record should be passed to underlying handler")
	}
}
// TestEnvironmentVariables verifies that setupStdErrHandler honors the
// LOG_LEVEL environment variable, both with and without a ConfigPrefix.
// Each case sets the env var, runs its testFunc, and restores the original
// env value and level afterwards.
func TestEnvironmentVariables(t *testing.T) {
	tests := []struct {
		name    string
		envVar  string
		envValue string
		// configPrefix documents the prefix under test.
		// NOTE(review): this field is never read by the loop below — each
		// testFunc sets ConfigPrefix itself; consider removing or wiring it up.
		configPrefix string
		testFunc     func(t *testing.T)
	}{
		{
			name:     "LOG_LEVEL sets stderr level",
			envVar:   "LOG_LEVEL",
			envValue: "ERROR",
			testFunc: func(t *testing.T) {
				// Reset the setup state
				resetLoggerSetup()

				// Call setupStdErrHandler which should read the env var
				handler := setupStdErrHandler()
				if handler == nil {
					t.Fatal("setupStdErrHandler returned nil")
				}

				if Level.Level() != slog.LevelError {
					t.Errorf("expected Level to be Error after setting LOG_LEVEL=ERROR, got %v", Level.Level())
				}
			},
		},
		{
			name:         "Prefixed LOG_LEVEL",
			envVar:       "TEST_LOG_LEVEL",
			envValue:     "DEBUG",
			configPrefix: "TEST",
			testFunc: func(t *testing.T) {
				ConfigPrefix = "TEST"
				defer func() { ConfigPrefix = "" }()

				resetLoggerSetup()

				handler := setupStdErrHandler()
				if handler == nil {
					t.Fatal("setupStdErrHandler returned nil")
				}

				if Level.Level() != slog.LevelDebug {
					t.Errorf("expected Level to be Debug after setting TEST_LOG_LEVEL=DEBUG, got %v", Level.Level())
				}
			},
		},
	}

	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			// Store original env value and level
			// NOTE(review): restoring with Setenv leaves a previously-unset
			// variable set to ""; t.Setenv would restore exactly — consider it.
			originalEnv := os.Getenv(tt.envVar)
			originalLevel := Level.Level()
			defer func() {
				os.Setenv(tt.envVar, originalEnv)
				Level.Set(originalLevel)
			}()

			// Set test environment variable
			os.Setenv(tt.envVar, tt.envValue)

			// Run the test
			tt.testFunc(t)
		})
	}
}
// mockHandler is a simple mock implementation of slog.Handler for testing.
// Only Handle is customizable (via handleFunc); Enabled always reports true,
// and WithAttrs/WithGroup return the receiver unchanged.
type mockHandler struct {
	handleFunc func(ctx context.Context, r slog.Record) error
}

// Enabled always returns true so level filtering is left entirely to the
// wrapper under test.
func (m *mockHandler) Enabled(ctx context.Context, level slog.Level) bool {
	return true
}

// Handle delegates to handleFunc when set; otherwise it is a no-op.
func (m *mockHandler) Handle(ctx context.Context, r slog.Record) error {
	if m.handleFunc != nil {
		return m.handleFunc(ctx, r)
	}
	return nil
}

// WithAttrs returns the receiver unchanged; attributes are ignored by this mock.
func (m *mockHandler) WithAttrs(attrs []slog.Attr) slog.Handler {
	return m
}

// WithGroup returns the receiver unchanged; groups are ignored by this mock.
func (m *mockHandler) WithGroup(name string) slog.Handler {
	return m
}
// resetLoggerSetup clears the cached package-level loggers between tests.
// The sync.Once guards themselves are not reset, so functions behind them
// must be exercised directly (as the environment-variable tests do with
// setupStdErrHandler).
func resetLoggerSetup() {
	// Reset package-level variables
	textLogger = nil
	otlpLogger = nil
	multiLogger = nil

	// Note: We can't easily reset sync.Once instances in tests,
	// but for the specific test we're doing (environment variable parsing)
	// we can test the setupStdErrHandler function directly
}

View File

@@ -18,12 +18,15 @@
// - Context propagation for request-scoped logging
//
// Environment variables:
// - DEBUG: Enable debug level logging (configurable prefix via ConfigPrefix)
// - LOG_LEVEL: Set stderr log level (DEBUG, INFO, WARN, ERROR) (configurable prefix via ConfigPrefix)
// - OTLP_LOG_LEVEL: Set OTLP log level independently (configurable prefix via ConfigPrefix)
// - DEBUG: Enable debug level logging for backward compatibility (configurable prefix via ConfigPrefix)
// - INVOCATION_ID: Systemd detection for timestamp handling
package logger
import (
	"context"
	"fmt"
	"log"
	"log/slog"
	"os"
	"strings"
@@ -43,6 +46,16 @@ import (
// This enables multiple services to have independent logging configuration.
var ConfigPrefix = ""
var (
	// Level controls the log level for the default stderr logger.
	// Can be changed at runtime (see SetLevel) to adjust logging verbosity.
	Level = new(slog.LevelVar) // zero value reports Info

	// OTLPLevel controls the log level for OTLP output.
	// Can be changed independently from the stderr logger level
	// (see SetOTLPLevel).
	OTLPLevel = new(slog.LevelVar) // zero value reports Info
)
var (
textLogger *slog.Logger
otlpLogger *slog.Logger
@@ -56,21 +69,64 @@ var (
mu sync.Mutex
)
// SetLevel sets the log level for the default stderr logger.
// This affects the primary application logger returned by Setup().
// Safe to call at runtime; slog.LevelVar is safe for concurrent use.
func SetLevel(level slog.Level) {
	Level.Set(level)
}
// SetOTLPLevel sets the log level for OTLP output, independently of the
// stderr level. This affects the OTLP logger and the OTLP portion of the
// multi-logger. Safe to call at runtime.
func SetOTLPLevel(level slog.Level) {
	OTLPLevel.Set(level)
}
// ParseLevel converts a string log level to slog.Level.
// Supported levels: "DEBUG", "INFO", "WARN", "ERROR" (case insensitive —
// mixed case such as "Debug" is accepted too, matching the documented
// contract; the previous implementation only accepted all-upper or
// all-lower spellings). An empty string maps to slog.LevelInfo.
// Returns slog.LevelInfo and an error for unrecognized level strings.
func ParseLevel(level string) (slog.Level, error) {
	switch strings.ToUpper(level) {
	case "":
		return slog.LevelInfo, nil
	case "DEBUG":
		return slog.LevelDebug, nil
	case "INFO":
		return slog.LevelInfo, nil
	case "WARN":
		return slog.LevelWarn, nil
	case "ERROR":
		return slog.LevelError, nil
	default:
		return slog.LevelInfo, fmt.Errorf("unknown log level: %s", level)
	}
}
func setupStdErrHandler() slog.Handler {
programLevel := new(slog.LevelVar) // Info by default
envVar := "DEBUG"
// Parse LOG_LEVEL environment variable
logLevelVar := "LOG_LEVEL"
if len(ConfigPrefix) > 0 {
envVar = ConfigPrefix + "_" + envVar
logLevelVar = ConfigPrefix + "_" + logLevelVar
}
if opt := os.Getenv(envVar); len(opt) > 0 {
if levelStr := os.Getenv(logLevelVar); levelStr != "" {
if level, err := ParseLevel(levelStr); err == nil {
Level.Set(level)
}
}
// Maintain backward compatibility with DEBUG environment variable
debugVar := "DEBUG"
if len(ConfigPrefix) > 0 {
debugVar = ConfigPrefix + "_" + debugVar
}
if opt := os.Getenv(debugVar); len(opt) > 0 {
if debug, _ := strconv.ParseBool(opt); debug {
programLevel.Set(slog.LevelDebug)
Level.Set(slog.LevelDebug)
}
}
logOptions := &slog.HandlerOptions{Level: programLevel}
logOptions := &slog.HandlerOptions{Level: Level}
if len(os.Getenv("INVOCATION_ID")) > 0 {
// don't add timestamps when running under systemd
@@ -88,6 +144,18 @@ func setupStdErrHandler() slog.Handler {
func setupOtlpLogger() *slog.Logger {
setupOtlp.Do(func() {
// Parse OTLP_LOG_LEVEL environment variable
otlpLevelVar := "OTLP_LOG_LEVEL"
if len(ConfigPrefix) > 0 {
otlpLevelVar = ConfigPrefix + "_" + otlpLevelVar
}
if levelStr := os.Getenv(otlpLevelVar); levelStr != "" {
if level, err := ParseLevel(levelStr); err == nil {
OTLPLevel.Set(level)
}
}
// Create our buffering exporter
// It will buffer until tracing is configured
bufferingExp := newBufferingExporter()
@@ -107,8 +175,9 @@ func setupOtlpLogger() *slog.Logger {
// Set global provider
global.SetLoggerProvider(provider)
// Create slog handler
handler := newLogFmtHandler(otelslog.NewHandler("common"))
// Create slog handler with level control
baseHandler := newLogFmtHandler(otelslog.NewHandler("common"))
handler := newOTLPLevelHandler(baseHandler)
otlpLogger = slog.New(handler)
})
return otlpLogger

48
logger/otlp_handler.go Normal file
View File

@@ -0,0 +1,48 @@
package logger
import (
"context"
"log/slog"
)
// otlpLevelHandler is a wrapper that enforces level checking for OTLP handlers.
// It consults the package-level OTLPLevel variable, allowing OTLP output
// verbosity to be controlled independently of stderr logging.
type otlpLevelHandler struct {
	next slog.Handler // downstream handler receiving records that pass the level check
}
// newOTLPLevelHandler wraps next so that records below the dynamic OTLPLevel
// threshold are dropped before reaching it.
func newOTLPLevelHandler(next slog.Handler) slog.Handler {
	return &otlpLevelHandler{next: next}
}
// Enabled reports whether a record at the given level should be processed by
// the OTLP handler, comparing against the dynamic package-level OTLPLevel.
func (h *otlpLevelHandler) Enabled(ctx context.Context, level slog.Level) bool {
	return level >= OTLPLevel.Level()
}
// Handle forwards the record to the wrapped handler when its level passes the
// OTLP level check; records below the threshold are silently dropped.
func (h *otlpLevelHandler) Handle(ctx context.Context, r slog.Record) error {
	if h.Enabled(ctx, r.Level) {
		return h.next.Handle(ctx, r)
	}
	return nil
}
// WithAttrs returns a new level-enforcing handler whose downstream handler
// carries the additional attributes.
func (h *otlpLevelHandler) WithAttrs(attrs []slog.Attr) slog.Handler {
	return &otlpLevelHandler{next: h.next.WithAttrs(attrs)}
}
// WithGroup returns a new level-enforcing handler whose downstream handler
// opens the named group.
func (h *otlpLevelHandler) WithGroup(name string) slog.Handler {
	return &otlpLevelHandler{next: h.next.WithGroup(name)}
}